text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def static_cdn_tag(path, cdn, cdn_only=False):
    """Return the URL of a static file, with handling of offline mode.

    Usage: ``{% %}``

    :param path: path of the static file (a leading '/' is stripped)
    :param cdn: base URL of the CDN
    :param cdn_only: when True (and not offline) return the bare CDN URL
    """
    relative = path.lstrip("/")
    offline = getattr(settings, "OFFLINE", False)
    if offline:
        # offline mode serves a vendored local copy instead of the CDN
        return static_url(join("vendor", relative))
    if cdn_only:
        return cdn
    return urljoin(cdn, relative)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def on_get(resc, req, resp):
    """Get the models identified by query parameters.

    We return an empty list if no models are found.
    """
    signals.pre_req.send(resc.model)
    signals.pre_req_search.send(resc.model)
    # forward the request's filtering/paging/sorting criteria to the store
    search_args = {
        'filters': req.filters,
        'pages': req.pages,
        'sorts': req.sorts,
    }
    found = goldman.sess.store.search(resc.rtype, **search_args)
    resp.serialize(to_rest_models(found, includes=req.includes))
    signals.post_req.send(resc.model)
    signals.post_req_search.send(resc.model)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def on_post(resc, req, resp):
    """Deserialize the payload & create the new single item."""
    signals.pre_req.send(resc.model)
    signals.pre_req_create.send(resc.model)
    payload = req.deserialize()
    model = resc.model()
    from_rest(model, payload)
    goldman.sess.store.create(model)
    # serialize the stored model before touching the response headers so a
    # serialization failure leaves the response untouched
    body = to_rest_model(model, includes=req.includes)
    resp.last_modified = model.updated
    resp.location = '%s/%s' % (req.path, model.rid_value)
    resp.status = falcon.HTTP_201
    resp.serialize(body)
    signals.post_req.send(resc.model)
    signals.post_req_create.send(resc.model)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_credentials(self, credentials):
    """Reads credentials from configuration parameters.

    Each section represents an individual CredentialParams.

    :param credentials: configuration parameters to be read
    """
    # replace any previously loaded credentials
    self._items.clear()
    for name in credentials.get_key_names():
        raw = credentials.get_as_nullable_string(name)
        self._items.append(CredentialParams.from_tuples([name, raw]))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def store(self, correlation_id, key, credential):
    """Stores credential parameters into the store.

    :param correlation_id: (optional) transaction id to trace execution through call chain.
    :param key: a key to uniquely identify the credential parameters.
    :param credential: a credential parameters to be stored; when None,
        the entry under ``key`` is removed instead.
    """
    # `is not None` (rather than `!= None`) avoids invoking a custom __eq__
    if credential is not None:
        self._items.put(key, credential)
    else:
        self._items.remove(key)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def CheckInputArgs(*interfaces):
    """Decorator factory validating positional arguments against interfaces.

    Must provide at least one interface; the last one may be repeated to
    cover any number of trailing arguments.

    :param interfaces: interface objects exposing ``providedBy(obj)``
    :raises TypeError: at call time, when an argument provides neither the
        interface declared for its position nor the (repeatable) last one.
    """
    l = len(interfaces)

    def wrapper(func):
        def check_args(self, *args, **kw):
            for i in range(len(args)):
                if (l > i and interfaces[i].providedBy(args[i])) or interfaces[-1].providedBy(args[i]):
                    continue
                # Python 3 raise syntax (the original used the Python 2
                # `raise TypeError, msg` form, a SyntaxError on Python 3)
                if l > i:
                    raise TypeError('arg %s does not implement %s' % (args[i], interfaces[i]))
                raise TypeError('arg %s does not implement %s' % (args[i], interfaces[-1]))
            # propagate the wrapped function's return value
            # (the original called func but discarded its result)
            return func(self, *args, **kw)
        return check_args
    return wrapper
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def group_by_key_func(iterable, key_func):
    """Group elements of *iterable* by the value of *key_func*.

    Returns a dictionary (``defaultdict(list)``) mapping each key produced
    by ``key_func(element)`` to the list of elements producing that key,
    preserving input order within each group.

    >>> dict(group_by_key_func(['a', 'bb', 'd'], len))
    {1: ['a', 'd'], 2: ['bb']}
    """
    grouped = defaultdict(list)
    for element in iterable:
        grouped[key_func(element)].append(element)
    return grouped
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def formatedTime(ms):
    """Convert milliseconds into a human readable time string.

    Examples: '1m', '16m 40s', '2d 7h 33m 20.123s'

    :param ms: duration in milliseconds (falsy values yield '')
    :return: space-separated units with zero-valued units omitted
    """
    if not ms:
        return ''
    seconds = ms / 1000.0
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    pieces = []
    for amount, unit in ((days, 'd'), (hours, 'h'), (minutes, 'm'), (seconds, 's')):
        if amount:
            # %g drops trailing zeros ('40' not '40.0', '20.123' stays)
            pieces.append('%g%s' % (amount, unit))
    return ' '.join(pieces)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _check_connectivity(self, err):
    ''' a method to check connectivity as source of error

    Probes ``self.uptime_ssl`` with a GET; when the probe itself fails,
    connectivity is presumed to be the root cause and a ConnectionError
    with diagnostic details is raised instead of the original error.

    :param err: the exception originally raised by the caller; re-raised
        when the connectivity probe succeeds
    :raises ConnectionError: when the probe GET fails
    '''
    try:
        import requests
        requests.get(self.uptime_ssl)
    # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # still propagate
    except Exception:
        from requests import Request
        request_object = Request(method='GET', url=self.uptime_ssl)
        request_details = self.handle_requests(request_object)
        self.printer('ERROR.')
        raise ConnectionError(request_details['error'])
    self.printer('ERROR.')
    raise err
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _request(self, **kwargs):
    ''' a helper method for processing all request types

    :param kwargs: keyword arguments forwarded to ``requests.request``
    :return: tuple of (response or None, error string, status code int)
    '''
    response = None
    error = ''
    code = 0
    # send request
    from requests import request
    try:
        response = request(**kwargs)
        # handle response
        if self.handle_response:
            response, error, code = self.handle_response(response)
        else:
            code = response.status_code
    # handle errors
    except Exception as err:
        from requests import Request
        request_object = Request(**kwargs)
        try:
            request_details = self.handle_requests(request_object)
            error = request_details['error']
        # narrowed from a bare `except:`; if diagnostics fail too,
        # fall back to the original error text
        except Exception:
            error = str(err)
    return response, error, code
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_request(self, url, params=None, **kwargs):
    ''' a method to catch and report http get request connectivity errors '''
    # construct request kwargs; caller-supplied kwargs win on conflicts
    merged = {'method': 'GET', 'url': url, 'params': params}
    merged.update(kwargs)
    # send request and handle response
    return self._request(**merged)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _post_request(self, url, data=None, json=None, **kwargs):
    ''' a method to catch and report http post request connectivity errors '''
    # construct request kwargs; caller-supplied kwargs win on conflicts
    merged = {'method': 'POST', 'url': url, 'data': data, 'json': json}
    merged.update(kwargs)
    # send request and handle response
    return self._request(**merged)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _options_request(self, url, **kwargs):
    ''' a method to catch and report http options request connectivity errors '''
    # construct request kwargs; caller-supplied kwargs win on conflicts
    merged = {'method': 'OPTIONS', 'url': url}
    merged.update(kwargs)
    # send request and handle response
    return self._request(**merged)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_soft_selectors(ids_and_clean_visible, start_num_tokens='10',
                        max_num_tokens='20', filter_punctuation='0'):
    '''External interface for dossier.models.soft_selectors.

    This scans through `num_tokens` values between `start_num_tokens`
    and `max_num_tokens` and calls `find_soft_selectors_at_n` looking
    for results.

    All of the params can be passed from URL parameters, in which case
    they can be strings and this function will type cast them
    appropriately.
    '''
    ## URL parameters arrive as strings; normalize to proper types
    start_num_tokens = int(start_num_tokens)
    max_num_tokens = int(max_num_tokens)
    filter_punctuation = bool(int(filter_punctuation))
    if not ids_and_clean_visible:
        logger.info('find_soft_selectors called with no ids_and_clean_visible')
        return []
    current_results = []  ## results from current n
    previous_results = []  ## previous results from last n
    overall_results = []  ## overall results to return
    for num_tokens in range(start_num_tokens, max_num_tokens + 1):
        ## update this here
        previous_results = current_results
        results_at_n = find_soft_selectors_at_n(
            ids_and_clean_visible, num_tokens, filter_punctuation)
        if len(results_at_n) == 0:
            break
        ## results_at_n appears to be sorted best-first, so index 0 holds
        ## the top score -- confirm against find_soft_selectors_at_n
        best_score = results_at_n[0]['score']
        ## i.e. the initial condition is they all have the same score
        idx_at_second = len(results_at_n)
        for idx, result in enumerate(results_at_n):
            if result['score'] < best_score:
                idx_at_second = idx
                break
        ## keep only the entries tied for the best score
        current_results = results_at_n[0:idx_at_second]
        if num_tokens == 8:
            ## NOTE(review): looks like leftover debug logging for n == 8;
            ## '%d' truncates if score is a float -- confirm intent
            for r in results_at_n:
                logger.info('%s --- score: %d' % (r['phrase'], r['score']))
        if previous_results == []:
            logger.info('Previous results are empty. Continuing.')
            continue
        ## now, the main idea is to figure out if any strings from previous
        ## are substrings of those from current
        ## (with the scores fixed at the max for that subphrase).
        ## when they stop being substrings
        ## then those are completed phrases and should be returned as a result
        for prev_result in previous_results:
            is_subbed_and_same_score = False
            for curr_result in current_results:
                if prev_result['phrase'] in curr_result['phrase'] and \
                   prev_result['score'] == curr_result['score']:
                    is_subbed_and_same_score = True
                    break
            if not is_subbed_and_same_score:  ## then it's a honest result
                prev_result['n'] = num_tokens - 1
                overall_results.append(prev_result)
        if len(current_results) == 0:
            ## we got them all
            ## (we still had to collect the previous results)
            ## that's why this break comes after the previous for loop
            break
    ## also add results from current_results at final n
    for result in current_results:
        result['n'] = num_tokens
        overall_results.append(result)
    ## sort by score then by length
    overall_results.sort(key=itemgetter('score', 'n'), reverse=True)
    logger.info('OVERALL RESULTS: %d' % len(overall_results))
    # for idx, result in enumerate(overall_results):
    #     logger.info('%d. %s --- score: %f , n = %d, hits=%d' %
    #                 (idx, result['phrase'], result['score'], result['n'], len(result['hits']))
    #                 )
    return overall_results
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_ngram_corpus(corpus_clean_visibles, num_tokens, filter_punctuation,
                      zoning_rules=False):
    '''takes a list of clean_visible texts, such as from StreamItems or
    FCs, tokenizes all the texts, and constructs n-grams using
    `num_tokens` sized windows.

    ``corpus_clean_visibles`` -- list of unicode strings
    ``num_tokens`` --- the n of the n-grams
    ``filter_punctuation`` --- if True, punctuation is filtered

    :return: list of lists; one list of n-gram strings per kept document
    '''
    ## TODO: generalize this zoning code, so that it works on many
    ## sites in the HT domain; consider finishing streamcorpus-zoner
    ## to do this.
    if filter_punctuation:
        ## word tokenizer that removes punctuation
        tokenize = RegexpTokenizer(r'\w+').tokenize
        backpage_string = 'backpage'
        end_string = 'Poster'
    else:
        #tokenize = word_tokenize
        ## plain whitespace split; the old `string.split(s)` helper is
        ## Python-2-only, so use the string method directly
        tokenize = lambda s: s.split()
        backpage_string = 'backpage.com'
        end_string = 'Poster\'s'
    corpus = list()
    for clean_vis in corpus_clean_visibles:
        ## crudely skip pages that have "error"
        ## BUGFIX: regex flags must be OR'ed together; `re.I & re.UNICODE`
        ## evaluates to 0, silently disabling case-insensitive matching
        if re.search(u'error', clean_vis, re.I | re.UNICODE):
            continue
        ## make tokens
        tokens = tokenize(clean_vis)  ## already a unicode string
        if zoning_rules:
            ## filter out non backpage pages
            if backpage_string not in tokens:
                continue
            ## string that signals the beginning of the body
            try:
                idx0 = tokens.index('Reply')
            except ValueError:
                continue
            ## string that signals the end of the body
            try:
                idx1 = tokens.index(end_string)
            except ValueError:
                continue
            tokens = tokens[idx0:idx1]
        ## make ngrams, attach to make strings
        ngrams_strings = list()
        for ngram_tuple in ngrams(tokens, num_tokens):
            # ## attempt to remove unwanted phrases
            ## score with many_stop_words and drop bad tuples
            # stop_count = sum([int(bool(tok.lower() in stop_words))
            #                   for tok in ngram_tuple])
            # if stop_count > num_tokens / 1.5:
            #     continue
            ## remove ones with many repeated words
            if len(set(ngram_tuple)) < len(ngram_tuple) / 2:
                continue
            ## this adds ngrams for the current doc
            ngrams_strings.append(' '.join(ngram_tuple))
        ## this adds a list of all the ngrams from the current doc
        ## to the corpus list
        corpus.append(ngrams_strings)
    return corpus
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ids_and_clean_visible_from_streamcorpus_chunk_path(corpus_path):
    '''converts a streamcorpus.Chunk file into the structure that is
    passed by the search engine to find_soft_selectors

    :param corpus_path: path to a streamcorpus.Chunk file
    :return: list of (stream_id, clean_visible unicode, {}) tuples
    '''
    ## pipeline stages used to generate clean_visible for StreamItems
    ## that lack it
    ch = clean_html(clean_html.default_config)
    cv = clean_visible(clean_visible.default_config)
    ids_and_clean_visible = []
    for si in streamcorpus.Chunk(path=corpus_path):
        if not si.body.clean_visible:
            ## attempt to make clean_visible
            if not si.body.raw:
                logger.critical('no raw content, so skipping: %r', si.abs_url)
                continue
            ## capture the URL before the stages replace `si`
            abs_url = si.abs_url
            si = ch(si, {})
            if not si:
                logger.critical(
                    'failed to make clean_html, so skipping: %r', abs_url)
                continue
            si = cv(si, {})
            if not si or not si.body.clean_visible:
                logger.critical(
                    'failed to make clean_visible, so skipping: %r', abs_url)
                continue
        ## clean_visible is stored as utf8 bytes; decode for downstream use
        rec = (si.stream_id, si.body.clean_visible.decode('utf8'), {})
        ids_and_clean_visible.append(rec)
    return ids_and_clean_visible
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pre_create(sender, model):
    """Callback before creating a new login.

    Without a password during create we are forced to set the
    password to something random & complex.
    """
    if not isinstance(model, Model):
        return
    if not model.password:
        model.password = random_str()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pre_save(sender, model):
    """Hash the password if being changed."""
    if not isinstance(model, Model):
        return
    if 'password' in model.dirty_fields:
        model.salt, model.password = gen_salt_and_hash(model.password)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def auth_creds(cls, username, password):
    """Validate a username & password.

    A token is returned if auth is successful & can be used to
    authorize future requests or ignored entirely if the
    authorization mechanism does not need it.

    :return: string token
    """
    login = goldman.sess.store.find(cls.RTYPE, 'username', username)
    # guard-style checks, most specific failure first
    if not login:
        raise AuthRejected(detail='No login found by that username. Spelling error?')
    if login.locked:
        raise AuthRejected(detail='The login account is currently locked out.')
    if not cmp_val_salt_hash(password, login.salt, login.password):
        raise AuthRejected(detail='The password provided is incorrect. Spelling error?')
    if not login.token:
        login.token = random_str()
    login.post_authenticate()
    return login.token
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def auth_token(cls, token):
    """Callback method for OAuth 2.0 bearer token middleware."""
    login = goldman.sess.store.find(cls.RTYPE, 'token', token)
    # guard-style checks, most specific failure first
    if not login:
        raise AuthRejected(detail='No login found with that token. It may have been revoked.')
    if login.locked:
        raise AuthRejected(detail='The login account is currently locked out.')
    login.post_authenticate()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def post_authenticate(self):
    """Update the login_date timestamp.

    Initialize the thread local sess.login property with the
    authenticated login model. The login_date update will be debounced
    so writes don't occur on every hit of the API. If the login_date
    was modified within 15 minutes then don't update it.
    """
    goldman.sess.login = self
    now = dt.now()
    if not self.login_date:
        self.login_date = now
    else:
        # BUGFIX: use total_seconds() -- timedelta.seconds wraps at one
        # day, so a login_date older than 24h could look "recent" and
        # never be refreshed
        min_since_updated = (now - self.login_date).total_seconds() / 60
        if min_since_updated > 15:
            self.login_date = now
    if self.dirty:
        store = goldman.sess.store
        store.update(self)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_username(self, data, value):
    """Ensure the username is unique.

    If the login is being created then simply check if the username is
    in the store & fail. Otherwise if the login is being updated check
    if the existing rid on a username match is the same as the login
    being updated otherwise fail.
    """
    store = goldman.sess.store
    existing = store.find(data['rtype'], 'username', value)
    if not existing:
        return
    # only a match against the very login being updated is acceptable
    same_login = data['rid'] and data['rid'] == existing.rid
    if not same_login:
        raise ValidationError('username is already taken')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_log_level(value):
    """Converts numbers and strings to standard log level values.

    :param value: a value to be converted (number, string, or None)
    :return: converted log level; LogLevel.Info when None or unrecognized
    """
    # `is None` rather than `== None`: avoids custom __eq__ surprises
    if value is None:
        return LogLevel.Info
    value = str(value).upper()
    # table lookup replaces the long if/elif chain; unknown values -> Info
    mapping = {
        "0": LogLevel.Nothing, "NOTHING": LogLevel.Nothing, "NONE": LogLevel.Nothing,
        "1": LogLevel.Fatal, "FATAL": LogLevel.Fatal,
        "2": LogLevel.Error, "ERROR": LogLevel.Error,
        "3": LogLevel.Warn, "WARN": LogLevel.Warn, "WARNING": LogLevel.Warn,
        "4": LogLevel.Info, "INFO": LogLevel.Info,
        "5": LogLevel.Debug, "DEBUG": LogLevel.Debug,
        "6": LogLevel.Trace, "TRACE": LogLevel.Trace,
    }
    return mapping.get(value, LogLevel.Info)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_string(level):
    """Converts log level to a string.

    :param level: a log level to convert
    :return: log level name string; "UNDEF" when unrecognized
    """
    names = {
        LogLevel.Fatal: "FATAL",
        LogLevel.Error: "ERROR",
        LogLevel.Warn: "WARN",
        LogLevel.Info: "INFO",
        LogLevel.Debug: "DEBUG",
        LogLevel.Trace: "TRACE",
    }
    return names.get(level, "UNDEF")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def commit(message=COMMON_COMMIT_MESSAGE, capture=True):
    """ git commit with common commit message when omitted. """
    # NOTE(review): `capture` is accepted but unused here -- presumably
    # kept for signature compatibility with sibling fabric tasks; confirm.
    # warn_only lets the task continue when there is nothing to commit
    env.warn_only = True
    local(u'git commit -am"{}"'.format(message))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_filestore_instance(img_dir=None, data_dir=None):
    """Return an instance of FileStore.

    Instances are cached per (img_dir, data_dir) pair so repeated calls
    with the same arguments share a single FileStore.
    """
    global _filestore_instances
    cache_key = "%s:%s" % (img_dir, data_dir)
    if cache_key not in _filestore_instances:
        _filestore_instances[cache_key] = FileStore(
            img_dir=img_dir, data_dir=data_dir
        )
    return _filestore_instances[cache_key]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_s3store_instance(bucket):
    """Return an instance of S3Store.

    Instances are cached per bucket name so repeated calls with the
    same bucket share a single S3Store.
    """
    global _s3store_instances
    cache_key = "%s" % bucket
    if cache_key not in _s3store_instances:
        _s3store_instances[cache_key] = S3Store(bucket=bucket)
    return _s3store_instances[cache_key]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compare_dict(new_dict, old_dict, change_list=None, root=None):
    '''
    a method for recursively listing changes made to a dictionary

    :param new_dict: dictionary with new key-value pairs
    :param old_dict: dictionary with old key-value pairs
    :param change_list: list of differences between old and new
    :param root: list recording the path to the root of the main object
    :return: list of differences between old and new
    '''
    from copy import deepcopy
    # allow top-level calls without pre-built accumulators (the previous
    # code crashed on the documented defaults of None)
    if change_list is None:
        change_list = []
    if root is None:
        root = []
    new_keys = set(new_dict.keys())
    old_keys = set(old_dict.keys())
    missing_keys = old_keys - new_keys
    extra_keys = new_keys - old_keys
    same_keys = new_keys.intersection(old_keys)
    for key in missing_keys:
        new_path = deepcopy(root)
        new_path.append(key)
        change_list.append({'action': 'DELETE', 'value': None, 'path': new_path})
    for key in extra_keys:
        # direct lookup; the previous code scanned every item of new_dict
        # for each extra key (accidental O(n^2))
        new_path = deepcopy(root)
        new_path.append(key)
        change_list.append({'action': 'ADD', 'value': new_dict[key], 'path': new_path})
    for key in same_keys:
        new_path = deepcopy(root)
        new_path.append(key)
        if new_dict[key].__class__ != old_dict[key].__class__:
            # a type change is always reported as a whole-value UPDATE
            change_list.append({'action': 'UPDATE', 'value': new_dict[key], 'path': new_path})
        elif isinstance(new_dict[key], dict):
            _compare_dict(new_dict[key], old_dict[key], change_list, new_path)
        elif isinstance(new_dict[key], list):
            _compare_list(new_dict[key], old_dict[key], change_list, new_path)
        elif isinstance(new_dict[key], set):
            _compare_set(new_dict[key], old_dict[key], change_list, new_path)
        elif new_dict[key] != old_dict[key]:
            change_list.append({'action': 'UPDATE', 'value': new_dict[key], 'path': new_path})
    return change_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compare_list(new_list, old_list, change_list=None, root=None):
    '''
    a method for recursively listing changes made to a list

    :param new_list: list with new values
    :param old_list: list with old values
    :param change_list: list of differences between old and new
    :param root: list recording the path to the root of the main object
    :return: list of differences between old and new
    '''
    from copy import deepcopy
    new_len = len(new_list)
    old_len = len(old_list)
    if old_len > new_len:
        shared = new_len
        # report removals from the tail, highest index first
        for idx in reversed(range(new_len, old_len)):
            removal_path = deepcopy(root)
            removal_path.append(idx)
            change_list.append({'action': 'REMOVE', 'value': None, 'path': removal_path})
    elif new_len > old_len:
        shared = old_len
        # everything past the old length is reported as a single APPEND
        appended = [new_list[idx] for idx in range(old_len, new_len)]
        change_list.append({'action': 'APPEND', 'value': appended, 'path': deepcopy(root)})
    else:
        shared = new_len
    for idx in range(shared):
        item_path = deepcopy(root)
        item_path.append(idx)
        fresh = new_list[idx]
        stale = old_list[idx]
        if fresh.__class__ != stale.__class__:
            change_list.append({'action': 'UPDATE', 'value': fresh, 'path': item_path})
        elif isinstance(fresh, dict):
            _compare_dict(fresh, stale, change_list, item_path)
        elif isinstance(fresh, list):
            _compare_list(fresh, stale, change_list, item_path)
        elif isinstance(fresh, set):
            _compare_set(fresh, stale, change_list, item_path)
        elif fresh != stale:
            change_list.append({'action': 'UPDATE', 'value': fresh, 'path': item_path})
    return change_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _compare_set(new_set, old_set, change_list, root):
    '''
    a method for listing changes made to a set

    :param new_set: set with new values
    :param old_set: set with old values
    :param change_list: list of differences between old and new
    :param root: list recording the path to the root of the main object
    :return: list of differences between old and new
    '''
    from copy import deepcopy
    # all entries for this set share one copied path (sets are unordered,
    # so individual elements have no index)
    path = deepcopy(root)
    for removed in old_set - new_set:
        change_list.append({'action': 'REMOVE', 'key': None, 'value': removed, 'path': path})
    for added in new_set - old_set:
        change_list.append({'action': 'ADD', 'key': None, 'value': added, 'path': path})
    return change_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_mongo(qry):
    """Transform a simple query with one or more filter expressions into a
    MongoDB query expression.

    :param qry: Filter expression(s), see below for details.
    :type qry: str or list
    :return: MongoDB query
    :rtype: dict
    :raises: BadExpression, if one of the input expressions cannot be parsed

    Expressions have three parts, called in order ``field``, ``operator``,
    and ``value``.

    - `field` is the name of a field in a MongoDB document
    - `value` is the value to compare against:
        * numeric
        * string, you MUST use 'single' or "double" quotes
        * boolean: true, false
    - `operator` is a comparison operator:
        * inequalities: >, <, =, <=, >=, !=
        * PCRE regular expression: ~
        * data type: int, float, string, or bool
        * exists: boolean (true/false) whether field exists in record
        * size: for array fields, an inequality for the array size, given
          as a suffix to the operator: size>, size<

    Multiple expressions can be a single string, or a list. In either
    case, the form is a "disjunction of conjunctions". In the string
    form, "and" joins expressions into groups and "or" joins one or more
    expression groups. In the list form, the inner list is a group of
    "and"ed expressions and the outer list "or"s the groups together.
    Parentheses before or after the "or"ed groups (even non-sensical ones
    like '(((') are ignored, so they can be used to clarify groupings.

    Example result for two "or"ed groups::

        {'$or': [{'a': {'$gt': 3}, 'b': 'hello'},
                 {'c': {'$gt': 1}, 'd': 'goodbye'}]}
    """
    rev = False  # filters, not constraints
    # special case for empty string/list
    if qry == "" or qry == []:
        return {}
    # break input into groups of filters
    unpar = lambda s: s.strip().strip('()')  # drop surrounding parens/space
    if isinstance(qry, str):
        groups = []
        if _TOK_OR in qry:
            groups = [unpar(g).split(_TOK_AND) for g in qry.split(_TOK_OR)]
        else:
            groups = [unpar(qry).split(_TOK_AND)]
    else:
        # list form: a list of lists is already grouped; a flat list is
        # a single "and" group
        if isinstance(qry[0], list) or isinstance(qry[0], tuple):
            groups = qry
        else:
            groups = [qry]
    # generate mongodb queries for each filter group
    filters = []
    for filter_exprs in groups:
        mq = MongoQuery()
        for e in filter_exprs:
            try:
                e = unpar(e)
            except AttributeError:
                raise BadExpression(e, "expected string, got '{t}'".format(t=type(e)))
            try:
                constraint = Constraint(*parse_expr(e))
            except ValueError as err:
                raise BadExpression(e, err)
            clause = MongoClause(constraint, rev=rev)
            mq.add_clause(clause)
        filters.append(mq.to_mongo(rev))
    # combine together filters, or strip down the one filter
    if len(filters) > 1:
        result = {'$or': filters}
    else:
        result = filters[0]
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_expr(e):
    """Parse a single constraint expression.

    Legal expressions are defined by the regular expression `relation_re`.

    :param e: Expression
    :type e: str
    :return: Tuple of field, operator, and value
    :rtype: tuple
    """
    match = relation_re.match(e)
    if match is None:
        raise ValueError("error parsing expression '{}'".format(e))
    field, op, raw = match.groups()
    # coerce the value in precedence order: int, float, boolean, string
    try:
        return field, op, int(raw)
    except ValueError:
        pass
    try:
        return field, op, float(raw)
    except ValueError:
        pass
    try:
        return field, op, {'true': True, 'false': False}[raw.lower()]
    except KeyError:
        pass
    if re.match(r'".*"|\'.*\'', raw):
        # strip quotes from strings
        raw = raw[1:-1]
    return field, op, raw
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _set_size_code(self):
    """Set the code for a size operation.

    Leaves ``self._size_code`` as None for non-size operators, SZ_EQ for
    a bare size operator, or the SZ_MAPPING entry for a suffixed one.

    :raise: ValueError when the size suffix is not recognized
    """
    op = self._op
    if not op.startswith(self.SIZE):
        self._size_code = None
        return
    suffix = op[len(self.SIZE):]
    if not suffix:
        # bare "size" means equality
        self._size_code = self.SZ_EQ
        return
    self._size_code = self.SZ_MAPPING.get(suffix, None)
    if self._size_code is None:
        raise ValueError('invalid "{}" suffix "{}"'.format(self.SIZE, suffix))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def passes(self, value):
    """Does the given value pass this constraint?

    :return: True,None if so; False,<expected> if not
    :rtype: tuple
    """
    try:
        matched = self._op.compare(value, self.value)
    except ValueError as err:
        # comparison itself failed; report the reason instead of a value
        return False, str(err)
    return (True, None) if matched else (False, self.value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_constraint(self, op, val):
    """Add new constraint.

    :param op: Constraint operator
    :type op: ConstraintOperator
    :param val: Constraint value
    :type val: str,Number
    :raise: ValueError if combination of constraints is illegal
    """
    if self.constraints:
        # equality and existence cannot be combined with prior constraints
        if op.is_equality():
            clist = ', '.join(map(str, self.constraints))
            raise ValueError('Field {}: equality operator cannot be combined '
                             'with others: {}'.format(self._field.name, clist))
        if op.is_exists():
            raise ValueError('Field {}: existence is implied '
                             'by other operators'.format(self._field.name))
    self.constraints.append(Constraint(self._field, op, val))
    # track properties used later for conflict detection
    if self._field.has_subfield():
        self._array = True
    elif op.is_inequality():
        self._range = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_conflicts(self):
    """Get conflicts in constraints, if any.

    :return: Description of each conflict, empty if none.
    :rtype: list(str)
    """
    if self._array and self._range:
        return ['cannot use range expressions on arrays']
    return []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_existence(self, rev):
    """Add existence constraint for the field.

    This is necessary because the normal meaning of 'x > 0' is:
    x > 0 and is present. Without the existence constraint, MongoDB
    will treat 'x > 0' as: 'x' > 0 *or* is absent. Of course, if the
    constraint is already about existence, nothing is done.

    :rtype: None
    """
    if len(self.constraints) == 1:
        only = self.constraints[0].op
        # both 'exists' and strict equality don't require the extra clause
        if only.is_exists() or only.is_equality():
            return
    # value is False if reversed, otherwise True
    constraint = Constraint(self._field, ConstraintOperator.EXISTS, not rev)
    self._existence_constraints.append(constraint)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create(self, constraint, exists_main):
    """Create MongoDB query clause for a constraint.

    :param constraint: The constraint
    :type constraint: Constraint
    :param exists_main: Put exists into main clause
    :type exists_main: bool
    :return: New clause
    :rtype: MongoClause
    :raise: ValueError if value doesn't make sense for operator
    """
    # NOTE(review): the method actually returns a (loc, expr) tuple —
    # presumably the pieces used to build a MongoClause; confirm the
    # :rtype: above against the caller.
    c = constraint  # alias
    # reversed queries flip the operator itself, except for the
    # exists/size/type branches which handle reversal specially below
    op = self._reverse_operator(c.op) if self._rev else c.op
    mop = self._mongo_op_str(op)
    # build the clause parts: location and expression
    loc = MongoClause.LOC_MAIN  # default location
    if op.is_exists():
        loc = MongoClause.LOC_MAIN2 if exists_main else MongoClause.LOC_MAIN
        assert (isinstance(c.value, bool))
        # for exists, reverse the value instead of the operator
        not_c_val = not c.value if self._rev else c.value
        expr = {c.field.name: {mop: not_c_val}}
    elif op.is_size():
        if op.is_variable():
            # variables only support equality, and need to be in $where
            loc = MongoClause.LOC_WHERE
            js_op = '!=' if self._rev else '=='
            expr = 'this.{}.length {} this.{}'.format(c.field.name, js_op, c.value)
        elif op.is_size_eq() and not self._rev:
            # plain (non-reversed) size equality maps onto Mongo's $size
            expr = {c.field.name: {'$size': c.value}}
        else:
            # inequalities also need to go into $where clause
            self._check_size(op, c.value)
            loc = MongoClause.LOC_WHERE
            szop = ConstraintOperator(op.size_op)
            if self._rev:
                szop.reverse()
            js_op = self._js_op_str(szop)
            expr = 'this.{}.length {} {}'.format(c.field.name, js_op, c.value)
    elif op.is_type():
        # type checks are expressed with JS typeof in a $where clause
        loc = MongoClause.LOC_WHERE
        type_name = self.JS_TYPES.get(c.value, None)
        if type_name is None:
            raise RuntimeError('Could not get JS type for {}'.format(c.value))
        typeop = '!=' if self._rev else '=='
        expr = 'typeof this.{} {} "{}"'.format(c.field.name, typeop, type_name)
    elif op.is_regex():
        expr = {c.field.name: {mop: c.value.pattern}}
    else:
        if mop is None:
            # bare equality: {field: value}
            expr = {c.field.name: c.value}
        elif isinstance(c.value, bool):
            # can simplify boolean {a: {'$ne': True/False}} to {a: False/True}
            not_c_val = not c.value if self._rev else c.value
            expr = {c.field.name: not_c_val}
        else:
            expr = {c.field.name: {mop: c.value}}
    return loc, expr
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_clause(self, clause):
    """Add a new clause to the existing query.

    :param clause: The clause to add
    :type clause: MongoClause
    :return: None
    :raise: RuntimeError for an unrecognized clause location
    """
    # dispatch on the clause's declared location in the final query
    loc = clause.query_loc
    if loc == MongoClause.LOC_MAIN:
        target = self._main
    elif loc == MongoClause.LOC_MAIN2:
        target = self._main2
    elif loc == MongoClause.LOC_WHERE:
        target = self._where
    else:
        raise RuntimeError('bad clause location: {}'.format(loc))
    target.append(clause)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_mongo(self, disjunction=True):
    """Create from current state a valid MongoDB query expression.

    :param disjunction: if True (default), the main clauses and any
        $where clause are combined with $or; otherwise they are merged
        conjunctively into the top-level document
    :return: MongoDB query expression
    :rtype: dict
    """
    q = {}
    # add all the main clauses to `q`
    clauses = [e.expr for e in self._main]
    if clauses:
        if disjunction:
            if len(clauses) + len(self._where) > 1:
                q['$or'] = clauses
            else:
                # simplify 'or' of one thing
                q.update(clauses[0])
        else:
            for c in clauses:
                q.update(c)
    # add all the main2 clauses; these are not or'ed
    for c in (e.expr for e in self._main2):
        # add to existing stuff for the field
        for field in c:
            if field in q:
                q[field].update(c[field])
            else:
                q.update(c)
    # add where clauses, if any, to `q`
    if self._where:
        # join JS conditions; reversed constraints combine with OR
        wsep = ' || ' if self._where[0].is_reversed else ' && '
        where_clause = wsep.join([w.expr for w in self._where])
        if disjunction:
            if not '$or' in q:
                q['$or'] = []
            q['$or'].append({'$where': where_clause})
        else:
            q['$where'] = where_clause
    return q
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def submit(self, call, *args, **kwargs):
    """Submit a call for future execution

    :return: future for the call execution
    :rtype: StoredFuture
    """
    # queue the wrapped call, then make sure a worker exists to run it
    stored = StoredFuture(call, *args, **kwargs)
    self._queue.put(stored)
    self._ensure_worker()
    return stored
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _dismiss_worker(self, worker):
"""Dismiss ``worker`` unless it is still required""" |
self._workers.remove(worker)
if len(self._workers) < self._min_workers:
self._workers.add(worker)
return False
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _ensure_worker(self):
"""Ensure there are enough workers available""" |
while len(self._workers) < self._min_workers or len(self._workers) < self._queue.qsize() < self._max_workers:
worker = threading.Thread(
target=self._execute_futures,
name=self.identifier + '_%d' % time.time(),
)
worker.daemon = True
self._workers.add(worker)
worker.start() |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def s3_upload(acl, bucket, conn, content, content_type, path):
    """ Store an object in our an S3 bucket.

    :param acl: S3 ACL for the object
    :param bucket: S3 bucket to upload to
    :param content: a string representation of the object to upload
    :param content_type: a string MIMETYPE of the object that S3 should
                         be informed of
    :param path: an object specific portion of the S3 key name to be
                 passed to gen_url to generate the the location in S3 of
                 the new object
    :raise: IOError on any failure
    :return: S3 generated URL of the uploaded object
    """
    target_bucket = conn.get_bucket(bucket)
    # obj is the object that will be uploaded
    obj = Key(target_bucket)
    obj.content_type = content_type
    obj.key = path
    obj.set_contents_from_string(content)
    obj.set_acl(acl)
    return gen_url(bucket, path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_image(self, filename='palette.png', band_width=1, length=60,
             max_width=0, vertical=True, alpha_channel=False):
    """Creates an image from the palette.

    Args:
        filename (Optional[str]): filename of the saved file. Defaults
            to ``palette.png`` in the current working directory.
        band_width (Optional[int]): how wide each colour band should be,
            in pixels. Defaults to 1 pixel.
        length (Optional[int]): the length of the overall image in
            pixels. This is the dimension orthogonal to ``band_width``.
            Defaults to 60 pixels.
        max_width (Optional[int]): if set (>= 1), overrides
            ``band_width`` so the whole image is approximately this
            wide (rounded down to a whole number of equal bands).
        vertical (Optional[bool]): if the image runs vertical (``True``,
            default) or horizontal (``False``).
        alpha_channel (Optional[bool]): if ``True``, the created image
            will have an Alpha channel. Defaults to ``False``.
    """
    # max_width is approximate: int() truncation of the per-band width
    if max_width >= 1:
        band_width = int(max_width / len(self._colours))
    image_width = band_width * len(self._colours)
    mode = 'RGBA' if alpha_channel else 'RGB'
    my_image = Image.new(mode, (image_width, length))
    pixels = my_image.load()
    x = 0
    for my_colour in self._colours:
        # hoist the colour lookup out of the per-pixel loops
        rgb = my_colour.rgb()
        for _ in range(band_width):
            for y in range(length):
                pixels[x, y] = rgb
            x += 1
    if vertical:
        # bands are painted left-to-right; rotate for vertical output
        my_image = my_image.rotate(270)
    my_image.save(filename)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def blend(self, cycles=1):
    """Expand the existing Palette in-place by inserting the blending
    colour between every pair of neighbouring Colours.

    args:
        cycles (int): number of *blend* cycles to apply. (Default is 1)

    Each cycle inserts one intermediate colour between each pair, so
    several cycles quickly produce a long series of intermediate
    colours.

    .. seealso:: :py:func:`colourettu.blend`
    """
    for _ in range(int(cycles)):
        expanded = []
        previous = None
        for current in self._colours:
            if previous is not None:
                # insert the blend of this colour and its predecessor
                expanded.append(blend(current, previous))
            expanded.append(current)
            previous = current
        self._colours = expanded
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inject_init(init_path, readme_path, setup_kwargs):
    '''
        a method to add arguments to setup.py from module init file

    :param init_path: string with path to module __init__ file
    :param readme_path: string with path to module README.rst file
    :param setup_kwargs: dictionary with existing setup keyword arguments
    :return: dictionary with injected keyword arguments
    '''

    import re
    from os import path
    from copy import deepcopy

# retrieve init text
    if not path.exists(init_path):
        raise ValueError('%s is not a valid path' % init_path)
    with open(init_path) as init_file:
        init_text = init_file.read()

    def _find(key):
        # pull the value of a ``__key__ = '...'`` assignment, or ''
        # (raw string: '\s'/'\=' are invalid escapes in a plain literal)
        key_search = re.findall(r"__%s__\s?\=\s?'(.*?)'" % key, init_text)
        return key_search[0] if key_search else ''

# retrieve init settings copied into setup verbatim
    init_kwargs = {}
    for key in ('version', 'author', 'url', 'description', 'license'):
        init_kwargs[key] = _find(key)

# retrieve modifiable settings (remapped onto other setup keywords)
    mod_kwargs = {}
    for key in ('module', 'email', 'entry', 'authors'):
        mod_kwargs[key] = _find(key)
    if mod_kwargs['module']:
        init_kwargs['name'] = mod_kwargs['module']
    if mod_kwargs['entry']:
        init_kwargs['entry_points'] = {"console_scripts": [mod_kwargs['entry']]}
    if mod_kwargs['email']:
        init_kwargs['author_email'] = mod_kwargs['email']
        init_kwargs['maintainer_email'] = mod_kwargs['email']
    if mod_kwargs['authors']:
        del init_kwargs['author']
        init_kwargs['author_list'] = mod_kwargs['authors'].split(' ')

# add readme
    if not path.exists(readme_path):
        raise ValueError('%s is not a valid path' % readme_path)
    try:
        with open(readme_path) as readme_file:
            init_kwargs['long_description'] = str(readme_file.read())
    except Exception:
        raise ValueError('%s is not a valid text file.' % readme_path)

# merge kwargs, then purge keys whose value is empty
    setup_kwargs.update(**init_kwargs)
    updated_kwargs = deepcopy(setup_kwargs)
    for key, value in updated_kwargs.items():
        if not value:
            del setup_kwargs[key]

    return setup_kwargs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bump(dev=False, patch=False, minor=False, major=False, nocommit=False):
    """Bump version number and commit change.

    :param dev: start a ``.dev0`` pre-release, or advance an existing
        ``.devN`` segment
    :param patch: increment the patch (x.y.Z) component
    :param minor: increment the minor (x.Y.0) component (the default)
    :param major: increment the major (X.0.0) component
    :param nocommit: if True, skip the git add/commit step
    :raise ValueError: if more than one of patch/minor/major is given
    :raise EnvironmentError: if the git index has staged changes
    :return: the new version
    """
    if sum([int(x) for x in (patch, minor, major)]) > 1:
        raise ValueError('Only one of patch, minor, major can be incremented.')
    if check_staged():
        raise EnvironmentError('There are staged changes, abort.')
    with open(str(INIT_PATH)) as f:
        lines = f.readlines()
    for i, line in enumerate(lines):
        varmatch = re.match("__([a-z]+)__ = '([^']+)'", line)
        if varmatch:
            if varmatch.group(1) == 'version':
                version = Version(varmatch.group(2))
                # mutate the parsed version through its internal
                # namedtuple representation
                vdict = version._version._asdict()
                print('Current version:', version)
                increment_release = True
                if dev:
                    if vdict['dev']:
                        # already a dev release: just advance .devN
                        vdict['dev'] = (vdict['dev'][0], vdict['dev'][1] + 1)
                        increment_release = False
                        if sum([int(x) for x in (patch, minor, major)]) > 0:
                            raise ValueError('Cannot increment patch, minor, or major between dev versions.')
                    else:
                        # start a new .dev0 pre-release
                        vdict['dev'] = ('dev', 0)
                else:
                    if vdict['dev']:
                        # finalizing: dropping .devN is the whole bump
                        vdict['dev'] = None
                        increment_release = False
                if increment_release:
                    rel = vdict['release']
                    if major:
                        vdict['release'] = (rel[0] + 1, 0, 0)
                    elif patch:
                        vdict['release'] = (rel[0], rel[1], rel[2] + 1)
                    else:  # minor is default
                        vdict['release'] = (rel[0], rel[1] + 1, 0)
                version._version = _Version(**vdict)
                print('Version bumped to:', version)
                lines[i] = "__version__ = '{!s}'\n".format(version)
                break
    with open(str(INIT_PATH), 'w') as f:
        f.writelines(lines)
    if not nocommit:
        call(['git', 'add', 'bucketcache/__init__.py'])
        call(['git', 'commit', '-m', 'Bumped version number to {!s}'.format(version)])
    return version
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tag():
    """Tag current version.

    Reads ``__version__`` out of the package init file and creates an
    annotated git tag named after it.

    :raise EnvironmentError: if the working tree has unstaged changes
    """
    if check_unstaged():
        # message fixed: this branch fires on *unstaged* changes
        # (matching the wording used by release())
        raise EnvironmentError('There are unstaged changes, abort.')
    with open(str(INIT_PATH)) as f:
        metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", f.read()))
    version = metadata['version']
    check_output(['git', 'tag', version, '-m', 'Release v{}'.format(version)])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upload():
    """Upload source to PyPI using twine."""
    try:
        captured = check_output(['twine', 'upload'] + glob('dist/*'))
    except CalledProcessError:
        # rerun without capturing so the failure output is visible,
        # then propagate the original error
        call(['twine', 'upload'] + glob('dist/*'))
        raise
    print(captured.decode('utf-8'))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def release():
    """Bump version, tag, build, gen docs.

    :raise EnvironmentError: if the working tree has staged or
        unstaged changes
    """
    # refuse to release from a dirty working tree
    if check_staged():
        raise EnvironmentError('There are staged changes, abort.')
    if check_unstaged():
        raise EnvironmentError('There are unstaged changes, abort.')
    # bump first so the tag, build and docs all see the new version
    bump()
    tag()
    build()
    doc_gen()
    puts(colored.yellow("Remember to upload documentation and package:"))
    with indent(2):
        puts(colored.cyan("shovel doc.upload"))
        puts(colored.cyan("shovel version.upload"))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_rid(model, rid):
    """ Ensure the resource id is proper """
    rid_field = getattr(model, model.rid_field)
    # only numeric rid fields need syntax validation
    if not isinstance(rid_field, IntType):
        return
    try:
        int(rid)
    except (TypeError, ValueError):
        abort(exceptions.InvalidURL(**{
            'detail': 'The resource id {} in your request is not '
                      'syntactically correct. Only numeric type '
                      'resource id\'s are allowed'.format(rid)
        }))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find(model, rid):
    """ Find a model from the store by resource id """
    validate_rid(model, rid)
    found = goldman.sess.store.find(model.RTYPE, model.rid_field, rid)
    # a missing document aborts the request with a 404-style error
    if not found:
        abort(exceptions.DocumentNotFound)
    return found
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _from_rest_blank(model, props):
""" Set empty strings to None where allowed This is done on fields with `allow_blank=True` which takes an incoming empty string & sets it to None so validations are skipped. This is useful on fields that aren't required with format validations like URLType, EmailType, etc. """ |
blank = model.get_fields_by_prop('allow_blank', True)
for field in blank:
try:
if props[field] == '':
props[field] = None
except KeyError:
continue |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _from_rest_hide(model, props):
""" Purge fields not allowed during a REST deserialization This is done on fields with `from_rest=False`. """ |
hide = model.get_fields_by_prop('from_rest', False)
for field in hide:
try:
del props[field]
except KeyError:
continue |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _from_rest_ignore(model, props):
""" Purge fields that are completely unknown """ |
fields = model.all_fields
for prop in props.keys():
if prop not in fields:
del props[prop] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _from_rest_lower(model, props):
""" Lowercase fields requesting it during a REST deserialization """ |
for field in model.to_lower:
try:
props[field] = props[field].lower()
except (AttributeError, KeyError):
continue |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _from_rest_on_create(model, props):
""" Assign the default values when creating a model This is done on fields with `on_create=<value>`. """ |
fields = model.get_fields_with_prop('on_create')
for field in fields:
props[field[0]] = field[1] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _from_rest_on_update(model, props):
""" Assign the default values when updating a model This is done on fields with `on_update=<value>`. """ |
fields = model.get_fields_with_prop('on_update')
for field in fields:
props[field[0]] = field[1] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _from_rest_reject_update(model):
""" Reject any field updates not allowed on POST This is done on fields with `reject_update=True`. """ |
dirty = model.dirty_fields
fields = model.get_fields_by_prop('reject_update', True)
reject = []
for field in fields:
if field in dirty:
reject.append(field)
if reject:
mod_fail('These fields cannot be updated: %s' % ', '.join(reject)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_rest(model, props):
    """ Map the REST data onto the model

    Additionally, perform the following tasks:

        * set all blank strings to None where needed
        * purge all fields not allowed as incoming data
        * purge all unknown fields from the incoming data
        * lowercase certain fields that need it
        * merge new data with existing & validate
        * mutate the existing model
        * abort on validation errors
        * coerce all the values
    """
    req = goldman.sess.req
    # scrub the incoming payload before it ever touches the model
    for scrub in (_from_rest_blank, _from_rest_hide,
                  _from_rest_ignore, _from_rest_lower):
        scrub(model, props)
    # apply server-side defaults for the current operation
    if req.is_posting:
        _from_rest_on_create(model, props)
    elif req.is_patching:
        _from_rest_on_update(model, props)
    model.merge(props, validate=True)
    # protected-field updates are only rejected on PATCH
    if req.is_patching:
        _from_rest_reject_update(model)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _to_rest_hide(model, props):
""" Purge fields not allowed during a REST serialization This is done on fields with `to_rest=False`. """ |
hide = model.get_fields_by_prop('to_rest', False)
for field in hide:
try:
del props[field]
except KeyError:
continue |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _to_rest_includes(models, includes):
""" Fetch the models to be included The includes should follow a few basic rules: * the include MUST not already be an array member of the included array (no dupes) * the include MUST not be the same as the primary data if the primary data is a single resource object (no dupes) * the include MUST not be an array member of the primary data if the primary data an array of resource objects (no dupes) Basically, each included array member should be the only instance of that resource object in the entire restified data. """ |
included = []
includes = includes or []
if not isinstance(models, list):
models = [models]
for include in includes:
for model in models:
rel = getattr(model, include)
if hasattr(rel, 'model') and rel.model:
rel_models = [rel.model]
elif hasattr(rel, 'models') and rel.models:
rel_models = rel.models
for rel_model in rel_models:
if rel_model in models or rel_model in included:
continue
else:
included.append(rel_model)
for idx, val in enumerate(included):
included[idx] = _to_rest(val)
return included |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _to_rest_rels(model, props):
""" Move the relationships to appropriate location in the props All to_ones should be in a to_one key while all to_manys should be in a to_many key. """ |
props['to_many'] = {}
props['to_one'] = {}
for key in model.to_one:
try:
props['to_one'][key] = props.pop(key)
except KeyError:
continue
for key in model.to_many:
try:
props['to_many'][key] = props.pop(key)
except KeyError:
continue |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _to_rest(model, includes=None):
    """ Convert the model into a dict for serialization

    Notify schematics of the sparse fields requested while
    also forcing the resource id & resource type fields to always
    be present no matter the request. Additionally, any includes
    are implicitly added as well & automatically loaded.

    Then normalize the includes, hide private fields, & munge
    the relationships into a format the serializers are
    expecting.
    """
    includes = includes or []
    # sparse fieldsets requested for this resource type, if any
    sparse = goldman.sess.req.fields.get(model.rtype, [])
    if sparse:
        # always keep the id/type fields in a sparse response
        sparse += [model.rid_field, model.rtype_field]
        # NOTE(review): includes are appended only when sparse
        # fieldsets are active — confirm this is the intended scope
        sparse += includes
    props = model.to_primitive(
        load_rels=includes,
        sparse_fields=sparse,
    )
    # normalize to the generic 'rid'/'rtype' keys the serializers use
    props['rid'] = props.pop(model.rid_field)
    props['rtype'] = props.pop(model.rtype_field)
    _to_rest_hide(model, props)
    _to_rest_rels(model, props)
    return props
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_rest_model(model, includes=None):
    """ Convert the single model into a dict for serialization

    :return: dict
    """
    return {
        'data': _to_rest(model, includes=includes),
        'included': _to_rest_includes(model, includes=includes),
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_rest_models(models, includes=None):
    """ Convert the models into a dict for serialization

    models should be an array of single model objects that
    will each be serialized.

    :return: dict
    """
    return {
        'data': [_to_rest(model, includes=includes) for model in models],
        'included': _to_rest_includes(models, includes=includes),
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collect_by_type(self, typ):
    '''A more efficient way to collect nodes of a specified type than
    collect_nodes.
    '''
    # depth-first: this node first, then each child's matches in order
    matches = [self] if isinstance(self, typ) else []
    for child in self:
        matches += child.collect_by_type(typ)
    return matches
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_proxy(self, proxy, update=True):
    """ Set proxy for chrome session """
    # NOTE(review): the ``update`` argument is currently unused here
    changed = self.current_proxy != proxy
    self.current_proxy = proxy
    if proxy is None:
        # TODO: Need to be able to remove a proxy if one is set
        pass
    else:
        proxy_parts = cutil.get_proxy_parts(proxy)
        if proxy_parts.get('user') is not None:
            # Authenticated proxy: chrome needs an extension for creds
            self.opts.add_extension(self._proxy_extension(proxy_parts))
        else:
            # Anonymous proxy: pass the full address straight through
            self.opts.add_argument('--proxy-server={}'.format(proxy))
    # Recreate webdriver only when the proxy actually changed
    if changed is True:
        self._update()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _header_extension(self, remove_headers=[], add_or_modify_headers={}):
"""Create modheaders extension Source: https://vimmaniac.com/blog/bangal/modify-and-add-custom-headers-in-selenium-chrome-driver/ kwargs: remove_headers (list):
headers name to remove add_or_modify_headers (dict):
ie. {"Header-Name": "Header Value"} return str -> plugin path """ |
import string
import zipfile
plugin_file = 'custom_headers_plugin.zip'
if remove_headers is None:
remove_headers = []
if add_or_modify_headers is None:
add_or_modify_headers = {}
if isinstance(remove_headers, list) is False:
logger.error("remove_headers must be a list")
return None
if isinstance(add_or_modify_headers, dict) is False:
logger.error("add_or_modify_headers must be dict")
return None
# only keeping the unique headers key in remove_headers list
remove_headers = list(set(remove_headers))
manifest_json = """
{
"version": "1.0.0",
"manifest_version": 2,
"name": "Chrome HeaderModV",
"permissions": [
"webRequest",
"tabs",
"unlimitedStorage",
"storage",
"<all_urls>",
"webRequestBlocking"
],
"background": {
"scripts": ["background.js"]
},
"minimum_chrome_version":"22.0.0"
}
"""
background_js = string.Template("""
function callbackFn(details) {
var remove_headers = ${remove_headers};
var add_or_modify_headers = ${add_or_modify_headers};
function inarray(arr, obj) {
return (arr.indexOf(obj) != -1);
}
// remove headers
for (var i = 0; i < details.requestHeaders.length; ++i) {
if (inarray(remove_headers, details.requestHeaders[i].name)) {
details.requestHeaders.splice(i, 1);
var index = remove_headers.indexOf(5);
remove_headers.splice(index, 1);
}
if (!remove_headers.length) break;
}
// modify headers
for (var i = 0; i < details.requestHeaders.length; ++i) {
if (add_or_modify_headers.hasOwnProperty(details.requestHeaders[i].name)) {
details.requestHeaders[i].value = add_or_modify_headers[details.requestHeaders[i].name];
delete add_or_modify_headers[details.requestHeaders[i].name];
}
}
// add modify
for (var prop in add_or_modify_headers) {
details.requestHeaders.push(
{name: prop, value: add_or_modify_headers[prop]}
);
}
return {requestHeaders: details.requestHeaders};
}
chrome.webRequest.onBeforeSendHeaders.addListener(
callbackFn,
{urls: ["<all_urls>"]},
['blocking', 'requestHeaders']
);
"""
).substitute(remove_headers=remove_headers,
add_or_modify_headers=add_or_modify_headers,
)
with zipfile.ZipFile(plugin_file, 'w') as zp:
zp.writestr("manifest.json", manifest_json)
zp.writestr("background.js", background_js)
return plugin_file |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_template(self, template):
    """ Sets template to be used when generating output

    :param template Template instance
    :type instance of BasicTemplate
    """
    # anything not derived from BasicTemplate is rejected outright
    if not isinstance(template, templates.BasicTemplate):
        raise TypeError('converter#set_template:'
                        'Template must inherit from BasicTemplate')
    self.template = template
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, filename=None):
    """ Generates output and saves to given file

    :param filename File name
    :type str or unicode
    """
    if filename is None:
        raise IOError('Converter#save: Undefined filename')
    rendered = self.output()
    # output is text; persist it UTF-8 encoded
    with open(filename, 'wb+') as handle:
        handle.write(rendered.encode('utf-8'))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_file(self, filename):
    """ Read and adds given file's content to data array that will be
    used to generate output

    :param filename File name to add
    :type str or unicode
    """
    with open(filename, 'rb') as handle:
        raw = handle.read()
    # below won't handle the same name files
    # in different paths
    self.files[os.path.basename(filename)] = base64.b64encode(raw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def output(self):
    """ Generates output from data array

    :returns Pythoned file
    :rtype str or unicode
    """
    # nothing added yet is an error, not an empty render
    if not self.files:
        raise Exception('Converter#output: No files to convert')
    return self.template.render(self.files)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self):
    """ Update the screens contents in every loop. """
    # this is not really neccesary because the surface is black after initializing
    self.corners.fill(BLACK)
    # one dot in each corner of the screen-sized surface
    self.corners.draw_dot((0, 0), self.colors[0])
    self.corners.draw_dot((self.screen.width - 1, 0), self.colors[0])
    self.corners.draw_dot((self.screen.width - 1, self.screen.height - 1),
                          self.colors[0])
    self.corners.draw_dot((0, self.screen.height - 1), self.colors[0])
    # horizontal, vertical and diagonal test lines
    self.lines.fill(BLACK)
    self.lines.draw_line((1, 0), (self.lines.width - 1, 0), self.colors[1])
    self.lines.draw_line((0, 1), (0, self.lines.height - 1), self.colors[3])
    self.lines.draw_line((0, 0), (self.lines.width - 1,
                                  self.lines.height - 1), self.colors[2])
    # two side-by-side rectangles with swapped fill/border colors
    self.rects.fill(BLACK)
    self.rects.draw_rect((0, 0), (int(self.rects.width / 2) - 1,
                                  self.rects.height),
                         self.colors[2], self.colors[3])
    self.rects.draw_rect((int(self.rects.width / 2) + 1, 0),
                         (int(self.rects.width / 2) - 1,
                          self.rects.height),
                         self.colors[3], self.colors[2])
    # largest circle that fits on its surface
    self.circle.fill(BLACK)
    radius = int(min(self.circle.width, self.circle.height) / 2) - 1
    self.circle.draw_circle((int(self.circle.width / 2) - 1,
                             int(self.circle.height / 2) - 1), radius,
                            self.colors[4], self.colors[5])
    # solid block of a single color
    self.filled.fill(self.colors[6])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render(self):
    """ Send the current screen content to Mate Light. """
    # compose the partial surfaces onto the screen every frame
    self.screen.reset()
    self.screen.blit(self.corners)
    self.screen.blit(self.lines, (1, 1))
    self.screen.blit(self.rects, (int(self.screen.width / 2) + 1, 1))
    self.screen.blit(self.circle, (0, int(self.screen.height / 2) + 1))
    self.screen.blit(self.filled, (int(self.screen.width / 2) + 1,
                                   int(self.screen.height / 2) + 1))
    # push the frame out and keep the loop at the configured frame rate
    self.screen.update()
    self.clock.tick()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle_events(self):
    """ Loop through all events. """
    for event in pymlgame.get_events():
        if event.type == E_NEWCTLR:
            # register a newly connected controller under its uid
            #print(datetime.now(), '### new player connected with uid', event.uid)
            self.players[event.uid] = {'name': 'alien_{}'.format(event.uid), 'score': 0}
        elif event.type == E_DISCONNECT:
            #print(datetime.now(), '### player with uid {} disconnected'.format(event.uid))
            self.players.pop(event.uid)
        elif event.type == E_KEYDOWN:
            # any button press rotates the color palette by one
            #print(datetime.now(), '###', self.players[event.uid]['name'], 'pressed', event.button)
            self.colors.append(self.colors.pop(0))
        elif event.type == E_KEYUP:
            # releases rotate the palette as well
            #print(datetime.now(), '###', self.players[event.uid]['name'], 'released', event.button)
            self.colors.append(self.colors.pop(0))
        elif event.type == E_PING:
            # keep-alive from a controller; nothing to do
            #print(datetime.now(), '### ping from', self.players[event.uid]['name'])
            pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gameloop(self):
    """ A game loop that circles through the methods. """
    try:
        # Run forever: read input, advance state, draw.
        while True:
            self.handle_events()
            self.update()
            self.render()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to quit; exit quietly.
        pass
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(self, string):
    """The write method for a CalcpkgOutput object- print the string"""
    # Ignore empty strings and bare newline characters entirely.
    if string in ("", "\n", "\r"):
        return
    # Strip carriage returns so only plain \n newlines remain.
    string = string.replace("\r", "")
    if self.printData:
        print >> sys.__stdout__, string
    if self.logData:
        self.logWrite(string)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def logWrite(self, string):
    """Append *string* plus a newline to the log file, without printing.

    Uses a context manager so the file handle is closed even if the
    write raises (the previous open/write/close sequence leaked the
    handle on error).
    """
    with open(self.logFile, 'at') as log_file:
        log_file.write(string + '\n')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setupLogFile(self):
    """Start a new logging session: write a separator plus a dated header,
    then turn logging on."""
    header = "calcpkg.py log from {0}".format(datetime.datetime.now())
    self.logWrite("\n###############################################")
    self.logWrite(header)
    self.changeLogging(True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getLoggingLocation(self):
    """Return the directory where calcpkg.log should live.

    On Windows the log sits next to this module; everywhere else /tmp
    is used.
    """
    if sys.platform == "win32":
        # os.path.dirname handles both '/' and '\\' separators, unlike the
        # previous manual rfind('/') which broke on real Windows paths.
        return os.path.dirname(os.path.realpath(__file__))
    # The trailing unreachable `return ""` was removed.
    return "/tmp"
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def twitter_timeline(screen_name, since_id=None):
    """ Return relevant twitter timeline """
    # Build an authenticated API client from the configured credentials.
    # (Arguments evaluate left-to-right, so the credential lookup order is
    # unchanged: consumer_key, consumer_secret, access_token, access_secret.)
    auth = tweepy.OAuthHandler(twitter_credential('consumer_key'),
                               twitter_credential('consumer_secret'))
    auth.set_access_token(twitter_credential('access_token'),
                          twitter_credential('access_secret'))
    api = tweepy.API(auth)
    return get_all_tweets(screen_name, api, since_id)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def twitter_credential(name):
    """ Grab twitter credential from settings """
    credential_name = 'TWITTER_' + name.upper()
    # Sentinel-based lookup: a single getattr instead of hasattr + getattr.
    missing = object()
    value = getattr(settings, credential_name, missing)
    if value is missing:
        raise AttributeError('Missing twitter credential in settings: ' + credential_name)
    return value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def modify_class(original_class, modifier_class, override=True):
    """Add the (Python 2 unbound) methods of `modifier_class` to
    `original_class`.

    If `override` is True existing methods in `original_class` are
    overridden by those provided by `modifier_class`; otherwise existing
    methods are left untouched.
    """
    # Collect the candidate methods from the modifier class.
    modifier_methods = inspect.getmembers(modifier_class, inspect.ismethod)
    for name, method in modifier_methods:
        if isinstance(method, types.UnboundMethodType):
            if hasattr(original_class, name) and not override:
                # Bug fix: this used to `return None`, aborting the whole
                # copy at the first pre-existing method. Skip just this
                # method and keep processing the rest.
                continue
            setattr(original_class, name, method.im_func)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resize_image(image, tuple_wh, preserve_aspect=True):
    """Return a resized copy of a PIL image.

    Always returns a copy: PIL's ``resize`` already returns one, while
    ``thumbnail`` mutates in place, so the aspect-preserving path works on
    an explicit copy to avoid side effects on the caller's image.

    Args:
        image: An instance of a PIL Image.
        tuple_wh: A tuple containing the (width, height) for resizing.
        preserve_aspect: Whether resizing should preserve the aspect ratio.
    """
    if not preserve_aspect:
        return image.resize(tuple_wh)
    duplicate = image.copy()
    duplicate.thumbnail(tuple_wh)
    return duplicate
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def format_datetime(date):
    """ Convert datetime to UTC ISO 8601 """
    offset = date.utcoffset()
    if offset is None:
        # Naive datetimes are assumed to already be in UTC.
        return date.isoformat() + 'Z'
    # Shift to UTC, then drop tzinfo so isoformat() omits the offset.
    utc_naive = (date - offset).replace(tzinfo=None)
    return utc_naive.isoformat() + 'Z'
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log(self, msg, error=False):
    """Write *msg* plus a trailing newline to stdout, or to stderr when
    ``error`` is True."""
    stream = self.stderr if error else self.stdout
    stream.write(msg)
    stream.write('\n')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_emails(self, email_filename, index=0):
    """Generator that parses `email_filename` and yields
    (key, mailbox.mboxMessage) pairs, starting from position `index`."""
    self.log("Parsing email dump: %s." % email_filename)
    mbox = mailbox.mbox(email_filename, factory=CustomMessage)
    # mbox objects support neither slicing nor cheap tuple conversion
    # (materialising the whole box is far too slow), so walk the integer
    # keys manually until one is missing.
    key = index
    while key in mbox:
        yield key, mbox[key]
        key += 1
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_emails(self, mailinglist_dir, all, exclude_lists):
    """Generator that walks each ``*.mbox`` dump under `mailinglist_dir`
    and yields (mailing list name, message, index) tuples.

    With `all` True every message is yielded; otherwise iteration resumes
    from the last index previously imported for that list. Lists named in
    `exclude_lists` are skipped.
    """
    self.log("Getting emails dumps from: %s" % mailinglist_dir)
    mbox_names = (entry for entry in os.listdir(mailinglist_dir)
                  if entry.endswith('.mbox'))
    for mbox_name in mbox_names:
        mbox_path = os.path.join(mailinglist_dir, mbox_name, mbox_name)
        list_name = mbox_name.split('.')[0]
        # Skip lists configured not to be imported.
        if exclude_lists and list_name in exclude_lists:
            continue
        # Resume from the last imported message unless a full import
        # was requested (or the list has never been imported).
        if all:
            start_index = 0
        else:
            try:
                mailinglist = MailingList.objects.get(name=list_name)
                start_index = mailinglist.last_imported_index
            except MailingList.DoesNotExist:
                start_index = 0
        for index, msg in self.parse_emails(mbox_path, start_index):
            yield list_name, msg, index
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_thread(self, email, mailinglist):
    """Group messages by thread looking for similar subjects."""
    subject_token = slugify(email.subject_clean)
    # Per-subject cache, keyed again by mailing list id.
    by_list = self.THREAD_CACHE.setdefault(subject_token, {})
    thread = by_list.get(mailinglist.id)
    if thread is None:
        thread = Thread.all_objects.get_or_create(
            mailinglist=mailinglist,
            subject_token=subject_token,
        )[0]
        by_list[mailinglist.id] = thread
    # Refresh thread metadata with the newest message.
    thread.latest_message = email
    thread.update_keywords()
    thread.save()
    return thread
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_email(self, list_name, email_msg, index):
    """Save email message into the database."""
    message_id = email_msg.get('Message-ID')
    if not message_id:
        # Messages without an id cannot be deduplicated; skip them.
        return
    mailinglist, created = MailingList.objects.get_or_create(name=list_name)
    # Record how far the import has progressed for this list.
    mailinglist.last_imported_index = index
    if created:
        # A brand new list cannot already contain this message.
        self.create_email(mailinglist, email_msg)
    else:
        # Only store the message if it is not in the database yet.
        try:
            Message.all_objects.get(
                message_id=message_id,
                thread__mailinglist=mailinglist,
            )
        except Message.DoesNotExist:
            self.create_email(mailinglist, email_msg)
    mailinglist.save()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def import_emails(self, archives_path, all, exclude_lists=None):
    """Get emails from the filesystem from the `archives_path` and store
    them into the database.

    If `all` is set to True all the filesystem storage will be imported,
    otherwise the importation will resume from the last message previously
    imported. The lists set in `exclude_lists` won't be imported.
    """
    count = 0
    email_generator = self.get_emails(archives_path, all, exclude_lists)
    for mailinglist_name, msg, index in email_generator:
        try:
            self.save_email(mailinglist_name, msg, index)
        except:
            # This anti-pattern is needed to avoid the transations to
            # get stuck in case of errors.
            transaction.rollback()
            raise
        count += 1
        # Commit in batches of 1000 messages to keep transactions small.
        if count % 1000 == 0:
            transaction.commit()
    # Flush whatever remains of the last (partial) batch.
    transaction.commit()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def handle(self, *args, **options):
    """Main command method.

    Imports emails from the given archives path, guarding against
    concurrent runs with a filesystem lock file that is always removed
    afterwards.
    """
    # Already running, so quit
    if os.path.exists(self.lock_file):
        self.log(("This script is already running. "
                  "(If your are sure it's not please "
                  "delete the lock file in {}')").format(self.lock_file))
        sys.exit(0)
    # Make sure the directory holding the lock file exists (0755 is the
    # Python 2 octal literal form).
    if not os.path.exists(os.path.dirname(self.lock_file)):
        os.mkdir(os.path.dirname(self.lock_file), 0755)
    archives_path = options.get('archives_path')
    # NOTE(review): this logs settings.SUPER_ARCHIVES_PATH while the code
    # below actually uses the `archives_path` option -- confirm the two
    # are meant to be the same value.
    self.log('Using archives_path `%s`' % settings.SUPER_ARCHIVES_PATH)
    if not os.path.exists(archives_path):
        msg = 'archives_path ({}) does not exist'.format(archives_path)
        raise CommandError(msg)
    # Create the lock file (Python 2 `file` builtin).
    run_lock = file(self.lock_file, 'w')
    run_lock.close()
    try:
        self.import_emails(
            archives_path,
            options.get('all'),
            options.get('exclude_lists'),
        )
    except Exception as e:
        logging.exception(e)
        raise
    finally:
        # Always release the lock, even when the import fails.
        os.remove(self.lock_file)
    # Refresh privacy flags for every list after the import.
    for mlist in MailingList.objects.all():
        mlist.update_privacy()
        mlist.save()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_managers(sender, **kwargs):
    """ Make sure all classes have the appropriate managers """
    # Only ModelBase subclasses get the `permitted` manager attached.
    if issubclass(sender, ModelBase):
        sender.add_to_class('permitted', PermittedManager())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def vote_total(self):
    """
    Calculates vote total as total_upvotes - total_downvotes. We are
    adding a method here instead of relying on django-secretballot's
    addition since that doesn't work for subclasses.
    """
    votes = self.modelbase_obj.votes
    upvotes = votes.filter(vote=+1).count()
    downvotes = votes.filter(vote=-1).count()
    return upvotes - downvotes
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def comment_count(self):
    """
    Counts total number of comments on ModelBase object.
    Comments should always be recorded on ModelBase objects.
    """
    comment_model = comments.get_model()
    modelbase_content_type = ContentType.objects.get(
        app_label="panya", model="modelbase")
    # Count comments attached either to this object's own content type or
    # to the generic ModelBase content type.
    qs = comment_model.objects.filter(
        content_type__in=[self.content_type, modelbase_content_type],
        object_pk=smart_unicode(self.pk),
        site__pk=settings.SITE_ID,
    )
    # is_public/is_removed are implementation details of the built-in
    # comment model's spam filtering, so only filter on them when the
    # (possibly custom) comment model actually defines them.
    available_fields = [f.name for f in comment_model._meta.fields]
    if 'is_public' in available_fields:
        qs = qs.filter(is_public=True)
    if getattr(settings, 'COMMENTS_HIDE_REMOVED', True) \
            and 'is_removed' in available_fields:
        qs = qs.filter(is_removed=False)
    return qs.count()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sync(collector):
    """Sync an environment"""
    amazon = collector.configuration['amazon']
    aws_syncr = collector.configuration['aws_syncr']
    registered = collector.configuration["__registered__"]
    # Convert everything before we try and sync anything
    log.info("Converting configuration")
    converted = {typ: collector.configuration[typ]
                 for typ in registered
                 if typ in collector.configuration}
    # Do the sync
    for typ in registered:
        if typ not in converted:
            continue
        # An explicit artifact restricts syncing to just that type.
        if aws_syncr.artifact and aws_syncr.artifact != typ:
            continue
        log.info("Syncing {0}".format(typ))
        thing = converted[typ]
        for name, item in thing.items.items():
            thing.sync_one(aws_syncr, amazon, item)
    if not amazon.changes:
        log.info("No changes were made!!")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deploy_lambda(collector):
    """Deploy a lambda function"""
    configuration = collector.configuration
    aws_syncr = configuration['aws_syncr']
    function = find_lambda_function(aws_syncr, configuration)
    function.deploy(aws_syncr, configuration['amazon'])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deploy_gateway(collector):
    """Deploy the apigateway to a particular stage"""
    configuration = collector.configuration
    aws_syncr, amazon, stage, gateway = find_gateway(
        configuration['aws_syncr'], configuration)
    gateway.deploy(aws_syncr, amazon, stage)
    if not configuration['amazon'].changes:
        log.info("No changes were made!!")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.