repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
pricingassistant/mrq | mrq/job.py | Job.kill | def kill(self, block=False, reason="unknown"):
""" Forcefully kill all greenlets associated with this job """
current_greenletid = id(gevent.getcurrent())
trace = "Job killed: %s" % reason
for greenlet, job in context._GLOBAL_CONTEXT["greenlets"].values():
greenletid = id(greenlet)
if job and job.id == self.id and greenletid != current_greenletid:
greenlet.kill(block=block)
trace += "\n\n--- Greenlet %s ---\n" % greenletid
trace += "".join(traceback.format_stack(greenlet.gr_frame))
context._GLOBAL_CONTEXT["greenlets"].pop(greenletid, None)
if reason == "timeout" and self.data["status"] != "timeout":
updates = {
"exceptiontype": "TimeoutInterrupt",
"traceback": trace
}
self._save_status("timeout", updates=updates, exception=False) | python | def kill(self, block=False, reason="unknown"):
""" Forcefully kill all greenlets associated with this job """
current_greenletid = id(gevent.getcurrent())
trace = "Job killed: %s" % reason
for greenlet, job in context._GLOBAL_CONTEXT["greenlets"].values():
greenletid = id(greenlet)
if job and job.id == self.id and greenletid != current_greenletid:
greenlet.kill(block=block)
trace += "\n\n--- Greenlet %s ---\n" % greenletid
trace += "".join(traceback.format_stack(greenlet.gr_frame))
context._GLOBAL_CONTEXT["greenlets"].pop(greenletid, None)
if reason == "timeout" and self.data["status"] != "timeout":
updates = {
"exceptiontype": "TimeoutInterrupt",
"traceback": trace
}
self._save_status("timeout", updates=updates, exception=False) | [
"def",
"kill",
"(",
"self",
",",
"block",
"=",
"False",
",",
"reason",
"=",
"\"unknown\"",
")",
":",
"current_greenletid",
"=",
"id",
"(",
"gevent",
".",
"getcurrent",
"(",
")",
")",
"trace",
"=",
"\"Job killed: %s\"",
"%",
"reason",
"for",
"greenlet",
"... | Forcefully kill all greenlets associated with this job | [
"Forcefully",
"kill",
"all",
"greenlets",
"associated",
"with",
"this",
"job"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/job.py#L371-L390 | train | 207,800 |
pricingassistant/mrq | mrq/job.py | Job._save_traceback_history | def _save_traceback_history(self, status, trace, job_exc):
""" Create traceback history or add a new traceback to history. """
failure_date = datetime.datetime.utcnow()
new_history = {
"date": failure_date,
"status": status,
"exceptiontype": job_exc.__name__
}
traces = trace.split("---- Original exception: -----")
if len(traces) > 1:
new_history["original_traceback"] = traces[1]
worker = context.get_current_worker()
if worker:
new_history["worker"] = worker.id
new_history["traceback"] = traces[0]
self.collection.update({
"_id": self.id
}, {"$push": {"traceback_history": new_history}}) | python | def _save_traceback_history(self, status, trace, job_exc):
""" Create traceback history or add a new traceback to history. """
failure_date = datetime.datetime.utcnow()
new_history = {
"date": failure_date,
"status": status,
"exceptiontype": job_exc.__name__
}
traces = trace.split("---- Original exception: -----")
if len(traces) > 1:
new_history["original_traceback"] = traces[1]
worker = context.get_current_worker()
if worker:
new_history["worker"] = worker.id
new_history["traceback"] = traces[0]
self.collection.update({
"_id": self.id
}, {"$push": {"traceback_history": new_history}}) | [
"def",
"_save_traceback_history",
"(",
"self",
",",
"status",
",",
"trace",
",",
"job_exc",
")",
":",
"failure_date",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"new_history",
"=",
"{",
"\"date\"",
":",
"failure_date",
",",
"\"status\"",
":",... | Create traceback history or add a new traceback to history. | [
"Create",
"traceback",
"history",
"or",
"add",
"a",
"new",
"traceback",
"to",
"history",
"."
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/job.py#L409-L428 | train | 207,801 |
pricingassistant/mrq | mrq/job.py | Job.trace_memory_clean_caches | def trace_memory_clean_caches(self):
""" Avoid polluting results with some builtin python caches """
urllib.parse.clear_cache()
re.purge()
linecache.clearcache()
copyreg.clear_extension_cache()
if hasattr(fnmatch, "purge"):
fnmatch.purge() # pylint: disable=no-member
elif hasattr(fnmatch, "_purge"):
fnmatch._purge() # pylint: disable=no-member
if hasattr(encodings, "_cache") and len(encodings._cache) > 0:
encodings._cache = {}
for handler in context.log.handlers:
handler.flush() | python | def trace_memory_clean_caches(self):
""" Avoid polluting results with some builtin python caches """
urllib.parse.clear_cache()
re.purge()
linecache.clearcache()
copyreg.clear_extension_cache()
if hasattr(fnmatch, "purge"):
fnmatch.purge() # pylint: disable=no-member
elif hasattr(fnmatch, "_purge"):
fnmatch._purge() # pylint: disable=no-member
if hasattr(encodings, "_cache") and len(encodings._cache) > 0:
encodings._cache = {}
for handler in context.log.handlers:
handler.flush() | [
"def",
"trace_memory_clean_caches",
"(",
"self",
")",
":",
"urllib",
".",
"parse",
".",
"clear_cache",
"(",
")",
"re",
".",
"purge",
"(",
")",
"linecache",
".",
"clearcache",
"(",
")",
"copyreg",
".",
"clear_extension_cache",
"(",
")",
"if",
"hasattr",
"("... | Avoid polluting results with some builtin python caches | [
"Avoid",
"polluting",
"results",
"with",
"some",
"builtin",
"python",
"caches"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/job.py#L585-L602 | train | 207,802 |
pricingassistant/mrq | mrq/job.py | Job.trace_memory_start | def trace_memory_start(self):
""" Starts measuring memory consumption """
self.trace_memory_clean_caches()
objgraph.show_growth(limit=30)
gc.collect()
self._memory_start = self.worker.get_memory()["total"] | python | def trace_memory_start(self):
""" Starts measuring memory consumption """
self.trace_memory_clean_caches()
objgraph.show_growth(limit=30)
gc.collect()
self._memory_start = self.worker.get_memory()["total"] | [
"def",
"trace_memory_start",
"(",
"self",
")",
":",
"self",
".",
"trace_memory_clean_caches",
"(",
")",
"objgraph",
".",
"show_growth",
"(",
"limit",
"=",
"30",
")",
"gc",
".",
"collect",
"(",
")",
"self",
".",
"_memory_start",
"=",
"self",
".",
"worker",
... | Starts measuring memory consumption | [
"Starts",
"measuring",
"memory",
"consumption"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/job.py#L604-L612 | train | 207,803 |
pricingassistant/mrq | mrq/job.py | Job.trace_memory_stop | def trace_memory_stop(self):
""" Stops measuring memory consumption """
self.trace_memory_clean_caches()
objgraph.show_growth(limit=30)
trace_type = context.get_current_config()["trace_memory_type"]
if trace_type:
filename = '%s/%s-%s.png' % (
context.get_current_config()["trace_memory_output_dir"],
trace_type,
self.id)
chain = objgraph.find_backref_chain(
random.choice(
objgraph.by_type(trace_type)
),
objgraph.is_proper_module
)
objgraph.show_chain(chain, filename=filename)
del filename
del chain
gc.collect()
self._memory_stop = self.worker.get_memory()["total"]
diff = self._memory_stop - self._memory_start
context.log.debug("Memory diff for job %s : %s" % (self.id, diff))
# We need to update it later than the results, we need them off memory
# already.
self.collection.update(
{"_id": self.id},
{"$set": {
"memory_diff": diff
}},
w=1
) | python | def trace_memory_stop(self):
""" Stops measuring memory consumption """
self.trace_memory_clean_caches()
objgraph.show_growth(limit=30)
trace_type = context.get_current_config()["trace_memory_type"]
if trace_type:
filename = '%s/%s-%s.png' % (
context.get_current_config()["trace_memory_output_dir"],
trace_type,
self.id)
chain = objgraph.find_backref_chain(
random.choice(
objgraph.by_type(trace_type)
),
objgraph.is_proper_module
)
objgraph.show_chain(chain, filename=filename)
del filename
del chain
gc.collect()
self._memory_stop = self.worker.get_memory()["total"]
diff = self._memory_stop - self._memory_start
context.log.debug("Memory diff for job %s : %s" % (self.id, diff))
# We need to update it later than the results, we need them off memory
# already.
self.collection.update(
{"_id": self.id},
{"$set": {
"memory_diff": diff
}},
w=1
) | [
"def",
"trace_memory_stop",
"(",
"self",
")",
":",
"self",
".",
"trace_memory_clean_caches",
"(",
")",
"objgraph",
".",
"show_growth",
"(",
"limit",
"=",
"30",
")",
"trace_type",
"=",
"context",
".",
"get_current_config",
"(",
")",
"[",
"\"trace_memory_type\"",
... | Stops measuring memory consumption | [
"Stops",
"measuring",
"memory",
"consumption"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/job.py#L614-L654 | train | 207,804 |
pricingassistant/mrq | mrq/queue_regular.py | QueueRegular.list_job_ids | def list_job_ids(self, skip=0, limit=20):
""" Returns a list of job ids on a queue """
return [str(x["_id"]) for x in self.collection.find(
{"status": "queued"},
sort=[("_id", -1 if self.is_reverse else 1)],
projection={"_id": 1})
] | python | def list_job_ids(self, skip=0, limit=20):
""" Returns a list of job ids on a queue """
return [str(x["_id"]) for x in self.collection.find(
{"status": "queued"},
sort=[("_id", -1 if self.is_reverse else 1)],
projection={"_id": 1})
] | [
"def",
"list_job_ids",
"(",
"self",
",",
"skip",
"=",
"0",
",",
"limit",
"=",
"20",
")",
":",
"return",
"[",
"str",
"(",
"x",
"[",
"\"_id\"",
"]",
")",
"for",
"x",
"in",
"self",
".",
"collection",
".",
"find",
"(",
"{",
"\"status\"",
":",
"\"queu... | Returns a list of job ids on a queue | [
"Returns",
"a",
"list",
"of",
"job",
"ids",
"on",
"a",
"queue"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue_regular.py#L64-L71 | train | 207,805 |
pricingassistant/mrq | mrq/queue_regular.py | QueueRegular.dequeue_jobs | def dequeue_jobs(self, max_jobs=1, job_class=None, worker=None):
""" Fetch a maximum of max_jobs from this queue """
if job_class is None:
from .job import Job
job_class = Job
count = 0
# TODO: remove _id sort after full migration to datequeued
sort_order = [("datequeued", -1 if self.is_reverse else 1), ("_id", -1 if self.is_reverse else 1)]
# MongoDB optimization: with many jobs it's faster to fetch the IDs first and do the atomic update second
# Some jobs may have been stolen by another worker in the meantime but it's a balance (should we over-fetch?)
# job_ids = None
# if max_jobs > 5:
# job_ids = [x["_id"] for x in self.collection.find(
# self.base_dequeue_query,
# limit=max_jobs,
# sort=sort_order,
# projection={"_id": 1}
# )]
# if len(job_ids) == 0:
# return
for i in range(max_jobs): # if job_ids is None else len(job_ids)):
# if job_ids is not None:
# query = {
# "status": "queued",
# "_id": job_ids[i]
# }
# sort_order = None
# else:
query = self.base_dequeue_query
job_data = self.collection.find_one_and_update(
query,
{"$set": {
"status": "started",
"datestarted": datetime.datetime.utcnow(),
"worker": worker.id if worker else None
}, "$unset": {
"dateexpires": 1 # we don't want started jobs to expire unexpectedly
}},
sort=sort_order,
return_document=ReturnDocument.AFTER,
projection={
"_id": 1,
"path": 1,
"params": 1,
"status": 1,
"retry_count": 1,
"queue": 1,
"datequeued": 1
}
)
if not job_data:
break
if worker:
worker.status = "spawn"
count += 1
context.metric("queues.%s.dequeued" % job_data["queue"], 1)
job = job_class(job_data["_id"], queue=self.id, start=False)
job.set_data(job_data)
job.datestarted = datetime.datetime.utcnow()
context.metric("jobs.status.started")
yield job
context.metric("queues.all.dequeued", count) | python | def dequeue_jobs(self, max_jobs=1, job_class=None, worker=None):
""" Fetch a maximum of max_jobs from this queue """
if job_class is None:
from .job import Job
job_class = Job
count = 0
# TODO: remove _id sort after full migration to datequeued
sort_order = [("datequeued", -1 if self.is_reverse else 1), ("_id", -1 if self.is_reverse else 1)]
# MongoDB optimization: with many jobs it's faster to fetch the IDs first and do the atomic update second
# Some jobs may have been stolen by another worker in the meantime but it's a balance (should we over-fetch?)
# job_ids = None
# if max_jobs > 5:
# job_ids = [x["_id"] for x in self.collection.find(
# self.base_dequeue_query,
# limit=max_jobs,
# sort=sort_order,
# projection={"_id": 1}
# )]
# if len(job_ids) == 0:
# return
for i in range(max_jobs): # if job_ids is None else len(job_ids)):
# if job_ids is not None:
# query = {
# "status": "queued",
# "_id": job_ids[i]
# }
# sort_order = None
# else:
query = self.base_dequeue_query
job_data = self.collection.find_one_and_update(
query,
{"$set": {
"status": "started",
"datestarted": datetime.datetime.utcnow(),
"worker": worker.id if worker else None
}, "$unset": {
"dateexpires": 1 # we don't want started jobs to expire unexpectedly
}},
sort=sort_order,
return_document=ReturnDocument.AFTER,
projection={
"_id": 1,
"path": 1,
"params": 1,
"status": 1,
"retry_count": 1,
"queue": 1,
"datequeued": 1
}
)
if not job_data:
break
if worker:
worker.status = "spawn"
count += 1
context.metric("queues.%s.dequeued" % job_data["queue"], 1)
job = job_class(job_data["_id"], queue=self.id, start=False)
job.set_data(job_data)
job.datestarted = datetime.datetime.utcnow()
context.metric("jobs.status.started")
yield job
context.metric("queues.all.dequeued", count) | [
"def",
"dequeue_jobs",
"(",
"self",
",",
"max_jobs",
"=",
"1",
",",
"job_class",
"=",
"None",
",",
"worker",
"=",
"None",
")",
":",
"if",
"job_class",
"is",
"None",
":",
"from",
".",
"job",
"import",
"Job",
"job_class",
"=",
"Job",
"count",
"=",
"0",... | Fetch a maximum of max_jobs from this queue | [
"Fetch",
"a",
"maximum",
"of",
"max_jobs",
"from",
"this",
"queue"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue_regular.py#L73-L151 | train | 207,806 |
pricingassistant/mrq | mrq/dashboard/utils.py | jsonify | def jsonify(*args, **kwargs):
""" jsonify with support for MongoDB ObjectId
"""
return Response(
json.dumps(
dict(
*args,
**kwargs),
cls=MongoJSONEncoder),
mimetype='application/json') | python | def jsonify(*args, **kwargs):
""" jsonify with support for MongoDB ObjectId
"""
return Response(
json.dumps(
dict(
*args,
**kwargs),
cls=MongoJSONEncoder),
mimetype='application/json') | [
"def",
"jsonify",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"Response",
"(",
"json",
".",
"dumps",
"(",
"dict",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
",",
"cls",
"=",
"MongoJSONEncoder",
")",
",",
"mimetype",
"=",
"... | jsonify with support for MongoDB ObjectId | [
"jsonify",
"with",
"support",
"for",
"MongoDB",
"ObjectId"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/dashboard/utils.py#L9-L18 | train | 207,807 |
pricingassistant/mrq | mrq/agent.py | Agent.queuestats | def queuestats(self):
""" Compute ETAs for every known queue & subqueue """
start_time = time.time()
log.debug("Starting queue stats...")
# Fetch all known queues
queues = [Queue(q) for q in Queue.all_known()]
new_queues = {queue.id for queue in queues}
old_queues = set(self.queue_etas.keys())
for deleted_queue in old_queues.difference(new_queues):
self.queue_etas.pop(deleted_queue)
t = time.time()
stats = {}
for queue in queues:
cnt = queue.count_jobs_to_dequeue()
eta = self.queue_etas[queue.id].next(cnt, t=t)
# Number of jobs to dequeue, ETA, Time of stats
stats[queue.id] = "%d %s %d" % (cnt, eta if eta is not None else "N", int(t))
with connections.redis.pipeline(transaction=True) as pipe:
if random.randint(0, 100) == 0 or len(stats) == 0:
pipe.delete(self.redis_queuestats_key)
if len(stats) > 0:
pipe.hmset(self.redis_queuestats_key, stats)
pipe.execute()
log.debug("... done queue stats in %0.4fs" % (time.time() - start_time)) | python | def queuestats(self):
""" Compute ETAs for every known queue & subqueue """
start_time = time.time()
log.debug("Starting queue stats...")
# Fetch all known queues
queues = [Queue(q) for q in Queue.all_known()]
new_queues = {queue.id for queue in queues}
old_queues = set(self.queue_etas.keys())
for deleted_queue in old_queues.difference(new_queues):
self.queue_etas.pop(deleted_queue)
t = time.time()
stats = {}
for queue in queues:
cnt = queue.count_jobs_to_dequeue()
eta = self.queue_etas[queue.id].next(cnt, t=t)
# Number of jobs to dequeue, ETA, Time of stats
stats[queue.id] = "%d %s %d" % (cnt, eta if eta is not None else "N", int(t))
with connections.redis.pipeline(transaction=True) as pipe:
if random.randint(0, 100) == 0 or len(stats) == 0:
pipe.delete(self.redis_queuestats_key)
if len(stats) > 0:
pipe.hmset(self.redis_queuestats_key, stats)
pipe.execute()
log.debug("... done queue stats in %0.4fs" % (time.time() - start_time)) | [
"def",
"queuestats",
"(",
"self",
")",
":",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"log",
".",
"debug",
"(",
"\"Starting queue stats...\"",
")",
"# Fetch all known queues",
"queues",
"=",
"[",
"Queue",
"(",
"q",
")",
"for",
"q",
"in",
"Queue",
... | Compute ETAs for every known queue & subqueue | [
"Compute",
"ETAs",
"for",
"every",
"known",
"queue",
"&",
"subqueue"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/agent.py#L159-L191 | train | 207,808 |
pricingassistant/mrq | mrq/subpool.py | subpool_map | def subpool_map(pool_size, func, iterable):
""" Starts a Gevent pool and run a map. Takes care of setting current_job and cleaning up. """
from .context import get_current_job, set_current_job, log
if not pool_size:
return [func(*args) for args in iterable]
counter = itertools_count()
current_job = get_current_job()
def inner_func(*args):
""" As each call to 'func' will be done in a random greenlet of the subpool, we need to
register their IDs with set_current_job() to make get_current_job() calls work properly
inside 'func'.
"""
next(counter)
if current_job:
set_current_job(current_job)
try:
ret = func(*args)
except Exception as exc:
trace = traceback.format_exc()
exc.subpool_traceback = trace
raise
if current_job:
set_current_job(None)
return ret
def inner_iterable():
""" This will be called inside the pool's main greenlet, which ID also needs to be registered """
if current_job:
set_current_job(current_job)
for x in iterable:
yield x
if current_job:
set_current_job(None)
start_time = time.time()
pool = gevent.pool.Pool(size=pool_size)
ret = pool.map(inner_func, inner_iterable())
pool.join(raise_error=True)
total_time = time.time() - start_time
log.debug("SubPool ran %s greenlets in %0.6fs" % (counter, total_time))
return ret | python | def subpool_map(pool_size, func, iterable):
""" Starts a Gevent pool and run a map. Takes care of setting current_job and cleaning up. """
from .context import get_current_job, set_current_job, log
if not pool_size:
return [func(*args) for args in iterable]
counter = itertools_count()
current_job = get_current_job()
def inner_func(*args):
""" As each call to 'func' will be done in a random greenlet of the subpool, we need to
register their IDs with set_current_job() to make get_current_job() calls work properly
inside 'func'.
"""
next(counter)
if current_job:
set_current_job(current_job)
try:
ret = func(*args)
except Exception as exc:
trace = traceback.format_exc()
exc.subpool_traceback = trace
raise
if current_job:
set_current_job(None)
return ret
def inner_iterable():
""" This will be called inside the pool's main greenlet, which ID also needs to be registered """
if current_job:
set_current_job(current_job)
for x in iterable:
yield x
if current_job:
set_current_job(None)
start_time = time.time()
pool = gevent.pool.Pool(size=pool_size)
ret = pool.map(inner_func, inner_iterable())
pool.join(raise_error=True)
total_time = time.time() - start_time
log.debug("SubPool ran %s greenlets in %0.6fs" % (counter, total_time))
return ret | [
"def",
"subpool_map",
"(",
"pool_size",
",",
"func",
",",
"iterable",
")",
":",
"from",
".",
"context",
"import",
"get_current_job",
",",
"set_current_job",
",",
"log",
"if",
"not",
"pool_size",
":",
"return",
"[",
"func",
"(",
"*",
"args",
")",
"for",
"... | Starts a Gevent pool and run a map. Takes care of setting current_job and cleaning up. | [
"Starts",
"a",
"Gevent",
"pool",
"and",
"run",
"a",
"map",
".",
"Takes",
"care",
"of",
"setting",
"current_job",
"and",
"cleaning",
"up",
"."
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/subpool.py#L7-L58 | train | 207,809 |
pricingassistant/mrq | mrq/subpool.py | subpool_imap | def subpool_imap(pool_size, func, iterable, flatten=False, unordered=False, buffer_size=None):
""" Generator version of subpool_map. Should be used with unordered=True for optimal performance """
from .context import get_current_job, set_current_job, log
if not pool_size:
for args in iterable:
yield func(*args)
counter = itertools_count()
current_job = get_current_job()
def inner_func(*args):
""" As each call to 'func' will be done in a random greenlet of the subpool, we need to
register their IDs with set_current_job() to make get_current_job() calls work properly
inside 'func'.
"""
next(counter)
if current_job:
set_current_job(current_job)
try:
ret = func(*args)
except Exception as exc:
trace = traceback.format_exc()
exc.subpool_traceback = trace
raise
if current_job:
set_current_job(None)
return ret
def inner_iterable():
""" This will be called inside the pool's main greenlet, which ID also needs to be registered """
if current_job:
set_current_job(current_job)
for x in iterable:
yield x
if current_job:
set_current_job(None)
start_time = time.time()
pool = gevent.pool.Pool(size=pool_size)
if unordered:
iterator = pool.imap_unordered(inner_func, inner_iterable(), maxsize=buffer_size or pool_size)
else:
iterator = pool.imap(inner_func, inner_iterable())
for x in iterator:
if flatten:
for y in x:
yield y
else:
yield x
pool.join(raise_error=True)
total_time = time.time() - start_time
log.debug("SubPool ran %s greenlets in %0.6fs" % (counter, total_time)) | python | def subpool_imap(pool_size, func, iterable, flatten=False, unordered=False, buffer_size=None):
""" Generator version of subpool_map. Should be used with unordered=True for optimal performance """
from .context import get_current_job, set_current_job, log
if not pool_size:
for args in iterable:
yield func(*args)
counter = itertools_count()
current_job = get_current_job()
def inner_func(*args):
""" As each call to 'func' will be done in a random greenlet of the subpool, we need to
register their IDs with set_current_job() to make get_current_job() calls work properly
inside 'func'.
"""
next(counter)
if current_job:
set_current_job(current_job)
try:
ret = func(*args)
except Exception as exc:
trace = traceback.format_exc()
exc.subpool_traceback = trace
raise
if current_job:
set_current_job(None)
return ret
def inner_iterable():
""" This will be called inside the pool's main greenlet, which ID also needs to be registered """
if current_job:
set_current_job(current_job)
for x in iterable:
yield x
if current_job:
set_current_job(None)
start_time = time.time()
pool = gevent.pool.Pool(size=pool_size)
if unordered:
iterator = pool.imap_unordered(inner_func, inner_iterable(), maxsize=buffer_size or pool_size)
else:
iterator = pool.imap(inner_func, inner_iterable())
for x in iterator:
if flatten:
for y in x:
yield y
else:
yield x
pool.join(raise_error=True)
total_time = time.time() - start_time
log.debug("SubPool ran %s greenlets in %0.6fs" % (counter, total_time)) | [
"def",
"subpool_imap",
"(",
"pool_size",
",",
"func",
",",
"iterable",
",",
"flatten",
"=",
"False",
",",
"unordered",
"=",
"False",
",",
"buffer_size",
"=",
"None",
")",
":",
"from",
".",
"context",
"import",
"get_current_job",
",",
"set_current_job",
",",
... | Generator version of subpool_map. Should be used with unordered=True for optimal performance | [
"Generator",
"version",
"of",
"subpool_map",
".",
"Should",
"be",
"used",
"with",
"unordered",
"=",
"True",
"for",
"optimal",
"performance"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/subpool.py#L61-L123 | train | 207,810 |
pricingassistant/mrq | mrq/scheduler.py | _hash_task | def _hash_task(task):
""" Returns a unique hash for identify a task and its params """
params = task.get("params")
if params:
params = json.dumps(sorted(list(task["params"].items()), key=lambda x: x[0])) # pylint: disable=no-member
full = [str(task.get(x)) for x in ["path", "interval", "dailytime", "weekday", "monthday", "queue"]]
full.extend([str(params)])
return " ".join(full) | python | def _hash_task(task):
""" Returns a unique hash for identify a task and its params """
params = task.get("params")
if params:
params = json.dumps(sorted(list(task["params"].items()), key=lambda x: x[0])) # pylint: disable=no-member
full = [str(task.get(x)) for x in ["path", "interval", "dailytime", "weekday", "monthday", "queue"]]
full.extend([str(params)])
return " ".join(full) | [
"def",
"_hash_task",
"(",
"task",
")",
":",
"params",
"=",
"task",
".",
"get",
"(",
"\"params\"",
")",
"if",
"params",
":",
"params",
"=",
"json",
".",
"dumps",
"(",
"sorted",
"(",
"list",
"(",
"task",
"[",
"\"params\"",
"]",
".",
"items",
"(",
")"... | Returns a unique hash for identify a task and its params | [
"Returns",
"a",
"unique",
"hash",
"for",
"identify",
"a",
"task",
"and",
"its",
"params"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/scheduler.py#L8-L18 | train | 207,811 |
pricingassistant/mrq | mrq/scheduler.py | Scheduler.check_config_integrity | def check_config_integrity(self):
""" Make sure the scheduler config is valid """
tasks_by_hash = {_hash_task(t): t for t in self.config_tasks}
if len(tasks_by_hash) != len(self.config_tasks):
raise Exception("Fatal error: there was a hash duplicate in the scheduled tasks config.")
for h, task in tasks_by_hash.items():
if task.get("monthday") and not task.get("dailytime"):
raise Exception("Fatal error: you can't schedule a task with 'monthday' and without 'dailytime' (%s)" % h)
if task.get("weekday") and not task.get("dailytime"):
raise Exception("Fatal error: you can't schedule a task with 'weekday' and without 'dailytime' (%s)" % h)
if not task.get("monthday") and not task.get("weekday") and not task.get("dailytime") and not task.get("interval"):
raise Exception("Fatal error: scheduler must be specified one of monthday,weekday,dailytime,interval. (%s)" % h) | python | def check_config_integrity(self):
""" Make sure the scheduler config is valid """
tasks_by_hash = {_hash_task(t): t for t in self.config_tasks}
if len(tasks_by_hash) != len(self.config_tasks):
raise Exception("Fatal error: there was a hash duplicate in the scheduled tasks config.")
for h, task in tasks_by_hash.items():
if task.get("monthday") and not task.get("dailytime"):
raise Exception("Fatal error: you can't schedule a task with 'monthday' and without 'dailytime' (%s)" % h)
if task.get("weekday") and not task.get("dailytime"):
raise Exception("Fatal error: you can't schedule a task with 'weekday' and without 'dailytime' (%s)" % h)
if not task.get("monthday") and not task.get("weekday") and not task.get("dailytime") and not task.get("interval"):
raise Exception("Fatal error: scheduler must be specified one of monthday,weekday,dailytime,interval. (%s)" % h) | [
"def",
"check_config_integrity",
"(",
"self",
")",
":",
"tasks_by_hash",
"=",
"{",
"_hash_task",
"(",
"t",
")",
":",
"t",
"for",
"t",
"in",
"self",
".",
"config_tasks",
"}",
"if",
"len",
"(",
"tasks_by_hash",
")",
"!=",
"len",
"(",
"self",
".",
"config... | Make sure the scheduler config is valid | [
"Make",
"sure",
"the",
"scheduler",
"config",
"is",
"valid"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/scheduler.py#L29-L43 | train | 207,812 |
pricingassistant/mrq | mrq/scheduler.py | Scheduler.sync_config_tasks | def sync_config_tasks(self):
""" Performs the first sync of a list of tasks, often defined in the config file. """
tasks_by_hash = {_hash_task(t): t for t in self.config_tasks}
for task in self.all_tasks:
if tasks_by_hash.get(task["hash"]):
del tasks_by_hash[task["hash"]]
else:
self.collection.remove({"_id": task["_id"]})
log.debug("Scheduler: deleted %s" % task["hash"])
# What remains are the new ones to be inserted
for h, task in tasks_by_hash.items():
task["hash"] = h
task["datelastqueued"] = datetime.datetime.fromtimestamp(0)
if task.get("dailytime"):
# Because MongoDB can store datetimes but not times,
# we add today's date to the dailytime.
# The date part will be discarded in check()
task["dailytime"] = datetime.datetime.combine(
datetime.datetime.utcnow(), task["dailytime"])
task["interval"] = 3600 * 24
# Avoid to queue task in check() if today dailytime is already passed
if datetime.datetime.utcnow().time() > task["dailytime"].time():
task["datelastqueued"] = datetime.datetime.utcnow()
self.collection.find_one_and_update({"hash": task["hash"]}, {"$set": task}, upsert=True)
log.debug("Scheduler: added %s" % task["hash"]) | python | def sync_config_tasks(self):
""" Performs the first sync of a list of tasks, often defined in the config file. """
tasks_by_hash = {_hash_task(t): t for t in self.config_tasks}
for task in self.all_tasks:
if tasks_by_hash.get(task["hash"]):
del tasks_by_hash[task["hash"]]
else:
self.collection.remove({"_id": task["_id"]})
log.debug("Scheduler: deleted %s" % task["hash"])
# What remains are the new ones to be inserted
for h, task in tasks_by_hash.items():
task["hash"] = h
task["datelastqueued"] = datetime.datetime.fromtimestamp(0)
if task.get("dailytime"):
# Because MongoDB can store datetimes but not times,
# we add today's date to the dailytime.
# The date part will be discarded in check()
task["dailytime"] = datetime.datetime.combine(
datetime.datetime.utcnow(), task["dailytime"])
task["interval"] = 3600 * 24
# Avoid to queue task in check() if today dailytime is already passed
if datetime.datetime.utcnow().time() > task["dailytime"].time():
task["datelastqueued"] = datetime.datetime.utcnow()
self.collection.find_one_and_update({"hash": task["hash"]}, {"$set": task}, upsert=True)
log.debug("Scheduler: added %s" % task["hash"]) | [
"def",
"sync_config_tasks",
"(",
"self",
")",
":",
"tasks_by_hash",
"=",
"{",
"_hash_task",
"(",
"t",
")",
":",
"t",
"for",
"t",
"in",
"self",
".",
"config_tasks",
"}",
"for",
"task",
"in",
"self",
".",
"all_tasks",
":",
"if",
"tasks_by_hash",
".",
"ge... | Performs the first sync of a list of tasks, often defined in the config file. | [
"Performs",
"the",
"first",
"sync",
"of",
"a",
"list",
"of",
"tasks",
"often",
"defined",
"in",
"the",
"config",
"file",
"."
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/scheduler.py#L45-L74 | train | 207,813 |
pricingassistant/mrq | mrq/helpers.py | ratelimit | def ratelimit(key, limit, per=1, redis=None):
""" Returns an integer with the number of available actions for the
current period in seconds. If zero, rate was already reached. """
if redis is None:
redis = connections.redis
# http://redis.io/commands/INCR
now = int(time.time())
k = "ratelimit:%s:%s" % (key, now // per)
with redis.pipeline(transaction=True) as pipeline:
pipeline.incr(k, 1)
pipeline.expire(k, per + 10)
value = pipeline.execute()
current = int(value[0]) - 1
if current >= limit:
return 0
else:
return limit - current | python | def ratelimit(key, limit, per=1, redis=None):
""" Returns an integer with the number of available actions for the
current period in seconds. If zero, rate was already reached. """
if redis is None:
redis = connections.redis
# http://redis.io/commands/INCR
now = int(time.time())
k = "ratelimit:%s:%s" % (key, now // per)
with redis.pipeline(transaction=True) as pipeline:
pipeline.incr(k, 1)
pipeline.expire(k, per + 10)
value = pipeline.execute()
current = int(value[0]) - 1
if current >= limit:
return 0
else:
return limit - current | [
"def",
"ratelimit",
"(",
"key",
",",
"limit",
",",
"per",
"=",
"1",
",",
"redis",
"=",
"None",
")",
":",
"if",
"redis",
"is",
"None",
":",
"redis",
"=",
"connections",
".",
"redis",
"# http://redis.io/commands/INCR",
"now",
"=",
"int",
"(",
"time",
"."... | Returns an integer with the number of available actions for the
current period in seconds. If zero, rate was already reached. | [
"Returns",
"an",
"integer",
"with",
"the",
"number",
"of",
"available",
"actions",
"for",
"the",
"current",
"period",
"in",
"seconds",
".",
"If",
"zero",
"rate",
"was",
"already",
"reached",
"."
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/helpers.py#L6-L28 | train | 207,814 |
pricingassistant/mrq | mrq/worker.py | Worker.greenlet_logs | def greenlet_logs(self):
""" This greenlet always runs in background to update current
logs in MongoDB every 10 seconds.
Caution: it might get delayed when doing long blocking operations.
Should we do this in a thread instead?
"""
while True:
try:
self.flush_logs()
except Exception as e: # pylint: disable=broad-except
self.log.error("When flushing logs: %s" % e)
finally:
time.sleep(self.config["report_interval"]) | python | def greenlet_logs(self):
""" This greenlet always runs in background to update current
logs in MongoDB every 10 seconds.
Caution: it might get delayed when doing long blocking operations.
Should we do this in a thread instead?
"""
while True:
try:
self.flush_logs()
except Exception as e: # pylint: disable=broad-except
self.log.error("When flushing logs: %s" % e)
finally:
time.sleep(self.config["report_interval"]) | [
"def",
"greenlet_logs",
"(",
"self",
")",
":",
"while",
"True",
":",
"try",
":",
"self",
".",
"flush_logs",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"# pylint: disable=broad-except",
"self",
".",
"log",
".",
"error",
"(",
"\"When flushing logs: %s\"",
... | This greenlet always runs in background to update current
logs in MongoDB every 10 seconds.
Caution: it might get delayed when doing long blocking operations.
Should we do this in a thread instead? | [
"This",
"greenlet",
"always",
"runs",
"in",
"background",
"to",
"update",
"current",
"logs",
"in",
"MongoDB",
"every",
"10",
"seconds",
"."
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/worker.py#L168-L182 | train | 207,815 |
pricingassistant/mrq | mrq/worker.py | Worker.refresh_queues | def refresh_queues(self, fatal=False):
""" Updates the list of currently known queues and subqueues """
try:
queues = []
prefixes = [q for q in self.config["queues"] if q.endswith("/")]
known_subqueues = Queue.all_known(prefixes=prefixes)
for q in self.config["queues"]:
queues.append(Queue(q))
if q.endswith("/"):
for subqueue in known_subqueues:
if subqueue.startswith(q):
queues.append(Queue(subqueue))
self.queues = queues
except Exception as e: # pylint: disable=broad-except
self.log.error("When refreshing subqueues: %s", e)
if fatal:
raise | python | def refresh_queues(self, fatal=False):
""" Updates the list of currently known queues and subqueues """
try:
queues = []
prefixes = [q for q in self.config["queues"] if q.endswith("/")]
known_subqueues = Queue.all_known(prefixes=prefixes)
for q in self.config["queues"]:
queues.append(Queue(q))
if q.endswith("/"):
for subqueue in known_subqueues:
if subqueue.startswith(q):
queues.append(Queue(subqueue))
self.queues = queues
except Exception as e: # pylint: disable=broad-except
self.log.error("When refreshing subqueues: %s", e)
if fatal:
raise | [
"def",
"refresh_queues",
"(",
"self",
",",
"fatal",
"=",
"False",
")",
":",
"try",
":",
"queues",
"=",
"[",
"]",
"prefixes",
"=",
"[",
"q",
"for",
"q",
"in",
"self",
".",
"config",
"[",
"\"queues\"",
"]",
"if",
"q",
".",
"endswith",
"(",
"\"/\"",
... | Updates the list of currently known queues and subqueues | [
"Updates",
"the",
"list",
"of",
"currently",
"known",
"queues",
"and",
"subqueues"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/worker.py#L190-L210 | train | 207,816 |
pricingassistant/mrq | mrq/worker.py | Worker.get_worker_report | def get_worker_report(self, with_memory=False):
""" Returns a dict containing all the data we can about the current status of the worker and
its jobs. """
greenlets = []
for greenlet in list(self.gevent_pool):
g = {}
short_stack = []
stack = traceback.format_stack(greenlet.gr_frame)
for s in stack[1:]:
if "/gevent/hub.py" in s:
break
short_stack.append(s)
g["stack"] = short_stack
job = get_current_job(id(greenlet))
if job:
job.save()
if job.data:
g["path"] = job.data["path"]
g["datestarted"] = job.datestarted
g["id"] = str(job.id)
g["time"] = getattr(greenlet, "_trace_time", 0)
g["switches"] = getattr(greenlet, "_trace_switches", None)
# pylint: disable=protected-access
if job._current_io is not None:
g["io"] = job._current_io
greenlets.append(g)
# When faking network latency, all sockets are affected, including OS ones, but
# we still want reliable reports so this is disabled.
if (not with_memory) or (self.config["add_network_latency"] != "0" and self.config["add_network_latency"]):
cpu = {
"user": 0,
"system": 0,
"percent": 0
}
mem = {"rss": 0, "swap": 0, "total": 0}
else:
cpu_times = self.process.cpu_times()
cpu = {
"user": cpu_times.user,
"system": cpu_times.system,
"percent": self.process.cpu_percent(0)
}
mem = self.get_memory()
# Avoid sharing passwords or sensitive config!
whitelisted_config = [
"max_jobs",
"max_memory"
"greenlets",
"processes",
"queues",
"dequeue_strategy",
"scheduler",
"name",
"local_ip",
"external_ip",
"agent_id",
"worker_group"
]
io = None
if self._traced_io:
io = {}
for k, v in iteritems(self._traced_io):
if k == "total":
io[k] = v
else:
io[k] = sorted(list(v.items()), reverse=True, key=lambda x: x[1])
used_pool_slots = len(self.gevent_pool)
used_avg = self.pool_usage_average.next(used_pool_slots)
return {
"status": self.status,
"config": {k: v for k, v in iteritems(self.config) if k in whitelisted_config},
"done_jobs": self.done_jobs,
"usage_avg": used_avg / self.pool_size,
"datestarted": self.datestarted,
"datereported": datetime.datetime.utcnow(),
"name": self.name,
"io": io,
"_id": str(self.id),
"process": {
"pid": self.process.pid,
"cpu": cpu,
"mem": mem
# https://code.google.com/p/psutil/wiki/Documentation
# get_open_files
# get_connections
# get_num_ctx_switches
# get_num_fds
# get_io_counters
# get_nice
},
"jobs": greenlets
} | python | def get_worker_report(self, with_memory=False):
""" Returns a dict containing all the data we can about the current status of the worker and
its jobs. """
greenlets = []
for greenlet in list(self.gevent_pool):
g = {}
short_stack = []
stack = traceback.format_stack(greenlet.gr_frame)
for s in stack[1:]:
if "/gevent/hub.py" in s:
break
short_stack.append(s)
g["stack"] = short_stack
job = get_current_job(id(greenlet))
if job:
job.save()
if job.data:
g["path"] = job.data["path"]
g["datestarted"] = job.datestarted
g["id"] = str(job.id)
g["time"] = getattr(greenlet, "_trace_time", 0)
g["switches"] = getattr(greenlet, "_trace_switches", None)
# pylint: disable=protected-access
if job._current_io is not None:
g["io"] = job._current_io
greenlets.append(g)
# When faking network latency, all sockets are affected, including OS ones, but
# we still want reliable reports so this is disabled.
if (not with_memory) or (self.config["add_network_latency"] != "0" and self.config["add_network_latency"]):
cpu = {
"user": 0,
"system": 0,
"percent": 0
}
mem = {"rss": 0, "swap": 0, "total": 0}
else:
cpu_times = self.process.cpu_times()
cpu = {
"user": cpu_times.user,
"system": cpu_times.system,
"percent": self.process.cpu_percent(0)
}
mem = self.get_memory()
# Avoid sharing passwords or sensitive config!
whitelisted_config = [
"max_jobs",
"max_memory"
"greenlets",
"processes",
"queues",
"dequeue_strategy",
"scheduler",
"name",
"local_ip",
"external_ip",
"agent_id",
"worker_group"
]
io = None
if self._traced_io:
io = {}
for k, v in iteritems(self._traced_io):
if k == "total":
io[k] = v
else:
io[k] = sorted(list(v.items()), reverse=True, key=lambda x: x[1])
used_pool_slots = len(self.gevent_pool)
used_avg = self.pool_usage_average.next(used_pool_slots)
return {
"status": self.status,
"config": {k: v for k, v in iteritems(self.config) if k in whitelisted_config},
"done_jobs": self.done_jobs,
"usage_avg": used_avg / self.pool_size,
"datestarted": self.datestarted,
"datereported": datetime.datetime.utcnow(),
"name": self.name,
"io": io,
"_id": str(self.id),
"process": {
"pid": self.process.pid,
"cpu": cpu,
"mem": mem
# https://code.google.com/p/psutil/wiki/Documentation
# get_open_files
# get_connections
# get_num_ctx_switches
# get_num_fds
# get_io_counters
# get_nice
},
"jobs": greenlets
} | [
"def",
"get_worker_report",
"(",
"self",
",",
"with_memory",
"=",
"False",
")",
":",
"greenlets",
"=",
"[",
"]",
"for",
"greenlet",
"in",
"list",
"(",
"self",
".",
"gevent_pool",
")",
":",
"g",
"=",
"{",
"}",
"short_stack",
"=",
"[",
"]",
"stack",
"=... | Returns a dict containing all the data we can about the current status of the worker and
its jobs. | [
"Returns",
"a",
"dict",
"containing",
"all",
"the",
"data",
"we",
"can",
"about",
"the",
"current",
"status",
"of",
"the",
"worker",
"and",
"its",
"jobs",
"."
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/worker.py#L233-L333 | train | 207,817 |
pricingassistant/mrq | mrq/worker.py | Worker.greenlet_timeouts | def greenlet_timeouts(self):
""" This greenlet kills jobs in other greenlets if they timeout.
"""
while True:
now = datetime.datetime.utcnow()
for greenlet in list(self.gevent_pool):
job = get_current_job(id(greenlet))
if job and job.timeout and job.datestarted:
expires = job.datestarted + datetime.timedelta(seconds=job.timeout)
if now > expires:
job.kill(block=False, reason="timeout")
time.sleep(1) | python | def greenlet_timeouts(self):
""" This greenlet kills jobs in other greenlets if they timeout.
"""
while True:
now = datetime.datetime.utcnow()
for greenlet in list(self.gevent_pool):
job = get_current_job(id(greenlet))
if job and job.timeout and job.datestarted:
expires = job.datestarted + datetime.timedelta(seconds=job.timeout)
if now > expires:
job.kill(block=False, reason="timeout")
time.sleep(1) | [
"def",
"greenlet_timeouts",
"(",
"self",
")",
":",
"while",
"True",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"for",
"greenlet",
"in",
"list",
"(",
"self",
".",
"gevent_pool",
")",
":",
"job",
"=",
"get_current_job",
"(",
"... | This greenlet kills jobs in other greenlets if they timeout. | [
"This",
"greenlet",
"kills",
"jobs",
"in",
"other",
"greenlets",
"if",
"they",
"timeout",
"."
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/worker.py#L358-L371 | train | 207,818 |
pricingassistant/mrq | mrq/worker.py | Worker.wait_for_idle | def wait_for_idle(self):
""" Waits until the worker has nothing more to do. Very useful in tests """
# Be mindful that this is being executed in a different greenlet than the work_* methods.
while True:
time.sleep(0.01)
with self.work_lock:
if self.status != "wait":
continue
if len(self.gevent_pool) > 0:
continue
# Force a refresh of the current subqueues, one might just have been created.
self.refresh_queues()
# We might be dequeueing a new subqueue. Double check that we don't have anything more to do
outcome, dequeue_jobs = self.work_once(free_pool_slots=1, max_jobs=None)
if outcome is "wait" and dequeue_jobs == 0:
break | python | def wait_for_idle(self):
""" Waits until the worker has nothing more to do. Very useful in tests """
# Be mindful that this is being executed in a different greenlet than the work_* methods.
while True:
time.sleep(0.01)
with self.work_lock:
if self.status != "wait":
continue
if len(self.gevent_pool) > 0:
continue
# Force a refresh of the current subqueues, one might just have been created.
self.refresh_queues()
# We might be dequeueing a new subqueue. Double check that we don't have anything more to do
outcome, dequeue_jobs = self.work_once(free_pool_slots=1, max_jobs=None)
if outcome is "wait" and dequeue_jobs == 0:
break | [
"def",
"wait_for_idle",
"(",
"self",
")",
":",
"# Be mindful that this is being executed in a different greenlet than the work_* methods.",
"while",
"True",
":",
"time",
".",
"sleep",
"(",
"0.01",
")",
"with",
"self",
".",
"work_lock",
":",
"if",
"self",
".",
"status"... | Waits until the worker has nothing more to do. Very useful in tests | [
"Waits",
"until",
"the",
"worker",
"has",
"nothing",
"more",
"to",
"do",
".",
"Very",
"useful",
"in",
"tests"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/worker.py#L417-L441 | train | 207,819 |
pricingassistant/mrq | mrq/worker.py | Worker.work_once | def work_once(self, free_pool_slots=1, max_jobs=None):
""" Does one lookup for new jobs, inside the inner work loop """
dequeued_jobs = 0
available_queues = [
queue for queue in self.queues
if queue.root_id not in self.paused_queues and
queue.id not in self.paused_queues
]
for queue_i in range(len(available_queues)):
queue = available_queues[(queue_i + self.queue_offset) % len(available_queues)]
max_jobs_per_queue = free_pool_slots - dequeued_jobs
if max_jobs_per_queue <= 0:
queue_i -= 1
break
if self.config["dequeue_strategy"] == "parallel":
max_jobs_per_queue = max(1, int(max_jobs_per_queue / (len(available_queues) - queue_i)))
for job in queue.dequeue_jobs(
max_jobs=max_jobs_per_queue,
job_class=self.job_class,
worker=self
):
dequeued_jobs += 1
self.gevent_pool.spawn(self.perform_job, job)
# At the next pass, start at the next queue to avoid always dequeuing the same one
if self.config["dequeue_strategy"] == "parallel":
self.queue_offset = (self.queue_offset + queue_i + 1) % len(self.queues)
# TODO consider this when dequeuing jobs to have strict limits
if max_jobs and self.done_jobs >= max_jobs:
self.log.info("Reached max_jobs=%s" % self.done_jobs)
return "break", dequeued_jobs
# We seem to have exhausted available jobs, we can sleep for a
# while.
if dequeued_jobs == 0:
if self.config["dequeue_strategy"] == "burst":
self.log.info("Burst mode: stopping now because queues were empty")
return "break", dequeued_jobs
return "wait", dequeued_jobs
return None, dequeued_jobs | python | def work_once(self, free_pool_slots=1, max_jobs=None):
""" Does one lookup for new jobs, inside the inner work loop """
dequeued_jobs = 0
available_queues = [
queue for queue in self.queues
if queue.root_id not in self.paused_queues and
queue.id not in self.paused_queues
]
for queue_i in range(len(available_queues)):
queue = available_queues[(queue_i + self.queue_offset) % len(available_queues)]
max_jobs_per_queue = free_pool_slots - dequeued_jobs
if max_jobs_per_queue <= 0:
queue_i -= 1
break
if self.config["dequeue_strategy"] == "parallel":
max_jobs_per_queue = max(1, int(max_jobs_per_queue / (len(available_queues) - queue_i)))
for job in queue.dequeue_jobs(
max_jobs=max_jobs_per_queue,
job_class=self.job_class,
worker=self
):
dequeued_jobs += 1
self.gevent_pool.spawn(self.perform_job, job)
# At the next pass, start at the next queue to avoid always dequeuing the same one
if self.config["dequeue_strategy"] == "parallel":
self.queue_offset = (self.queue_offset + queue_i + 1) % len(self.queues)
# TODO consider this when dequeuing jobs to have strict limits
if max_jobs and self.done_jobs >= max_jobs:
self.log.info("Reached max_jobs=%s" % self.done_jobs)
return "break", dequeued_jobs
# We seem to have exhausted available jobs, we can sleep for a
# while.
if dequeued_jobs == 0:
if self.config["dequeue_strategy"] == "burst":
self.log.info("Burst mode: stopping now because queues were empty")
return "break", dequeued_jobs
return "wait", dequeued_jobs
return None, dequeued_jobs | [
"def",
"work_once",
"(",
"self",
",",
"free_pool_slots",
"=",
"1",
",",
"max_jobs",
"=",
"None",
")",
":",
"dequeued_jobs",
"=",
"0",
"available_queues",
"=",
"[",
"queue",
"for",
"queue",
"in",
"self",
".",
"queues",
"if",
"queue",
".",
"root_id",
"not"... | Does one lookup for new jobs, inside the inner work loop | [
"Does",
"one",
"lookup",
"for",
"new",
"jobs",
"inside",
"the",
"inner",
"work",
"loop"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/worker.py#L566-L618 | train | 207,820 |
pricingassistant/mrq | mrq/worker.py | Worker.work_wait | def work_wait(self):
""" Wait for new jobs to arrive """
if len(self.queues_with_notify) > 0:
# https://github.com/antirez/redis/issues/874
connections.redis.blpop(*(self.queues_with_notify + [max(1, int(self.config["max_latency"]))]))
else:
gevent.sleep(self.config["max_latency"]) | python | def work_wait(self):
""" Wait for new jobs to arrive """
if len(self.queues_with_notify) > 0:
# https://github.com/antirez/redis/issues/874
connections.redis.blpop(*(self.queues_with_notify + [max(1, int(self.config["max_latency"]))]))
else:
gevent.sleep(self.config["max_latency"]) | [
"def",
"work_wait",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"queues_with_notify",
")",
">",
"0",
":",
"# https://github.com/antirez/redis/issues/874",
"connections",
".",
"redis",
".",
"blpop",
"(",
"*",
"(",
"self",
".",
"queues_with_notify",
"+... | Wait for new jobs to arrive | [
"Wait",
"for",
"new",
"jobs",
"to",
"arrive"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/worker.py#L620-L627 | train | 207,821 |
pricingassistant/mrq | mrq/queue.py | Queue.serialize_job_ids | def serialize_job_ids(self, job_ids):
""" Returns job_ids serialized for storage in Redis """
if len(job_ids) == 0 or self.use_large_ids:
return job_ids
elif isinstance(job_ids[0], ObjectId):
return [x.binary for x in job_ids]
else:
return [bytes.fromhex(str(x)) for x in job_ids] | python | def serialize_job_ids(self, job_ids):
""" Returns job_ids serialized for storage in Redis """
if len(job_ids) == 0 or self.use_large_ids:
return job_ids
elif isinstance(job_ids[0], ObjectId):
return [x.binary for x in job_ids]
else:
return [bytes.fromhex(str(x)) for x in job_ids] | [
"def",
"serialize_job_ids",
"(",
"self",
",",
"job_ids",
")",
":",
"if",
"len",
"(",
"job_ids",
")",
"==",
"0",
"or",
"self",
".",
"use_large_ids",
":",
"return",
"job_ids",
"elif",
"isinstance",
"(",
"job_ids",
"[",
"0",
"]",
",",
"ObjectId",
")",
":"... | Returns job_ids serialized for storage in Redis | [
"Returns",
"job_ids",
"serialized",
"for",
"storage",
"in",
"Redis"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue.py#L99-L106 | train | 207,822 |
pricingassistant/mrq | mrq/queue.py | Queue.unserialize_job_ids | def unserialize_job_ids(self, job_ids):
""" Unserialize job_ids stored in Redis """
if len(job_ids) == 0 or self.use_large_ids:
return job_ids
else:
return [binascii.hexlify(x.encode('utf-8') if (PY3 and isinstance(x, str)) else x).decode('ascii')
for x in job_ids] | python | def unserialize_job_ids(self, job_ids):
""" Unserialize job_ids stored in Redis """
if len(job_ids) == 0 or self.use_large_ids:
return job_ids
else:
return [binascii.hexlify(x.encode('utf-8') if (PY3 and isinstance(x, str)) else x).decode('ascii')
for x in job_ids] | [
"def",
"unserialize_job_ids",
"(",
"self",
",",
"job_ids",
")",
":",
"if",
"len",
"(",
"job_ids",
")",
"==",
"0",
"or",
"self",
".",
"use_large_ids",
":",
"return",
"job_ids",
"else",
":",
"return",
"[",
"binascii",
".",
"hexlify",
"(",
"x",
".",
"enco... | Unserialize job_ids stored in Redis | [
"Unserialize",
"job_ids",
"stored",
"in",
"Redis"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue.py#L108-L114 | train | 207,823 |
pricingassistant/mrq | mrq/queue.py | Queue.all_active | def all_active(cls):
""" List active queues, based on their lengths in Redis. Warning, uses the unscalable KEYS redis command """
prefix = context.get_current_config()["redis_prefix"]
queues = []
for key in context.connections.redis.keys():
if key.startswith(prefix):
queues.append(Queue(key[len(prefix) + 3:]))
return queues | python | def all_active(cls):
""" List active queues, based on their lengths in Redis. Warning, uses the unscalable KEYS redis command """
prefix = context.get_current_config()["redis_prefix"]
queues = []
for key in context.connections.redis.keys():
if key.startswith(prefix):
queues.append(Queue(key[len(prefix) + 3:]))
return queues | [
"def",
"all_active",
"(",
"cls",
")",
":",
"prefix",
"=",
"context",
".",
"get_current_config",
"(",
")",
"[",
"\"redis_prefix\"",
"]",
"queues",
"=",
"[",
"]",
"for",
"key",
"in",
"context",
".",
"connections",
".",
"redis",
".",
"keys",
"(",
")",
":"... | List active queues, based on their lengths in Redis. Warning, uses the unscalable KEYS redis command | [
"List",
"active",
"queues",
"based",
"on",
"their",
"lengths",
"in",
"Redis",
".",
"Warning",
"uses",
"the",
"unscalable",
"KEYS",
"redis",
"command"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue.py#L152-L161 | train | 207,824 |
pricingassistant/mrq | mrq/queue.py | Queue.all_known | def all_known(cls, sources=None, prefixes=None):
""" List all currently known queues """
sources = sources or ("config", "jobs", "raw_subqueues")
queues = set()
if "config" in sources and not prefixes:
# Some queues are explicitly declared in the config (including all root raw queues)
cfg = context.get_current_config()
queues_from_config = [
t.get("queue")
for t in (cfg.get("tasks") or {}).values()
if t.get("queue")
]
queues_from_config += Queue.get_queues_config().keys()
queues_from_config += [
t.get("retry_queue")
for t in Queue.get_queues_config().values()
if t.get("retry_queue")
]
queues |= set(queues_from_config)
if "jobs" in sources:
# This will get all queues from mongodb, including those where we have only non-queued jobs
for q in context.connections.mongodb_jobs.mrq_jobs.distinct("queue"):
if prefixes and not any(q.startswith(p) for p in prefixes):
continue
queues.add(q)
if "raw_subqueues" in sources:
for q in Queue.get_queues_config():
if prefixes and not any(q + "/" == p for p in prefixes):
continue
queue_obj = Queue(q)
if queue_obj.is_raw and queue_obj.has_subqueues:
# TODO: optimize this with a single SUNION on all keys
queues |= queue_obj.get_known_subqueues()
return queues | python | def all_known(cls, sources=None, prefixes=None):
""" List all currently known queues """
sources = sources or ("config", "jobs", "raw_subqueues")
queues = set()
if "config" in sources and not prefixes:
# Some queues are explicitly declared in the config (including all root raw queues)
cfg = context.get_current_config()
queues_from_config = [
t.get("queue")
for t in (cfg.get("tasks") or {}).values()
if t.get("queue")
]
queues_from_config += Queue.get_queues_config().keys()
queues_from_config += [
t.get("retry_queue")
for t in Queue.get_queues_config().values()
if t.get("retry_queue")
]
queues |= set(queues_from_config)
if "jobs" in sources:
# This will get all queues from mongodb, including those where we have only non-queued jobs
for q in context.connections.mongodb_jobs.mrq_jobs.distinct("queue"):
if prefixes and not any(q.startswith(p) for p in prefixes):
continue
queues.add(q)
if "raw_subqueues" in sources:
for q in Queue.get_queues_config():
if prefixes and not any(q + "/" == p for p in prefixes):
continue
queue_obj = Queue(q)
if queue_obj.is_raw and queue_obj.has_subqueues:
# TODO: optimize this with a single SUNION on all keys
queues |= queue_obj.get_known_subqueues()
return queues | [
"def",
"all_known",
"(",
"cls",
",",
"sources",
"=",
"None",
",",
"prefixes",
"=",
"None",
")",
":",
"sources",
"=",
"sources",
"or",
"(",
"\"config\"",
",",
"\"jobs\"",
",",
"\"raw_subqueues\"",
")",
"queues",
"=",
"set",
"(",
")",
"if",
"\"config\"",
... | List all currently known queues | [
"List",
"all",
"currently",
"known",
"queues"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue.py#L164-L208 | train | 207,825 |
pricingassistant/mrq | mrq/queue.py | Queue.all | def all(cls):
""" List all queues in MongoDB via aggregation, with their queued jobs counts. Might be slow. """
# Start with raw queues we know exist from the config
queues = {x: 0 for x in Queue.get_queues_config()}
stats = list(context.connections.mongodb_jobs.mrq_jobs.aggregate([
{"$match": {"status": "queued"}},
{"$group": {"_id": "$queue", "jobs": {"$sum": 1}}}
]))
queues.update({x["_id"]: x["jobs"] for x in stats})
return queues | python | def all(cls):
""" List all queues in MongoDB via aggregation, with their queued jobs counts. Might be slow. """
# Start with raw queues we know exist from the config
queues = {x: 0 for x in Queue.get_queues_config()}
stats = list(context.connections.mongodb_jobs.mrq_jobs.aggregate([
{"$match": {"status": "queued"}},
{"$group": {"_id": "$queue", "jobs": {"$sum": 1}}}
]))
queues.update({x["_id"]: x["jobs"] for x in stats})
return queues | [
"def",
"all",
"(",
"cls",
")",
":",
"# Start with raw queues we know exist from the config",
"queues",
"=",
"{",
"x",
":",
"0",
"for",
"x",
"in",
"Queue",
".",
"get_queues_config",
"(",
")",
"}",
"stats",
"=",
"list",
"(",
"context",
".",
"connections",
".",... | List all queues in MongoDB via aggregation, with their queued jobs counts. Might be slow. | [
"List",
"all",
"queues",
"in",
"MongoDB",
"via",
"aggregation",
"with",
"their",
"queued",
"jobs",
"counts",
".",
"Might",
"be",
"slow",
"."
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue.py#L211-L224 | train | 207,826 |
pricingassistant/mrq | mrq/queue.py | Queue.notify | def notify(self, new_jobs_count):
""" We just queued new_jobs_count jobs on this queue, wake up the workers if needed """
if not self.use_notify():
return
# Not really useful to send more than 100 notifs (to be configured)
count = min(new_jobs_count, 100)
notify_key = redis_key("notify", self)
context.connections.redis.lpush(notify_key, *([1] * count))
context.connections.redis.expire(notify_key, max(1, int(context.get_current_config()["max_latency"] * 2))) | python | def notify(self, new_jobs_count):
""" We just queued new_jobs_count jobs on this queue, wake up the workers if needed """
if not self.use_notify():
return
# Not really useful to send more than 100 notifs (to be configured)
count = min(new_jobs_count, 100)
notify_key = redis_key("notify", self)
context.connections.redis.lpush(notify_key, *([1] * count))
context.connections.redis.expire(notify_key, max(1, int(context.get_current_config()["max_latency"] * 2))) | [
"def",
"notify",
"(",
"self",
",",
"new_jobs_count",
")",
":",
"if",
"not",
"self",
".",
"use_notify",
"(",
")",
":",
"return",
"# Not really useful to send more than 100 notifs (to be configured)",
"count",
"=",
"min",
"(",
"new_jobs_count",
",",
"100",
")",
"not... | We just queued new_jobs_count jobs on this queue, wake up the workers if needed | [
"We",
"just",
"queued",
"new_jobs_count",
"jobs",
"on",
"this",
"queue",
"wake",
"up",
"the",
"workers",
"if",
"needed"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue.py#L230-L242 | train | 207,827 |
pricingassistant/mrq | mrq/queue_raw.py | QueueRaw.empty | def empty(self):
""" Empty a queue. """
with context.connections.redis.pipeline(transaction=True) as pipe:
pipe.delete(self.redis_key)
pipe.delete(self.redis_key_known_subqueues)
pipe.execute() | python | def empty(self):
""" Empty a queue. """
with context.connections.redis.pipeline(transaction=True) as pipe:
pipe.delete(self.redis_key)
pipe.delete(self.redis_key_known_subqueues)
pipe.execute() | [
"def",
"empty",
"(",
"self",
")",
":",
"with",
"context",
".",
"connections",
".",
"redis",
".",
"pipeline",
"(",
"transaction",
"=",
"True",
")",
"as",
"pipe",
":",
"pipe",
".",
"delete",
"(",
"self",
".",
"redis_key",
")",
"pipe",
".",
"delete",
"(... | Empty a queue. | [
"Empty",
"a",
"queue",
"."
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue_raw.py#L39-L44 | train | 207,828 |
pricingassistant/mrq | mrq/queue_raw.py | QueueRaw.enqueue_raw_jobs | def enqueue_raw_jobs(self, params_list):
""" Add Jobs to this queue with raw parameters. They are not yet in MongoDB. """
if len(params_list) == 0:
return
if self.is_subqueue:
context.connections.redis.sadd(self.redis_key_known_subqueues, self.id)
# ZSET
if self.is_sorted:
if not isinstance(params_list, dict) and self.is_timed:
now = time.time()
params_list = {x: now for x in params_list}
context.connections.redis.zadd(self.redis_key, **params_list)
# SET
elif self.is_set:
context.connections.redis.sadd(self.redis_key, *params_list)
# LIST
else:
context.connections.redis.rpush(self.redis_key, *params_list)
context.metric("queues.%s.enqueued" % self.id, len(params_list))
context.metric("queues.all.enqueued", len(params_list)) | python | def enqueue_raw_jobs(self, params_list):
""" Add Jobs to this queue with raw parameters. They are not yet in MongoDB. """
if len(params_list) == 0:
return
if self.is_subqueue:
context.connections.redis.sadd(self.redis_key_known_subqueues, self.id)
# ZSET
if self.is_sorted:
if not isinstance(params_list, dict) and self.is_timed:
now = time.time()
params_list = {x: now for x in params_list}
context.connections.redis.zadd(self.redis_key, **params_list)
# SET
elif self.is_set:
context.connections.redis.sadd(self.redis_key, *params_list)
# LIST
else:
context.connections.redis.rpush(self.redis_key, *params_list)
context.metric("queues.%s.enqueued" % self.id, len(params_list))
context.metric("queues.all.enqueued", len(params_list)) | [
"def",
"enqueue_raw_jobs",
"(",
"self",
",",
"params_list",
")",
":",
"if",
"len",
"(",
"params_list",
")",
"==",
"0",
":",
"return",
"if",
"self",
".",
"is_subqueue",
":",
"context",
".",
"connections",
".",
"redis",
".",
"sadd",
"(",
"self",
".",
"re... | Add Jobs to this queue with raw parameters. They are not yet in MongoDB. | [
"Add",
"Jobs",
"to",
"this",
"queue",
"with",
"raw",
"parameters",
".",
"They",
"are",
"not",
"yet",
"in",
"MongoDB",
"."
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue_raw.py#L68-L95 | train | 207,829 |
pricingassistant/mrq | mrq/queue_raw.py | QueueRaw.remove_raw_jobs | def remove_raw_jobs(self, params_list):
""" Remove jobs from a raw queue with their raw params. """
if len(params_list) == 0:
return
# ZSET
if self.is_sorted:
context.connections.redis.zrem(self.redis_key, *iter(params_list))
# SET
elif self.is_set:
context.connections.redis.srem(self.redis_key, *params_list)
else:
# O(n)! Use with caution.
for k in params_list:
context.connections.redis.lrem(self.redis_key, 1, k)
context.metric("queues.%s.removed" % self.id, len(params_list))
context.metric("queues.all.removed", len(params_list)) | python | def remove_raw_jobs(self, params_list):
""" Remove jobs from a raw queue with their raw params. """
if len(params_list) == 0:
return
# ZSET
if self.is_sorted:
context.connections.redis.zrem(self.redis_key, *iter(params_list))
# SET
elif self.is_set:
context.connections.redis.srem(self.redis_key, *params_list)
else:
# O(n)! Use with caution.
for k in params_list:
context.connections.redis.lrem(self.redis_key, 1, k)
context.metric("queues.%s.removed" % self.id, len(params_list))
context.metric("queues.all.removed", len(params_list)) | [
"def",
"remove_raw_jobs",
"(",
"self",
",",
"params_list",
")",
":",
"if",
"len",
"(",
"params_list",
")",
"==",
"0",
":",
"return",
"# ZSET",
"if",
"self",
".",
"is_sorted",
":",
"context",
".",
"connections",
".",
"redis",
".",
"zrem",
"(",
"self",
"... | Remove jobs from a raw queue with their raw params. | [
"Remove",
"jobs",
"from",
"a",
"raw",
"queue",
"with",
"their",
"raw",
"params",
"."
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue_raw.py#L97-L117 | train | 207,830 |
pricingassistant/mrq | mrq/queue_raw.py | QueueRaw.count_jobs_to_dequeue | def count_jobs_to_dequeue(self):
""" Returns the number of jobs that can be dequeued right now from the queue. """
# timed ZSET
if self.is_timed:
return context.connections.redis.zcount(
self.redis_key,
"-inf",
time.time())
# In all other cases, it's the same as .size()
else:
return self.size() | python | def count_jobs_to_dequeue(self):
""" Returns the number of jobs that can be dequeued right now from the queue. """
# timed ZSET
if self.is_timed:
return context.connections.redis.zcount(
self.redis_key,
"-inf",
time.time())
# In all other cases, it's the same as .size()
else:
return self.size() | [
"def",
"count_jobs_to_dequeue",
"(",
"self",
")",
":",
"# timed ZSET",
"if",
"self",
".",
"is_timed",
":",
"return",
"context",
".",
"connections",
".",
"redis",
".",
"zcount",
"(",
"self",
".",
"redis_key",
",",
"\"-inf\"",
",",
"time",
".",
"time",
"(",
... | Returns the number of jobs that can be dequeued right now from the queue. | [
"Returns",
"the",
"number",
"of",
"jobs",
"that",
"can",
"be",
"dequeued",
"right",
"now",
"from",
"the",
"queue",
"."
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue_raw.py#L147-L159 | train | 207,831 |
pricingassistant/mrq | mrq/queue_raw.py | QueueRaw.get_sorted_graph | def get_sorted_graph(
self,
start=0,
stop=100,
slices=100,
include_inf=False,
exact=False):
""" Returns a graph of the distribution of jobs in a sorted set """
if not self.is_sorted:
raise Exception("Not a sorted queue")
with context.connections.redis.pipeline(transaction=exact) as pipe:
interval = old_div(float(stop - start), slices)
for i in range(0, slices):
pipe.zcount(self.redis_key,
(start + i * interval),
"(%s" % (start + (i + 1) * interval))
if include_inf:
pipe.zcount(self.redis_key, stop, "+inf")
pipe.zcount(self.redis_key, "-inf", "(%s" % start)
data = pipe.execute()
if include_inf:
return data[-1:] + data[:-1]
return data | python | def get_sorted_graph(
self,
start=0,
stop=100,
slices=100,
include_inf=False,
exact=False):
""" Returns a graph of the distribution of jobs in a sorted set """
if not self.is_sorted:
raise Exception("Not a sorted queue")
with context.connections.redis.pipeline(transaction=exact) as pipe:
interval = old_div(float(stop - start), slices)
for i in range(0, slices):
pipe.zcount(self.redis_key,
(start + i * interval),
"(%s" % (start + (i + 1) * interval))
if include_inf:
pipe.zcount(self.redis_key, stop, "+inf")
pipe.zcount(self.redis_key, "-inf", "(%s" % start)
data = pipe.execute()
if include_inf:
return data[-1:] + data[:-1]
return data | [
"def",
"get_sorted_graph",
"(",
"self",
",",
"start",
"=",
"0",
",",
"stop",
"=",
"100",
",",
"slices",
"=",
"100",
",",
"include_inf",
"=",
"False",
",",
"exact",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"is_sorted",
":",
"raise",
"Exceptio... | Returns a graph of the distribution of jobs in a sorted set | [
"Returns",
"a",
"graph",
"of",
"the",
"distribution",
"of",
"jobs",
"in",
"a",
"sorted",
"set"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/queue_raw.py#L239-L265 | train | 207,832 |
pricingassistant/mrq | mrq/processes.py | Process.install_signal_handlers | def install_signal_handlers(self):
""" Handle events like Ctrl-C from the command line. """
self.graceful_stop = False
def request_shutdown_now():
self.shutdown_now()
def request_shutdown_graceful():
# Second time CTRL-C, shutdown now
if self.graceful_stop:
self.shutdown_now()
else:
self.graceful_stop = True
self.shutdown_graceful()
# First time CTRL-C, try to shutdown gracefully
gevent.signal(signal.SIGINT, request_shutdown_graceful)
# User (or Heroku) requests a stop now, just mark tasks as interrupted.
gevent.signal(signal.SIGTERM, request_shutdown_now) | python | def install_signal_handlers(self):
""" Handle events like Ctrl-C from the command line. """
self.graceful_stop = False
def request_shutdown_now():
self.shutdown_now()
def request_shutdown_graceful():
# Second time CTRL-C, shutdown now
if self.graceful_stop:
self.shutdown_now()
else:
self.graceful_stop = True
self.shutdown_graceful()
# First time CTRL-C, try to shutdown gracefully
gevent.signal(signal.SIGINT, request_shutdown_graceful)
# User (or Heroku) requests a stop now, just mark tasks as interrupted.
gevent.signal(signal.SIGTERM, request_shutdown_now) | [
"def",
"install_signal_handlers",
"(",
"self",
")",
":",
"self",
".",
"graceful_stop",
"=",
"False",
"def",
"request_shutdown_now",
"(",
")",
":",
"self",
".",
"shutdown_now",
"(",
")",
"def",
"request_shutdown_graceful",
"(",
")",
":",
"# Second time CTRL-C, shut... | Handle events like Ctrl-C from the command line. | [
"Handle",
"events",
"like",
"Ctrl",
"-",
"C",
"from",
"the",
"command",
"line",
"."
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/processes.py#L22-L43 | train | 207,833 |
pricingassistant/mrq | mrq/processes.py | ProcessPool.set_commands | def set_commands(self, commands, timeout=None):
""" Sets the processes' desired commands for this pool and manages diff to reach that state """
self.desired_commands = commands
target_commands = list(self.desired_commands)
for process in list(self.processes):
found = False
for i in range(len(target_commands)):
if process["command"] == target_commands[i]:
target_commands.pop(i)
found = True
break
if not found:
self.stop_process(process, timeout)
# What is left are the commands to add
# TODO: we should only do this once memory conditions allow
for command in target_commands:
self.spawn(command) | python | def set_commands(self, commands, timeout=None):
""" Sets the processes' desired commands for this pool and manages diff to reach that state """
self.desired_commands = commands
target_commands = list(self.desired_commands)
for process in list(self.processes):
found = False
for i in range(len(target_commands)):
if process["command"] == target_commands[i]:
target_commands.pop(i)
found = True
break
if not found:
self.stop_process(process, timeout)
# What is left are the commands to add
# TODO: we should only do this once memory conditions allow
for command in target_commands:
self.spawn(command) | [
"def",
"set_commands",
"(",
"self",
",",
"commands",
",",
"timeout",
"=",
"None",
")",
":",
"self",
".",
"desired_commands",
"=",
"commands",
"target_commands",
"=",
"list",
"(",
"self",
".",
"desired_commands",
")",
"for",
"process",
"in",
"list",
"(",
"s... | Sets the processes' desired commands for this pool and manages diff to reach that state | [
"Sets",
"the",
"processes",
"desired",
"commands",
"for",
"this",
"pool",
"and",
"manages",
"diff",
"to",
"reach",
"that",
"state"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/processes.py#L57-L76 | train | 207,834 |
pricingassistant/mrq | mrq/processes.py | ProcessPool.spawn | def spawn(self, command):
""" Spawns a new process and adds it to the pool """
# process_name
# output
# time before starting (wait for port?)
# start_new_session=True : avoid sending parent signals to child
env = dict(os.environ)
env["MRQ_IS_SUBPROCESS"] = "1"
env.update(self.extra_env or {})
# Extract env variables from shell commands.
parts = shlex.split(command)
for p in list(parts):
if "=" in p:
env[p.split("=")[0]] = p[len(p.split("=")[0]) + 1:]
parts.pop(0)
else:
break
p = subprocess.Popen(parts, shell=False, close_fds=True, env=env, cwd=os.getcwd())
self.processes.append({
"subprocess": p,
"pid": p.pid,
"command": command,
"psutil": psutil.Process(pid=p.pid)
}) | python | def spawn(self, command):
""" Spawns a new process and adds it to the pool """
# process_name
# output
# time before starting (wait for port?)
# start_new_session=True : avoid sending parent signals to child
env = dict(os.environ)
env["MRQ_IS_SUBPROCESS"] = "1"
env.update(self.extra_env or {})
# Extract env variables from shell commands.
parts = shlex.split(command)
for p in list(parts):
if "=" in p:
env[p.split("=")[0]] = p[len(p.split("=")[0]) + 1:]
parts.pop(0)
else:
break
p = subprocess.Popen(parts, shell=False, close_fds=True, env=env, cwd=os.getcwd())
self.processes.append({
"subprocess": p,
"pid": p.pid,
"command": command,
"psutil": psutil.Process(pid=p.pid)
}) | [
"def",
"spawn",
"(",
"self",
",",
"command",
")",
":",
"# process_name",
"# output",
"# time before starting (wait for port?)",
"# start_new_session=True : avoid sending parent signals to child",
"env",
"=",
"dict",
"(",
"os",
".",
"environ",
")",
"env",
"[",
"\"MRQ_IS_SU... | Spawns a new process and adds it to the pool | [
"Spawns",
"a",
"new",
"process",
"and",
"adds",
"it",
"to",
"the",
"pool"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/processes.py#L78-L106 | train | 207,835 |
pricingassistant/mrq | mrq/processes.py | ProcessPool.wait | def wait(self):
""" Waits for the pool to be fully stopped """
while True:
if not self.greenlet_watch:
break
if self.stopping:
gevent.sleep(0.1)
else:
gevent.sleep(1) | python | def wait(self):
""" Waits for the pool to be fully stopped """
while True:
if not self.greenlet_watch:
break
if self.stopping:
gevent.sleep(0.1)
else:
gevent.sleep(1) | [
"def",
"wait",
"(",
"self",
")",
":",
"while",
"True",
":",
"if",
"not",
"self",
".",
"greenlet_watch",
":",
"break",
"if",
"self",
".",
"stopping",
":",
"gevent",
".",
"sleep",
"(",
"0.1",
")",
"else",
":",
"gevent",
".",
"sleep",
"(",
"1",
")"
] | Waits for the pool to be fully stopped | [
"Waits",
"for",
"the",
"pool",
"to",
"be",
"fully",
"stopped"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/processes.py#L112-L122 | train | 207,836 |
pricingassistant/mrq | mrq/processes.py | ProcessPool.watch_processes | def watch_processes(self):
""" Manages the status of all the known processes """
for process in list(self.processes):
self.watch_process(process)
# Cleanup processes
self.processes = [p for p in self.processes if not p.get("dead")]
if self.stopping and len(self.processes) == 0:
self.stop_watch() | python | def watch_processes(self):
""" Manages the status of all the known processes """
for process in list(self.processes):
self.watch_process(process)
# Cleanup processes
self.processes = [p for p in self.processes if not p.get("dead")]
if self.stopping and len(self.processes) == 0:
self.stop_watch() | [
"def",
"watch_processes",
"(",
"self",
")",
":",
"for",
"process",
"in",
"list",
"(",
"self",
".",
"processes",
")",
":",
"self",
".",
"watch_process",
"(",
"process",
")",
"# Cleanup processes",
"self",
".",
"processes",
"=",
"[",
"p",
"for",
"p",
"in",... | Manages the status of all the known processes | [
"Manages",
"the",
"status",
"of",
"all",
"the",
"known",
"processes"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/processes.py#L130-L140 | train | 207,837 |
pricingassistant/mrq | mrq/processes.py | ProcessPool.watch_process | def watch_process(self, process):
""" Manages the status of a single process """
status = process["psutil"].status()
# TODO: how to avoid zombies?
# print process["pid"], status
if process.get("terminate"):
if status in ("zombie", "dead"):
process["dead"] = True
elif process.get("terminate_at"):
if time.time() > (process["terminate_at"] + 5):
log.warning("Process %s had to be sent SIGKILL" % (process["pid"], ))
process["subprocess"].send_signal(signal.SIGKILL)
elif time.time() > process["terminate_at"]:
log.warning("Process %s had to be sent SIGTERM" % (process["pid"], ))
process["subprocess"].send_signal(signal.SIGTERM)
else:
if status in ("zombie", "dead"):
# Restart a new process right away (TODO: sleep a bit? max retries?)
process["dead"] = True
self.spawn(process["command"])
elif status not in ("running", "sleeping"):
log.warning("Process %s was in status %s" % (process["pid"], status)) | python | def watch_process(self, process):
""" Manages the status of a single process """
status = process["psutil"].status()
# TODO: how to avoid zombies?
# print process["pid"], status
if process.get("terminate"):
if status in ("zombie", "dead"):
process["dead"] = True
elif process.get("terminate_at"):
if time.time() > (process["terminate_at"] + 5):
log.warning("Process %s had to be sent SIGKILL" % (process["pid"], ))
process["subprocess"].send_signal(signal.SIGKILL)
elif time.time() > process["terminate_at"]:
log.warning("Process %s had to be sent SIGTERM" % (process["pid"], ))
process["subprocess"].send_signal(signal.SIGTERM)
else:
if status in ("zombie", "dead"):
# Restart a new process right away (TODO: sleep a bit? max retries?)
process["dead"] = True
self.spawn(process["command"])
elif status not in ("running", "sleeping"):
log.warning("Process %s was in status %s" % (process["pid"], status)) | [
"def",
"watch_process",
"(",
"self",
",",
"process",
")",
":",
"status",
"=",
"process",
"[",
"\"psutil\"",
"]",
".",
"status",
"(",
")",
"# TODO: how to avoid zombies?",
"# print process[\"pid\"], status",
"if",
"process",
".",
"get",
"(",
"\"terminate\"",
")",
... | Manages the status of a single process | [
"Manages",
"the",
"status",
"of",
"a",
"single",
"process"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/processes.py#L142-L168 | train | 207,838 |
pricingassistant/mrq | mrq/processes.py | ProcessPool.stop | def stop(self, timeout=None):
""" Initiates a graceful stop of the processes """
self.stopping = True
for process in list(self.processes):
self.stop_process(process, timeout=timeout) | python | def stop(self, timeout=None):
""" Initiates a graceful stop of the processes """
self.stopping = True
for process in list(self.processes):
self.stop_process(process, timeout=timeout) | [
"def",
"stop",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"self",
".",
"stopping",
"=",
"True",
"for",
"process",
"in",
"list",
"(",
"self",
".",
"processes",
")",
":",
"self",
".",
"stop_process",
"(",
"process",
",",
"timeout",
"=",
"timeo... | Initiates a graceful stop of the processes | [
"Initiates",
"a",
"graceful",
"stop",
"of",
"the",
"processes"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/processes.py#L172-L178 | train | 207,839 |
pricingassistant/mrq | mrq/processes.py | ProcessPool.stop_process | def stop_process(self, process, timeout=None):
""" Initiates a graceful stop of one process """
process["terminate"] = True
if timeout is not None:
process["terminate_at"] = time.time() + timeout
process["subprocess"].send_signal(signal.SIGINT) | python | def stop_process(self, process, timeout=None):
""" Initiates a graceful stop of one process """
process["terminate"] = True
if timeout is not None:
process["terminate_at"] = time.time() + timeout
process["subprocess"].send_signal(signal.SIGINT) | [
"def",
"stop_process",
"(",
"self",
",",
"process",
",",
"timeout",
"=",
"None",
")",
":",
"process",
"[",
"\"terminate\"",
"]",
"=",
"True",
"if",
"timeout",
"is",
"not",
"None",
":",
"process",
"[",
"\"terminate_at\"",
"]",
"=",
"time",
".",
"time",
... | Initiates a graceful stop of one process | [
"Initiates",
"a",
"graceful",
"stop",
"of",
"one",
"process"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/processes.py#L180-L186 | train | 207,840 |
pricingassistant/mrq | mrq/processes.py | ProcessPool.terminate | def terminate(self):
""" Terminates the processes right now with a SIGTERM """
for process in list(self.processes):
process["subprocess"].send_signal(signal.SIGTERM)
self.stop_watch() | python | def terminate(self):
""" Terminates the processes right now with a SIGTERM """
for process in list(self.processes):
process["subprocess"].send_signal(signal.SIGTERM)
self.stop_watch() | [
"def",
"terminate",
"(",
"self",
")",
":",
"for",
"process",
"in",
"list",
"(",
"self",
".",
"processes",
")",
":",
"process",
"[",
"\"subprocess\"",
"]",
".",
"send_signal",
"(",
"signal",
".",
"SIGTERM",
")",
"self",
".",
"stop_watch",
"(",
")"
] | Terminates the processes right now with a SIGTERM | [
"Terminates",
"the",
"processes",
"right",
"now",
"with",
"a",
"SIGTERM"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/processes.py#L188-L194 | train | 207,841 |
pricingassistant/mrq | mrq/processes.py | ProcessPool.kill | def kill(self):
""" Kills the processes right now with a SIGKILL """
for process in list(self.processes):
process["subprocess"].send_signal(signal.SIGKILL)
self.stop_watch() | python | def kill(self):
""" Kills the processes right now with a SIGKILL """
for process in list(self.processes):
process["subprocess"].send_signal(signal.SIGKILL)
self.stop_watch() | [
"def",
"kill",
"(",
"self",
")",
":",
"for",
"process",
"in",
"list",
"(",
"self",
".",
"processes",
")",
":",
"process",
"[",
"\"subprocess\"",
"]",
".",
"send_signal",
"(",
"signal",
".",
"SIGKILL",
")",
"self",
".",
"stop_watch",
"(",
")"
] | Kills the processes right now with a SIGKILL | [
"Kills",
"the",
"processes",
"right",
"now",
"with",
"a",
"SIGKILL"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/processes.py#L196-L202 | train | 207,842 |
pricingassistant/mrq | mrq/processes.py | ProcessPool.stop_watch | def stop_watch(self):
""" Stops the periodic watch greenlet, thus the pool itself """
if self.greenlet_watch:
self.greenlet_watch.kill(block=False)
self.greenlet_watch = None | python | def stop_watch(self):
""" Stops the periodic watch greenlet, thus the pool itself """
if self.greenlet_watch:
self.greenlet_watch.kill(block=False)
self.greenlet_watch = None | [
"def",
"stop_watch",
"(",
"self",
")",
":",
"if",
"self",
".",
"greenlet_watch",
":",
"self",
".",
"greenlet_watch",
".",
"kill",
"(",
"block",
"=",
"False",
")",
"self",
".",
"greenlet_watch",
"=",
"None"
] | Stops the periodic watch greenlet, thus the pool itself | [
"Stops",
"the",
"periodic",
"watch",
"greenlet",
"thus",
"the",
"pool",
"itself"
] | d0a5a34de9cba38afa94fb7c9e17f9b570b79a50 | https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/processes.py#L204-L209 | train | 207,843 |
rahiel/telegram-send | telegram_send.py | send | def send(messages=None, conf=None, parse_mode=None, disable_web_page_preview=False, files=None, images=None,
captions=None, locations=None, timeout=30):
"""Send data over Telegram. All arguments are optional.
Always use this function with explicit keyword arguments. So
`send(messages=["Hello!"])` instead of `send(["Hello!"])` as the latter
will *break* when I change the order of the arguments.
The `file` type is the [file object][] returned by the `open()` function.
To send an image/file you open it in binary mode:
``` python
import telegram_send
with open("image.jpg", "rb") as f:
telegram_send.send(images=[f])
```
[file object]: https://docs.python.org/3/glossary.html#term-file-object
# Arguments
conf (str): Path of configuration file to use. Will use the default config if not specified.
`~` expands to user's home directory.
messages (List[str]): The messages to send.
parse_mode (str): Specifies formatting of messages, one of `["text", "markdown", "html"]`.
disable_web_page_preview (bool): Disables web page previews for all links in the messages.
files (List[file]): The files to send.
images (List[file]): The images to send.
captions (List[str]): The captions to send with the images.
locations (List[str]): The locations to send. Locations are strings containing the latitude and longitude
separated by whitespace or a comma.
timeout (int|float): The read timeout for network connections in seconds.
"""
conf = expanduser(conf) if conf else get_config_path()
config = configparser.ConfigParser()
if not config.read(conf) or not config.has_section("telegram"):
raise ConfigError("Config not found")
missing_options = set(["token", "chat_id"]) - set(config.options("telegram"))
if len(missing_options) > 0:
raise ConfigError("Missing options in config: {}".format(", ".join(missing_options)))
token = config.get("telegram", "token")
chat_id = int(config.get("telegram", "chat_id")) if config.get("telegram", "chat_id").isdigit() else config.get("telegram", "chat_id")
request = telegram.utils.request.Request(read_timeout=timeout)
bot = telegram.Bot(token, request=request)
# We let the user specify "text" as a parse mode to be more explicit about
# the lack of formatting applied to the message, but "text" isn't a supported
# parse_mode in python-telegram-bot. Instead, set the parse_mode to None
# in this case.
if parse_mode == "text":
parse_mode = None
if messages:
def send_message(message):
return bot.send_message(chat_id=chat_id, text=message, parse_mode=parse_mode, disable_web_page_preview=disable_web_page_preview)
for m in messages:
if len(m) > MAX_MESSAGE_LENGTH:
warn(markup("Message longer than MAX_MESSAGE_LENGTH=%d, splitting into smaller messages." % MAX_MESSAGE_LENGTH, "red"))
ms = split_message(m, MAX_MESSAGE_LENGTH)
for m in ms:
send_message(m)
elif len(m) == 0:
continue
else:
send_message(m)
if files:
for f in files:
bot.send_document(chat_id=chat_id, document=f)
if images:
if captions:
# make captions equal length when not all images have captions
captions += [None] * (len(images) - len(captions))
for (i, c) in zip(images, captions):
bot.send_photo(chat_id=chat_id, photo=i, caption=c)
else:
for i in images:
bot.send_photo(chat_id=chat_id, photo=i)
if locations:
it = iter(locations)
for loc in it:
if "," in loc:
lat, lon = loc.split(",")
else:
lat = loc
lon = next(it)
bot.send_location(chat_id=chat_id, latitude=float(lat), longitude=float(lon)) | python | def send(messages=None, conf=None, parse_mode=None, disable_web_page_preview=False, files=None, images=None,
captions=None, locations=None, timeout=30):
"""Send data over Telegram. All arguments are optional.
Always use this function with explicit keyword arguments. So
`send(messages=["Hello!"])` instead of `send(["Hello!"])` as the latter
will *break* when I change the order of the arguments.
The `file` type is the [file object][] returned by the `open()` function.
To send an image/file you open it in binary mode:
``` python
import telegram_send
with open("image.jpg", "rb") as f:
telegram_send.send(images=[f])
```
[file object]: https://docs.python.org/3/glossary.html#term-file-object
# Arguments
conf (str): Path of configuration file to use. Will use the default config if not specified.
`~` expands to user's home directory.
messages (List[str]): The messages to send.
parse_mode (str): Specifies formatting of messages, one of `["text", "markdown", "html"]`.
disable_web_page_preview (bool): Disables web page previews for all links in the messages.
files (List[file]): The files to send.
images (List[file]): The images to send.
captions (List[str]): The captions to send with the images.
locations (List[str]): The locations to send. Locations are strings containing the latitude and longitude
separated by whitespace or a comma.
timeout (int|float): The read timeout for network connections in seconds.
"""
conf = expanduser(conf) if conf else get_config_path()
config = configparser.ConfigParser()
if not config.read(conf) or not config.has_section("telegram"):
raise ConfigError("Config not found")
missing_options = set(["token", "chat_id"]) - set(config.options("telegram"))
if len(missing_options) > 0:
raise ConfigError("Missing options in config: {}".format(", ".join(missing_options)))
token = config.get("telegram", "token")
chat_id = int(config.get("telegram", "chat_id")) if config.get("telegram", "chat_id").isdigit() else config.get("telegram", "chat_id")
request = telegram.utils.request.Request(read_timeout=timeout)
bot = telegram.Bot(token, request=request)
# We let the user specify "text" as a parse mode to be more explicit about
# the lack of formatting applied to the message, but "text" isn't a supported
# parse_mode in python-telegram-bot. Instead, set the parse_mode to None
# in this case.
if parse_mode == "text":
parse_mode = None
if messages:
def send_message(message):
return bot.send_message(chat_id=chat_id, text=message, parse_mode=parse_mode, disable_web_page_preview=disable_web_page_preview)
for m in messages:
if len(m) > MAX_MESSAGE_LENGTH:
warn(markup("Message longer than MAX_MESSAGE_LENGTH=%d, splitting into smaller messages." % MAX_MESSAGE_LENGTH, "red"))
ms = split_message(m, MAX_MESSAGE_LENGTH)
for m in ms:
send_message(m)
elif len(m) == 0:
continue
else:
send_message(m)
if files:
for f in files:
bot.send_document(chat_id=chat_id, document=f)
if images:
if captions:
# make captions equal length when not all images have captions
captions += [None] * (len(images) - len(captions))
for (i, c) in zip(images, captions):
bot.send_photo(chat_id=chat_id, photo=i, caption=c)
else:
for i in images:
bot.send_photo(chat_id=chat_id, photo=i)
if locations:
it = iter(locations)
for loc in it:
if "," in loc:
lat, lon = loc.split(",")
else:
lat = loc
lon = next(it)
bot.send_location(chat_id=chat_id, latitude=float(lat), longitude=float(lon)) | [
"def",
"send",
"(",
"messages",
"=",
"None",
",",
"conf",
"=",
"None",
",",
"parse_mode",
"=",
"None",
",",
"disable_web_page_preview",
"=",
"False",
",",
"files",
"=",
"None",
",",
"images",
"=",
"None",
",",
"captions",
"=",
"None",
",",
"locations",
... | Send data over Telegram. All arguments are optional.
Always use this function with explicit keyword arguments. So
`send(messages=["Hello!"])` instead of `send(["Hello!"])` as the latter
will *break* when I change the order of the arguments.
The `file` type is the [file object][] returned by the `open()` function.
To send an image/file you open it in binary mode:
``` python
import telegram_send
with open("image.jpg", "rb") as f:
telegram_send.send(images=[f])
```
[file object]: https://docs.python.org/3/glossary.html#term-file-object
# Arguments
conf (str): Path of configuration file to use. Will use the default config if not specified.
`~` expands to user's home directory.
messages (List[str]): The messages to send.
parse_mode (str): Specifies formatting of messages, one of `["text", "markdown", "html"]`.
disable_web_page_preview (bool): Disables web page previews for all links in the messages.
files (List[file]): The files to send.
images (List[file]): The images to send.
captions (List[str]): The captions to send with the images.
locations (List[str]): The locations to send. Locations are strings containing the latitude and longitude
separated by whitespace or a comma.
timeout (int|float): The read timeout for network connections in seconds. | [
"Send",
"data",
"over",
"Telegram",
".",
"All",
"arguments",
"are",
"optional",
"."
] | 019162232bdc4fc9e986ffcf6e4c5572306c0b82 | https://github.com/rahiel/telegram-send/blob/019162232bdc4fc9e986ffcf6e4c5572306c0b82/telegram_send.py#L135-L226 | train | 207,844 |
rahiel/telegram-send | telegram_send.py | split_message | def split_message(message, max_length):
"""Split large message into smaller messages each smaller than the max_length."""
ms = []
while len(message) > max_length:
ms.append(message[:max_length])
message = message[max_length:]
ms.append(message)
return ms | python | def split_message(message, max_length):
"""Split large message into smaller messages each smaller than the max_length."""
ms = []
while len(message) > max_length:
ms.append(message[:max_length])
message = message[max_length:]
ms.append(message)
return ms | [
"def",
"split_message",
"(",
"message",
",",
"max_length",
")",
":",
"ms",
"=",
"[",
"]",
"while",
"len",
"(",
"message",
")",
">",
"max_length",
":",
"ms",
".",
"append",
"(",
"message",
"[",
":",
"max_length",
"]",
")",
"message",
"=",
"message",
"... | Split large message into smaller messages each smaller than the max_length. | [
"Split",
"large",
"message",
"into",
"smaller",
"messages",
"each",
"smaller",
"than",
"the",
"max_length",
"."
] | 019162232bdc4fc9e986ffcf6e4c5572306c0b82 | https://github.com/rahiel/telegram-send/blob/019162232bdc4fc9e986ffcf6e4c5572306c0b82/telegram_send.py#L431-L438 | train | 207,845 |
JetBrains/teamcity-messages | teamcity/pylint_reporter.py | TeamCityReporter.handle_message | def handle_message(self, msg):
"""Issues an `inspection` service message based on a PyLint message.
Registers each message type upon first encounter.
:param utils.Message msg: a PyLint message
"""
if msg.msg_id not in self.msg_types:
self.report_message_type(msg)
self.msg_types.add(msg.msg_id)
self.tc.message('inspection', typeId=msg.msg_id, message=msg.msg,
file=os.path.relpath(msg.abspath).replace('\\', '/'),
line=str(msg.line),
SEVERITY=TC_SEVERITY.get(msg.category)) | python | def handle_message(self, msg):
"""Issues an `inspection` service message based on a PyLint message.
Registers each message type upon first encounter.
:param utils.Message msg: a PyLint message
"""
if msg.msg_id not in self.msg_types:
self.report_message_type(msg)
self.msg_types.add(msg.msg_id)
self.tc.message('inspection', typeId=msg.msg_id, message=msg.msg,
file=os.path.relpath(msg.abspath).replace('\\', '/'),
line=str(msg.line),
SEVERITY=TC_SEVERITY.get(msg.category)) | [
"def",
"handle_message",
"(",
"self",
",",
"msg",
")",
":",
"if",
"msg",
".",
"msg_id",
"not",
"in",
"self",
".",
"msg_types",
":",
"self",
".",
"report_message_type",
"(",
"msg",
")",
"self",
".",
"msg_types",
".",
"add",
"(",
"msg",
".",
"msg_id",
... | Issues an `inspection` service message based on a PyLint message.
Registers each message type upon first encounter.
:param utils.Message msg: a PyLint message | [
"Issues",
"an",
"inspection",
"service",
"message",
"based",
"on",
"a",
"PyLint",
"message",
".",
"Registers",
"each",
"message",
"type",
"upon",
"first",
"encounter",
"."
] | 44f6d1fde33a48547a8f9fe31814522347a87b39 | https://github.com/JetBrains/teamcity-messages/blob/44f6d1fde33a48547a8f9fe31814522347a87b39/teamcity/pylint_reporter.py#L49-L62 | train | 207,846 |
JetBrains/teamcity-messages | teamcity/pylint_reporter.py | TeamCityReporter.display_reports | def display_reports(self, layout):
"""Issues the final PyLint score as a TeamCity build statistic value"""
try:
score = self.linter.stats['global_note']
except (AttributeError, KeyError):
pass
else:
self.tc.message('buildStatisticValue', key='PyLintScore', value=str(score)) | python | def display_reports(self, layout):
"""Issues the final PyLint score as a TeamCity build statistic value"""
try:
score = self.linter.stats['global_note']
except (AttributeError, KeyError):
pass
else:
self.tc.message('buildStatisticValue', key='PyLintScore', value=str(score)) | [
"def",
"display_reports",
"(",
"self",
",",
"layout",
")",
":",
"try",
":",
"score",
"=",
"self",
".",
"linter",
".",
"stats",
"[",
"'global_note'",
"]",
"except",
"(",
"AttributeError",
",",
"KeyError",
")",
":",
"pass",
"else",
":",
"self",
".",
"tc"... | Issues the final PyLint score as a TeamCity build statistic value | [
"Issues",
"the",
"final",
"PyLint",
"score",
"as",
"a",
"TeamCity",
"build",
"statistic",
"value"
] | 44f6d1fde33a48547a8f9fe31814522347a87b39 | https://github.com/JetBrains/teamcity-messages/blob/44f6d1fde33a48547a8f9fe31814522347a87b39/teamcity/pylint_reporter.py#L64-L71 | train | 207,847 |
conbus/fbmq | fbmq/fbmq.py | Page.set_webhook_handler | def set_webhook_handler(self, scope, callback):
"""
Allows adding a webhook_handler as an alternative to the decorators
"""
scope = scope.lower()
if scope == 'after_send':
self._after_send = callback
return
if scope not in Page.WEBHOOK_ENDPOINTS:
raise ValueError("The 'scope' argument must be one of {}.".format(Page.WEBHOOK_ENDPOINTS))
self._webhook_handlers[scope] = callback | python | def set_webhook_handler(self, scope, callback):
"""
Allows adding a webhook_handler as an alternative to the decorators
"""
scope = scope.lower()
if scope == 'after_send':
self._after_send = callback
return
if scope not in Page.WEBHOOK_ENDPOINTS:
raise ValueError("The 'scope' argument must be one of {}.".format(Page.WEBHOOK_ENDPOINTS))
self._webhook_handlers[scope] = callback | [
"def",
"set_webhook_handler",
"(",
"self",
",",
"scope",
",",
"callback",
")",
":",
"scope",
"=",
"scope",
".",
"lower",
"(",
")",
"if",
"scope",
"==",
"'after_send'",
":",
"self",
".",
"_after_send",
"=",
"callback",
"return",
"if",
"scope",
"not",
"in"... | Allows adding a webhook_handler as an alternative to the decorators | [
"Allows",
"adding",
"a",
"webhook_handler",
"as",
"an",
"alternative",
"to",
"the",
"decorators"
] | 2e016597e49d4d3d8bd52a4da5d778b992697649 | https://github.com/conbus/fbmq/blob/2e016597e49d4d3d8bd52a4da5d778b992697649/fbmq/fbmq.py#L523-L536 | train | 207,848 |
mapbox/rio-mucho | riomucho/utils.py | getWindows | def getWindows(input):
"""Get a source's windows"""
with rasterio.open(input) as src:
return [[window, ij] for ij, window in src.block_windows()] | python | def getWindows(input):
"""Get a source's windows"""
with rasterio.open(input) as src:
return [[window, ij] for ij, window in src.block_windows()] | [
"def",
"getWindows",
"(",
"input",
")",
":",
"with",
"rasterio",
".",
"open",
"(",
"input",
")",
"as",
"src",
":",
"return",
"[",
"[",
"window",
",",
"ij",
"]",
"for",
"ij",
",",
"window",
"in",
"src",
".",
"block_windows",
"(",
")",
"]"
] | Get a source's windows | [
"Get",
"a",
"source",
"s",
"windows"
] | b2267bda2a7ac8557c9328742aeaab6adc825315 | https://github.com/mapbox/rio-mucho/blob/b2267bda2a7ac8557c9328742aeaab6adc825315/riomucho/utils.py#L14-L17 | train | 207,849 |
mapbox/rio-mucho | examples/simple_read.py | read_function | def read_function(data, window, ij, g_args):
"""Takes an array, and sets any value above the mean to the max, the rest to 0"""
output = (data[0] > numpy.mean(data[0])).astype(data[0].dtype) * data[0].max()
return output | python | def read_function(data, window, ij, g_args):
"""Takes an array, and sets any value above the mean to the max, the rest to 0"""
output = (data[0] > numpy.mean(data[0])).astype(data[0].dtype) * data[0].max()
return output | [
"def",
"read_function",
"(",
"data",
",",
"window",
",",
"ij",
",",
"g_args",
")",
":",
"output",
"=",
"(",
"data",
"[",
"0",
"]",
">",
"numpy",
".",
"mean",
"(",
"data",
"[",
"0",
"]",
")",
")",
".",
"astype",
"(",
"data",
"[",
"0",
"]",
"."... | Takes an array, and sets any value above the mean to the max, the rest to 0 | [
"Takes",
"an",
"array",
"and",
"sets",
"any",
"value",
"above",
"the",
"mean",
"to",
"the",
"max",
"the",
"rest",
"to",
"0"
] | b2267bda2a7ac8557c9328742aeaab6adc825315 | https://github.com/mapbox/rio-mucho/blob/b2267bda2a7ac8557c9328742aeaab6adc825315/examples/simple_read.py#L7-L10 | train | 207,850 |
mapbox/rio-mucho | riomucho/__init__.py | tb_capture | def tb_capture(func):
"""A decorator which captures worker tracebacks.
Tracebacks in particular, are captured. Inspired by an example in
https://bugs.python.org/issue13831.
This decorator wraps rio-mucho worker tasks.
Parameters
----------
func : function
A function to be decorated.
Returns
-------
func
"""
@wraps(func)
def wrapper(*args, **kwds):
try:
return func(*args, **kwds)
except Exception:
raise MuchoChildError()
return wrapper | python | def tb_capture(func):
"""A decorator which captures worker tracebacks.
Tracebacks in particular, are captured. Inspired by an example in
https://bugs.python.org/issue13831.
This decorator wraps rio-mucho worker tasks.
Parameters
----------
func : function
A function to be decorated.
Returns
-------
func
"""
@wraps(func)
def wrapper(*args, **kwds):
try:
return func(*args, **kwds)
except Exception:
raise MuchoChildError()
return wrapper | [
"def",
"tb_capture",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
"except",
"Exception",
... | A decorator which captures worker tracebacks.
Tracebacks in particular, are captured. Inspired by an example in
https://bugs.python.org/issue13831.
This decorator wraps rio-mucho worker tasks.
Parameters
----------
func : function
A function to be decorated.
Returns
-------
func | [
"A",
"decorator",
"which",
"captures",
"worker",
"tracebacks",
"."
] | b2267bda2a7ac8557c9328742aeaab6adc825315 | https://github.com/mapbox/rio-mucho/blob/b2267bda2a7ac8557c9328742aeaab6adc825315/riomucho/__init__.py#L41-L68 | train | 207,851 |
mapbox/rio-mucho | riomucho/__init__.py | init_worker | def init_worker(inpaths, g_args):
"""The multiprocessing worker initializer
Parameters
----------
inpaths : list of str
A list of dataset paths.
g_args : dict
Global arguments.
Returns
-------
None
"""
global global_args
global srcs
global_args = g_args
srcs = [rasterio.open(i) for i in inpaths] | python | def init_worker(inpaths, g_args):
"""The multiprocessing worker initializer
Parameters
----------
inpaths : list of str
A list of dataset paths.
g_args : dict
Global arguments.
Returns
-------
None
"""
global global_args
global srcs
global_args = g_args
srcs = [rasterio.open(i) for i in inpaths] | [
"def",
"init_worker",
"(",
"inpaths",
",",
"g_args",
")",
":",
"global",
"global_args",
"global",
"srcs",
"global_args",
"=",
"g_args",
"srcs",
"=",
"[",
"rasterio",
".",
"open",
"(",
"i",
")",
"for",
"i",
"in",
"inpaths",
"]"
] | The multiprocessing worker initializer
Parameters
----------
inpaths : list of str
A list of dataset paths.
g_args : dict
Global arguments.
Returns
-------
None | [
"The",
"multiprocessing",
"worker",
"initializer"
] | b2267bda2a7ac8557c9328742aeaab6adc825315 | https://github.com/mapbox/rio-mucho/blob/b2267bda2a7ac8557c9328742aeaab6adc825315/riomucho/__init__.py#L71-L89 | train | 207,852 |
pseudo-lang/pseudo | pseudo/helpers.py | safe_serialize_type | def safe_serialize_type(l):
'''serialize only with letters, numbers and _'''
if isinstance(l, str):
return l
elif isinstance(l, list):
return '%s_%s_' % (l[0], ''.join(map(safe_serialize_type, l[1:])))
else:
return str(l) | python | def safe_serialize_type(l):
'''serialize only with letters, numbers and _'''
if isinstance(l, str):
return l
elif isinstance(l, list):
return '%s_%s_' % (l[0], ''.join(map(safe_serialize_type, l[1:])))
else:
return str(l) | [
"def",
"safe_serialize_type",
"(",
"l",
")",
":",
"if",
"isinstance",
"(",
"l",
",",
"str",
")",
":",
"return",
"l",
"elif",
"isinstance",
"(",
"l",
",",
"list",
")",
":",
"return",
"'%s_%s_'",
"%",
"(",
"l",
"[",
"0",
"]",
",",
"''",
".",
"join"... | serialize only with letters, numbers and _ | [
"serialize",
"only",
"with",
"letters",
"numbers",
"and",
"_"
] | d0856d13e01a646156d3363f8c1bf352e6ea6315 | https://github.com/pseudo-lang/pseudo/blob/d0856d13e01a646156d3363f8c1bf352e6ea6315/pseudo/helpers.py#L9-L17 | train | 207,853 |
pseudo-lang/pseudo | pseudo/pseudo_tree.py | method_call | def method_call(receiver, message, args, pseudo_type=None):
'''A shortcut for a method call, expands a str receiver to a identifier'''
if not isinstance(receiver, Node):
receiver = local(receiver)
return Node('method_call', receiver=receiver, message=message, args=args, pseudo_type=pseudo_type) | python | def method_call(receiver, message, args, pseudo_type=None):
'''A shortcut for a method call, expands a str receiver to a identifier'''
if not isinstance(receiver, Node):
receiver = local(receiver)
return Node('method_call', receiver=receiver, message=message, args=args, pseudo_type=pseudo_type) | [
"def",
"method_call",
"(",
"receiver",
",",
"message",
",",
"args",
",",
"pseudo_type",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"receiver",
",",
"Node",
")",
":",
"receiver",
"=",
"local",
"(",
"receiver",
")",
"return",
"Node",
"(",
"'m... | A shortcut for a method call, expands a str receiver to a identifier | [
"A",
"shortcut",
"for",
"a",
"method",
"call",
"expands",
"a",
"str",
"receiver",
"to",
"a",
"identifier"
] | d0856d13e01a646156d3363f8c1bf352e6ea6315 | https://github.com/pseudo-lang/pseudo/blob/d0856d13e01a646156d3363f8c1bf352e6ea6315/pseudo/pseudo_tree.py#L31-L36 | train | 207,854 |
pseudo-lang/pseudo | pseudo/pseudo_tree.py | call | def call(function, args, pseudo_type=None):
'''A shortcut for a call with an identifier callee'''
if not isinstance(function, Node):
function = local(function)
return Node('call', function=function, args=args, pseudo_type=pseudo_type) | python | def call(function, args, pseudo_type=None):
'''A shortcut for a call with an identifier callee'''
if not isinstance(function, Node):
function = local(function)
return Node('call', function=function, args=args, pseudo_type=pseudo_type) | [
"def",
"call",
"(",
"function",
",",
"args",
",",
"pseudo_type",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"function",
",",
"Node",
")",
":",
"function",
"=",
"local",
"(",
"function",
")",
"return",
"Node",
"(",
"'call'",
",",
"function",... | A shortcut for a call with an identifier callee | [
"A",
"shortcut",
"for",
"a",
"call",
"with",
"an",
"identifier",
"callee"
] | d0856d13e01a646156d3363f8c1bf352e6ea6315 | https://github.com/pseudo-lang/pseudo/blob/d0856d13e01a646156d3363f8c1bf352e6ea6315/pseudo/pseudo_tree.py#L39-L44 | train | 207,855 |
pseudo-lang/pseudo | pseudo/pseudo_tree.py | to_node | def to_node(value):
'''Expand to a literal node if a basic type otherwise just returns the node'''
if isinstance(value, Node):
return value
elif isinstance(value, str):
return Node('string', value=value, pseudo_type='String')
elif isinstance(value, int):
return Node('int', value=value, pseudo_type='Int')
elif isinstance(value, bool):
return Node('boolean', value=str(value).lower(), pseudo_type='Boolean')
elif isinstance(value, float):
return Node('float', value=value, pseudo_type='Float')
elif value is None:
return Node('null', pseudo_type='Void')
else:
1/0 | python | def to_node(value):
'''Expand to a literal node if a basic type otherwise just returns the node'''
if isinstance(value, Node):
return value
elif isinstance(value, str):
return Node('string', value=value, pseudo_type='String')
elif isinstance(value, int):
return Node('int', value=value, pseudo_type='Int')
elif isinstance(value, bool):
return Node('boolean', value=str(value).lower(), pseudo_type='Boolean')
elif isinstance(value, float):
return Node('float', value=value, pseudo_type='Float')
elif value is None:
return Node('null', pseudo_type='Void')
else:
1/0 | [
"def",
"to_node",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"Node",
")",
":",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"return",
"Node",
"(",
"'string'",
",",
"value",
"=",
"value",
",",
"pseud... | Expand to a literal node if a basic type otherwise just returns the node | [
"Expand",
"to",
"a",
"literal",
"node",
"if",
"a",
"basic",
"type",
"otherwise",
"just",
"returns",
"the",
"node"
] | d0856d13e01a646156d3363f8c1bf352e6ea6315 | https://github.com/pseudo-lang/pseudo/blob/d0856d13e01a646156d3363f8c1bf352e6ea6315/pseudo/pseudo_tree.py#L75-L91 | train | 207,856 |
pseudo-lang/pseudo | pseudo/api_translator.py | to_op | def to_op(op, reversed=False):
'''
create a function that transforms a method to a binary op
often we need to convert a pseudo method
<receiver>.<message>(<z>) to a binary op
<receiver> <op> <message>
that's a decorator that helps for that
'''
def transformer(receiver, param, pseudo_type):
if not reversed:
return Node('binary_op', op=op, left=receiver, right=param, pseudo_type=pseudo_type)
return Node('binary_op', op=op, left=param, right=receiver, pseudo_type=pseudo_type)
return transformer | python | def to_op(op, reversed=False):
'''
create a function that transforms a method to a binary op
often we need to convert a pseudo method
<receiver>.<message>(<z>) to a binary op
<receiver> <op> <message>
that's a decorator that helps for that
'''
def transformer(receiver, param, pseudo_type):
if not reversed:
return Node('binary_op', op=op, left=receiver, right=param, pseudo_type=pseudo_type)
return Node('binary_op', op=op, left=param, right=receiver, pseudo_type=pseudo_type)
return transformer | [
"def",
"to_op",
"(",
"op",
",",
"reversed",
"=",
"False",
")",
":",
"def",
"transformer",
"(",
"receiver",
",",
"param",
",",
"pseudo_type",
")",
":",
"if",
"not",
"reversed",
":",
"return",
"Node",
"(",
"'binary_op'",
",",
"op",
"=",
"op",
",",
"lef... | create a function that transforms a method to a binary op
often we need to convert a pseudo method
<receiver>.<message>(<z>) to a binary op
<receiver> <op> <message>
that's a decorator that helps for that | [
"create",
"a",
"function",
"that",
"transforms",
"a",
"method",
"to",
"a",
"binary",
"op"
] | d0856d13e01a646156d3363f8c1bf352e6ea6315 | https://github.com/pseudo-lang/pseudo/blob/d0856d13e01a646156d3363f8c1bf352e6ea6315/pseudo/api_translator.py#L9-L22 | train | 207,857 |
pseudo-lang/pseudo | pseudo/api_translator.py | ApiTranslator.leaking | def leaking(self, z, module, name, node, context, *data):
'''
an expression leaking ...
assignment nodes into the nearest block list of nodes
c++ guys, stay calm
'''
# input(node.y)
args = [node.receiver] + node.args if node.type == 'standard_method_call' else node.args
z = z(module, name, args)
if context == 'expression':
if isinstance(z, NormalLeakingNode):
leaked_nodes, exp = z.as_expression()
else:
leaked_nodes, exp = z.as_expression()
zz = local(z.temp_name(getattr(z, 'default', '')), node.pseudo_type)
leaked_nodes = z.as_assignment(zz)
exp = local(zz, node.pseudo_type)
if exp is None or exp.pseudo_type == 'Void':
raise PseudoTypeError("pseudo can't handle values with void type in expression: %s?%s" % (module, name))
self.leaked_nodes += leaked_nodes
return exp
elif context == 'assignment':
if isinstance(z, NormalLeakingNode):
leaked_nodes, exp = z.as_expression()
if exp is None or exp.pseudo_type == 'Void':
raise PseudoTypeError("pseudo can't handle values with void type in expression: %s?%s" % (module, name))
self.leaked_nodes += leaked_nodes
return assignment(data[0], exp)
else:
self.leaked_nodes += z.as_assignment(data[0])
return None
elif context == 'block':
leaked_nodes, exp = z.as_expression()
self.leaked_nodes += leaked_nodes
return exp | python | def leaking(self, z, module, name, node, context, *data):
'''
an expression leaking ...
assignment nodes into the nearest block list of nodes
c++ guys, stay calm
'''
# input(node.y)
args = [node.receiver] + node.args if node.type == 'standard_method_call' else node.args
z = z(module, name, args)
if context == 'expression':
if isinstance(z, NormalLeakingNode):
leaked_nodes, exp = z.as_expression()
else:
leaked_nodes, exp = z.as_expression()
zz = local(z.temp_name(getattr(z, 'default', '')), node.pseudo_type)
leaked_nodes = z.as_assignment(zz)
exp = local(zz, node.pseudo_type)
if exp is None or exp.pseudo_type == 'Void':
raise PseudoTypeError("pseudo can't handle values with void type in expression: %s?%s" % (module, name))
self.leaked_nodes += leaked_nodes
return exp
elif context == 'assignment':
if isinstance(z, NormalLeakingNode):
leaked_nodes, exp = z.as_expression()
if exp is None or exp.pseudo_type == 'Void':
raise PseudoTypeError("pseudo can't handle values with void type in expression: %s?%s" % (module, name))
self.leaked_nodes += leaked_nodes
return assignment(data[0], exp)
else:
self.leaked_nodes += z.as_assignment(data[0])
return None
elif context == 'block':
leaked_nodes, exp = z.as_expression()
self.leaked_nodes += leaked_nodes
return exp | [
"def",
"leaking",
"(",
"self",
",",
"z",
",",
"module",
",",
"name",
",",
"node",
",",
"context",
",",
"*",
"data",
")",
":",
"# input(node.y)",
"args",
"=",
"[",
"node",
".",
"receiver",
"]",
"+",
"node",
".",
"args",
"if",
"node",
".",
"type",
... | an expression leaking ...
assignment nodes into the nearest block list of nodes
c++ guys, stay calm | [
"an",
"expression",
"leaking",
"..."
] | d0856d13e01a646156d3363f8c1bf352e6ea6315 | https://github.com/pseudo-lang/pseudo/blob/d0856d13e01a646156d3363f8c1bf352e6ea6315/pseudo/api_translator.py#L165-L201 | train | 207,858 |
pseudo-lang/pseudo | pseudo/api_translator.py | ApiTranslator._expand_api | def _expand_api(self, api, receiver, args, pseudo_type, equivalent):
'''
the heart of api translation dsl
function or <z>(<arg>, ..) can be expanded, <z> can be just a name for a global function, or #name for method, <arg> can be %{self} for self or %{n} for nth arg
'''
if callable(api):
if receiver:
return api(receiver, *(args + [pseudo_type]))
else:
return api(*(args + [pseudo_type]))
elif isinstance(api, str):
if '(' in api:
call_api, arg_code = api[:-1].split('(')
new_args = [self._parse_part(
a.strip(), receiver, args, equivalent) for a in arg_code.split(',')]
else:
call_api, arg_code = api, ''
new_args = args
if '#' in call_api:
a, b = call_api.split('#')
method_receiver = self._parse_part(
a, receiver, args, equivalent) if a else receiver
return method_call(method_receiver, b, new_args, pseudo_type=pseudo_type)
elif '.' in call_api:
a, b = call_api.split('.')
static_receiver = self._parse_part(
a, receiver, args, equivalent) if a else receiver
if b[-1] != '!':
return Node('static_call', receiver=static_receiver, message=b, args=new_args, pseudo_type=pseudo_type)
else:
return Node('attr', object=static_receiver, attr=b[:-1], pseudo_type=pseudo_type)
else:
if receiver:
return call(call_api, [receiver] + new_args, pseudo_type=pseudo_type)
else:
return call(call_api, new_args, pseudo_type=pseudo_type)
else:
raise PseudoDSLError('%s not supported by api dsl' % str(api)) | python | def _expand_api(self, api, receiver, args, pseudo_type, equivalent):
'''
the heart of api translation dsl
function or <z>(<arg>, ..) can be expanded, <z> can be just a name for a global function, or #name for method, <arg> can be %{self} for self or %{n} for nth arg
'''
if callable(api):
if receiver:
return api(receiver, *(args + [pseudo_type]))
else:
return api(*(args + [pseudo_type]))
elif isinstance(api, str):
if '(' in api:
call_api, arg_code = api[:-1].split('(')
new_args = [self._parse_part(
a.strip(), receiver, args, equivalent) for a in arg_code.split(',')]
else:
call_api, arg_code = api, ''
new_args = args
if '#' in call_api:
a, b = call_api.split('#')
method_receiver = self._parse_part(
a, receiver, args, equivalent) if a else receiver
return method_call(method_receiver, b, new_args, pseudo_type=pseudo_type)
elif '.' in call_api:
a, b = call_api.split('.')
static_receiver = self._parse_part(
a, receiver, args, equivalent) if a else receiver
if b[-1] != '!':
return Node('static_call', receiver=static_receiver, message=b, args=new_args, pseudo_type=pseudo_type)
else:
return Node('attr', object=static_receiver, attr=b[:-1], pseudo_type=pseudo_type)
else:
if receiver:
return call(call_api, [receiver] + new_args, pseudo_type=pseudo_type)
else:
return call(call_api, new_args, pseudo_type=pseudo_type)
else:
raise PseudoDSLError('%s not supported by api dsl' % str(api)) | [
"def",
"_expand_api",
"(",
"self",
",",
"api",
",",
"receiver",
",",
"args",
",",
"pseudo_type",
",",
"equivalent",
")",
":",
"if",
"callable",
"(",
"api",
")",
":",
"if",
"receiver",
":",
"return",
"api",
"(",
"receiver",
",",
"*",
"(",
"args",
"+",... | the heart of api translation dsl
function or <z>(<arg>, ..) can be expanded, <z> can be just a name for a global function, or #name for method, <arg> can be %{self} for self or %{n} for nth arg | [
"the",
"heart",
"of",
"api",
"translation",
"dsl"
] | d0856d13e01a646156d3363f8c1bf352e6ea6315 | https://github.com/pseudo-lang/pseudo/blob/d0856d13e01a646156d3363f8c1bf352e6ea6315/pseudo/api_translator.py#L248-L287 | train | 207,859 |
pseudo-lang/pseudo | pseudo/__init__.py | generate_main | def generate_main(main, language):
'''
generate output code for main in `language`
`main` is a dict/Node or a list of dicts/Nodes with pseudo ast
e.g.
> print(generate_main({'type': 'int', 'value': 0, 'pseudo_type': 'Int'}, 'rb'))
2
> print(generate_main([pseudo.pseudo_tree.to_node('a'), pseudo.pseudo_tree.to_node(0)], 'js'))
'a';
0;
'''
base = {'type': 'module', 'custom_exceptions': [], 'definitions': [], 'constants': [], 'main': [], 'pseudo_type': 'Void'}
base_node = pseudo.loader.convert_to_syntax_tree(base)
if isinstance(main, dict):
base['main'] = [main]
elif isinstance(main, list):
if main and isinstance(main[0], dict):
base['main'] = main
else:
base_node.main = main
elif isinstance(main, pseudo.pseudo_tree.Node):
base_node.main = [main]
if base['main']:
q = pseudo.loader.convert_to_syntax_tree(base)
else:
q = base_node
return generate(q, language) | python | def generate_main(main, language):
'''
generate output code for main in `language`
`main` is a dict/Node or a list of dicts/Nodes with pseudo ast
e.g.
> print(generate_main({'type': 'int', 'value': 0, 'pseudo_type': 'Int'}, 'rb'))
2
> print(generate_main([pseudo.pseudo_tree.to_node('a'), pseudo.pseudo_tree.to_node(0)], 'js'))
'a';
0;
'''
base = {'type': 'module', 'custom_exceptions': [], 'definitions': [], 'constants': [], 'main': [], 'pseudo_type': 'Void'}
base_node = pseudo.loader.convert_to_syntax_tree(base)
if isinstance(main, dict):
base['main'] = [main]
elif isinstance(main, list):
if main and isinstance(main[0], dict):
base['main'] = main
else:
base_node.main = main
elif isinstance(main, pseudo.pseudo_tree.Node):
base_node.main = [main]
if base['main']:
q = pseudo.loader.convert_to_syntax_tree(base)
else:
q = base_node
return generate(q, language) | [
"def",
"generate_main",
"(",
"main",
",",
"language",
")",
":",
"base",
"=",
"{",
"'type'",
":",
"'module'",
",",
"'custom_exceptions'",
":",
"[",
"]",
",",
"'definitions'",
":",
"[",
"]",
",",
"'constants'",
":",
"[",
"]",
",",
"'main'",
":",
"[",
"... | generate output code for main in `language`
`main` is a dict/Node or a list of dicts/Nodes with pseudo ast
e.g.
> print(generate_main({'type': 'int', 'value': 0, 'pseudo_type': 'Int'}, 'rb'))
2
> print(generate_main([pseudo.pseudo_tree.to_node('a'), pseudo.pseudo_tree.to_node(0)], 'js'))
'a';
0; | [
"generate",
"output",
"code",
"for",
"main",
"in",
"language"
] | d0856d13e01a646156d3363f8c1bf352e6ea6315 | https://github.com/pseudo-lang/pseudo/blob/d0856d13e01a646156d3363f8c1bf352e6ea6315/pseudo/__init__.py#L59-L87 | train | 207,860 |
SpamScope/mail-parser | mailparser/utils.py | ported_string | def ported_string(raw_data, encoding='utf-8', errors='ignore'):
"""
Give as input raw data and output a str in Python 3
and unicode in Python 2.
Args:
raw_data: Python 2 str, Python 3 bytes or str to porting
encoding: string giving the name of an encoding
errors: his specifies the treatment of characters
which are invalid in the input encoding
Returns:
str (Python 3) or unicode (Python 2)
"""
if not raw_data:
return six.text_type()
if isinstance(raw_data, six.text_type):
return raw_data.strip()
if six.PY2:
try:
return six.text_type(raw_data, encoding, errors).strip()
except LookupError:
return six.text_type(raw_data, "utf-8", errors).strip()
if six.PY3:
try:
return six.text_type(raw_data, encoding).strip()
except (LookupError, UnicodeDecodeError):
return six.text_type(raw_data, "utf-8", errors).strip() | python | def ported_string(raw_data, encoding='utf-8', errors='ignore'):
"""
Give as input raw data and output a str in Python 3
and unicode in Python 2.
Args:
raw_data: Python 2 str, Python 3 bytes or str to porting
encoding: string giving the name of an encoding
errors: his specifies the treatment of characters
which are invalid in the input encoding
Returns:
str (Python 3) or unicode (Python 2)
"""
if not raw_data:
return six.text_type()
if isinstance(raw_data, six.text_type):
return raw_data.strip()
if six.PY2:
try:
return six.text_type(raw_data, encoding, errors).strip()
except LookupError:
return six.text_type(raw_data, "utf-8", errors).strip()
if six.PY3:
try:
return six.text_type(raw_data, encoding).strip()
except (LookupError, UnicodeDecodeError):
return six.text_type(raw_data, "utf-8", errors).strip() | [
"def",
"ported_string",
"(",
"raw_data",
",",
"encoding",
"=",
"'utf-8'",
",",
"errors",
"=",
"'ignore'",
")",
":",
"if",
"not",
"raw_data",
":",
"return",
"six",
".",
"text_type",
"(",
")",
"if",
"isinstance",
"(",
"raw_data",
",",
"six",
".",
"text_typ... | Give as input raw data and output a str in Python 3
and unicode in Python 2.
Args:
raw_data: Python 2 str, Python 3 bytes or str to porting
encoding: string giving the name of an encoding
errors: his specifies the treatment of characters
which are invalid in the input encoding
Returns:
str (Python 3) or unicode (Python 2) | [
"Give",
"as",
"input",
"raw",
"data",
"and",
"output",
"a",
"str",
"in",
"Python",
"3",
"and",
"unicode",
"in",
"Python",
"2",
"."
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/utils.py#L83-L114 | train | 207,861 |
SpamScope/mail-parser | mailparser/utils.py | decode_header_part | def decode_header_part(header):
"""
Given an raw header returns an decoded header
Args:
header (string): header to decode
Returns:
str (Python 3) or unicode (Python 2)
"""
if not header:
return six.text_type()
output = six.text_type()
try:
for d, c in decode_header(header):
c = c if c else 'utf-8'
output += ported_string(d, c, 'ignore')
# Header parsing failed, when header has charset Shift_JIS
except (HeaderParseError, UnicodeError):
log.error("Failed decoding header part: {}".format(header))
output += header
return output | python | def decode_header_part(header):
"""
Given an raw header returns an decoded header
Args:
header (string): header to decode
Returns:
str (Python 3) or unicode (Python 2)
"""
if not header:
return six.text_type()
output = six.text_type()
try:
for d, c in decode_header(header):
c = c if c else 'utf-8'
output += ported_string(d, c, 'ignore')
# Header parsing failed, when header has charset Shift_JIS
except (HeaderParseError, UnicodeError):
log.error("Failed decoding header part: {}".format(header))
output += header
return output | [
"def",
"decode_header_part",
"(",
"header",
")",
":",
"if",
"not",
"header",
":",
"return",
"six",
".",
"text_type",
"(",
")",
"output",
"=",
"six",
".",
"text_type",
"(",
")",
"try",
":",
"for",
"d",
",",
"c",
"in",
"decode_header",
"(",
"header",
"... | Given an raw header returns an decoded header
Args:
header (string): header to decode
Returns:
str (Python 3) or unicode (Python 2) | [
"Given",
"an",
"raw",
"header",
"returns",
"an",
"decoded",
"header"
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/utils.py#L117-L142 | train | 207,862 |
SpamScope/mail-parser | mailparser/utils.py | fingerprints | def fingerprints(data):
"""
This function return the fingerprints of data.
Args:
data (string): raw data
Returns:
namedtuple: fingerprints md5, sha1, sha256, sha512
"""
Hashes = namedtuple('Hashes', "md5 sha1 sha256 sha512")
if six.PY2:
if not isinstance(data, str):
data = data.encode("utf-8")
elif six.PY3:
if not isinstance(data, bytes):
data = data.encode("utf-8")
# md5
md5 = hashlib.md5()
md5.update(data)
md5 = md5.hexdigest()
# sha1
sha1 = hashlib.sha1()
sha1.update(data)
sha1 = sha1.hexdigest()
# sha256
sha256 = hashlib.sha256()
sha256.update(data)
sha256 = sha256.hexdigest()
# sha512
sha512 = hashlib.sha512()
sha512.update(data)
sha512 = sha512.hexdigest()
return Hashes(md5, sha1, sha256, sha512) | python | def fingerprints(data):
"""
This function return the fingerprints of data.
Args:
data (string): raw data
Returns:
namedtuple: fingerprints md5, sha1, sha256, sha512
"""
Hashes = namedtuple('Hashes', "md5 sha1 sha256 sha512")
if six.PY2:
if not isinstance(data, str):
data = data.encode("utf-8")
elif six.PY3:
if not isinstance(data, bytes):
data = data.encode("utf-8")
# md5
md5 = hashlib.md5()
md5.update(data)
md5 = md5.hexdigest()
# sha1
sha1 = hashlib.sha1()
sha1.update(data)
sha1 = sha1.hexdigest()
# sha256
sha256 = hashlib.sha256()
sha256.update(data)
sha256 = sha256.hexdigest()
# sha512
sha512 = hashlib.sha512()
sha512.update(data)
sha512 = sha512.hexdigest()
return Hashes(md5, sha1, sha256, sha512) | [
"def",
"fingerprints",
"(",
"data",
")",
":",
"Hashes",
"=",
"namedtuple",
"(",
"'Hashes'",
",",
"\"md5 sha1 sha256 sha512\"",
")",
"if",
"six",
".",
"PY2",
":",
"if",
"not",
"isinstance",
"(",
"data",
",",
"str",
")",
":",
"data",
"=",
"data",
".",
"e... | This function return the fingerprints of data.
Args:
data (string): raw data
Returns:
namedtuple: fingerprints md5, sha1, sha256, sha512 | [
"This",
"function",
"return",
"the",
"fingerprints",
"of",
"data",
"."
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/utils.py#L161-L201 | train | 207,863 |
SpamScope/mail-parser | mailparser/utils.py | msgconvert | def msgconvert(email):
"""
Exec msgconvert tool, to convert msg Outlook
mail in eml mail format
Args:
email (string): file path of Outlook msg mail
Returns:
tuple with file path of mail converted and
standard output data (unicode Python 2, str Python 3)
"""
log.debug("Started converting Outlook email")
temph, temp = tempfile.mkstemp(prefix="outlook_")
command = ["msgconvert", "--outfile", temp, email]
try:
if six.PY2:
with open(os.devnull, "w") as devnull:
out = subprocess.Popen(
command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=devnull)
elif six.PY3:
out = subprocess.Popen(
command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
except OSError:
message = "To use this function you must install 'msgconvert' tool"
log.exception(message)
raise MailParserOSError(message)
else:
stdoutdata, _ = out.communicate()
return temp, stdoutdata.decode("utf-8").strip()
finally:
os.close(temph) | python | def msgconvert(email):
"""
Exec msgconvert tool, to convert msg Outlook
mail in eml mail format
Args:
email (string): file path of Outlook msg mail
Returns:
tuple with file path of mail converted and
standard output data (unicode Python 2, str Python 3)
"""
log.debug("Started converting Outlook email")
temph, temp = tempfile.mkstemp(prefix="outlook_")
command = ["msgconvert", "--outfile", temp, email]
try:
if six.PY2:
with open(os.devnull, "w") as devnull:
out = subprocess.Popen(
command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=devnull)
elif six.PY3:
out = subprocess.Popen(
command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
except OSError:
message = "To use this function you must install 'msgconvert' tool"
log.exception(message)
raise MailParserOSError(message)
else:
stdoutdata, _ = out.communicate()
return temp, stdoutdata.decode("utf-8").strip()
finally:
os.close(temph) | [
"def",
"msgconvert",
"(",
"email",
")",
":",
"log",
".",
"debug",
"(",
"\"Started converting Outlook email\"",
")",
"temph",
",",
"temp",
"=",
"tempfile",
".",
"mkstemp",
"(",
"prefix",
"=",
"\"outlook_\"",
")",
"command",
"=",
"[",
"\"msgconvert\"",
",",
"\... | Exec msgconvert tool, to convert msg Outlook
mail in eml mail format
Args:
email (string): file path of Outlook msg mail
Returns:
tuple with file path of mail converted and
standard output data (unicode Python 2, str Python 3) | [
"Exec",
"msgconvert",
"tool",
"to",
"convert",
"msg",
"Outlook",
"mail",
"in",
"eml",
"mail",
"format"
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/utils.py#L204-L241 | train | 207,864 |
SpamScope/mail-parser | mailparser/utils.py | parse_received | def parse_received(received):
"""
Parse a single received header.
Return a dictionary of values by clause.
Arguments:
received {str} -- single received header
Raises:
MailParserReceivedParsingError -- Raised when a
received header cannot be parsed
Returns:
dict -- values by clause
"""
values_by_clause = {}
for pattern in RECEIVED_COMPILED_LIST:
matches = [match for match in pattern.finditer(received)]
if len(matches) == 0:
# no matches for this clause, but it's ok! keep going!
log.debug("No matches found for %s in %s" % (
pattern.pattern, received))
continue
elif len(matches) > 1:
# uh, can't have more than one of each clause in a received.
# so either there's more than one or the current regex is wrong
msg = "More than one match found for %s in %s" % (
pattern.pattern, received)
log.error(msg)
raise MailParserReceivedParsingError(msg)
else:
# otherwise we have one matching clause!
log.debug("Found one match for %s in %s" % (
pattern.pattern, received))
match = matches[0].groupdict()
if six.PY2:
values_by_clause[match.keys()[0]] = match.values()[0]
elif six.PY3:
key = list(match.keys())[0]
value = list(match.values())[0]
values_by_clause[key] = value
if len(values_by_clause) == 0:
# we weren't able to match anything...
msg = "Unable to match any clauses in %s" % (received)
log.error(msg)
raise MailParserReceivedParsingError(msg)
return values_by_clause | python | def parse_received(received):
"""
Parse a single received header.
Return a dictionary of values by clause.
Arguments:
received {str} -- single received header
Raises:
MailParserReceivedParsingError -- Raised when a
received header cannot be parsed
Returns:
dict -- values by clause
"""
values_by_clause = {}
for pattern in RECEIVED_COMPILED_LIST:
matches = [match for match in pattern.finditer(received)]
if len(matches) == 0:
# no matches for this clause, but it's ok! keep going!
log.debug("No matches found for %s in %s" % (
pattern.pattern, received))
continue
elif len(matches) > 1:
# uh, can't have more than one of each clause in a received.
# so either there's more than one or the current regex is wrong
msg = "More than one match found for %s in %s" % (
pattern.pattern, received)
log.error(msg)
raise MailParserReceivedParsingError(msg)
else:
# otherwise we have one matching clause!
log.debug("Found one match for %s in %s" % (
pattern.pattern, received))
match = matches[0].groupdict()
if six.PY2:
values_by_clause[match.keys()[0]] = match.values()[0]
elif six.PY3:
key = list(match.keys())[0]
value = list(match.values())[0]
values_by_clause[key] = value
if len(values_by_clause) == 0:
# we weren't able to match anything...
msg = "Unable to match any clauses in %s" % (received)
log.error(msg)
raise MailParserReceivedParsingError(msg)
return values_by_clause | [
"def",
"parse_received",
"(",
"received",
")",
":",
"values_by_clause",
"=",
"{",
"}",
"for",
"pattern",
"in",
"RECEIVED_COMPILED_LIST",
":",
"matches",
"=",
"[",
"match",
"for",
"match",
"in",
"pattern",
".",
"finditer",
"(",
"received",
")",
"]",
"if",
"... | Parse a single received header.
Return a dictionary of values by clause.
Arguments:
received {str} -- single received header
Raises:
MailParserReceivedParsingError -- Raised when a
received header cannot be parsed
Returns:
dict -- values by clause | [
"Parse",
"a",
"single",
"received",
"header",
".",
"Return",
"a",
"dictionary",
"of",
"values",
"by",
"clause",
"."
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/utils.py#L244-L293 | train | 207,865 |
SpamScope/mail-parser | mailparser/utils.py | receiveds_parsing | def receiveds_parsing(receiveds):
"""
This function parses the receiveds headers.
Args:
receiveds (list): list of raw receiveds headers
Returns:
a list of parsed receiveds headers with first hop in first position
"""
parsed = []
receiveds = [re.sub(JUNK_PATTERN, " ", i).strip() for i in receiveds]
n = len(receiveds)
log.debug("Nr. of receiveds. {}".format(n))
for idx, received in enumerate(receiveds):
log.debug("Parsing received {}/{}".format(idx + 1, n))
log.debug("Try to parse {!r}".format(received))
try:
# try to parse the current received header...
values_by_clause = parse_received(received)
except MailParserReceivedParsingError:
# if we can't, let's append the raw
parsed.append({'raw': received})
else:
# otherwise append the full values_by_clause dict
parsed.append(values_by_clause)
log.debug("len(receiveds) %s, len(parsed) %s" % (
len(receiveds), len(parsed)))
if len(receiveds) != len(parsed):
# something really bad happened,
# so just return raw receiveds with hop indices
log.error("len(receiveds): %s, len(parsed): %s, receiveds: %s, \
parsed: %s" % (len(receiveds), len(parsed), receiveds, parsed))
return receiveds_not_parsed(receiveds)
else:
# all's good! we have parsed or raw receiveds for each received header
return receiveds_format(parsed) | python | def receiveds_parsing(receiveds):
"""
This function parses the receiveds headers.
Args:
receiveds (list): list of raw receiveds headers
Returns:
a list of parsed receiveds headers with first hop in first position
"""
parsed = []
receiveds = [re.sub(JUNK_PATTERN, " ", i).strip() for i in receiveds]
n = len(receiveds)
log.debug("Nr. of receiveds. {}".format(n))
for idx, received in enumerate(receiveds):
log.debug("Parsing received {}/{}".format(idx + 1, n))
log.debug("Try to parse {!r}".format(received))
try:
# try to parse the current received header...
values_by_clause = parse_received(received)
except MailParserReceivedParsingError:
# if we can't, let's append the raw
parsed.append({'raw': received})
else:
# otherwise append the full values_by_clause dict
parsed.append(values_by_clause)
log.debug("len(receiveds) %s, len(parsed) %s" % (
len(receiveds), len(parsed)))
if len(receiveds) != len(parsed):
# something really bad happened,
# so just return raw receiveds with hop indices
log.error("len(receiveds): %s, len(parsed): %s, receiveds: %s, \
parsed: %s" % (len(receiveds), len(parsed), receiveds, parsed))
return receiveds_not_parsed(receiveds)
else:
# all's good! we have parsed or raw receiveds for each received header
return receiveds_format(parsed) | [
"def",
"receiveds_parsing",
"(",
"receiveds",
")",
":",
"parsed",
"=",
"[",
"]",
"receiveds",
"=",
"[",
"re",
".",
"sub",
"(",
"JUNK_PATTERN",
",",
"\" \"",
",",
"i",
")",
".",
"strip",
"(",
")",
"for",
"i",
"in",
"receiveds",
"]",
"n",
"=",
"len",... | This function parses the receiveds headers.
Args:
receiveds (list): list of raw receiveds headers
Returns:
a list of parsed receiveds headers with first hop in first position | [
"This",
"function",
"parses",
"the",
"receiveds",
"headers",
"."
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/utils.py#L296-L337 | train | 207,866 |
SpamScope/mail-parser | mailparser/utils.py | receiveds_not_parsed | def receiveds_not_parsed(receiveds):
"""
If receiveds are not parsed, makes a new structure with raw
field. It's useful to have the same structure of receiveds
parsed.
Args:
receiveds (list): list of raw receiveds headers
Returns:
a list of not parsed receiveds headers with first hop in first position
"""
log.debug("Receiveds for this email are not parsed")
output = []
counter = Counter()
for i in receiveds[::-1]:
j = {"raw": i.strip()}
j["hop"] = counter["hop"] + 1
counter["hop"] += 1
output.append(j)
else:
return output | python | def receiveds_not_parsed(receiveds):
"""
If receiveds are not parsed, makes a new structure with raw
field. It's useful to have the same structure of receiveds
parsed.
Args:
receiveds (list): list of raw receiveds headers
Returns:
a list of not parsed receiveds headers with first hop in first position
"""
log.debug("Receiveds for this email are not parsed")
output = []
counter = Counter()
for i in receiveds[::-1]:
j = {"raw": i.strip()}
j["hop"] = counter["hop"] + 1
counter["hop"] += 1
output.append(j)
else:
return output | [
"def",
"receiveds_not_parsed",
"(",
"receiveds",
")",
":",
"log",
".",
"debug",
"(",
"\"Receiveds for this email are not parsed\"",
")",
"output",
"=",
"[",
"]",
"counter",
"=",
"Counter",
"(",
")",
"for",
"i",
"in",
"receiveds",
"[",
":",
":",
"-",
"1",
"... | If receiveds are not parsed, makes a new structure with raw
field. It's useful to have the same structure of receiveds
parsed.
Args:
receiveds (list): list of raw receiveds headers
Returns:
a list of not parsed receiveds headers with first hop in first position | [
"If",
"receiveds",
"are",
"not",
"parsed",
"makes",
"a",
"new",
"structure",
"with",
"raw",
"field",
".",
"It",
"s",
"useful",
"to",
"have",
"the",
"same",
"structure",
"of",
"receiveds",
"parsed",
"."
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/utils.py#L352-L375 | train | 207,867 |
SpamScope/mail-parser | mailparser/utils.py | receiveds_format | def receiveds_format(receiveds):
"""
Given a list of receiveds hop, adds metadata and reformat
field values
Args:
receiveds (list): list of receiveds hops already formatted
Returns:
list of receiveds reformated and with new fields
"""
log.debug("Receiveds for this email are parsed")
output = []
counter = Counter()
for i in receiveds[::-1]:
# Clean strings
j = {k: v.strip() for k, v in i.items() if v}
# Add hop
j["hop"] = counter["hop"] + 1
# Add UTC date
if i.get("date"):
# Modify date to manage strange header like:
# "for <eboktor@romolo.com>; Tue, 7 Mar 2017 14:29:24 -0800",
i["date"] = i["date"].split(";")[-1]
try:
j["date_utc"], _ = convert_mail_date(i["date"])
except TypeError:
j["date_utc"] = None
# Add delay
size = len(output)
now = j.get("date_utc")
if size and now:
before = output[counter["hop"] - 1].get("date_utc")
if before:
j["delay"] = (now - before).total_seconds()
else:
j["delay"] = 0
else:
j["delay"] = 0
# append result
output.append(j)
# new hop
counter["hop"] += 1
else:
for i in output:
if i.get("date_utc"):
i["date_utc"] = i["date_utc"].isoformat()
else:
return output | python | def receiveds_format(receiveds):
"""
Given a list of receiveds hop, adds metadata and reformat
field values
Args:
receiveds (list): list of receiveds hops already formatted
Returns:
list of receiveds reformated and with new fields
"""
log.debug("Receiveds for this email are parsed")
output = []
counter = Counter()
for i in receiveds[::-1]:
# Clean strings
j = {k: v.strip() for k, v in i.items() if v}
# Add hop
j["hop"] = counter["hop"] + 1
# Add UTC date
if i.get("date"):
# Modify date to manage strange header like:
# "for <eboktor@romolo.com>; Tue, 7 Mar 2017 14:29:24 -0800",
i["date"] = i["date"].split(";")[-1]
try:
j["date_utc"], _ = convert_mail_date(i["date"])
except TypeError:
j["date_utc"] = None
# Add delay
size = len(output)
now = j.get("date_utc")
if size and now:
before = output[counter["hop"] - 1].get("date_utc")
if before:
j["delay"] = (now - before).total_seconds()
else:
j["delay"] = 0
else:
j["delay"] = 0
# append result
output.append(j)
# new hop
counter["hop"] += 1
else:
for i in output:
if i.get("date_utc"):
i["date_utc"] = i["date_utc"].isoformat()
else:
return output | [
"def",
"receiveds_format",
"(",
"receiveds",
")",
":",
"log",
".",
"debug",
"(",
"\"Receiveds for this email are parsed\"",
")",
"output",
"=",
"[",
"]",
"counter",
"=",
"Counter",
"(",
")",
"for",
"i",
"in",
"receiveds",
"[",
":",
":",
"-",
"1",
"]",
":... | Given a list of receiveds hop, adds metadata and reformat
field values
Args:
receiveds (list): list of receiveds hops already formatted
Returns:
list of receiveds reformated and with new fields | [
"Given",
"a",
"list",
"of",
"receiveds",
"hop",
"adds",
"metadata",
"and",
"reformat",
"field",
"values"
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/utils.py#L378-L434 | train | 207,868 |
SpamScope/mail-parser | mailparser/utils.py | get_header | def get_header(message, name):
"""
Gets an email.message.Message and a header name and returns
the mail header decoded with the correct charset.
Args:
message (email.message.Message): email message object
name (string): header to get
Returns:
decoded header
"""
header = message.get(name)
log.debug("Getting header {!r}: {!r}".format(name, header))
if header:
return decode_header_part(header)
return six.text_type() | python | def get_header(message, name):
"""
Gets an email.message.Message and a header name and returns
the mail header decoded with the correct charset.
Args:
message (email.message.Message): email message object
name (string): header to get
Returns:
decoded header
"""
header = message.get(name)
log.debug("Getting header {!r}: {!r}".format(name, header))
if header:
return decode_header_part(header)
return six.text_type() | [
"def",
"get_header",
"(",
"message",
",",
"name",
")",
":",
"header",
"=",
"message",
".",
"get",
"(",
"name",
")",
"log",
".",
"debug",
"(",
"\"Getting header {!r}: {!r}\"",
".",
"format",
"(",
"name",
",",
"header",
")",
")",
"if",
"header",
":",
"re... | Gets an email.message.Message and a header name and returns
the mail header decoded with the correct charset.
Args:
message (email.message.Message): email message object
name (string): header to get
Returns:
decoded header | [
"Gets",
"an",
"email",
".",
"message",
".",
"Message",
"and",
"a",
"header",
"name",
"and",
"returns",
"the",
"mail",
"header",
"decoded",
"with",
"the",
"correct",
"charset",
"."
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/utils.py#L448-L464 | train | 207,869 |
SpamScope/mail-parser | mailparser/utils.py | get_mail_keys | def get_mail_keys(message, complete=True):
"""
Given an email.message.Message, return a set with all email parts to get
Args:
message (email.message.Message): email message object
complete (bool): if True returns all email headers
Returns:
set with all email parts
"""
if complete:
log.debug("Get all headers")
all_headers_keys = {i.lower() for i in message.keys()}
all_parts = ADDRESSES_HEADERS | OTHERS_PARTS | all_headers_keys
else:
log.debug("Get only mains headers")
all_parts = ADDRESSES_HEADERS | OTHERS_PARTS
log.debug("All parts to get: {}".format(", ".join(all_parts)))
return all_parts | python | def get_mail_keys(message, complete=True):
"""
Given an email.message.Message, return a set with all email parts to get
Args:
message (email.message.Message): email message object
complete (bool): if True returns all email headers
Returns:
set with all email parts
"""
if complete:
log.debug("Get all headers")
all_headers_keys = {i.lower() for i in message.keys()}
all_parts = ADDRESSES_HEADERS | OTHERS_PARTS | all_headers_keys
else:
log.debug("Get only mains headers")
all_parts = ADDRESSES_HEADERS | OTHERS_PARTS
log.debug("All parts to get: {}".format(", ".join(all_parts)))
return all_parts | [
"def",
"get_mail_keys",
"(",
"message",
",",
"complete",
"=",
"True",
")",
":",
"if",
"complete",
":",
"log",
".",
"debug",
"(",
"\"Get all headers\"",
")",
"all_headers_keys",
"=",
"{",
"i",
".",
"lower",
"(",
")",
"for",
"i",
"in",
"message",
".",
"k... | Given an email.message.Message, return a set with all email parts to get
Args:
message (email.message.Message): email message object
complete (bool): if True returns all email headers
Returns:
set with all email parts | [
"Given",
"an",
"email",
".",
"message",
".",
"Message",
"return",
"a",
"set",
"with",
"all",
"email",
"parts",
"to",
"get"
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/utils.py#L467-L488 | train | 207,870 |
SpamScope/mail-parser | mailparser/utils.py | write_sample | def write_sample(binary, payload, path, filename): # pragma: no cover
"""
This function writes a sample on file system.
Args:
binary (bool): True if it's a binary file
payload: payload of sample, in base64 if it's a binary
path (string): path of file
filename (string): name of file
hash_ (string): file hash
"""
if not os.path.exists(path):
os.makedirs(path)
sample = os.path.join(path, filename)
if binary:
with open(sample, "wb") as f:
f.write(base64.b64decode(payload))
else:
with open(sample, "w") as f:
f.write(payload) | python | def write_sample(binary, payload, path, filename): # pragma: no cover
"""
This function writes a sample on file system.
Args:
binary (bool): True if it's a binary file
payload: payload of sample, in base64 if it's a binary
path (string): path of file
filename (string): name of file
hash_ (string): file hash
"""
if not os.path.exists(path):
os.makedirs(path)
sample = os.path.join(path, filename)
if binary:
with open(sample, "wb") as f:
f.write(base64.b64decode(payload))
else:
with open(sample, "w") as f:
f.write(payload) | [
"def",
"write_sample",
"(",
"binary",
",",
"payload",
",",
"path",
",",
"filename",
")",
":",
"# pragma: no cover",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"sample",
"=",
"os",
".... | This function writes a sample on file system.
Args:
binary (bool): True if it's a binary file
payload: payload of sample, in base64 if it's a binary
path (string): path of file
filename (string): name of file
hash_ (string): file hash | [
"This",
"function",
"writes",
"a",
"sample",
"on",
"file",
"system",
"."
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/utils.py#L531-L552 | train | 207,871 |
SpamScope/mail-parser | mailparser/mailparser.py | MailParser.from_file_obj | def from_file_obj(cls, fp):
"""
Init a new object from a file-like object.
Not for Outlook msg.
Args:
fp (file-like object): file-like object of raw email
Returns:
Instance of MailParser
"""
log.debug("Parsing email from file object")
try:
fp.seek(0)
except IOError:
# When stdout is a TTY it's a character device
# and it's not seekable, you cannot seek in a TTY.
pass
finally:
s = fp.read()
return cls.from_string(s) | python | def from_file_obj(cls, fp):
"""
Init a new object from a file-like object.
Not for Outlook msg.
Args:
fp (file-like object): file-like object of raw email
Returns:
Instance of MailParser
"""
log.debug("Parsing email from file object")
try:
fp.seek(0)
except IOError:
# When stdout is a TTY it's a character device
# and it's not seekable, you cannot seek in a TTY.
pass
finally:
s = fp.read()
return cls.from_string(s) | [
"def",
"from_file_obj",
"(",
"cls",
",",
"fp",
")",
":",
"log",
".",
"debug",
"(",
"\"Parsing email from file object\"",
")",
"try",
":",
"fp",
".",
"seek",
"(",
"0",
")",
"except",
"IOError",
":",
"# When stdout is a TTY it's a character device",
"# and it's not ... | Init a new object from a file-like object.
Not for Outlook msg.
Args:
fp (file-like object): file-like object of raw email
Returns:
Instance of MailParser | [
"Init",
"a",
"new",
"object",
"from",
"a",
"file",
"-",
"like",
"object",
".",
"Not",
"for",
"Outlook",
"msg",
"."
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L144-L165 | train | 207,872 |
def from_file(cls, fp, is_outlook=False):
    """
    Build a new MailParser instance from a file path.

    Args:
        fp (string): path of the raw email file
        is_outlook (boolean): True when the file is a temporary
            converted Outlook email that should be removed once parsed

    Returns:
        Instance of MailParser
    """
    log.debug("Parsing email from file {!r}".format(fp))

    with ported_open(fp) as handle:
        message = email.message_from_file(handle)

    if is_outlook:
        log.debug("Removing temp converted Outlook email {!r}".format(fp))
        os.remove(fp)

    return cls(message)
"""
Init a new object from a file path.
Args:
fp (string): file path of raw email
is_outlook (boolean): if True is an Outlook email
Returns:
Instance of MailParser
"""
log.debug("Parsing email from file {!r}".format(fp))
with ported_open(fp) as f:
message = email.message_from_file(f)
if is_outlook:
log.debug("Removing temp converted Outlook email {!r}".format(fp))
os.remove(fp)
return cls(message) | [
"def",
"from_file",
"(",
"cls",
",",
"fp",
",",
"is_outlook",
"=",
"False",
")",
":",
"log",
".",
"debug",
"(",
"\"Parsing email from file {!r}\"",
".",
"format",
"(",
"fp",
")",
")",
"with",
"ported_open",
"(",
"fp",
")",
"as",
"f",
":",
"message",
"=... | Init a new object from a file path.
Args:
fp (string): file path of raw email
is_outlook (boolean): if True is an Outlook email
Returns:
Instance of MailParser | [
"Init",
"a",
"new",
"object",
"from",
"a",
"file",
"path",
"."
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L168-L188 | train | 207,873 |
def from_string(cls, s):
    """
    Build a new MailParser instance from a raw email string.

    Args:
        s (string): raw email

    Returns:
        Instance of MailParser
    """
    log.debug("Parsing email from string")
    return cls(email.message_from_string(s))
"""
Init a new object from a string.
Args:
s (string): raw email
Returns:
Instance of MailParser
"""
log.debug("Parsing email from string")
message = email.message_from_string(s)
return cls(message) | [
"def",
"from_string",
"(",
"cls",
",",
"s",
")",
":",
"log",
".",
"debug",
"(",
"\"Parsing email from string\"",
")",
"message",
"=",
"email",
".",
"message_from_string",
"(",
"s",
")",
"return",
"cls",
"(",
"message",
")"
] | Init a new object from a string.
Args:
s (string): raw email
Returns:
Instance of MailParser | [
"Init",
"a",
"new",
"object",
"from",
"a",
"string",
"."
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L207-L220 | train | 207,874 |
def from_bytes(cls, bt):
    """
    Build a new MailParser instance from raw bytes.

    Args:
        bt (bytes-like object): raw email as a bytes-like object

    Raises:
        MailParserEnvironmentError: under Python 2, which cannot
            parse an email from bytes.

    Returns:
        Instance of MailParser
    """
    log.debug("Parsing email from bytes")

    # Guard clause: bytes parsing exists only in the Python 3 stdlib.
    if six.PY2:
        raise MailParserEnvironmentError(
            "Parsing from bytes is valid only for Python 3.x version")

    return cls(email.message_from_bytes(bt))
"""
Init a new object from bytes.
Args:
bt (bytes-like object): raw email as bytes-like object
Returns:
Instance of MailParser
"""
log.debug("Parsing email from bytes")
if six.PY2:
raise MailParserEnvironmentError(
"Parsing from bytes is valid only for Python 3.x version")
message = email.message_from_bytes(bt)
return cls(message) | [
"def",
"from_bytes",
"(",
"cls",
",",
"bt",
")",
":",
"log",
".",
"debug",
"(",
"\"Parsing email from bytes\"",
")",
"if",
"six",
".",
"PY2",
":",
"raise",
"MailParserEnvironmentError",
"(",
"\"Parsing from bytes is valid only for Python 3.x version\"",
")",
"message"... | Init a new object from bytes.
Args:
bt (bytes-like object): raw email as bytes-like object
Returns:
Instance of MailParser | [
"Init",
"a",
"new",
"object",
"from",
"bytes",
"."
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L223-L238 | train | 207,875 |
def _reset(self):
    """
    Clear all cached parsing state on this mail object.
    """
    log.debug("Reset all variables")
    # Collected mail parts.
    self._attachments, self._text_plain, self._text_html = [], [], []
    # Parsing-defect bookkeeping.
    self._defects = []
    self._defects_categories = set()
    self._has_defects = False
"""
Reset the state of mail object.
"""
log.debug("Reset all variables")
self._attachments = []
self._text_plain = []
self._text_html = []
self._defects = []
self._defects_categories = set()
self._has_defects = False | [
"def",
"_reset",
"(",
"self",
")",
":",
"log",
".",
"debug",
"(",
"\"Reset all variables\"",
")",
"self",
".",
"_attachments",
"=",
"[",
"]",
"self",
".",
"_text_plain",
"=",
"[",
"]",
"self",
".",
"_text_html",
"=",
"[",
"]",
"self",
".",
"_defects",
... | Reset the state of mail object. | [
"Reset",
"the",
"state",
"of",
"mail",
"object",
"."
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L240-L251 | train | 207,876 |
SpamScope/mail-parser | mailparser/mailparser.py | MailParser._append_defects | def _append_defects(self, part, part_content_type):
"""
Add new defects and defects categories to object attributes.
The defects are a list of all the problems found
when parsing this message.
Args:
part (string): mail part
part_content_type (string): content type of part
"""
part_defects = {}
for e in part.defects:
defects = "{}: {}".format(e.__class__.__name__, e.__doc__)
self._defects_categories.add(e.__class__.__name__)
part_defects.setdefault(part_content_type, []).append(defects)
log.debug("Added defect {!r}".format(defects))
# Tag mail with defect
if part_defects:
self._has_defects = True
# Save all defects
self._defects.append(part_defects) | python | def _append_defects(self, part, part_content_type):
"""
Add new defects and defects categories to object attributes.
The defects are a list of all the problems found
when parsing this message.
Args:
part (string): mail part
part_content_type (string): content type of part
"""
part_defects = {}
for e in part.defects:
defects = "{}: {}".format(e.__class__.__name__, e.__doc__)
self._defects_categories.add(e.__class__.__name__)
part_defects.setdefault(part_content_type, []).append(defects)
log.debug("Added defect {!r}".format(defects))
# Tag mail with defect
if part_defects:
self._has_defects = True
# Save all defects
self._defects.append(part_defects) | [
"def",
"_append_defects",
"(",
"self",
",",
"part",
",",
"part_content_type",
")",
":",
"part_defects",
"=",
"{",
"}",
"for",
"e",
"in",
"part",
".",
"defects",
":",
"defects",
"=",
"\"{}: {}\"",
".",
"format",
"(",
"e",
".",
"__class__",
".",
"__name__"... | Add new defects and defects categories to object attributes.
The defects are a list of all the problems found
when parsing this message.
Args:
part (string): mail part
part_content_type (string): content type of part | [
"Add",
"new",
"defects",
"and",
"defects",
"categories",
"to",
"object",
"attributes",
"."
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L253-L278 | train | 207,877 |
def _make_mail(self, complete=True):
    """
    Assign the right value to every token of the email and return
    the parsed result.

    Keyword Arguments:
        complete {bool} -- when True include all mail parts
            (default: {True})

    Returns:
        dict -- Parsed email object
    """
    parsed = {}

    for key in get_mail_keys(self.message, complete):
        log.debug("Getting header or part {!r}".format(key))
        value = getattr(self, key)
        if value:
            parsed[key] = value

    # Defect status is always reported; details only when present.
    parsed["has_defects"] = self.has_defects
    if self.has_defects:
        parsed["defects"] = self.defects
        parsed["defects_categories"] = list(self.defects_categories)

    return parsed
"""
This method assigns the right values to all tokens of email.
Returns a parsed object
Keyword Arguments:
complete {bool} -- If True returns all mails parts
(default: {True})
Returns:
dict -- Parsed email object
"""
mail = {}
keys = get_mail_keys(self.message, complete)
for i in keys:
log.debug("Getting header or part {!r}".format(i))
value = getattr(self, i)
if value:
mail[i] = value
# add defects
mail["has_defects"] = self.has_defects
if self.has_defects:
mail["defects"] = self.defects
mail["defects_categories"] = list(self.defects_categories)
return mail | [
"def",
"_make_mail",
"(",
"self",
",",
"complete",
"=",
"True",
")",
":",
"mail",
"=",
"{",
"}",
"keys",
"=",
"get_mail_keys",
"(",
"self",
".",
"message",
",",
"complete",
")",
"for",
"i",
"in",
"keys",
":",
"log",
".",
"debug",
"(",
"\"Getting head... | This method assigns the right values to all tokens of email.
Returns a parsed object
Keyword Arguments:
complete {bool} -- If True returns all mails parts
(default: {True})
Returns:
dict -- Parsed email object | [
"This",
"method",
"assigns",
"the",
"right",
"values",
"to",
"all",
"tokens",
"of",
"email",
".",
"Returns",
"a",
"parsed",
"object"
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L280-L308 | train | 207,878 |
def get_server_ipaddress(self, trust):
    """
    Return the ip address of sender

    Overview:
    Extract a reliable sender IP address heuristically for each message.
    Although the message format dictates a chain of relaying IP
    addresses in each message, a malicious relay can easily alter that.
    Therefore we cannot simply take the first IP in
    the chain. Instead, our method is as follows.
    First we trust the sender IP reported by our mail server in the
    Received headers, and if the previous relay IP address is on our trust
    list (e.g. other well-known mail services), we continue to
    follow the previous Received line, till we reach the first unrecognized
    IP address in the email header.

    From article Characterizing Botnets from Email Spam Records:
    Li Zhuang, J. D. Tygar

    In our case we trust only our mail server with the trust string.

    Args:
        trust (string): String that identify our mail server

    Returns:
        string with the ip address, or None when no public sender
        IP can be determined
    """
    log.debug("Trust string is {!r}".format(trust))

    # A blank trust string would match every header; bail out early.
    if not trust.strip():
        return

    received = self.message.get_all("received", [])
    for i in received:
        i = ported_string(i)
        if trust in i:
            log.debug("Trust string {!r} is in {!r}".format(trust, i))
            # Only scan the part of the header before "by" (the
            # sending host).  str.find() returns -1 when "by" is
            # absent; the original i[0:i.find("by")] then sliced off
            # the last character -- scan the whole header instead.
            by_index = i.find("by")
            haystack = i if by_index == -1 else i[:by_index]
            check = REGXIP.findall(haystack)
            if check:
                try:
                    ip_str = six.text_type(check[-1])
                    log.debug("Found sender IP {!r} in {!r}".format(
                        ip_str, i))
                    ip = ipaddress.ip_address(ip_str)
                except ValueError:
                    # The matched text is not a parsable IP address;
                    # give up on this message (as before).
                    return
                else:
                    if not ip.is_private:
                        log.debug("IP {!r} not private".format(ip_str))
                        return ip_str
"""
Return the ip address of sender
Overview:
Extract a reliable sender IP address heuristically for each message.
Although the message format dictates a chain of relaying IP
addresses in each message, a malicious relay can easily alter that.
Therefore we cannot simply take the first IP in
the chain. Instead, our method is as follows.
First we trust the sender IP reported by our mail server in the
Received headers, and if the previous relay IP address is on our trust
list (e.g. other well-known mail services), we continue to
follow the previous Received line, till we reach the first unrecognized
IP address in the email header.
From article Characterizing Botnets from Email Spam Records:
Li Zhuang, J. D. Tygar
In our case we trust only our mail server with the trust string.
Args:
trust (string): String that identify our mail server
Returns:
string with the ip address
"""
log.debug("Trust string is {!r}".format(trust))
if not trust.strip():
return
received = self.message.get_all("received", [])
for i in received:
i = ported_string(i)
if trust in i:
log.debug("Trust string {!r} is in {!r}".format(trust, i))
check = REGXIP.findall(i[0:i.find("by")])
if check:
try:
ip_str = six.text_type(check[-1])
log.debug("Found sender IP {!r} in {!r}".format(
ip_str, i))
ip = ipaddress.ip_address(ip_str)
except ValueError:
return
else:
if not ip.is_private:
log.debug("IP {!r} not private".format(ip_str))
return ip_str | [
"def",
"get_server_ipaddress",
"(",
"self",
",",
"trust",
")",
":",
"log",
".",
"debug",
"(",
"\"Trust string is {!r}\"",
".",
"format",
"(",
"trust",
")",
")",
"if",
"not",
"trust",
".",
"strip",
"(",
")",
":",
"return",
"received",
"=",
"self",
".",
... | Return the ip address of sender
Overview:
Extract a reliable sender IP address heuristically for each message.
Although the message format dictates a chain of relaying IP
addresses in each message, a malicious relay can easily alter that.
Therefore we cannot simply take the first IP in
the chain. Instead, our method is as follows.
First we trust the sender IP reported by our mail server in the
Received headers, and if the previous relay IP address is on our trust
list (e.g. other well-known mail services), we continue to
follow the previous Received line, till we reach the first unrecognized
IP address in the email header.
From article Characterizing Botnets from Email Spam Records:
Li Zhuang, J. D. Tygar
In our case we trust only our mail server with the trust string.
Args:
trust (string): String that identify our mail server
Returns:
string with the ip address | [
"Return",
"the",
"ip",
"address",
"of",
"sender"
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L409-L460 | train | 207,879 |
def received_raw(self):
    """
    Return every "Received" header of the message in raw
    (header-decoded but unparsed) form, as a list.
    """
    return [decode_header_part(header)
            for header in self.message.get_all("received", [])]
"""
Return a list of all received headers in raw format
"""
output = []
for i in self.message.get_all("received", []):
output.append(decode_header_part(i))
return output | [
"def",
"received_raw",
"(",
"self",
")",
":",
"output",
"=",
"[",
"]",
"for",
"i",
"in",
"self",
".",
"message",
".",
"get_all",
"(",
"\"received\"",
",",
"[",
"]",
")",
":",
"output",
".",
"append",
"(",
"decode_header_part",
"(",
"i",
")",
")",
"... | Return a list of all received headers in raw format | [
"Return",
"a",
"list",
"of",
"all",
"received",
"headers",
"in",
"raw",
"format"
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L510-L517 | train | 207,880 |
def headers(self):
    """
    Return all message headers as a plain dict with decoded values.
    """
    return {name: decode_header_part(value)
            for name, value in self.message.items()}
"""
Return only the headers as Python object
"""
d = {}
for k, v in self.message.items():
d[k] = decode_header_part(v)
return d | [
"def",
"headers",
"(",
"self",
")",
":",
"d",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"self",
".",
"message",
".",
"items",
"(",
")",
":",
"d",
"[",
"k",
"]",
"=",
"decode_header_part",
"(",
"v",
")",
"return",
"d"
] | Return only the headers as Python object | [
"Return",
"only",
"the",
"headers",
"as",
"Python",
"object"
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L529-L536 | train | 207,881 |
def date(self):
    """
    Return the mail date in datetime.datetime format and UTC, or
    None when the Date header is missing or cannot be parsed.
    """
    raw_date = self.message.get('date')
    try:
        conv, _ = convert_mail_date(raw_date)
    except Exception:
        # Missing or unparsable date: report it as unknown.  The
        # original ``finally: return conv`` swallowed *every*
        # exception, including SystemExit/KeyboardInterrupt, which
        # now propagate normally.
        return None
    return conv
"""
Return the mail date in datetime.datetime format and UTC.
"""
date = self.message.get('date')
conv = None
try:
conv, _ = convert_mail_date(date)
finally:
return conv | [
"def",
"date",
"(",
"self",
")",
":",
"date",
"=",
"self",
".",
"message",
".",
"get",
"(",
"'date'",
")",
"conv",
"=",
"None",
"try",
":",
"conv",
",",
"_",
"=",
"convert_mail_date",
"(",
"date",
")",
"finally",
":",
"return",
"conv"
] | Return the mail date in datetime.datetime format and UTC. | [
"Return",
"the",
"mail",
"date",
"in",
"datetime",
".",
"datetime",
"format",
"and",
"UTC",
"."
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L560-L570 | train | 207,882 |
def timezone(self):
    """
    Return the timezone offset from UTC of the mail date; 0 when the
    Date header is missing or cannot be parsed.
    """
    raw_date = self.message.get('date')
    try:
        _, offset = convert_mail_date(raw_date)
    except Exception:
        # Fall back to 0, as before -- but without the original
        # ``finally: return`` that also hid SystemExit and
        # KeyboardInterrupt.
        return 0
    return offset
"""
Return timezone. Offset from UTC.
"""
date = self.message.get('date')
timezone = 0
try:
_, timezone = convert_mail_date(date)
finally:
return timezone | [
"def",
"timezone",
"(",
"self",
")",
":",
"date",
"=",
"self",
".",
"message",
".",
"get",
"(",
"'date'",
")",
"timezone",
"=",
"0",
"try",
":",
"_",
",",
"timezone",
"=",
"convert_mail_date",
"(",
"date",
")",
"finally",
":",
"return",
"timezone"
] | Return timezone. Offset from UTC. | [
"Return",
"timezone",
".",
"Offset",
"from",
"UTC",
"."
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L573-L583 | train | 207,883 |
def date_json(self):
    """
    Return the mail date serialized as a JSON string, or None when
    the date is unavailable.
    """
    if not self.date:
        return None
    return json.dumps(self.date.isoformat(), ensure_ascii=False)
"""
Return the JSON of date
"""
if self.date:
return json.dumps(self.date.isoformat(), ensure_ascii=False) | [
"def",
"date_json",
"(",
"self",
")",
":",
"if",
"self",
".",
"date",
":",
"return",
"json",
".",
"dumps",
"(",
"self",
".",
"date",
".",
"isoformat",
"(",
")",
",",
"ensure_ascii",
"=",
"False",
")"
] | Return the JSON of date | [
"Return",
"the",
"JSON",
"of",
"date"
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L586-L591 | train | 207,884 |
def mail_json(self):
    """
    Return the parsed mail serialized as a JSON string.

    When a date is present, the stored value (a datetime, which is
    not JSON-serializable) is first replaced in the underlying
    ``_mail`` dict with its ISO 8601 string form.
    """
    if self.mail.get("date"):
        # NOTE: mutates self._mail directly; assumes self.mail is a
        # view over the same dict.
        self._mail["date"] = self.date.isoformat()
    return json.dumps(self.mail, ensure_ascii=False, indent=2)
"""
Return the JSON of mail parsed
"""
if self.mail.get("date"):
self._mail["date"] = self.date.isoformat()
return json.dumps(self.mail, ensure_ascii=False, indent=2) | [
"def",
"mail_json",
"(",
"self",
")",
":",
"if",
"self",
".",
"mail",
".",
"get",
"(",
"\"date\"",
")",
":",
"self",
".",
"_mail",
"[",
"\"date\"",
"]",
"=",
"self",
".",
"date",
".",
"isoformat",
"(",
")",
"return",
"json",
".",
"dumps",
"(",
"s... | Return the JSON of mail parsed | [
"Return",
"the",
"JSON",
"of",
"mail",
"parsed"
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L601-L607 | train | 207,885 |
def mail_partial_json(self):
    """
    Return the partially parsed mail serialized as a JSON string.

    When a date is present, the stored value (a datetime, which is
    not JSON-serializable) is first replaced in the underlying
    ``_mail_partial`` dict with its ISO 8601 string form.
    """
    if self.mail_partial.get("date"):
        # NOTE: mutates self._mail_partial directly; assumes
        # self.mail_partial is a view over the same dict.
        self._mail_partial["date"] = self.date.isoformat()
    return json.dumps(self.mail_partial, ensure_ascii=False, indent=2)
"""
Return the JSON of mail parsed partial
"""
if self.mail_partial.get("date"):
self._mail_partial["date"] = self.date.isoformat()
return json.dumps(self.mail_partial, ensure_ascii=False, indent=2) | [
"def",
"mail_partial_json",
"(",
"self",
")",
":",
"if",
"self",
".",
"mail_partial",
".",
"get",
"(",
"\"date\"",
")",
":",
"self",
".",
"_mail_partial",
"[",
"\"date\"",
"]",
"=",
"self",
".",
"date",
".",
"isoformat",
"(",
")",
"return",
"json",
"."... | Return the JSON of mail parsed partial | [
"Return",
"the",
"JSON",
"of",
"mail",
"parsed",
"partial"
] | 814b56d0b803feab9dea04f054b802ce138097e2 | https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/mailparser.py#L618-L624 | train | 207,886 |
def download_worker_fn(scraper, img_url, pbar, status_flags, status_lock):
    """Standalone worker that downloads one image and records the
    outcome in the shared status flags.

    Args:
        scraper: ImageScraper instance performing the download.
        img_url: URL of the image to fetch.
        pbar: progress bar updated with the overall percentage.
        status_flags: shared dict of counters ('failed',
            'under_min_or_over_max_filesize', 'percent').
        status_lock: lock guarding status_flags and pbar.

    Returns:
        True in all cases; failures are recorded in status_flags.
    """
    failed = False
    size_failed = False
    try:
        scraper.download_image(img_url)
    except ImageDownloadError:
        failed = True
    except ImageSizeError:
        size_failed = True

    # ``with`` guarantees the lock is released even if pbar.update or
    # the bookkeeping below raises; the original explicit
    # acquire()/release() pair would leak the lock in that case.
    with status_lock:
        if failed:
            status_flags['failed'] += 1
        elif size_failed:
            status_flags['under_min_or_over_max_filesize'] += 1
        status_flags['percent'] = status_flags[
            'percent'] + old_div(100.0, scraper.no_to_download)
        pbar.update(status_flags['percent'] % 100)
    return True
""" Stnadalone function that downloads images. """
failed = False
size_failed = False
try:
scraper.download_image(img_url)
except ImageDownloadError:
failed = True
except ImageSizeError:
size_failed = True
status_lock.acquire(True)
if failed:
status_flags['failed'] += 1
elif size_failed:
status_flags['under_min_or_over_max_filesize'] += 1
status_flags['percent'] = status_flags[
'percent'] + old_div(100.0, scraper.no_to_download)
pbar.update(status_flags['percent'] % 100)
status_lock.release()
return True | [
"def",
"download_worker_fn",
"(",
"scraper",
",",
"img_url",
",",
"pbar",
",",
"status_flags",
",",
"status_lock",
")",
":",
"failed",
"=",
"False",
"size_failed",
"=",
"False",
"try",
":",
"scraper",
".",
"download_image",
"(",
"img_url",
")",
"except",
"Im... | Stnadalone function that downloads images. | [
"Stnadalone",
"function",
"that",
"downloads",
"images",
"."
] | 04cdefaa184420637d02b5a285cf407bbd428929 | https://github.com/sananth12/ImageScraper/blob/04cdefaa184420637d02b5a285cf407bbd428929/image_scraper/utils.py#L218-L237 | train | 207,887 |
def get_html(self):
    """Download the HTML content of the page at ``self.url``.

    Two strategies:

    * ``use_ghost``: render the page with a PhantomJS webdriver so
      dynamically generated content is included.
    * otherwise: plain HTTP GET via ``requests``, honouring
      ``self.proxies`` when a proxy is configured.

    Side effects: ``self.url`` may be rewritten (scheme added), and
    the result is cached on ``self.page_html`` / ``self.page_url``.

    Raises:
        PageLoadError: on non-200 responses or connection failures.

    Returns:
        tuple: (page_html, page_url)
    """
    if self.use_ghost:
        # NOTE(review): urljoin("http://", url) is an unusual way to
        # force a scheme -- confirm it yields the intended URL.
        self.url = urljoin("http://", self.url)
        import selenium
        import selenium.webdriver
        driver = selenium.webdriver.PhantomJS(
            service_log_path=os.path.devnull)
        driver.get(self.url)
        page_html = driver.page_source
        page_url = driver.current_url
        driver.quit()
    else:
        if self.proxy_url:
            print("Using proxy: " + self.proxy_url + "\n")
        try:
            page = requests.get(self.url, proxies=self.proxies)
            if page.status_code != 200:
                raise PageLoadError(page.status_code)
        except requests.exceptions.MissingSchema:
            # Bare host given without a scheme: retry once over http.
            self.url = "http://" + self.url
            page = requests.get(self.url, proxies=self.proxies)
            if page.status_code != 200:
                raise PageLoadError(page.status_code)
        except requests.exceptions.ConnectionError:
            raise PageLoadError(None)
        try:
            page_html = page.text
            page_url = page.url
        except UnboundLocalError:
            # Defensive: presumably guards a path where ``page`` was
            # never bound -- TODO confirm this branch is reachable.
            raise PageLoadError(None)
    self.page_html = page_html
    self.page_url = page_url
    return (self.page_html, self.page_url)
return (self.page_html, self.page_url) | python | def get_html(self):
""" Downloads HTML content of page given the page_url"""
if self.use_ghost:
self.url = urljoin("http://", self.url)
import selenium
import selenium.webdriver
driver = selenium.webdriver.PhantomJS(
service_log_path=os.path.devnull)
driver.get(self.url)
page_html = driver.page_source
page_url = driver.current_url
driver.quit()
else:
if self.proxy_url:
print("Using proxy: " + self.proxy_url + "\n")
try:
page = requests.get(self.url, proxies=self.proxies)
if page.status_code != 200:
raise PageLoadError(page.status_code)
except requests.exceptions.MissingSchema:
self.url = "http://" + self.url
page = requests.get(self.url, proxies=self.proxies)
if page.status_code != 200:
raise PageLoadError(page.status_code)
except requests.exceptions.ConnectionError:
raise PageLoadError(None)
try:
page_html = page.text
page_url = page.url
except UnboundLocalError:
raise PageLoadError(None)
self.page_html = page_html
self.page_url = page_url
return (self.page_html, self.page_url) | [
"def",
"get_html",
"(",
"self",
")",
":",
"if",
"self",
".",
"use_ghost",
":",
"self",
".",
"url",
"=",
"urljoin",
"(",
"\"http://\"",
",",
"self",
".",
"url",
")",
"import",
"selenium",
"import",
"selenium",
".",
"webdriver",
"driver",
"=",
"selenium",
... | Downloads HTML content of page given the page_url | [
"Downloads",
"HTML",
"content",
"of",
"page",
"given",
"the",
"page_url"
] | 04cdefaa184420637d02b5a285cf407bbd428929 | https://github.com/sananth12/ImageScraper/blob/04cdefaa184420637d02b5a285cf407bbd428929/image_scraper/utils.py#L101-L136 | train | 207,888 |
def get_img_list(self):
    """ Gets list of images from the page_html. """
    tree = html.fromstring(self.page_html)
    # Candidate URLs come from both <img src> and <a href>.
    candidates = self.process_links(tree.xpath('//img/@src'))
    candidates.extend(self.process_links(tree.xpath('//a/@href')))

    if self.filename_pattern:
        # Compile once for efficiency.
        pattern = re.compile(self.filename_pattern)

        def matches_pattern(img_url):
            """ Check whether the URL's filename matches the pattern. """
            img_filename = urlparse(img_url).path.split('/')[-1]
            return pattern.search(img_filename)

        images = [urljoin(self.url, img_url)
                  for img_url in candidates
                  if matches_pattern(img_url)]
    else:
        images = [urljoin(self.url, img_url) for img_url in candidates]

    # Deduplicate (ordering becomes arbitrary, as before).
    self.images = list(set(images))
    if self.scrape_reverse:
        self.images.reverse()
    return self.images
""" Gets list of images from the page_html. """
tree = html.fromstring(self.page_html)
img = tree.xpath('//img/@src')
links = tree.xpath('//a/@href')
img_list = self.process_links(img)
img_links = self.process_links(links)
img_list.extend(img_links)
if self.filename_pattern:
# Compile pattern for efficiency
pattern = re.compile(self.filename_pattern)
# Verifies filename in the image URL matches pattern
def matches_pattern(img_url):
""" Function to check if pattern is matched. """
img_filename = urlparse(img_url).path.split('/')[-1]
return pattern.search(img_filename)
images = [urljoin(self.url, img_url) for img_url in img_list
if matches_pattern(img_url)]
else:
images = [urljoin(self.url, img_url) for img_url in img_list]
images = list(set(images))
self.images = images
if self.scrape_reverse:
self.images.reverse()
return self.images | [
"def",
"get_img_list",
"(",
"self",
")",
":",
"tree",
"=",
"html",
".",
"fromstring",
"(",
"self",
".",
"page_html",
")",
"img",
"=",
"tree",
".",
"xpath",
"(",
"'//img/@src'",
")",
"links",
"=",
"tree",
".",
"xpath",
"(",
"'//a/@href'",
")",
"img_list... | Gets list of images from the page_html. | [
"Gets",
"list",
"of",
"images",
"from",
"the",
"page_html",
"."
] | 04cdefaa184420637d02b5a285cf407bbd428929 | https://github.com/sananth12/ImageScraper/blob/04cdefaa184420637d02b5a285cf407bbd428929/image_scraper/utils.py#L138-L167 | train | 207,889 |
sananth12/ImageScraper | image_scraper/utils.py | ImageScraper.process_download_path | def process_download_path(self):
""" Processes the download path.
It checks if the path exists and the scraper has
write permissions.
"""
if os.path.exists(self.download_path):
if not os.access(self.download_path, os.W_OK):
raise DirectoryAccessError
elif os.access(os.path.dirname(self.download_path), os.W_OK):
os.makedirs(self.download_path)
else:
raise DirectoryCreateError
return True | python | def process_download_path(self):
""" Processes the download path.
It checks if the path exists and the scraper has
write permissions.
"""
if os.path.exists(self.download_path):
if not os.access(self.download_path, os.W_OK):
raise DirectoryAccessError
elif os.access(os.path.dirname(self.download_path), os.W_OK):
os.makedirs(self.download_path)
else:
raise DirectoryCreateError
return True | [
"def",
"process_download_path",
"(",
"self",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"download_path",
")",
":",
"if",
"not",
"os",
".",
"access",
"(",
"self",
".",
"download_path",
",",
"os",
".",
"W_OK",
")",
":",
"raise"... | Processes the download path.
It checks if the path exists and the scraper has
write permissions. | [
"Processes",
"the",
"download",
"path",
"."
] | 04cdefaa184420637d02b5a285cf407bbd428929 | https://github.com/sananth12/ImageScraper/blob/04cdefaa184420637d02b5a285cf407bbd428929/image_scraper/utils.py#L169-L182 | train | 207,890 |
sananth12/ImageScraper | image_scraper/utils.py | ImageScraper.download_image | def download_image(self, img_url):
""" Downloads a single image.
Downloads img_url using self.page_url as base.
Also, raises the appropriate exception if required.
"""
img_request = None
try:
img_request = requests.request(
'get', img_url, stream=True, proxies=self.proxies)
if img_request.status_code != 200:
raise ImageDownloadError(img_request.status_code)
except:
raise ImageDownloadError()
if img_url[-3:] == "svg" or (int(img_request.headers['content-length']) > self.min_filesize and\
int(img_request.headers['content-length']) < self.max_filesize):
img_content = img_request.content
with open(os.path.join(self.download_path, img_url.split('/')[-1]), 'wb') as f:
byte_image = bytes(img_content)
f.write(byte_image)
else:
raise ImageSizeError(img_request.headers['content-length'])
return True | python | def download_image(self, img_url):
""" Downloads a single image.
Downloads img_url using self.page_url as base.
Also, raises the appropriate exception if required.
"""
img_request = None
try:
img_request = requests.request(
'get', img_url, stream=True, proxies=self.proxies)
if img_request.status_code != 200:
raise ImageDownloadError(img_request.status_code)
except:
raise ImageDownloadError()
if img_url[-3:] == "svg" or (int(img_request.headers['content-length']) > self.min_filesize and\
int(img_request.headers['content-length']) < self.max_filesize):
img_content = img_request.content
with open(os.path.join(self.download_path, img_url.split('/')[-1]), 'wb') as f:
byte_image = bytes(img_content)
f.write(byte_image)
else:
raise ImageSizeError(img_request.headers['content-length'])
return True | [
"def",
"download_image",
"(",
"self",
",",
"img_url",
")",
":",
"img_request",
"=",
"None",
"try",
":",
"img_request",
"=",
"requests",
".",
"request",
"(",
"'get'",
",",
"img_url",
",",
"stream",
"=",
"True",
",",
"proxies",
"=",
"self",
".",
"proxies",... | Downloads a single image.
Downloads img_url using self.page_url as base.
Also, raises the appropriate exception if required. | [
"Downloads",
"a",
"single",
"image",
"."
] | 04cdefaa184420637d02b5a285cf407bbd428929 | https://github.com/sananth12/ImageScraper/blob/04cdefaa184420637d02b5a285cf407bbd428929/image_scraper/utils.py#L184-L207 | train | 207,891 |
sananth12/ImageScraper | image_scraper/utils.py | ImageScraper.process_links | def process_links(self, links):
""" Function to process the list of links and filter required links."""
links_list = []
for link in links:
if os.path.splitext(link)[1][1:].strip().lower() in self.format_list:
links_list.append(link)
return links_list | python | def process_links(self, links):
""" Function to process the list of links and filter required links."""
links_list = []
for link in links:
if os.path.splitext(link)[1][1:].strip().lower() in self.format_list:
links_list.append(link)
return links_list | [
"def",
"process_links",
"(",
"self",
",",
"links",
")",
":",
"links_list",
"=",
"[",
"]",
"for",
"link",
"in",
"links",
":",
"if",
"os",
".",
"path",
".",
"splitext",
"(",
"link",
")",
"[",
"1",
"]",
"[",
"1",
":",
"]",
".",
"strip",
"(",
")",
... | Function to process the list of links and filter required links. | [
"Function",
"to",
"process",
"the",
"list",
"of",
"links",
"and",
"filter",
"required",
"links",
"."
] | 04cdefaa184420637d02b5a285cf407bbd428929 | https://github.com/sananth12/ImageScraper/blob/04cdefaa184420637d02b5a285cf407bbd428929/image_scraper/utils.py#L209-L215 | train | 207,892 |
sananth12/ImageScraper | image_scraper/progressbar.py | ProgressBar.update | def update(self, value):
"Updates the progress bar to a new value."
if value <= 0.1:
value = 0
assert 0 <= value <= self.maxval
self.currval = value
if not self._need_update() or self.finished:
return
if not self.start_time:
self.start_time = time.time()
self.seconds_elapsed = time.time() - self.start_time
self.prev_percentage = self.percentage()
if value != self.maxval:
self.fd.write(self._format_line() + '\r')
else:
self.finished = True
self.fd.write(self._format_line() + '\n') | python | def update(self, value):
"Updates the progress bar to a new value."
if value <= 0.1:
value = 0
assert 0 <= value <= self.maxval
self.currval = value
if not self._need_update() or self.finished:
return
if not self.start_time:
self.start_time = time.time()
self.seconds_elapsed = time.time() - self.start_time
self.prev_percentage = self.percentage()
if value != self.maxval:
self.fd.write(self._format_line() + '\r')
else:
self.finished = True
self.fd.write(self._format_line() + '\n') | [
"def",
"update",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"<=",
"0.1",
":",
"value",
"=",
"0",
"assert",
"0",
"<=",
"value",
"<=",
"self",
".",
"maxval",
"self",
".",
"currval",
"=",
"value",
"if",
"not",
"self",
".",
"_need_update",
"("... | Updates the progress bar to a new value. | [
"Updates",
"the",
"progress",
"bar",
"to",
"a",
"new",
"value",
"."
] | 04cdefaa184420637d02b5a285cf407bbd428929 | https://github.com/sananth12/ImageScraper/blob/04cdefaa184420637d02b5a285cf407bbd428929/image_scraper/progressbar.py#L252-L268 | train | 207,893 |
sananth12/ImageScraper | image_scraper/progressbar.py | ProgressBar.finish | def finish(self):
"""Used to tell the progress is finished."""
self.update(self.maxval)
if self.signal_set:
signal.signal(signal.SIGWINCH, signal.SIG_DFL) | python | def finish(self):
"""Used to tell the progress is finished."""
self.update(self.maxval)
if self.signal_set:
signal.signal(signal.SIGWINCH, signal.SIG_DFL) | [
"def",
"finish",
"(",
"self",
")",
":",
"self",
".",
"update",
"(",
"self",
".",
"maxval",
")",
"if",
"self",
".",
"signal_set",
":",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGWINCH",
",",
"signal",
".",
"SIG_DFL",
")"
] | Used to tell the progress is finished. | [
"Used",
"to",
"tell",
"the",
"progress",
"is",
"finished",
"."
] | 04cdefaa184420637d02b5a285cf407bbd428929 | https://github.com/sananth12/ImageScraper/blob/04cdefaa184420637d02b5a285cf407bbd428929/image_scraper/progressbar.py#L284-L288 | train | 207,894 |
sananth12/ImageScraper | image_scraper/mains.py | console_main | def console_main():
""" This function handles all the console action. """
setproctitle('image-scraper')
scraper = ImageScraper()
scraper.get_arguments()
print("\nImageScraper\n============\nRequesting page....\n")
try:
scraper.get_html()
except PageLoadError as err:
if err.status_code is None:
print("ImageScraper is unable to acces the internet.")
else:
print("Page failed to load. Status code: {0}".format(err.status_code))
sys.exit()
scraper.get_img_list()
if len(scraper.images) == 0:
sys.exit("Sorry, no images found.")
if scraper.no_to_download is None:
scraper.no_to_download = len(scraper.images)
print("Found {0} images: ".format(len(scraper.images)))
try:
scraper.process_download_path()
except DirectoryAccessError:
print("Sorry, the directory can't be accessed.")
sys.exit()
except DirectoryCreateError:
print("Sorry, the directory can't be created.")
sys.exit()
if scraper.dump_urls:
for img_url in scraper.images:
print(img_url)
status_flags = {'count': 0, 'percent': 0.0, 'failed': 0, 'under_min_or_over_max_filesize': 0}
widgets = ['Progress: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
' ', ETA(), ' ', FileTransferSpeed()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
pool = ThreadPoolExecutor(max_workers=scraper.nthreads)
status_lock = threading.Lock()
for img_url in scraper.images:
if status_flags['count'] == scraper.no_to_download:
break
pool.submit(download_worker_fn, scraper, img_url, pbar, status_flags, status_lock)
status_flags['count'] += 1
pool.shutdown(wait=True)
pbar.finish()
print("\nDone!\nDownloaded {0} images\nFailed: {1}\n".format(
status_flags['count']-status_flags['failed']-status_flags['under_min_or_over_max_filesize'],
status_flags['failed']))
return | python | def console_main():
""" This function handles all the console action. """
setproctitle('image-scraper')
scraper = ImageScraper()
scraper.get_arguments()
print("\nImageScraper\n============\nRequesting page....\n")
try:
scraper.get_html()
except PageLoadError as err:
if err.status_code is None:
print("ImageScraper is unable to acces the internet.")
else:
print("Page failed to load. Status code: {0}".format(err.status_code))
sys.exit()
scraper.get_img_list()
if len(scraper.images) == 0:
sys.exit("Sorry, no images found.")
if scraper.no_to_download is None:
scraper.no_to_download = len(scraper.images)
print("Found {0} images: ".format(len(scraper.images)))
try:
scraper.process_download_path()
except DirectoryAccessError:
print("Sorry, the directory can't be accessed.")
sys.exit()
except DirectoryCreateError:
print("Sorry, the directory can't be created.")
sys.exit()
if scraper.dump_urls:
for img_url in scraper.images:
print(img_url)
status_flags = {'count': 0, 'percent': 0.0, 'failed': 0, 'under_min_or_over_max_filesize': 0}
widgets = ['Progress: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
' ', ETA(), ' ', FileTransferSpeed()]
pbar = ProgressBar(widgets=widgets, maxval=100).start()
pool = ThreadPoolExecutor(max_workers=scraper.nthreads)
status_lock = threading.Lock()
for img_url in scraper.images:
if status_flags['count'] == scraper.no_to_download:
break
pool.submit(download_worker_fn, scraper, img_url, pbar, status_flags, status_lock)
status_flags['count'] += 1
pool.shutdown(wait=True)
pbar.finish()
print("\nDone!\nDownloaded {0} images\nFailed: {1}\n".format(
status_flags['count']-status_flags['failed']-status_flags['under_min_or_over_max_filesize'],
status_flags['failed']))
return | [
"def",
"console_main",
"(",
")",
":",
"setproctitle",
"(",
"'image-scraper'",
")",
"scraper",
"=",
"ImageScraper",
"(",
")",
"scraper",
".",
"get_arguments",
"(",
")",
"print",
"(",
"\"\\nImageScraper\\n============\\nRequesting page....\\n\"",
")",
"try",
":",
"scr... | This function handles all the console action. | [
"This",
"function",
"handles",
"all",
"the",
"console",
"action",
"."
] | 04cdefaa184420637d02b5a285cf407bbd428929 | https://github.com/sananth12/ImageScraper/blob/04cdefaa184420637d02b5a285cf407bbd428929/image_scraper/mains.py#L26-L79 | train | 207,895 |
btubbs/sseclient | sseclient.py | Event.parse | def parse(cls, raw):
"""
Given a possibly-multiline string representing an SSE message, parse it
and return a Event object.
"""
msg = cls()
for line in raw.splitlines():
m = cls.sse_line_pattern.match(line)
if m is None:
# Malformed line. Discard but warn.
warnings.warn('Invalid SSE line: "%s"' % line, SyntaxWarning)
continue
name = m.group('name')
if name == '':
# line began with a ":", so is a comment. Ignore
continue
value = m.group('value')
if name == 'data':
# If we already have some data, then join to it with a newline.
# Else this is it.
if msg.data:
msg.data = '%s\n%s' % (msg.data, value)
else:
msg.data = value
elif name == 'event':
msg.event = value
elif name == 'id':
msg.id = value
elif name == 'retry':
msg.retry = int(value)
return msg | python | def parse(cls, raw):
"""
Given a possibly-multiline string representing an SSE message, parse it
and return a Event object.
"""
msg = cls()
for line in raw.splitlines():
m = cls.sse_line_pattern.match(line)
if m is None:
# Malformed line. Discard but warn.
warnings.warn('Invalid SSE line: "%s"' % line, SyntaxWarning)
continue
name = m.group('name')
if name == '':
# line began with a ":", so is a comment. Ignore
continue
value = m.group('value')
if name == 'data':
# If we already have some data, then join to it with a newline.
# Else this is it.
if msg.data:
msg.data = '%s\n%s' % (msg.data, value)
else:
msg.data = value
elif name == 'event':
msg.event = value
elif name == 'id':
msg.id = value
elif name == 'retry':
msg.retry = int(value)
return msg | [
"def",
"parse",
"(",
"cls",
",",
"raw",
")",
":",
"msg",
"=",
"cls",
"(",
")",
"for",
"line",
"in",
"raw",
".",
"splitlines",
"(",
")",
":",
"m",
"=",
"cls",
".",
"sse_line_pattern",
".",
"match",
"(",
"line",
")",
"if",
"m",
"is",
"None",
":",... | Given a possibly-multiline string representing an SSE message, parse it
and return a Event object. | [
"Given",
"a",
"possibly",
"-",
"multiline",
"string",
"representing",
"an",
"SSE",
"message",
"parse",
"it",
"and",
"return",
"a",
"Event",
"object",
"."
] | d86d9a752c2135fe8b6f15358b2ba14da4d827ab | https://github.com/btubbs/sseclient/blob/d86d9a752c2135fe8b6f15358b2ba14da4d827ab/sseclient.py#L155-L188 | train | 207,896 |
pytest-dev/pluggy | scripts/release.py | get_upstream | def get_upstream(repo: Repo) -> Remote:
"""Find upstream repository for pluggy on the remotes"""
for remote in repo.remotes:
for url in remote.urls:
if url.endswith("pytest-dev/pluggy.git"):
return remote
raise RuntimeError("could not find tox-dev/tox.git remote") | python | def get_upstream(repo: Repo) -> Remote:
"""Find upstream repository for pluggy on the remotes"""
for remote in repo.remotes:
for url in remote.urls:
if url.endswith("pytest-dev/pluggy.git"):
return remote
raise RuntimeError("could not find tox-dev/tox.git remote") | [
"def",
"get_upstream",
"(",
"repo",
":",
"Repo",
")",
"->",
"Remote",
":",
"for",
"remote",
"in",
"repo",
".",
"remotes",
":",
"for",
"url",
"in",
"remote",
".",
"urls",
":",
"if",
"url",
".",
"endswith",
"(",
"\"pytest-dev/pluggy.git\"",
")",
":",
"re... | Find upstream repository for pluggy on the remotes | [
"Find",
"upstream",
"repository",
"for",
"pluggy",
"on",
"the",
"remotes"
] | 4de9e440eeadd9f0eb8c5232b349ef64e20e33fb | https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/scripts/release.py#L27-L33 | train | 207,897 |
pytest-dev/pluggy | pluggy/manager.py | PluginManager.register | def register(self, plugin, name=None):
""" Register a plugin and return its canonical name or None if the name
is blocked from registering. Raise a ValueError if the plugin is already
registered. """
plugin_name = name or self.get_canonical_name(plugin)
if plugin_name in self._name2plugin or plugin in self._plugin2hookcallers:
if self._name2plugin.get(plugin_name, -1) is None:
return # blocked plugin, return None to indicate no registration
raise ValueError(
"Plugin already registered: %s=%s\n%s"
% (plugin_name, plugin, self._name2plugin)
)
# XXX if an error happens we should make sure no state has been
# changed at point of return
self._name2plugin[plugin_name] = plugin
# register matching hook implementations of the plugin
self._plugin2hookcallers[plugin] = hookcallers = []
for name in dir(plugin):
hookimpl_opts = self.parse_hookimpl_opts(plugin, name)
if hookimpl_opts is not None:
normalize_hookimpl_opts(hookimpl_opts)
method = getattr(plugin, name)
hookimpl = HookImpl(plugin, plugin_name, method, hookimpl_opts)
hook = getattr(self.hook, name, None)
if hook is None:
hook = _HookCaller(name, self._hookexec)
setattr(self.hook, name, hook)
elif hook.has_spec():
self._verify_hook(hook, hookimpl)
hook._maybe_apply_history(hookimpl)
hook._add_hookimpl(hookimpl)
hookcallers.append(hook)
return plugin_name | python | def register(self, plugin, name=None):
""" Register a plugin and return its canonical name or None if the name
is blocked from registering. Raise a ValueError if the plugin is already
registered. """
plugin_name = name or self.get_canonical_name(plugin)
if plugin_name in self._name2plugin or plugin in self._plugin2hookcallers:
if self._name2plugin.get(plugin_name, -1) is None:
return # blocked plugin, return None to indicate no registration
raise ValueError(
"Plugin already registered: %s=%s\n%s"
% (plugin_name, plugin, self._name2plugin)
)
# XXX if an error happens we should make sure no state has been
# changed at point of return
self._name2plugin[plugin_name] = plugin
# register matching hook implementations of the plugin
self._plugin2hookcallers[plugin] = hookcallers = []
for name in dir(plugin):
hookimpl_opts = self.parse_hookimpl_opts(plugin, name)
if hookimpl_opts is not None:
normalize_hookimpl_opts(hookimpl_opts)
method = getattr(plugin, name)
hookimpl = HookImpl(plugin, plugin_name, method, hookimpl_opts)
hook = getattr(self.hook, name, None)
if hook is None:
hook = _HookCaller(name, self._hookexec)
setattr(self.hook, name, hook)
elif hook.has_spec():
self._verify_hook(hook, hookimpl)
hook._maybe_apply_history(hookimpl)
hook._add_hookimpl(hookimpl)
hookcallers.append(hook)
return plugin_name | [
"def",
"register",
"(",
"self",
",",
"plugin",
",",
"name",
"=",
"None",
")",
":",
"plugin_name",
"=",
"name",
"or",
"self",
".",
"get_canonical_name",
"(",
"plugin",
")",
"if",
"plugin_name",
"in",
"self",
".",
"_name2plugin",
"or",
"plugin",
"in",
"sel... | Register a plugin and return its canonical name or None if the name
is blocked from registering. Raise a ValueError if the plugin is already
registered. | [
"Register",
"a",
"plugin",
"and",
"return",
"its",
"canonical",
"name",
"or",
"None",
"if",
"the",
"name",
"is",
"blocked",
"from",
"registering",
".",
"Raise",
"a",
"ValueError",
"if",
"the",
"plugin",
"is",
"already",
"registered",
"."
] | 4de9e440eeadd9f0eb8c5232b349ef64e20e33fb | https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/pluggy/manager.py#L70-L105 | train | 207,898 |
pytest-dev/pluggy | pluggy/manager.py | PluginManager.unregister | def unregister(self, plugin=None, name=None):
""" unregister a plugin object and all its contained hook implementations
from internal data structures. """
if name is None:
assert plugin is not None, "one of name or plugin needs to be specified"
name = self.get_name(plugin)
if plugin is None:
plugin = self.get_plugin(name)
# if self._name2plugin[name] == None registration was blocked: ignore
if self._name2plugin.get(name):
del self._name2plugin[name]
for hookcaller in self._plugin2hookcallers.pop(plugin, []):
hookcaller._remove_plugin(plugin)
return plugin | python | def unregister(self, plugin=None, name=None):
""" unregister a plugin object and all its contained hook implementations
from internal data structures. """
if name is None:
assert plugin is not None, "one of name or plugin needs to be specified"
name = self.get_name(plugin)
if plugin is None:
plugin = self.get_plugin(name)
# if self._name2plugin[name] == None registration was blocked: ignore
if self._name2plugin.get(name):
del self._name2plugin[name]
for hookcaller in self._plugin2hookcallers.pop(plugin, []):
hookcaller._remove_plugin(plugin)
return plugin | [
"def",
"unregister",
"(",
"self",
",",
"plugin",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"assert",
"plugin",
"is",
"not",
"None",
",",
"\"one of name or plugin needs to be specified\"",
"name",
"=",
"self",
".",
"g... | unregister a plugin object and all its contained hook implementations
from internal data structures. | [
"unregister",
"a",
"plugin",
"object",
"and",
"all",
"its",
"contained",
"hook",
"implementations",
"from",
"internal",
"data",
"structures",
"."
] | 4de9e440eeadd9f0eb8c5232b349ef64e20e33fb | https://github.com/pytest-dev/pluggy/blob/4de9e440eeadd9f0eb8c5232b349ef64e20e33fb/pluggy/manager.py#L130-L147 | train | 207,899 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.