_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 31 13.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q19200 | SlabDict.pop | train | def pop(self, name, defval=None):
'''
Pop a name from the SlabDict.
Args:
name (str): The name to remove.
defval (obj): The default value to return if the name is not present.
Returns:
object: The object stored in the SlabDict, or defval if | python | {
"resource": ""
} |
q19201 | Slab.copydb | train | def copydb(self, sourcedb, destslab, destdbname=None, progresscb=None):
'''
Copy an entire database in this slab to a new database in potentially another slab.
Args:
sourcedb (LmdbDatabase): which database in this slab to copy rows from
destslab (LmdbSlab): which slab to copy rows to
destdbname (str): the name of the database to copy rows to in destslab
progresscb (Callable[int]): if not None, this function will be periodically called with the number of rows
completed
Returns:
(int): the number of rows copied
Note:
If any rows already exist in the target database, this method returns an error. This means that one cannot
use destdbname=None unless there are no explicit databases in the destination slab.
'''
destdb = destslab.initdb(destdbname, sourcedb.dupsort)
statdict = destslab.stat(db=destdb)
if statdict['entries'] > 0:
raise s_exc.DataAlreadyExists()
rowcount = 0
| python | {
"resource": ""
} |
q19202 | Slab.replace | train | def replace(self, lkey, lval, db=None):
'''
Like put, but returns the previous value if existed
'''
| python | {
"resource": ""
} |
q19203 | Trigger._match_idens | train | async def _match_idens(self, core, prefix):
'''
Returns the iden that starts with prefix. Prints out error and returns None if it doesn't match
exactly one.
'''
idens = [iden for iden, trig in await core.listTriggers()]
| python | {
"resource": ""
} |
q19204 | decode | train | def decode(name, byts, **opts):
'''
Decode the given byts with the named decoder.
If name is a comma separated list of decoders,
loop through and do them all.
Example:
byts = s_encoding.decode('base64',byts)
Note: Decoder names may also be prefixed with +
to *encode* for that name/layer.
'''
for name in name.split(','): | python | {
"resource": ""
} |
q19205 | addFormat | train | def addFormat(name, fn, opts):
'''
Add an additional ingest file format
'''
| python | {
"resource": ""
} |
q19206 | iterdata | train | def iterdata(fd, close_fd=True, **opts):
'''
Iterate through the data provided by a file like object.
Optional parameters may be used to control how the data
is deserialized.
Examples:
The following example shows use of the iterdata function.::
with open('foo.csv','rb') as fd:
for row in iterdata(fd, format='csv', encoding='utf8'):
dostuff(row)
Args:
fd (file) : File like object to iterate over.
close_fd (bool) : Default behavior is to close the fd object.
If this is not true, the fd will not be closed.
**opts (dict): Ingest open directive. Causes the data in the fd
to be parsed according to the 'format' key and any
additional arguments.
Yields:
An item to process. The type of the item is dependent on the format
parameters.
'''
| python | {
"resource": ""
} |
q19207 | overlap | train | def overlap(ival0, ival1):
'''
Determine if two interval tuples have overlap.
Args:
iv0 ((int,int)): An interval tuple
iv1 ((int,int)); An interval tuple
Returns:
| python | {
"resource": ""
} |
q19208 | getTempCortex | train | async def getTempCortex(mods=None):
'''
Get a proxy to a cortex backed by a temporary directory.
Args:
mods (list): A list of modules which are loaded into the cortex.
Notes:
The cortex and temporary directory are torn down on exit.
This should only be called from synchronous code.
Returns:
Proxy to the cortex.
| python | {
"resource": ""
} |
q19209 | CoreApi.addTrigger | train | async def addTrigger(self, condition, query, info):
'''
Adds a trigger to the cortex
'''
| python | {
"resource": ""
} |
q19210 | CoreApi._trig_auth_check | train | def _trig_auth_check(self, useriden):
''' Check that, as a non-admin, may only manipulate resources created by you. '''
if not self.user.admin and useriden != self.user.iden:
| python | {
"resource": ""
} |
q19211 | CoreApi.delTrigger | train | async def delTrigger(self, iden):
'''
Deletes a trigger from the cortex
'''
trig = self.cell.triggers.get(iden)
| python | {
"resource": ""
} |
q19212 | CoreApi.updateTrigger | train | async def updateTrigger(self, iden, query):
'''
Change an existing trigger's query
'''
trig = self.cell.triggers.get(iden) | python | {
"resource": ""
} |
q19213 | CoreApi.listTriggers | train | async def listTriggers(self):
'''
Lists all the triggers that the current user is authorized to access
'''
trigs = []
for (iden, trig) in self.cell.triggers.list():
useriden = trig['useriden']
if not (self.user.admin or useriden == self.user.iden):
| python | {
"resource": ""
} |
q19214 | CoreApi.addCronJob | train | async def addCronJob(self, query, reqs, incunit=None, incval=1):
'''
Add a cron job to the cortex
A cron job is a persistently-stored item that causes storm queries to be run in the future. The specification
for the times that the queries run can be one-shot or recurring.
Args:
query (str): The storm query to execute in the future
reqs (Union[Dict[str, Union[int, List[int]]], List[Dict[...]]]):
Either a dict of the fixed time fields or a list of such dicts. The keys are in the set ('year',
'month', 'dayofmonth', 'dayofweek', 'hour', 'minute'. The values must be positive integers, except for
the key of 'dayofmonth' in which it may also be a negative integer which represents the number of days
from the end of the month with -1 representing the last day of the month. All values may also be lists
of valid values.
incunit (Optional[str]):
A member of the same set as above, with an additional member 'day'. If it is None (default), then the
appointment is one-shot and will not recur.
incval (Union[int, List[int]):
A integer or a list of integers of the number of units
Returns (bytes):
| python | {
"resource": ""
} |
q19215 | CoreApi.delCronJob | train | async def delCronJob(self, iden):
'''
Delete a cron job
Args:
iden (bytes): The iden of the cron job to be deleted
'''
cron = self.cell.agenda.appts.get(iden)
if cron is None:
| python | {
"resource": ""
} |
q19216 | CoreApi.updateCronJob | train | async def updateCronJob(self, iden, query):
'''
Change an existing cron job's query
Args:
iden (bytes): The iden of the cron job to be changed
'''
cron = self.cell.agenda.appts.get(iden)
if cron is None: | python | {
"resource": ""
} |
q19217 | CoreApi.listCronJobs | train | async def listCronJobs(self):
'''
Get information about all the cron jobs accessible to the current user
'''
crons = []
for iden, cron in self.cell.agenda.list():
useriden = cron['useriden']
if not (self.user.admin or useriden == self.user.iden):
| python | {
"resource": ""
} |
q19218 | CoreApi.addNodeTag | train | async def addNodeTag(self, iden, tag, valu=(None, None)):
'''
Add a tag to a node specified by iden.
Args:
iden (str): A hex encoded node BUID.
tag (str): A tag string.
valu (tuple): A time interval tuple or (None, None).
'''
buid = s_common.uhex(iden)
parts = tag.split('.')
self._reqUserAllowed('tag:add', *parts)
async with await self.cell.snap(user=self.user) as | python | {
"resource": ""
} |
q19219 | CoreApi.addNodes | train | async def addNodes(self, nodes):
'''
Add a list of packed nodes to the cortex.
Args:
nodes (list): [ ( (form, valu), {'props':{}, 'tags':{}}), ... ]
Yields:
(tuple): Packed node tuples ((form,valu), {'props': {}, 'tags':{}})
'''
# First check that that user may add each form
done = {}
for node in nodes:
formname = node[0][0]
if done.get(formname):
continue
self._reqUserAllowed('node:add', formname)
| python | {
"resource": ""
} |
q19220 | CoreApi.count | train | async def count(self, text, opts=None):
'''
Count the number of nodes which result from a storm query.
Args:
text (str): Storm query text.
opts (dict): Storm query options.
Returns:
| python | {
"resource": ""
} |
q19221 | CoreApi.eval | train | async def eval(self, text, opts=None):
'''
Evaluate a storm query and yield packed nodes.
'''
| python | {
"resource": ""
} |
q19222 | CoreApi.splices | train | async def splices(self, offs, size):
'''
Return the list of splices at the given offset.
'''
count = 0
async for mesg in self.cell.view.layers[0].splices(offs, size):
| python | {
"resource": ""
} |
q19223 | CoreApi.getProvStack | train | async def getProvStack(self, iden: str):
'''
Return the provenance stack associated with the given iden.
Args:
iden (str): the iden from splice
Note: the iden appears on each splice entry | python | {
"resource": ""
} |
q19224 | Cortex._initStormCmds | train | def _initStormCmds(self):
'''
Registration for built-in Storm commands.
'''
self.addStormCmd(s_storm.MaxCmd)
self.addStormCmd(s_storm.MinCmd)
self.addStormCmd(s_storm.HelpCmd)
self.addStormCmd(s_storm.IdenCmd)
self.addStormCmd(s_storm.SpinCmd)
self.addStormCmd(s_storm.SudoCmd)
self.addStormCmd(s_storm.UniqCmd)
self.addStormCmd(s_storm.CountCmd)
self.addStormCmd(s_storm.GraphCmd) | python | {
"resource": ""
} |
q19225 | Cortex._initStormLibs | train | def _initStormLibs(self):
'''
Registration for built-in Storm Libraries
'''
self.addStormLib(('str',), | python | {
"resource": ""
} |
q19226 | Cortex._initSplicers | train | def _initSplicers(self):
'''
Registration for splice handlers.
'''
splicers = {
'tag:add': self._onFeedTagAdd,
'tag:del': self._onFeedTagDel,
'node:add': self._onFeedNodeAdd,
'node:del': self._onFeedNodeDel,
| python | {
"resource": ""
} |
q19227 | Cortex._initLayerCtors | train | def _initLayerCtors(self):
'''
Registration for built-in Layer ctors
'''
| python | {
"resource": ""
} |
q19228 | Cortex._initFeedFuncs | train | def _initFeedFuncs(self):
'''
Registration for built-in Cortex feed functions.
'''
self.setFeedFunc('syn.nodes', self._addSynNodes)
| python | {
"resource": ""
} |
q19229 | Cortex._initCortexHttpApi | train | def _initCortexHttpApi(self):
'''
Registration for built-in Cortex httpapi endpoints
'''
self.addHttpApi('/api/v1/storm', s_httpapi.StormV1, {'cell': self})
| python | {
"resource": ""
} |
q19230 | Cortex._calcFormCounts | train | async def _calcFormCounts(self):
'''
Recalculate form counts from scratch.
'''
logger.info('Calculating form counts from scratch.')
self.counts.clear()
nameforms = list(self.model.forms.items())
fairiter = 5
tcount = 0
for i, (name, form) in enumerate(nameforms, 1):
logger.info('Calculating form counts for [%s] [%s/%s]',
name, i, len(nameforms))
count = 0
async for buid, valu in self.view.layers[0].iterFormRows(name):
count += 1
tcount += 1
if count % fairiter == 0:
await asyncio.sleep(0)
# identity check for small integer
| python | {
"resource": ""
} |
q19231 | Cortex.onTagAdd | train | def onTagAdd(self, name, func):
'''
Register a callback for tag addition.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
'''
# TODO allow | python | {
"resource": ""
} |
q19232 | Cortex.offTagAdd | train | def offTagAdd(self, name, func):
'''
Unregister a callback for tag addition.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
| python | {
"resource": ""
} |
q19233 | Cortex.onTagDel | train | def onTagDel(self, name, func):
'''
Register a callback for tag deletion.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
'''
| python | {
"resource": ""
} |
q19234 | Cortex.offTagDel | train | def offTagDel(self, name, func):
'''
Unregister a callback for tag deletion.
Args:
name (str): The name of the tag or tag glob.
func (function): The callback func(node, tagname, tagval).
| python | {
"resource": ""
} |
q19235 | Cortex.runRuntLift | train | async def runRuntLift(self, full, valu=None, cmpr=None):
'''
Execute a runt lift function.
Args:
full (str): Property to lift by.
valu:
cmpr:
Returns:
bytes, list: Yields bytes, list tuples where the list contains a series of
key/value pairs which are used to construct a Node object.
'''
func = self._runtLiftFuncs.get(full)
if func is None:
| python | {
"resource": ""
} |
q19236 | Cortex.delView | train | async def delView(self, iden):
'''
Delete a cortex view by iden.
'''
if iden == self.iden:
raise s_exc.SynErr(mesg='cannot delete the main view')
view = self.views.pop(iden, None)
if view is None:
| python | {
"resource": ""
} |
q19237 | Cortex.addLayer | train | async def addLayer(self, **info):
'''
Add a Layer to the cortex.
Notes:
The addLayer ``**info`` arg is expected to be shaped like the following::
info = {
'iden': <str>, ( optional iden. default guid() )
'type': <str>, ( optional type. default lmdb )
'owner': <str>, ( optional owner. default root )
'config': {}, # type specific config options.
}
'''
iden = info.pop('iden', None)
if iden is None:
iden = s_common.guid()
| python | {
"resource": ""
} |
q19238 | Cortex.joinTeleLayer | train | async def joinTeleLayer(self, url, indx=None):
'''
Convenience function to join a remote telepath layer
into this cortex and default view.
'''
info = {
'type': 'remote',
'owner': 'root',
'config': {
'url': url
| python | {
"resource": ""
} |
q19239 | Cortex.addStormCmd | train | def addStormCmd(self, ctor):
'''
Add a synapse.lib.storm.Cmd class to the cortex.
| python | {
"resource": ""
} |
q19240 | Cortex._getSynIngestNodes | train | def _getSynIngestNodes(self, item):
'''
Get a list of packed nodes from a ingest definition.
'''
pnodes = []
seen = item.get('seen')
# Track all the ndefs we make so we can make sources
ndefs = []
# Make the form nodes
tags = item.get('tags', {})
forms = item.get('forms', {})
for form, valus in forms.items():
for valu in valus:
ndef = [form, valu]
ndefs.append(ndef)
obj = [ndef, {'tags': tags}]
if seen:
obj[1]['props'] = {'.seen': seen}
pnodes.append(obj)
# Make the packed nodes
nodes = item.get('nodes', ())
for pnode in nodes:
ndefs.append(pnode[0])
pnode[1].setdefault('tags', {})
for tag, valu in tags.items():
# Tag in the packed node has a higher precedence
# than the tag in the whole ingest set of data.
pnode[1]['tags'].setdefault(tag, valu)
if seen:
pnode[1].setdefault('props', {})
pnode[1]['props'].setdefault('.seen', seen)
pnodes.append(pnode)
# Make edges
for srcdef, etyp, destndefs in item.get('edges', ()):
for destndef in destndefs:
ndef = [etyp, [srcdef, destndef]]
ndefs.append(ndef)
obj = [ndef, {}]
if seen:
obj[1]['props'] = {'.seen': seen}
if tags:
obj[1]['tags'] = tags.copy()
pnodes.append(obj)
# Make time based edges
| python | {
"resource": ""
} |
q19241 | Cortex.eval | train | async def eval(self, text, opts=None, user=None):
'''
Evaluate a storm query and yield Nodes only.
'''
if user is None:
user = self.auth.getUserByName('root')
await self.boss.promote('storm', user=user, info={'query': text})
| python | {
"resource": ""
} |
q19242 | Cortex.nodes | train | async def nodes(self, text, opts=None, user=None):
'''
A simple non-streaming way to return a list of nodes.
'''
| python | {
"resource": ""
} |
q19243 | Cortex.getStormQuery | train | def getStormQuery(self, text):
'''
Parse storm query text and return a Query object.
'''
| python | {
"resource": ""
} |
q19244 | Cortex._logStormQuery | train | def _logStormQuery(self, text, user):
'''
Log a storm query.
'''
if self.conf.get('storm:log'):
lvl = self.conf.get('storm:log:level')
| python | {
"resource": ""
} |
q19245 | Cortex.getNodesBy | train | async def getNodesBy(self, full, valu, cmpr='='):
'''
Get nodes by a property value or lift syntax.
Args:
full (str): The full name of a property <form>:<prop>.
valu (obj): A value that the type knows how to lift by.
cmpr (str): The comparison operator you are lifting by.
Some node property types allow special syntax here.
Examples:
# simple lift by property equality
core.getNodesBy('file:bytes:size', 20)
| python | {
"resource": ""
} |
q19246 | Cortex.snap | train | async def snap(self, user=None, view=None):
'''
Return a transaction object for the default view.
Args:
write (bool): Set to True for a write transaction.
Returns:
| python | {
"resource": ""
} |
q19247 | Cortex.loadCoreModule | train | async def loadCoreModule(self, ctor, conf=None):
'''
Load a single cortex module with the given ctor and conf.
Args:
ctor (str): The python module class path
conf (dict):Config dictionary for the module
'''
if conf is None:
conf = {}
modu = self._loadCoreModule(ctor, conf=conf)
try:
await s_coro.ornot(modu.preCoreModule)
except asyncio.CancelledError: # pragma: no cover
| python | {
"resource": ""
} |
q19248 | Cortex.getPropNorm | train | async def getPropNorm(self, prop, valu):
'''
Get the normalized property value based on the Cortex data model.
Args:
prop (str): The property to normalize.
valu: The value to normalize.
Returns:
(tuple): A two item tuple, containing the normed value and the info dictionary.
Raises:
s_exc.NoSuchProp: If the prop does not exist.
s_exc.BadTypeValu: If the value fails to normalize.
'''
pobj | python | {
"resource": ""
} |
q19249 | Cortex.getTypeNorm | train | async def getTypeNorm(self, name, valu):
'''
Get the normalized type value based on the Cortex data model.
Args:
name (str): The type to normalize.
valu: The value to normalize.
Returns:
(tuple): A two item tuple, containing the normed value and the info dictionary.
Raises:
s_exc.NoSuchType: If the type does not exist.
s_exc.BadTypeValu: If the value fails to normalize.
'''
tobj | python | {
"resource": ""
} |
q19250 | LmdbLayer.stor | train | async def stor(self, sops, splices=None):
'''
Execute a series of storage operations.
Overrides implementation in layer.py to avoid unnecessary async calls.
'''
for oper in sops:
func = self._stor_funcs.get(oper[0])
if func is None: # pragma: | python | {
"resource": ""
} |
q19251 | LmdbLayer._migrate_db_pre010 | train | def _migrate_db_pre010(self, dbname, newslab):
'''
Check for any pre-010 entries in 'dbname' in my slab and migrate those to the new slab.
Once complete, drop the database from me with the name 'dbname'
Returns (bool): True if a migration occurred, else False
'''
donekey = f'migrdone:{dbname}'
if self.metadict.get(donekey, False):
return
if not self.layrslab.dbexists(dbname):
self.metadict.set(donekey, True)
return False
oldslab = self.layrslab
olddb = oldslab.initdb(dbname)
entries = oldslab.stat(olddb)['entries']
if not entries:
self.metadict.set(donekey, True)
return False
if newslab.dbexists(dbname):
logger.warning('Incomplete migration detected. Dropping | python | {
"resource": ""
} |
q19252 | LmdbLayer.migrateProvPre010 | train | def migrateProvPre010(self, newslab):
'''
Check for any pre-010 provstacks and migrate those to the new slab.
'''
| python | {
"resource": ""
} |
q19253 | LmdbLayer.storPropSet | train | async def storPropSet(self, buid, prop, valu):
'''
Migration-only function
'''
assert self.buidcache.disabled
indx = prop.type.indx(valu)
if indx is not None and len(indx) > MAX_INDEX_LEN:
mesg = 'index bytes are too large'
raise s_exc.BadIndxValu(mesg=mesg, prop=prop, | python | {
"resource": ""
} |
q19254 | openurl | train | async def openurl(url, **opts):
'''
Open a URL to a remote telepath object.
Args:
url (str): A telepath URL.
**opts (dict): Telepath connect options.
Returns:
(synapse.telepath.Proxy): A telepath proxy object.
The telepath proxy may then be used for sync or async calls:
proxy = openurl(url)
value = proxy.getFooThing()
... or ...
proxy = await openurl(url)
valu = await proxy.getFooThing()
... or ...
async with await openurl(url) as proxy:
valu = await proxy.getFooThing()
'''
if url.find('://') == -1:
newurl = alias(url)
if newurl is None:
raise s_exc.BadUrl(f':// not found in [{url}] and no alias found!')
url = newurl
info = s_urlhelp.chopurl(url)
info.update(opts)
host = info.get('host')
port = info.get('port')
auth = None
user = info.get('user')
if user is not None:
passwd = info.get('passwd')
auth = (user, {'passwd': passwd})
scheme = info.get('scheme')
if scheme == 'cell':
# cell:///path/to/celldir:share
# cell://rel/path/to/celldir:share
path = info.get('path')
name = info.get('name', '*')
# support cell://<relpath>/<to>/<cell>
# by detecting host...
host = info.get('host')
if host:
| python | {
"resource": ""
} |
q19255 | Proxy.call | train | async def call(self, methname, *args, **kwargs):
'''
Call a remote method by name.
Args:
methname (str): The name of the remote method.
*args: Arguments to the method call.
**kwargs: Keyword arguments to the method call.
Most use cases will likely use the proxy methods directly:
| python | {
"resource": ""
} |
q19256 | RateLimit.allows | train | def allows(self):
'''
Returns True if the rate limit has not been reached.
Example:
if not rlimit.allows():
raise RateExceeded()
# ok to go...
'''
tick = time.time()
passed = tick - self.lasttick
| python | {
"resource": ""
} |
q19257 | Layer.disablingBuidCache | train | def disablingBuidCache(self):
'''
Disable and invalidate the layer buid cache for migration
'''
| python | {
"resource": ""
} |
q19258 | getClsNames | train | def getClsNames(item):
'''
Return a list of "fully qualified" class names for an instance.
Example:
for name in getClsNames(foo):
| python | {
"resource": ""
} |
q19259 | getShareInfo | train | def getShareInfo(item):
'''
Get a dictionary of special annotations for a Telepath Proxy.
Args:
item: Item to inspect.
Notes:
This will set the ``_syn_telemeth`` attribute on the item
and the items class, so this data is only computed once.
Returns:
dict: A dictionary of methods requiring special handling by the proxy.
'''
key = f'_syn_sharinfo_{item.__class__.__module__}_{item.__class__.__qualname__}'
info = getattr(item, key, None)
if info is not None:
return info
meths = {}
info = {'meths': meths}
for name in dir(item):
if name.startswith('_'):
continue
attr = getattr(item, name, None)
if not callable(attr):
continue
# We know we can cleanly unwrap these functions
# for asyncgenerator inspection.
wrapped = getattr(attr, '__syn_wrapped__', None)
if wrapped in unwraps:
real = inspect.unwrap(attr)
if inspect.isasyncgenfunction(real): | python | {
"resource": ""
} |
q19260 | CellApi.getHiveKey | train | async def getHiveKey(self, path):
''' Get the value of a key in the cell default hive '''
perm = ('hive:get',) + path
| python | {
"resource": ""
} |
q19261 | CellApi.setHiveKey | train | async def setHiveKey(self, path, value):
''' Set or change the value of a key in the cell default hive '''
perm = ('hive:set',) + path
| python | {
"resource": ""
} |
q19262 | CellApi.popHiveKey | train | async def popHiveKey(self, path):
''' Remove and return the value of a key in the cell default hive '''
perm = ('hive:pop',) + path
| python | {
"resource": ""
} |
q19263 | CellApi.getAuthInfo | train | async def getAuthInfo(self, name):
'''
An admin only API endpoint for getting user info.
'''
item = self._getAuthItem(name)
pack = item.pack()
# translate role guids to names for back compat
| python | {
"resource": ""
} |
q19264 | _asynciostacks | train | def _asynciostacks(*args, **kwargs): # pragma: no cover
'''
A signal handler used to print asyncio task stacks and thread stacks.
'''
print(80 * '*')
print('Asyncio tasks stacks:')
tasks = asyncio.all_tasks(_glob_loop)
for | python | {
"resource": ""
} |
q19265 | sync | train | def sync(coro, timeout=None):
'''
Schedule a coroutine to run on the global loop and return its result.
Args:
coro (coroutine): The coroutine instance.
Notes:
This | python | {
"resource": ""
} |
q19266 | synchelp | train | def synchelp(f):
'''
The synchelp decorator allows the transparent execution of
a coroutine using the global loop from a thread other than
the event loop. In both use cases, the actual work is done
by the global event loop.
Examples:
Use as a decorator::
@s_glob.synchelp
async def stuff(x, y):
await dostuff()
Calling the stuff function as regular async code using the standard await syntax::
valu = await stuff(x, y)
| python | {
"resource": ""
} |
q19267 | Cron._parse_weekday | train | def _parse_weekday(val):
''' Try to match a day-of-week abbreviation, then try a day-of-week full name '''
val = val.title()
try:
return list(calendar.day_abbr).index(val)
except ValueError:
| python | {
"resource": ""
} |
q19268 | Cron._parse_incval | train | def _parse_incval(incunit, incval):
''' Parse a non-day increment value. Should be an integer or a comma-separated integer list. '''
try:
retn = [int(val) for val in incval.split(',')]
| python | {
"resource": ""
} |
q19269 | Cron._parse_req | train | def _parse_req(requnit, reqval):
''' Parse a non-day fixed value '''
assert reqval[0] != '='
try:
retn = []
for val in reqval.split(','):
if requnit == 'month':
if reqval[0].isdigit():
retn.append(int(reqval)) # must be a month (1-12)
| python | {
"resource": ""
} |
q19270 | Cron._handle_stat | train | async def _handle_stat(self, core, opts):
''' Prints details about a particular cron job. Not actually a different API call '''
prefix = opts.prefix
crons = await core.listCronJobs()
idens = [cron[0] for cron in crons]
matches = [iden for iden in idens if iden.startswith(prefix)]
if len(matches) == 0:
self.printf('Error: provided iden does not match any valid authorized cron job')
return
elif len(matches) > 1:
self.printf('Error: provided iden matches more than one cron job')
return
iden = matches[0]
cron = [cron[1] for cron in crons if cron[0] == iden][0]
user = cron.get('username') or '<None>'
query = cron.get('query') or '<missing>'
isrecur = 'Yes' if cron.get('recur') else 'No'
startcount = cron.get('startcount') or 0
recs = cron.get('recs', [])
laststart = cron.get('laststarttime')
lastend = cron.get('lastfinishtime')
laststart = 'Never' if laststart is None else self._format_timestamp(laststart)
lastend = 'Never' if lastend is None else self._format_timestamp(lastend)
lastresult = cron.get('lastresult') or '<None>'
self.printf(f'iden: {iden}')
self.printf(f'user: {user}')
self.printf(f'recurring: | python | {
"resource": ""
} |
q19271 | Boss.promote | train | async def promote(self, name, user, info=None):
'''
Promote the currently running task.
'''
task = asyncio.current_task()
synt = getattr(task, '_syn_task', None)
| python | {
"resource": ""
} |
q19272 | Boss.execute | train | async def execute(self, coro, name, user, info=None):
'''
Create a synapse task from the given coroutine.
'''
| python | {
"resource": ""
} |
q19273 | Base.on | train | def on(self, evnt, func, base=None):
'''
Add an base function callback for a specific event with optional filtering. If the function returns a
coroutine, it will be awaited.
Args:
evnt (str): An event name
func (function): A callback function to receive event tufo
Examples:
Add a callback function and fire it:
async def baz(event):
x = event[1].get('x')
y = event[1].get('y')
return x + y
d.on('foo', baz)
# this fire triggers baz...
| python | {
"resource": ""
} |
q19274 | Base.off | train | def off(self, evnt, func):
'''
Remove a previously registered event handler function.
Example:
base.off( 'foo', onFooFunc )
'''
funcs = self._syn_funcs.get(evnt)
if funcs is None: | python | {
"resource": ""
} |
q19275 | Base.fire | train | async def fire(self, evtname, **info):
'''
Fire the given event name on the Base.
Returns a list of the return values of each callback.
Example:
for ret in d.fire('woot',foo='asdf'):
| python | {
"resource": ""
} |
q19276 | Base.dist | train | async def dist(self, mesg):
'''
Distribute an existing event tuple.
Args:
mesg ((str,dict)): An event tuple.
Example:
await base.dist( ('foo',{'bar':'baz'}) )
'''
if self.isfini:
return ()
ret = []
for func in self._syn_funcs.get(mesg[0], ()):
try:
ret.append(await s_coro.ornot(func, mesg))
except asyncio.CancelledError:
raise
except Exception: | python | {
"resource": ""
} |
q19277 | Base.onWith | train | def onWith(self, evnt, func):
'''
A context manager which can be used to add a callback and remove it when
using a ``with`` statement.
Args:
evnt (str): An event name
func (function): A callback function to receive event tufo
'''
| python | {
"resource": ""
} |
q19278 | Base.schedCoro | train | def schedCoro(self, coro):
'''
Schedules a free-running coroutine to run on this base's event loop. Kills the coroutine if Base is fini'd.
It does not pend on coroutine completion.
Precondition:
This function is *not* threadsafe and must be run on the Base's event loop
Returns:
asyncio.Task: An asyncio.Task object.
'''
import synapse.lib.provenance as s_provenance # avoid import cycle
if __debug__:
assert s_coro.iscoro(coro)
import synapse.lib.threads as s_threads # avoid import cycle
assert s_threads.iden() == self.tid
task = self.loop.create_task(coro)
# In rare cases, (Like this function being triggered from call_soon_threadsafe), there's no task context | python | {
"resource": ""
} |
q19279 | Base.schedCoroSafePend | train | def schedCoroSafePend(self, coro):
'''
Schedules a coroutine to run as soon as possible on the same event loop that this Base is running on
Note:
This method may *not* be run inside an event loop
'''
if __debug__:
| python | {
"resource": ""
} |
q19280 | Base.waiter | train | def waiter(self, count, *names):
'''
Construct and return a new Waiter for events on this base.
Example:
# wait up to 3 seconds for 10 foo:bar events...
waiter = base.waiter(10,'foo:bar')
# .. fire thread that will cause foo:bar events
events = waiter.wait(timeout=3)
if events == None:
# handle the timout case...
for event in | python | {
"resource": ""
} |
q19281 | Waiter.wait | train | async def wait(self, timeout=None):
'''
Wait for the required number of events and return them or None on timeout.
Example:
evnts = waiter.wait(timeout=30)
if evnts == None:
handleTimedOut()
return
for evnt in evnts:
doStuff(evnt)
| python | {
"resource": ""
} |
q19282 | guid | train | def guid(valu=None):
'''
Get a 16 byte guid value.
By default, this is a random guid value.
Args:
valu: Object used to construct the guid valu from. This must be able
to be msgpack'd.
Returns:
str: 32 character, lowercase ascii string.
'''
| python | {
"resource": ""
} |
q19283 | buid | train | def buid(valu=None):
'''
A binary GUID like sequence of 32 bytes.
Args:
valu (object): Optional, if provided, the hash of the msgpack
encoded form of the object is returned. This can be used to
create stable buids.
Notes:
By default, this returns a random | python | {
"resource": ""
} |
q19284 | lockfile | train | def lockfile(path):
'''
A file lock with-block helper.
Args:
path (str): A path to a lock file.
Examples:
Get the lock on a file and dostuff while having the lock::
path = '/hehe/haha.lock'
with lockfile(path):
| python | {
"resource": ""
} |
q19285 | getexcfo | train | def getexcfo(e):
'''
Get an err tufo from an exception.
Args:
e (Exception): An Exception (or Exception subclass).
Notes:
This can be called outside of the context of an exception handler,
however details such as file, line, function name and source may be
missing.
Returns:
((str, dict)):
'''
tb = sys.exc_info()[2]
tbinfo = traceback.extract_tb(tb)
path, line, name, src = '', '', '', None
if tbinfo:
path, line, name, sorc = tbinfo[-1]
retd | python | {
"resource": ""
} |
q19286 | excinfo | train | def excinfo(e):
'''
Populate err,errmsg,errtrace info from exc.
'''
tb = sys.exc_info()[2]
path, line, name, sorc = traceback.extract_tb(tb)[-1]
ret = {
'err': e.__class__.__name__,
'errmsg': str(e),
'errfile': | python | {
"resource": ""
} |
q19287 | chunks | train | def chunks(item, size):
'''
Divide an iterable into chunks.
Args:
item: Item to slice
size (int): Maximum chunk size.
Notes:
This supports Generator objects and objects which support calling
the __getitem__() method with a slice object.
Yields:
Slices of the item containing up to "size" number of items.
'''
# use islice if it's a generator
if isinstance(item, types.GeneratorType):
while True:
chunk = tuple(itertools.islice(item, size))
if not chunk:
return
| python | {
"resource": ""
} |
q19288 | iterfd | train | def iterfd(fd, size=10000000):
'''
Generator which yields bytes from a file descriptor.
Args:
fd (file): A file-like object to read bytes from.
size (int): Size, in bytes, of the number of bytes to read from the
fd at a given time.
Notes:
If the first read call on the file descriptor is an empty
"resource": ""
} |
q19289 | firethread | train | def firethread(f):
'''
A decorator for making a function fire a thread.
'''
@functools.wraps(f)
def callmeth(*args, **kwargs):
thr | python | {
"resource": ""
} |
q19290 | setlogging | train | def setlogging(mlogger, defval=None):
'''
Configure synapse logging.
Args:
mlogger (logging.Logger): Reference to a logging.Logger()
defval (str): Default log level
Notes:
This calls logging.basicConfig and should only be called once per process.
Returns:
None
'''
log_level = os.getenv('SYN_LOG_LEVEL',
defval)
if log_level: # pragma: no cover
log_level = log_level.upper()
| python | {
"resource": ""
} |
q19291 | result | train | def result(retn):
'''
Return a value or raise an exception from a retn tuple.
'''
ok, valu = retn
if ok:
| python | {
"resource": ""
} |
q19292 | config | train | def config(conf, confdefs):
'''
Initialize a config dict using the given confdef tuples.
'''
conf = conf.copy()
# for now just populate defval | python | {
"resource": ""
} |
q19293 | Prop.getDelOps | train | def getDelOps(self, buid):
'''
Get a list of storage operations to delete this property from the buid.
Args:
buid (bytes): The node buid.
Returns:
(tuple): The storage operations | python | {
"resource": ""
} |
q19294 | Form.getLiftOps | train | def getLiftOps(self, valu, cmpr='='):
'''
Get a set of lift operations for use with an Xact.
'''
if valu is None:
iops = (('pref', b''),)
return (
('indx', ('byprop', self.pref, iops)),
)
# TODO: In an ideal world, this would get smashed down into the self.type.getLiftOps
# but since doing so breaks existing types, and fixing those could cause a cascade
# of fun failures, we'll put this off until another flag day
if cmpr == '~=':
return (
| python | {
"resource": ""
} |
q19295 | Model.addBaseType | train | def addBaseType(self, item):
'''
Add a Type instance to the data model.
'''
ctor = '.'.join([item.__class__.__module__, item.__class__.__qualname__])
| python | {
"resource": ""
} |
q19296 | Daemon.share | train | def share(self, name, item):
'''
Share an object via the telepath protocol.
Args:
name (str): Name of the shared object
item (object): The object to share over telepath.
'''
try:
| python | {
"resource": ""
} |
q19297 | _dayofmonth | train | def _dayofmonth(hardday, month, year):
'''
Returns a valid day of the month given the desired value.
Negative values are interpreted as offset backwards from the last day of the month, with -1 representing the
last day of the month. Out-of-range values are clamped to the first or last day of the month.
'''
newday = hardday | python | {
"resource": ""
} |
q19298 | _Appt.updateNexttime | train | def updateNexttime(self, now):
'''
Find the next time this appointment should be scheduled.
Delete any nonrecurring record that just happened.
'''
if self._recidxnexttime is not None and not self.recur:
del self.recs[self._recidxnexttime]
while self.recs and self.nexttime <= now:
lowtime = 999999999999.9
# Find the lowest next time of all of our recs (backwards, so we can delete)
for i in range(len(self.recs) - 1, -1, -1):
rec = self.recs[i]
nexttime = rec.nexttime(self.nexttime)
if nexttime == 0.0:
# We blew by and missed a fixed-year appointment, either due to clock shenanigans, this query going
# really long, or the initial requirement being in the past
logger.warning(f'Missed an appointment: {rec}')
| python | {
"resource": ""
} |
q19299 | Agenda.enable | train | async def enable(self):
'''
Enable cron jobs to start running, start the scheduler loop
Go through all the appointments, making sure the query is valid, and remove the ones that aren't. (We can't
evaluate queries until enabled because not all the modules are loaded yet.)
'''
if self.enabled:
return
to_delete = []
for iden, appt in self.appts.items():
try:
self.core.getStormQuery(appt.query)
| python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.