_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q264700 | BufferWalker._fill | validation | def _fill(self, size):
"""fills the internal buffer from the source iterator"""
try:
for i in range(size):
self.buffer.append(self.source.next())
except StopIteration:
self.buffer.append((EndOfFile, EndOfFile))
self.len = len(self.buffer) | python | {
"resource": ""
} |
q264701 | BufferWalker.next | validation | def next(self):
"""Advances to and returns the next token or returns EndOfFile"""
self.index += 1
t = self.peek()
if not self.depth:
self._cut()
return t | python | {
"resource": ""
} |
q264702 | main | validation | def main(world_cls, referee_cls, gui_cls, gui_actor_cls, ai_actor_cls,
theater_cls=PygletTheater, default_host=DEFAULT_HOST,
default_port=DEFAULT_PORT, argv=None):
"""
Run a game being developed with the kxg game engine.
Usage:
{exe_name} sandbox [<num_ais>] [-v...]
{exe_name} client [--host HOST] [--port PORT] [-v...]
{exe_name} server <num_guis> [<num_ais>] [--host HOST] [--port PORT] [-v...]
{exe_name} debug <num_guis> [<num_ais>] [--host HOST] [--port PORT] [-v...]
{exe_name} --help
Commands:
sandbox
Play a single-player game with the specified number of AIs. None of
the multiplayer machinery will be used.
client
Launch a client that will try to connect to a server on the given host
and port. Once it connects and the game starts, the client will allow
you to play the game against any other connected clients.
server
Launch a server that will manage a game between the given number of
human and AI players. The human players must connect using this
command's client mode.
debug
Debug a multiplayer game locally. This command launches a server and
the given number of clients all in different processes, and configures
the logging system such that the output from each process can be easily
distinguished.
Arguments:
<num_guis>
The number of human players that will be playing the game. Only needed
by commands that will launch some sort of multiplayer server.
<num_ais>
The number of AI players that will be playing the game. Only needed by
commands that will launch single-player games or multiplayer servers.
Options:
-x --host HOST [default: {default_host}]
The address of the machine running the server. Must be accessible from
the machines running the clients.
-p --port PORT [default: {default_port}]
The port that the server should listen on. Don't specify a value less
than 1024 unless the server is running with root permissions.
-v --verbose
Have the game engine log more information about what it's doing. You
can specify this option several times to get more and more information.
This command is provided so that you can start writing your game with the least
possible amount of boilerplate code. However, the clients and servers provided
by this command are not capable of running a production game. Once you have
written your game and want to give it a polished set of menus and options,
you'll have to write new Stage subclasses encapsulating that logic and you'll
have to call those stages yourself by interacting more directly with the
Theater class. The online documentation has more information on this process.
"""
import sys, os, docopt, nonstdlib
exe_name = os.path.basename(sys.argv[0])
usage = main.__doc__.format(**locals()).strip()
args = docopt.docopt(usage, argv or sys.argv[1:])
num_guis = int(args['<num_guis>'] or 1)
num_ais = int(args['<num_ais>'] or 0)
host, port = args['--host'], int(args['--port'])
logging.basicConfig(
format='%(levelname)s: %(name)s: %(message)s',
level=nonstdlib.verbosity(args['--verbose']),
)
# Use the given game objects and command line arguments to play a game!
if args['debug']:
print("""\
****************************** KNOWN BUG WARNING ******************************
In debug mode, every message produced by the logging system gets printed twice.
I know vaguely why this is happening, but as of yet I've not been able to fix
it. In the mean time, don't let this confuse you!
*******************************************************************************""")
game = MultiplayerDebugger(
world_cls, referee_cls, gui_cls, gui_actor_cls, num_guis,
ai_actor_cls, num_ais, theater_cls, host, port)
else:
game = theater_cls()
ai_actors = [ai_actor_cls() for i in range(num_ais)]
if args['sandbox']:
game.gui = gui_cls()
game.initial_stage = UniplayerGameStage(
world_cls(), referee_cls(), gui_actor_cls(), ai_actors)
game.initial_stage.successor = PostgameSplashStage()
if args['client']:
game.gui = gui_cls()
game.initial_stage = ClientConnectionStage(
world_cls(), gui_actor_cls(), host, port)
if args['server']:
game.initial_stage = ServerConnectionStage(
world_cls(), referee_cls(), num_guis, ai_actors,
host, port)
game.play() | python | {
"resource": ""
} |
q264703 | ProcessPool._run_supervisor | validation | def _run_supervisor(self):
"""
Poll the queues that the worker can use to communicate with the
supervisor, until all the workers are done and all the queues are
empty. Handle messages as they appear.
"""
import time
still_supervising = lambda: (
multiprocessing.active_children()
or not self.log_queue.empty()
or not self.exception_queue.empty())
try:
while still_supervising():
# When a log message is received, make a logger with the same
# name in this process and use it to re-log the message. It
# will get handled in this process.
try:
record = self.log_queue.get_nowait()
logger = logging.getLogger(record.name)
logger.handle(record)
except queue.Empty:
pass
# When an exception is received, immediately re-raise it.
try:
exception = self.exception_queue.get_nowait()
except queue.Empty:
pass
else:
raise exception
# Sleep for a little bit, and make sure that the workers haven't
# outlived their time limit.
time.sleep(1/self.frame_rate)
self.elapsed_time += 1/self.frame_rate
if self.time_limit and self.elapsed_time > self.time_limit:
raise RuntimeError("timeout")
# Make sure the workers don't outlive the supervisor, no matter how the
# polling loop ended (e.g. normal execution or an exception).
finally:
for process in multiprocessing.active_children():
process.terminate() | python | {
"resource": ""
} |
q264704 | JSONField.field_type | validation | def field_type(self):
"""Return database field type."""
if not self.model:
return 'JSON'
database = self.model._meta.database
if isinstance(database, Proxy):
database = database.obj
if Json and isinstance(database, PostgresqlDatabase):
return 'JSON'
return 'TEXT' | python | {
"resource": ""
} |
q264705 | JSONField.python_value | validation | def python_value(self, value):
"""Parse value from database."""
if self.field_type == 'TEXT' and isinstance(value, str):
return self.loads(value)
return value | python | {
"resource": ""
} |
q264706 | AFSAPI.get_fsapi_endpoint | validation | def get_fsapi_endpoint(self):
"""Parse the fsapi endpoint from the device url."""
endpoint = yield from self.__session.get(self.fsapi_device_url, timeout = self.timeout)
text = yield from endpoint.text(encoding='utf-8')
doc = objectify.fromstring(text)
return doc.webfsapi.text | python | {
"resource": ""
} |
q264707 | AFSAPI.create_session | validation | def create_session(self):
"""Create a session on the frontier silicon device."""
req_url = '%s/%s' % (self.__webfsapi, 'CREATE_SESSION')
sid = yield from self.__session.get(req_url, params=dict(pin=self.pin),
timeout = self.timeout)
text = yield from sid.text(encoding='utf-8')
doc = objectify.fromstring(text)
return doc.sessionId.text | python | {
"resource": ""
} |
q264708 | AFSAPI.call | validation | def call(self, path, extra=None):
"""Execute a frontier silicon API call."""
try:
if not self.__webfsapi:
self.__webfsapi = yield from self.get_fsapi_endpoint()
if not self.sid:
self.sid = yield from self.create_session()
if not isinstance(extra, dict):
extra = dict()
params = dict(pin=self.pin, sid=self.sid)
params.update(**extra)
req_url = ('%s/%s' % (self.__webfsapi, path))
result = yield from self.__session.get(req_url, params=params,
timeout = self.timeout)
if result.status == 200:
text = yield from result.text(encoding='utf-8')
else:
self.sid = yield from self.create_session()
params = dict(pin=self.pin, sid=self.sid)
params.update(**extra)
result = yield from self.__session.get(req_url, params=params,
timeout = self.timeout)
text = yield from result.text(encoding='utf-8')
return objectify.fromstring(text)
except Exception as e:
logging.info('AFSAPI Exception: ' +traceback.format_exc())
return None | python | {
"resource": ""
} |
q264709 | AFSAPI.handle_set | validation | def handle_set(self, item, value):
"""Helper method for setting a value by using the fsapi API."""
doc = yield from self.call('SET/{}'.format(item), dict(value=value))
if doc is None:
return None
return doc.status == 'FS_OK' | python | {
"resource": ""
} |
q264710 | AFSAPI.handle_text | validation | def handle_text(self, item):
"""Helper method for fetching a text value."""
doc = yield from self.handle_get(item)
if doc is None:
return None
return doc.value.c8_array.text or None | python | {
"resource": ""
} |
q264711 | AFSAPI.handle_int | validation | def handle_int(self, item):
"""Helper method for fetching a integer value."""
doc = yield from self.handle_get(item)
if doc is None:
return None
return int(doc.value.u8.text) or None | python | {
"resource": ""
} |
q264712 | AFSAPI.handle_long | validation | def handle_long(self, item):
"""Helper method for fetching a long value. Result is integer."""
doc = yield from self.handle_get(item)
if doc is None:
return None
return int(doc.value.u32.text) or None | python | {
"resource": ""
} |
q264713 | AFSAPI.get_power | validation | def get_power(self):
"""Check if the device is on."""
power = (yield from self.handle_int(self.API.get('power')))
return bool(power) | python | {
"resource": ""
} |
q264714 | AFSAPI.set_power | validation | def set_power(self, value=False):
"""Power on or off the device."""
power = (yield from self.handle_set(
self.API.get('power'), int(value)))
return bool(power) | python | {
"resource": ""
} |
q264715 | AFSAPI.get_modes | validation | def get_modes(self):
"""Get the modes supported by this device."""
if not self.__modes:
self.__modes = yield from self.handle_list(
self.API.get('valid_modes'))
return self.__modes | python | {
"resource": ""
} |
q264716 | AFSAPI.get_volume_steps | validation | def get_volume_steps(self):
"""Read the maximum volume level of the device."""
if not self.__volume_steps:
self.__volume_steps = yield from self.handle_int(
self.API.get('volume_steps'))
return self.__volume_steps | python | {
"resource": ""
} |
q264717 | AFSAPI.get_mute | validation | def get_mute(self):
"""Check if the device is muted."""
mute = (yield from self.handle_int(self.API.get('mute')))
return bool(mute) | python | {
"resource": ""
} |
q264718 | AFSAPI.set_mute | validation | def set_mute(self, value=False):
"""Mute or unmute the device."""
mute = (yield from self.handle_set(self.API.get('mute'), int(value)))
return bool(mute) | python | {
"resource": ""
} |
q264719 | AFSAPI.get_play_status | validation | def get_play_status(self):
"""Get the play status of the device."""
status = yield from self.handle_int(self.API.get('status'))
return self.PLAY_STATES.get(status) | python | {
"resource": ""
} |
q264720 | AFSAPI.get_equalisers | validation | def get_equalisers(self):
"""Get the equaliser modes supported by this device."""
if not self.__equalisers:
self.__equalisers = yield from self.handle_list(
self.API.get('equalisers'))
return self.__equalisers | python | {
"resource": ""
} |
q264721 | AFSAPI.set_sleep | validation | def set_sleep(self, value=False):
"""Set device sleep timer."""
return (yield from self.handle_set(self.API.get('sleep'), int(value))) | python | {
"resource": ""
} |
q264722 | BitAwareByteArray._set_range | validation | def _set_range(self, start, stop, value, value_len):
"""
Assumes that start and stop are already in 'buffer' coordinates. value is a byte iterable.
value_len is fractional.
"""
assert stop >= start and value_len >= 0
range_len = stop - start
if range_len < value_len:
self._insert_zeros(stop, stop + value_len - range_len)
self._copy_to_range(start, value, value_len)
elif range_len > value_len:
self._del_range(stop - (range_len - value_len), stop)
self._copy_to_range(start, value, value_len)
else:
self._copy_to_range(start, value, value_len) | python | {
"resource": ""
} |
q264723 | GenomeVCFLine._parse_genotype | validation | def _parse_genotype(self, vcf_fields):
"""Parse genotype from VCF line data"""
format_col = vcf_fields[8].split(':')
genome_data = vcf_fields[9].split(':')
try:
gt_idx = format_col.index('GT')
except ValueError:
return []
return [int(x) for x in re.split(r'[\|/]', genome_data[gt_idx]) if
x != '.'] | python | {
"resource": ""
} |
q264724 | IRField.toIndex | validation | def toIndex(self, value):
'''
toIndex - An optional method which will return the value prepped for index.
By default, "toStorage" will be called. If you provide "hashIndex=True" on the constructor,
the field will be md5summed for indexing purposes. This is useful for large strings, etc.
'''
if self._isIrNull(value):
ret = IR_NULL_STR
else:
ret = self._toIndex(value)
if self.isIndexHashed is False:
return ret
return md5(tobytes(ret)).hexdigest() | python | {
"resource": ""
} |
q264725 | IRField.copy | validation | def copy(self):
'''
copy - Create a copy of this IRField.
Each subclass should implement this, as you'll need to pass in the args to constructor.
@return <IRField (or subclass)> - Another IRField that has all the same values as this one.
'''
return self.__class__(name=self.name, valueType=self.valueType, defaultValue=self.defaultValue, hashIndex=self.hashIndex) | python | {
"resource": ""
} |
q264726 | ForeignLinkData.objHasUnsavedChanges | validation | def objHasUnsavedChanges(self):
'''
objHasUnsavedChanges - Check if any object has unsaved changes, cascading.
'''
if not self.obj:
return False
return self.obj.hasUnsavedChanges(cascadeObjects=True) | python | {
"resource": ""
} |
q264727 | assert_json_type | validation | def assert_json_type(value: JsonValue, expected_type: JsonCheckType) -> None:
"""Check that a value has a certain JSON type.
Raise TypeError if the type does not match.
Supported types: str, int, float, bool, list, dict, and None.
float will match any number, int will only match numbers without
fractional part.
The special type JList(x) will match a list value where each
item is of type x:
>>> assert_json_type([1, 2, 3], JList(int))
"""
def type_name(t: Union[JsonCheckType, Type[None]]) -> str:
if t is None:
return "None"
if isinstance(t, JList):
return "list"
return t.__name__
if expected_type is None:
if value is None:
return
elif expected_type == float:
if isinstance(value, float) or isinstance(value, int):
return
elif expected_type in [str, int, bool, list, dict]:
if isinstance(value, expected_type): # type: ignore
return
elif isinstance(expected_type, JList):
if isinstance(value, list):
for v in value:
assert_json_type(v, expected_type.value_type)
return
else:
raise TypeError("unsupported type")
raise TypeError("wrong JSON type {} != {}".format(
type_name(expected_type), type_name(type(value)))) | python | {
"resource": ""
} |
q264728 | composite.load | validation | def load(cls, fh):
"""
Load json or yaml data from file handle.
Args:
fh (file): File handle to load from.
Examlple:
>>> with open('data.json', 'r') as json:
>>> jsdata = composite.load(json)
>>>
>>> with open('data.yml', 'r') as yml:
>>> ymldata = composite.load(yml)
"""
dat = fh.read()
try:
ret = cls.from_json(dat)
except:
ret = cls.from_yaml(dat)
return ret | python | {
"resource": ""
} |
q264729 | composite.from_json | validation | def from_json(cls, fh):
"""
Load json from file handle.
Args:
fh (file): File handle to load from.
Examlple:
>>> with open('data.json', 'r') as json:
>>> data = composite.load(json)
"""
if isinstance(fh, str):
return cls(json.loads(fh))
else:
return cls(json.load(fh)) | python | {
"resource": ""
} |
q264730 | composite.intersection | validation | def intersection(self, other, recursive=True):
"""
Recursively compute intersection of data. For dictionaries, items
for specific keys will be reduced to unique items. For lists, items
will be reduced to unique items. This method is meant to be analogous
to set.intersection for composite objects.
Args:
other (composite): Other composite object to intersect with.
recursive (bool): Whether or not to perform the operation recursively,
for all nested composite objects.
"""
if not isinstance(other, composite):
raise AssertionError('Cannot intersect composite and {} types'.format(type(other)))
if self.meta_type != other.meta_type:
return composite({})
if self.meta_type == 'list':
keep = []
for item in self._list:
if item in other._list:
if recursive and isinstance(item, composite):
keep.extend(item.intersection(other.index(item), recursive=True))
else:
keep.append(item)
return composite(keep)
elif self.meta_type == 'dict':
keep = {}
for key in self._dict:
item = self._dict[key]
if key in other._dict:
if recursive and \
isinstance(item, composite) and \
isinstance(other.get(key), composite):
keep[key] = item.intersection(other.get(key), recursive=True)
elif item == other[key]:
keep[key] = item
return composite(keep)
return | python | {
"resource": ""
} |
q264731 | composite.union | validation | def union(self, other, recursive=True, overwrite=False):
"""
Recursively compute union of data. For dictionaries, items
for specific keys will be combined into a list, depending on the
status of the overwrite= parameter. For lists, items will be appended
and reduced to unique items. This method is meant to be analogous
to set.union for composite objects.
Args:
other (composite): Other composite object to union with.
recursive (bool): Whether or not to perform the operation recursively,
for all nested composite objects.
overwrite (bool): Whether or not to overwrite entries with the same
key in a nested dictionary.
"""
if not isinstance(other, composite):
raise AssertionError('Cannot union composite and {} types'.format(type(other)))
if self.meta_type != other.meta_type:
return composite([self, other])
if self.meta_type == 'list':
keep = []
for item in self._list:
keep.append(item)
for item in other._list:
if item not in self._list:
keep.append(item)
return composite(keep)
elif self.meta_type == 'dict':
keep = {}
for key in list(set(list(self._dict.keys()) + list(other._dict.keys()))):
left = self._dict.get(key)
right = other._dict.get(key)
if recursive and \
isinstance(left, composite) and \
isinstance(right, composite):
keep[key] = left.union(right, recursive=recursive, overwrite=overwrite)
elif left == right:
keep[key] = left
elif left is None:
keep[key] = right
elif right is None:
keep[key] = left
elif overwrite:
keep[key] = right
else:
keep[key] = composite([left, right])
return composite(keep)
return | python | {
"resource": ""
} |
q264732 | composite.append | validation | def append(self, item):
"""
Append to object, if object is list.
"""
if self.meta_type == 'dict':
raise AssertionError('Cannot append to object of `dict` base type!')
if self.meta_type == 'list':
self._list.append(item)
return | python | {
"resource": ""
} |
q264733 | composite.extend | validation | def extend(self, item):
"""
Extend list from object, if object is list.
"""
if self.meta_type == 'dict':
raise AssertionError('Cannot extend to object of `dict` base type!')
if self.meta_type == 'list':
self._list.extend(item)
return | python | {
"resource": ""
} |
q264734 | composite.write_json | validation | def write_json(self, fh, pretty=True):
"""
Write composite object to file handle in JSON format.
Args:
fh (file): File handle to write to.
pretty (bool): Sort keys and indent in output.
"""
sjson = json.JSONEncoder().encode(self.json())
if pretty:
json.dump(json.loads(sjson), fh, sort_keys=True, indent=4)
else:
json.dump(json.loads(sjson), fh)
return | python | {
"resource": ""
} |
q264735 | filetree.filelist | validation | def filelist(self):
"""
Return list of files in filetree.
"""
if len(self._filelist) == 0:
for item in self._data:
if isinstance(self._data[item], filetree):
self._filelist.extend(self._data[item].filelist())
else:
self._filelist.append(self._data[item])
return self._filelist | python | {
"resource": ""
} |
q264736 | filetree.prune | validation | def prune(self, regex=r".*"):
"""
Prune leaves of filetree according to specified
regular expression.
Args:
regex (str): Regular expression to use in pruning tree.
"""
return filetree(self.root, ignore=self.ignore, regex=regex) | python | {
"resource": ""
} |
q264737 | Reference.deref | validation | def deref(self, ctx):
"""
Returns the value this reference is pointing to. This method uses 'ctx' to resolve the reference and return
the value this reference references.
If the call was already made, it returns a cached result.
It also makes sure there's no cyclic reference, and if so raises CyclicReferenceError.
"""
if self in ctx.call_nodes:
raise CyclicReferenceError(ctx, self)
if self in ctx.cached_results:
return ctx.cached_results[self]
try:
ctx.call_nodes.add(self)
ctx.call_stack.append(self)
result = self.evaluate(ctx)
ctx.cached_results[self] = result
return result
except:
if ctx.exception_call_stack is None:
ctx.exception_call_stack = list(ctx.call_stack)
raise
finally:
ctx.call_stack.pop()
ctx.call_nodes.remove(self) | python | {
"resource": ""
} |
q264738 | IRQueryableList.delete | validation | def delete(self):
'''
delete - Delete all objects in this list.
@return <int> - Number of objects deleted
'''
if len(self) == 0:
return 0
mdl = self.getModel()
return mdl.deleter.deleteMultiple(self) | python | {
"resource": ""
} |
q264739 | IRQueryableList.save | validation | def save(self):
'''
save - Save all objects in this list
'''
if len(self) == 0:
return []
mdl = self.getModel()
return mdl.saver.save(self) | python | {
"resource": ""
} |
q264740 | IRQueryableList.reload | validation | def reload(self):
'''
reload - Reload all objects in this list.
Updates in-place. To just fetch all these objects again, use "refetch"
@return - List (same order as current objects) of either exception (KeyError) if operation failed,
or a dict of fields changed -> (old, new)
'''
if len(self) == 0:
return []
ret = []
for obj in self:
res = None
try:
res = obj.reload()
except Exception as e:
res = e
ret.append(res)
return ret | python | {
"resource": ""
} |
q264741 | IRQueryableList.refetch | validation | def refetch(self):
'''
refetch - Fetch a fresh copy of all items in this list.
Returns a new list. To update in-place, use "reload".
@return IRQueryableList<IndexedRedisModel> - List of fetched items
'''
if len(self) == 0:
return IRQueryableList()
mdl = self.getModel()
pks = [item._id for item in self if item._id]
return mdl.objects.getMultiple(pks) | python | {
"resource": ""
} |
q264742 | Blox.render | validation | def render(self, *args, **kwargs):
'''Renders as a str'''
render_to = StringIO()
self.output(render_to, *args, **kwargs)
return render_to.getvalue() | python | {
"resource": ""
} |
q264743 | AbstractTag.start_tag | validation | def start_tag(self):
'''Returns the elements HTML start tag'''
direct_attributes = (attribute.render(self) for attribute in self.render_attributes)
attributes = ()
if hasattr(self, '_attributes'):
attributes = ('{0}="{1}"'.format(key, value)
for key, value in self.attributes.items() if value)
rendered_attributes = " ".join(filter(bool, chain(direct_attributes, attributes)))
return '<{0}{1}{2}{3}>'.format(self.tag, ' ' if rendered_attributes else '',
rendered_attributes, ' /' if self.tag_self_closes else "") | python | {
"resource": ""
} |
q264744 | safe_repr | validation | def safe_repr(obj):
"""Returns a repr of an object and falls back to a minimal representation of type and ID if the call to repr raised
an error.
:param obj: object to safe repr
:returns: repr string or '(type<id> repr error)' string
:rtype: str
"""
try:
obj_repr = repr(obj)
except:
obj_repr = "({0}<{1}> repr error)".format(type(obj), id(obj))
return obj_repr | python | {
"resource": ""
} |
q264745 | match_to_clinvar | validation | def match_to_clinvar(genome_file, clin_file):
"""
Match a genome VCF to variants in the ClinVar VCF file
Acts as a generator, yielding tuples of:
(ClinVarVCFLine, ClinVarAllele, zygosity)
'zygosity' is a string and corresponds to the genome's zygosity for that
ClinVarAllele. It can be either: 'Het' (heterozygous), 'Hom' (homozygous),
or 'Hem' (hemizygous, e.g. X chromosome in XY individuals).
"""
clin_curr_line = _next_line(clin_file)
genome_curr_line = _next_line(genome_file)
# Ignores all the lines that start with a hashtag
while clin_curr_line.startswith('#'):
clin_curr_line = _next_line(clin_file)
while genome_curr_line.startswith('#'):
genome_curr_line = _next_line(genome_file)
# Advance through both files simultaneously to find matches
while clin_curr_line and genome_curr_line:
# Advance a file when positions aren't equal.
clin_curr_pos = VCFLine.get_pos(clin_curr_line)
genome_curr_pos = VCFLine.get_pos(genome_curr_line)
try:
if clin_curr_pos['chrom'] > genome_curr_pos['chrom']:
genome_curr_line = _next_line(genome_file)
continue
elif clin_curr_pos['chrom'] < genome_curr_pos['chrom']:
clin_curr_line = _next_line(clin_file)
continue
if clin_curr_pos['pos'] > genome_curr_pos['pos']:
genome_curr_line = _next_line(genome_file)
continue
elif clin_curr_pos['pos'] < genome_curr_pos['pos']:
clin_curr_line = _next_line(clin_file)
continue
except StopIteration:
break
# If we get here, start positions match.
# Look for allele matching.
genome_vcf_line = GenomeVCFLine(vcf_line=genome_curr_line,
skip_info=True)
# We can skip if genome has no allele information for this point.
if not genome_vcf_line.genotype_allele_indexes:
genome_curr_line = _next_line(genome_file)
continue
# Match only if ClinVar and Genome ref_alleles match.
clinvar_vcf_line = ClinVarVCFLine(vcf_line=clin_curr_line)
if not genome_vcf_line.ref_allele == clinvar_vcf_line.ref_allele:
try:
genome_curr_line = _next_line(genome_file)
clin_curr_line = _next_line(clin_file)
continue
except StopIteration:
break
# Determine genome alleles and zygosity. Zygosity is assumed to be one
# of: heterozygous, homozygous, or hemizygous.
genotype_allele_indexes = genome_vcf_line.genotype_allele_indexes
genome_alleles = [genome_vcf_line.alleles[x] for
x in genotype_allele_indexes]
if len(genome_alleles) == 1:
zygosity = 'Hem'
elif len(genome_alleles) == 2:
if genome_alleles[0].sequence == genome_alleles[1].sequence:
zygosity = 'Hom'
genome_alleles = [genome_alleles[0]]
else:
zygosity = 'Het'
else:
raise ValueError('This code only expects to work on genomes ' +
'with one or two alleles called at each ' +
'location. The following line violates this:' +
str(genome_vcf_line))
# Look for matches to ClinVar alleles.
for genome_allele in genome_alleles:
for allele in clinvar_vcf_line.alleles:
if genome_allele.sequence == allele.sequence:
# The 'records' attribute is specific to ClinVarAlleles.
if hasattr(allele, 'records'):
yield (genome_vcf_line, allele, zygosity)
# Done matching, move on.
try:
genome_curr_line = _next_line(genome_file)
clin_curr_line = _next_line(clin_file)
except StopIteration:
break | python | {
"resource": ""
} |
q264746 | Allele.as_dict | validation | def as_dict(self):
"""Return Allele data as dict object."""
self_as_dict = dict()
self_as_dict['sequence'] = self.sequence
if hasattr(self, 'frequency'):
self_as_dict['frequency'] = self.frequency
return self_as_dict | python | {
"resource": ""
} |
q264747 | VCFLine._parse_allele_data | validation | def _parse_allele_data(self):
"""Create list of Alleles from VCF line data"""
return [Allele(sequence=x) for x in
[self.ref_allele] + self.alt_alleles] | python | {
"resource": ""
} |
q264748 | VCFLine._parse_info | validation | def _parse_info(self, info_field):
"""Parse the VCF info field"""
info = dict()
for item in info_field.split(';'):
# Info fields may be "foo=bar" or just "foo".
# For the first case, store key "foo" with value "bar"
# For the second case, store key "foo" with value True.
info_item_data = item.split('=')
# If length is one, just store as a key with value = true.
if len(info_item_data) == 1:
info[info_item_data[0]] = True
elif len(info_item_data) == 2:
info[info_item_data[0]] = info_item_data[1]
return info | python | {
"resource": ""
} |
q264749 | VCFLine.as_dict | validation | def as_dict(self):
"""Dict representation of parsed VCF data"""
self_as_dict = {'chrom': self.chrom,
'start': self.start,
'ref_allele': self.ref_allele,
'alt_alleles': self.alt_alleles,
'alleles': [x.as_dict() for x in self.alleles]}
try:
self_as_dict['info'] = self.info
except AttributeError:
pass
return self_as_dict | python | {
"resource": ""
} |
q264750 | VCFLine.get_pos | validation | def get_pos(vcf_line):
"""
Very lightweight parsing of a vcf line to get position.
Returns a dict containing:
'chrom': index of chromosome (int), indicates sort order
'pos': position on chromosome (int)
"""
if not vcf_line:
return None
vcf_data = vcf_line.strip().split('\t')
return_data = dict()
return_data['chrom'] = CHROM_INDEX[vcf_data[0]]
return_data['pos'] = int(vcf_data[1])
return return_data | python | {
"resource": ""
} |
q264751 | IRFieldChain._toStorage | validation | def _toStorage(self, value):
'''
_toStorage - Convert the value to a string representation for storage.
@param value - The value of the item to convert
@return A string value suitable for storing.
'''
for chainedField in self.chainedFields:
value = chainedField.toStorage(value)
return value | python | {
"resource": ""
} |
q264752 | nav_to_vcf_dir | validation | def nav_to_vcf_dir(ftp, build):
"""
Navigate an open ftplib.FTP to appropriate directory for ClinVar VCF files.
Args:
ftp: (type: ftplib.FTP) an open connection to ftp.ncbi.nlm.nih.gov
build: (type: string) genome build, either 'b37' or 'b38'
"""
if build == 'b37':
ftp.cwd(DIR_CLINVAR_VCF_B37)
elif build == 'b38':
ftp.cwd(DIR_CLINVAR_VCF_B38)
else:
raise IOError("Genome build not recognized.") | python | {
"resource": ""
} |
q264753 | ClinVarAllele.as_dict | validation | def as_dict(self, *args, **kwargs):
"""Return ClinVarAllele data as dict object."""
self_as_dict = super(ClinVarAllele, self).as_dict(*args, **kwargs)
self_as_dict['hgvs'] = self.hgvs
self_as_dict['clnalleleid'] = self.clnalleleid
self_as_dict['clnsig'] = self.clnsig
self_as_dict['clndn'] = self.clndn
self_as_dict['clndisdb'] = self.clndisdb
self_as_dict['clnvi'] = self.clnvi
return self_as_dict | python | {
"resource": ""
} |
q264754 | ClinVarVCFLine._parse_frequencies | validation | def _parse_frequencies(self):
"""Parse frequency data in ClinVar VCF"""
frequencies = OrderedDict([
('EXAC', 'Unknown'),
('ESP', 'Unknown'),
('TGP', 'Unknown')])
pref_freq = 'Unknown'
for source in frequencies.keys():
freq_key = 'AF_' + source
if freq_key in self.info:
frequencies[source] = self.info[freq_key]
if pref_freq == 'Unknown':
pref_freq = frequencies[source]
return pref_freq, frequencies | python | {
"resource": ""
} |
q264755 | ClinVarVCFLine._parse_allele_data | validation | def _parse_allele_data(self):
"""Parse alleles for ClinVar VCF, overrides parent method."""
# Get allele frequencies if they exist.
pref_freq, frequencies = self._parse_frequencies()
info_clnvar_single_tags = ['ALLELEID', 'CLNSIG', 'CLNHGVS']
cln_data = {x.lower(): self.info[x] if x in self.info else None
for x in info_clnvar_single_tags}
cln_data.update(
{'clndisdb': [x.split(',') for x in
self.info['CLNDISDB'].split('|')]
if 'CLNDISDB' in self.info else []})
cln_data.update({'clndn': self.info['CLNDN'].split('|') if
'CLNDN' in self.info else []})
cln_data.update({'clnvi': self.info['CLNVI'].split(',')
if 'CLNVI' in self.info else []})
try:
sequence = self.alt_alleles[0]
except IndexError:
sequence = self.ref_allele
allele = ClinVarAllele(frequency=pref_freq, sequence=sequence,
**cln_data)
# A few ClinVar variants are only reported as a combination with
# other variants, and no single-variant effect is proposed. Skip these.
if not cln_data['clnsig']:
return []
return [allele] | python | {
"resource": ""
} |
q264756 | Factory.add | validation | def add(self, *names):
'''Returns back a class decorator that enables registering Blox to this factory'''
def decorator(blok):
for name in names or (blok.__name__, ):
self[name] = blok
return blok
return decorator | python | {
"resource": ""
} |
q264757 | depricated_name | validation | def depricated_name(newmethod):
"""
Decorator for warning user of depricated functions before use.
Args:
newmethod (str): Name of method to use instead.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
"Function {} is depricated, please use {} instead.".format(func.__name__, newmethod),
category=DeprecationWarning, stacklevel=2
)
warnings.simplefilter('default', DeprecationWarning)
return func(*args, **kwargs)
return wrapper
return decorator | python | {
"resource": ""
} |
q264758 | setDefaultRedisConnectionParams | validation | def setDefaultRedisConnectionParams( connectionParams ):
'''
setDefaultRedisConnectionParams - Sets the default parameters used when connecting to Redis.
This should be the args to redis.Redis in dict (kwargs) form.
@param connectionParams <dict> - A dict of connection parameters.
Common keys are:
host <str> - hostname/ip of Redis server (default '127.0.0.1')
port <int> - Port number (default 6379)
db <int> - Redis DB number (default 0)
Omitting any of those keys will ensure the default value listed is used.
This connection info will be used by default for all connections to Redis, unless explicitly set otherwise.
The common way to override is to define REDIS_CONNECTION_PARAMS on a model, or use AltConnectedModel = MyModel.connectAlt( PARAMS )
Any omitted fields in these connection overrides will inherit the value from the global default.
For example, if your global default connection params define host = 'example.com', port=15000, and db=0,
and then one of your models has
REDIS_CONNECTION_PARAMS = { 'db' : 1 }
as an attribute, then that model's connection will inherit host='example.com" and port=15000 but override db and use db=1
NOTE: Calling this function will clear the connection_pool attribute of all stored managed connections, disconnect all managed connections,
and close-out the connection pool.
It may not be safe to call this function while other threads are potentially hitting Redis (not that it would make sense anyway...)
@see clearRedisPools for more info
'''
global _defaultRedisConnectionParams
_defaultRedisConnectionParams.clear()
for key, value in connectionParams.items():
_defaultRedisConnectionParams[key] = value
clearRedisPools() | python | {
"resource": ""
} |
q264759 | clearRedisPools | validation | def clearRedisPools():
'''
clearRedisPools - Disconnect all managed connection pools,
and clear the connectiobn_pool attribute on all stored managed connection pools.
A "managed" connection pool is one where REDIS_CONNECTION_PARAMS does not define the "connection_pool" attribute.
If you define your own pools, IndexedRedis will use them and leave them alone.
This method will be called automatically after calling setDefaultRedisConnectionParams.
Otherwise, you shouldn't have to call it.. Maybe as some sort of disaster-recovery call..
'''
global RedisPools
global _redisManagedConnectionParams
for pool in RedisPools.values():
try:
pool.disconnect()
except:
pass
for paramsList in _redisManagedConnectionParams.values():
for params in paramsList:
if 'connection_pool' in params:
del params['connection_pool']
RedisPools.clear()
_redisManagedConnectionParams.clear() | python | {
"resource": ""
} |
q264760 | getRedisPool | validation | def getRedisPool(params):
'''
getRedisPool - Returns and possibly also creates a Redis connection pool
based on the REDIS_CONNECTION_PARAMS passed in.
The goal of this method is to keep a small connection pool rolling
to each unique Redis instance, otherwise during network issues etc
python-redis will leak connections and in short-order can exhaust
all the ports on a system. There's probably also some minor
performance gain in sharing Pools.
Will modify "params", if "host" and/or "port" are missing, will fill
them in with defaults, and prior to return will set "connection_pool"
on params, which will allow immediate return on the next call,
and allow access to the pool directly from the model object.
@param params <dict> - REDIS_CONNECTION_PARAMS - kwargs to redis.Redis
@return redis.ConnectionPool corrosponding to this unique server.
'''
global RedisPools
global _defaultRedisConnectionParams
global _redisManagedConnectionParams
if not params:
params = _defaultRedisConnectionParams
isDefaultParams = True
else:
isDefaultParams = bool(params is _defaultRedisConnectionParams)
if 'connection_pool' in params:
return params['connection_pool']
hashValue = hashDictOneLevel(params)
if hashValue in RedisPools:
params['connection_pool'] = RedisPools[hashValue]
return RedisPools[hashValue]
# Copy the params, so that we don't modify the original dict
if not isDefaultParams:
origParams = params
params = copy.copy(params)
else:
origParams = params
checkAgain = False
if 'host' not in params:
if not isDefaultParams and 'host' in _defaultRedisConnectionParams:
params['host'] = _defaultRedisConnectionParams['host']
else:
params['host'] = '127.0.0.1'
checkAgain = True
if 'port' not in params:
if not isDefaultParams and 'port' in _defaultRedisConnectionParams:
params['port'] = _defaultRedisConnectionParams['port']
else:
params['port'] = 6379
checkAgain = True
if 'db' not in params:
if not isDefaultParams and 'db' in _defaultRedisConnectionParams:
params['db'] = _defaultRedisConnectionParams['db']
else:
params['db'] = 0
checkAgain = True
if not isDefaultParams:
otherGlobalKeys = set(_defaultRedisConnectionParams.keys()) - set(params.keys())
for otherKey in otherGlobalKeys:
if otherKey == 'connection_pool':
continue
params[otherKey] = _defaultRedisConnectionParams[otherKey]
checkAgain = True
if checkAgain:
hashValue = hashDictOneLevel(params)
if hashValue in RedisPools:
params['connection_pool'] = RedisPools[hashValue]
return RedisPools[hashValue]
connectionPool = redis.ConnectionPool(**params)
origParams['connection_pool'] = params['connection_pool'] = connectionPool
RedisPools[hashValue] = connectionPool
# Add the original as a "managed" redis connection (they did not provide their own pool)
# such that if the defaults change, we make sure to re-inherit any keys, and can disconnect
# from clearRedisPools
origParamsHash = hashDictOneLevel(origParams)
if origParamsHash not in _redisManagedConnectionParams:
_redisManagedConnectionParams[origParamsHash] = [origParams]
elif origParams not in _redisManagedConnectionParams[origParamsHash]:
_redisManagedConnectionParams[origParamsHash].append(origParams)
return connectionPool | python | {
"resource": ""
} |
q264761 | IndexedRedisModel.pprint | validation | def pprint(self, stream=None):
'''
pprint - Pretty-print a dict representation of this object.
@param stream <file/None> - Either a stream to output, or None to default to sys.stdout
'''
pprint.pprint(self.asDict(includeMeta=True, forStorage=False, strKeys=True), stream=stream) | python | {
"resource": ""
} |
q264762 | IndexedRedisModel.hasUnsavedChanges | validation | def hasUnsavedChanges(self, cascadeObjects=False):
'''
hasUnsavedChanges - Check if any unsaved changes are present in this model, or if it has never been saved.
@param cascadeObjects <bool> default False, if True will check if any foreign linked objects themselves have unsaved changes (recursively).
Otherwise, will just check if the pk has changed.
@return <bool> - True if any fields have changed since last fetch, or if never saved. Otherwise, False
'''
if not self._id or not self._origData:
return True
for thisField in self.FIELDS:
thisVal = object.__getattribute__(self, thisField)
if self._origData.get(thisField, '') != thisVal:
return True
if cascadeObjects is True and issubclass(thisField.__class__, IRForeignLinkFieldBase):
if thisVal.objHasUnsavedChanges():
return True
return False | python | {
"resource": ""
} |
q264763 | IndexedRedisModel.diff | validation | def diff(firstObj, otherObj, includeMeta=False):
'''
diff - Compare the field values on two IndexedRedisModels.
@param firstObj <IndexedRedisModel instance> - First object (or self)
@param otherObj <IndexedRedisModel instance> - Second object
@param includeMeta <bool> - If meta information (like pk) should be in the diff results.
@return <dict> - Dict of 'field' : ( value_firstObjForField, value_otherObjForField ).
Keys are names of fields with different values.
Value is a tuple of ( value_firstObjForField, value_otherObjForField )
Can be called statically, like: IndexedRedisModel.diff ( obj1, obj2 )
or in reference to an obj : obj1.diff(obj2)
'''
if not isIndexedRedisModel(firstObj):
raise ValueError('Type < %s > does not extend IndexedRedisModel.' %( type(firstObj).__name__ , ) )
if not isIndexedRedisModel(otherObj):
raise ValueError('Type < %s > does not extend IndexedRedisModel.' %( type(otherObj).__name__ , ) )
firstObj.validateModel()
otherObj.validateModel()
# Types may not match, but could be subclass, special copy class (like connectAlt), etc.
# So check if FIELDS matches, and if so, we can continue.
if getattr(firstObj, 'FIELDS') != getattr(otherObj, 'FIELDS'):
# NOTE: Maybe we should iterate here and compare just that field types and names match?
# In case a copy changes a default or something, we would still be able to diff..
raise ValueError('Cannot compare < %s > and < %s > . Must be same model OR have equal FIELDS.' %( firstObj.__class__, otherObj.__class__) )
diffFields = {}
for thisField in firstObj.FIELDS:
thisFieldStr = str(thisField)
firstVal = object.__getattribute__( firstObj, thisFieldStr )
otherVal = object.__getattribute__( otherObj, thisFieldStr )
if firstVal != otherVal:
diffFields[ thisFieldStr ] = ( (firstVal, otherVal) )
if includeMeta:
firstPk = firstObj.getPk()
otherPk = otherObj.getPk()
if firstPk != otherPk:
diffFields['_id'] = ( firstPk, otherPk )
return diffFields | python | {
"resource": ""
} |
q264764 | IndexedRedisModel.save | validation | def save(self, cascadeSave=True):
'''
save - Save this object.
Will perform an "insert" if this object had not been saved before,
otherwise will update JUST the fields changed on THIS INSTANCE of the model.
i.e. If you have two processes fetch the same object and change different fields, they will not overwrite
eachother, but only save the ones each process changed.
If you want to save multiple objects of type MyModel in a single transaction,
and you have those objects in a list, myObjs, you can do the following:
MyModel.saver.save(myObjs)
@param cascadeSave <bool> Default True - If True, any Foreign models linked as attributes that have been altered
or created will be saved with this object. If False, only this object (and the reference to an already-saved foreign model) will be saved.
@see #IndexedRedisSave.save
@return <list> - Single element list, id of saved object (if successful)
'''
saver = IndexedRedisSave(self.__class__)
return saver.save(self, cascadeSave=cascadeSave) | python | {
"resource": ""
} |
q264765 | IndexedRedisModel.hasSameValues | validation | def hasSameValues(self, other, cascadeObject=True):
'''
hasSameValues - Check if this and another model have the same fields and values.
This does NOT include id, so the models can have the same values but be different objects in the database.
@param other <IndexedRedisModel> - Another model
@param cascadeObject <bool> default True - If True, foreign link values with changes will be considered a difference.
Otherwise, only the immediate values are checked.
@return <bool> - True if all fields have the same value, otherwise False
'''
if self.FIELDS != other.FIELDS:
return False
oga = object.__getattribute__
for field in self.FIELDS:
thisVal = oga(self, field)
otherVal = oga(other, field)
if thisVal != otherVal:
return False
if cascadeObject is True and issubclass(field.__class__, IRForeignLinkFieldBase):
if thisVal and thisVal.isFetched():
if otherVal and otherVal.isFetched():
theseForeign = thisVal.getObjs()
othersForeign = otherVal.getObjs()
for i in range(len(theseForeign)):
if not theseForeign[i].hasSameValues(othersForeign[i]):
return False
else:
theseForeign = thisVal.getObjs()
for i in range(len(theseForeign)):
if theseForeign[i].hasUnsavedChanges(cascadeObjects=True):
return False
else:
if otherVal and otherVal.isFetched():
othersForeign = otherVal.getObjs()
for i in range(len(othersForeign)):
if othersForeign[i].hasUnsavedChanges(cascadeObjects=True):
return False
return True | python | {
"resource": ""
} |
q264766 | IndexedRedisModel.copy | validation | def copy(self, copyPrimaryKey=False, copyValues=False):
'''
copy - Copies this object.
@param copyPrimaryKey <bool> default False - If True, any changes to the copy will save over-top the existing entry in Redis.
If False, only the data is copied, and nothing is saved.
@param copyValues <bool> default False - If True, every field value on this object will be explicitly copied. If False,
an object will be created with the same values, and depending on the type may share the same reference.
This is the difference between a copy and a deepcopy.
@return <IndexedRedisModel> - Copy of this object, per above
If you need a copy that IS linked, @see IndexedRedisModel.copy
'''
cpy = self.__class__(**self.asDict(copyPrimaryKey, forStorage=False))
if copyValues is True:
for fieldName in cpy.FIELDS:
setattr(cpy, fieldName, copy.deepcopy(getattr(cpy, fieldName)))
return cpy | python | {
"resource": ""
} |
q264767 | IndexedRedisModel.saveToExternal | validation | def saveToExternal(self, redisCon):
'''
saveToExternal - Saves this object to a different Redis than that specified by REDIS_CONNECTION_PARAMS on this model.
@param redisCon <dict/redis.Redis> - Either a dict of connection params, a la REDIS_CONNECTION_PARAMS, or an existing Redis connection.
If you are doing a lot of bulk copies, it is recommended that you create a Redis connection and pass it in rather than establish a new
connection with each call.
@note - You will generate a new primary key relative to the external Redis environment. If you need to reference a "shared" primary key, it is better
to use an indexed field than the internal pk.
'''
if type(redisCon) == dict:
conn = redis.Redis(**redisCon)
elif hasattr(conn, '__class__') and issubclass(conn.__class__, redis.Redis):
conn = redisCon
else:
raise ValueError('saveToExternal "redisCon" param must either be a dictionary of connection parameters, or redis.Redis, or extension thereof')
saver = self.saver
# Fetch next PK from external
forceID = saver._getNextID(conn) # Redundant because of changes in save method
myCopy = self.copy(False)
return saver.save(myCopy, usePipeline=True, forceID=forceID, conn=conn) | python | {
"resource": ""
} |
q264768 | IndexedRedisModel.reload | validation | def reload(self, cascadeObjects=True):
'''
reload - Reload this object from the database, overriding any local changes and merging in any updates.
@param cascadeObjects <bool> Default True. If True, foreign-linked objects will be reloaded if their values have changed
since last save/fetch. If False, only if the pk changed will the foreign linked objects be reloaded.
@raises KeyError - if this object has not been saved (no primary key)
@return - Dict with the keys that were updated. Key is field name that was updated,
and value is tuple of (old value, new value).
NOTE: Currently, this will cause a fetch of all Foreign Link objects, one level
'''
_id = self._id
if not _id:
raise KeyError('Object has never been saved! Cannot reload.')
currentData = self.asDict(False, forStorage=False)
# Get the object, and compare the unconverted "asDict" repr.
# If any changes, we will apply the already-convered value from
# the object, but we compare the unconverted values (what's in the DB).
newDataObj = self.objects.get(_id)
if not newDataObj:
raise KeyError('Object with id=%d is not in database. Cannot reload.' %(_id,))
newData = newDataObj.asDict(False, forStorage=False)
if currentData == newData and not self.foreignFields:
return []
updatedFields = {}
for thisField, newValue in newData.items():
defaultValue = thisField.getDefaultValue()
currentValue = currentData.get(thisField, defaultValue)
fieldIsUpdated = False
if currentValue != newValue:
fieldIsUpdated = True
elif cascadeObjects is True and issubclass(thisField.__class__, IRForeignLinkFieldBase):
# If we are cascading objects, and at this point the pk is the same
if currentValue.isFetched():
# If we have fetched the current set, we might need to update (pks already match)
oldObjs = currentValue.getObjs()
newObjs = newValue.getObjs()
if oldObjs != newObjs: # This will check using __eq__, so one-level including pk
fieldIsUpdated = True
else:
# Use hasSameValues with cascadeObjects=True to scan past one level
for i in range(len(oldObjs)):
if not oldObjs[i].hasSameValues(newObjs[i], cascadeObjects=True):
fieldIsUpdated = True
break
if fieldIsUpdated is True:
# Use "converted" values in the updatedFields dict, and apply on the object.
updatedFields[thisField] = ( currentValue, newValue)
setattr(self, thisField, newValue)
self._origData[thisField] = newDataObj._origData[thisField]
return updatedFields | python | {
"resource": ""
} |
q264769 | IndexedRedisModel.copyModel | validation | def copyModel(mdl):
'''
copyModel - Copy this model, and return that copy.
The copied model will have all the same data, but will have a fresh instance of the FIELDS array and all members,
and the INDEXED_FIELDS array.
This is useful for converting, like changing field types or whatever, where you can load from one model and save into the other.
@return <IndexedRedisModel> - A copy class of this model class with a unique name.
'''
copyNum = _modelCopyMap[mdl]
_modelCopyMap[mdl] += 1
mdlCopy = type(mdl.__name__ + '_Copy' + str(copyNum), mdl.__bases__, copy.deepcopy(dict(mdl.__dict__)))
mdlCopy.FIELDS = [field.copy() for field in mdl.FIELDS]
mdlCopy.INDEXED_FIELDS = [str(idxField) for idxField in mdl.INDEXED_FIELDS] # Make sure they didn't do INDEXED_FIELDS = FIELDS or something wacky,
# so do a comprehension of str on these to make sure we only get names
mdlCopy.validateModel()
return mdlCopy | python | {
"resource": ""
} |
q264770 | IndexedRedisModel.connectAlt | validation | def connectAlt(cls, redisConnectionParams):
'''
connectAlt - Create a class of this model which will use an alternate connection than the one specified by REDIS_CONNECTION_PARAMS on this model.
@param redisConnectionParams <dict> - Dictionary of arguments to redis.Redis, same as REDIS_CONNECTION_PARAMS.
@return - A class that can be used in all the same ways as the existing IndexedRedisModel, but that connects to a different instance.
The fields and key will be the same here, but the connection will be different. use #copyModel if you want an independent class for the model
'''
if not isinstance(redisConnectionParams, dict):
raise ValueError('redisConnectionParams must be a dictionary!')
hashVal = hashDictOneLevel(redisConnectionParams)
modelDictCopy = copy.deepcopy(dict(cls.__dict__))
modelDictCopy['REDIS_CONNECTION_PARAMS'] = redisConnectionParams
ConnectedIndexedRedisModel = type('AltConnect' + cls.__name__ + str(hashVal), cls.__bases__, modelDictCopy)
return ConnectedIndexedRedisModel | python | {
"resource": ""
} |
q264771 | IndexedRedisHelper._get_new_connection | validation | def _get_new_connection(self):
'''
_get_new_connection - Get a new connection
internal
'''
pool = getRedisPool(self.mdl.REDIS_CONNECTION_PARAMS)
return redis.Redis(connection_pool=pool) | python | {
"resource": ""
} |
q264772 | IndexedRedisHelper._get_connection | validation | def _get_connection(self):
'''
_get_connection - Maybe get a new connection, or reuse if passed in.
Will share a connection with a model
internal
'''
if self._connection is None:
self._connection = self._get_new_connection()
return self._connection | python | {
"resource": ""
} |
q264773 | IndexedRedisHelper._add_id_to_keys | validation | def _add_id_to_keys(self, pk, conn=None):
'''
_add_id_to_keys - Adds primary key to table
internal
'''
if conn is None:
conn = self._get_connection()
conn.sadd(self._get_ids_key(), pk) | python | {
"resource": ""
} |
q264774 | IndexedRedisHelper._rem_id_from_keys | validation | def _rem_id_from_keys(self, pk, conn=None):
'''
_rem_id_from_keys - Remove primary key from table
internal
'''
if conn is None:
conn = self._get_connection()
conn.srem(self._get_ids_key(), pk) | python | {
"resource": ""
} |
q264775 | IndexedRedisHelper._add_id_to_index | validation | def _add_id_to_index(self, indexedField, pk, val, conn=None):
'''
_add_id_to_index - Adds an id to an index
internal
'''
if conn is None:
conn = self._get_connection()
conn.sadd(self._get_key_for_index(indexedField, val), pk) | python | {
"resource": ""
} |
q264776 | IndexedRedisHelper._rem_id_from_index | validation | def _rem_id_from_index(self, indexedField, pk, val, conn=None):
'''
_rem_id_from_index - Removes an id from an index
internal
'''
if conn is None:
conn = self._get_connection()
conn.srem(self._get_key_for_index(indexedField, val), pk) | python | {
"resource": ""
} |
q264777 | IndexedRedisHelper._get_key_for_index | validation | def _get_key_for_index(self, indexedField, val):
'''
_get_key_for_index - Returns the key name that would hold the indexes on a value
Internal - does not validate that indexedFields is actually indexed. Trusts you. Don't let it down.
@param indexedField - string of field name
@param val - Value of field
@return - Key name string, potentially hashed.
'''
# If provided an IRField, use the toIndex from that (to support compat_ methods
if hasattr(indexedField, 'toIndex'):
val = indexedField.toIndex(val)
else:
# Otherwise, look up the indexed field from the model
val = self.fields[indexedField].toIndex(val)
return ''.join( [INDEXED_REDIS_PREFIX, self.keyName, ':idx:', indexedField, ':', val] ) | python | {
"resource": ""
} |
q264778 | IndexedRedisHelper._compat_rem_str_id_from_index | validation | def _compat_rem_str_id_from_index(self, indexedField, pk, val, conn=None):
'''
_compat_rem_str_id_from_index - Used in compat_convertHashedIndexes to remove the old string repr of a field,
in order to later add the hashed value,
'''
if conn is None:
conn = self._get_connection()
conn.srem(self._compat_get_str_key_for_index(indexedField, val), pk) | python | {
"resource": ""
} |
q264779 | IndexedRedisHelper._peekNextID | validation | def _peekNextID(self, conn=None):
'''
_peekNextID - Look at, but don't increment the primary key for this model.
Internal.
@return int - next pk
'''
if conn is None:
conn = self._get_connection()
return to_unicode(conn.get(self._get_next_id_key()) or 0) | python | {
"resource": ""
} |
q264780 | IndexedRedisQuery._filter | validation | def _filter(filterObj, **kwargs):
'''
Internal for handling filters; the guts of .filter and .filterInline
'''
for key, value in kwargs.items():
if key.endswith('__ne'):
notFilter = True
key = key[:-4]
else:
notFilter = False
if key not in filterObj.indexedFields:
raise ValueError('Field "' + key + '" is not in INDEXED_FIELDS array. Filtering is only supported on indexed fields.')
if notFilter is False:
filterObj.filters.append( (key, value) )
else:
filterObj.notFilters.append( (key, value) )
return filterObj | python | {
"resource": ""
} |
q264781 | IndexedRedisQuery.count | validation | def count(self):
'''
count - gets the number of records matching the filter criteria
Example:
theCount = Model.objects.filter(field1='value').count()
'''
conn = self._get_connection()
numFilters = len(self.filters)
numNotFilters = len(self.notFilters)
if numFilters + numNotFilters == 0:
return conn.scard(self._get_ids_key())
if numNotFilters == 0:
if numFilters == 1:
(filterFieldName, filterValue) = self.filters[0]
return conn.scard(self._get_key_for_index(filterFieldName, filterValue))
indexKeys = [self._get_key_for_index(filterFieldName, filterValue) for filterFieldName, filterValue in self.filters]
return len(conn.sinter(indexKeys))
notIndexKeys = [self._get_key_for_index(filterFieldName, filterValue) for filterFieldName, filterValue in self.notFilters]
if numFilters == 0:
return len(conn.sdiff(self._get_ids_key(), *notIndexKeys))
indexKeys = [self._get_key_for_index(filterFieldName, filterValue) for filterFieldName, filterValue in self.filters]
tempKey = self._getTempKey()
pipeline = conn.pipeline()
pipeline.sinterstore(tempKey, *indexKeys)
pipeline.sdiff(tempKey, *notIndexKeys)
pipeline.delete(tempKey)
pks = pipeline.execute()[1] # sdiff
return len(pks) | python | {
"resource": ""
} |
q264782 | IndexedRedisQuery.exists | validation | def exists(self, pk):
'''
exists - Tests whether a record holding the given primary key exists.
@param pk - Primary key (see getPk method)
Example usage: Waiting for an object to be deleted without fetching the object or running a filter.
This is a very cheap operation.
@return <bool> - True if object with given pk exists, otherwise False
'''
conn = self._get_connection()
key = self._get_key_for_id(pk)
return conn.exists(key) | python | {
"resource": ""
} |
q264783 | IndexedRedisQuery.getPrimaryKeys | validation | def getPrimaryKeys(self, sortByAge=False):
'''
getPrimaryKeys - Returns all primary keys matching current filterset.
@param sortByAge <bool> - If False, return will be a set and may not be ordered.
If True, return will be a list and is guarenteed to represent objects oldest->newest
@return <set> - A set of all primary keys associated with current filters.
'''
conn = self._get_connection()
# Apply filters, and return object
numFilters = len(self.filters)
numNotFilters = len(self.notFilters)
if numFilters + numNotFilters == 0:
# No filters, get all.
conn = self._get_connection()
matchedKeys = conn.smembers(self._get_ids_key())
elif numNotFilters == 0:
# Only Inclusive
if numFilters == 1:
# Only one filter, get members of that index key
(filterFieldName, filterValue) = self.filters[0]
matchedKeys = conn.smembers(self._get_key_for_index(filterFieldName, filterValue))
else:
# Several filters, intersect the index keys
indexKeys = [self._get_key_for_index(filterFieldName, filterValue) for filterFieldName, filterValue in self.filters]
matchedKeys = conn.sinter(indexKeys)
else:
# Some negative filters present
notIndexKeys = [self._get_key_for_index(filterFieldName, filterValue) for filterFieldName, filterValue in self.notFilters]
if numFilters == 0:
# Only negative, diff against all keys
matchedKeys = conn.sdiff(self._get_ids_key(), *notIndexKeys)
else:
# Negative and positive. Use pipeline, find all positive intersections, and remove negative matches
indexKeys = [self._get_key_for_index(filterFieldName, filterValue) for filterFieldName, filterValue in self.filters]
tempKey = self._getTempKey()
pipeline = conn.pipeline()
pipeline.sinterstore(tempKey, *indexKeys)
pipeline.sdiff(tempKey, *notIndexKeys)
pipeline.delete(tempKey)
matchedKeys = pipeline.execute()[1] # sdiff
matchedKeys = [ int(_key) for _key in matchedKeys ]
if sortByAge is False:
return list(matchedKeys)
else:
matchedKeys = list(matchedKeys)
matchedKeys.sort()
return matchedKeys | python | {
"resource": ""
} |
q264784 | IndexedRedisQuery.all | validation | def all(self, cascadeFetch=False):
'''
all - Get the underlying objects which match the filter criteria.
Example: objs = Model.objects.filter(field1='value', field2='value2').all()
@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model
will be fetched immediately. If False, foreign objects will be fetched on-access.
@return - Objects of the Model instance associated with this query.
'''
matchedKeys = self.getPrimaryKeys()
if matchedKeys:
return self.getMultiple(matchedKeys, cascadeFetch=cascadeFetch)
return IRQueryableList([], mdl=self.mdl) | python | {
"resource": ""
} |
q264785 | IndexedRedisQuery.allOnlyFields | validation | def allOnlyFields(self, fields, cascadeFetch=False):
'''
allOnlyFields - Get the objects which match the filter criteria, only fetching given fields.
@param fields - List of fields to fetch
@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model
will be fetched immediately. If False, foreign objects will be fetched on-access.
@return - Partial objects with only the given fields fetched
'''
matchedKeys = self.getPrimaryKeys()
if matchedKeys:
return self.getMultipleOnlyFields(matchedKeys, fields, cascadeFetch=cascadeFetch)
return IRQueryableList([], mdl=self.mdl) | python | {
"resource": ""
} |
q264786 | IndexedRedisQuery.allOnlyIndexedFields | validation | def allOnlyIndexedFields(self):
'''
allOnlyIndexedFields - Get the objects which match the filter criteria, only fetching indexed fields.
@return - Partial objects with only the indexed fields fetched
'''
matchedKeys = self.getPrimaryKeys()
if matchedKeys:
return self.getMultipleOnlyIndexedFields(matchedKeys)
return IRQueryableList([], mdl=self.mdl) | python | {
"resource": ""
} |
q264787 | IndexedRedisQuery.random | validation | def random(self, cascadeFetch=False):
'''
Random - Returns a random record in current filterset.
@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model
will be fetched immediately. If False, foreign objects will be fetched on-access.
@return - Instance of Model object, or None if no items math current filters
'''
matchedKeys = list(self.getPrimaryKeys())
obj = None
# Loop so we don't return None when there are items, if item is deleted between getting key and getting obj
while matchedKeys and not obj:
key = matchedKeys.pop(random.randint(0, len(matchedKeys)-1))
obj = self.get(key, cascadeFetch=cascadeFetch)
return obj | python | {
"resource": ""
} |
q264788 | IndexedRedisQuery.delete | validation | def delete(self):
'''
delete - Deletes all entries matching the filter criteria
'''
if self.filters or self.notFilters:
return self.mdl.deleter.deleteMultiple(self.allOnlyIndexedFields())
return self.mdl.deleter.destroyModel() | python | {
"resource": ""
} |
q264789 | IndexedRedisQuery.get | validation | def get(self, pk, cascadeFetch=False):
'''
get - Get a single value with the internal primary key.
@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model
will be fetched immediately. If False, foreign objects will be fetched on-access.
@param pk - internal primary key (can be found via .getPk() on an item)
'''
conn = self._get_connection()
key = self._get_key_for_id(pk)
res = conn.hgetall(key)
if type(res) != dict or not len(res.keys()):
return None
res['_id'] = pk
ret = self._redisResultToObj(res)
if cascadeFetch is True:
self._doCascadeFetch(ret)
return ret | python | {
"resource": ""
} |
q264790 | IndexedRedisQuery._doCascadeFetch | validation | def _doCascadeFetch(obj):
'''
_doCascadeFetch - Takes an object and performs a cascading fetch on all foreign links, and all theirs, and so on.
@param obj <IndexedRedisModel> - A fetched model
'''
obj.validateModel()
if not obj.foreignFields:
return
# NOTE: Currently this fetches using one transaction per object. Implementation for actual resolution is in
# IndexedRedisModel.__getattribute__
for foreignField in obj.foreignFields:
subObjsData = object.__getattribute__(obj, foreignField)
if not subObjsData:
setattr(obj, str(foreignField), irNull)
continue
subObjs = subObjsData.getObjs()
for subObj in subObjs:
if isIndexedRedisModel(subObj):
IndexedRedisQuery._doCascadeFetch(subObj) | python | {
"resource": ""
} |
q264791 | IndexedRedisQuery.getMultiple | validation | def getMultiple(self, pks, cascadeFetch=False):
'''
getMultiple - Gets multiple objects with a single atomic operation
@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model
will be fetched immediately. If False, foreign objects will be fetched on-access.
@param pks - list of internal keys
'''
if type(pks) == set:
pks = list(pks)
if len(pks) == 1:
# Optimization to not pipeline on 1 id
return IRQueryableList([self.get(pks[0], cascadeFetch=cascadeFetch)], mdl=self.mdl)
conn = self._get_connection()
pipeline = conn.pipeline()
for pk in pks:
key = self._get_key_for_id(pk)
pipeline.hgetall(key)
res = pipeline.execute()
ret = IRQueryableList(mdl=self.mdl)
i = 0
pksLen = len(pks)
while i < pksLen:
if res[i] is None:
ret.append(None)
i += 1
continue
res[i]['_id'] = pks[i]
obj = self._redisResultToObj(res[i])
ret.append(obj)
i += 1
if cascadeFetch is True:
for obj in ret:
if not obj:
continue
self._doCascadeFetch(obj)
return ret | python | {
"resource": ""
} |
q264792 | IndexedRedisQuery.getOnlyFields | validation | def getOnlyFields(self, pk, fields, cascadeFetch=False):
'''
getOnlyFields - Gets only certain fields from a paticular primary key. For working on entire filter set, see allOnlyFields
@param pk <int> - Primary Key
@param fields list<str> - List of fields
@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model
will be fetched immediately. If False, foreign objects will be fetched on-access.
return - Partial objects with only fields applied
'''
conn = self._get_connection()
key = self._get_key_for_id(pk)
res = conn.hmget(key, fields)
if type(res) != list or not len(res):
return None
objDict = {}
numFields = len(fields)
i = 0
anyNotNone = False
while i < numFields:
objDict[fields[i]] = res[i]
if res[i] != None:
anyNotNone = True
i += 1
if anyNotNone is False:
return None
objDict['_id'] = pk
ret = self._redisResultToObj(objDict)
if cascadeFetch is True:
self._doCascadeFetch(ret)
return ret | python | {
"resource": ""
} |
q264793 | IndexedRedisQuery.getMultipleOnlyFields | validation | def getMultipleOnlyFields(self, pks, fields, cascadeFetch=False):
'''
getMultipleOnlyFields - Gets only certain fields from a list of primary keys. For working on entire filter set, see allOnlyFields
@param pks list<str> - Primary Keys
@param fields list<str> - List of fields
@param cascadeFetch <bool> Default False, If True, all Foreign objects associated with this model
will be fetched immediately. If False, foreign objects will be fetched on-access.
return - List of partial objects with only fields applied
'''
if type(pks) == set:
pks = list(pks)
if len(pks) == 1:
return IRQueryableList([self.getOnlyFields(pks[0], fields, cascadeFetch=cascadeFetch)], mdl=self.mdl)
conn = self._get_connection()
pipeline = conn.pipeline()
for pk in pks:
key = self._get_key_for_id(pk)
pipeline.hmget(key, fields)
res = pipeline.execute()
ret = IRQueryableList(mdl=self.mdl)
pksLen = len(pks)
i = 0
numFields = len(fields)
while i < pksLen:
objDict = {}
anyNotNone = False
thisRes = res[i]
if thisRes is None or type(thisRes) != list:
ret.append(None)
i += 1
continue
j = 0
while j < numFields:
objDict[fields[j]] = thisRes[j]
if thisRes[j] != None:
anyNotNone = True
j += 1
if anyNotNone is False:
ret.append(None)
i += 1
continue
objDict['_id'] = pks[i]
obj = self._redisResultToObj(objDict)
ret.append(obj)
i += 1
if cascadeFetch is True:
for obj in ret:
self._doCascadeFetch(obj)
return ret | python | {
"resource": ""
} |
q264794 | IndexedRedisQuery.compat_convertHashedIndexes | validation | def compat_convertHashedIndexes(self, fetchAll=True):
'''
compat_convertHashedIndexes - Reindex fields, used for when you change the propery "hashIndex" on one or more fields.
For each field, this will delete both the hash and unhashed keys to an object,
and then save a hashed or unhashed value, depending on that field's value for "hashIndex".
For an IndexedRedisModel class named "MyModel", call as "MyModel.objects.compat_convertHashedIndexes()"
NOTE: This works one object at a time (regardless of #fetchAll), so that an unhashable object does not trash all data.
This method is intended to be used while your application is offline,
as it doesn't make sense to be changing your model while applications are actively using it.
@param fetchAll <bool>, Default True - If True, all objects will be fetched first, then converted.
This is generally what you want to do, as it is more efficient. If you are memory contrainted,
you can set this to "False", and it will fetch one object at a time, convert it, and save it back.
'''
saver = IndexedRedisSave(self.mdl)
if fetchAll is True:
objs = self.all()
saver.compat_convertHashedIndexes(objs)
else:
didWarnOnce = False
pks = self.getPrimaryKeys()
for pk in pks:
obj = self.get(pk)
if not obj:
if didWarnOnce is False:
sys.stderr.write('WARNING(once)! An object (type=%s , pk=%d) disappered while ' \
'running compat_convertHashedIndexes! This probably means an application ' \
'is using the model while converting indexes. This is a very BAD IDEA (tm).')
didWarnOnce = True
continue
saver.compat_convertHashedIndexes([obj]) | python | {
"resource": ""
} |
q264795 | IndexedRedisSave._doSave | validation | def _doSave(self, obj, isInsert, conn, pipeline=None):
'''
_doSave - Internal function to save a single object. Don't call this directly.
Use "save" instead.
If a pipeline is provided, the operations (setting values, updating indexes, etc)
will be queued into that pipeline.
Otherwise, everything will be executed right away.
@param obj - Object to save
@param isInsert - Bool, if insert or update. Either way, obj._id is expected to be set.
@param conn - Redis connection
@param pipeline - Optional pipeline, if present the items will be queued onto it. Otherwise, go directly to conn.
'''
if pipeline is None:
pipeline = conn
newDict = obj.asDict(forStorage=True)
key = self._get_key_for_id(obj._id)
if isInsert is True:
for thisField in self.fields:
fieldValue = newDict.get(thisField, thisField.getDefaultValue())
pipeline.hset(key, thisField, fieldValue)
# Update origData with the new data
if fieldValue == IR_NULL_STR:
obj._origData[thisField] = irNull
else:
obj._origData[thisField] = object.__getattribute__(obj, str(thisField))
self._add_id_to_keys(obj._id, pipeline)
for indexedField in self.indexedFields:
self._add_id_to_index(indexedField, obj._id, obj._origData[indexedField], pipeline)
else:
updatedFields = obj.getUpdatedFields()
for thisField, fieldValue in updatedFields.items():
(oldValue, newValue) = fieldValue
oldValueForStorage = thisField.toStorage(oldValue)
newValueForStorage = thisField.toStorage(newValue)
pipeline.hset(key, thisField, newValueForStorage)
if thisField in self.indexedFields:
self._rem_id_from_index(thisField, obj._id, oldValueForStorage, pipeline)
self._add_id_to_index(thisField, obj._id, newValueForStorage, pipeline)
# Update origData with the new data
obj._origData[thisField] = newValue | python | {
"resource": ""
} |
q264796 | IndexedRedisSave.compat_convertHashedIndexes | validation | def compat_convertHashedIndexes(self, objs, conn=None):
'''
compat_convertHashedIndexes - Reindex all fields for the provided objects, where the field value is hashed or not.
If the field is unhashable, do not allow.
NOTE: This works one object at a time. It is intended to be used while your application is offline,
as it doesn't make sense to be changing your model while applications are actively using it.
@param objs <IndexedRedisModel objects to convert>
@param conn <redis.Redis or None> - Specific Redis connection or None to reuse.
'''
if conn is None:
conn = self._get_connection()
# Do one pipeline per object.
# XXX: Maybe we should do the whole thing in one pipeline?
fields = [] # A list of the indexed fields
# Iterate now so we do this once instead of per-object.
for indexedField in self.indexedFields:
origField = self.fields[indexedField]
# Check if type supports configurable hashIndex, and if not skip it.
if 'hashIndex' not in origField.__class__.__new__.__code__.co_varnames:
continue
if indexedField.hashIndex is True:
hashingField = origField
regField = origField.copy()
regField.hashIndex = False
else:
regField = origField
# Maybe copy should allow a dict of override params?
hashingField = origField.copy()
hashingField.hashIndex = True
fields.append ( (origField, regField, hashingField) )
objDicts = [obj.asDict(True, forStorage=True) for obj in objs]
# Iterate over all values. Remove the possibly stringed index, the possibly hashed index, and then put forth the hashed index.
for objDict in objDicts:
pipeline = conn.pipeline()
pk = objDict['_id']
for origField, regField, hashingField in fields:
val = objDict[indexedField]
# Remove the possibly stringed index
self._rem_id_from_index(regField, pk, val, pipeline)
# Remove the possibly hashed index
self._rem_id_from_index(hashingField, pk, val, pipeline)
# Add the new (hashed or unhashed) form.
self._add_id_to_index(origField, pk, val, pipeline)
# Launch all at once
pipeline.execute() | python | {
"resource": ""
} |
q264797 | IndexedRedisDelete.deleteOne | validation | def deleteOne(self, obj, conn=None):
'''
deleteOne - Delete one object
@param obj - object to delete
@param conn - Connection to reuse, or None
@return - number of items deleted (0 or 1)
'''
if not getattr(obj, '_id', None):
return 0
if conn is None:
conn = self._get_connection()
pipeline = conn.pipeline()
executeAfter = True
else:
pipeline = conn # In this case, we are inheriting a pipeline
executeAfter = False
pipeline.delete(self._get_key_for_id(obj._id))
self._rem_id_from_keys(obj._id, pipeline)
for indexedFieldName in self.indexedFields:
self._rem_id_from_index(indexedFieldName, obj._id, obj._origData[indexedFieldName], pipeline)
obj._id = None
if executeAfter is True:
pipeline.execute()
return 1 | python | {
"resource": ""
} |
q264798 | IndexedRedisDelete.deleteByPk | validation | def deleteByPk(self, pk):
'''
deleteByPk - Delete object associated with given primary key
'''
obj = self.mdl.objects.getOnlyIndexedFields(pk)
if not obj:
return 0
return self.deleteOne(obj) | python | {
"resource": ""
} |
q264799 | IndexedRedisDelete.deleteMultiple | validation | def deleteMultiple(self, objs):
'''
deleteMultiple - Delete multiple objects
@param objs - List of objects
@return - Number of objects deleted
'''
conn = self._get_connection()
pipeline = conn.pipeline()
numDeleted = 0
for obj in objs:
numDeleted += self.deleteOne(obj, pipeline)
pipeline.execute()
return numDeleted | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.