body
stringlengths 26
98.2k
| body_hash
int64 -9,222,864,604,528,158,000
9,221,803,474B
| docstring
stringlengths 1
16.8k
| path
stringlengths 5
230
| name
stringlengths 1
96
| repository_name
stringlengths 7
89
| lang
stringclasses 1
value | body_without_docstring
stringlengths 20
98.2k
|
|---|---|---|---|---|---|---|---|
def load_command(self, command, flags, user_level, code, set=True):
    """Register a custom command in the runtime.

    :param command: Name the command is invoked by
    :param flags: Command flags
    :param user_level: The minimum user level to run the command
    :param code: The Lua code for the custom command
    :param set: Should the command be set on the bot via set_command,
                set this to False when loading commands from e.g. the
                database
    :return: Tuple of (channel, command, flags, user_level, code)
    """
    if self.logger:
        message = u'Loading command {0} with user level {1}'.format(command, user_level)
        self.logger.debug(message)
    # Remember the command's metadata, then make its Lua code live.
    entry = {'flags': flags, 'user_level': user_level, 'code': code}
    self.commands[command] = entry
    self.load_lua(code)
    return (self.channel, command, flags, user_level, code)
| -1,585,351,775,516,527,400
|
Load a command in the runtime
:param command: What is the command called
:param flags: Command flags
:param user_level: The minimum user level to run the command
:param code: The Lua code for the custom command
:param set: Should the command be set on the bot via set_command,
set this to False when loading commands from e.g. the
database
:return: None
|
bot/commandmanager.py
|
load_command
|
lietu/twitch-bot
|
python
|
def load_command(self, command, flags, user_level, code, set=True):
    """Load a command in the runtime.

    :param command: What is the command called
    :param flags: Command flags
    :param user_level: The minimum user level to run the command
    :param code: The Lua code for the custom command
    :param set: Should the command be set on the bot via set_command,
                set this to False when loading commands from e.g. the
                database
    :return: Tuple of (channel, command, flags, user_level, code)
    """
    if self.logger:
        self.logger.debug(u'Loading command {0} with user level {1}'.format(command, user_level))
    # Store the command's metadata, then load its Lua code into the runtime.
    self.commands[command] = {'flags': flags, 'user_level': user_level, 'code': code}
    self.load_lua(code)
    return (self.channel, command, flags, user_level, code)
|
def run_command(self, nick, user_level, command, args=None, timestamp=None, threaded=True):
    """Handle running of custom commands from chat.

    :param nick: The calling user
    :param user_level: The calling user's level
    :param command: The command triggered
    :param args: The words on the line after the command
    :param timestamp: The unixtime for when the event happened
    :param threaded: When True the Lua function runs on a daemon thread
                     and nothing is returned; when False it runs inline
    :return: Any return value from the custom Lua command, to be sent
             back to the channel (only when threaded is False)
    :raise CommandPermissionError: If user lacks permissions for command
    :raise CommandCooldownError: If the command is still on cooldown
    """
    if (not self._can_run_command(user_level, command)):
        raise CommandPermissionError(u'User does not have permission to run this command')
    if (args is None):
        args = []
    elif ('quoted' in self.commands[command]['flags']):
        # Commands flagged "quoted" get shell-style argument splitting.
        if (self.commands[command]['flags']['quoted'] == 1):
            text = ' '.join(args)
            args = shlex.split(text)
    if (timestamp is None):
        timestamp = time.time()
    if self._is_under_cooldown(command, timestamp):
        raise CommandCooldownError()
    self._set_last_executed_time(command, timestamp)

    def run():
        # Resolve the Lua function by name and call it with the args.
        code = self.call_template.format(func_name=command)
        lua_func = self.lua.eval(code)
        if ('want_user' in self.commands[command]['flags']):
            if (self.commands[command]['flags']['want_user'] == 1):
                # The Lua function wants the caller's nick as first argument.
                args.insert(0, nick)
        return lua_func(*args)

    if threaded:
        lua_thread = Thread(target=run)
        lua_thread.daemon = True
        lua_thread.start()
    else:
        return run()
| 309,166,365,526,661,400
|
Handles running of custom commands from chat
:param nick: The calling user
:param user_level: The calling user's level
:param command: The command triggered
:param args: The words on the line after the command
:param timestamp: The unixtime for when the event happened
:return: Any return value from the custom Lua command, to be sent
back to the channel
:raise CommandPermissionError: If user lacks permissions for command
|
bot/commandmanager.py
|
run_command
|
lietu/twitch-bot
|
python
|
def run_command(self, nick, user_level, command, args=None, timestamp=None, threaded=True):
    """Handle running of custom commands from chat.

    :param nick: The calling user
    :param user_level: The calling user's level
    :param command: The command triggered
    :param args: The words on the line after the command
    :param timestamp: The unixtime for when the event happened
    :param threaded: When True the Lua function runs on a daemon thread
                     and nothing is returned; when False it runs inline
    :return: Any return value from the custom Lua command, to be sent
             back to the channel (only when threaded is False)
    :raise CommandPermissionError: If user lacks permissions for command
    :raise CommandCooldownError: If the command is still on cooldown
    """
    if (not self._can_run_command(user_level, command)):
        raise CommandPermissionError(u'User does not have permission to run this command')
    if (args is None):
        args = []
    elif ('quoted' in self.commands[command]['flags']):
        # Commands flagged "quoted" get shell-style argument splitting.
        if (self.commands[command]['flags']['quoted'] == 1):
            text = ' '.join(args)
            args = shlex.split(text)
    if (timestamp is None):
        timestamp = time.time()
    if self._is_under_cooldown(command, timestamp):
        raise CommandCooldownError()
    self._set_last_executed_time(command, timestamp)

    def run():
        # Resolve the Lua function by name and call it with the args.
        code = self.call_template.format(func_name=command)
        lua_func = self.lua.eval(code)
        if ('want_user' in self.commands[command]['flags']):
            if (self.commands[command]['flags']['want_user'] == 1):
                # The Lua function wants the caller's nick as first argument.
                args.insert(0, nick)
        return lua_func(*args)

    if threaded:
        lua_thread = Thread(target=run)
        lua_thread.daemon = True
        lua_thread.start()
    else:
        return run()
|
def load_lua(self, code):
    """Execute the given Lua source in our runtime.

    :param code: The Lua code
    :return: None
    """
    runtime = self.lua
    runtime.execute(code)
| -9,147,439,341,931,043,000
|
Load Lua code in our runtime
:param code: The Lua code
:return: None
|
bot/commandmanager.py
|
load_lua
|
lietu/twitch-bot
|
python
|
def load_lua(self, code):
    """Load Lua code in our runtime.

    :param code: The Lua code
    :return: None
    """
    self.lua.execute(code)
|
def _parse_func(self, args):
'\n Process the given arguments into a function definition\n\n :param args: List of the words after the "def" command\n :return: Function name, if it wants the caller\'s user name,\n the required user level, and the function\'s Lua code\n :raise argparse.ArgumentError: There was something wrong with the args\n '
parser = ArgumentParser()
parser.add_argument('-ul', '--user_level', default='mod')
parser.add_argument('-c', '--cooldown', default=None)
parser.add_argument('-a', '--args', default='')
parser.add_argument('-w', '--want_user', action='store_true', default=False)
parser.add_argument('-q', '--quoted', action='store_true', default=False)
parser.add_argument('func_name')
parser.add_argument('func_body', nargs='*')
options = parser.parse_args(args)
if options.want_user:
new_args = 'user'
if (len(options.args) > 0):
new_args += ','
options.args = (new_args + options.args)
code = self.func_template.format(func_name=options.func_name, args=options.args, func_body=' '.join(options.func_body))
flags = {'want_user': int(options.want_user), 'quoted': int(options.quoted), 'cooldown': (int(options.cooldown) if options.cooldown else None)}
added = bool(options.func_body)
return (added, options.func_name, flags, options.user_level, code)
| -3,807,910,362,400,559,000
|
Process the given arguments into a function definition
:param args: List of the words after the "def" command
:return: Function name, if it wants the caller's user name,
the required user level, and the function's Lua code
:raise argparse.ArgumentError: There was something wrong with the args
|
bot/commandmanager.py
|
_parse_func
|
lietu/twitch-bot
|
python
|
def _parse_func(self, args):
'\n Process the given arguments into a function definition\n\n :param args: List of the words after the "def" command\n :return: Function name, if it wants the caller\'s user name,\n the required user level, and the function\'s Lua code\n :raise argparse.ArgumentError: There was something wrong with the args\n '
parser = ArgumentParser()
parser.add_argument('-ul', '--user_level', default='mod')
parser.add_argument('-c', '--cooldown', default=None)
parser.add_argument('-a', '--args', default=)
parser.add_argument('-w', '--want_user', action='store_true', default=False)
parser.add_argument('-q', '--quoted', action='store_true', default=False)
parser.add_argument('func_name')
parser.add_argument('func_body', nargs='*')
options = parser.parse_args(args)
if options.want_user:
new_args = 'user'
if (len(options.args) > 0):
new_args += ','
options.args = (new_args + options.args)
code = self.func_template.format(func_name=options.func_name, args=options.args, func_body=' '.join(options.func_body))
flags = {'want_user': int(options.want_user), 'quoted': int(options.quoted), 'cooldown': (int(options.cooldown) if options.cooldown else None)}
added = bool(options.func_body)
return (added, options.func_name, flags, options.user_level, code)
|
def _parse_simple_func(self, args):
'\n Process the given arguments into a simple function definition\n\n :param args: List of the words after the "com" command\n :return: Function name, if it wants the caller\'s user name,\n the required user level, and the function\'s Lua code\n :raise argparse.ArgumentError: There was something wrong with the args\n '
parser = ArgumentParser()
parser.add_argument('-ul', '--user_level', default='mod')
parser.add_argument('-c', '--cooldown', default=None)
parser.add_argument('func_name')
parser.add_argument('response_text', nargs='*')
options = parser.parse_args(args)
response_text = ' '.join(options.response_text)
response_text = response_text.replace('\\', '\\\\')
response_text = response_text.replace('"', '\\"')
func_body = u'\n return SimpleCom("{response_text}", user, table.pack(...))\n '.format(response_text=response_text)
code = self.func_template.format(func_name=options.func_name, args='user,...', func_body=func_body)
flags = {'want_user': 1, 'quoted': 0, 'cooldown': (int(options.cooldown) if options.cooldown else None)}
added = bool(options.response_text)
return (added, options.func_name, flags, options.user_level, code)
| -820,046,322,053,778,800
|
Process the given arguments into a simple function definition
:param args: List of the words after the "com" command
:return: Function name, if it wants the caller's user name,
the required user level, and the function's Lua code
:raise argparse.ArgumentError: There was something wrong with the args
|
bot/commandmanager.py
|
_parse_simple_func
|
lietu/twitch-bot
|
python
|
def _parse_simple_func(self, args):
    """Process the given arguments into a simple function definition.

    :param args: List of the words after the "com" command
    :return: Tuple of (response was given, function name, flags dict,
             required user level, generated Lua code)
    :raise argparse.ArgumentError: There was something wrong with the args
    """
    parser = ArgumentParser()
    parser.add_argument('-ul', '--user_level', default='mod')
    parser.add_argument('-c', '--cooldown', default=None)
    parser.add_argument('func_name')
    parser.add_argument('response_text', nargs='*')
    options = parser.parse_args(args)
    response_text = ' '.join(options.response_text)
    # Escape backslashes and double quotes so the text survives being
    # embedded inside a double-quoted Lua string literal.
    response_text = response_text.replace('\\', '\\\\')
    response_text = response_text.replace('"', '\\"')
    func_body = u'\n return SimpleCom("{response_text}", user, table.pack(...))\n '.format(response_text=response_text)
    code = self.func_template.format(func_name=options.func_name, args='user,...', func_body=func_body)
    flags = {'want_user': 1, 'quoted': 0, 'cooldown': (int(options.cooldown) if options.cooldown else None)}
    added = bool(options.response_text)
    return (added, options.func_name, flags, options.user_level, code)
|
def _is_under_cooldown(self, command, timestamp):
"\n Check if this command's cooldown period is in effect\n :param command: Which command\n :param timestamp: What is the timestamp it was issued on\n :return:\n "
if (command in self.commands_last_executed):
if ('cooldown' in self.commands[command]['flags']):
cooldown_period = self.commands[command]['flags']['cooldown']
last_executed = self.commands_last_executed[command]
if (cooldown_period is not None):
cooldown_expires = (last_executed + cooldown_period)
if (timestamp < cooldown_expires):
return True
return False
| 962,702,915,414,738,800
|
Check if this command's cooldown period is in effect
:param command: Which command
:param timestamp: What is the timestamp it was issued on
:return:
|
bot/commandmanager.py
|
_is_under_cooldown
|
lietu/twitch-bot
|
python
|
def _is_under_cooldown(self, command, timestamp):
    """Check if this command's cooldown period is in effect.

    :param command: Which command
    :param timestamp: What is the timestamp it was issued on
    :return: True when the cooldown has not yet expired, False otherwise
    """
    if (command in self.commands_last_executed):
        if ('cooldown' in self.commands[command]['flags']):
            cooldown_period = self.commands[command]['flags']['cooldown']
            last_executed = self.commands_last_executed[command]
            # A cooldown of None means the command has no cooldown at all.
            if (cooldown_period is not None):
                cooldown_expires = (last_executed + cooldown_period)
                if (timestamp < cooldown_expires):
                    return True
    return False
|
def _set_last_executed_time(self, command, timestamp):
'\n Save the last execution time of a command\n :param command: Which command\n :param timestamp: What is the timestamp it was issued on\n :return:\n '
self.commands_last_executed[command] = timestamp
| -3,623,127,483,259,920,000
|
Save the last execution time of a command
:param command: Which command
:param timestamp: What is the timestamp it was issued on
:return:
|
bot/commandmanager.py
|
_set_last_executed_time
|
lietu/twitch-bot
|
python
|
def _set_last_executed_time(self, command, timestamp):
    """Save the last execution time of a command.

    :param command: Which command
    :param timestamp: What is the timestamp it was issued on
    :return: None
    """
    self.commands_last_executed[command] = timestamp
|
def _level_name_to_number(self, name):
'\n Convert the given user level to a number\n\n :param name: Level name\n :return: A number between 0 and Infinity, higher number is higher\n user level\n :raise ValueError: In case of invalid user level\n '
levels = ['user', 'reg', 'mod', 'owner']
if (not (name in levels)):
raise ValueError(u'{0} is not a valid user level'.format(name))
return levels.index(name)
| -4,419,965,674,233,182,700
|
Convert the given user level to a number
:param name: Level name
:return: A number between 0 and Infinity, higher number is higher
user level
:raise ValueError: In case of invalid user level
|
bot/commandmanager.py
|
_level_name_to_number
|
lietu/twitch-bot
|
python
|
def _level_name_to_number(self, name):
    """Convert the given user level to a number.

    :param name: Level name
    :return: An integer 0..3; a higher number means a higher user level
    :raise ValueError: In case of invalid user level
    """
    # Position in the list is the level's numeric rank.
    levels = ['user', 'reg', 'mod', 'owner']
    if (not (name in levels)):
        raise ValueError(u'{0} is not a valid user level'.format(name))
    return levels.index(name)
|
def _can_run_command(self, user_level, command):
"\n Check if this command can be run with the given user level\n\n :param user_level: The calling user's level\n :param command: The command being called\n :return: True of False\n "
need_level = self._level_name_to_number(self.commands[command]['user_level'])
got_level = self._level_name_to_number(user_level)
return (got_level >= need_level)
| -2,490,286,005,795,867,600
|
Check if this command can be run with the given user level
:param user_level: The calling user's level
:param command: The command being called
:return: True of False
|
bot/commandmanager.py
|
_can_run_command
|
lietu/twitch-bot
|
python
|
def _can_run_command(self, user_level, command):
    """Check if this command can be run with the given user level.

    :param user_level: The calling user's level
    :param command: The command being called
    :return: True or False
    """
    need_level = self._level_name_to_number(self.commands[command]['user_level'])
    got_level = self._level_name_to_number(user_level)
    return (got_level >= need_level)
|
def _inject_globals(self):
    """Inject some Python objects and functions into the Lua global scope _G.

    :return: None
    """
    # Lua helper that assigns a value to a name in Lua's global table _G.
    injector = self.lua.eval('\n function (key, value)\n _G[key] = value\n end\n ')

    def log(message):
        """Pass a message from Lua to the Python logger.

        :param message: The message text
        :return: None
        """
        self.logger.debug((u'Lua: ' + str(message)))

    def interval(seconds, function):
        # Repeating timer; appended to self.timers so a reference is kept.
        i = Interval(seconds, function, self.lua)
        self.timers.append(i)
        return i

    def delayed(seconds, function):
        # One-shot timer; appended to self.timers so a reference is kept.
        i = Delayed(seconds, function, self.lua)
        self.timers.append(i)
        return i

    def simple_com(text, user, args):
        # Format a canned response; `args` comes from Lua's table.pack,
        # whose 'n' key holds the element count and is skipped here.
        params = []
        if args:
            for key in args:
                if (key != 'n'):
                    params.append(args[key])
        try:
            response = text.format(*params, user=user)
        except IndexError:
            # The template referenced more positional args than were given.
            response = (user + u', invalid number of arguments.')
        return response

    # Expose the helpers and shared objects to Lua code under these names.
    injector('log', log)
    injector('datasource', self.datasource)
    injector('human_readable_time', human_readable_time)
    injector('settings', self.settings)
    injector('Chat', self.chat)
    injector('Http', Http())
    injector('TupleData', TupleData)
    injector('Interval', interval)
    injector('Delayed', delayed)
    injector('SimpleCom', simple_com)
| -6,403,042,037,872,840,000
|
Inject some Python objects and functions into the Lua global scope _G
:return: None
|
bot/commandmanager.py
|
_inject_globals
|
lietu/twitch-bot
|
python
|
def _inject_globals(self):
    """Inject some Python objects and functions into the Lua global scope _G.

    :return: None
    """
    # Lua helper that assigns a value to a name in Lua's global table _G.
    injector = self.lua.eval('\n function (key, value)\n _G[key] = value\n end\n ')

    def log(message):
        """Pass a message from Lua to the Python logger.

        :param message: The message text
        :return: None
        """
        self.logger.debug((u'Lua: ' + str(message)))

    def interval(seconds, function):
        # Repeating timer; appended to self.timers so a reference is kept.
        i = Interval(seconds, function, self.lua)
        self.timers.append(i)
        return i

    def delayed(seconds, function):
        # One-shot timer; appended to self.timers so a reference is kept.
        i = Delayed(seconds, function, self.lua)
        self.timers.append(i)
        return i

    def simple_com(text, user, args):
        # Format a canned response; `args` comes from Lua's table.pack,
        # whose 'n' key holds the element count and is skipped here.
        params = []
        if args:
            for key in args:
                if (key != 'n'):
                    params.append(args[key])
        try:
            response = text.format(*params, user=user)
        except IndexError:
            # The template referenced more positional args than were given.
            response = (user + u', invalid number of arguments.')
        return response

    # Expose the helpers and shared objects to Lua code under these names.
    injector('log', log)
    injector('datasource', self.datasource)
    injector('human_readable_time', human_readable_time)
    injector('settings', self.settings)
    injector('Chat', self.chat)
    injector('Http', Http())
    injector('TupleData', TupleData)
    injector('Interval', interval)
    injector('Delayed', delayed)
    injector('SimpleCom', simple_com)
|
def log(message):
    """Pass a message from Lua to the Python logger.

    :param message: The message text
    :return: None
    """
    # NOTE(review): `self` is not a parameter here — this function only
    # works where `self` is bound in an enclosing scope (it is a closure
    # inside _inject_globals in the original file); verify before reuse.
    self.logger.debug((u'Lua: ' + str(message)))
| 294,852,574,707,328,300
|
Pass a message from Lua to the Python logger
:param message: The message text
:return: None
|
bot/commandmanager.py
|
log
|
lietu/twitch-bot
|
python
|
def log(message):
    """Pass a message from Lua to the Python logger.

    :param message: The message text
    :return: None
    """
    # NOTE(review): `self` is not a parameter here — this function only
    # works where `self` is bound in an enclosing scope (it is a closure
    # inside _inject_globals in the original file); verify before reuse.
    self.logger.debug((u'Lua: ' + str(message)))
|
def test_basic_constants(self):
    """Check that the basic constants are imported and visible."""
    constant_names = (
        'SEEK_TO_BEGINNING',
        'DONT_SEEK',
        'SEEK_TO_END',
        'ALL_PARTITIONS_SEEK_TO_BEGINNING',
        'ALL_PARTITIONS_SEEK_TO_END',
        'ALL_PARTITIONS_DONT_SEEK',
    )
    for constant in constant_names:
        self.assertIsNotNone(getattr(ck, constant))
| -8,485,359,525,170,984,000
|
Check that the basic constants are imported and visible.
|
py/server/tests/test_kafka_consumer.py
|
test_basic_constants
|
lbooker42/deephaven-core
|
python
|
def test_basic_constants(self):
    """Check that the basic seek constants are imported and visible."""
    self.assertIsNotNone(ck.SEEK_TO_BEGINNING)
    self.assertIsNotNone(ck.DONT_SEEK)
    self.assertIsNotNone(ck.SEEK_TO_END)
    self.assertIsNotNone(ck.ALL_PARTITIONS_SEEK_TO_BEGINNING)
    self.assertIsNotNone(ck.ALL_PARTITIONS_SEEK_TO_END)
    self.assertIsNotNone(ck.ALL_PARTITIONS_DONT_SEEK)
|
def test_simple_spec(self):
    """Check a simple Kafka subscription creates the right table."""
    table = ck.consume(
        {'bootstrap.servers': 'redpanda:29092'},
        'orders',
        key_spec=KeyValueSpec.IGNORE,
        value_spec=ck.simple_spec('Price', dtypes.double),
    )
    columns = table.columns
    # Three common columns plus the single value column.
    self.assertEqual(4, len(columns))
    self._assert_common_cols(columns)
    price_column = columns[3]
    self.assertEqual('Price', price_column.name)
    self.assertEqual(dtypes.double, price_column.data_type)
| 5,960,851,227,852,433,000
|
Check a simple Kafka subscription creates the right table.
|
py/server/tests/test_kafka_consumer.py
|
test_simple_spec
|
lbooker42/deephaven-core
|
python
|
def test_simple_spec(self):
    """Check a simple Kafka subscription creates the right table."""
    t = ck.consume({'bootstrap.servers': 'redpanda:29092'}, 'orders', key_spec=KeyValueSpec.IGNORE, value_spec=ck.simple_spec('Price', dtypes.double))
    cols = t.columns
    # Three common columns plus the single value column.
    self.assertEqual(4, len(cols))
    self._assert_common_cols(cols)
    self.assertEqual('Price', cols[3].name)
    self.assertEqual(dtypes.double, cols[3].data_type)
|
def test_json_spec(self):
    """Check a JSON Kafka subscription creates the right table."""
    col_defs = [('Symbol', dtypes.string), ('Side', dtypes.string), ('Price', dtypes.double), ('Qty', dtypes.int_), ('Tstamp', dtypes.DateTime)]
    mapping = {'jsymbol': 'Symbol', 'jside': 'Side', 'jprice': 'Price', 'jqty': 'Qty', 'jts': 'Tstamp'}
    table = ck.consume(
        {'bootstrap.servers': 'redpanda:29092'},
        'orders',
        key_spec=KeyValueSpec.IGNORE,
        value_spec=ck.json_spec(col_defs, mapping=mapping),
        table_type=TableType.append(),
    )
    columns = table.columns
    self.assertEqual(8, len(columns))
    self._assert_common_cols(columns)
    # The three common columns come first; the JSON fields follow in order.
    for offset, (expected_name, expected_type) in enumerate(col_defs):
        self.assertEqual(expected_name, columns[3 + offset].name)
        self.assertEqual(expected_type, columns[3 + offset].data_type)
| 2,342,120,107,655,886,300
|
Check a JSON Kafka subscription creates the right table.
|
py/server/tests/test_kafka_consumer.py
|
test_json_spec
|
lbooker42/deephaven-core
|
python
|
def test_json_spec(self):
    """Check a JSON Kafka subscription creates the right table."""
    t = ck.consume({'bootstrap.servers': 'redpanda:29092'}, 'orders', key_spec=KeyValueSpec.IGNORE, value_spec=ck.json_spec([('Symbol', dtypes.string), ('Side', dtypes.string), ('Price', dtypes.double), ('Qty', dtypes.int_), ('Tstamp', dtypes.DateTime)], mapping={'jsymbol': 'Symbol', 'jside': 'Side', 'jprice': 'Price', 'jqty': 'Qty', 'jts': 'Tstamp'}), table_type=TableType.append())
    cols = t.columns
    # Three common columns plus the five mapped JSON fields.
    self.assertEqual(8, len(cols))
    self._assert_common_cols(cols)
    self.assertEqual('Symbol', cols[3].name)
    self.assertEqual(dtypes.string, cols[3].data_type)
    self.assertEqual('Side', cols[4].name)
    self.assertEqual(dtypes.string, cols[4].data_type)
    self.assertEqual('Price', cols[5].name)
    self.assertEqual(dtypes.double, cols[5].data_type)
    self.assertEqual('Qty', cols[6].name)
    self.assertEqual(dtypes.int_, cols[6].data_type)
    self.assertEqual('Tstamp', cols[7].name)
    self.assertEqual(dtypes.DateTime, cols[7].data_type)
|
def test_avro_spec(self):
    """Check an Avro Kafka subscription creates the right table."""
    schema = '\n { "type" : "record",\n "namespace" : "io.deephaven.examples",\n "name" : "share_price",\n "fields" : [\n { "name" : "Symbol", "type" : "string" },\n { "name" : "Side", "type" : "string" },\n { "name" : "Qty", "type" : "int" },\n { "name" : "Price", "type" : "double" }\n ]\n }\n '
    # Register the schema with the Redpanda schema registry via curl;
    # quotes/newlines are escaped to survive JSON and shell embedding.
    schema_str = ('{ "schema" : "%s" }' % schema.replace('\n', ' ').replace('"', '\\"'))
    sys_str = ("\n curl -X POST -H 'Content-type: application/vnd.schemaregistry.v1+json; artifactType=AVRO' --data-binary '%s' http://redpanda:8081/subjects/share_price_record/versions\n " % schema_str)
    r = os.system(sys_str)
    # Exit status 0 means the registry accepted the schema.
    self.assertEqual(0, r)
    with self.subTest(msg='straight schema, no mapping'):
        t = ck.consume({'bootstrap.servers': 'redpanda:29092', 'schema.registry.url': 'http://redpanda:8081'}, 'share_price', key_spec=KeyValueSpec.IGNORE, value_spec=ck.avro_spec('share_price_record', schema_version='1'), table_type=TableType.append())
        cols = t.columns
        self.assertEqual(7, len(cols))
        self._assert_common_cols(cols)
        self.assertEqual('Symbol', cols[3].name)
        self.assertEqual(dtypes.string, cols[3].data_type)
        self.assertEqual('Side', cols[4].name)
        self.assertEqual(dtypes.string, cols[4].data_type)
        self.assertEqual('Qty', cols[5].name)
        self.assertEqual(dtypes.int32, cols[5].data_type)
        self.assertEqual('Price', cols[6].name)
        self.assertEqual(dtypes.double, cols[6].data_type)
    with self.subTest(msg='mapping_only (filter out some schema fields)'):
        # mapped_only=True keeps only the mapped fields, dropping the rest.
        m = {'Symbol': 'Ticker', 'Price': 'Dollars'}
        t = ck.consume({'bootstrap.servers': 'redpanda:29092', 'schema.registry.url': 'http://redpanda:8081'}, 'share_price', key_spec=KeyValueSpec.IGNORE, value_spec=ck.avro_spec('share_price_record', mapping=m, mapped_only=True), table_type=TableType.append())
        cols = t.columns
        self.assertEqual(5, len(cols))
        self._assert_common_cols(cols)
        self.assertEqual('Ticker', cols[3].name)
        self.assertEqual(dtypes.string, cols[3].data_type)
        self.assertEqual('Dollars', cols[4].name)
        self.assertEqual(dtypes.double, cols[4].data_type)
    with self.subTest(msg='mapping (rename some fields)'):
        # Without mapped_only, unmapped fields keep their original names.
        m = {'Symbol': 'Ticker', 'Qty': 'Quantity'}
        t = ck.consume({'bootstrap.servers': 'redpanda:29092', 'schema.registry.url': 'http://redpanda:8081'}, 'share_price', key_spec=KeyValueSpec.IGNORE, value_spec=ck.avro_spec('share_price_record', mapping=m), table_type=TableType.append())
        cols = t.columns
        self.assertEqual(7, len(cols))
        self._assert_common_cols(cols)
        self.assertEqual('Ticker', cols[3].name)
        self.assertEqual(dtypes.string, cols[3].data_type)
        self.assertEqual('Side', cols[4].name)
        self.assertEqual(dtypes.string, cols[4].data_type)
        self.assertEqual('Quantity', cols[5].name)
        self.assertEqual(dtypes.int32, cols[5].data_type)
        self.assertEqual('Price', cols[6].name)
        self.assertEqual(dtypes.double, cols[6].data_type)
| 485,627,723,911,774,850
|
Check an Avro Kafka subscription creates the right table.
|
py/server/tests/test_kafka_consumer.py
|
test_avro_spec
|
lbooker42/deephaven-core
|
python
|
def test_avro_spec(self):
    """Check an Avro Kafka subscription creates the right table."""
    schema = '\n { "type" : "record",\n "namespace" : "io.deephaven.examples",\n "name" : "share_price",\n "fields" : [\n { "name" : "Symbol", "type" : "string" },\n { "name" : "Side", "type" : "string" },\n { "name" : "Qty", "type" : "int" },\n { "name" : "Price", "type" : "double" }\n ]\n }\n '
    # Register the schema with the Redpanda schema registry via curl;
    # quotes/newlines are escaped to survive JSON and shell embedding.
    schema_str = ('{ "schema" : "%s" }' % schema.replace('\n', ' ').replace('"', '\\"'))
    sys_str = ("\n curl -X POST -H 'Content-type: application/vnd.schemaregistry.v1+json; artifactType=AVRO' --data-binary '%s' http://redpanda:8081/subjects/share_price_record/versions\n " % schema_str)
    r = os.system(sys_str)
    # Exit status 0 means the registry accepted the schema.
    self.assertEqual(0, r)
    with self.subTest(msg='straight schema, no mapping'):
        t = ck.consume({'bootstrap.servers': 'redpanda:29092', 'schema.registry.url': 'http://redpanda:8081'}, 'share_price', key_spec=KeyValueSpec.IGNORE, value_spec=ck.avro_spec('share_price_record', schema_version='1'), table_type=TableType.append())
        cols = t.columns
        self.assertEqual(7, len(cols))
        self._assert_common_cols(cols)
        self.assertEqual('Symbol', cols[3].name)
        self.assertEqual(dtypes.string, cols[3].data_type)
        self.assertEqual('Side', cols[4].name)
        self.assertEqual(dtypes.string, cols[4].data_type)
        self.assertEqual('Qty', cols[5].name)
        self.assertEqual(dtypes.int32, cols[5].data_type)
        self.assertEqual('Price', cols[6].name)
        self.assertEqual(dtypes.double, cols[6].data_type)
    with self.subTest(msg='mapping_only (filter out some schema fields)'):
        # mapped_only=True keeps only the mapped fields, dropping the rest.
        m = {'Symbol': 'Ticker', 'Price': 'Dollars'}
        t = ck.consume({'bootstrap.servers': 'redpanda:29092', 'schema.registry.url': 'http://redpanda:8081'}, 'share_price', key_spec=KeyValueSpec.IGNORE, value_spec=ck.avro_spec('share_price_record', mapping=m, mapped_only=True), table_type=TableType.append())
        cols = t.columns
        self.assertEqual(5, len(cols))
        self._assert_common_cols(cols)
        self.assertEqual('Ticker', cols[3].name)
        self.assertEqual(dtypes.string, cols[3].data_type)
        self.assertEqual('Dollars', cols[4].name)
        self.assertEqual(dtypes.double, cols[4].data_type)
    with self.subTest(msg='mapping (rename some fields)'):
        # Without mapped_only, unmapped fields keep their original names.
        m = {'Symbol': 'Ticker', 'Qty': 'Quantity'}
        t = ck.consume({'bootstrap.servers': 'redpanda:29092', 'schema.registry.url': 'http://redpanda:8081'}, 'share_price', key_spec=KeyValueSpec.IGNORE, value_spec=ck.avro_spec('share_price_record', mapping=m), table_type=TableType.append())
        cols = t.columns
        self.assertEqual(7, len(cols))
        self._assert_common_cols(cols)
        self.assertEqual('Ticker', cols[3].name)
        self.assertEqual(dtypes.string, cols[3].data_type)
        self.assertEqual('Side', cols[4].name)
        self.assertEqual(dtypes.string, cols[4].data_type)
        self.assertEqual('Quantity', cols[5].name)
        self.assertEqual(dtypes.int32, cols[5].data_type)
        self.assertEqual('Price', cols[6].name)
        self.assertEqual(dtypes.double, cols[6].data_type)
|
@unittest.skip('https://github.com/deephaven/deephaven-core/pull/2277')
def test_deprecated_table_types(self):
    """Tests to make sure deprecated TableTypes are equivalent."""
    # Each factory call must equal its deprecated class-attribute form.
    for factory, deprecated in ((TableType.append, TableType.Append), (TableType.stream, TableType.Stream)):
        self.assertEqual(factory(), deprecated)
| -4,242,480,715,210,158,600
|
Tests to make sure deprecated TableTypes are equivalent
|
py/server/tests/test_kafka_consumer.py
|
test_deprecated_table_types
|
lbooker42/deephaven-core
|
python
|
@unittest.skip('https://github.com/deephaven/deephaven-core/pull/2277')
def test_deprecated_table_types(self):
    """Tests to make sure deprecated TableTypes are equivalent."""
    self.assertEqual(TableType.append(), TableType.Append)
    self.assertEqual(TableType.stream(), TableType.Stream)
|
def test_table_types(self):
    """Tests TableType construction."""
    # Constructing each type must simply succeed; the values are unused.
    for construct in (TableType.append, TableType.stream):
        construct()
    TableType.ring(4096)
| 2,499,055,364,101,107,700
|
Tests TableType construction
|
py/server/tests/test_kafka_consumer.py
|
test_table_types
|
lbooker42/deephaven-core
|
python
|
def test_table_types(self):
    """Tests TableType construction."""
    # Constructing each type must simply succeed; the values are unused.
    _ = TableType.append()
    _ = TableType.stream()
    _ = TableType.ring(4096)
|
@classmethod
def from_dict(cls, dict_obj):
    """Creates an Agent object from parameters stored in a dict.

    AgentSchema is used to validate the inputs; the 'paw' field may be
    absent (partial load).
    """
    validated = cls.AgentSchema().load(dict_obj, partial=['paw'])
    return cls(**validated)
| -4,173,985,643,143,858,000
|
Creates an Agent object from parameters stored in a dict. AgentSchema is used to validate inputs.
|
app/objects/c_agent.py
|
from_dict
|
zaphodef/caldera
|
python
|
@classmethod
def from_dict(cls, dict_obj):
    """Creates an Agent object from parameters stored in a dict.

    AgentSchema is used to validate the inputs; the 'paw' field may be
    absent (partial load).
    """
    return cls(**cls.AgentSchema().load(dict_obj, partial=['paw']))
|
def parse_duration(value: str) -> datetime.timedelta:
    """Parse a duration string and return a datetime.timedelta.

    Args:
        value (str): A time duration given as text. The preferred format for
            durations is '%d %H:%M:%S.%f'. This function also supports ISO 8601
            representation and PostgreSQL's day-time interval format.

    Returns:
        datetime.timedelta: An instance representing the duration.
    """
    # Try each supported format in order of preference.
    match = (
        standard_duration_re.match(value)
        or iso8601_duration_re.match(value)
        or postgres_interval_re.match(value)
    )
    if not match:
        raise ValueError(f'The time duration {value} cannot be parsed.')

    kw = match.groupdict()
    # Days are handled separately so the sign applies only to the rest.
    days = datetime.timedelta(float(kw.pop('days', 0) or 0))
    sign = -1 if kw.pop('sign', '+') == '-' else 1
    if kw.get('microseconds'):
        # Right-pad the fraction to full microsecond precision.
        kw['microseconds'] = kw['microseconds'].ljust(6, '0')
    if kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'):
        # Negative seconds drag the microsecond fraction along.
        kw['microseconds'] = '-' + kw['microseconds']
    fields = {name: float(num) for name, num in kw.items() if num is not None}
    return days + sign * datetime.timedelta(**fields)
| -5,307,280,787,856,276,000
|
Parse a duration string and return a datetime.timedelta.
Args:
value (str): A time duration given as text. The preferred format for
durations is '%d %H:%M:%S.%f'. This function also supports ISO 8601
representation and PostgreSQL's day-time interval format.
Returns:
datetime.timedelta: An instance representing the duration.
|
pde/tools/parse_duration.py
|
parse_duration
|
lmenou/py-pde
|
python
|
def parse_duration(value: str) -> datetime.timedelta:
"Parse a duration string and return a datetime.timedelta.\n\n Args:\n value (str): A time duration given as text. The preferred format for\n durations is '%d %H:%M:%S.%f'. This function also supports ISO 8601\n representation and PostgreSQL's day-time interval format.\n\n Returns:\n datetime.timedelta: An instance representing the duration.\n "
match = (standard_duration_re.match(value) or iso8601_duration_re.match(value) or postgres_interval_re.match(value))
if match:
kw = match.groupdict()
days = datetime.timedelta(float((kw.pop('days', 0) or 0)))
sign = ((- 1) if (kw.pop('sign', '+') == '-') else 1)
if kw.get('microseconds'):
kw['microseconds'] = kw['microseconds'].ljust(6, '0')
if (kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-')):
kw['microseconds'] = ('-' + kw['microseconds'])
kw = {k: float(v) for (k, v) in kw.items() if (v is not None)}
return (days + (sign * datetime.timedelta(**kw)))
else:
raise ValueError(f'The time duration {value} cannot be parsed.')
|
@query_many_property
def local_modules(self):
    """Return a SQLAlchemy query for modules located under the project base path."""
    pattern = '%{}%'.format(persistence_config.base_path)
    return self.modules.filter(Module.m.path.like(pattern))
| 34,532,395,116,731,730
|
Load local modules. Return SQLAlchemy query
|
capture/noworkflow/now/persistence/models/trial.py
|
local_modules
|
raffaelfoidl/noworkflow
|
python
|
@query_many_property
def local_modules(self):
return self.modules.filter(Module.m.path.like('%{}%'.format(persistence_config.base_path)))
|
@query_many_property
def modules(self):
    """Return a SQLAlchemy query of modules, delegating to the inherited trial when one exists."""
    inherited = self.inherited
    return inherited.modules if inherited else self.dmodules
| -2,528,479,259,529,810,400
|
Load modules. Return SQLAlchemy query
|
capture/noworkflow/now/persistence/models/trial.py
|
modules
|
raffaelfoidl/noworkflow
|
python
|
@query_many_property
def modules(self):
if self.inherited:
return self.inherited.modules
return self.dmodules
|
@query_many_property
def dependencies(self):
    """Return a SQLAlchemy query of module dependencies, delegating to the inherited trial when one exists."""
    inherited = self.inherited
    return inherited.dependencies if inherited else self.module_dependencies
| -7,273,323,766,861,591,000
|
Load modules. Return SQLAlchemy query
|
capture/noworkflow/now/persistence/models/trial.py
|
dependencies
|
raffaelfoidl/noworkflow
|
python
|
@query_many_property
def dependencies(self):
if self.inherited:
return self.inherited.dependencies
return self.module_dependencies
|
@query_many_property
def initial_activations(self):
    """Return a SQLAlchemy query of activations that have no caller (trial entry points)."""
    no_caller = is_none(Activation.m.caller_id)
    return self.activations.filter(no_caller)
| -7,233,818,596,856,919,000
|
Return initial activation as a SQLAlchemy query
|
capture/noworkflow/now/persistence/models/trial.py
|
initial_activations
|
raffaelfoidl/noworkflow
|
python
|
@query_many_property
def initial_activations(self):
return self.activations.filter(is_none(Activation.m.caller_id))
|
@property
def prolog_variables(self):
    """Return filtered prolog variables, building the visitor lazily on first access."""
    if not self._prolog_visitor:
        dep_filter = self.dependency_filter
        dep_filter.run()
        self._prolog_visitor = PrologVisitor(dep_filter)
        self._prolog_visitor.visit(dep_filter.main_cluster)
    return self._prolog_visitor
| -397,316,024,569,532,600
|
Return filtered prolog variables
|
capture/noworkflow/now/persistence/models/trial.py
|
prolog_variables
|
raffaelfoidl/noworkflow
|
python
|
@property
def prolog_variables(self):
if (not self._prolog_visitor):
self.dependency_filter.run()
self._prolog_visitor = PrologVisitor(self.dependency_filter)
self._prolog_visitor.visit(self.dependency_filter.main_cluster)
return self._prolog_visitor
|
@property
def script_content(self):
    """Return the "main" script content of the trial as PrettyLines.

    Bug fix: the content was split on the two-character literal '/n',
    which never occurs as a line separator; the intended separator is
    the newline character '\n'.
    """
    source = content.get(self.code_hash).decode('utf-8')
    return PrettyLines(source.split('\n'))
| 5,959,198,191,165,273,000
|
Return the "main" script content of the trial
|
capture/noworkflow/now/persistence/models/trial.py
|
script_content
|
raffaelfoidl/noworkflow
|
python
|
@property
def script_content(self):
return PrettyLines(content.get(self.code_hash).decode('utf-8').split('/n'))
|
@property
def finished(self):
    """True when the trial recorded a (truthy) finish time."""
    return True if self.finish else False
| 4,549,426,415,557,629,400
|
Check if trial has finished
|
capture/noworkflow/now/persistence/models/trial.py
|
finished
|
raffaelfoidl/noworkflow
|
python
|
@property
def finished(self):
return bool(self.finish)
|
@property
def status(self):
    """Trial status: 'backup' when not created by run, else 'finished'/'unfinished'."""
    if not self.run:
        return 'backup'
    if self.finished:
        return 'finished'
    return 'unfinished'
| 3,606,235,992,606,115,000
|
Check trial status
Possible statuses: finished, unfinished, backup
|
capture/noworkflow/now/persistence/models/trial.py
|
status
|
raffaelfoidl/noworkflow
|
python
|
@property
def status(self):
'Check trial status\n Possible statuses: finished, unfinished, backup'
if (not self.run):
return 'backup'
return ('finished' if self.finished else 'unfinished')
|
@property
def duration(self):
    """Trial duration in microseconds (0 when the trial never finished)."""
    if not self.finish:
        return 0
    elapsed = self.finish - self.start
    return int(elapsed.total_seconds() * 1000000)
| -3,282,620,830,210,900,000
|
Calculate trial duration. Return microseconds
|
capture/noworkflow/now/persistence/models/trial.py
|
duration
|
raffaelfoidl/noworkflow
|
python
|
@property
def duration(self):
if self.finish:
return int(((self.finish - self.start).total_seconds() * 1000000))
return 0
|
@property
def duration_text(self):
    """Formatted trial duration, or the string 'None' when unfinished."""
    if not self.finish:
        return 'None'
    return str(self.finish - self.start)
| -530,437,388,138,204,860
|
Calculate trial duration. Return formatted str
|
capture/noworkflow/now/persistence/models/trial.py
|
duration_text
|
raffaelfoidl/noworkflow
|
python
|
@property
def duration_text(self):
if self.finish:
return str((self.finish - self.start))
return 'None'
|
@property
def environment(self):
    """Map environment variable name -> value for this trial."""
    result = {}
    for attr in self.environment_attrs:
        result[attr.name] = attr.value
    return result
| 3,438,829,612,525,810,700
|
Return dict: environment variables -> value
|
capture/noworkflow/now/persistence/models/trial.py
|
environment
|
raffaelfoidl/noworkflow
|
python
|
@property
def environment(self):
return {e.name: e.value for e in self.environment_attrs}
|
def versioned_files(self, skip_script=False, skip_local=False, skip_access=False):
    """Find the first files accessed in a trial.

    Returns a map of relative path -> info dict (keys: 'code_hash',
    'type', and 'name' for modules). Possible types: script, module,
    access.
    """
    found = {}

    def register(path, info):
        # Keep only files inside the project base path; store relative paths.
        if os.path.isabs(path):
            if persistence_config.base_path not in path:
                return
            path = os.path.relpath(path, persistence_config.base_path)
        found[path] = info

    if not skip_script:
        register(self.script, {'code_hash': self.code_hash, 'type': 'script'})
    if not skip_local:
        for mod in self.local_modules:
            register(mod.path, {'code_hash': mod.code_hash, 'type': 'module', 'name': mod.name})
    if not skip_access:
        # Process in reverse so the FIRST access to each path wins.
        for access in reversed(list(self.file_accesses)):
            register(access.name, {'code_hash': access.content_hash_before, 'type': 'access'})
    return found
| 7,804,030,863,116,041,000
|
Find first files accessed in a trial
Return map with relative path -> (code_hash, type)
Possible types: script, module, access
|
capture/noworkflow/now/persistence/models/trial.py
|
versioned_files
|
raffaelfoidl/noworkflow
|
python
|
def versioned_files(self, skip_script=False, skip_local=False, skip_access=False):
'Find first files accessed in a trial\n Return map with relative path -> (code_hash, type)\n\n Possible types: script, module, access\n '
files = {}
def add(path, info):
'Add file to dict'
if os.path.isabs(path):
if (not (persistence_config.base_path in path)):
return
path = os.path.relpath(path, persistence_config.base_path)
files[path] = info
if (not skip_script):
add(self.script, {'code_hash': self.code_hash, 'type': 'script'})
if (not skip_local):
for module in self.local_modules:
add(module.path, {'code_hash': module.code_hash, 'type': 'module', 'name': module.name})
if (not skip_access):
for faccess in reversed(list(self.file_accesses)):
add(faccess.name, {'code_hash': faccess.content_hash_before, 'type': 'access'})
return files
|
def iterate_accesses(self, path=None):
    """Yield (path, info) pairs for every access to *path* (all paths when None)."""
    def matches(name):
        return not path or name.endswith(path)

    if matches(self.script):
        yield self.script, {'code_hash': self.code_hash, 'type': 'script'}
    for mod in self.local_modules:
        if matches(mod.path):
            yield mod.path, {'code_hash': mod.code_hash, 'type': 'module', 'name': mod.name}
    for access in list(self.file_accesses):
        if matches(access.name):
            yield access.name, {'code_hash': access.content_hash_before, 'type': 'access'}
            yield access.name, {'code_hash': access.content_hash_after, 'type': 'access'}
| 5,028,473,455,695,651,000
|
Iterate on all access to a path
|
capture/noworkflow/now/persistence/models/trial.py
|
iterate_accesses
|
raffaelfoidl/noworkflow
|
python
|
def iterate_accesses(self, path=None):
if ((not path) or self.script.endswith(path)):
(yield (self.script, {'code_hash': self.code_hash, 'type': 'script'}))
for module in self.local_modules:
if ((not path) or module.path.endswith(path)):
(yield (module.path, {'code_hash': module.code_hash, 'type': 'module', 'name': module.name}))
for faccess in list(self.file_accesses):
if ((not path) or faccess.name.endswith(path)):
(yield (faccess.name, {'code_hash': faccess.content_hash_before, 'type': 'access'}))
(yield (faccess.name, {'code_hash': faccess.content_hash_after, 'type': 'access'}))
|
def create_head(self):
    """Make this trial the head for its script, replacing any previous head."""
    session = relational.make_session()
    script = self.script
    # Remove the old head row for the same script before inserting the new one.
    session.query(Head.m).filter(Head.m.script == script).delete()
    session.add(Head.m(trial_id=self.id, script=script))
    session.commit()
| -2,086,893,957,433,139,000
|
Create head for this trial
|
capture/noworkflow/now/persistence/models/trial.py
|
create_head
|
raffaelfoidl/noworkflow
|
python
|
def create_head(self):
session = relational.make_session()
session.query(Head.m).filter((Head.m.script == self.script)).delete()
session.add(Head.m(trial_id=self.id, script=self.script))
session.commit()
|
def query(self, query):
    """Run a prolog query and return its result."""
    engine = self.prolog
    return engine.query(query)
| 1,142,936,998,032,021,000
|
Run prolog query
|
capture/noworkflow/now/persistence/models/trial.py
|
query
|
raffaelfoidl/noworkflow
|
python
|
def query(self, query):
return self.prolog.query(query)
|
def _ipython_display_(self):
'Display history graph'
if hasattr(self, 'graph'):
return self.graph._ipython_display_()
from IPython.display import display
display({'text/plain': 'Trial {}'.format(self.id)}, raw=True)
| 4,845,852,689,895,969,000
|
Display history graph
|
capture/noworkflow/now/persistence/models/trial.py
|
_ipython_display_
|
raffaelfoidl/noworkflow
|
python
|
def _ipython_display_(self):
if hasattr(self, 'graph'):
return self.graph._ipython_display_()
from IPython.display import display
display({'text/plain': 'Trial {}'.format(self.id)}, raw=True)
|
def show(self, _print=print):
    """Print trial information using the given print function."""
    text = ' Id: {t.id}\n Inherited Id: {t.inherited_id}\n Script: {t.script}\n Code hash: {t.code_hash}\n Start: {t.start}\n Finish: {t.finish}\n Duration: {t.duration_text} '.format(t=self)
    _print(text)
| -3,123,170,109,015,617,500
|
Print trial information
|
capture/noworkflow/now/persistence/models/trial.py
|
show
|
raffaelfoidl/noworkflow
|
python
|
def show(self, _print=print):
_print(' Id: {t.id}\n Inherited Id: {t.inherited_id}\n Script: {t.script}\n Code hash: {t.code_hash}\n Start: {t.start}\n Finish: {t.finish}\n Duration: {t.duration_text} '.format(t=self))
|
@classmethod
def distinct_scripts(cls):
    """Return the set of distinct script basenames across all trials."""
    rows = relational.session.query(distinct(cls.m.script))
    return {row[0].rsplit('/', 1)[-1] for row in rows}
| -6,364,750,551,788,056,000
|
Return a set with distinct scripts
|
capture/noworkflow/now/persistence/models/trial.py
|
distinct_scripts
|
raffaelfoidl/noworkflow
|
python
|
@classmethod
def distinct_scripts(cls):
return {s[0].rsplit('/', 1)[(- 1)] for s in relational.session.query(distinct(cls.m.script))}
|
@classmethod
def reverse_trials(cls, limit, session=None):
    """Return a generator with up to *limit* trials ordered by start time, newest first."""
    session = session or relational.session
    ordered = session.query(cls.m).order_by(cls.m.start.desc())
    return proxy_gen(ordered.limit(limit))
| -3,460,930,242,788,007,000
|
Return a generator with <limit> trials ordered by start time desc
|
capture/noworkflow/now/persistence/models/trial.py
|
reverse_trials
|
raffaelfoidl/noworkflow
|
python
|
@classmethod
def reverse_trials(cls, limit, session=None):
session = (session or relational.session)
return proxy_gen(session.query(cls.m).order_by(cls.m.start.desc()).limit(limit))
|
@classmethod
def last_trial(cls, script=None, parent_required=False, session=None):
    """Return the most recent trial by start time.

    Keyword arguments:
    script -- restrict the first lookup to this script (default=None)
    parent_required -- if True, do not fall back past the script filter (default=False)
    session -- session for loading (default=relational.session)
    """
    model = cls.m
    session = session or relational.session
    newest_for_script = select([func.max(model.start)]).where(model.script == script)
    trial = session.query(model).filter(model.start.in_(newest_for_script)).first()
    if trial or parent_required:
        return trial
    # No trial for this script: fall back to the newest trial overall.
    newest_overall = select([func.max(model.start)])
    return session.query(model).filter(model.start.in_(newest_overall)).first()
| 5,395,468,279,525,787,000
|
Return last trial according to start time
Keyword arguments:
script -- specify the desired script (default=None)
parent_required -- valid only if script exists (default=False)
|
capture/noworkflow/now/persistence/models/trial.py
|
last_trial
|
raffaelfoidl/noworkflow
|
python
|
@classmethod
def last_trial(cls, script=None, parent_required=False, session=None):
'Return last trial according to start time\n\n Keyword arguments:\n script -- specify the desired script (default=None)\n parent_required -- valid only if script exists (default=False)\n '
model = cls.m
session = (session or relational.session)
trial = session.query(model).filter(model.start.in_(select([func.max(model.start)]).where((model.script == script)))).first()
if (trial or parent_required):
return trial
return session.query(model).filter(model.start.in_(select([func.max(model.start)]))).first()
|
@classmethod
def find_by_name_and_time(cls, script, timestamp, trial=None, session=None):
    """Return the first trial for *script* whose start or finish begins with *timestamp*.

    Keyword arguments:
    trial -- limit the query to a specific trial id (default=None)
    session -- session for loading (default=relational.session)
    """
    model = cls.m
    session = session or relational.session
    prefix = timestamp + '%'
    time_matches = model.start.like(prefix) | model.finish.like(prefix)
    query = session.query(model).filter((model.script == script) & time_matches).order_by(model.start)
    if trial:
        query = query.filter(model.id == trial)
    return proxy(query.first())
| -5,874,113,822,201,334,000
|
Return the first trial according to script and timestamp
Arguments:
script -- specify the desired script
timestamp -- specify the start of finish time of trial
Keyword Arguments:
trial -- limit query to a specific trial
|
capture/noworkflow/now/persistence/models/trial.py
|
find_by_name_and_time
|
raffaelfoidl/noworkflow
|
python
|
@classmethod
def find_by_name_and_time(cls, script, timestamp, trial=None, session=None):
'Return the first trial according to script and timestamp\n\n Arguments:\n script -- specify the desired script\n timestamp -- specify the start of finish time of trial\n\n Keyword Arguments:\n trial -- limit query to a specific trial\n '
model = cls.m
session = (session or relational.session)
query = session.query(model).filter(((model.script == script) & (model.start.like((timestamp + '%')) | model.finish.like((timestamp + '%'))))).order_by(model.start)
if trial:
query = query.filter((model.id == trial))
return proxy(query.first())
|
@classmethod
def load_trial(cls, trial_ref, session=None):
    """Load a trial by reference, matching either its id or a tag name."""
    from .tag import Tag
    session = session or relational.session
    by_id_or_tag = (cls.m.id == trial_ref) | (Tag.m.name == trial_ref)
    return session.query(cls.m).outerjoin(Tag.m).filter(by_id_or_tag).first()
| -3,829,412,513,214,292,000
|
Load trial by trial reference
Find reference on trials id and tags name
|
capture/noworkflow/now/persistence/models/trial.py
|
load_trial
|
raffaelfoidl/noworkflow
|
python
|
@classmethod
def load_trial(cls, trial_ref, session=None):
'Load trial by trial reference\n\n Find reference on trials id and tags name\n '
from .tag import Tag
session = (session or relational.session)
return session.query(cls.m).outerjoin(Tag.m).filter(((cls.m.id == trial_ref) | (Tag.m.name == trial_ref))).first()
|
@classmethod
def load_parent(cls, script, remove=True, parent_required=False, session=None):
    """Load the head trial for *script*, falling back to the last trial.

    Keyword arguments:
    remove -- remove the head entry after loading (default=True)
    parent_required -- only fall back within the same script (default=False)
    session -- session for loading (default=relational.session)
    """
    session = session or relational.session
    head = Head.load_head(script, session=session)
    if head:
        trial = head.trial
        if remove:
            # A fresh session is used for the delete to avoid clobbering the read session.
            Head.remove(head.id, session=relational.make_session())
    else:
        trial = cls.last_trial(script=script, parent_required=parent_required, session=session)
    return proxy(trial)
| -5,441,716,824,521,170,000
|
Load head trial by script
Keyword arguments:
remove -- remove from head, after loading (default=True)
parent_required -- valid only if script exists (default=False)
session -- specify session for loading (default=relational.session)
|
capture/noworkflow/now/persistence/models/trial.py
|
load_parent
|
raffaelfoidl/noworkflow
|
python
|
@classmethod
def load_parent(cls, script, remove=True, parent_required=False, session=None):
'Load head trial by script\n\n\n Keyword arguments:\n remove -- remove from head, after loading (default=True)\n parent_required -- valid only if script exists (default=False)\n session -- specify session for loading (default=relational.session)\n '
session = (session or relational.session)
head = Head.load_head(script, session=session)
if head:
trial = head.trial
if remove:
Head.remove(head.id, session=relational.make_session())
elif (not head):
trial = cls.last_trial(script=script, parent_required=parent_required, session=session)
return proxy(trial)
|
@classmethod
def fast_last_trial_id(cls, session=None):
    """Load the id of the last trial that did not bypass modules.

    The SQLAlchemy core query is compiled to a string once and cached on
    the class for reuse on later calls.

    Keyword arguments:
    session -- session for loading (default=relational.session)

    Raises:
    RuntimeError -- when no previous trial exists.
    """
    session = session or relational.session
    # Bug fix: the guard checked '_last_trial_id' while the value was stored
    # as 'last_trial_id', so the compiled query was rebuilt on every call.
    if not hasattr(cls, '_last_trial_id'):
        ttrial = cls.t
        _query = select([ttrial.c.id]).where(ttrial.c.start.in_(
            select([func.max(ttrial.c.start)])
            .select_from(ttrial)
            .where(is_none(ttrial.c.inherited_id))))
        cls._last_trial_id = str(_query)
    an_id = session.execute(cls._last_trial_id).fetchone()
    if not an_id:
        raise RuntimeError('Not able to bypass modules check because no previous trial was found')
    return an_id[0]
| 3,566,533,528,998,085,000
|
Load last trial id that did not bypass modules
Compile SQLAlchemy core query into string for optimization
Keyword arguments:
session -- specify session for loading (default=relational.session)
|
capture/noworkflow/now/persistence/models/trial.py
|
fast_last_trial_id
|
raffaelfoidl/noworkflow
|
python
|
@classmethod
def fast_last_trial_id(cls, session=None):
'Load last trial id that did not bypass modules\n\n\n Compile SQLAlchemy core query into string for optimization\n\n Keyword arguments:\n session -- specify session for loading (default=relational.session)\n '
session = (session or relational.session)
if (not hasattr(cls, '_last_trial_id')):
ttrial = cls.t
_query = select([ttrial.c.id]).where(ttrial.c.start.in_(select([func.max(ttrial.c.start)]).select_from(ttrial).where(is_none(ttrial.c.inherited_id))))
cls.last_trial_id = str(_query)
an_id = session.execute(cls.last_trial_id).fetchone()
if (not an_id):
raise RuntimeError('Not able to bypass modules check because no previous trial was found')
return an_id[0]
|
@classmethod
def fast_update(cls, trial_id, finish, docstring, session=None):
    """Update finish time and docstring of a trial using core SQLAlchemy.

    Arguments:
    trial_id -- trial id
    finish -- finish time as a datetime object
    docstring -- main script docstring

    Keyword arguments:
    session -- session for loading (default=relational.session)
    """
    session = session or relational.session
    ttrial = cls.t
    stmt = ttrial.update().values(finish=finish, docstring=docstring).where(ttrial.c.id == trial_id)
    session.execute(stmt)
    session.commit()
| 7,144,367,229,818,227,000
|
Update finish time of trial
Use core sqlalchemy
Arguments:
trial_id -- trial id
finish -- finish time as a datetime object
Keyword arguments:
session -- specify session for loading (default=relational.session)
|
capture/noworkflow/now/persistence/models/trial.py
|
fast_update
|
raffaelfoidl/noworkflow
|
python
|
@classmethod
def fast_update(cls, trial_id, finish, docstring, session=None):
'Update finish time of trial\n\n Use core sqlalchemy\n\n Arguments:\n trial_id -- trial id\n finish -- finish time as a datetime object\n\n\n Keyword arguments:\n session -- specify session for loading (default=relational.session)\n '
session = (session or relational.session)
ttrial = cls.t
session.execute(ttrial.update().values(finish=finish, docstring=docstring).where((ttrial.c.id == trial_id)))
session.commit()
|
@classmethod
def store(cls, start, script, code_hash, arguments, bypass_modules, command, run, docstring, session=None):
    """Create a trial row and return its new id (core SQLAlchemy).

    Arguments:
    start -- trial start time
    script -- script name
    code_hash -- script hash code
    arguments -- trial arguments
    bypass_modules -- whether module capture was bypassed
    command -- full command line with noWorkflow parameters
    run -- trial created by the run command
    docstring -- main script docstring

    Keyword arguments:
    session -- session for loading (default=relational.session)
    """
    session = session or relational.session
    parent = cls.load_parent(script, parent_required=True)
    parent_id = parent.id if parent else None
    # Inherit the previous trial's modules when bypassing module capture.
    inherited_id = cls.fast_last_trial_id() if bypass_modules else None
    ttrial = cls.__table__
    row = {
        'start': start, 'script': script, 'code_hash': code_hash,
        'arguments': arguments, 'command': command, 'run': run,
        'inherited_id': inherited_id, 'parent_id': parent_id,
        'docstring': docstring,
    }
    result = session.execute(ttrial.insert(), row)
    tid = result.lastrowid
    session.commit()
    return tid
| -5,175,056,888,793,935,000
|
Create trial and assign a new id to it
Use core sqlalchemy
Arguments:
start -- trial start time
script -- script name
code_hash -- script hash code
arguments -- trial arguments
bypass_modules -- whether it captured modules or not
command -- the full command line with noWorkflow parametes
run -- trial created by the run command
Keyword arguments:
session -- specify session for loading (default=relational.session)
|
capture/noworkflow/now/persistence/models/trial.py
|
store
|
raffaelfoidl/noworkflow
|
python
|
@classmethod
def store(cls, start, script, code_hash, arguments, bypass_modules, command, run, docstring, session=None):
'Create trial and assign a new id to it\n\n Use core sqlalchemy\n\n Arguments:\n start -- trial start time\n script -- script name\n code_hash -- script hash code\n arguments -- trial arguments\n bypass_modules -- whether it captured modules or not\n command -- the full command line with noWorkflow parametes\n run -- trial created by the run command\n\n Keyword arguments:\n session -- specify session for loading (default=relational.session)\n '
session = (session or relational.session)
parent = cls.load_parent(script, parent_required=True)
parent_id = (parent.id if parent else None)
inherited_id = None
if bypass_modules:
inherited_id = cls.fast_last_trial_id()
ttrial = cls.__table__
result = session.execute(ttrial.insert(), {'start': start, 'script': script, 'code_hash': code_hash, 'arguments': arguments, 'command': command, 'run': run, 'inherited_id': inherited_id, 'parent_id': parent_id, 'docstring': docstring})
tid = result.lastrowid
session.commit()
return tid
|
@classmethod
def all(cls, session=None):
    """Return a generator over all trials.

    Keyword arguments:
    session -- session for loading (default=relational.session)
    """
    query = (session or relational.session).query(cls.m)
    return proxy_gen(query)
| -5,441,857,363,821,957,000
|
Return all trials
Keyword arguments:
session -- specify session for loading (default=relational.session)
|
capture/noworkflow/now/persistence/models/trial.py
|
all
|
raffaelfoidl/noworkflow
|
python
|
@classmethod
def all(cls, session=None):
'Return all trials\n\n Keyword arguments:\n session -- specify session for loading (default=relational.session)\n '
session = (session or relational.session)
return proxy_gen(session.query(cls.m))
|
def match_status(self, status):
    """Return True when *status* is the wildcard '*' or equals this trial's status."""
    return status == '*' or self.status == status
| 3,859,083,279,929,322,000
|
Check if trial statuses matches
|
capture/noworkflow/now/persistence/models/trial.py
|
match_status
|
raffaelfoidl/noworkflow
|
python
|
def match_status(self, status):
'\n '
if (status == '*'):
return True
return (self.status == status)
|
def match_script(self, script):
    """Return True when *script* is the wildcard '*' or equals this trial's script."""
    return script == '*' or self.script == script
| 2,700,283,920,835,461,000
|
Check if trial scripts matches
|
capture/noworkflow/now/persistence/models/trial.py
|
match_script
|
raffaelfoidl/noworkflow
|
python
|
def match_script(self, script):
'\n '
if (script == '*'):
return True
return (self.script == script)
|
@property
def str_start(self):
    """Start timestamp rendered as a string."""
    start = self.start
    return str(start)
| 6,990,337,085,220,348,000
|
Return start date as string
|
capture/noworkflow/now/persistence/models/trial.py
|
str_start
|
raffaelfoidl/noworkflow
|
python
|
@property
def str_start(self):
return str(self.start)
|
@property
def str_finish(self):
    """Finish timestamp rendered as a string."""
    finish = self.finish
    return str(finish)
| -4,390,467,911,824,953,000
|
Return start date as string
|
capture/noworkflow/now/persistence/models/trial.py
|
str_finish
|
raffaelfoidl/noworkflow
|
python
|
@property
def str_finish(self):
return str(self.finish)
|
@classmethod
def count(cls, session=None):
    """Return the number of trials stored in the database.

    Keyword arguments:
    session -- session for loading (default=relational.session)
    """
    session = session or relational.session
    return session.query(cls.m).count()
| -3,095,270,125,073,085,000
|
Count number of trials on database
|
capture/noworkflow/now/persistence/models/trial.py
|
count
|
raffaelfoidl/noworkflow
|
python
|
@classmethod
def count(cls, session=None):
'\n '
session = (session or relational.session)
return session.query(cls.m).count()
|
def add(path, info):
    """Add a file entry to the enclosing ``files`` dict, keyed by relative path.

    NOTE(review): this is a nested helper extracted from ``versioned_files``;
    it relies on the free variables ``files`` and ``persistence_config``
    from the enclosing scope.
    """
    if os.path.isabs(path):
        # Ignore absolute paths that fall outside the project base path.
        if (not (persistence_config.base_path in path)):
            return
        # Store paths relative to the project base path.
        path = os.path.relpath(path, persistence_config.base_path)
    files[path] = info
| 1,143,074,063,093,067,000
|
Add file to dict
|
capture/noworkflow/now/persistence/models/trial.py
|
add
|
raffaelfoidl/noworkflow
|
python
|
def add(path, info):
if os.path.isabs(path):
if (not (persistence_config.base_path in path)):
return
path = os.path.relpath(path, persistence_config.base_path)
files[path] = info
|
def __init__(self, init_state_idx=None, init_state_idx_type='obs', policy_array=None, policy_idx_type='obs', p_diabetes=0.2):
    """Initialize the simulator.

    Keyword arguments:
    init_state_idx -- optional index of the initial state
    init_state_idx_type -- how to interpret the index: 'obs', 'full', or 'proj_obs'
    policy_array -- optional (num_states, num_actions) policy matrix
    policy_idx_type -- state indexing convention used by policy_array
    p_diabetes -- probability of diabetes for randomly generated states
    """
    assert 0 <= p_diabetes <= 1, 'Invalid p_diabetes: {}'.format(p_diabetes)
    assert policy_idx_type in ['obs', 'full', 'proj_obs']
    if policy_array is not None:
        # The policy must cover every action and every state of its index type.
        assert policy_array.shape[1] == Action.NUM_ACTIONS_TOTAL
        expected_rows = {
            'obs': State.NUM_OBS_STATES,
            'full': State.NUM_HID_STATES * State.NUM_OBS_STATES,
            'proj_obs': State.NUM_PROJ_OBS_STATES,
        }[policy_idx_type]
        assert policy_array.shape[0] == expected_rows
    self.p_diabetes = p_diabetes
    self.state = None  # placeholder in case state construction fails
    self.state = self.get_new_state(init_state_idx, init_state_idx_type)
    self.policy_array = policy_array
    self.policy_idx_type = policy_idx_type
| 7,739,463,211,233,123,000
|
initialize the simulator
|
sepsisSimDiabetes/MDP.py
|
__init__
|
GuyLor/gumbel_max_causal_gadgets_part2
|
python
|
def __init__(self, init_state_idx=None, init_state_idx_type='obs', policy_array=None, policy_idx_type='obs', p_diabetes=0.2):
'\n \n '
assert ((p_diabetes >= 0) and (p_diabetes <= 1)), 'Invalid p_diabetes: {}'.format(p_diabetes)
assert (policy_idx_type in ['obs', 'full', 'proj_obs'])
if (policy_array is not None):
assert (policy_array.shape[1] == Action.NUM_ACTIONS_TOTAL)
if (policy_idx_type == 'obs'):
assert (policy_array.shape[0] == State.NUM_OBS_STATES)
elif (policy_idx_type == 'full'):
assert (policy_array.shape[0] == (State.NUM_HID_STATES * State.NUM_OBS_STATES))
elif (policy_idx_type == 'proj_obs'):
assert (policy_array.shape[0] == State.NUM_PROJ_OBS_STATES)
self.p_diabetes = p_diabetes
self.state = None
self.state = self.get_new_state(init_state_idx, init_state_idx_type)
self.policy_array = policy_array
self.policy_idx_type = policy_idx_type
|
def get_new_state(self, state_idx=None, idx_type='obs', diabetic_idx=None):
    """Use to start the MDP over. A few options:

    Full specification:
    1. Provide state_idx with idx_type = 'obs' + diabetic_idx
    2. Provide state_idx with idx_type = 'full', diabetic_idx is ignored
    3. Provide state_idx with idx_type = 'proj_obs' + diabetic_idx*

    * This option will set glucose to a normal level

    Random specification:
    4. state_idx, no diabetic_idx: latter will be generated
    5. No state_idx, no diabetic_idx: completely random
    6. No state_idx, diabetic_idx given: random conditional on diabetes
    """
    assert (idx_type in ['obs', 'full', 'proj_obs'])
    option = None
    # Select exactly one construction option from the argument combination;
    # any combination not covered below leaves option as None and fails the
    # assert further down.
    if (state_idx is not None):
        if ((idx_type == 'obs') and (diabetic_idx is not None)):
            option = 'spec_obs'
        elif ((idx_type == 'obs') and (diabetic_idx is None)):
            option = 'spec_obs_no_diab'
            # Diabetes status not given: sample it from the prior.
            diabetic_idx = np.random.binomial(1, self.p_diabetes)
        elif (idx_type == 'full'):
            option = 'spec_full'
        elif ((idx_type == 'proj_obs') and (diabetic_idx is not None)):
            option = 'spec_proj_obs'
    elif ((state_idx is None) and (diabetic_idx is None)):
        option = 'random'
    elif ((state_idx is None) and (diabetic_idx is not None)):
        option = 'random_cond_diab'
    assert (option is not None), 'Invalid specification of new state'
    if (option in ['random', 'random_cond_diab']):
        # Rejection-sample until the random state is not absorbing
        # (i.e. not an immediate death/discharge state).
        init_state = self.generate_random_state(diabetic_idx)
        while init_state.check_absorbing_state():
            init_state = self.generate_random_state(diabetic_idx)
    else:
        init_state = State(state_idx=state_idx, idx_type=idx_type, diabetic_idx=diabetic_idx)
    return init_state
| 5,995,413,871,458,844,000
|
use to start MDP over. A few options:
Full specification:
1. Provide state_idx with idx_type = 'obs' + diabetic_idx
2. Provide state_idx with idx_type = 'full', diabetic_idx is ignored
3. Provide state_idx with idx_type = 'proj_obs' + diabetic_idx*
* This option will set glucose to a normal level
Random specification
4. State_idx, no diabetic_idx: Latter will be generated
5. No state_idx, no diabetic_idx: Completely random
6. No state_idx, diabetic_idx given: Random conditional on diabetes
|
sepsisSimDiabetes/MDP.py
|
get_new_state
|
GuyLor/gumbel_max_causal_gadgets_part2
|
python
|
def get_new_state(self, state_idx=None, idx_type='obs', diabetic_idx=None):
"\n use to start MDP over. A few options:\n\n Full specification:\n 1. Provide state_idx with idx_type = 'obs' + diabetic_idx\n 2. Provide state_idx with idx_type = 'full', diabetic_idx is ignored\n 3. Provide state_idx with idx_type = 'proj_obs' + diabetic_idx*\n\n * This option will set glucose to a normal level\n\n Random specification\n 4. State_idx, no diabetic_idx: Latter will be generated\n 5. No state_idx, no diabetic_idx: Completely random\n 6. No state_idx, diabetic_idx given: Random conditional on diabetes\n "
assert (idx_type in ['obs', 'full', 'proj_obs'])
option = None
if (state_idx is not None):
if ((idx_type == 'obs') and (diabetic_idx is not None)):
option = 'spec_obs'
elif ((idx_type == 'obs') and (diabetic_idx is None)):
option = 'spec_obs_no_diab'
diabetic_idx = np.random.binomial(1, self.p_diabetes)
elif (idx_type == 'full'):
option = 'spec_full'
elif ((idx_type == 'proj_obs') and (diabetic_idx is not None)):
option = 'spec_proj_obs'
elif ((state_idx is None) and (diabetic_idx is None)):
option = 'random'
elif ((state_idx is None) and (diabetic_idx is not None)):
option = 'random_cond_diab'
assert (option is not None), 'Invalid specification of new state'
if (option in ['random', 'random_cond_diab']):
init_state = self.generate_random_state(diabetic_idx)
while init_state.check_absorbing_state():
init_state = self.generate_random_state(diabetic_idx)
else:
init_state = State(state_idx=state_idx, idx_type=idx_type, diabetic_idx=diabetic_idx)
return init_state
|
def transition_antibiotics_on(self):
'\n antibiotics state on\n heart rate, sys bp: hi -> normal w.p. .5\n '
self.state.antibiotic_state = 1
if ((self.state.hr_state == 2) and (np.random.uniform(0, 1) < 0.5)):
self.state.hr_state = 1
if ((self.state.sysbp_state == 2) and (np.random.uniform(0, 1) < 0.5)):
self.state.sysbp_state = 1
| 8,961,013,802,412,358,000
|
antibiotics state on
heart rate, sys bp: hi -> normal w.p. .5
|
sepsisSimDiabetes/MDP.py
|
transition_antibiotics_on
|
GuyLor/gumbel_max_causal_gadgets_part2
|
python
|
def transition_antibiotics_on(self):
'\n antibiotics state on\n heart rate, sys bp: hi -> normal w.p. .5\n '
self.state.antibiotic_state = 1
if ((self.state.hr_state == 2) and (np.random.uniform(0, 1) < 0.5)):
self.state.hr_state = 1
if ((self.state.sysbp_state == 2) and (np.random.uniform(0, 1) < 0.5)):
self.state.sysbp_state = 1
|
def transition_antibiotics_off(self):
'\n antibiotics state off\n if antibiotics was on: heart rate, sys bp: normal -> hi w.p. .1\n '
if (self.state.antibiotic_state == 1):
if ((self.state.hr_state == 1) and (np.random.uniform(0, 1) < 0.1)):
self.state.hr_state = 2
if ((self.state.sysbp_state == 1) and (np.random.uniform(0, 1) < 0.1)):
self.state.sysbp_state = 2
self.state.antibiotic_state = 0
| -558,963,829,897,848,400
|
antibiotics state off
if antibiotics was on: heart rate, sys bp: normal -> hi w.p. .1
|
sepsisSimDiabetes/MDP.py
|
transition_antibiotics_off
|
GuyLor/gumbel_max_causal_gadgets_part2
|
python
|
def transition_antibiotics_off(self):
'\n antibiotics state off\n if antibiotics was on: heart rate, sys bp: normal -> hi w.p. .1\n '
if (self.state.antibiotic_state == 1):
if ((self.state.hr_state == 1) and (np.random.uniform(0, 1) < 0.1)):
self.state.hr_state = 2
if ((self.state.sysbp_state == 1) and (np.random.uniform(0, 1) < 0.1)):
self.state.sysbp_state = 2
self.state.antibiotic_state = 0
|
def transition_vent_on(self):
'\n ventilation state on\n percent oxygen: low -> normal w.p. .7\n '
self.state.vent_state = 1
if ((self.state.percoxyg_state == 0) and (np.random.uniform(0, 1) < 0.7)):
self.state.percoxyg_state = 1
| 4,821,546,960,925,724,000
|
ventilation state on
percent oxygen: low -> normal w.p. .7
|
sepsisSimDiabetes/MDP.py
|
transition_vent_on
|
GuyLor/gumbel_max_causal_gadgets_part2
|
python
|
def transition_vent_on(self):
'\n ventilation state on\n percent oxygen: low -> normal w.p. .7\n '
self.state.vent_state = 1
if ((self.state.percoxyg_state == 0) and (np.random.uniform(0, 1) < 0.7)):
self.state.percoxyg_state = 1
|
def transition_vent_off(self):
'\n ventilation state off\n if ventilation was on: percent oxygen: normal -> lo w.p. .1\n '
if (self.state.vent_state == 1):
if ((self.state.percoxyg_state == 1) and (np.random.uniform(0, 1) < 0.1)):
self.state.percoxyg_state = 0
self.state.vent_state = 0
| 5,563,056,253,617,258,000
|
ventilation state off
if ventilation was on: percent oxygen: normal -> lo w.p. .1
|
sepsisSimDiabetes/MDP.py
|
transition_vent_off
|
GuyLor/gumbel_max_causal_gadgets_part2
|
python
|
def transition_vent_off(self):
'\n ventilation state off\n if ventilation was on: percent oxygen: normal -> lo w.p. .1\n '
if (self.state.vent_state == 1):
if ((self.state.percoxyg_state == 1) and (np.random.uniform(0, 1) < 0.1)):
self.state.percoxyg_state = 0
self.state.vent_state = 0
|
def transition_vaso_on(self):
'\n vasopressor state on\n for non-diabetic:\n sys bp: low -> normal, normal -> hi w.p. .7\n for diabetic:\n raise blood pressure: normal -> hi w.p. .9,\n lo -> normal w.p. .5, lo -> hi w.p. .4\n raise blood glucose by 1 w.p. .5\n '
self.state.vaso_state = 1
if (self.state.diabetic_idx == 0):
if (np.random.uniform(0, 1) < 0.7):
if (self.state.sysbp_state == 0):
self.state.sysbp_state = 1
elif (self.state.sysbp_state == 1):
self.state.sysbp_state = 2
else:
if (self.state.sysbp_state == 1):
if (np.random.uniform(0, 1) < 0.9):
self.state.sysbp_state = 2
elif (self.state.sysbp_state == 0):
up_prob = np.random.uniform(0, 1)
if (up_prob < 0.5):
self.state.sysbp_state = 1
elif (up_prob < 0.9):
self.state.sysbp_state = 2
if (np.random.uniform(0, 1) < 0.5):
self.state.glucose_state = min(4, (self.state.glucose_state + 1))
| -1,507,010,084,658,555,100
|
vasopressor state on
for non-diabetic:
sys bp: low -> normal, normal -> hi w.p. .7
for diabetic:
raise blood pressure: normal -> hi w.p. .9,
lo -> normal w.p. .5, lo -> hi w.p. .4
raise blood glucose by 1 w.p. .5
|
sepsisSimDiabetes/MDP.py
|
transition_vaso_on
|
GuyLor/gumbel_max_causal_gadgets_part2
|
python
|
def transition_vaso_on(self):
'\n vasopressor state on\n for non-diabetic:\n sys bp: low -> normal, normal -> hi w.p. .7\n for diabetic:\n raise blood pressure: normal -> hi w.p. .9,\n lo -> normal w.p. .5, lo -> hi w.p. .4\n raise blood glucose by 1 w.p. .5\n '
self.state.vaso_state = 1
if (self.state.diabetic_idx == 0):
if (np.random.uniform(0, 1) < 0.7):
if (self.state.sysbp_state == 0):
self.state.sysbp_state = 1
elif (self.state.sysbp_state == 1):
self.state.sysbp_state = 2
else:
if (self.state.sysbp_state == 1):
if (np.random.uniform(0, 1) < 0.9):
self.state.sysbp_state = 2
elif (self.state.sysbp_state == 0):
up_prob = np.random.uniform(0, 1)
if (up_prob < 0.5):
self.state.sysbp_state = 1
elif (up_prob < 0.9):
self.state.sysbp_state = 2
if (np.random.uniform(0, 1) < 0.5):
self.state.glucose_state = min(4, (self.state.glucose_state + 1))
|
def transition_vaso_off(self):
'\n vasopressor state off\n if vasopressor was on:\n for non-diabetics, sys bp: normal -> low, hi -> normal w.p. .1\n for diabetics, blood pressure falls by 1 w.p. .05 instead of .1\n '
if (self.state.vaso_state == 1):
if (self.state.diabetic_idx == 0):
if (np.random.uniform(0, 1) < 0.1):
self.state.sysbp_state = max(0, (self.state.sysbp_state - 1))
elif (np.random.uniform(0, 1) < 0.05):
self.state.sysbp_state = max(0, (self.state.sysbp_state - 1))
self.state.vaso_state = 0
| 4,314,229,342,329,886,000
|
vasopressor state off
if vasopressor was on:
for non-diabetics, sys bp: normal -> low, hi -> normal w.p. .1
for diabetics, blood pressure falls by 1 w.p. .05 instead of .1
|
sepsisSimDiabetes/MDP.py
|
transition_vaso_off
|
GuyLor/gumbel_max_causal_gadgets_part2
|
python
|
def transition_vaso_off(self):
'\n vasopressor state off\n if vasopressor was on:\n for non-diabetics, sys bp: normal -> low, hi -> normal w.p. .1\n for diabetics, blood pressure falls by 1 w.p. .05 instead of .1\n '
if (self.state.vaso_state == 1):
if (self.state.diabetic_idx == 0):
if (np.random.uniform(0, 1) < 0.1):
self.state.sysbp_state = max(0, (self.state.sysbp_state - 1))
elif (np.random.uniform(0, 1) < 0.05):
self.state.sysbp_state = max(0, (self.state.sysbp_state - 1))
self.state.vaso_state = 0
|
def transition_fluctuate(self, hr_fluctuate, sysbp_fluctuate, percoxyg_fluctuate, glucose_fluctuate):
'\n all (non-treatment) states fluctuate +/- 1 w.p. .1\n exception: glucose flucuates +/- 1 w.p. .3 if diabetic\n '
if hr_fluctuate:
hr_prob = np.random.uniform(0, 1)
if (hr_prob < 0.1):
self.state.hr_state = max(0, (self.state.hr_state - 1))
elif (hr_prob < 0.2):
self.state.hr_state = min(2, (self.state.hr_state + 1))
if sysbp_fluctuate:
sysbp_prob = np.random.uniform(0, 1)
if (sysbp_prob < 0.1):
self.state.sysbp_state = max(0, (self.state.sysbp_state - 1))
elif (sysbp_prob < 0.2):
self.state.sysbp_state = min(2, (self.state.sysbp_state + 1))
if percoxyg_fluctuate:
percoxyg_prob = np.random.uniform(0, 1)
if (percoxyg_prob < 0.1):
self.state.percoxyg_state = max(0, (self.state.percoxyg_state - 1))
elif (percoxyg_prob < 0.2):
self.state.percoxyg_state = min(1, (self.state.percoxyg_state + 1))
if glucose_fluctuate:
glucose_prob = np.random.uniform(0, 1)
if (self.state.diabetic_idx == 0):
if (glucose_prob < 0.1):
self.state.glucose_state = max(0, (self.state.glucose_state - 1))
elif (glucose_prob < 0.2):
self.state.glucose_state = min(1, (self.state.glucose_state + 1))
elif (glucose_prob < 0.3):
self.state.glucose_state = max(0, (self.state.glucose_state - 1))
elif (glucose_prob < 0.6):
self.state.glucose_state = min(4, (self.state.glucose_state + 1))
| 1,336,593,036,583,505,400
|
all (non-treatment) states fluctuate +/- 1 w.p. .1
exception: glucose flucuates +/- 1 w.p. .3 if diabetic
|
sepsisSimDiabetes/MDP.py
|
transition_fluctuate
|
GuyLor/gumbel_max_causal_gadgets_part2
|
python
|
def transition_fluctuate(self, hr_fluctuate, sysbp_fluctuate, percoxyg_fluctuate, glucose_fluctuate):
'\n all (non-treatment) states fluctuate +/- 1 w.p. .1\n exception: glucose flucuates +/- 1 w.p. .3 if diabetic\n '
if hr_fluctuate:
hr_prob = np.random.uniform(0, 1)
if (hr_prob < 0.1):
self.state.hr_state = max(0, (self.state.hr_state - 1))
elif (hr_prob < 0.2):
self.state.hr_state = min(2, (self.state.hr_state + 1))
if sysbp_fluctuate:
sysbp_prob = np.random.uniform(0, 1)
if (sysbp_prob < 0.1):
self.state.sysbp_state = max(0, (self.state.sysbp_state - 1))
elif (sysbp_prob < 0.2):
self.state.sysbp_state = min(2, (self.state.sysbp_state + 1))
if percoxyg_fluctuate:
percoxyg_prob = np.random.uniform(0, 1)
if (percoxyg_prob < 0.1):
self.state.percoxyg_state = max(0, (self.state.percoxyg_state - 1))
elif (percoxyg_prob < 0.2):
self.state.percoxyg_state = min(1, (self.state.percoxyg_state + 1))
if glucose_fluctuate:
glucose_prob = np.random.uniform(0, 1)
if (self.state.diabetic_idx == 0):
if (glucose_prob < 0.1):
self.state.glucose_state = max(0, (self.state.glucose_state - 1))
elif (glucose_prob < 0.2):
self.state.glucose_state = min(1, (self.state.glucose_state + 1))
elif (glucose_prob < 0.3):
self.state.glucose_state = max(0, (self.state.glucose_state - 1))
elif (glucose_prob < 0.6):
self.state.glucose_state = min(4, (self.state.glucose_state + 1))
|
def __init__(self, score_thresh=0.05, nms=0.5, detections_per_img=100, box_coder=None, cls_agnostic_bbox_reg=False, bbox_aug_enabled=False):
'\n Arguments:\n score_thresh (float)\n nms (float)\n detections_per_img (int)\n box_coder (BoxCoder)\n '
super(PostProcessor, self).__init__()
self.score_thresh = score_thresh
self.nms = nms
self.detections_per_img = detections_per_img
if (box_coder is None):
box_coder = BoxCoder(weights=(10.0, 10.0, 5.0, 5.0))
self.box_coder = box_coder
self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg
self.bbox_aug_enabled = bbox_aug_enabled
| 2,870,374,798,669,880,000
|
Arguments:
score_thresh (float)
nms (float)
detections_per_img (int)
box_coder (BoxCoder)
|
fcos_core/modeling/roi_heads/box_head/inference.py
|
__init__
|
qilei123/FCOS
|
python
|
def __init__(self, score_thresh=0.05, nms=0.5, detections_per_img=100, box_coder=None, cls_agnostic_bbox_reg=False, bbox_aug_enabled=False):
'\n Arguments:\n score_thresh (float)\n nms (float)\n detections_per_img (int)\n box_coder (BoxCoder)\n '
super(PostProcessor, self).__init__()
self.score_thresh = score_thresh
self.nms = nms
self.detections_per_img = detections_per_img
if (box_coder is None):
box_coder = BoxCoder(weights=(10.0, 10.0, 5.0, 5.0))
self.box_coder = box_coder
self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg
self.bbox_aug_enabled = bbox_aug_enabled
|
def forward(self, x, boxes):
'\n Arguments:\n x (tuple[tensor, tensor]): x contains the class logits\n and the box_regression from the model.\n boxes (list[BoxList]): bounding boxes that are used as\n reference, one for ech image\n\n Returns:\n results (list[BoxList]): one BoxList for each image, containing\n the extra fields labels and scores\n '
(class_logits, box_regression) = x
class_prob = F.softmax(class_logits, (- 1))
image_shapes = [box.size for box in boxes]
boxes_per_image = [len(box) for box in boxes]
concat_boxes = torch.cat([a.bbox for a in boxes], dim=0)
if self.cls_agnostic_bbox_reg:
box_regression = box_regression[:, (- 4):]
proposals = self.box_coder.decode(box_regression.view(sum(boxes_per_image), (- 1)), concat_boxes)
if self.cls_agnostic_bbox_reg:
proposals = proposals.repeat(1, class_prob.shape[1])
num_classes = class_prob.shape[1]
proposals = proposals.split(boxes_per_image, dim=0)
class_prob = class_prob.split(boxes_per_image, dim=0)
results = []
for (prob, boxes_per_img, image_shape) in zip(class_prob, proposals, image_shapes):
boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape)
boxlist = boxlist.clip_to_image(remove_empty=False)
if (not self.bbox_aug_enabled):
boxlist = self.filter_results(boxlist, num_classes)
results.append(boxlist)
return results
| -3,406,154,131,159,554,600
|
Arguments:
x (tuple[tensor, tensor]): x contains the class logits
and the box_regression from the model.
boxes (list[BoxList]): bounding boxes that are used as
reference, one for ech image
Returns:
results (list[BoxList]): one BoxList for each image, containing
the extra fields labels and scores
|
fcos_core/modeling/roi_heads/box_head/inference.py
|
forward
|
qilei123/FCOS
|
python
|
def forward(self, x, boxes):
'\n Arguments:\n x (tuple[tensor, tensor]): x contains the class logits\n and the box_regression from the model.\n boxes (list[BoxList]): bounding boxes that are used as\n reference, one for ech image\n\n Returns:\n results (list[BoxList]): one BoxList for each image, containing\n the extra fields labels and scores\n '
(class_logits, box_regression) = x
class_prob = F.softmax(class_logits, (- 1))
image_shapes = [box.size for box in boxes]
boxes_per_image = [len(box) for box in boxes]
concat_boxes = torch.cat([a.bbox for a in boxes], dim=0)
if self.cls_agnostic_bbox_reg:
box_regression = box_regression[:, (- 4):]
proposals = self.box_coder.decode(box_regression.view(sum(boxes_per_image), (- 1)), concat_boxes)
if self.cls_agnostic_bbox_reg:
proposals = proposals.repeat(1, class_prob.shape[1])
num_classes = class_prob.shape[1]
proposals = proposals.split(boxes_per_image, dim=0)
class_prob = class_prob.split(boxes_per_image, dim=0)
results = []
for (prob, boxes_per_img, image_shape) in zip(class_prob, proposals, image_shapes):
boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape)
boxlist = boxlist.clip_to_image(remove_empty=False)
if (not self.bbox_aug_enabled):
boxlist = self.filter_results(boxlist, num_classes)
results.append(boxlist)
return results
|
def prepare_boxlist(self, boxes, scores, image_shape):
'\n Returns BoxList from `boxes` and adds probability scores information\n as an extra field\n `boxes` has shape (#detections, 4 * #classes), where each row represents\n a list of predicted bounding boxes for each of the object classes in the\n dataset (including the background class). The detections in each row\n originate from the same object proposal.\n `scores` has shape (#detection, #classes), where each row represents a list\n of object detection confidence scores for each of the object classes in the\n dataset (including the background class). `scores[i, j]`` corresponds to the\n box at `boxes[i, j * 4:(j + 1) * 4]`.\n '
boxes = boxes.reshape((- 1), 4)
scores = scores.reshape((- 1))
boxlist = BoxList(boxes, image_shape, mode='xyxy')
boxlist.add_field('scores', scores)
return boxlist
| 6,100,855,235,390,330,000
|
Returns BoxList from `boxes` and adds probability scores information
as an extra field
`boxes` has shape (#detections, 4 * #classes), where each row represents
a list of predicted bounding boxes for each of the object classes in the
dataset (including the background class). The detections in each row
originate from the same object proposal.
`scores` has shape (#detection, #classes), where each row represents a list
of object detection confidence scores for each of the object classes in the
dataset (including the background class). `scores[i, j]`` corresponds to the
box at `boxes[i, j * 4:(j + 1) * 4]`.
|
fcos_core/modeling/roi_heads/box_head/inference.py
|
prepare_boxlist
|
qilei123/FCOS
|
python
|
def prepare_boxlist(self, boxes, scores, image_shape):
'\n Returns BoxList from `boxes` and adds probability scores information\n as an extra field\n `boxes` has shape (#detections, 4 * #classes), where each row represents\n a list of predicted bounding boxes for each of the object classes in the\n dataset (including the background class). The detections in each row\n originate from the same object proposal.\n `scores` has shape (#detection, #classes), where each row represents a list\n of object detection confidence scores for each of the object classes in the\n dataset (including the background class). `scores[i, j]`` corresponds to the\n box at `boxes[i, j * 4:(j + 1) * 4]`.\n '
boxes = boxes.reshape((- 1), 4)
scores = scores.reshape((- 1))
boxlist = BoxList(boxes, image_shape, mode='xyxy')
boxlist.add_field('scores', scores)
return boxlist
|
def filter_results(self, boxlist, num_classes):
'Returns bounding-box detection results by thresholding on scores and\n applying non-maximum suppression (NMS).\n '
boxes = boxlist.bbox.reshape((- 1), (num_classes * 4))
scores = boxlist.get_field('scores').reshape((- 1), num_classes)
device = scores.device
result = []
inds_all = (scores > self.score_thresh)
for j in range(1, num_classes):
inds = inds_all[:, j].nonzero().squeeze(1)
scores_j = scores[(inds, j)]
boxes_j = boxes[inds, (j * 4):((j + 1) * 4)]
boxlist_for_class = BoxList(boxes_j, boxlist.size, mode='xyxy')
boxlist_for_class.add_field('scores', scores_j)
boxlist_for_class = boxlist_nms(boxlist_for_class, self.nms)
num_labels = len(boxlist_for_class)
boxlist_for_class.add_field('labels', torch.full((num_labels,), j, dtype=torch.int64, device=device))
result.append(boxlist_for_class)
result = cat_boxlist(result)
number_of_detections = len(result)
if (number_of_detections > self.detections_per_img > 0):
cls_scores = result.get_field('scores')
(image_thresh, _) = torch.kthvalue(cls_scores.cpu(), ((number_of_detections - self.detections_per_img) + 1))
keep = (cls_scores >= image_thresh.item())
keep = torch.nonzero(keep).squeeze(1)
result = result[keep]
return result
| 3,938,484,773,227,017,700
|
Returns bounding-box detection results by thresholding on scores and
applying non-maximum suppression (NMS).
|
fcos_core/modeling/roi_heads/box_head/inference.py
|
filter_results
|
qilei123/FCOS
|
python
|
def filter_results(self, boxlist, num_classes):
'Returns bounding-box detection results by thresholding on scores and\n applying non-maximum suppression (NMS).\n '
boxes = boxlist.bbox.reshape((- 1), (num_classes * 4))
scores = boxlist.get_field('scores').reshape((- 1), num_classes)
device = scores.device
result = []
inds_all = (scores > self.score_thresh)
for j in range(1, num_classes):
inds = inds_all[:, j].nonzero().squeeze(1)
scores_j = scores[(inds, j)]
boxes_j = boxes[inds, (j * 4):((j + 1) * 4)]
boxlist_for_class = BoxList(boxes_j, boxlist.size, mode='xyxy')
boxlist_for_class.add_field('scores', scores_j)
boxlist_for_class = boxlist_nms(boxlist_for_class, self.nms)
num_labels = len(boxlist_for_class)
boxlist_for_class.add_field('labels', torch.full((num_labels,), j, dtype=torch.int64, device=device))
result.append(boxlist_for_class)
result = cat_boxlist(result)
number_of_detections = len(result)
if (number_of_detections > self.detections_per_img > 0):
cls_scores = result.get_field('scores')
(image_thresh, _) = torch.kthvalue(cls_scores.cpu(), ((number_of_detections - self.detections_per_img) + 1))
keep = (cls_scores >= image_thresh.item())
keep = torch.nonzero(keep).squeeze(1)
result = result[keep]
return result
|
def _calculate_deltas(times: (((str | np.ndarray) | NDFrame) | None), halflife: ((float | TimedeltaConvertibleTypes) | None)) -> np.ndarray:
'\n Return the diff of the times divided by the half-life. These values are used in\n the calculation of the ewm mean.\n\n Parameters\n ----------\n times : str, np.ndarray, Series, default None\n Times corresponding to the observations. Must be monotonically increasing\n and ``datetime64[ns]`` dtype.\n halflife : float, str, timedelta, optional\n Half-life specifying the decay\n\n Returns\n -------\n np.ndarray\n Diff of the times divided by the half-life\n '
_times = np.asarray(times.view(np.int64), dtype=np.float64)
_halflife = float(Timedelta(halflife).value)
return (np.diff(_times) / _halflife)
| -5,254,744,993,093,402,000
|
Return the diff of the times divided by the half-life. These values are used in
the calculation of the ewm mean.
Parameters
----------
times : str, np.ndarray, Series, default None
Times corresponding to the observations. Must be monotonically increasing
and ``datetime64[ns]`` dtype.
halflife : float, str, timedelta, optional
Half-life specifying the decay
Returns
-------
np.ndarray
Diff of the times divided by the half-life
|
pandas/core/window/ewm.py
|
_calculate_deltas
|
DrGFreeman/pandas
|
python
|
def _calculate_deltas(times: (((str | np.ndarray) | NDFrame) | None), halflife: ((float | TimedeltaConvertibleTypes) | None)) -> np.ndarray:
'\n Return the diff of the times divided by the half-life. These values are used in\n the calculation of the ewm mean.\n\n Parameters\n ----------\n times : str, np.ndarray, Series, default None\n Times corresponding to the observations. Must be monotonically increasing\n and ``datetime64[ns]`` dtype.\n halflife : float, str, timedelta, optional\n Half-life specifying the decay\n\n Returns\n -------\n np.ndarray\n Diff of the times divided by the half-life\n '
_times = np.asarray(times.view(np.int64), dtype=np.float64)
_halflife = float(Timedelta(halflife).value)
return (np.diff(_times) / _halflife)
|
def _get_window_indexer(self) -> BaseIndexer:
'\n Return an indexer class that will compute the window start and end bounds\n '
return ExponentialMovingWindowIndexer()
| 6,524,741,404,708,218,000
|
Return an indexer class that will compute the window start and end bounds
|
pandas/core/window/ewm.py
|
_get_window_indexer
|
DrGFreeman/pandas
|
python
|
def _get_window_indexer(self) -> BaseIndexer:
'\n \n '
return ExponentialMovingWindowIndexer()
|
def online(self, engine='numba', engine_kwargs=None):
"\n Return an ``OnlineExponentialMovingWindow`` object to calculate\n exponentially moving window aggregations in an online method.\n\n .. versionadded:: 1.3.0\n\n Parameters\n ----------\n engine: str, default ``'numba'``\n Execution engine to calculate online aggregations.\n Applies to all supported aggregation methods.\n\n engine_kwargs : dict, default None\n Applies to all supported aggregation methods.\n\n * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``\n and ``parallel`` dictionary keys. The values must either be ``True`` or\n ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is\n ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be\n applied to the function\n\n Returns\n -------\n OnlineExponentialMovingWindow\n "
return OnlineExponentialMovingWindow(obj=self.obj, com=self.com, span=self.span, halflife=self.halflife, alpha=self.alpha, min_periods=self.min_periods, adjust=self.adjust, ignore_na=self.ignore_na, axis=self.axis, times=self.times, engine=engine, engine_kwargs=engine_kwargs, selection=self._selection)
| 5,517,177,251,206,371,000
|
Return an ``OnlineExponentialMovingWindow`` object to calculate
exponentially moving window aggregations in an online method.
.. versionadded:: 1.3.0
Parameters
----------
engine: str, default ``'numba'``
Execution engine to calculate online aggregations.
Applies to all supported aggregation methods.
engine_kwargs : dict, default None
Applies to all supported aggregation methods.
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be
applied to the function
Returns
-------
OnlineExponentialMovingWindow
|
pandas/core/window/ewm.py
|
online
|
DrGFreeman/pandas
|
python
|
def online(self, engine='numba', engine_kwargs=None):
"\n Return an ``OnlineExponentialMovingWindow`` object to calculate\n exponentially moving window aggregations in an online method.\n\n .. versionadded:: 1.3.0\n\n Parameters\n ----------\n engine: str, default ``'numba'``\n Execution engine to calculate online aggregations.\n Applies to all supported aggregation methods.\n\n engine_kwargs : dict, default None\n Applies to all supported aggregation methods.\n\n * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``\n and ``parallel`` dictionary keys. The values must either be ``True`` or\n ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is\n ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be\n applied to the function\n\n Returns\n -------\n OnlineExponentialMovingWindow\n "
return OnlineExponentialMovingWindow(obj=self.obj, com=self.com, span=self.span, halflife=self.halflife, alpha=self.alpha, min_periods=self.min_periods, adjust=self.adjust, ignore_na=self.ignore_na, axis=self.axis, times=self.times, engine=engine, engine_kwargs=engine_kwargs, selection=self._selection)
|
def _get_window_indexer(self) -> GroupbyIndexer:
'\n Return an indexer class that will compute the window start and end bounds\n\n Returns\n -------\n GroupbyIndexer\n '
window_indexer = GroupbyIndexer(groupby_indicies=self._grouper.indices, window_indexer=ExponentialMovingWindowIndexer)
return window_indexer
| 4,688,982,227,691,670,000
|
Return an indexer class that will compute the window start and end bounds
Returns
-------
GroupbyIndexer
|
pandas/core/window/ewm.py
|
_get_window_indexer
|
DrGFreeman/pandas
|
python
|
def _get_window_indexer(self) -> GroupbyIndexer:
'\n Return an indexer class that will compute the window start and end bounds\n\n Returns\n -------\n GroupbyIndexer\n '
window_indexer = GroupbyIndexer(groupby_indicies=self._grouper.indices, window_indexer=ExponentialMovingWindowIndexer)
return window_indexer
|
def reset(self):
'\n Reset the state captured by `update` calls.\n '
self._mean.reset()
| 8,308,839,287,548,406,000
|
Reset the state captured by `update` calls.
|
pandas/core/window/ewm.py
|
reset
|
DrGFreeman/pandas
|
python
|
def reset(self):
'\n \n '
self._mean.reset()
|
def mean(self, *args, update=None, update_times=None, **kwargs):
'\n Calculate an online exponentially weighted mean.\n\n Parameters\n ----------\n update: DataFrame or Series, default None\n New values to continue calculating the\n exponentially weighted mean from the last values and weights.\n Values should be float64 dtype.\n\n ``update`` needs to be ``None`` the first time the\n exponentially weighted mean is calculated.\n\n update_times: Series or 1-D np.ndarray, default None\n New times to continue calculating the\n exponentially weighted mean from the last values and weights.\n If ``None``, values are assumed to be evenly spaced\n in time.\n This feature is currently unsupported.\n\n Returns\n -------\n DataFrame or Series\n\n Examples\n --------\n >>> df = pd.DataFrame({"a": range(5), "b": range(5, 10)})\n >>> online_ewm = df.head(2).ewm(0.5).online()\n >>> online_ewm.mean()\n a b\n 0 0.00 5.00\n 1 0.75 5.75\n >>> online_ewm.mean(update=df.tail(3))\n a b\n 2 1.615385 6.615385\n 3 2.550000 7.550000\n 4 3.520661 8.520661\n >>> online_ewm.reset()\n >>> online_ewm.mean()\n a b\n 0 0.00 5.00\n 1 0.75 5.75\n '
result_kwargs = {}
is_frame = (True if (self._selected_obj.ndim == 2) else False)
if (update_times is not None):
raise NotImplementedError('update_times is not implemented.')
else:
update_deltas = np.ones(max((self._selected_obj.shape[(self.axis - 1)] - 1), 0), dtype=np.float64)
if (update is not None):
if (self._mean.last_ewm is None):
raise ValueError('Must call mean with update=None first before passing update')
result_from = 1
result_kwargs['index'] = update.index
if is_frame:
last_value = self._mean.last_ewm[np.newaxis, :]
result_kwargs['columns'] = update.columns
else:
last_value = self._mean.last_ewm
result_kwargs['name'] = update.name
np_array = np.concatenate((last_value, update.to_numpy()))
else:
result_from = 0
result_kwargs['index'] = self._selected_obj.index
if is_frame:
result_kwargs['columns'] = self._selected_obj.columns
else:
result_kwargs['name'] = self._selected_obj.name
np_array = self._selected_obj.astype(np.float64).to_numpy()
ewma_func = generate_online_numba_ewma_func(self.engine_kwargs)
result = self._mean.run_ewm((np_array if is_frame else np_array[:, np.newaxis]), update_deltas, self.min_periods, ewma_func)
if (not is_frame):
result = result.squeeze()
result = result[result_from:]
result = self._selected_obj._constructor(result, **result_kwargs)
return result
| 581,572,148,694,043,100
|
Calculate an online exponentially weighted mean.
Parameters
----------
update: DataFrame or Series, default None
New values to continue calculating the
exponentially weighted mean from the last values and weights.
Values should be float64 dtype.
``update`` needs to be ``None`` the first time the
exponentially weighted mean is calculated.
update_times: Series or 1-D np.ndarray, default None
New times to continue calculating the
exponentially weighted mean from the last values and weights.
If ``None``, values are assumed to be evenly spaced
in time.
This feature is currently unsupported.
Returns
-------
DataFrame or Series
Examples
--------
>>> df = pd.DataFrame({"a": range(5), "b": range(5, 10)})
>>> online_ewm = df.head(2).ewm(0.5).online()
>>> online_ewm.mean()
a b
0 0.00 5.00
1 0.75 5.75
>>> online_ewm.mean(update=df.tail(3))
a b
2 1.615385 6.615385
3 2.550000 7.550000
4 3.520661 8.520661
>>> online_ewm.reset()
>>> online_ewm.mean()
a b
0 0.00 5.00
1 0.75 5.75
|
pandas/core/window/ewm.py
|
mean
|
DrGFreeman/pandas
|
python
|
def mean(self, *args, update=None, update_times=None, **kwargs):
'\n Calculate an online exponentially weighted mean.\n\n Parameters\n ----------\n update: DataFrame or Series, default None\n New values to continue calculating the\n exponentially weighted mean from the last values and weights.\n Values should be float64 dtype.\n\n ``update`` needs to be ``None`` the first time the\n exponentially weighted mean is calculated.\n\n update_times: Series or 1-D np.ndarray, default None\n New times to continue calculating the\n exponentially weighted mean from the last values and weights.\n If ``None``, values are assumed to be evenly spaced\n in time.\n This feature is currently unsupported.\n\n Returns\n -------\n DataFrame or Series\n\n Examples\n --------\n >>> df = pd.DataFrame({"a": range(5), "b": range(5, 10)})\n >>> online_ewm = df.head(2).ewm(0.5).online()\n >>> online_ewm.mean()\n a b\n 0 0.00 5.00\n 1 0.75 5.75\n >>> online_ewm.mean(update=df.tail(3))\n a b\n 2 1.615385 6.615385\n 3 2.550000 7.550000\n 4 3.520661 8.520661\n >>> online_ewm.reset()\n >>> online_ewm.mean()\n a b\n 0 0.00 5.00\n 1 0.75 5.75\n '
result_kwargs = {}
is_frame = (True if (self._selected_obj.ndim == 2) else False)
if (update_times is not None):
raise NotImplementedError('update_times is not implemented.')
else:
update_deltas = np.ones(max((self._selected_obj.shape[(self.axis - 1)] - 1), 0), dtype=np.float64)
if (update is not None):
if (self._mean.last_ewm is None):
raise ValueError('Must call mean with update=None first before passing update')
result_from = 1
result_kwargs['index'] = update.index
if is_frame:
last_value = self._mean.last_ewm[np.newaxis, :]
result_kwargs['columns'] = update.columns
else:
last_value = self._mean.last_ewm
result_kwargs['name'] = update.name
np_array = np.concatenate((last_value, update.to_numpy()))
else:
result_from = 0
result_kwargs['index'] = self._selected_obj.index
if is_frame:
result_kwargs['columns'] = self._selected_obj.columns
else:
result_kwargs['name'] = self._selected_obj.name
np_array = self._selected_obj.astype(np.float64).to_numpy()
ewma_func = generate_online_numba_ewma_func(self.engine_kwargs)
result = self._mean.run_ewm((np_array if is_frame else np_array[:, np.newaxis]), update_deltas, self.min_periods, ewma_func)
if (not is_frame):
result = result.squeeze()
result = result[result_from:]
result = self._selected_obj._constructor(result, **result_kwargs)
return result
|
def forward(self, seed_points, seed_feats):
'forward.\n\n Args:\n seed_points (torch.Tensor): Coordinate of the seed\n points in shape (B, N, 3).\n seed_feats (torch.Tensor): Features of the seed points in shape\n (B, C, N).\n\n Returns:\n tuple[torch.Tensor]:\n\n - vote_points: Voted xyz based on the seed points with shape (B, M, 3), ``M=num_seed*vote_per_seed``.\n - vote_features: Voted features based on the seed points with shape (B, C, M) where ``M=num_seed*vote_per_seed``, ``C=vote_feature_dim``.\n '
(batch_size, feat_channels, num_seed) = seed_feats.shape
num_vote = (num_seed * self.vote_per_seed)
x = self.vote_conv(seed_feats)
votes = self.conv_out(x)
votes = votes.transpose(2, 1).view(batch_size, num_seed, self.vote_per_seed, (- 1))
offset = votes[:, :, :, 0:3]
res_feats = votes[:, :, :, 3:]
vote_points = (seed_points.unsqueeze(2) + offset).contiguous()
vote_points = vote_points.view(batch_size, num_vote, 3)
vote_feats = (seed_feats.transpose(2, 1).unsqueeze(2) + res_feats).contiguous()
vote_feats = vote_feats.view(batch_size, num_vote, feat_channels).transpose(2, 1).contiguous()
if self.norm_feats:
features_norm = torch.norm(vote_feats, p=2, dim=1)
vote_feats = vote_feats.div(features_norm.unsqueeze(1))
return (vote_points, vote_feats)
| 6,924,099,449,724,303,000
|
forward.
Args:
seed_points (torch.Tensor): Coordinate of the seed
points in shape (B, N, 3).
seed_feats (torch.Tensor): Features of the seed points in shape
(B, C, N).
Returns:
tuple[torch.Tensor]:
- vote_points: Voted xyz based on the seed points with shape (B, M, 3), ``M=num_seed*vote_per_seed``.
- vote_features: Voted features based on the seed points with shape (B, C, M) where ``M=num_seed*vote_per_seed``, ``C=vote_feature_dim``.
|
mmdet3d/models/model_utils/vote_module.py
|
forward
|
BOURSa/mmdetection3d
|
python
|
def forward(self, seed_points, seed_feats):
'forward.\n\n Args:\n seed_points (torch.Tensor): Coordinate of the seed\n points in shape (B, N, 3).\n seed_feats (torch.Tensor): Features of the seed points in shape\n (B, C, N).\n\n Returns:\n tuple[torch.Tensor]:\n\n - vote_points: Voted xyz based on the seed points with shape (B, M, 3), ``M=num_seed*vote_per_seed``.\n - vote_features: Voted features based on the seed points with shape (B, C, M) where ``M=num_seed*vote_per_seed``, ``C=vote_feature_dim``.\n '
(batch_size, feat_channels, num_seed) = seed_feats.shape
num_vote = (num_seed * self.vote_per_seed)
x = self.vote_conv(seed_feats)
votes = self.conv_out(x)
votes = votes.transpose(2, 1).view(batch_size, num_seed, self.vote_per_seed, (- 1))
offset = votes[:, :, :, 0:3]
res_feats = votes[:, :, :, 3:]
vote_points = (seed_points.unsqueeze(2) + offset).contiguous()
vote_points = vote_points.view(batch_size, num_vote, 3)
vote_feats = (seed_feats.transpose(2, 1).unsqueeze(2) + res_feats).contiguous()
vote_feats = vote_feats.view(batch_size, num_vote, feat_channels).transpose(2, 1).contiguous()
if self.norm_feats:
features_norm = torch.norm(vote_feats, p=2, dim=1)
vote_feats = vote_feats.div(features_norm.unsqueeze(1))
return (vote_points, vote_feats)
|
def get_loss(self, seed_points, vote_points, seed_indices, vote_targets_mask, vote_targets):
'Calculate loss of voting module.\n\n Args:\n seed_points (torch.Tensor): Coordinate of the seed points.\n vote_points (torch.Tensor): Coordinate of the vote points.\n seed_indices (torch.Tensor): Indices of seed points in raw points.\n vote_targets_mask (torch.Tensor): Mask of valid vote targets.\n vote_targets (torch.Tensor): Targets of votes.\n\n Returns:\n torch.Tensor: Weighted vote loss.\n '
(batch_size, num_seed) = seed_points.shape[:2]
seed_gt_votes_mask = torch.gather(vote_targets_mask, 1, seed_indices).float()
seed_indices_expand = seed_indices.unsqueeze((- 1)).repeat(1, 1, (3 * self.gt_per_seed))
seed_gt_votes = torch.gather(vote_targets, 1, seed_indices_expand)
seed_gt_votes += seed_points.repeat(1, 1, 3)
weight = (seed_gt_votes_mask / (torch.sum(seed_gt_votes_mask) + 1e-06))
distance = self.vote_loss(vote_points.view((batch_size * num_seed), (- 1), 3), seed_gt_votes.view((batch_size * num_seed), (- 1), 3), dst_weight=weight.view((batch_size * num_seed), 1))[1]
vote_loss = torch.sum(torch.min(distance, dim=1)[0])
return vote_loss
| 1,211,448,506,085,380,000
|
Calculate loss of voting module.
Args:
seed_points (torch.Tensor): Coordinate of the seed points.
vote_points (torch.Tensor): Coordinate of the vote points.
seed_indices (torch.Tensor): Indices of seed points in raw points.
vote_targets_mask (torch.Tensor): Mask of valid vote targets.
vote_targets (torch.Tensor): Targets of votes.
Returns:
torch.Tensor: Weighted vote loss.
|
mmdet3d/models/model_utils/vote_module.py
|
get_loss
|
BOURSa/mmdetection3d
|
python
|
def get_loss(self, seed_points, vote_points, seed_indices, vote_targets_mask, vote_targets):
'Calculate loss of voting module.\n\n Args:\n seed_points (torch.Tensor): Coordinate of the seed points.\n vote_points (torch.Tensor): Coordinate of the vote points.\n seed_indices (torch.Tensor): Indices of seed points in raw points.\n vote_targets_mask (torch.Tensor): Mask of valid vote targets.\n vote_targets (torch.Tensor): Targets of votes.\n\n Returns:\n torch.Tensor: Weighted vote loss.\n '
(batch_size, num_seed) = seed_points.shape[:2]
seed_gt_votes_mask = torch.gather(vote_targets_mask, 1, seed_indices).float()
seed_indices_expand = seed_indices.unsqueeze((- 1)).repeat(1, 1, (3 * self.gt_per_seed))
seed_gt_votes = torch.gather(vote_targets, 1, seed_indices_expand)
seed_gt_votes += seed_points.repeat(1, 1, 3)
weight = (seed_gt_votes_mask / (torch.sum(seed_gt_votes_mask) + 1e-06))
distance = self.vote_loss(vote_points.view((batch_size * num_seed), (- 1), 3), seed_gt_votes.view((batch_size * num_seed), (- 1), 3), dst_weight=weight.view((batch_size * num_seed), 1))[1]
vote_loss = torch.sum(torch.min(distance, dim=1)[0])
return vote_loss
|
def __get_mortality_pp_increase(self, temperature: float, fish_mass: float) -> float:
'Get the mortality percentage point difference increase.\n\n :param temperature: the temperature in Celsius\n :param fish_mass: the fish mass (in grams)\n :returns: Mortality percentage point difference increase\n '
fish_mass_indicator = (1 if (fish_mass > 2000) else 0)
input = np.array([1, temperature, fish_mass_indicator, (temperature ** 2), (temperature * fish_mass_indicator), (fish_mass_indicator ** 2)])
return max(float(self.quadratic_fish_mortality_coeffs.dot(input)), 0)
| -3,144,523,383,826,243,000
|
Get the mortality percentage point difference increase.
:param temperature: the temperature in Celsius
:param fish_mass: the fish mass (in grams)
:returns: Mortality percentage point difference increase
|
slim/types/TreatmentTypes.py
|
__get_mortality_pp_increase
|
magicicada/slim
|
python
|
def __get_mortality_pp_increase(self, temperature: float, fish_mass: float) -> float:
'Get the mortality percentage point difference increase.\n\n :param temperature: the temperature in Celsius\n :param fish_mass: the fish mass (in grams)\n :returns: Mortality percentage point difference increase\n '
fish_mass_indicator = (1 if (fish_mass > 2000) else 0)
input = np.array([1, temperature, fish_mass_indicator, (temperature ** 2), (temperature * fish_mass_indicator), (fish_mass_indicator ** 2)])
return max(float(self.quadratic_fish_mortality_coeffs.dot(input)), 0)
|
@abstractmethod
def delay(self, average_temperature: float):
'\n Delay before treatment should have a noticeable effect\n '
| -2,254,910,768,644,339,700
|
Delay before treatment should have a noticeable effect
|
slim/types/TreatmentTypes.py
|
delay
|
magicicada/slim
|
python
|
@abstractmethod
def delay(self, average_temperature: float):
'\n \n '
|
@staticmethod
def get_allele_heterozygous_trait(alleles: Alleles):
'\n Get the allele heterozygous type\n '
if ('A' in alleles):
if ('a' in alleles):
trait = HeterozygousResistance.INCOMPLETELY_DOMINANT
else:
trait = HeterozygousResistance.DOMINANT
else:
trait = HeterozygousResistance.RECESSIVE
return trait
| -8,403,997,583,842,534,000
|
Get the allele heterozygous type
|
slim/types/TreatmentTypes.py
|
get_allele_heterozygous_trait
|
magicicada/slim
|
python
|
@staticmethod
def get_allele_heterozygous_trait(alleles: Alleles):
'\n \n '
if ('A' in alleles):
if ('a' in alleles):
trait = HeterozygousResistance.INCOMPLETELY_DOMINANT
else:
trait = HeterozygousResistance.DOMINANT
else:
trait = HeterozygousResistance.RECESSIVE
return trait
|
@abstractmethod
def get_lice_treatment_mortality_rate(self, lice_population: LicePopulation, temperature: float) -> GenoTreatmentDistrib:
'\n Calculate the mortality rates of this treatment\n '
| -7,484,706,959,667,294,000
|
Calculate the mortality rates of this treatment
|
slim/types/TreatmentTypes.py
|
get_lice_treatment_mortality_rate
|
magicicada/slim
|
python
|
@abstractmethod
def get_lice_treatment_mortality_rate(self, lice_population: LicePopulation, temperature: float) -> GenoTreatmentDistrib:
'\n \n '
|
def get_fish_mortality_occurrences(self, temperature: float, fish_mass: float, num_fish: float, efficacy_window: float, mortality_events: int):
'Get the number of fish that die due to treatment\n\n :param temperature: the temperature of the cage\n :param num_fish: the number of fish\n :param fish_mass: the average fish mass (in grams)\n :param efficacy_window: the length of the efficacy window\n :param mortality_events: the number of fish mortality events to subtract from\n '
predicted_pp_increase = self.__get_mortality_pp_increase(temperature, fish_mass)
mortality_events_pp = ((100 * mortality_events) / num_fish)
predicted_deaths = ((((predicted_pp_increase + mortality_events_pp) * num_fish) / 100) - mortality_events)
predicted_deaths /= efficacy_window
return predicted_deaths
| 220,058,123,522,875,100
|
Get the number of fish that die due to treatment
:param temperature: the temperature of the cage
:param num_fish: the number of fish
:param fish_mass: the average fish mass (in grams)
:param efficacy_window: the length of the efficacy window
:param mortality_events: the number of fish mortality events to subtract from
|
slim/types/TreatmentTypes.py
|
get_fish_mortality_occurrences
|
magicicada/slim
|
python
|
def get_fish_mortality_occurrences(self, temperature: float, fish_mass: float, num_fish: float, efficacy_window: float, mortality_events: int):
'Get the number of fish that die due to treatment\n\n :param temperature: the temperature of the cage\n :param num_fish: the number of fish\n :param fish_mass: the average fish mass (in grams)\n :param efficacy_window: the length of the efficacy window\n :param mortality_events: the number of fish mortality events to subtract from\n '
predicted_pp_increase = self.__get_mortality_pp_increase(temperature, fish_mass)
mortality_events_pp = ((100 * mortality_events) / num_fish)
predicted_deaths = ((((predicted_pp_increase + mortality_events_pp) * num_fish) / 100) - mortality_events)
predicted_deaths /= efficacy_window
return predicted_deaths
|
def savedJSONInvariants(testCase: TestCase, savedJSON: str) -> str:
'\n Assert a few things about the result of L{eventAsJSON}, then return it.\n\n @param testCase: The L{TestCase} with which to perform the assertions.\n @param savedJSON: The result of L{eventAsJSON}.\n\n @return: C{savedJSON}\n\n @raise AssertionError: If any of the preconditions fail.\n '
testCase.assertIsInstance(savedJSON, str)
testCase.assertEqual(savedJSON.count('\n'), 0)
return savedJSON
| -6,524,027,284,910,949,000
|
Assert a few things about the result of L{eventAsJSON}, then return it.
@param testCase: The L{TestCase} with which to perform the assertions.
@param savedJSON: The result of L{eventAsJSON}.
@return: C{savedJSON}
@raise AssertionError: If any of the preconditions fail.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
savedJSONInvariants
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def savedJSONInvariants(testCase: TestCase, savedJSON: str) -> str:
'\n Assert a few things about the result of L{eventAsJSON}, then return it.\n\n @param testCase: The L{TestCase} with which to perform the assertions.\n @param savedJSON: The result of L{eventAsJSON}.\n\n @return: C{savedJSON}\n\n @raise AssertionError: If any of the preconditions fail.\n '
testCase.assertIsInstance(savedJSON, str)
testCase.assertEqual(savedJSON.count('\n'), 0)
return savedJSON
|
def savedEventJSON(self, event: LogEvent) -> str:
'\n Serialize some an events, assert some things about it, and return the\n JSON.\n\n @param event: An event.\n\n @return: JSON.\n '
return savedJSONInvariants(self, eventAsJSON(event))
| 3,940,816,593,613,317,600
|
Serialize some an events, assert some things about it, and return the
JSON.
@param event: An event.
@return: JSON.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
savedEventJSON
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def savedEventJSON(self, event: LogEvent) -> str:
'\n Serialize some an events, assert some things about it, and return the\n JSON.\n\n @param event: An event.\n\n @return: JSON.\n '
return savedJSONInvariants(self, eventAsJSON(event))
|
def test_simpleSaveLoad(self) -> None:
'\n Saving and loading an empty dictionary results in an empty dictionary.\n '
self.assertEqual(eventFromJSON(self.savedEventJSON({})), {})
| 158,441,858,065,887,970
|
Saving and loading an empty dictionary results in an empty dictionary.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_simpleSaveLoad
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_simpleSaveLoad(self) -> None:
'\n \n '
self.assertEqual(eventFromJSON(self.savedEventJSON({})), {})
|
def test_saveLoad(self) -> None:
"\n Saving and loading a dictionary with some simple values in it results\n in those same simple values in the output; according to JSON's rules,\n though, all dictionary keys must be L{str} and any non-L{str}\n keys will be converted.\n "
self.assertEqual(eventFromJSON(self.savedEventJSON({1: 2, '3': '4'})), {'1': 2, '3': '4'})
| -5,613,583,833,805,194,000
|
Saving and loading a dictionary with some simple values in it results
in those same simple values in the output; according to JSON's rules,
though, all dictionary keys must be L{str} and any non-L{str}
keys will be converted.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_saveLoad
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_saveLoad(self) -> None:
"\n Saving and loading a dictionary with some simple values in it results\n in those same simple values in the output; according to JSON's rules,\n though, all dictionary keys must be L{str} and any non-L{str}\n keys will be converted.\n "
self.assertEqual(eventFromJSON(self.savedEventJSON({1: 2, '3': '4'})), {'1': 2, '3': '4'})
|
def test_saveUnPersistable(self) -> None:
'\n Saving and loading an object which cannot be represented in JSON will\n result in a placeholder.\n '
self.assertEqual(eventFromJSON(self.savedEventJSON({'1': 2, '3': object()})), {'1': 2, '3': {'unpersistable': True}})
| 1,832,279,315,044,509,000
|
Saving and loading an object which cannot be represented in JSON will
result in a placeholder.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_saveUnPersistable
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_saveUnPersistable(self) -> None:
'\n Saving and loading an object which cannot be represented in JSON will\n result in a placeholder.\n '
self.assertEqual(eventFromJSON(self.savedEventJSON({'1': 2, '3': object()})), {'1': 2, '3': {'unpersistable': True}})
|
def test_saveNonASCII(self) -> None:
'\n Non-ASCII keys and values can be saved and loaded.\n '
self.assertEqual(eventFromJSON(self.savedEventJSON({'ሴ': '䌡', '3': object()})), {'ሴ': '䌡', '3': {'unpersistable': True}})
| 2,038,994,471,241,656,300
|
Non-ASCII keys and values can be saved and loaded.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_saveNonASCII
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_saveNonASCII(self) -> None:
'\n \n '
self.assertEqual(eventFromJSON(self.savedEventJSON({'ሴ': '䌡', '3': object()})), {'ሴ': '䌡', '3': {'unpersistable': True}})
|
def test_saveBytes(self) -> None:
'\n Any L{bytes} objects will be saved as if they are latin-1 so they can\n be faithfully re-loaded.\n '
inputEvent = {'hello': bytes(range(255))}
inputEvent.update({b'skipped': 'okay'})
self.assertEqual(eventFromJSON(self.savedEventJSON(inputEvent)), {'hello': bytes(range(255)).decode('charmap')})
| 536,017,709,974,603,460
|
Any L{bytes} objects will be saved as if they are latin-1 so they can
be faithfully re-loaded.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_saveBytes
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_saveBytes(self) -> None:
'\n Any L{bytes} objects will be saved as if they are latin-1 so they can\n be faithfully re-loaded.\n '
inputEvent = {'hello': bytes(range(255))}
inputEvent.update({b'skipped': 'okay'})
self.assertEqual(eventFromJSON(self.savedEventJSON(inputEvent)), {'hello': bytes(range(255)).decode('charmap')})
|
def test_saveUnPersistableThenFormat(self) -> None:
'\n Saving and loading an object which cannot be represented in JSON, but\n has a string representation which I{can} be saved as JSON, will result\n in the same string formatting; any extractable fields will retain their\n data types.\n '
class Reprable():
def __init__(self, value: object) -> None:
self.value = value
def __repr__(self) -> str:
return 'reprable'
inputEvent = {'log_format': '{object} {object.value}', 'object': Reprable(7)}
outputEvent = eventFromJSON(self.savedEventJSON(inputEvent))
self.assertEqual(formatEvent(outputEvent), 'reprable 7')
| -7,824,544,873,663,906,000
|
Saving and loading an object which cannot be represented in JSON, but
has a string representation which I{can} be saved as JSON, will result
in the same string formatting; any extractable fields will retain their
data types.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_saveUnPersistableThenFormat
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_saveUnPersistableThenFormat(self) -> None:
'\n Saving and loading an object which cannot be represented in JSON, but\n has a string representation which I{can} be saved as JSON, will result\n in the same string formatting; any extractable fields will retain their\n data types.\n '
class Reprable():
def __init__(self, value: object) -> None:
self.value = value
def __repr__(self) -> str:
return 'reprable'
inputEvent = {'log_format': '{object} {object.value}', 'object': Reprable(7)}
outputEvent = eventFromJSON(self.savedEventJSON(inputEvent))
self.assertEqual(formatEvent(outputEvent), 'reprable 7')
|
def test_extractingFieldsPostLoad(self) -> None:
"\n L{extractField} can extract fields from an object that's been saved and\n loaded from JSON.\n "
class Obj():
def __init__(self) -> None:
self.value = 345
inputEvent = dict(log_format='{object.value}', object=Obj())
loadedEvent = eventFromJSON(self.savedEventJSON(inputEvent))
self.assertEqual(extractField('object.value', loadedEvent), 345)
self.assertRaises(KeyError, extractField, 'object', loadedEvent)
self.assertRaises(KeyError, extractField, 'object', inputEvent)
| -2,728,139,371,617,195,000
|
L{extractField} can extract fields from an object that's been saved and
loaded from JSON.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_extractingFieldsPostLoad
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_extractingFieldsPostLoad(self) -> None:
"\n L{extractField} can extract fields from an object that's been saved and\n loaded from JSON.\n "
class Obj():
def __init__(self) -> None:
self.value = 345
inputEvent = dict(log_format='{object.value}', object=Obj())
loadedEvent = eventFromJSON(self.savedEventJSON(inputEvent))
self.assertEqual(extractField('object.value', loadedEvent), 345)
self.assertRaises(KeyError, extractField, 'object', loadedEvent)
self.assertRaises(KeyError, extractField, 'object', inputEvent)
|
def test_failureStructurePreserved(self) -> None:
'\n Round-tripping a failure through L{eventAsJSON} preserves its class and\n structure.\n '
events: List[LogEvent] = []
log = Logger(observer=cast(ILogObserver, events.append))
try:
(1 / 0)
except ZeroDivisionError:
f = Failure()
log.failure('a message about failure', f)
self.assertEqual(len(events), 1)
loaded = eventFromJSON(self.savedEventJSON(events[0]))['log_failure']
self.assertIsInstance(loaded, Failure)
self.assertTrue(loaded.check(ZeroDivisionError))
self.assertIsInstance(loaded.getTraceback(), str)
| 5,266,929,867,749,554,000
|
Round-tripping a failure through L{eventAsJSON} preserves its class and
structure.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_failureStructurePreserved
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_failureStructurePreserved(self) -> None:
'\n Round-tripping a failure through L{eventAsJSON} preserves its class and\n structure.\n '
events: List[LogEvent] = []
log = Logger(observer=cast(ILogObserver, events.append))
try:
(1 / 0)
except ZeroDivisionError:
f = Failure()
log.failure('a message about failure', f)
self.assertEqual(len(events), 1)
loaded = eventFromJSON(self.savedEventJSON(events[0]))['log_failure']
self.assertIsInstance(loaded, Failure)
self.assertTrue(loaded.check(ZeroDivisionError))
self.assertIsInstance(loaded.getTraceback(), str)
|
def test_saveLoadLevel(self) -> None:
"\n It's important that the C{log_level} key remain a\n L{constantly.NamedConstant} object.\n "
inputEvent = dict(log_level=LogLevel.warn)
loadedEvent = eventFromJSON(self.savedEventJSON(inputEvent))
self.assertIs(loadedEvent['log_level'], LogLevel.warn)
| 2,264,376,178,710,008,000
|
It's important that the C{log_level} key remain a
L{constantly.NamedConstant} object.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_saveLoadLevel
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_saveLoadLevel(self) -> None:
"\n It's important that the C{log_level} key remain a\n L{constantly.NamedConstant} object.\n "
inputEvent = dict(log_level=LogLevel.warn)
loadedEvent = eventFromJSON(self.savedEventJSON(inputEvent))
self.assertIs(loadedEvent['log_level'], LogLevel.warn)
|
def test_saveLoadUnknownLevel(self) -> None:
"\n If a saved bit of JSON (let's say, from a future version of Twisted)\n were to persist a different log_level, it will resolve as None.\n "
loadedEvent = eventFromJSON('{"log_level": {"name": "other", "__class_uuid__": "02E59486-F24D-46AD-8224-3ACDF2A5732A"}}')
self.assertEqual(loadedEvent, dict(log_level=None))
| -8,522,714,585,909,203,000
|
If a saved bit of JSON (let's say, from a future version of Twisted)
were to persist a different log_level, it will resolve as None.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_saveLoadUnknownLevel
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_saveLoadUnknownLevel(self) -> None:
"\n If a saved bit of JSON (let's say, from a future version of Twisted)\n were to persist a different log_level, it will resolve as None.\n "
loadedEvent = eventFromJSON('{"log_level": {"name": "other", "__class_uuid__": "02E59486-F24D-46AD-8224-3ACDF2A5732A"}}')
self.assertEqual(loadedEvent, dict(log_level=None))
|
def test_interface(self) -> None:
'\n A L{FileLogObserver} returned by L{jsonFileLogObserver} is an\n L{ILogObserver}.\n '
with StringIO() as fileHandle:
observer = jsonFileLogObserver(fileHandle)
try:
verifyObject(ILogObserver, observer)
except BrokenMethodImplementation as e:
self.fail(e)
| 3,291,993,695,110,113,000
|
A L{FileLogObserver} returned by L{jsonFileLogObserver} is an
L{ILogObserver}.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_interface
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_interface(self) -> None:
'\n A L{FileLogObserver} returned by L{jsonFileLogObserver} is an\n L{ILogObserver}.\n '
with StringIO() as fileHandle:
observer = jsonFileLogObserver(fileHandle)
try:
verifyObject(ILogObserver, observer)
except BrokenMethodImplementation as e:
self.fail(e)
|
def assertObserverWritesJSON(self, recordSeparator: str='\x1e') -> None:
'\n Asserts that an observer created by L{jsonFileLogObserver} with the\n given arguments writes events serialized as JSON text, using the given\n record separator.\n\n @param recordSeparator: C{recordSeparator} argument to\n L{jsonFileLogObserver}\n '
with StringIO() as fileHandle:
observer = jsonFileLogObserver(fileHandle, recordSeparator)
event = dict(x=1)
observer(event)
self.assertEqual(fileHandle.getvalue(), f'''{recordSeparator}{{"x": 1}}
''')
| -8,549,672,763,853,330,000
|
Asserts that an observer created by L{jsonFileLogObserver} with the
given arguments writes events serialized as JSON text, using the given
record separator.
@param recordSeparator: C{recordSeparator} argument to
L{jsonFileLogObserver}
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
assertObserverWritesJSON
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def assertObserverWritesJSON(self, recordSeparator: str='\x1e') -> None:
'\n Asserts that an observer created by L{jsonFileLogObserver} with the\n given arguments writes events serialized as JSON text, using the given\n record separator.\n\n @param recordSeparator: C{recordSeparator} argument to\n L{jsonFileLogObserver}\n '
with StringIO() as fileHandle:
observer = jsonFileLogObserver(fileHandle, recordSeparator)
event = dict(x=1)
observer(event)
self.assertEqual(fileHandle.getvalue(), f'{recordSeparator}{{"x": 1}}
')
|
def test_observeWritesDefaultRecordSeparator(self) -> None:
'\n A L{FileLogObserver} created by L{jsonFileLogObserver} writes events\n serialzed as JSON text to a file when it observes events.\n By default, the record separator is C{"\\x1e"}.\n '
self.assertObserverWritesJSON()
| 747,158,194,430,973,300
|
A L{FileLogObserver} created by L{jsonFileLogObserver} writes events
serialzed as JSON text to a file when it observes events.
By default, the record separator is C{"\x1e"}.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_observeWritesDefaultRecordSeparator
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_observeWritesDefaultRecordSeparator(self) -> None:
'\n A L{FileLogObserver} created by L{jsonFileLogObserver} writes events\n serialzed as JSON text to a file when it observes events.\n By default, the record separator is C{"\\x1e"}.\n '
self.assertObserverWritesJSON()
|
def test_observeWritesEmptyRecordSeparator(self) -> None:
'\n A L{FileLogObserver} created by L{jsonFileLogObserver} writes events\n serialzed as JSON text to a file when it observes events.\n This test sets the record separator to C{""}.\n '
self.assertObserverWritesJSON(recordSeparator='')
| 5,725,957,345,564,703,000
|
A L{FileLogObserver} created by L{jsonFileLogObserver} writes events
serialzed as JSON text to a file when it observes events.
This test sets the record separator to C{""}.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_observeWritesEmptyRecordSeparator
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_observeWritesEmptyRecordSeparator(self) -> None:
'\n A L{FileLogObserver} created by L{jsonFileLogObserver} writes events\n serialzed as JSON text to a file when it observes events.\n This test sets the record separator to C{}.\n '
self.assertObserverWritesJSON(recordSeparator=)
|
def test_failureFormatting(self) -> None:
'\n A L{FileLogObserver} created by L{jsonFileLogObserver} writes failures\n serialized as JSON text to a file when it observes events.\n '
io = StringIO()
publisher = LogPublisher()
logged: List[LogEvent] = []
publisher.addObserver(cast(ILogObserver, logged.append))
publisher.addObserver(jsonFileLogObserver(io))
logger = Logger(observer=publisher)
try:
(1 / 0)
except BaseException:
logger.failure('failed as expected')
reader = StringIO(io.getvalue())
deserialized = list(eventsFromJSONLogFile(reader))
def checkEvents(logEvents: Sequence[LogEvent]) -> None:
self.assertEqual(len(logEvents), 1)
[failureEvent] = logEvents
self.assertIn('log_failure', failureEvent)
failureObject = failureEvent['log_failure']
self.assertIsInstance(failureObject, Failure)
tracebackObject = failureObject.getTracebackObject()
self.assertEqual(tracebackObject.tb_frame.f_code.co_filename.rstrip('co'), __file__.rstrip('co'))
checkEvents(logged)
checkEvents(deserialized)
| -7,563,002,394,003,736,000
|
A L{FileLogObserver} created by L{jsonFileLogObserver} writes failures
serialized as JSON text to a file when it observes events.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_failureFormatting
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_failureFormatting(self) -> None:
'\n A L{FileLogObserver} created by L{jsonFileLogObserver} writes failures\n serialized as JSON text to a file when it observes events.\n '
io = StringIO()
publisher = LogPublisher()
logged: List[LogEvent] = []
publisher.addObserver(cast(ILogObserver, logged.append))
publisher.addObserver(jsonFileLogObserver(io))
logger = Logger(observer=publisher)
try:
(1 / 0)
except BaseException:
logger.failure('failed as expected')
reader = StringIO(io.getvalue())
deserialized = list(eventsFromJSONLogFile(reader))
def checkEvents(logEvents: Sequence[LogEvent]) -> None:
self.assertEqual(len(logEvents), 1)
[failureEvent] = logEvents
self.assertIn('log_failure', failureEvent)
failureObject = failureEvent['log_failure']
self.assertIsInstance(failureObject, Failure)
tracebackObject = failureObject.getTracebackObject()
self.assertEqual(tracebackObject.tb_frame.f_code.co_filename.rstrip('co'), __file__.rstrip('co'))
checkEvents(logged)
checkEvents(deserialized)
|
def _readEvents(self, inFile: IO[Any], recordSeparator: Optional[str]=None, bufferSize: int=4096) -> None:
'\n Test that L{eventsFromJSONLogFile} reads two pre-defined events from a\n file: C{{"x": 1}} and C{{"y": 2}}.\n\n @param inFile: C{inFile} argument to L{eventsFromJSONLogFile}\n @param recordSeparator: C{recordSeparator} argument to\n L{eventsFromJSONLogFile}\n @param bufferSize: C{bufferSize} argument to L{eventsFromJSONLogFile}\n '
events = iter(eventsFromJSONLogFile(inFile, recordSeparator, bufferSize))
self.assertEqual(next(events), {'x': 1})
self.assertEqual(next(events), {'y': 2})
self.assertRaises(StopIteration, next, events)
| -2,322,267,403,128,152,600
|
Test that L{eventsFromJSONLogFile} reads two pre-defined events from a
file: C{{"x": 1}} and C{{"y": 2}}.
@param inFile: C{inFile} argument to L{eventsFromJSONLogFile}
@param recordSeparator: C{recordSeparator} argument to
L{eventsFromJSONLogFile}
@param bufferSize: C{bufferSize} argument to L{eventsFromJSONLogFile}
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
_readEvents
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def _readEvents(self, inFile: IO[Any], recordSeparator: Optional[str]=None, bufferSize: int=4096) -> None:
'\n Test that L{eventsFromJSONLogFile} reads two pre-defined events from a\n file: C{{"x": 1}} and C{{"y": 2}}.\n\n @param inFile: C{inFile} argument to L{eventsFromJSONLogFile}\n @param recordSeparator: C{recordSeparator} argument to\n L{eventsFromJSONLogFile}\n @param bufferSize: C{bufferSize} argument to L{eventsFromJSONLogFile}\n '
events = iter(eventsFromJSONLogFile(inFile, recordSeparator, bufferSize))
self.assertEqual(next(events), {'x': 1})
self.assertEqual(next(events), {'y': 2})
self.assertRaises(StopIteration, next, events)
|
def test_readEventsAutoWithRecordSeparator(self) -> None:
'\n L{eventsFromJSONLogFile} reads events from a file and automatically\n detects use of C{"\\x1e"} as the record separator.\n '
with StringIO('\x1e{"x": 1}\n\x1e{"y": 2}\n') as fileHandle:
self._readEvents(fileHandle)
self.assertEqual(len(self.errorEvents), 0)
| 1,436,743,086,405,439,500
|
L{eventsFromJSONLogFile} reads events from a file and automatically
detects use of C{"\x1e"} as the record separator.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_readEventsAutoWithRecordSeparator
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_readEventsAutoWithRecordSeparator(self) -> None:
'\n L{eventsFromJSONLogFile} reads events from a file and automatically\n detects use of C{"\\x1e"} as the record separator.\n '
with StringIO('\x1e{"x": 1}\n\x1e{"y": 2}\n') as fileHandle:
self._readEvents(fileHandle)
self.assertEqual(len(self.errorEvents), 0)
|
def test_readEventsAutoEmptyRecordSeparator(self) -> None:
'\n L{eventsFromJSONLogFile} reads events from a file and automatically\n detects use of C{""} as the record separator.\n '
with StringIO('{"x": 1}\n{"y": 2}\n') as fileHandle:
self._readEvents(fileHandle)
self.assertEqual(len(self.errorEvents), 0)
| -5,958,368,649,329,142,000
|
L{eventsFromJSONLogFile} reads events from a file and automatically
detects use of C{""} as the record separator.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_readEventsAutoEmptyRecordSeparator
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_readEventsAutoEmptyRecordSeparator(self) -> None:
'\n L{eventsFromJSONLogFile} reads events from a file and automatically\n detects use of C{} as the record separator.\n '
with StringIO('{"x": 1}\n{"y": 2}\n') as fileHandle:
self._readEvents(fileHandle)
self.assertEqual(len(self.errorEvents), 0)
|
def test_readEventsExplicitRecordSeparator(self) -> None:
'\n L{eventsFromJSONLogFile} reads events from a file and is told to use\n a specific record separator.\n '
with StringIO('\x08{"x": 1}\n\x08{"y": 2}\n') as fileHandle:
self._readEvents(fileHandle, recordSeparator='\x08')
self.assertEqual(len(self.errorEvents), 0)
| 4,865,284,781,638,233,000
|
L{eventsFromJSONLogFile} reads events from a file and is told to use
a specific record separator.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_readEventsExplicitRecordSeparator
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_readEventsExplicitRecordSeparator(self) -> None:
'\n L{eventsFromJSONLogFile} reads events from a file and is told to use\n a specific record separator.\n '
with StringIO('\x08{"x": 1}\n\x08{"y": 2}\n') as fileHandle:
self._readEvents(fileHandle, recordSeparator='\x08')
self.assertEqual(len(self.errorEvents), 0)
|
def test_readEventsPartialBuffer(self) -> None:
'\n L{eventsFromJSONLogFile} handles buffering a partial event.\n '
with StringIO('\x1e{"x": 1}\n\x1e{"y": 2}\n') as fileHandle:
self._readEvents(fileHandle, bufferSize=1)
self.assertEqual(len(self.errorEvents), 0)
| 19,513,616,301,660,430
|
L{eventsFromJSONLogFile} handles buffering a partial event.
|
SCRAPE/Lib/site-packages/twisted/logger/test/test_json.py
|
test_readEventsPartialBuffer
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
python
|
def test_readEventsPartialBuffer(self) -> None:
'\n \n '
with StringIO('\x1e{"x": 1}\n\x1e{"y": 2}\n') as fileHandle:
self._readEvents(fileHandle, bufferSize=1)
self.assertEqual(len(self.errorEvents), 0)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.