body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
|---|---|---|---|---|---|---|---|---|---|
92f483102d26266dfc92b8a1a45429a9539a21ffdc532f3144f0f090517f3066
|
@staticmethod
def fixed_delay(delay, run_first=False):
'Returns a waiter that always waits for the given time (in seconds) and returns the\n time waited.\n '
async def wait_func():
(await asyncio.sleep(delay))
return delay
return Waiter(wait_func, run_first=run_first)
|
Returns a waiter that always waits for the given time (in seconds) and returns the
time waited.
|
tle/util/tasks.py
|
fixed_delay
|
tle-alt/TLE
| 367
|
python
|
@staticmethod
def fixed_delay(delay, run_first=False):
'Returns a waiter that always waits for the given time (in seconds) and returns the\n time waited.\n '
async def wait_func():
(await asyncio.sleep(delay))
return delay
return Waiter(wait_func, run_first=run_first)
|
@staticmethod
def fixed_delay(delay, run_first=False):
'Returns a waiter that always waits for the given time (in seconds) and returns the\n time waited.\n '
async def wait_func():
(await asyncio.sleep(delay))
return delay
return Waiter(wait_func, run_first=run_first)<|docstring|>Returns a waiter that always waits for the given time (in seconds) and returns the
time waited.<|endoftext|>
|
548ac8debc043530c76c546bf983e6ffe5ec8bb05be2ff43e77b8c3926dcc061
|
@staticmethod
def for_event(event_cls, run_first=True):
'Returns a waiter that waits for the given event and returns the result of that\n event.\n '
async def wait_func():
return (await cf_common.event_sys.wait_for(event_cls))
return Waiter(wait_func, run_first=run_first)
|
Returns a waiter that waits for the given event and returns the result of that
event.
|
tle/util/tasks.py
|
for_event
|
tle-alt/TLE
| 367
|
python
|
@staticmethod
def for_event(event_cls, run_first=True):
'Returns a waiter that waits for the given event and returns the result of that\n event.\n '
async def wait_func():
return (await cf_common.event_sys.wait_for(event_cls))
return Waiter(wait_func, run_first=run_first)
|
@staticmethod
def for_event(event_cls, run_first=True):
'Returns a waiter that waits for the given event and returns the result of that\n event.\n '
async def wait_func():
return (await cf_common.event_sys.wait_for(event_cls))
return Waiter(wait_func, run_first=run_first)<|docstring|>Returns a waiter that waits for the given event and returns the result of that
event.<|endoftext|>
|
4a53c1e09cb0ee095c027c1861c6d55c343288b2cf97c12416f7d7ca1f130b33
|
def __init__(self, func, *, needs_instance=False):
'`needs_instance` indicates whether a self argument is required by the `func`.'
_ensure_coroutine_func(func)
self.func = func
self.needs_instance = needs_instance
|
`needs_instance` indicates whether a self argument is required by the `func`.
|
tle/util/tasks.py
|
__init__
|
tle-alt/TLE
| 367
|
python
|
def __init__(self, func, *, needs_instance=False):
_ensure_coroutine_func(func)
self.func = func
self.needs_instance = needs_instance
|
def __init__(self, func, *, needs_instance=False):
_ensure_coroutine_func(func)
self.func = func
self.needs_instance = needs_instance<|docstring|>`needs_instance` indicates whether a self argument is required by the `func`.<|endoftext|>
|
c8e1468b2bd4d0851c8f3219595e79c7f5c9c2181f40ea8aad020cece1115ad9
|
def __init__(self, name, func, waiter, exception_handler=None, *, instance=None):
'`instance`, if present, is passed as the first argument to `func`.'
_ensure_coroutine_func(func)
self.name = name
self.func = func
self._waiter = waiter
self._exception_handler = exception_handler
self.instance = instance
self.asyncio_task = None
self.logger = logging.getLogger(self.__class__.__name__)
|
`instance`, if present, is passed as the first argument to `func`.
|
tle/util/tasks.py
|
__init__
|
tle-alt/TLE
| 367
|
python
|
def __init__(self, name, func, waiter, exception_handler=None, *, instance=None):
_ensure_coroutine_func(func)
self.name = name
self.func = func
self._waiter = waiter
self._exception_handler = exception_handler
self.instance = instance
self.asyncio_task = None
self.logger = logging.getLogger(self.__class__.__name__)
|
def __init__(self, name, func, waiter, exception_handler=None, *, instance=None):
_ensure_coroutine_func(func)
self.name = name
self.func = func
self._waiter = waiter
self._exception_handler = exception_handler
self.instance = instance
self.asyncio_task = None
self.logger = logging.getLogger(self.__class__.__name__)<|docstring|>`instance`, if present, is passed as the first argument to `func`.<|endoftext|>
|
a4a0a44ac452b4f043c45e35aec63f3ac90037cfb105f952ef107f1215adecbe
|
def waiter(self, run_first=False):
'Returns a decorator that sets the decorated coroutine function as the waiter for this\n Task.\n '
def decorator(func):
self._waiter = Waiter(func, run_first=run_first)
return func
return decorator
|
Returns a decorator that sets the decorated coroutine function as the waiter for this
Task.
|
tle/util/tasks.py
|
waiter
|
tle-alt/TLE
| 367
|
python
|
def waiter(self, run_first=False):
'Returns a decorator that sets the decorated coroutine function as the waiter for this\n Task.\n '
def decorator(func):
self._waiter = Waiter(func, run_first=run_first)
return func
return decorator
|
def waiter(self, run_first=False):
'Returns a decorator that sets the decorated coroutine function as the waiter for this\n Task.\n '
def decorator(func):
self._waiter = Waiter(func, run_first=run_first)
return func
return decorator<|docstring|>Returns a decorator that sets the decorated coroutine function as the waiter for this
Task.<|endoftext|>
|
59adc6acf94a97ea90b57de2e4e4cbdcdc8eae236b7ba8319ca65ab22c158efb
|
def exception_handler(self):
'Returns a decorator that sets the decorated coroutine function as the exception handler\n for this Task.\n '
def decorator(func):
self._exception_handler = ExceptionHandler(func)
return func
return decorator
|
Returns a decorator that sets the decorated coroutine function as the exception handler
for this Task.
|
tle/util/tasks.py
|
exception_handler
|
tle-alt/TLE
| 367
|
python
|
def exception_handler(self):
'Returns a decorator that sets the decorated coroutine function as the exception handler\n for this Task.\n '
def decorator(func):
self._exception_handler = ExceptionHandler(func)
return func
return decorator
|
def exception_handler(self):
'Returns a decorator that sets the decorated coroutine function as the exception handler\n for this Task.\n '
def decorator(func):
self._exception_handler = ExceptionHandler(func)
return func
return decorator<|docstring|>Returns a decorator that sets the decorated coroutine function as the exception handler
for this Task.<|endoftext|>
|
b2f77b163873abea10994a074aa3fba0f6066b0fb6bf801c000aac69952231d1
|
def start(self):
'Starts up the task.'
if (self._waiter is None):
raise WaiterRequired(self.name)
if self.running:
raise TaskAlreadyRunning(self.name)
self.logger.info(f'Starting up task `{self.name}`.')
self.asyncio_task = asyncio.create_task(self._task())
|
Starts up the task.
|
tle/util/tasks.py
|
start
|
tle-alt/TLE
| 367
|
python
|
def start(self):
if (self._waiter is None):
raise WaiterRequired(self.name)
if self.running:
raise TaskAlreadyRunning(self.name)
self.logger.info(f'Starting up task `{self.name}`.')
self.asyncio_task = asyncio.create_task(self._task())
|
def start(self):
if (self._waiter is None):
raise WaiterRequired(self.name)
if self.running:
raise TaskAlreadyRunning(self.name)
self.logger.info(f'Starting up task `{self.name}`.')
self.asyncio_task = asyncio.create_task(self._task())<|docstring|>Starts up the task.<|endoftext|>
|
31a7b3e8f92a3550dd5271f4f6f28901e4d1059915ce1c36597a6fe60b20c928
|
async def manual_trigger(self, arg=None):
'Manually triggers the `func` with the optionally provided `arg`, which defaults to\n `None`.\n '
self.logger.info(f'Manually triggering task `{self.name}`.')
(await self._execute_func(arg))
|
Manually triggers the `func` with the optionally provided `arg`, which defaults to
`None`.
|
tle/util/tasks.py
|
manual_trigger
|
tle-alt/TLE
| 367
|
python
|
async def manual_trigger(self, arg=None):
'Manually triggers the `func` with the optionally provided `arg`, which defaults to\n `None`.\n '
self.logger.info(f'Manually triggering task `{self.name}`.')
(await self._execute_func(arg))
|
async def manual_trigger(self, arg=None):
'Manually triggers the `func` with the optionally provided `arg`, which defaults to\n `None`.\n '
self.logger.info(f'Manually triggering task `{self.name}`.')
(await self._execute_func(arg))<|docstring|>Manually triggers the `func` with the optionally provided `arg`, which defaults to
`None`.<|endoftext|>
|
b647a9e63f1a224d28a9de374ad1334eed48d7c28b44c2cb2b20bf7f01b30c6c
|
async def stop(self):
'Stops the task, interrupting the currently running coroutines.'
if self.running:
self.logger.info(f'Stopping task `{self.name}`.')
self.asyncio_task.cancel()
(await asyncio.sleep(0))
|
Stops the task, interrupting the currently running coroutines.
|
tle/util/tasks.py
|
stop
|
tle-alt/TLE
| 367
|
python
|
async def stop(self):
if self.running:
self.logger.info(f'Stopping task `{self.name}`.')
self.asyncio_task.cancel()
(await asyncio.sleep(0))
|
async def stop(self):
if self.running:
self.logger.info(f'Stopping task `{self.name}`.')
self.asyncio_task.cancel()
(await asyncio.sleep(0))<|docstring|>Stops the task, interrupting the currently running coroutines.<|endoftext|>
|
b40f66d16f3f1d076d74364fb0302d50c94c23a89e17a375d80318dc43d481c6
|
def waiter(self, run_first=False, needs_instance=True):
'Returns a decorator that sets the decorated coroutine function as the waiter for this\n TaskSpec.\n '
def decorator(func):
self._waiter = Waiter(func, run_first=run_first, needs_instance=needs_instance)
return func
return decorator
|
Returns a decorator that sets the decorated coroutine function as the waiter for this
TaskSpec.
|
tle/util/tasks.py
|
waiter
|
tle-alt/TLE
| 367
|
python
|
def waiter(self, run_first=False, needs_instance=True):
'Returns a decorator that sets the decorated coroutine function as the waiter for this\n TaskSpec.\n '
def decorator(func):
self._waiter = Waiter(func, run_first=run_first, needs_instance=needs_instance)
return func
return decorator
|
def waiter(self, run_first=False, needs_instance=True):
'Returns a decorator that sets the decorated coroutine function as the waiter for this\n TaskSpec.\n '
def decorator(func):
self._waiter = Waiter(func, run_first=run_first, needs_instance=needs_instance)
return func
return decorator<|docstring|>Returns a decorator that sets the decorated coroutine function as the waiter for this
TaskSpec.<|endoftext|>
|
aca610a40fc355d56ba154cf529e48b0a345c6eb882817bc6ed36dd34f4a1501
|
def exception_handler(self, needs_instance=True):
'Returns a decorator that sets the decorated coroutine function as the exception handler\n for this TaskSpec.\n '
def decorator(func):
self._exception_handler = ExceptionHandler(func, needs_instance=needs_instance)
return func
return decorator
|
Returns a decorator that sets the decorated coroutine function as the exception handler
for this TaskSpec.
|
tle/util/tasks.py
|
exception_handler
|
tle-alt/TLE
| 367
|
python
|
def exception_handler(self, needs_instance=True):
'Returns a decorator that sets the decorated coroutine function as the exception handler\n for this TaskSpec.\n '
def decorator(func):
self._exception_handler = ExceptionHandler(func, needs_instance=needs_instance)
return func
return decorator
|
def exception_handler(self, needs_instance=True):
'Returns a decorator that sets the decorated coroutine function as the exception handler\n for this TaskSpec.\n '
def decorator(func):
self._exception_handler = ExceptionHandler(func, needs_instance=needs_instance)
return func
return decorator<|docstring|>Returns a decorator that sets the decorated coroutine function as the exception handler
for this TaskSpec.<|endoftext|>
|
6fde3a18b9d739fbf961fd468b0a7ea3df20a64ac9a6fcfa98388b71ea36b85f
|
@staticmethod
def print_usage(cmds):
'print nikola "usage" (basic help) instructions'
print('Nikola')
print('Available commands:')
for cmd in sorted(cmds.values(), key=attrgetter('name')):
print((' nikola %s \t\t %s' % (cmd.name, cmd.doc_purpose)))
print('')
print(' nikola help show help / reference')
print(' nikola help <command> show command usage')
print(' nikola help <task-name> show task usage')
|
print nikola "usage" (basic help) instructions
|
nikola/main.py
|
print_usage
|
dhruvbaldawa/nikola
| 1
|
python
|
@staticmethod
def print_usage(cmds):
print('Nikola')
print('Available commands:')
for cmd in sorted(cmds.values(), key=attrgetter('name')):
print((' nikola %s \t\t %s' % (cmd.name, cmd.doc_purpose)))
print()
print(' nikola help show help / reference')
print(' nikola help <command> show command usage')
print(' nikola help <task-name> show task usage')
|
@staticmethod
def print_usage(cmds):
print('Nikola')
print('Available commands:')
for cmd in sorted(cmds.values(), key=attrgetter('name')):
print((' nikola %s \t\t %s' % (cmd.name, cmd.doc_purpose)))
print()
print(' nikola help show help / reference')
print(' nikola help <command> show command usage')
print(' nikola help <task-name> show task usage')<|docstring|>print nikola "usage" (basic help) instructions<|endoftext|>
|
eb8b8b5f01ac12ae6ab03ed44b51b5ff6d6f03ca5105f6b7dede33bf2de1afa5
|
def maxSideLength(self, mat, threshold: int) -> int:
'\n Adopt the prefix sum algorithm\n '
(m, n) = (len(mat), len(mat[0]))
_sum = [([0] * (n + 1)) for _ in range((m + 1))]
for i in range(1, (m + 1)):
for j in range(1, (n + 1)):
_sum[i][j] = (((_sum[(i - 1)][j] + _sum[i][(j - 1)]) - _sum[(i - 1)][(j - 1)]) + mat[(i - 1)][(j - 1)])
ans = 0
for k in range(1, (min(m, n) + 1)):
for i in range(1, (m + 1)):
for j in range(1, (n + 1)):
if (((i - k) < 0) or ((j - k) < 0)):
continue
tmp = (((_sum[i][j] - _sum[(i - k)][j]) - _sum[i][(j - k)]) + _sum[(i - k)][(j - k)])
if (tmp <= threshold):
ans = max(ans, k)
return ans
|
Adopt the prefix sum algorithm
|
Templates/Prefix_Sum.py
|
maxSideLength
|
ZR-Huang/AlgorithmPractices
| 1
|
python
|
def maxSideLength(self, mat, threshold: int) -> int:
'\n \n '
(m, n) = (len(mat), len(mat[0]))
_sum = [([0] * (n + 1)) for _ in range((m + 1))]
for i in range(1, (m + 1)):
for j in range(1, (n + 1)):
_sum[i][j] = (((_sum[(i - 1)][j] + _sum[i][(j - 1)]) - _sum[(i - 1)][(j - 1)]) + mat[(i - 1)][(j - 1)])
ans = 0
for k in range(1, (min(m, n) + 1)):
for i in range(1, (m + 1)):
for j in range(1, (n + 1)):
if (((i - k) < 0) or ((j - k) < 0)):
continue
tmp = (((_sum[i][j] - _sum[(i - k)][j]) - _sum[i][(j - k)]) + _sum[(i - k)][(j - k)])
if (tmp <= threshold):
ans = max(ans, k)
return ans
|
def maxSideLength(self, mat, threshold: int) -> int:
'\n \n '
(m, n) = (len(mat), len(mat[0]))
_sum = [([0] * (n + 1)) for _ in range((m + 1))]
for i in range(1, (m + 1)):
for j in range(1, (n + 1)):
_sum[i][j] = (((_sum[(i - 1)][j] + _sum[i][(j - 1)]) - _sum[(i - 1)][(j - 1)]) + mat[(i - 1)][(j - 1)])
ans = 0
for k in range(1, (min(m, n) + 1)):
for i in range(1, (m + 1)):
for j in range(1, (n + 1)):
if (((i - k) < 0) or ((j - k) < 0)):
continue
tmp = (((_sum[i][j] - _sum[(i - k)][j]) - _sum[i][(j - k)]) + _sum[(i - k)][(j - k)])
if (tmp <= threshold):
ans = max(ans, k)
return ans<|docstring|>Adopt the prefix sum algorithm<|endoftext|>
|
1dacac7fefdbbed6338d05f27c18a0d076b71ba30c948342bc2a680c57b2fea8
|
def __init__(self, **kwargs):
"\n Parameters:\n kwargs['data'] (string): Data to be submitted\n "
self.__data = kwargs['data']
self.__URL = None
self.log = []
|
Parameters:
kwargs['data'] (string): Data to be submitted
|
deliv/scraper.py
|
__init__
|
sourcerer0/packetStatus
| 0
|
python
|
def __init__(self, **kwargs):
"\n Parameters:\n kwargs['data'] (string): Data to be submitted\n "
self.__data = kwargs['data']
self.__URL = None
self.log = []
|
def __init__(self, **kwargs):
"\n Parameters:\n kwargs['data'] (string): Data to be submitted\n "
self.__data = kwargs['data']
self.__URL = None
self.log = []<|docstring|>Parameters:
kwargs['data'] (string): Data to be submitted<|endoftext|>
|
0672b99fed63c9bd51e20a54ca9274cb3f81f0d264893d9a45f500100144d931
|
def __init__(self, n):
'\n Initialize your data structure here.\n :type n: int\n '
self.counter = collections.Counter()
self.n = n
|
Initialize your data structure here.
:type n: int
|
Python3/0348-Design-Tic-Tac-Toe/soln.py
|
__init__
|
wyaadarsh/LeetCode-Solutions
| 5
|
python
|
def __init__(self, n):
'\n Initialize your data structure here.\n :type n: int\n '
self.counter = collections.Counter()
self.n = n
|
def __init__(self, n):
'\n Initialize your data structure here.\n :type n: int\n '
self.counter = collections.Counter()
self.n = n<|docstring|>Initialize your data structure here.
:type n: int<|endoftext|>
|
6b0088228a8ecc5ab962c1949450839dd1840c5fc59f4dff37fbf1642aad2e95
|
def move(self, row, col, player):
'\n Player {player} makes a move at ({row}, {col}).\n @param row The row of the board.\n @param col The column of the board.\n @param player The player, can be either 1 or 2.\n @return The current winning condition, can be either:\n 0: No one wins.\n 1: Player 1 wins.\n 2: Player 2 wins.\n :type row: int\n :type col: int\n :type player: int\n :rtype: int\n '
for (i, num) in enumerate([row, col, (row + col), (row - col)]):
self.counter[(player, i, num)] += 1
if (self.counter[(player, i, num)] == self.n):
return player
return 0
|
Player {player} makes a move at ({row}, {col}).
@param row The row of the board.
@param col The column of the board.
@param player The player, can be either 1 or 2.
@return The current winning condition, can be either:
0: No one wins.
1: Player 1 wins.
2: Player 2 wins.
:type row: int
:type col: int
:type player: int
:rtype: int
|
Python3/0348-Design-Tic-Tac-Toe/soln.py
|
move
|
wyaadarsh/LeetCode-Solutions
| 5
|
python
|
def move(self, row, col, player):
'\n Player {player} makes a move at ({row}, {col}).\n @param row The row of the board.\n @param col The column of the board.\n @param player The player, can be either 1 or 2.\n @return The current winning condition, can be either:\n 0: No one wins.\n 1: Player 1 wins.\n 2: Player 2 wins.\n :type row: int\n :type col: int\n :type player: int\n :rtype: int\n '
for (i, num) in enumerate([row, col, (row + col), (row - col)]):
self.counter[(player, i, num)] += 1
if (self.counter[(player, i, num)] == self.n):
return player
return 0
|
def move(self, row, col, player):
'\n Player {player} makes a move at ({row}, {col}).\n @param row The row of the board.\n @param col The column of the board.\n @param player The player, can be either 1 or 2.\n @return The current winning condition, can be either:\n 0: No one wins.\n 1: Player 1 wins.\n 2: Player 2 wins.\n :type row: int\n :type col: int\n :type player: int\n :rtype: int\n '
for (i, num) in enumerate([row, col, (row + col), (row - col)]):
self.counter[(player, i, num)] += 1
if (self.counter[(player, i, num)] == self.n):
return player
return 0<|docstring|>Player {player} makes a move at ({row}, {col}).
@param row The row of the board.
@param col The column of the board.
@param player The player, can be either 1 or 2.
@return The current winning condition, can be either:
0: No one wins.
1: Player 1 wins.
2: Player 2 wins.
:type row: int
:type col: int
:type player: int
:rtype: int<|endoftext|>
|
485496bc7dd76841eee26bf8ef9bce0bcbcca6f5c1fcde37fbefbfd3d9bc4408
|
def run_convergence(ref_list, saveplot=False, **options):
'Runs test for a list of refinements and computes error convergence rate'
polynomial_degree = options.get('polynomial_degree', 1)
l2_err = []
for r in ref_list:
l2_err.append(run(r, **options))
x_log = numpy.log10((numpy.array(ref_list, dtype=float) ** (- 1)))
y_log = numpy.log10(numpy.array(l2_err))
setup_name = 'h-advection'
def check_convergence(x_log, y_log, expected_slope, field_str, saveplot):
slope_rtol = 0.2
(slope, intercept, r_value, p_value, std_err) = stats.linregress(x_log, y_log)
if saveplot:
import matplotlib.pyplot as plt
(fig, ax) = plt.subplots(figsize=(5, 5))
ax.plot(x_log, y_log, 'k.')
x_min = x_log.min()
x_max = x_log.max()
offset = (0.05 * (x_max - x_min))
npoints = 50
xx = numpy.linspace((x_min - offset), (x_max + offset), npoints)
yy = (intercept + (slope * xx))
ax.plot(xx, yy, linestyle='--', linewidth=0.5, color='k')
ax.text(xx[(2 * int((npoints / 3)))], yy[(2 * int((npoints / 3)))], '{:4.2f}'.format(slope), verticalalignment='top', horizontalalignment='left')
ax.set_xlabel('log10(dx)')
ax.set_ylabel('log10(L2 error)')
ax.set_title(' '.join([setup_name, field_str, 'degree={:}'.format(polynomial_degree)]))
ref_str = ('ref-' + '-'.join([str(r) for r in ref_list]))
degree_str = 'o{:}'.format(polynomial_degree)
imgfile = '_'.join(['convergence', setup_name, field_str, ref_str, degree_str])
imgfile += '.png'
imgdir = create_directory('plots')
imgfile = os.path.join(imgdir, imgfile)
print_output('saving figure {:}'.format(imgfile))
plt.savefig(imgfile, dpi=200, bbox_inches='tight')
if (expected_slope is not None):
err_msg = '{:}: Wrong convergence rate {:.4f}, expected {:.4f}'.format(setup_name, slope, expected_slope)
assert (slope > (expected_slope * (1 - slope_rtol))), err_msg
print_output('{:}: convergence rate {:.4f} PASSED'.format(setup_name, slope))
else:
print_output('{:}: {:} convergence rate {:.4f}'.format(setup_name, field_str, slope))
return slope
check_convergence(x_log, y_log, (polynomial_degree + 1), 'tracer', saveplot)
|
Runs test for a list of refinements and computes error convergence rate
|
test/tracerEq/test_h-advection_mes_2d.py
|
run_convergence
|
pbrubeck/thetis
| 45
|
python
|
def run_convergence(ref_list, saveplot=False, **options):
polynomial_degree = options.get('polynomial_degree', 1)
l2_err = []
for r in ref_list:
l2_err.append(run(r, **options))
x_log = numpy.log10((numpy.array(ref_list, dtype=float) ** (- 1)))
y_log = numpy.log10(numpy.array(l2_err))
setup_name = 'h-advection'
def check_convergence(x_log, y_log, expected_slope, field_str, saveplot):
slope_rtol = 0.2
(slope, intercept, r_value, p_value, std_err) = stats.linregress(x_log, y_log)
if saveplot:
import matplotlib.pyplot as plt
(fig, ax) = plt.subplots(figsize=(5, 5))
ax.plot(x_log, y_log, 'k.')
x_min = x_log.min()
x_max = x_log.max()
offset = (0.05 * (x_max - x_min))
npoints = 50
xx = numpy.linspace((x_min - offset), (x_max + offset), npoints)
yy = (intercept + (slope * xx))
ax.plot(xx, yy, linestyle='--', linewidth=0.5, color='k')
ax.text(xx[(2 * int((npoints / 3)))], yy[(2 * int((npoints / 3)))], '{:4.2f}'.format(slope), verticalalignment='top', horizontalalignment='left')
ax.set_xlabel('log10(dx)')
ax.set_ylabel('log10(L2 error)')
ax.set_title(' '.join([setup_name, field_str, 'degree={:}'.format(polynomial_degree)]))
ref_str = ('ref-' + '-'.join([str(r) for r in ref_list]))
degree_str = 'o{:}'.format(polynomial_degree)
imgfile = '_'.join(['convergence', setup_name, field_str, ref_str, degree_str])
imgfile += '.png'
imgdir = create_directory('plots')
imgfile = os.path.join(imgdir, imgfile)
print_output('saving figure {:}'.format(imgfile))
plt.savefig(imgfile, dpi=200, bbox_inches='tight')
if (expected_slope is not None):
err_msg = '{:}: Wrong convergence rate {:.4f}, expected {:.4f}'.format(setup_name, slope, expected_slope)
assert (slope > (expected_slope * (1 - slope_rtol))), err_msg
print_output('{:}: convergence rate {:.4f} PASSED'.format(setup_name, slope))
else:
print_output('{:}: {:} convergence rate {:.4f}'.format(setup_name, field_str, slope))
return slope
check_convergence(x_log, y_log, (polynomial_degree + 1), 'tracer', saveplot)
|
def run_convergence(ref_list, saveplot=False, **options):
polynomial_degree = options.get('polynomial_degree', 1)
l2_err = []
for r in ref_list:
l2_err.append(run(r, **options))
x_log = numpy.log10((numpy.array(ref_list, dtype=float) ** (- 1)))
y_log = numpy.log10(numpy.array(l2_err))
setup_name = 'h-advection'
def check_convergence(x_log, y_log, expected_slope, field_str, saveplot):
slope_rtol = 0.2
(slope, intercept, r_value, p_value, std_err) = stats.linregress(x_log, y_log)
if saveplot:
import matplotlib.pyplot as plt
(fig, ax) = plt.subplots(figsize=(5, 5))
ax.plot(x_log, y_log, 'k.')
x_min = x_log.min()
x_max = x_log.max()
offset = (0.05 * (x_max - x_min))
npoints = 50
xx = numpy.linspace((x_min - offset), (x_max + offset), npoints)
yy = (intercept + (slope * xx))
ax.plot(xx, yy, linestyle='--', linewidth=0.5, color='k')
ax.text(xx[(2 * int((npoints / 3)))], yy[(2 * int((npoints / 3)))], '{:4.2f}'.format(slope), verticalalignment='top', horizontalalignment='left')
ax.set_xlabel('log10(dx)')
ax.set_ylabel('log10(L2 error)')
ax.set_title(' '.join([setup_name, field_str, 'degree={:}'.format(polynomial_degree)]))
ref_str = ('ref-' + '-'.join([str(r) for r in ref_list]))
degree_str = 'o{:}'.format(polynomial_degree)
imgfile = '_'.join(['convergence', setup_name, field_str, ref_str, degree_str])
imgfile += '.png'
imgdir = create_directory('plots')
imgfile = os.path.join(imgdir, imgfile)
print_output('saving figure {:}'.format(imgfile))
plt.savefig(imgfile, dpi=200, bbox_inches='tight')
if (expected_slope is not None):
err_msg = '{:}: Wrong convergence rate {:.4f}, expected {:.4f}'.format(setup_name, slope, expected_slope)
assert (slope > (expected_slope * (1 - slope_rtol))), err_msg
print_output('{:}: convergence rate {:.4f} PASSED'.format(setup_name, slope))
else:
print_output('{:}: {:} convergence rate {:.4f}'.format(setup_name, field_str, slope))
return slope
check_convergence(x_log, y_log, (polynomial_degree + 1), 'tracer', saveplot)<|docstring|>Runs test for a list of refinements and computes error convergence rate<|endoftext|>
|
927e46b8fcb2a15c91dabb2f77777636a6c04fc79a96fd21cbbce7ee2617580e
|
def cmdLineParser():
'\n Command line parser.\n '
parser = argparse.ArgumentParser(description='unwrap estimated wrapped phase', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--interferogram_file', type=str, dest='interferogramFile', required=True, help='Input interferogram file with complex format')
parser.add_argument('-c', '--coherence_file', type=str, dest='coherenceFile', required=True, help='Input coherence file')
parser.add_argument('-o', '--unwrap_file', type=str, dest='unwrapFile', required=True, help='Output unwrapped file')
parser.add_argument('-m', '--method', type=str, dest='method', default='snaphu', help='unwrapping method: default = snaphu')
return parser.parse_args()
|
Command line parser.
|
python/unwrap_fringe.py
|
cmdLineParser
|
dbekaert/fringe
| 0
|
python
|
def cmdLineParser():
'\n \n '
parser = argparse.ArgumentParser(description='unwrap estimated wrapped phase', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--interferogram_file', type=str, dest='interferogramFile', required=True, help='Input interferogram file with complex format')
parser.add_argument('-c', '--coherence_file', type=str, dest='coherenceFile', required=True, help='Input coherence file')
parser.add_argument('-o', '--unwrap_file', type=str, dest='unwrapFile', required=True, help='Output unwrapped file')
parser.add_argument('-m', '--method', type=str, dest='method', default='snaphu', help='unwrapping method: default = snaphu')
return parser.parse_args()
|
def cmdLineParser():
'\n \n '
parser = argparse.ArgumentParser(description='unwrap estimated wrapped phase', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--interferogram_file', type=str, dest='interferogramFile', required=True, help='Input interferogram file with complex format')
parser.add_argument('-c', '--coherence_file', type=str, dest='coherenceFile', required=True, help='Input coherence file')
parser.add_argument('-o', '--unwrap_file', type=str, dest='unwrapFile', required=True, help='Output unwrapped file')
parser.add_argument('-m', '--method', type=str, dest='method', default='snaphu', help='unwrapping method: default = snaphu')
return parser.parse_args()<|docstring|>Command line parser.<|endoftext|>
|
feeeb9bae75148c8b40f0f9d73490ed89857b49ebd9d84a7a2960f2a616f16a5
|
def fetch_email(M, msg_id):
'Returns the given email message as a unicode string.'
(res, data) = M.fetch(msg_id, '(RFC822)')
if (res == 'OK'):
raw_msg_txt = data[0][1]
try:
msg = email.message_from_bytes(raw_msg_txt)
except AttributeError:
msg = email.message_from_string(raw_msg_txt)
return msg
else:
return None
|
Returns the given email message as a unicode string.
|
indra/tools/machine/gmail_client.py
|
fetch_email
|
dianakolusheva/indra
| 136
|
python
|
def fetch_email(M, msg_id):
(res, data) = M.fetch(msg_id, '(RFC822)')
if (res == 'OK'):
raw_msg_txt = data[0][1]
try:
msg = email.message_from_bytes(raw_msg_txt)
except AttributeError:
msg = email.message_from_string(raw_msg_txt)
return msg
else:
return None
|
def fetch_email(M, msg_id):
(res, data) = M.fetch(msg_id, '(RFC822)')
if (res == 'OK'):
raw_msg_txt = data[0][1]
try:
msg = email.message_from_bytes(raw_msg_txt)
except AttributeError:
msg = email.message_from_string(raw_msg_txt)
return msg
else:
return None<|docstring|>Returns the given email message as a unicode string.<|endoftext|>
|
afc99158eedae2851f36fad0c1de94042f13b261cb4a1e81164f5a540c36b58f
|
def get_headers(msg):
'Takes email.message.Message object initialized from unicode string,\n returns dict with header fields.'
headers = {}
for k in msg.keys():
(header_txt, charset) = email.header.decode_header(msg[k])[0]
if (charset is not None):
header_txt = header_txt.decode(charset)
headers[k] = header_txt
return headers
|
Takes email.message.Message object initialized from unicode string,
returns dict with header fields.
|
indra/tools/machine/gmail_client.py
|
get_headers
|
dianakolusheva/indra
| 136
|
python
|
def get_headers(msg):
'Takes email.message.Message object initialized from unicode string,\n returns dict with header fields.'
headers = {}
for k in msg.keys():
(header_txt, charset) = email.header.decode_header(msg[k])[0]
if (charset is not None):
header_txt = header_txt.decode(charset)
headers[k] = header_txt
return headers
|
def get_headers(msg):
'Takes email.message.Message object initialized from unicode string,\n returns dict with header fields.'
headers = {}
for k in msg.keys():
(header_txt, charset) = email.header.decode_header(msg[k])[0]
if (charset is not None):
header_txt = header_txt.decode(charset)
headers[k] = header_txt
return headers<|docstring|>Takes email.message.Message object initialized from unicode string,
returns dict with header fields.<|endoftext|>
|
4a91922f884d5c540b99408711b0eb75bd15e827e30ee86f8367c60c94d648ff
|
def get_row(fname, index_of_book, start_row=0, verify_key=(- 1), sheet_key=0):
' start_rwo, verify_key, sheet_key, fname '
global results
current_file = os.path.split(fname)[1]
if (verify_key == (- 1)):
verify_key = 0
sh = open_workbook(fname).sheet_by_index(sheet_key)
(nrows, ncols) = (sh.nrows, sh.ncols)
for rowx in range((start_row - 1), nrows):
if (sh.row_types(rowx)[verify_key] == xlrd.XL_CELL_EMPTY):
nrows = rowx
break
results.append((sh.row_values(rowx) + [current_file[:4]]))
if (current_file[2:4] == '01'):
print()
print(f' {(index_of_book + 1):02} {current_file[:2]}级{current_file[2:4]}班 共统计到{((nrows - start_row) + 1)}条记录')
return results
|
start_rwo, verify_key, sheet_key, fname
|
build/lib/mgtb/mgtb.py
|
get_row
|
starttolearning/mergetables
| 0
|
python
|
def get_row(fname, index_of_book, start_row=0, verify_key=(- 1), sheet_key=0):
' '
global results
current_file = os.path.split(fname)[1]
if (verify_key == (- 1)):
verify_key = 0
sh = open_workbook(fname).sheet_by_index(sheet_key)
(nrows, ncols) = (sh.nrows, sh.ncols)
for rowx in range((start_row - 1), nrows):
if (sh.row_types(rowx)[verify_key] == xlrd.XL_CELL_EMPTY):
nrows = rowx
break
results.append((sh.row_values(rowx) + [current_file[:4]]))
if (current_file[2:4] == '01'):
print()
print(f' {(index_of_book + 1):02} {current_file[:2]}级{current_file[2:4]}班 共统计到{((nrows - start_row) + 1)}条记录')
return results
|
def get_row(fname, index_of_book, start_row=0, verify_key=(- 1), sheet_key=0):
' '
global results
current_file = os.path.split(fname)[1]
if (verify_key == (- 1)):
verify_key = 0
sh = open_workbook(fname).sheet_by_index(sheet_key)
(nrows, ncols) = (sh.nrows, sh.ncols)
for rowx in range((start_row - 1), nrows):
if (sh.row_types(rowx)[verify_key] == xlrd.XL_CELL_EMPTY):
nrows = rowx
break
results.append((sh.row_values(rowx) + [current_file[:4]]))
if (current_file[2:4] == '01'):
print()
print(f' {(index_of_book + 1):02} {current_file[:2]}级{current_file[2:4]}班 共统计到{((nrows - start_row) + 1)}条记录')
return results<|docstring|>start_rwo, verify_key, sheet_key, fname<|endoftext|>
|
3fca376cb64cfc7fad95abac15463770ead6e8b2d6545f0bad19eb1769789ce8
|
def cnfg_fermi():
"\n The output folder will contain initial, indermediate, and output image and data files. To define this folder, tou should either\n 1) set $PCAT_DATA_PATH environment variable, as in\n os.environ['PCAT_DATA_PATH'] = '/path/to/your/pcat/data/folder'\n inside your .bashrc or .zshrc, or\n 2) input pathpcat as an argument.\n "
numbener = 5
numbside = 256
numbpixl = (12 * (numbside ** 2))
numbevtt = 4
fluxisot = (1e-06 * np.ones((numbener, numbpixl, numbevtt)))
path = (os.environ['PCAT_DATA_PATH'] + '/data/inpt/isottuto.fits')
pf.writeto(path, fluxisot, clobber=True)
cmnd = 'wget https://faun.rc.fas.harvard.edu/tansu/pcat/tuto/psf_P7REP_SOURCE_V15_back.fits $PCAT_DATA_PATH/data/inpt/psf_P7REP_SOURCE_V15_back.fits'
pcat.init(forccart=True, pixltype='cart', diagmode=False, backtype=[1.0], numbswep=2000000, strgexpo=100000000000.0, probbrde=0.5)
|
The output folder will contain initial, indermediate, and output image and data files. To define this folder, tou should either
1) set $PCAT_DATA_PATH environment variable, as in
os.environ['PCAT_DATA_PATH'] = '/path/to/your/pcat/data/folder'
inside your .bashrc or .zshrc, or
2) input pathpcat as an argument.
|
tutorial.py
|
cnfg_fermi
|
tdaylan/pnts_tran
| 0
|
python
|
def cnfg_fermi():
"\n The output folder will contain initial, indermediate, and output image and data files. To define this folder, tou should either\n 1) set $PCAT_DATA_PATH environment variable, as in\n os.environ['PCAT_DATA_PATH'] = '/path/to/your/pcat/data/folder'\n inside your .bashrc or .zshrc, or\n 2) input pathpcat as an argument.\n "
numbener = 5
numbside = 256
numbpixl = (12 * (numbside ** 2))
numbevtt = 4
fluxisot = (1e-06 * np.ones((numbener, numbpixl, numbevtt)))
path = (os.environ['PCAT_DATA_PATH'] + '/data/inpt/isottuto.fits')
pf.writeto(path, fluxisot, clobber=True)
cmnd = 'wget https://faun.rc.fas.harvard.edu/tansu/pcat/tuto/psf_P7REP_SOURCE_V15_back.fits $PCAT_DATA_PATH/data/inpt/psf_P7REP_SOURCE_V15_back.fits'
pcat.init(forccart=True, pixltype='cart', diagmode=False, backtype=[1.0], numbswep=2000000, strgexpo=100000000000.0, probbrde=0.5)
|
def cnfg_fermi():
"\n The output folder will contain initial, indermediate, and output image and data files. To define this folder, tou should either\n 1) set $PCAT_DATA_PATH environment variable, as in\n os.environ['PCAT_DATA_PATH'] = '/path/to/your/pcat/data/folder'\n inside your .bashrc or .zshrc, or\n 2) input pathpcat as an argument.\n "
numbener = 5
numbside = 256
numbpixl = (12 * (numbside ** 2))
numbevtt = 4
fluxisot = (1e-06 * np.ones((numbener, numbpixl, numbevtt)))
path = (os.environ['PCAT_DATA_PATH'] + '/data/inpt/isottuto.fits')
pf.writeto(path, fluxisot, clobber=True)
cmnd = 'wget https://faun.rc.fas.harvard.edu/tansu/pcat/tuto/psf_P7REP_SOURCE_V15_back.fits $PCAT_DATA_PATH/data/inpt/psf_P7REP_SOURCE_V15_back.fits'
pcat.init(forccart=True, pixltype='cart', diagmode=False, backtype=[1.0], numbswep=2000000, strgexpo=100000000000.0, probbrde=0.5)<|docstring|>The output folder will contain initial, indermediate, and output image and data files. To define this folder, tou should either
1) set $PCAT_DATA_PATH environment variable, as in
os.environ['PCAT_DATA_PATH'] = '/path/to/your/pcat/data/folder'
inside your .bashrc or .zshrc, or
2) input pathpcat as an argument.<|endoftext|>
|
d7811da83b0afd181162bf801888aa916e44f56fa18e55ed1e6bb7754fd457ea
|
def cnfg_GaussianMix():
'\n Gaussian Mixture\n '
pcat.init()
|
Gaussian Mixture
|
tutorial.py
|
cnfg_GaussianMix
|
tdaylan/pnts_tran
| 0
|
python
|
def cnfg_GaussianMix():
'\n \n '
pcat.init()
|
def cnfg_GaussianMix():
'\n \n '
pcat.init()<|docstring|>Gaussian Mixture<|endoftext|>
|
e47627b39706d1aae090aaa1623a4c35f496826449ec16004173bb4acbfb4ea1
|
def cnfg_GaussianMix_unbinned():
'\n Unbinned Gaussian Mixture\n '
pcat.init(boolbins=False)
|
Unbinned Gaussian Mixture
|
tutorial.py
|
cnfg_GaussianMix_unbinned
|
tdaylan/pnts_tran
| 0
|
python
|
def cnfg_GaussianMix_unbinned():
'\n \n '
pcat.init(boolbins=False)
|
def cnfg_GaussianMix_unbinned():
'\n \n '
pcat.init(boolbins=False)<|docstring|>Unbinned Gaussian Mixture<|endoftext|>
|
441efd1d279e5173f639b0e97449ca6bc292c06f263957d91867edd02f3907cf
|
def __init__(self, logger_=None, archiver_cls=None):
'\n\n :param logger_: gradient.logger\n '
self.logger = (logger_ or MuteLogger())
self.archiver_cls = (archiver_cls or self.WORKSPACE_ARCHIVER_CLS)
|
:param logger_: gradient.logger
|
gradient/api_sdk/workspace.py
|
__init__
|
vishalbelsare/gradient-cli
| 52
|
python
|
def __init__(self, logger_=None, archiver_cls=None):
'\n\n \n '
self.logger = (logger_ or MuteLogger())
self.archiver_cls = (archiver_cls or self.WORKSPACE_ARCHIVER_CLS)
|
def __init__(self, logger_=None, archiver_cls=None):
'\n\n \n '
self.logger = (logger_ or MuteLogger())
self.archiver_cls = (archiver_cls or self.WORKSPACE_ARCHIVER_CLS)<|docstring|>:param logger_: gradient.logger<|endoftext|>
|
d97a27e72e4ca8b9c3ceae9b723dc63236b201c8e6d1f3ef342b42fb00d3c8bd
|
def __init__(self, api_key, client_name=None, uploader_cls=None, *args, **kwargs):
'\n :param str api_key:\n :param str client_name:\n :param object uploader_cls:\n :param gradient.logger logger_:\n '
super(S3WorkspaceHandler, self).__init__(*args, **kwargs)
self.api_key = api_key
self.client_name = client_name
self.uploader_cls = (uploader_cls or self.WORKSPACE_UPLOADER_CLS)
|
:param str api_key:
:param str client_name:
:param object uploader_cls:
:param gradient.logger logger_:
|
gradient/api_sdk/workspace.py
|
__init__
|
vishalbelsare/gradient-cli
| 52
|
python
|
def __init__(self, api_key, client_name=None, uploader_cls=None, *args, **kwargs):
'\n :param str api_key:\n :param str client_name:\n :param object uploader_cls:\n :param gradient.logger logger_:\n '
super(S3WorkspaceHandler, self).__init__(*args, **kwargs)
self.api_key = api_key
self.client_name = client_name
self.uploader_cls = (uploader_cls or self.WORKSPACE_UPLOADER_CLS)
|
def __init__(self, api_key, client_name=None, uploader_cls=None, *args, **kwargs):
'\n :param str api_key:\n :param str client_name:\n :param object uploader_cls:\n :param gradient.logger logger_:\n '
super(S3WorkspaceHandler, self).__init__(*args, **kwargs)
self.api_key = api_key
self.client_name = client_name
self.uploader_cls = (uploader_cls or self.WORKSPACE_UPLOADER_CLS)<|docstring|>:param str api_key:
:param str client_name:
:param object uploader_cls:
:param gradient.logger logger_:<|endoftext|>
|
77209eeebbd5d0d226aec34cb9c55fae9537dc85efbea13284fe78449ad87d1a
|
async def delete_app_requests(self, return_raw_response: bool=False) -> typing.Union[(dict, BaseOkResponse)]:
'\n :param return_raw_response: - return result at dict\n :return:\n '
params = get_params(locals())
raw_result = (await self.api_request('deleteAppRequests', params))
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
|
:param return_raw_response: - return result at dict
:return:
|
vkwave/api/methods/apps.py
|
delete_app_requests
|
amishakov/vkwave
| 222
|
python
|
async def delete_app_requests(self, return_raw_response: bool=False) -> typing.Union[(dict, BaseOkResponse)]:
'\n :param return_raw_response: - return result at dict\n :return:\n '
params = get_params(locals())
raw_result = (await self.api_request('deleteAppRequests', params))
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
|
async def delete_app_requests(self, return_raw_response: bool=False) -> typing.Union[(dict, BaseOkResponse)]:
'\n :param return_raw_response: - return result at dict\n :return:\n '
params = get_params(locals())
raw_result = (await self.api_request('deleteAppRequests', params))
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result<|docstring|>:param return_raw_response: - return result at dict
:return:<|endoftext|>
|
a428575576ce9f1dfba377894dcf565ad86dc183886ad19d1dd56040acb48f81
|
async def get(self, return_raw_response: bool=False, app_id: typing.Optional[int]=None, app_ids: typing.Optional[typing.List[str]]=None, platform: typing.Optional[str]=None, extended: typing.Optional[BaseBoolInt]=None, return_friends: typing.Optional[bool]=None, fields: typing.Optional[typing.List[UsersFields]]=None, name_case: typing.Optional[str]=None) -> typing.Union[(dict, AppsGetResponse)]:
"\n :param app_id: - Application ID\n :param app_ids: - List of application ID\n :param platform: - platform. Possible values: *'ios' — iOS,, *'android' — Android,, *'winphone' — Windows Phone,, *'web' — приложения на vk.com. By default: 'web'.\n :param extended:\n :param return_friends:\n :param fields: - Profile fields to return. Sample values: 'nickname', 'screen_name', 'sex', 'bdate' (birthdate), 'city', 'country', 'timezone', 'photo', 'photo_medium', 'photo_big', 'has_mobile', 'contacts', 'education', 'online', 'counters', 'relation', 'last_seen', 'activity', 'can_write_private_message', 'can_see_all_posts', 'can_post', 'universities', (only if return_friends - 1)\n :param name_case: - Case for declension of user name and surname: 'nom' — nominative (default),, 'gen' — genitive,, 'dat' — dative,, 'acc' — accusative,, 'ins' — instrumental,, 'abl' — prepositional. (only if 'return_friends' = '1')\n :param return_raw_response: - return result at dict\n :return:\n "
params = get_params(locals())
raw_result = (await self.api_request('get', params))
if return_raw_response:
return raw_result
result = AppsGetResponse(**raw_result)
return result
|
:param app_id: - Application ID
:param app_ids: - List of application ID
:param platform: - platform. Possible values: *'ios' — iOS,, *'android' — Android,, *'winphone' — Windows Phone,, *'web' — приложения на vk.com. By default: 'web'.
:param extended:
:param return_friends:
:param fields: - Profile fields to return. Sample values: 'nickname', 'screen_name', 'sex', 'bdate' (birthdate), 'city', 'country', 'timezone', 'photo', 'photo_medium', 'photo_big', 'has_mobile', 'contacts', 'education', 'online', 'counters', 'relation', 'last_seen', 'activity', 'can_write_private_message', 'can_see_all_posts', 'can_post', 'universities', (only if return_friends - 1)
:param name_case: - Case for declension of user name and surname: 'nom' — nominative (default),, 'gen' — genitive,, 'dat' — dative,, 'acc' — accusative,, 'ins' — instrumental,, 'abl' — prepositional. (only if 'return_friends' = '1')
:param return_raw_response: - return result at dict
:return:
|
vkwave/api/methods/apps.py
|
get
|
amishakov/vkwave
| 222
|
python
|
async def get(self, return_raw_response: bool=False, app_id: typing.Optional[int]=None, app_ids: typing.Optional[typing.List[str]]=None, platform: typing.Optional[str]=None, extended: typing.Optional[BaseBoolInt]=None, return_friends: typing.Optional[bool]=None, fields: typing.Optional[typing.List[UsersFields]]=None, name_case: typing.Optional[str]=None) -> typing.Union[(dict, AppsGetResponse)]:
"\n :param app_id: - Application ID\n :param app_ids: - List of application ID\n :param platform: - platform. Possible values: *'ios' — iOS,, *'android' — Android,, *'winphone' — Windows Phone,, *'web' — приложения на vk.com. By default: 'web'.\n :param extended:\n :param return_friends:\n :param fields: - Profile fields to return. Sample values: 'nickname', 'screen_name', 'sex', 'bdate' (birthdate), 'city', 'country', 'timezone', 'photo', 'photo_medium', 'photo_big', 'has_mobile', 'contacts', 'education', 'online', 'counters', 'relation', 'last_seen', 'activity', 'can_write_private_message', 'can_see_all_posts', 'can_post', 'universities', (only if return_friends - 1)\n :param name_case: - Case for declension of user name and surname: 'nom' — nominative (default),, 'gen' — genitive,, 'dat' — dative,, 'acc' — accusative,, 'ins' — instrumental,, 'abl' — prepositional. (only if 'return_friends' = '1')\n :param return_raw_response: - return result at dict\n :return:\n "
params = get_params(locals())
raw_result = (await self.api_request('get', params))
if return_raw_response:
return raw_result
result = AppsGetResponse(**raw_result)
return result
|
async def get(self, return_raw_response: bool=False, app_id: typing.Optional[int]=None, app_ids: typing.Optional[typing.List[str]]=None, platform: typing.Optional[str]=None, extended: typing.Optional[BaseBoolInt]=None, return_friends: typing.Optional[bool]=None, fields: typing.Optional[typing.List[UsersFields]]=None, name_case: typing.Optional[str]=None) -> typing.Union[(dict, AppsGetResponse)]:
"\n :param app_id: - Application ID\n :param app_ids: - List of application ID\n :param platform: - platform. Possible values: *'ios' — iOS,, *'android' — Android,, *'winphone' — Windows Phone,, *'web' — приложения на vk.com. By default: 'web'.\n :param extended:\n :param return_friends:\n :param fields: - Profile fields to return. Sample values: 'nickname', 'screen_name', 'sex', 'bdate' (birthdate), 'city', 'country', 'timezone', 'photo', 'photo_medium', 'photo_big', 'has_mobile', 'contacts', 'education', 'online', 'counters', 'relation', 'last_seen', 'activity', 'can_write_private_message', 'can_see_all_posts', 'can_post', 'universities', (only if return_friends - 1)\n :param name_case: - Case for declension of user name and surname: 'nom' — nominative (default),, 'gen' — genitive,, 'dat' — dative,, 'acc' — accusative,, 'ins' — instrumental,, 'abl' — prepositional. (only if 'return_friends' = '1')\n :param return_raw_response: - return result at dict\n :return:\n "
params = get_params(locals())
raw_result = (await self.api_request('get', params))
if return_raw_response:
return raw_result
result = AppsGetResponse(**raw_result)
return result<|docstring|>:param app_id: - Application ID
:param app_ids: - List of application ID
:param platform: - platform. Possible values: *'ios' — iOS,, *'android' — Android,, *'winphone' — Windows Phone,, *'web' — приложения на vk.com. By default: 'web'.
:param extended:
:param return_friends:
:param fields: - Profile fields to return. Sample values: 'nickname', 'screen_name', 'sex', 'bdate' (birthdate), 'city', 'country', 'timezone', 'photo', 'photo_medium', 'photo_big', 'has_mobile', 'contacts', 'education', 'online', 'counters', 'relation', 'last_seen', 'activity', 'can_write_private_message', 'can_see_all_posts', 'can_post', 'universities', (only if return_friends - 1)
:param name_case: - Case for declension of user name and surname: 'nom' — nominative (default),, 'gen' — genitive,, 'dat' — dative,, 'acc' — accusative,, 'ins' — instrumental,, 'abl' — prepositional. (only if 'return_friends' = '1')
:param return_raw_response: - return result at dict
:return:<|endoftext|>
|
a161772defe2aa6212cfc2033e4f14a972b6e31c6134b33174d6eec652f48dcb
|
async def get_catalog(self, count: int, return_raw_response: bool=False, sort: typing.Optional[str]=None, offset: typing.Optional[int]=None, platform: typing.Optional[str]=None, extended: typing.Optional[BaseBoolInt]=None, return_friends: typing.Optional[bool]=None, fields: typing.Optional[typing.List[UsersFields]]=None, name_case: typing.Optional[str]=None, q: typing.Optional[str]=None, genre_id: typing.Optional[int]=None, filter: typing.Optional[str]=None) -> typing.Union[(dict, AppsGetCatalogResponse)]:
"\n :param sort: - Sort order: 'popular_today' — popular for one day (default), 'visitors' — by visitors number , 'create_date' — by creation date, 'growth_rate' — by growth rate, 'popular_week' — popular for one week\n :param offset: - Offset required to return a specific subset of apps.\n :param count: - Number of apps to return.\n :param platform:\n :param extended: - '1' — to return additional fields 'screenshots', 'MAU', 'catalog_position', and 'international'. If set, 'count' must be less than or equal to '100'. '0' — not to return additional fields (default).\n :param return_friends:\n :param fields:\n :param name_case:\n :param q: - Search query string.\n :param genre_id:\n :param filter: - 'installed' — to return list of installed apps (only for mobile platform).\n :param return_raw_response: - return result at dict\n :return:\n "
params = get_params(locals())
raw_result = (await self.api_request('getCatalog', params))
if return_raw_response:
return raw_result
result = AppsGetCatalogResponse(**raw_result)
return result
|
:param sort: - Sort order: 'popular_today' — popular for one day (default), 'visitors' — by visitors number , 'create_date' — by creation date, 'growth_rate' — by growth rate, 'popular_week' — popular for one week
:param offset: - Offset required to return a specific subset of apps.
:param count: - Number of apps to return.
:param platform:
:param extended: - '1' — to return additional fields 'screenshots', 'MAU', 'catalog_position', and 'international'. If set, 'count' must be less than or equal to '100'. '0' — not to return additional fields (default).
:param return_friends:
:param fields:
:param name_case:
:param q: - Search query string.
:param genre_id:
:param filter: - 'installed' — to return list of installed apps (only for mobile platform).
:param return_raw_response: - return result at dict
:return:
|
vkwave/api/methods/apps.py
|
get_catalog
|
amishakov/vkwave
| 222
|
python
|
async def get_catalog(self, count: int, return_raw_response: bool=False, sort: typing.Optional[str]=None, offset: typing.Optional[int]=None, platform: typing.Optional[str]=None, extended: typing.Optional[BaseBoolInt]=None, return_friends: typing.Optional[bool]=None, fields: typing.Optional[typing.List[UsersFields]]=None, name_case: typing.Optional[str]=None, q: typing.Optional[str]=None, genre_id: typing.Optional[int]=None, filter: typing.Optional[str]=None) -> typing.Union[(dict, AppsGetCatalogResponse)]:
"\n :param sort: - Sort order: 'popular_today' — popular for one day (default), 'visitors' — by visitors number , 'create_date' — by creation date, 'growth_rate' — by growth rate, 'popular_week' — popular for one week\n :param offset: - Offset required to return a specific subset of apps.\n :param count: - Number of apps to return.\n :param platform:\n :param extended: - '1' — to return additional fields 'screenshots', 'MAU', 'catalog_position', and 'international'. If set, 'count' must be less than or equal to '100'. '0' — not to return additional fields (default).\n :param return_friends:\n :param fields:\n :param name_case:\n :param q: - Search query string.\n :param genre_id:\n :param filter: - 'installed' — to return list of installed apps (only for mobile platform).\n :param return_raw_response: - return result at dict\n :return:\n "
params = get_params(locals())
raw_result = (await self.api_request('getCatalog', params))
if return_raw_response:
return raw_result
result = AppsGetCatalogResponse(**raw_result)
return result
|
async def get_catalog(self, count: int, return_raw_response: bool=False, sort: typing.Optional[str]=None, offset: typing.Optional[int]=None, platform: typing.Optional[str]=None, extended: typing.Optional[BaseBoolInt]=None, return_friends: typing.Optional[bool]=None, fields: typing.Optional[typing.List[UsersFields]]=None, name_case: typing.Optional[str]=None, q: typing.Optional[str]=None, genre_id: typing.Optional[int]=None, filter: typing.Optional[str]=None) -> typing.Union[(dict, AppsGetCatalogResponse)]:
"\n :param sort: - Sort order: 'popular_today' — popular for one day (default), 'visitors' — by visitors number , 'create_date' — by creation date, 'growth_rate' — by growth rate, 'popular_week' — popular for one week\n :param offset: - Offset required to return a specific subset of apps.\n :param count: - Number of apps to return.\n :param platform:\n :param extended: - '1' — to return additional fields 'screenshots', 'MAU', 'catalog_position', and 'international'. If set, 'count' must be less than or equal to '100'. '0' — not to return additional fields (default).\n :param return_friends:\n :param fields:\n :param name_case:\n :param q: - Search query string.\n :param genre_id:\n :param filter: - 'installed' — to return list of installed apps (only for mobile platform).\n :param return_raw_response: - return result at dict\n :return:\n "
params = get_params(locals())
raw_result = (await self.api_request('getCatalog', params))
if return_raw_response:
return raw_result
result = AppsGetCatalogResponse(**raw_result)
return result<|docstring|>:param sort: - Sort order: 'popular_today' — popular for one day (default), 'visitors' — by visitors number , 'create_date' — by creation date, 'growth_rate' — by growth rate, 'popular_week' — popular for one week
:param offset: - Offset required to return a specific subset of apps.
:param count: - Number of apps to return.
:param platform:
:param extended: - '1' — to return additional fields 'screenshots', 'MAU', 'catalog_position', and 'international'. If set, 'count' must be less than or equal to '100'. '0' — not to return additional fields (default).
:param return_friends:
:param fields:
:param name_case:
:param q: - Search query string.
:param genre_id:
:param filter: - 'installed' — to return list of installed apps (only for mobile platform).
:param return_raw_response: - return result at dict
:return:<|endoftext|>
|
0f5af28c3f315884c9aff979c7ccf6aa7f64c02a2f5fb0da8eac629dade0cdb9
|
async def get_friends_list(self, return_raw_response: bool=False, extended: typing.Optional[BaseBoolInt]=None, count: typing.Optional[int]=None, offset: typing.Optional[int]=None, type: typing.Optional[str]=None, fields: typing.Optional[typing.List[UsersFields]]=None) -> typing.Union[(dict, AppsGetFriendsListResponse)]:
"\n :param extended:\n :param count: - List size.\n :param offset:\n :param type: - List type. Possible values: * 'invite' — available for invites (don't play the game),, * 'request' — available for request (play the game). By default: 'invite'.\n :param fields: - Additional profile fields, see [vk.com/dev/fields|description].\n :param return_raw_response: - return result at dict\n :return:\n "
params = get_params(locals())
raw_result = (await self.api_request('getFriendsList', params))
if return_raw_response:
return raw_result
result = AppsGetFriendsListResponse(**raw_result)
return result
|
:param extended:
:param count: - List size.
:param offset:
:param type: - List type. Possible values: * 'invite' — available for invites (don't play the game),, * 'request' — available for request (play the game). By default: 'invite'.
:param fields: - Additional profile fields, see [vk.com/dev/fields|description].
:param return_raw_response: - return result at dict
:return:
|
vkwave/api/methods/apps.py
|
get_friends_list
|
amishakov/vkwave
| 222
|
python
|
async def get_friends_list(self, return_raw_response: bool=False, extended: typing.Optional[BaseBoolInt]=None, count: typing.Optional[int]=None, offset: typing.Optional[int]=None, type: typing.Optional[str]=None, fields: typing.Optional[typing.List[UsersFields]]=None) -> typing.Union[(dict, AppsGetFriendsListResponse)]:
"\n :param extended:\n :param count: - List size.\n :param offset:\n :param type: - List type. Possible values: * 'invite' — available for invites (don't play the game),, * 'request' — available for request (play the game). By default: 'invite'.\n :param fields: - Additional profile fields, see [vk.com/dev/fields|description].\n :param return_raw_response: - return result at dict\n :return:\n "
params = get_params(locals())
raw_result = (await self.api_request('getFriendsList', params))
if return_raw_response:
return raw_result
result = AppsGetFriendsListResponse(**raw_result)
return result
|
async def get_friends_list(self, return_raw_response: bool=False, extended: typing.Optional[BaseBoolInt]=None, count: typing.Optional[int]=None, offset: typing.Optional[int]=None, type: typing.Optional[str]=None, fields: typing.Optional[typing.List[UsersFields]]=None) -> typing.Union[(dict, AppsGetFriendsListResponse)]:
"\n :param extended:\n :param count: - List size.\n :param offset:\n :param type: - List type. Possible values: * 'invite' — available for invites (don't play the game),, * 'request' — available for request (play the game). By default: 'invite'.\n :param fields: - Additional profile fields, see [vk.com/dev/fields|description].\n :param return_raw_response: - return result at dict\n :return:\n "
params = get_params(locals())
raw_result = (await self.api_request('getFriendsList', params))
if return_raw_response:
return raw_result
result = AppsGetFriendsListResponse(**raw_result)
return result<|docstring|>:param extended:
:param count: - List size.
:param offset:
:param type: - List type. Possible values: * 'invite' — available for invites (don't play the game),, * 'request' — available for request (play the game). By default: 'invite'.
:param fields: - Additional profile fields, see [vk.com/dev/fields|description].
:param return_raw_response: - return result at dict
:return:<|endoftext|>
|
5866836ddb22032fc35bbd63eb4583e60b2eb73fc2cf06be1e84acdd7c7ccc2a
|
async def get_leaderboard(self, type: str, return_raw_response: bool=False, global_: typing.Optional[BaseBoolInt]=None, extended: typing.Optional[BaseBoolInt]=None) -> typing.Union[(dict, AppsGetLeaderboardResponse, AppsGetLeaderboardExtendedResponse)]:
"\n :param type: - Leaderboard type. Possible values: *'level' — by level,, *'points' — by mission points,, *'score' — by score ().\n :param global_: - Rating type. Possible values: *'1' — global rating among all players,, *'0' — rating among user friends.\n :param extended: - 1 — to return additional info about users\n :param return_raw_response: - return result at dict\n :return:\n "
params = get_params(locals())
raw_result = (await self.api_request('getLeaderboard', params))
if return_raw_response:
return raw_result
result = (AppsGetLeaderboardResponse(**raw_result) if (not extended) else AppsGetLeaderboardExtendedResponse(**raw_result))
return result
|
:param type: - Leaderboard type. Possible values: *'level' — by level,, *'points' — by mission points,, *'score' — by score ().
:param global_: - Rating type. Possible values: *'1' — global rating among all players,, *'0' — rating among user friends.
:param extended: - 1 — to return additional info about users
:param return_raw_response: - return result at dict
:return:
|
vkwave/api/methods/apps.py
|
get_leaderboard
|
amishakov/vkwave
| 222
|
python
|
async def get_leaderboard(self, type: str, return_raw_response: bool=False, global_: typing.Optional[BaseBoolInt]=None, extended: typing.Optional[BaseBoolInt]=None) -> typing.Union[(dict, AppsGetLeaderboardResponse, AppsGetLeaderboardExtendedResponse)]:
"\n :param type: - Leaderboard type. Possible values: *'level' — by level,, *'points' — by mission points,, *'score' — by score ().\n :param global_: - Rating type. Possible values: *'1' — global rating among all players,, *'0' — rating among user friends.\n :param extended: - 1 — to return additional info about users\n :param return_raw_response: - return result at dict\n :return:\n "
params = get_params(locals())
raw_result = (await self.api_request('getLeaderboard', params))
if return_raw_response:
return raw_result
result = (AppsGetLeaderboardResponse(**raw_result) if (not extended) else AppsGetLeaderboardExtendedResponse(**raw_result))
return result
|
async def get_leaderboard(self, type: str, return_raw_response: bool=False, global_: typing.Optional[BaseBoolInt]=None, extended: typing.Optional[BaseBoolInt]=None) -> typing.Union[(dict, AppsGetLeaderboardResponse, AppsGetLeaderboardExtendedResponse)]:
"\n :param type: - Leaderboard type. Possible values: *'level' — by level,, *'points' — by mission points,, *'score' — by score ().\n :param global_: - Rating type. Possible values: *'1' — global rating among all players,, *'0' — rating among user friends.\n :param extended: - 1 — to return additional info about users\n :param return_raw_response: - return result at dict\n :return:\n "
params = get_params(locals())
raw_result = (await self.api_request('getLeaderboard', params))
if return_raw_response:
return raw_result
result = (AppsGetLeaderboardResponse(**raw_result) if (not extended) else AppsGetLeaderboardExtendedResponse(**raw_result))
return result<|docstring|>:param type: - Leaderboard type. Possible values: *'level' — by level,, *'points' — by mission points,, *'score' — by score ().
:param global_: - Rating type. Possible values: *'1' — global rating among all players,, *'0' — rating among user friends.
:param extended: - 1 — to return additional info about users
:param return_raw_response: - return result at dict
:return:<|endoftext|>
|
f251cd050ef8a56cb22a37ef3a89bc84321a5cc070b7b62dbe11a8570f47ea61
|
async def get_scopes(self, return_raw_response: bool=False, type: typing.Optional[str]=None) -> typing.Union[(dict, AppsGetScopesResponse)]:
'\n :param type:\n :param return_raw_response: - return result at dict\n :return:\n '
params = get_params(locals())
raw_result = (await self.api_request('getScopes', params))
if return_raw_response:
return raw_result
result = AppsGetScopesResponse(**raw_result)
return result
|
:param type:
:param return_raw_response: - return result at dict
:return:
|
vkwave/api/methods/apps.py
|
get_scopes
|
amishakov/vkwave
| 222
|
python
|
async def get_scopes(self, return_raw_response: bool=False, type: typing.Optional[str]=None) -> typing.Union[(dict, AppsGetScopesResponse)]:
'\n :param type:\n :param return_raw_response: - return result at dict\n :return:\n '
params = get_params(locals())
raw_result = (await self.api_request('getScopes', params))
if return_raw_response:
return raw_result
result = AppsGetScopesResponse(**raw_result)
return result
|
async def get_scopes(self, return_raw_response: bool=False, type: typing.Optional[str]=None) -> typing.Union[(dict, AppsGetScopesResponse)]:
'\n :param type:\n :param return_raw_response: - return result at dict\n :return:\n '
params = get_params(locals())
raw_result = (await self.api_request('getScopes', params))
if return_raw_response:
return raw_result
result = AppsGetScopesResponse(**raw_result)
return result<|docstring|>:param type:
:param return_raw_response: - return result at dict
:return:<|endoftext|>
|
af774bc5c0d47ab6415887996aa7c38478648fe568e85819082d98b4c48dacec
|
async def get_score(self, user_id: int, return_raw_response: bool=False) -> typing.Union[(dict, AppsGetScoreResponse)]:
'\n :param user_id:\n :param return_raw_response: - return result at dict\n :return:\n '
params = get_params(locals())
raw_result = (await self.api_request('getScore', params))
if return_raw_response:
return raw_result
result = AppsGetScoreResponse(**raw_result)
return result
|
:param user_id:
:param return_raw_response: - return result at dict
:return:
|
vkwave/api/methods/apps.py
|
get_score
|
amishakov/vkwave
| 222
|
python
|
async def get_score(self, user_id: int, return_raw_response: bool=False) -> typing.Union[(dict, AppsGetScoreResponse)]:
'\n :param user_id:\n :param return_raw_response: - return result at dict\n :return:\n '
params = get_params(locals())
raw_result = (await self.api_request('getScore', params))
if return_raw_response:
return raw_result
result = AppsGetScoreResponse(**raw_result)
return result
|
async def get_score(self, user_id: int, return_raw_response: bool=False) -> typing.Union[(dict, AppsGetScoreResponse)]:
'\n :param user_id:\n :param return_raw_response: - return result at dict\n :return:\n '
params = get_params(locals())
raw_result = (await self.api_request('getScore', params))
if return_raw_response:
return raw_result
result = AppsGetScoreResponse(**raw_result)
return result<|docstring|>:param user_id:
:param return_raw_response: - return result at dict
:return:<|endoftext|>
|
3bfb62af4ada15a2323dbe1cf1ad8e4b5fda0bcf444623bdb27a917e99aa1784
|
async def promo_has_active_gift(self, promo_id: int, return_raw_response: bool=False, user_id: typing.Optional[int]=None) -> typing.Union[(dict, BaseBoolResponse)]:
'\n :param promo_id: - Id of game promo action\n :param user_id:\n :param return_raw_response: - return result at dict\n :return:\n '
params = get_params(locals())
raw_result = (await self.api_request('promoHasActiveGift', params))
if return_raw_response:
return raw_result
result = BaseBoolResponse(**raw_result)
return result
|
:param promo_id: - Id of game promo action
:param user_id:
:param return_raw_response: - return result at dict
:return:
|
vkwave/api/methods/apps.py
|
promo_has_active_gift
|
amishakov/vkwave
| 222
|
python
|
async def promo_has_active_gift(self, promo_id: int, return_raw_response: bool=False, user_id: typing.Optional[int]=None) -> typing.Union[(dict, BaseBoolResponse)]:
'\n :param promo_id: - Id of game promo action\n :param user_id:\n :param return_raw_response: - return result at dict\n :return:\n '
params = get_params(locals())
raw_result = (await self.api_request('promoHasActiveGift', params))
if return_raw_response:
return raw_result
result = BaseBoolResponse(**raw_result)
return result
|
async def promo_has_active_gift(self, promo_id: int, return_raw_response: bool=False, user_id: typing.Optional[int]=None) -> typing.Union[(dict, BaseBoolResponse)]:
'\n :param promo_id: - Id of game promo action\n :param user_id:\n :param return_raw_response: - return result at dict\n :return:\n '
params = get_params(locals())
raw_result = (await self.api_request('promoHasActiveGift', params))
if return_raw_response:
return raw_result
result = BaseBoolResponse(**raw_result)
return result<|docstring|>:param promo_id: - Id of game promo action
:param user_id:
:param return_raw_response: - return result at dict
:return:<|endoftext|>
|
8cf2ef52ed2c5fbcff6c1a64134cacbd48b46f2c2c14f090ba6ef2189529b56e
|
async def promo_use_gift(self, promo_id: int, return_raw_response: bool=False, user_id: typing.Optional[int]=None) -> typing.Union[(dict, BaseBoolResponse)]:
'\n :param promo_id: - Id of game promo action\n :param user_id:\n :param return_raw_response: - return result at dict\n :return:\n '
params = get_params(locals())
raw_result = (await self.api_request('promoUseGift', params))
if return_raw_response:
return raw_result
result = BaseBoolResponse(**raw_result)
return result
|
:param promo_id: - Id of game promo action
:param user_id:
:param return_raw_response: - return result at dict
:return:
|
vkwave/api/methods/apps.py
|
promo_use_gift
|
amishakov/vkwave
| 222
|
python
|
async def promo_use_gift(self, promo_id: int, return_raw_response: bool=False, user_id: typing.Optional[int]=None) -> typing.Union[(dict, BaseBoolResponse)]:
'\n :param promo_id: - Id of game promo action\n :param user_id:\n :param return_raw_response: - return result at dict\n :return:\n '
params = get_params(locals())
raw_result = (await self.api_request('promoUseGift', params))
if return_raw_response:
return raw_result
result = BaseBoolResponse(**raw_result)
return result
|
async def promo_use_gift(self, promo_id: int, return_raw_response: bool=False, user_id: typing.Optional[int]=None) -> typing.Union[(dict, BaseBoolResponse)]:
'\n :param promo_id: - Id of game promo action\n :param user_id:\n :param return_raw_response: - return result at dict\n :return:\n '
params = get_params(locals())
raw_result = (await self.api_request('promoUseGift', params))
if return_raw_response:
return raw_result
result = BaseBoolResponse(**raw_result)
return result<|docstring|>:param promo_id: - Id of game promo action
:param user_id:
:param return_raw_response: - return result at dict
:return:<|endoftext|>
|
293a2cb77b0e81e62f9e5a57b1ceea3f2696d21f5fa07004e4c07ccb942034b8
|
async def send_request(self, user_id: int, return_raw_response: bool=False, text: typing.Optional[str]=None, type: typing.Optional[str]=None, name: typing.Optional[str]=None, key: typing.Optional[str]=None, separate: typing.Optional[bool]=None) -> typing.Union[(dict, AppsSendRequestResponse)]:
"\n :param user_id: - id of the user to send a request\n :param text: - request text\n :param type: - request type. Values: 'invite' – if the request is sent to a user who does not have the app installed,, 'request' – if a user has already installed the app\n :param name:\n :param key: - special string key to be sent with the request\n :param separate:\n :param return_raw_response: - return result at dict\n :return:\n "
params = get_params(locals())
raw_result = (await self.api_request('sendRequest', params))
if return_raw_response:
return raw_result
result = AppsSendRequestResponse(**raw_result)
return result
|
:param user_id: - id of the user to send a request
:param text: - request text
:param type: - request type. Values: 'invite' – if the request is sent to a user who does not have the app installed,, 'request' – if a user has already installed the app
:param name:
:param key: - special string key to be sent with the request
:param separate:
:param return_raw_response: - return result at dict
:return:
|
vkwave/api/methods/apps.py
|
send_request
|
amishakov/vkwave
| 222
|
python
|
async def send_request(self, user_id: int, return_raw_response: bool=False, text: typing.Optional[str]=None, type: typing.Optional[str]=None, name: typing.Optional[str]=None, key: typing.Optional[str]=None, separate: typing.Optional[bool]=None) -> typing.Union[(dict, AppsSendRequestResponse)]:
"\n :param user_id: - id of the user to send a request\n :param text: - request text\n :param type: - request type. Values: 'invite' – if the request is sent to a user who does not have the app installed,, 'request' – if a user has already installed the app\n :param name:\n :param key: - special string key to be sent with the request\n :param separate:\n :param return_raw_response: - return result at dict\n :return:\n "
params = get_params(locals())
raw_result = (await self.api_request('sendRequest', params))
if return_raw_response:
return raw_result
result = AppsSendRequestResponse(**raw_result)
return result
|
async def send_request(self, user_id: int, return_raw_response: bool=False, text: typing.Optional[str]=None, type: typing.Optional[str]=None, name: typing.Optional[str]=None, key: typing.Optional[str]=None, separate: typing.Optional[bool]=None) -> typing.Union[(dict, AppsSendRequestResponse)]:
"\n :param user_id: - id of the user to send a request\n :param text: - request text\n :param type: - request type. Values: 'invite' – if the request is sent to a user who does not have the app installed,, 'request' – if a user has already installed the app\n :param name:\n :param key: - special string key to be sent with the request\n :param separate:\n :param return_raw_response: - return result at dict\n :return:\n "
params = get_params(locals())
raw_result = (await self.api_request('sendRequest', params))
if return_raw_response:
return raw_result
result = AppsSendRequestResponse(**raw_result)
return result<|docstring|>:param user_id: - id of the user to send a request
:param text: - request text
:param type: - request type. Values: 'invite' – if the request is sent to a user who does not have the app installed,, 'request' – if a user has already installed the app
:param name:
:param key: - special string key to be sent with the request
:param separate:
:param return_raw_response: - return result at dict
:return:<|endoftext|>
|
79454b9b5bc6cfd080e895e51f1037d0e274954bf8218aee7421cf31498abf52
|
def merge_chunks(chunk_results: List[Tuple[(Tuple[int], Any)]]) -> Any:
'\n Concatenate chunk results according to index.\n\n Parameters\n ----------\n chunk_results : list of tuple, {(chunk_idx, chunk_result), ...,}\n\n Returns\n -------\n Data\n '
from .dataframe.utils import is_dataframe, is_index, is_series, get_xdf
from .lib.groupby_wrapper import GroupByWrapper
from .tensor.array_utils import get_array_module, is_array
chunk_results = sorted(chunk_results, key=operator.itemgetter(0))
v = chunk_results[0][1]
if ((len(chunk_results) == 1) and (not chunk_results[0][0])):
return v
if is_array(v):
xp = get_array_module(v)
ndim = v.ndim
for i in range((ndim - 1)):
new_chunks = []
for (idx, cs) in itertools.groupby(chunk_results, key=(lambda t: t[0][:(- 1)])):
new_chunks.append((idx, xp.concatenate([c[1] for c in cs], axis=((ndim - i) - 1))))
chunk_results = new_chunks
to_concat = [c[1] for c in chunk_results]
if (len(to_concat) == 1):
return to_concat[0]
concat_result = xp.concatenate(to_concat)
return concat_result
elif is_dataframe(v):
xdf = get_xdf(v)
concats = []
for (_, cs) in itertools.groupby(chunk_results, key=(lambda t: t[0][0])):
concats.append(xdf.concat([c[1] for c in cs], axis='columns'))
return xdf.concat(concats, axis='index')
elif is_series(v):
xdf = get_xdf(v)
return xdf.concat([c[1] for c in chunk_results])
elif is_index(v):
xdf = get_xdf(v)
df = xdf.concat([xdf.DataFrame(index=r[1]) for r in chunk_results])
return df.index
elif isinstance(v, pd.Categorical):
categories = [r[1] for r in chunk_results]
arrays = [np.asarray(r) for r in categories]
array = np.concatenate(arrays)
return pd.Categorical(array, categories=categories[0].categories, ordered=categories[0].ordered)
elif isinstance(v, GroupByWrapper):
df = pd.concat([r[1].obj for r in chunk_results], axis=0)
if (not isinstance(v.keys, list)):
keys = v.keys
else:
keys = []
for (idx, k) in enumerate(v.keys):
if isinstance(k, pd.Series):
keys.append(pd.concat([r[1].keys[idx] for r in chunk_results]))
else:
keys.append(k)
grouped = GroupByWrapper(df, None, keys=keys, axis=v.axis, level=v.level, exclusions=v.exclusions, selection=v.selection, as_index=v.as_index, sort=v.sort, group_keys=v.group_keys, squeeze=v.squeeze, observed=v.observed, mutated=v.mutated)
return grouped.groupby_obj
elif isinstance(v, (str, bytes, memoryview, BaseEstimator)):
result = [r[1] for r in chunk_results]
if (len(result) == 1):
return result[0]
return result
else:
result = None
for cr in chunk_results:
if (cr[1] is None):
continue
if (isinstance(cr[1], dict) and (not cr[1])):
continue
if (result is None):
result = cr[1]
result = (result.item() if hasattr(result, 'item') else result)
else:
raise TypeError(f'unsupported type {type(v)}')
return result
|
Concatenate chunk results according to index.
Parameters
----------
chunk_results : list of tuple, {(chunk_idx, chunk_result), ...,}
Returns
-------
Data
|
mars/utils.py
|
merge_chunks
|
mars-project/mars
| 2,413
|
python
|
def merge_chunks(chunk_results: List[Tuple[(Tuple[int], Any)]]) -> Any:
'\n Concatenate chunk results according to index.\n\n Parameters\n ----------\n chunk_results : list of tuple, {(chunk_idx, chunk_result), ...,}\n\n Returns\n -------\n Data\n '
from .dataframe.utils import is_dataframe, is_index, is_series, get_xdf
from .lib.groupby_wrapper import GroupByWrapper
from .tensor.array_utils import get_array_module, is_array
chunk_results = sorted(chunk_results, key=operator.itemgetter(0))
v = chunk_results[0][1]
if ((len(chunk_results) == 1) and (not chunk_results[0][0])):
return v
if is_array(v):
xp = get_array_module(v)
ndim = v.ndim
for i in range((ndim - 1)):
new_chunks = []
for (idx, cs) in itertools.groupby(chunk_results, key=(lambda t: t[0][:(- 1)])):
new_chunks.append((idx, xp.concatenate([c[1] for c in cs], axis=((ndim - i) - 1))))
chunk_results = new_chunks
to_concat = [c[1] for c in chunk_results]
if (len(to_concat) == 1):
return to_concat[0]
concat_result = xp.concatenate(to_concat)
return concat_result
elif is_dataframe(v):
xdf = get_xdf(v)
concats = []
for (_, cs) in itertools.groupby(chunk_results, key=(lambda t: t[0][0])):
concats.append(xdf.concat([c[1] for c in cs], axis='columns'))
return xdf.concat(concats, axis='index')
elif is_series(v):
xdf = get_xdf(v)
return xdf.concat([c[1] for c in chunk_results])
elif is_index(v):
xdf = get_xdf(v)
df = xdf.concat([xdf.DataFrame(index=r[1]) for r in chunk_results])
return df.index
elif isinstance(v, pd.Categorical):
categories = [r[1] for r in chunk_results]
arrays = [np.asarray(r) for r in categories]
array = np.concatenate(arrays)
return pd.Categorical(array, categories=categories[0].categories, ordered=categories[0].ordered)
elif isinstance(v, GroupByWrapper):
df = pd.concat([r[1].obj for r in chunk_results], axis=0)
if (not isinstance(v.keys, list)):
keys = v.keys
else:
keys = []
for (idx, k) in enumerate(v.keys):
if isinstance(k, pd.Series):
keys.append(pd.concat([r[1].keys[idx] for r in chunk_results]))
else:
keys.append(k)
grouped = GroupByWrapper(df, None, keys=keys, axis=v.axis, level=v.level, exclusions=v.exclusions, selection=v.selection, as_index=v.as_index, sort=v.sort, group_keys=v.group_keys, squeeze=v.squeeze, observed=v.observed, mutated=v.mutated)
return grouped.groupby_obj
elif isinstance(v, (str, bytes, memoryview, BaseEstimator)):
result = [r[1] for r in chunk_results]
if (len(result) == 1):
return result[0]
return result
else:
result = None
for cr in chunk_results:
if (cr[1] is None):
continue
if (isinstance(cr[1], dict) and (not cr[1])):
continue
if (result is None):
result = cr[1]
result = (result.item() if hasattr(result, 'item') else result)
else:
raise TypeError(f'unsupported type {type(v)}')
return result
|
def merge_chunks(chunk_results: List[Tuple[(Tuple[int], Any)]]) -> Any:
'\n Concatenate chunk results according to index.\n\n Parameters\n ----------\n chunk_results : list of tuple, {(chunk_idx, chunk_result), ...,}\n\n Returns\n -------\n Data\n '
from .dataframe.utils import is_dataframe, is_index, is_series, get_xdf
from .lib.groupby_wrapper import GroupByWrapper
from .tensor.array_utils import get_array_module, is_array
chunk_results = sorted(chunk_results, key=operator.itemgetter(0))
v = chunk_results[0][1]
if ((len(chunk_results) == 1) and (not chunk_results[0][0])):
return v
if is_array(v):
xp = get_array_module(v)
ndim = v.ndim
for i in range((ndim - 1)):
new_chunks = []
for (idx, cs) in itertools.groupby(chunk_results, key=(lambda t: t[0][:(- 1)])):
new_chunks.append((idx, xp.concatenate([c[1] for c in cs], axis=((ndim - i) - 1))))
chunk_results = new_chunks
to_concat = [c[1] for c in chunk_results]
if (len(to_concat) == 1):
return to_concat[0]
concat_result = xp.concatenate(to_concat)
return concat_result
elif is_dataframe(v):
xdf = get_xdf(v)
concats = []
for (_, cs) in itertools.groupby(chunk_results, key=(lambda t: t[0][0])):
concats.append(xdf.concat([c[1] for c in cs], axis='columns'))
return xdf.concat(concats, axis='index')
elif is_series(v):
xdf = get_xdf(v)
return xdf.concat([c[1] for c in chunk_results])
elif is_index(v):
xdf = get_xdf(v)
df = xdf.concat([xdf.DataFrame(index=r[1]) for r in chunk_results])
return df.index
elif isinstance(v, pd.Categorical):
categories = [r[1] for r in chunk_results]
arrays = [np.asarray(r) for r in categories]
array = np.concatenate(arrays)
return pd.Categorical(array, categories=categories[0].categories, ordered=categories[0].ordered)
elif isinstance(v, GroupByWrapper):
df = pd.concat([r[1].obj for r in chunk_results], axis=0)
if (not isinstance(v.keys, list)):
keys = v.keys
else:
keys = []
for (idx, k) in enumerate(v.keys):
if isinstance(k, pd.Series):
keys.append(pd.concat([r[1].keys[idx] for r in chunk_results]))
else:
keys.append(k)
grouped = GroupByWrapper(df, None, keys=keys, axis=v.axis, level=v.level, exclusions=v.exclusions, selection=v.selection, as_index=v.as_index, sort=v.sort, group_keys=v.group_keys, squeeze=v.squeeze, observed=v.observed, mutated=v.mutated)
return grouped.groupby_obj
elif isinstance(v, (str, bytes, memoryview, BaseEstimator)):
result = [r[1] for r in chunk_results]
if (len(result) == 1):
return result[0]
return result
else:
result = None
for cr in chunk_results:
if (cr[1] is None):
continue
if (isinstance(cr[1], dict) and (not cr[1])):
continue
if (result is None):
result = cr[1]
result = (result.item() if hasattr(result, 'item') else result)
else:
raise TypeError(f'unsupported type {type(v)}')
return result<|docstring|>Concatenate chunk results according to index.
Parameters
----------
chunk_results : list of tuple, {(chunk_idx, chunk_result), ...,}
Returns
-------
Data<|endoftext|>
|
97fa69d035f5d6ed21073c2c5d924ab8419b30328663ad79fa00e54f835e107d
|
def calc_nsplits(chunk_idx_to_shape: Dict[(Tuple[int], Tuple[int])]) -> Tuple[Tuple[int]]:
"\n Calculate a tiled entity's nsplits.\n\n Parameters\n ----------\n chunk_idx_to_shape : Dict type, {chunk_idx: chunk_shape}\n\n Returns\n -------\n nsplits\n "
ndim = len(next(iter(chunk_idx_to_shape)))
tileable_nsplits = []
for i in range(ndim):
splits = []
for (index, shape) in chunk_idx_to_shape.items():
if all(((idx == 0) for (j, idx) in enumerate(index) if (j != i))):
splits.append(shape[i])
tileable_nsplits.append(tuple(splits))
return tuple(tileable_nsplits)
|
Calculate a tiled entity's nsplits.
Parameters
----------
chunk_idx_to_shape : Dict type, {chunk_idx: chunk_shape}
Returns
-------
nsplits
|
mars/utils.py
|
calc_nsplits
|
mars-project/mars
| 2,413
|
python
|
def calc_nsplits(chunk_idx_to_shape: Dict[(Tuple[int], Tuple[int])]) -> Tuple[Tuple[int]]:
"\n Calculate a tiled entity's nsplits.\n\n Parameters\n ----------\n chunk_idx_to_shape : Dict type, {chunk_idx: chunk_shape}\n\n Returns\n -------\n nsplits\n "
ndim = len(next(iter(chunk_idx_to_shape)))
tileable_nsplits = []
for i in range(ndim):
splits = []
for (index, shape) in chunk_idx_to_shape.items():
if all(((idx == 0) for (j, idx) in enumerate(index) if (j != i))):
splits.append(shape[i])
tileable_nsplits.append(tuple(splits))
return tuple(tileable_nsplits)
|
def calc_nsplits(chunk_idx_to_shape: Dict[(Tuple[int], Tuple[int])]) -> Tuple[Tuple[int]]:
"\n Calculate a tiled entity's nsplits.\n\n Parameters\n ----------\n chunk_idx_to_shape : Dict type, {chunk_idx: chunk_shape}\n\n Returns\n -------\n nsplits\n "
ndim = len(next(iter(chunk_idx_to_shape)))
tileable_nsplits = []
for i in range(ndim):
splits = []
for (index, shape) in chunk_idx_to_shape.items():
if all(((idx == 0) for (j, idx) in enumerate(index) if (j != i))):
splits.append(shape[i])
tileable_nsplits.append(tuple(splits))
return tuple(tileable_nsplits)<|docstring|>Calculate a tiled entity's nsplits.
Parameters
----------
chunk_idx_to_shape : Dict type, {chunk_idx: chunk_shape}
Returns
-------
nsplits<|endoftext|>
|
ee43df024c0f57e83bdccfb016fcddecb9d52ecc1a6ab81c0df5c42914f3c98d
|
def sort_dataframe_result(df, result: pd.DataFrame) -> pd.DataFrame:
' sort DataFrame on client according to `should_be_monotonic` attribute '
if hasattr(df, 'index_value'):
if getattr(df.index_value, 'should_be_monotonic', False):
try:
result.sort_index(inplace=True)
except TypeError:
result = result.sort_index()
if hasattr(df, 'columns_value'):
if getattr(df.columns_value, 'should_be_monotonic', False):
try:
result.sort_index(axis=1, inplace=True)
except TypeError:
result = result.sort_index(axis=1)
return result
|
sort DataFrame on client according to `should_be_monotonic` attribute
|
mars/utils.py
|
sort_dataframe_result
|
mars-project/mars
| 2,413
|
python
|
def sort_dataframe_result(df, result: pd.DataFrame) -> pd.DataFrame:
' '
if hasattr(df, 'index_value'):
if getattr(df.index_value, 'should_be_monotonic', False):
try:
result.sort_index(inplace=True)
except TypeError:
result = result.sort_index()
if hasattr(df, 'columns_value'):
if getattr(df.columns_value, 'should_be_monotonic', False):
try:
result.sort_index(axis=1, inplace=True)
except TypeError:
result = result.sort_index(axis=1)
return result
|
def sort_dataframe_result(df, result: pd.DataFrame) -> pd.DataFrame:
' '
if hasattr(df, 'index_value'):
if getattr(df.index_value, 'should_be_monotonic', False):
try:
result.sort_index(inplace=True)
except TypeError:
result = result.sort_index()
if hasattr(df, 'columns_value'):
if getattr(df.columns_value, 'should_be_monotonic', False):
try:
result.sort_index(axis=1, inplace=True)
except TypeError:
result = result.sort_index(axis=1)
return result<|docstring|>sort DataFrame on client according to `should_be_monotonic` attribute<|endoftext|>
|
9c47cee7f5e30e371d3d515de0b30f141361a4b51a6be482f8fb1558240b176b
|
def flatten(nested_iterable: Union[(List, Tuple)]) -> List:
'\n Flatten a nested iterable into a list.\n\n Parameters\n ----------\n nested_iterable : list or tuple\n an iterable which can contain other iterables\n\n Returns\n -------\n flattened : list\n\n Examples\n --------\n >>> flatten([[0, 1], [2, 3]])\n [0, 1, 2, 3]\n >>> flatten([[0, 1], [[3], [4, 5]]])\n [0, 1, 3, 4, 5]\n '
flattened = []
stack = list(nested_iterable)[::(- 1)]
while (len(stack) > 0):
inp = stack.pop()
if isinstance(inp, (tuple, list)):
stack.extend(inp[::(- 1)])
else:
flattened.append(inp)
return flattened
|
Flatten a nested iterable into a list.
Parameters
----------
nested_iterable : list or tuple
an iterable which can contain other iterables
Returns
-------
flattened : list
Examples
--------
>>> flatten([[0, 1], [2, 3]])
[0, 1, 2, 3]
>>> flatten([[0, 1], [[3], [4, 5]]])
[0, 1, 3, 4, 5]
|
mars/utils.py
|
flatten
|
mars-project/mars
| 2,413
|
python
|
def flatten(nested_iterable: Union[(List, Tuple)]) -> List:
'\n Flatten a nested iterable into a list.\n\n Parameters\n ----------\n nested_iterable : list or tuple\n an iterable which can contain other iterables\n\n Returns\n -------\n flattened : list\n\n Examples\n --------\n >>> flatten([[0, 1], [2, 3]])\n [0, 1, 2, 3]\n >>> flatten([[0, 1], [[3], [4, 5]]])\n [0, 1, 3, 4, 5]\n '
flattened = []
stack = list(nested_iterable)[::(- 1)]
while (len(stack) > 0):
inp = stack.pop()
if isinstance(inp, (tuple, list)):
stack.extend(inp[::(- 1)])
else:
flattened.append(inp)
return flattened
|
def flatten(nested_iterable: Union[(List, Tuple)]) -> List:
'\n Flatten a nested iterable into a list.\n\n Parameters\n ----------\n nested_iterable : list or tuple\n an iterable which can contain other iterables\n\n Returns\n -------\n flattened : list\n\n Examples\n --------\n >>> flatten([[0, 1], [2, 3]])\n [0, 1, 2, 3]\n >>> flatten([[0, 1], [[3], [4, 5]]])\n [0, 1, 3, 4, 5]\n '
flattened = []
stack = list(nested_iterable)[::(- 1)]
while (len(stack) > 0):
inp = stack.pop()
if isinstance(inp, (tuple, list)):
stack.extend(inp[::(- 1)])
else:
flattened.append(inp)
return flattened<|docstring|>Flatten a nested iterable into a list.
Parameters
----------
nested_iterable : list or tuple
an iterable which can contain other iterables
Returns
-------
flattened : list
Examples
--------
>>> flatten([[0, 1], [2, 3]])
[0, 1, 2, 3]
>>> flatten([[0, 1], [[3], [4, 5]]])
[0, 1, 3, 4, 5]<|endoftext|>
|
155517db0be7158100c6b0974e4d3f7b437608f373e1476ee92a862d29898881
|
def stack_back(flattened: List, raw: Union[(List, Tuple)]) -> Union[(List, Tuple)]:
'\n Organize a new iterable from a flattened list according to raw iterable.\n\n Parameters\n ----------\n flattened : list\n flattened list\n raw: list\n raw iterable\n\n Returns\n -------\n ret : list\n\n Examples\n --------\n >>> raw = [[0, 1], [2, [3, 4]]]\n >>> flattened = flatten(raw)\n >>> flattened\n [0, 1, 2, 3, 4]\n >>> a = [f + 1 for f in flattened]\n >>> a\n [1, 2, 3, 4, 5]\n >>> stack_back(a, raw)\n [[1, 2], [3, [4, 5]]]\n '
flattened_iter = iter(flattened)
result = list()
def _stack(container, items):
for item in items:
if (not isinstance(item, (list, tuple))):
container.append(next(flattened_iter))
else:
new_container = list()
container.append(new_container)
_stack(new_container, item)
return container
return _stack(result, raw)
|
Organize a new iterable from a flattened list according to raw iterable.
Parameters
----------
flattened : list
flattened list
raw: list
raw iterable
Returns
-------
ret : list
Examples
--------
>>> raw = [[0, 1], [2, [3, 4]]]
>>> flattened = flatten(raw)
>>> flattened
[0, 1, 2, 3, 4]
>>> a = [f + 1 for f in flattened]
>>> a
[1, 2, 3, 4, 5]
>>> stack_back(a, raw)
[[1, 2], [3, [4, 5]]]
|
mars/utils.py
|
stack_back
|
mars-project/mars
| 2,413
|
python
|
def stack_back(flattened: List, raw: Union[(List, Tuple)]) -> Union[(List, Tuple)]:
'\n Organize a new iterable from a flattened list according to raw iterable.\n\n Parameters\n ----------\n flattened : list\n flattened list\n raw: list\n raw iterable\n\n Returns\n -------\n ret : list\n\n Examples\n --------\n >>> raw = [[0, 1], [2, [3, 4]]]\n >>> flattened = flatten(raw)\n >>> flattened\n [0, 1, 2, 3, 4]\n >>> a = [f + 1 for f in flattened]\n >>> a\n [1, 2, 3, 4, 5]\n >>> stack_back(a, raw)\n [[1, 2], [3, [4, 5]]]\n '
flattened_iter = iter(flattened)
result = list()
def _stack(container, items):
for item in items:
if (not isinstance(item, (list, tuple))):
container.append(next(flattened_iter))
else:
new_container = list()
container.append(new_container)
_stack(new_container, item)
return container
return _stack(result, raw)
|
def stack_back(flattened: List, raw: Union[(List, Tuple)]) -> Union[(List, Tuple)]:
'\n Organize a new iterable from a flattened list according to raw iterable.\n\n Parameters\n ----------\n flattened : list\n flattened list\n raw: list\n raw iterable\n\n Returns\n -------\n ret : list\n\n Examples\n --------\n >>> raw = [[0, 1], [2, [3, 4]]]\n >>> flattened = flatten(raw)\n >>> flattened\n [0, 1, 2, 3, 4]\n >>> a = [f + 1 for f in flattened]\n >>> a\n [1, 2, 3, 4, 5]\n >>> stack_back(a, raw)\n [[1, 2], [3, [4, 5]]]\n '
flattened_iter = iter(flattened)
result = list()
def _stack(container, items):
for item in items:
if (not isinstance(item, (list, tuple))):
container.append(next(flattened_iter))
else:
new_container = list()
container.append(new_container)
_stack(new_container, item)
return container
return _stack(result, raw)<|docstring|>Organize a new iterable from a flattened list according to raw iterable.
Parameters
----------
flattened : list
flattened list
raw: list
raw iterable
Returns
-------
ret : list
Examples
--------
>>> raw = [[0, 1], [2, [3, 4]]]
>>> flattened = flatten(raw)
>>> flattened
[0, 1, 2, 3, 4]
>>> a = [f + 1 for f in flattened]
>>> a
[1, 2, 3, 4, 5]
>>> stack_back(a, raw)
[[1, 2], [3, [4, 5]]]<|endoftext|>
|
7cb4ba399fec106aea5f51ba95266d2a6c55a20672917ac12196ff57596a9a04
|
def adapt_mars_docstring(doc: str) -> str:
'\n Adapt numpy-style docstrings to Mars docstring.\n\n This util function will add Mars imports, replace object references\n and add execute calls. Note that check is needed after replacement.\n '
if (doc is None):
return None
lines = []
first_prompt = True
prev_prompt = False
has_numpy = ('np.' in doc)
has_pandas = ('pd.' in doc)
for line in doc.splitlines():
sp = line.strip()
if (sp.startswith('>>>') or sp.startswith('...')):
prev_prompt = True
if first_prompt:
first_prompt = False
indent = ''.join(itertools.takewhile((lambda x: (x in (' ', '\t'))), line))
if has_numpy:
lines.extend([(indent + '>>> import mars.tensor as mt')])
if has_pandas:
lines.extend([(indent + '>>> import mars.dataframe as md')])
line = line.replace('np.', 'mt.').replace('pd.', 'md.')
elif prev_prompt:
prev_prompt = False
if sp:
lines[(- 1)] += '.execute()'
lines.append(line)
return '\n'.join(lines)
|
Adapt numpy-style docstrings to Mars docstring.
This util function will add Mars imports, replace object references
and add execute calls. Note that check is needed after replacement.
|
mars/utils.py
|
adapt_mars_docstring
|
mars-project/mars
| 2,413
|
python
|
def adapt_mars_docstring(doc: str) -> str:
'\n Adapt numpy-style docstrings to Mars docstring.\n\n This util function will add Mars imports, replace object references\n and add execute calls. Note that check is needed after replacement.\n '
if (doc is None):
return None
lines = []
first_prompt = True
prev_prompt = False
has_numpy = ('np.' in doc)
has_pandas = ('pd.' in doc)
for line in doc.splitlines():
sp = line.strip()
if (sp.startswith('>>>') or sp.startswith('...')):
prev_prompt = True
if first_prompt:
first_prompt = False
indent = .join(itertools.takewhile((lambda x: (x in (' ', '\t'))), line))
if has_numpy:
lines.extend([(indent + '>>> import mars.tensor as mt')])
if has_pandas:
lines.extend([(indent + '>>> import mars.dataframe as md')])
line = line.replace('np.', 'mt.').replace('pd.', 'md.')
elif prev_prompt:
prev_prompt = False
if sp:
lines[(- 1)] += '.execute()'
lines.append(line)
return '\n'.join(lines)
|
def adapt_mars_docstring(doc: str) -> str:
'\n Adapt numpy-style docstrings to Mars docstring.\n\n This util function will add Mars imports, replace object references\n and add execute calls. Note that check is needed after replacement.\n '
if (doc is None):
return None
lines = []
first_prompt = True
prev_prompt = False
has_numpy = ('np.' in doc)
has_pandas = ('pd.' in doc)
for line in doc.splitlines():
sp = line.strip()
if (sp.startswith('>>>') or sp.startswith('...')):
prev_prompt = True
if first_prompt:
first_prompt = False
indent = .join(itertools.takewhile((lambda x: (x in (' ', '\t'))), line))
if has_numpy:
lines.extend([(indent + '>>> import mars.tensor as mt')])
if has_pandas:
lines.extend([(indent + '>>> import mars.dataframe as md')])
line = line.replace('np.', 'mt.').replace('pd.', 'md.')
elif prev_prompt:
prev_prompt = False
if sp:
lines[(- 1)] += '.execute()'
lines.append(line)
return '\n'.join(lines)<|docstring|>Adapt numpy-style docstrings to Mars docstring.
This util function will add Mars imports, replace object references
and add execute calls. Note that check is needed after replacement.<|endoftext|>
|
8f3cbf9a06f959b6283f11279eaf7de4a298e878d91983aac483cd119b0b2343
|
@contextmanager
def quiet_stdio():
'Quiets standard outputs when inferring types of functions'
with _io_quiet_lock:
_io_quiet_local.is_wrapped = True
sys.stdout = _QuietIOWrapper(sys.stdout)
sys.stderr = _QuietIOWrapper(sys.stderr)
try:
(yield)
finally:
with _io_quiet_lock:
sys.stdout = sys.stdout.wrapped
sys.stderr = sys.stderr.wrapped
if (not isinstance(sys.stdout, _QuietIOWrapper)):
_io_quiet_local.is_wrapped = False
|
Quiets standard outputs when inferring types of functions
|
mars/utils.py
|
quiet_stdio
|
mars-project/mars
| 2,413
|
python
|
@contextmanager
def quiet_stdio():
with _io_quiet_lock:
_io_quiet_local.is_wrapped = True
sys.stdout = _QuietIOWrapper(sys.stdout)
sys.stderr = _QuietIOWrapper(sys.stderr)
try:
(yield)
finally:
with _io_quiet_lock:
sys.stdout = sys.stdout.wrapped
sys.stderr = sys.stderr.wrapped
if (not isinstance(sys.stdout, _QuietIOWrapper)):
_io_quiet_local.is_wrapped = False
|
@contextmanager
def quiet_stdio():
with _io_quiet_lock:
_io_quiet_local.is_wrapped = True
sys.stdout = _QuietIOWrapper(sys.stdout)
sys.stderr = _QuietIOWrapper(sys.stderr)
try:
(yield)
finally:
with _io_quiet_lock:
sys.stdout = sys.stdout.wrapped
sys.stderr = sys.stderr.wrapped
if (not isinstance(sys.stdout, _QuietIOWrapper)):
_io_quiet_local.is_wrapped = False<|docstring|>Quiets standard outputs when inferring types of functions<|endoftext|>
|
4e59293a14cc8418030ca8e1495e362381c029a24c037743d155fbf69fd85474
|
def stringify_path(path: Union[(str, os.PathLike)]) -> str:
'\n Convert *path* to a string or unicode path if possible.\n '
if isinstance(path, str):
return path
try:
return path.__fspath__()
except AttributeError:
raise TypeError('not a path-like object')
|
Convert *path* to a string or unicode path if possible.
|
mars/utils.py
|
stringify_path
|
mars-project/mars
| 2,413
|
python
|
def stringify_path(path: Union[(str, os.PathLike)]) -> str:
'\n \n '
if isinstance(path, str):
return path
try:
return path.__fspath__()
except AttributeError:
raise TypeError('not a path-like object')
|
def stringify_path(path: Union[(str, os.PathLike)]) -> str:
'\n \n '
if isinstance(path, str):
return path
try:
return path.__fspath__()
except AttributeError:
raise TypeError('not a path-like object')<|docstring|>Convert *path* to a string or unicode path if possible.<|endoftext|>
|
fdbfdfd43b9e215082278d08a664b5c515d2e5257279afc01bf755af56660e50
|
def register_asyncio_task_timeout_detector(check_interval: int=None, task_timeout_seconds: int=None, task_exclude_filters: List[str]=None) -> Optional[asyncio.Task]:
'Register a asyncio task which print timeout task periodically.'
check_interval = (check_interval or int(os.environ.get('MARS_DEBUG_ASYNCIO_TASK_TIMEOUT_CHECK_INTERVAL', (- 1))))
if (check_interval > 0):
patch_asyncio_task_create_time()
task_timeout_seconds = (task_timeout_seconds or int(os.environ.get('MARS_DEBUG_ASYNCIO_TASK_TIMEOUT_SECONDS', check_interval)))
if (not task_exclude_filters):
task_exclude_filters = os.environ.get('MARS_DEBUG_ASYNCIO_TASK_EXCLUDE_FILTERS', 'mars/oscar')
task_exclude_filters = task_exclude_filters.split(';')
if (sys.version_info[:2] < (3, 7)):
logger.warning('asyncio tasks timeout detector is not supported under python %s', sys.version)
else:
loop = asyncio.get_running_loop()
logger.info('Create asyncio tasks timeout detector with check_interval %s task_timeout_seconds %s task_exclude_filters %s', check_interval, task_timeout_seconds, task_exclude_filters)
return loop.create_task(asyncio_task_timeout_detector(check_interval, task_timeout_seconds, task_exclude_filters))
else:
return None
|
Register a asyncio task which print timeout task periodically.
|
mars/utils.py
|
register_asyncio_task_timeout_detector
|
mars-project/mars
| 2,413
|
python
|
def register_asyncio_task_timeout_detector(check_interval: int=None, task_timeout_seconds: int=None, task_exclude_filters: List[str]=None) -> Optional[asyncio.Task]:
check_interval = (check_interval or int(os.environ.get('MARS_DEBUG_ASYNCIO_TASK_TIMEOUT_CHECK_INTERVAL', (- 1))))
if (check_interval > 0):
patch_asyncio_task_create_time()
task_timeout_seconds = (task_timeout_seconds or int(os.environ.get('MARS_DEBUG_ASYNCIO_TASK_TIMEOUT_SECONDS', check_interval)))
if (not task_exclude_filters):
task_exclude_filters = os.environ.get('MARS_DEBUG_ASYNCIO_TASK_EXCLUDE_FILTERS', 'mars/oscar')
task_exclude_filters = task_exclude_filters.split(';')
if (sys.version_info[:2] < (3, 7)):
logger.warning('asyncio tasks timeout detector is not supported under python %s', sys.version)
else:
loop = asyncio.get_running_loop()
logger.info('Create asyncio tasks timeout detector with check_interval %s task_timeout_seconds %s task_exclude_filters %s', check_interval, task_timeout_seconds, task_exclude_filters)
return loop.create_task(asyncio_task_timeout_detector(check_interval, task_timeout_seconds, task_exclude_filters))
else:
return None
|
def register_asyncio_task_timeout_detector(check_interval: int=None, task_timeout_seconds: int=None, task_exclude_filters: List[str]=None) -> Optional[asyncio.Task]:
check_interval = (check_interval or int(os.environ.get('MARS_DEBUG_ASYNCIO_TASK_TIMEOUT_CHECK_INTERVAL', (- 1))))
if (check_interval > 0):
patch_asyncio_task_create_time()
task_timeout_seconds = (task_timeout_seconds or int(os.environ.get('MARS_DEBUG_ASYNCIO_TASK_TIMEOUT_SECONDS', check_interval)))
if (not task_exclude_filters):
task_exclude_filters = os.environ.get('MARS_DEBUG_ASYNCIO_TASK_EXCLUDE_FILTERS', 'mars/oscar')
task_exclude_filters = task_exclude_filters.split(';')
if (sys.version_info[:2] < (3, 7)):
logger.warning('asyncio tasks timeout detector is not supported under python %s', sys.version)
else:
loop = asyncio.get_running_loop()
logger.info('Create asyncio tasks timeout detector with check_interval %s task_timeout_seconds %s task_exclude_filters %s', check_interval, task_timeout_seconds, task_exclude_filters)
return loop.create_task(asyncio_task_timeout_detector(check_interval, task_timeout_seconds, task_exclude_filters))
else:
return None<|docstring|>Register a asyncio task which print timeout task periodically.<|endoftext|>
|
631075caceba1dc521921f30a8a25365213cc780a0a1ee14dc85f200495bffff
|
def merge_dict(dest: Dict, src: Dict, path=None, overwrite=True):
'\n Merges src dict into dest dict.\n\n Parameters\n ----------\n dest: Dict\n dest dict\n src: Dict\n source dict\n path: List\n merge path\n overwrite: bool\n Whether overwrite dest dict when where is a conflict\n Returns\n -------\n Dict\n Updated dest dict\n '
if (path is None):
path = []
for key in src:
if (key in dest):
if (isinstance(dest[key], Dict) and isinstance(src[key], Dict)):
merge_dict(dest[key], src[key], (path + [str(key)]), overwrite=overwrite)
elif (dest[key] == src[key]):
pass
elif overwrite:
dest[key] = src[key]
else:
raise ValueError(('Conflict at %s' % '.'.join((path + [str(key)]))))
else:
dest[key] = src[key]
return dest
|
Merges src dict into dest dict.
Parameters
----------
dest: Dict
dest dict
src: Dict
source dict
path: List
merge path
overwrite: bool
Whether overwrite dest dict when where is a conflict
Returns
-------
Dict
Updated dest dict
|
mars/utils.py
|
merge_dict
|
mars-project/mars
| 2,413
|
python
|
def merge_dict(dest: Dict, src: Dict, path=None, overwrite=True):
'\n Merges src dict into dest dict.\n\n Parameters\n ----------\n dest: Dict\n dest dict\n src: Dict\n source dict\n path: List\n merge path\n overwrite: bool\n Whether overwrite dest dict when where is a conflict\n Returns\n -------\n Dict\n Updated dest dict\n '
if (path is None):
path = []
for key in src:
if (key in dest):
if (isinstance(dest[key], Dict) and isinstance(src[key], Dict)):
merge_dict(dest[key], src[key], (path + [str(key)]), overwrite=overwrite)
elif (dest[key] == src[key]):
pass
elif overwrite:
dest[key] = src[key]
else:
raise ValueError(('Conflict at %s' % '.'.join((path + [str(key)]))))
else:
dest[key] = src[key]
return dest
|
def merge_dict(dest: Dict, src: Dict, path=None, overwrite=True):
'\n Merges src dict into dest dict.\n\n Parameters\n ----------\n dest: Dict\n dest dict\n src: Dict\n source dict\n path: List\n merge path\n overwrite: bool\n Whether overwrite dest dict when where is a conflict\n Returns\n -------\n Dict\n Updated dest dict\n '
if (path is None):
path = []
for key in src:
if (key in dest):
if (isinstance(dest[key], Dict) and isinstance(src[key], Dict)):
merge_dict(dest[key], src[key], (path + [str(key)]), overwrite=overwrite)
elif (dest[key] == src[key]):
pass
elif overwrite:
dest[key] = src[key]
else:
raise ValueError(('Conflict at %s' % '.'.join((path + [str(key)]))))
else:
dest[key] = src[key]
return dest<|docstring|>Merges src dict into dest dict.
Parameters
----------
dest: Dict
dest dict
src: Dict
source dict
path: List
merge path
overwrite: bool
Whether overwrite dest dict when where is a conflict
Returns
-------
Dict
Updated dest dict<|endoftext|>
|
139ba6cec2351040cb93ea3c55aca72602de1795c079c1f871e9c5ff518a6e03
|
def flatten_dict_to_nested_dict(flatten_dict: Dict, sep='.') -> Dict:
'\n Return nested dict from flatten dict.\n\n Parameters\n ----------\n flatten_dict: Dict\n sep: str\n flatten key separator\n\n Returns\n -------\n Dict\n Nested dict\n '
assert all((isinstance(k, str) for k in flatten_dict.keys()))
nested_dict = dict()
keys = sorted(flatten_dict.keys(), key=(lambda k: (- len(k.split(sep)))))
for k in keys:
sub_keys = k.split(sep)
sub_nested_dict = nested_dict
for (i, sub_key) in enumerate(sub_keys):
if (i == (len(sub_keys) - 1)):
if (sub_key in sub_nested_dict):
raise ValueError(f'Key {k} conflict in sub key {sub_key}.')
sub_nested_dict[sub_key] = flatten_dict[k]
elif (sub_key not in sub_nested_dict):
new_sub_nested_dict = dict()
sub_nested_dict[sub_key] = new_sub_nested_dict
sub_nested_dict = new_sub_nested_dict
else:
sub_nested_dict = sub_nested_dict[sub_key]
return nested_dict
|
Return nested dict from flatten dict.
Parameters
----------
flatten_dict: Dict
sep: str
flatten key separator
Returns
-------
Dict
Nested dict
|
mars/utils.py
|
flatten_dict_to_nested_dict
|
mars-project/mars
| 2,413
|
python
|
def flatten_dict_to_nested_dict(flatten_dict: Dict, sep='.') -> Dict:
'\n Return nested dict from flatten dict.\n\n Parameters\n ----------\n flatten_dict: Dict\n sep: str\n flatten key separator\n\n Returns\n -------\n Dict\n Nested dict\n '
assert all((isinstance(k, str) for k in flatten_dict.keys()))
nested_dict = dict()
keys = sorted(flatten_dict.keys(), key=(lambda k: (- len(k.split(sep)))))
for k in keys:
sub_keys = k.split(sep)
sub_nested_dict = nested_dict
for (i, sub_key) in enumerate(sub_keys):
if (i == (len(sub_keys) - 1)):
if (sub_key in sub_nested_dict):
raise ValueError(f'Key {k} conflict in sub key {sub_key}.')
sub_nested_dict[sub_key] = flatten_dict[k]
elif (sub_key not in sub_nested_dict):
new_sub_nested_dict = dict()
sub_nested_dict[sub_key] = new_sub_nested_dict
sub_nested_dict = new_sub_nested_dict
else:
sub_nested_dict = sub_nested_dict[sub_key]
return nested_dict
|
def flatten_dict_to_nested_dict(flatten_dict: Dict, sep='.') -> Dict:
'\n Return nested dict from flatten dict.\n\n Parameters\n ----------\n flatten_dict: Dict\n sep: str\n flatten key separator\n\n Returns\n -------\n Dict\n Nested dict\n '
assert all((isinstance(k, str) for k in flatten_dict.keys()))
nested_dict = dict()
keys = sorted(flatten_dict.keys(), key=(lambda k: (- len(k.split(sep)))))
for k in keys:
sub_keys = k.split(sep)
sub_nested_dict = nested_dict
for (i, sub_key) in enumerate(sub_keys):
if (i == (len(sub_keys) - 1)):
if (sub_key in sub_nested_dict):
raise ValueError(f'Key {k} conflict in sub key {sub_key}.')
sub_nested_dict[sub_key] = flatten_dict[k]
elif (sub_key not in sub_nested_dict):
new_sub_nested_dict = dict()
sub_nested_dict[sub_key] = new_sub_nested_dict
sub_nested_dict = new_sub_nested_dict
else:
sub_nested_dict = sub_nested_dict[sub_key]
return nested_dict<|docstring|>Return nested dict from flatten dict.
Parameters
----------
flatten_dict: Dict
sep: str
flatten key separator
Returns
-------
Dict
Nested dict<|endoftext|>
|
20c4c6d2b91245cbce4db2353970386233de319ce7ff532cfe66f8865c7add70
|
def decision(probability):
'Get a boolean value with a given probability'
return (random() < probability)
|
Get a boolean value with a given probability
|
development_versions/v3.py
|
decision
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def decision(probability):
return (random() < probability)
|
def decision(probability):
return (random() < probability)<|docstring|>Get a boolean value with a given probability<|endoftext|>
|
d9f41aef498e3964fe2f74d45ae203e9e907272c926f0ae0752ce1db6491c043
|
def __init__(self, resistances=None):
'Initialise an infection within the model'
if (resistances is not None):
self.resistances = resistances
else:
self.resistances = {name: False for name in RESISTANCE_NAMES}
|
Initialise an infection within the model
|
development_versions/v3.py
|
__init__
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def __init__(self, resistances=None):
if (resistances is not None):
self.resistances = resistances
else:
self.resistances = {name: False for name in RESISTANCE_NAMES}
|
def __init__(self, resistances=None):
if (resistances is not None):
self.resistances = resistances
else:
self.resistances = {name: False for name in RESISTANCE_NAMES}<|docstring|>Initialise an infection within the model<|endoftext|>
|
47208d1e1eaa6dd5e3d34cc09b7663729d0cfb65d35d8c3842c69cb1223efc73
|
def make_resistant(self, resistance):
'Give the infection a specified resistance'
self.resistances[resistance] = True
|
Give the infection a specified resistance
|
development_versions/v3.py
|
make_resistant
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def make_resistant(self, resistance):
self.resistances[resistance] = True
|
def make_resistant(self, resistance):
self.resistances[resistance] = True<|docstring|>Give the infection a specified resistance<|endoftext|>
|
cb843a187916471ba3f770aa9216dddced11ccf9d9d8a367bfa3b275099b777c
|
def is_resistant(self, resistance):
'Return whether the infection has a specified resistance'
return self.resistances[resistance]
|
Return whether the infection has a specified resistance
|
development_versions/v3.py
|
is_resistant
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def is_resistant(self, resistance):
return self.resistances[resistance]
|
def is_resistant(self, resistance):
return self.resistances[resistance]<|docstring|>Return whether the infection has a specified resistance<|endoftext|>
|
7bb5f900803a1317f299c96ad5d53d76d1020230df8a9ff2af81c7b898ee8af8
|
def get_tier(self):
'Return how resistant the infection is - higher is more resistant'
for i in reversed(range(NUM_RESISTANCE_TYPES)):
if (self.resistances[RESISTANCE_NAMES[i]] == True):
return i
return (- 1)
|
Return how resistant the infection is - higher is more resistant
|
development_versions/v3.py
|
get_tier
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def get_tier(self):
for i in reversed(range(NUM_RESISTANCE_TYPES)):
if (self.resistances[RESISTANCE_NAMES[i]] == True):
return i
return (- 1)
|
def get_tier(self):
for i in reversed(range(NUM_RESISTANCE_TYPES)):
if (self.resistances[RESISTANCE_NAMES[i]] == True):
return i
return (- 1)<|docstring|>Return how resistant the infection is - higher is more resistant<|endoftext|>
|
d4dbbfdb7487e957f90d13749e1f113ae0f44627228cd76b888102c7fee6f1a1
|
def get_resistances_string(self):
'Get a canonical name for the present resistances'
string = ','.join([k for (k, v) in self.resistances.items() if v])
if (string == ''):
return '#'
return string
|
Get a canonical name for the present resistances
|
development_versions/v3.py
|
get_resistances_string
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def get_resistances_string(self):
string = ','.join([k for (k, v) in self.resistances.items() if v])
if (string == ):
return '#'
return string
|
def get_resistances_string(self):
string = ','.join([k for (k, v) in self.resistances.items() if v])
if (string == ):
return '#'
return string<|docstring|>Get a canonical name for the present resistances<|endoftext|>
|
c9511a10c63881ad3e335389cf63dcfbeff729c6527b8676e3853cee2c9175f4
|
def __repr__(self):
'Provide a string representation for the infection'
resistances_string = self.get_resistances_string()
if (resistances_string == '#'):
return 'infected'
return 'infected with resistances: {}'.format(resistances_string)
|
Provide a string representation for the infection
|
development_versions/v3.py
|
__repr__
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def __repr__(self):
resistances_string = self.get_resistances_string()
if (resistances_string == '#'):
return 'infected'
return 'infected with resistances: {}'.format(resistances_string)
|
def __repr__(self):
resistances_string = self.get_resistances_string()
if (resistances_string == '#'):
return 'infected'
return 'infected with resistances: {}'.format(resistances_string)<|docstring|>Provide a string representation for the infection<|endoftext|>
|
2f9f38a7b052a3d0b5028b63b6b3636d45b400bf022f815d6ce2915f9701fe37
|
def __init__(self, drug=RESISTANCE_NAMES[0], time_treated=None):
'Initialise a treatment within the model'
self.drug = drug
if (time_treated is not None):
self.time_treated = time_treated
else:
self.time_treated = 0
|
Initialise a treatment within the model
|
development_versions/v3.py
|
__init__
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def __init__(self, drug=RESISTANCE_NAMES[0], time_treated=None):
self.drug = drug
if (time_treated is not None):
self.time_treated = time_treated
else:
self.time_treated = 0
|
def __init__(self, drug=RESISTANCE_NAMES[0], time_treated=None):
self.drug = drug
if (time_treated is not None):
self.time_treated = time_treated
else:
self.time_treated = 0<|docstring|>Initialise a treatment within the model<|endoftext|>
|
0ec18b57bc9f4efad7c380a16ade67b921e9c91b99c0e8cf6ab9901ba6c9ee54
|
def next_treatment(self):
'Move up the treatment to the next strongest drug, and reset the\n amout of time that it has been used to zero'
drug_index = RESISTANCE_NAMES.index(self.drug)
if (drug_index < (NUM_RESISTANCE_TYPES - 1)):
self.drug = RESISTANCE_NAMES[(drug_index + 1)]
self.time_treated = 0
|
Move up the treatment to the next strongest drug, and reset the
amout of time that it has been used to zero
|
development_versions/v3.py
|
next_treatment
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def next_treatment(self):
'Move up the treatment to the next strongest drug, and reset the\n amout of time that it has been used to zero'
drug_index = RESISTANCE_NAMES.index(self.drug)
if (drug_index < (NUM_RESISTANCE_TYPES - 1)):
self.drug = RESISTANCE_NAMES[(drug_index + 1)]
self.time_treated = 0
|
def next_treatment(self):
'Move up the treatment to the next strongest drug, and reset the\n amout of time that it has been used to zero'
drug_index = RESISTANCE_NAMES.index(self.drug)
if (drug_index < (NUM_RESISTANCE_TYPES - 1)):
self.drug = RESISTANCE_NAMES[(drug_index + 1)]
self.time_treated = 0<|docstring|>Move up the treatment to the next strongest drug, and reset the
amout of time that it has been used to zero<|endoftext|>
|
9cf12741638e1aca43585001312f906ce0f3dae67eddb96beddae3ca1f293b88
|
def treats_infection(self, infection):
'Return whether the treatment works on the infection given any\n resistances the infection may have'
return (not infection.is_resistant(self.drug))
|
Return whether the treatment works on the infection given any
resistances the infection may have
|
development_versions/v3.py
|
treats_infection
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def treats_infection(self, infection):
'Return whether the treatment works on the infection given any\n resistances the infection may have'
return (not infection.is_resistant(self.drug))
|
def treats_infection(self, infection):
'Return whether the treatment works on the infection given any\n resistances the infection may have'
return (not infection.is_resistant(self.drug))<|docstring|>Return whether the treatment works on the infection given any
resistances the infection may have<|endoftext|>
|
650626ed9a61d29547a35e73e55dce2b538850e870dc8b04ba79ad36e6ccdb66
|
def __init__(self, infection=None, treatment=None, isolated=False, immune=False, alive=True, total_time_infected=0):
'Initialise a person as having various properties within the model'
self.infection = infection
self.treatment = treatment
self.isolated = isolated
self.immune = immune
self.total_time_infected = total_time_infected
self.alive = alive
|
Initialise a person as having various properties within the model
|
development_versions/v3.py
|
__init__
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def __init__(self, infection=None, treatment=None, isolated=False, immune=False, alive=True, total_time_infected=0):
self.infection = infection
self.treatment = treatment
self.isolated = isolated
self.immune = immune
self.total_time_infected = total_time_infected
self.alive = alive
|
def __init__(self, infection=None, treatment=None, isolated=False, immune=False, alive=True, total_time_infected=0):
self.infection = infection
self.treatment = treatment
self.isolated = isolated
self.immune = immune
self.total_time_infected = total_time_infected
self.alive = alive<|docstring|>Initialise a person as having various properties within the model<|endoftext|>
|
ce465934b0bb9709b7182107c5054c51e7236fb5f2688a9702e14eaf367751ea
|
def recover_from_infection(self):
'Recover the person, returning them to their default state; totally\n uninfected with no resistances, but now immune to the infection -\n irrespective of any resistances it has'
self.__init__(immune=True, isolated=False)
|
Recover the person, returning them to their default state; totally
uninfected with no resistances, but now immune to the infection -
irrespective of any resistances it has
|
development_versions/v3.py
|
recover_from_infection
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def recover_from_infection(self):
'Recover the person, returning them to their default state; totally\n uninfected with no resistances, but now immune to the infection -\n irrespective of any resistances it has'
self.__init__(immune=True, isolated=False)
|
def recover_from_infection(self):
'Recover the person, returning them to their default state; totally\n uninfected with no resistances, but now immune to the infection -\n irrespective of any resistances it has'
self.__init__(immune=True, isolated=False)<|docstring|>Recover the person, returning them to their default state; totally
uninfected with no resistances, but now immune to the infection -
irrespective of any resistances it has<|endoftext|>
|
594c75aea3f936c46e2f6dd66004b1d0259f0a535faca7f8a536b9ff376a1a4c
|
def mutate_infection(self):
'Make the infection become resistant to the treatment with a given\n probability of occurring'
if ((self.infection is not None) and (self.treatment is not None)):
self.infection.make_resistant(self.treatment.drug)
|
Make the infection become resistant to the treatment with a given
probability of occurring
|
development_versions/v3.py
|
mutate_infection
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def mutate_infection(self):
'Make the infection become resistant to the treatment with a given\n probability of occurring'
if ((self.infection is not None) and (self.treatment is not None)):
self.infection.make_resistant(self.treatment.drug)
|
def mutate_infection(self):
'Make the infection become resistant to the treatment with a given\n probability of occurring'
if ((self.infection is not None) and (self.treatment is not None)):
self.infection.make_resistant(self.treatment.drug)<|docstring|>Make the infection become resistant to the treatment with a given
probability of occurring<|endoftext|>
|
a3ca0b1c647b121f0b3142fdf61fee302b2ec3d58c9f1ae6affadf0f4cea34ec
|
def increase_treatment(self):
'Move up the treatment by one'
if (self.treatment is not None):
self.treatment.next_treatment()
|
Move up the treatment by one
|
development_versions/v3.py
|
increase_treatment
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def increase_treatment(self):
if (self.treatment is not None):
self.treatment.next_treatment()
|
def increase_treatment(self):
if (self.treatment is not None):
self.treatment.next_treatment()<|docstring|>Move up the treatment by one<|endoftext|>
|
d3a34b87aa32be8c6d9a1f55cf60c62438848d0d5138921d4c8b3c2b005e7c36
|
def correct_treatment(self):
'Return whether the current treatment is sufficient to overcome\n any resistances of the infection'
if (self.treatment is not None):
return self.treatment.treats_infection(self.infection)
return False
|
Return whether the current treatment is sufficient to overcome
any resistances of the infection
|
development_versions/v3.py
|
correct_treatment
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def correct_treatment(self):
'Return whether the current treatment is sufficient to overcome\n any resistances of the infection'
if (self.treatment is not None):
return self.treatment.treats_infection(self.infection)
return False
|
def correct_treatment(self):
'Return whether the current treatment is sufficient to overcome\n any resistances of the infection'
if (self.treatment is not None):
return self.treatment.treats_infection(self.infection)
return False<|docstring|>Return whether the current treatment is sufficient to overcome
any resistances of the infection<|endoftext|>
|
19fb9a498010beb311d4e89abf1ab895f54caffe787adbf10c33b167f4165459
|
def spread_infection(self, other):
"Give the current infection to another person, as long as they can\n receive it, don't already have a more resistant infection, and neither\n are isolated"
if (self.infection is None):
return None
directional = ((other.infection is None) or (self.infection.get_tier() > other.infection.get_tier()))
susceptible = ((not other.immune) and other.alive)
contactable = ((not self.isolated) and (not other.isolated))
if (directional and susceptible and contactable):
other.infection = deepcopy(self.infection)
|
Give the current infection to another person, as long as they can
receive it, don't already have a more resistant infection, and neither
are isolated
|
development_versions/v3.py
|
spread_infection
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def spread_infection(self, other):
"Give the current infection to another person, as long as they can\n receive it, don't already have a more resistant infection, and neither\n are isolated"
if (self.infection is None):
return None
directional = ((other.infection is None) or (self.infection.get_tier() > other.infection.get_tier()))
susceptible = ((not other.immune) and other.alive)
contactable = ((not self.isolated) and (not other.isolated))
if (directional and susceptible and contactable):
other.infection = deepcopy(self.infection)
|
def spread_infection(self, other):
"Give the current infection to another person, as long as they can\n receive it, don't already have a more resistant infection, and neither\n are isolated"
if (self.infection is None):
return None
directional = ((other.infection is None) or (self.infection.get_tier() > other.infection.get_tier()))
susceptible = ((not other.immune) and other.alive)
contactable = ((not self.isolated) and (not other.isolated))
if (directional and susceptible and contactable):
other.infection = deepcopy(self.infection)<|docstring|>Give the current infection to another person, as long as they can
receive it, don't already have a more resistant infection, and neither
are isolated<|endoftext|>
|
8c2be755efe2f4ab5ddb6f294f5353e06791096b5acad1a0cb3f315a358bd838
|
def isolate(self):
'Put the person in isolation'
self.isolated = True
|
Put the person in isolation
|
development_versions/v3.py
|
isolate
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def isolate(self):
self.isolated = True
|
def isolate(self):
self.isolated = True<|docstring|>Put the person in isolation<|endoftext|>
|
ec502bf4c570272c5bf306adf1e3bfe272a1f3515e1fe5ddec8f3dc5e0e71e21
|
def die(self):
'Make the person no longer alive'
self.__init__(alive=False)
|
Make the person no longer alive
|
development_versions/v3.py
|
die
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def die(self):
self.__init__(alive=False)
|
def die(self):
self.__init__(alive=False)<|docstring|>Make the person no longer alive<|endoftext|>
|
b7573e78b21ee62db6c5a2b861c03289b3b81938389cb2661e0c7664fb136c9d
|
def __repr__(self):
'Provide a string representation for the person'
if (not self.alive):
return 'Dead person'
elif self.immune:
return 'Immune person'
elif (self.infection is not None):
return 'Person {} and {}'.format(self.infection, self.treatment)
return 'Uninfected person'
|
Provide a string representation for the person
|
development_versions/v3.py
|
__repr__
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def __repr__(self):
if (not self.alive):
return 'Dead person'
elif self.immune:
return 'Immune person'
elif (self.infection is not None):
return 'Person {} and {}'.format(self.infection, self.treatment)
return 'Uninfected person'
|
def __repr__(self):
if (not self.alive):
return 'Dead person'
elif self.immune:
return 'Immune person'
elif (self.infection is not None):
return 'Person {} and {}'.format(self.infection, self.treatment)
return 'Uninfected person'<|docstring|>Provide a string representation for the person<|endoftext|>
|
fb284c27cc566f0be32ca3526b7cf7d72e69015a902072c1424ea9ee68dcc16f
|
def __init__(self, population=None):
'Initialise the model as having a population of people'
if (population is None):
self.population = [Person() for _ in range(POPULATION_SIZE)]
else:
self.population = population
self.data_handler = DataHandler()
|
Initialise the model as having a population of people
|
development_versions/v3.py
|
__init__
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def __init__(self, population=None):
if (population is None):
self.population = [Person() for _ in range(POPULATION_SIZE)]
else:
self.population = population
self.data_handler = DataHandler()
|
def __init__(self, population=None):
if (population is None):
self.population = [Person() for _ in range(POPULATION_SIZE)]
else:
self.population = population
self.data_handler = DataHandler()<|docstring|>Initialise the model as having a population of people<|endoftext|>
|
17754405d8f2470869ed6633623ddedd59d1c1e77541b37f590c7ea5acf22ded
|
def run(self):
'Simulate a number of timesteps within the model'
self.data_handler.__init__()
for _ in range(NUM_TIMESTEPS):
for person in self.population:
self.data_handler.record_person(person)
if ((person.infection is not None) and person.alive):
if (person.treatment is None):
person.treatment = Treatment()
else:
time_cond = (person.treatment.time_treated > TIMESTEPS_MOVE_UP_LAG_TIME)
rand_cond = decision(PROBABILITY_MOVE_UP_TREATMENT)
if (time_cond and rand_cond):
person.increase_treatment()
if (int(person.treatment.drug) >= ISOLATION_THRESHOLD):
person.isolate()
person.treatment.time_treated += 1
person.total_time_infected += 1
if (PRODUCT_IN_USE and decision(PROBABILIY_PRODUCT_DETECT)):
if person.infection.resistances[str(PRODUCT_DETECTION_LEVEL)]:
person.isolate()
'# If the person is known to have a resistance that\n # is higher than their treatment, increase their\n # treatment\n if person.treatment.drug < str(PRODUCT_DETECTION_LEVEL):\n person.treatment.drug = str(PRODUCT_DETECTION_LEVEL)'
general_recovery = decision(PROBABILITY_GENERAL_RECOVERY)
treatment_recovery = (person.correct_treatment() and decision(PROBABILITY_TREATMENT_RECOVERY))
if (general_recovery or treatment_recovery):
person.recover_from_infection()
if decision(PROBABILITY_MUTATION):
person.mutate_infection()
p = DEATH_FUNCTION(PROBABILITY_DEATH, person.total_time_infected)
if decision(p):
person.die()
updated_population = deepcopy(self.population)
for person in self.population:
if ((person.infection is not None) and decision(PROBABILITY_SPREAD)):
for receiver in sample(updated_population, NUM_SPREAD_TO):
person.spread_infection(receiver)
self.population = updated_population[:]
self.data_handler.process_timestep_data()
|
Simulate a number of timesteps within the model
|
development_versions/v3.py
|
run
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def run(self):
self.data_handler.__init__()
for _ in range(NUM_TIMESTEPS):
for person in self.population:
self.data_handler.record_person(person)
if ((person.infection is not None) and person.alive):
if (person.treatment is None):
person.treatment = Treatment()
else:
time_cond = (person.treatment.time_treated > TIMESTEPS_MOVE_UP_LAG_TIME)
rand_cond = decision(PROBABILITY_MOVE_UP_TREATMENT)
if (time_cond and rand_cond):
person.increase_treatment()
if (int(person.treatment.drug) >= ISOLATION_THRESHOLD):
person.isolate()
person.treatment.time_treated += 1
person.total_time_infected += 1
if (PRODUCT_IN_USE and decision(PROBABILIY_PRODUCT_DETECT)):
if person.infection.resistances[str(PRODUCT_DETECTION_LEVEL)]:
person.isolate()
'# If the person is known to have a resistance that\n # is higher than their treatment, increase their\n # treatment\n if person.treatment.drug < str(PRODUCT_DETECTION_LEVEL):\n person.treatment.drug = str(PRODUCT_DETECTION_LEVEL)'
general_recovery = decision(PROBABILITY_GENERAL_RECOVERY)
treatment_recovery = (person.correct_treatment() and decision(PROBABILITY_TREATMENT_RECOVERY))
if (general_recovery or treatment_recovery):
person.recover_from_infection()
if decision(PROBABILITY_MUTATION):
person.mutate_infection()
p = DEATH_FUNCTION(PROBABILITY_DEATH, person.total_time_infected)
if decision(p):
person.die()
updated_population = deepcopy(self.population)
for person in self.population:
if ((person.infection is not None) and decision(PROBABILITY_SPREAD)):
for receiver in sample(updated_population, NUM_SPREAD_TO):
person.spread_infection(receiver)
self.population = updated_population[:]
self.data_handler.process_timestep_data()
|
def run(self):
self.data_handler.__init__()
for _ in range(NUM_TIMESTEPS):
for person in self.population:
self.data_handler.record_person(person)
if ((person.infection is not None) and person.alive):
if (person.treatment is None):
person.treatment = Treatment()
else:
time_cond = (person.treatment.time_treated > TIMESTEPS_MOVE_UP_LAG_TIME)
rand_cond = decision(PROBABILITY_MOVE_UP_TREATMENT)
if (time_cond and rand_cond):
person.increase_treatment()
if (int(person.treatment.drug) >= ISOLATION_THRESHOLD):
person.isolate()
person.treatment.time_treated += 1
person.total_time_infected += 1
if (PRODUCT_IN_USE and decision(PROBABILIY_PRODUCT_DETECT)):
if person.infection.resistances[str(PRODUCT_DETECTION_LEVEL)]:
person.isolate()
'# If the person is known to have a resistance that\n # is higher than their treatment, increase their\n # treatment\n if person.treatment.drug < str(PRODUCT_DETECTION_LEVEL):\n person.treatment.drug = str(PRODUCT_DETECTION_LEVEL)'
general_recovery = decision(PROBABILITY_GENERAL_RECOVERY)
treatment_recovery = (person.correct_treatment() and decision(PROBABILITY_TREATMENT_RECOVERY))
if (general_recovery or treatment_recovery):
person.recover_from_infection()
if decision(PROBABILITY_MUTATION):
person.mutate_infection()
p = DEATH_FUNCTION(PROBABILITY_DEATH, person.total_time_infected)
if decision(p):
person.die()
updated_population = deepcopy(self.population)
for person in self.population:
if ((person.infection is not None) and decision(PROBABILITY_SPREAD)):
for receiver in sample(updated_population, NUM_SPREAD_TO):
person.spread_infection(receiver)
self.population = updated_population[:]
self.data_handler.process_timestep_data()<|docstring|>Simulate a number of timesteps within the model<|endoftext|>
|
9656d7e718f30f3565bfcb93b083b8c28fe24f1a5b3675c0cb8127e0571ef9fa
|
def __init__(self):
'Initialise the data handler for the model as storing data\n in an appropriate structure'
self.time = []
self.ys_data = [[] for _ in range((4 + NUM_RESISTANCE_TYPES))]
self.labels = ((['Infected'] + list(map((lambda x: ('Resistance ' + x)), RESISTANCE_NAMES))) + ['Dead', 'Immune', 'Uninfected'])
self.non_disjoint = [[]]
self.non_disjoint_labels = ['Isolated']
self.timestep = (- 1)
self._new_timestep_vars()
|
Initialise the data handler for the model as storing data
in an appropriate structure
|
development_versions/v3.py
|
__init__
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def __init__(self):
'Initialise the data handler for the model as storing data\n in an appropriate structure'
self.time = []
self.ys_data = [[] for _ in range((4 + NUM_RESISTANCE_TYPES))]
self.labels = ((['Infected'] + list(map((lambda x: ('Resistance ' + x)), RESISTANCE_NAMES))) + ['Dead', 'Immune', 'Uninfected'])
self.non_disjoint = [[]]
self.non_disjoint_labels = ['Isolated']
self.timestep = (- 1)
self._new_timestep_vars()
|
def __init__(self):
'Initialise the data handler for the model as storing data\n in an appropriate structure'
self.time = []
self.ys_data = [[] for _ in range((4 + NUM_RESISTANCE_TYPES))]
self.labels = ((['Infected'] + list(map((lambda x: ('Resistance ' + x)), RESISTANCE_NAMES))) + ['Dead', 'Immune', 'Uninfected'])
self.non_disjoint = [[]]
self.non_disjoint_labels = ['Isolated']
self.timestep = (- 1)
self._new_timestep_vars()<|docstring|>Initialise the data handler for the model as storing data
in an appropriate structure<|endoftext|>
|
3e97cc1c17274794117ba2609bded841137ed46c7d3a632d5284c2ee428159b0
|
def _new_timestep_vars(self):
'Make some helper variables'
self.num_infected_stages = ([0] * (NUM_RESISTANCE_TYPES + 1))
self.num_dead = 0
self.num_immune = 0
self.num_uninfected = 0
self.num_isolated = 0
self.timestep += 1
|
Make some helper variables
|
development_versions/v3.py
|
_new_timestep_vars
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def _new_timestep_vars(self):
self.num_infected_stages = ([0] * (NUM_RESISTANCE_TYPES + 1))
self.num_dead = 0
self.num_immune = 0
self.num_uninfected = 0
self.num_isolated = 0
self.timestep += 1
|
def _new_timestep_vars(self):
self.num_infected_stages = ([0] * (NUM_RESISTANCE_TYPES + 1))
self.num_dead = 0
self.num_immune = 0
self.num_uninfected = 0
self.num_isolated = 0
self.timestep += 1<|docstring|>Make some helper variables<|endoftext|>
|
2236beccaf516951c1ebd4c275e336e482c532098fd00711e3b714bf32d341c7
|
def record_person(self, person):
'Record data about a person in the helper variables, so a whole\n statistic can be formed by running this function on every person in\n the population'
if person.immune:
self.num_immune += 1
elif (not person.alive):
self.num_dead += 1
elif (person.infection is None):
self.num_uninfected += 1
else:
self.num_infected_stages[(person.infection.get_tier() + 1)] += 1
if person.isolated:
self.num_isolated += 1
|
Record data about a person in the helper variables, so a whole
statistic can be formed by running this function on every person in
the population
|
development_versions/v3.py
|
record_person
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def record_person(self, person):
'Record data about a person in the helper variables, so a whole\n statistic can be formed by running this function on every person in\n the population'
if person.immune:
self.num_immune += 1
elif (not person.alive):
self.num_dead += 1
elif (person.infection is None):
self.num_uninfected += 1
else:
self.num_infected_stages[(person.infection.get_tier() + 1)] += 1
if person.isolated:
self.num_isolated += 1
|
def record_person(self, person):
'Record data about a person in the helper variables, so a whole\n statistic can be formed by running this function on every person in\n the population'
if person.immune:
self.num_immune += 1
elif (not person.alive):
self.num_dead += 1
elif (person.infection is None):
self.num_uninfected += 1
else:
self.num_infected_stages[(person.infection.get_tier() + 1)] += 1
if person.isolated:
self.num_isolated += 1<|docstring|>Record data about a person in the helper variables, so a whole
statistic can be formed by running this function on every person in
the population<|endoftext|>
|
5067b383d01b4f005b1b7cd2c87c7ad8f07677bd877cc558371890aa623e3f63
|
def _preprocess_disjoint_labels(self):
'Preprocess the data and the labelling for some graph types'
if (GRAPH_TYPE == 'line'):
datas = []
for i in range((NUM_RESISTANCE_TYPES + 1)):
datas.append([sum(x) for x in zip(*self.ys_data[i:(- 3)])])
datas.extend(self.ys_data[(- 3):])
datas.extend(self.non_disjoint)
final_labels = (self.labels + self.non_disjoint_labels)
return (datas, final_labels)
return (self.ys_data, self.labels)
|
Preprocess the data and the labelling for some graph types
|
development_versions/v3.py
|
_preprocess_disjoint_labels
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def _preprocess_disjoint_labels(self):
if (GRAPH_TYPE == 'line'):
datas = []
for i in range((NUM_RESISTANCE_TYPES + 1)):
datas.append([sum(x) for x in zip(*self.ys_data[i:(- 3)])])
datas.extend(self.ys_data[(- 3):])
datas.extend(self.non_disjoint)
final_labels = (self.labels + self.non_disjoint_labels)
return (datas, final_labels)
return (self.ys_data, self.labels)
|
def _preprocess_disjoint_labels(self):
if (GRAPH_TYPE == 'line'):
datas = []
for i in range((NUM_RESISTANCE_TYPES + 1)):
datas.append([sum(x) for x in zip(*self.ys_data[i:(- 3)])])
datas.extend(self.ys_data[(- 3):])
datas.extend(self.non_disjoint)
final_labels = (self.labels + self.non_disjoint_labels)
return (datas, final_labels)
return (self.ys_data, self.labels)<|docstring|>Preprocess the data and the labelling for some graph types<|endoftext|>
|
bf257c937459b46fa7713b409071c6cc5790e61e506a3f877e0bdea0ab81a492
|
def draw_full_graph(self):
'Draw a graph of all of the data in the graph'
(datas, final_labels) = self._preprocess_disjoint_labels()
DataRenderer.draw_full_graph(self.time, datas, final_labels)
|
Draw a graph of all of the data in the graph
|
development_versions/v3.py
|
draw_full_graph
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def draw_full_graph(self):
(datas, final_labels) = self._preprocess_disjoint_labels()
DataRenderer.draw_full_graph(self.time, datas, final_labels)
|
def draw_full_graph(self):
(datas, final_labels) = self._preprocess_disjoint_labels()
DataRenderer.draw_full_graph(self.time, datas, final_labels)<|docstring|>Draw a graph of all of the data in the graph<|endoftext|>
|
8481a3ae1c769789861635378b57c9dc4184bb88e6f3d91dcea690e26c9b06c0
|
def _print_current_data(self):
'Print the values of the current state of the simulation'
print('uninfected: {}, immune: {}, dead: {}, infected: {}, isolated: {}'.format(str(self.num_uninfected).ljust(OUTPUT_PADDING), str(self.num_immune).ljust(OUTPUT_PADDING), str(self.num_dead).ljust(OUTPUT_PADDING), (('[' + ', '.join(map((lambda x: str(x).ljust(OUTPUT_PADDING)), self.num_infected_stages))) + ']'), str(self.num_isolated)))
|
Print the values of the current state of the simulation
|
development_versions/v3.py
|
_print_current_data
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def _print_current_data(self):
print('uninfected: {}, immune: {}, dead: {}, infected: {}, isolated: {}'.format(str(self.num_uninfected).ljust(OUTPUT_PADDING), str(self.num_immune).ljust(OUTPUT_PADDING), str(self.num_dead).ljust(OUTPUT_PADDING), (('[' + ', '.join(map((lambda x: str(x).ljust(OUTPUT_PADDING)), self.num_infected_stages))) + ']'), str(self.num_isolated)))
|
def _print_current_data(self):
print('uninfected: {}, immune: {}, dead: {}, infected: {}, isolated: {}'.format(str(self.num_uninfected).ljust(OUTPUT_PADDING), str(self.num_immune).ljust(OUTPUT_PADDING), str(self.num_dead).ljust(OUTPUT_PADDING), (('[' + ', '.join(map((lambda x: str(x).ljust(OUTPUT_PADDING)), self.num_infected_stages))) + ']'), str(self.num_isolated)))<|docstring|>Print the values of the current state of the simulation<|endoftext|>
|
bdd7116eee53927143e769293a52619b315f553efdc3fca03b0cb6a71ffeb536
|
def _report_model_state(self):
"Report the model's state through any mechanism set in parameters"
if ((REPORT_MOD_NUM is None) or ((self.timestep % REPORT_MOD_NUM) == 0)):
if (REPORT_PROGRESS and (not PRINT_DATA)):
print('{}% complete'.format(int(((self.timestep / int((NUM_TIMESTEPS / 10))) * 10))))
if PRINT_DATA:
if REPORT_PROGRESS:
print('{}% complete'.format(str(int(((self.timestep / int((NUM_TIMESTEPS / 10))) * 10))).ljust(2)), end=' - ')
self._print_current_data()
if ANIMATE_GRAPH:
(datas, final_labels) = self._preprocess_disjoint_labels()
DataRenderer.animate_current_graph(self.time, datas, final_labels)
|
Report the model's state through any mechanism set in parameters
|
development_versions/v3.py
|
_report_model_state
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def _report_model_state(self):
if ((REPORT_MOD_NUM is None) or ((self.timestep % REPORT_MOD_NUM) == 0)):
if (REPORT_PROGRESS and (not PRINT_DATA)):
print('{}% complete'.format(int(((self.timestep / int((NUM_TIMESTEPS / 10))) * 10))))
if PRINT_DATA:
if REPORT_PROGRESS:
print('{}% complete'.format(str(int(((self.timestep / int((NUM_TIMESTEPS / 10))) * 10))).ljust(2)), end=' - ')
self._print_current_data()
if ANIMATE_GRAPH:
(datas, final_labels) = self._preprocess_disjoint_labels()
DataRenderer.animate_current_graph(self.time, datas, final_labels)
|
def _report_model_state(self):
if ((REPORT_MOD_NUM is None) or ((self.timestep % REPORT_MOD_NUM) == 0)):
if (REPORT_PROGRESS and (not PRINT_DATA)):
print('{}% complete'.format(int(((self.timestep / int((NUM_TIMESTEPS / 10))) * 10))))
if PRINT_DATA:
if REPORT_PROGRESS:
print('{}% complete'.format(str(int(((self.timestep / int((NUM_TIMESTEPS / 10))) * 10))).ljust(2)), end=' - ')
self._print_current_data()
if ANIMATE_GRAPH:
(datas, final_labels) = self._preprocess_disjoint_labels()
DataRenderer.animate_current_graph(self.time, datas, final_labels)<|docstring|>Report the model's state through any mechanism set in parameters<|endoftext|>
|
d72ea6e22911044d684236cfab8e427ccf1379a0d7061bab613abff8e9abbe5a
|
def process_timestep_data(self):
"Store the current timestep's data into the appropriate data\n structures"
for (j, v) in enumerate(self.num_infected_stages):
self.ys_data[j].append(v)
self.ys_data[(- 3)].append(self.num_dead)
self.ys_data[(- 2)].append(self.num_immune)
self.ys_data[(- 1)].append(self.num_uninfected)
self.non_disjoint[0].append(self.num_isolated)
self.time.append(self.timestep)
self._report_model_state()
self._new_timestep_vars()
|
Store the current timestep's data into the appropriate data
structures
|
development_versions/v3.py
|
process_timestep_data
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
def process_timestep_data(self):
"Store the current timestep's data into the appropriate data\n structures"
for (j, v) in enumerate(self.num_infected_stages):
self.ys_data[j].append(v)
self.ys_data[(- 3)].append(self.num_dead)
self.ys_data[(- 2)].append(self.num_immune)
self.ys_data[(- 1)].append(self.num_uninfected)
self.non_disjoint[0].append(self.num_isolated)
self.time.append(self.timestep)
self._report_model_state()
self._new_timestep_vars()
|
def process_timestep_data(self):
"Store the current timestep's data into the appropriate data\n structures"
for (j, v) in enumerate(self.num_infected_stages):
self.ys_data[j].append(v)
self.ys_data[(- 3)].append(self.num_dead)
self.ys_data[(- 2)].append(self.num_immune)
self.ys_data[(- 1)].append(self.num_uninfected)
self.non_disjoint[0].append(self.num_isolated)
self.time.append(self.timestep)
self._report_model_state()
self._new_timestep_vars()<|docstring|>Store the current timestep's data into the appropriate data
structures<|endoftext|>
|
5d8c1719c57c98f247d672f0c2efb4d07242f3917eb31b9ad075c0430d615523
|
@staticmethod
def _draw_graph(time, ys_data, labels):
'Actually draw the graph via matplotlib'
if (GRAPH_TYPE == 'line'):
for i in range(len(ys_data)):
plt.plot(time, ys_data[i], label=labels[i])
else:
plt.stackplot(time, *ys_data, labels=labels)
|
Actually draw the graph via matplotlib
|
development_versions/v3.py
|
_draw_graph
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
@staticmethod
def _draw_graph(time, ys_data, labels):
if (GRAPH_TYPE == 'line'):
for i in range(len(ys_data)):
plt.plot(time, ys_data[i], label=labels[i])
else:
plt.stackplot(time, *ys_data, labels=labels)
|
@staticmethod
def _draw_graph(time, ys_data, labels):
if (GRAPH_TYPE == 'line'):
for i in range(len(ys_data)):
plt.plot(time, ys_data[i], label=labels[i])
else:
plt.stackplot(time, *ys_data, labels=labels)<|docstring|>Actually draw the graph via matplotlib<|endoftext|>
|
be7e4b9ab054469e38212fb3d7c8422fb972b9b13f52b34ffc4528781f3ac322
|
@staticmethod
def _graph_settings():
'Add settings for the graph, e.g. axis labels and legend'
plt.title('Resistance simulation')
plt.legend(loc='upper right')
plt.xlabel('Time / timesteps')
plt.ylabel('# People')
|
Add settings for the graph, e.g. axis labels and legend
|
development_versions/v3.py
|
_graph_settings
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
@staticmethod
def _graph_settings():
plt.title('Resistance simulation')
plt.legend(loc='upper right')
plt.xlabel('Time / timesteps')
plt.ylabel('# People')
|
@staticmethod
def _graph_settings():
plt.title('Resistance simulation')
plt.legend(loc='upper right')
plt.xlabel('Time / timesteps')
plt.ylabel('# People')<|docstring|>Add settings for the graph, e.g. axis labels and legend<|endoftext|>
|
27059bbf85660cddf22b5664b971fe95946237ea505b31e6d389ac302be6f82f
|
@staticmethod
def draw_full_graph(time, ys_data, labels):
'Draw and show the graph with all the data and legend once'
DataRenderer._draw_graph(time, ys_data, labels)
DataRenderer._graph_settings()
plt.show()
|
Draw and show the graph with all the data and legend once
|
development_versions/v3.py
|
draw_full_graph
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
@staticmethod
def draw_full_graph(time, ys_data, labels):
DataRenderer._draw_graph(time, ys_data, labels)
DataRenderer._graph_settings()
plt.show()
|
@staticmethod
def draw_full_graph(time, ys_data, labels):
DataRenderer._draw_graph(time, ys_data, labels)
DataRenderer._graph_settings()
plt.show()<|docstring|>Draw and show the graph with all the data and legend once<|endoftext|>
|
ad286fed65e4a588f98a45daee8f516748ce0235f4a1e54c08e8e63b97241087
|
@staticmethod
def animate_current_graph(time, ys_data, labels):
'Draw a graph up to the current state of the simulation'
drawnow((lambda : DataRenderer._draw_graph(time, ys_data, labels)))
|
Draw a graph up to the current state of the simulation
|
development_versions/v3.py
|
animate_current_graph
|
Warwick-iGEM-2021/modelling
| 0
|
python
|
@staticmethod
def animate_current_graph(time, ys_data, labels):
drawnow((lambda : DataRenderer._draw_graph(time, ys_data, labels)))
|
@staticmethod
def animate_current_graph(time, ys_data, labels):
drawnow((lambda : DataRenderer._draw_graph(time, ys_data, labels)))<|docstring|>Draw a graph up to the current state of the simulation<|endoftext|>
|
aaa1e4d742fa621a9eade351b975d138e71e25bce5ea448faa6ac73c6005cc07
|
def setup_platform(hass: HomeAssistant, config: ConfigType, add_entities: AddEntitiesCallback, discovery_info: (DiscoveryInfoType | None)=None) -> None:
'Set up switch platform for ADS.'
ads_hub = hass.data.get(DATA_ADS)
name = config[CONF_NAME]
ads_var = config[CONF_ADS_VAR]
add_entities([AdsSwitch(ads_hub, name, ads_var)])
|
Set up switch platform for ADS.
|
homeassistant/components/ads/switch.py
|
setup_platform
|
GrandMoff100/homeassistant-core
| 30,023
|
python
|
def setup_platform(hass: HomeAssistant, config: ConfigType, add_entities: AddEntitiesCallback, discovery_info: (DiscoveryInfoType | None)=None) -> None:
ads_hub = hass.data.get(DATA_ADS)
name = config[CONF_NAME]
ads_var = config[CONF_ADS_VAR]
add_entities([AdsSwitch(ads_hub, name, ads_var)])
|
def setup_platform(hass: HomeAssistant, config: ConfigType, add_entities: AddEntitiesCallback, discovery_info: (DiscoveryInfoType | None)=None) -> None:
ads_hub = hass.data.get(DATA_ADS)
name = config[CONF_NAME]
ads_var = config[CONF_ADS_VAR]
add_entities([AdsSwitch(ads_hub, name, ads_var)])<|docstring|>Set up switch platform for ADS.<|endoftext|>
|
8ec8439ea79325509633b695dd2e2c4dfa09b57ed2c5bf38f1fe28ff8c52732a
|
async def async_added_to_hass(self):
'Register device notification.'
(await self.async_initialize_device(self._ads_var, pyads.PLCTYPE_BOOL))
|
Register device notification.
|
homeassistant/components/ads/switch.py
|
async_added_to_hass
|
GrandMoff100/homeassistant-core
| 30,023
|
python
|
async def async_added_to_hass(self):
(await self.async_initialize_device(self._ads_var, pyads.PLCTYPE_BOOL))
|
async def async_added_to_hass(self):
(await self.async_initialize_device(self._ads_var, pyads.PLCTYPE_BOOL))<|docstring|>Register device notification.<|endoftext|>
|
4530e1fe308f686e6501ab9d5086d334ec6b4216c5f2171cd2fb2f5683a0be9c
|
@property
def is_on(self) -> bool:
'Return True if the entity is on.'
return self._state_dict[STATE_KEY_STATE]
|
Return True if the entity is on.
|
homeassistant/components/ads/switch.py
|
is_on
|
GrandMoff100/homeassistant-core
| 30,023
|
python
|
@property
def is_on(self) -> bool:
return self._state_dict[STATE_KEY_STATE]
|
@property
def is_on(self) -> bool:
return self._state_dict[STATE_KEY_STATE]<|docstring|>Return True if the entity is on.<|endoftext|>
|
047a2bc12edd8083c5fcc1d4d66521df91d7d79b73f28860c49638b82f0041bd
|
def turn_on(self, **kwargs):
'Turn the switch on.'
self._ads_hub.write_by_name(self._ads_var, True, pyads.PLCTYPE_BOOL)
|
Turn the switch on.
|
homeassistant/components/ads/switch.py
|
turn_on
|
GrandMoff100/homeassistant-core
| 30,023
|
python
|
def turn_on(self, **kwargs):
self._ads_hub.write_by_name(self._ads_var, True, pyads.PLCTYPE_BOOL)
|
def turn_on(self, **kwargs):
self._ads_hub.write_by_name(self._ads_var, True, pyads.PLCTYPE_BOOL)<|docstring|>Turn the switch on.<|endoftext|>
|
c4d99032121af014ba1696c3bfb33e59f7e112f7d306d88bdebc6e0abf30baca
|
def turn_off(self, **kwargs):
'Turn the switch off.'
self._ads_hub.write_by_name(self._ads_var, False, pyads.PLCTYPE_BOOL)
|
Turn the switch off.
|
homeassistant/components/ads/switch.py
|
turn_off
|
GrandMoff100/homeassistant-core
| 30,023
|
python
|
def turn_off(self, **kwargs):
self._ads_hub.write_by_name(self._ads_var, False, pyads.PLCTYPE_BOOL)
|
def turn_off(self, **kwargs):
self._ads_hub.write_by_name(self._ads_var, False, pyads.PLCTYPE_BOOL)<|docstring|>Turn the switch off.<|endoftext|>
|
95560b3be97e48f0641d7aec54108e73b4b741632eeebc7d060b4ff18a086cd6
|
def run_comparison(self, **kwargs):
'\n Main test function, generates a test file with varied lookup\n parameters, then passes through any keyword arguments to\n :meth:`select.select` and returns the result.\n\n '
(nx, ny, nz) = (4, 3, 5)
(x0, y0, dx, dy) = (10.0, (- 60.0), 0.1, 0.2)
stagger = 6
ff = self._minimal_valid_ff(nx, ny, nz, x0, y0, dx, dy, stagger)
for _ in range(self.N_FIELDS):
self.new_p_field(ff)
lbuser4 = 0
for (ifld, field) in enumerate(ff.fields):
lbuser4 += 100
field.lbuser4 = lbuser4
field.lbproc = (ifld // (self.N_FIELDS // 2))
return select.select(ff, **kwargs)
|
Main test function, generates a test file with varied lookup
parameters, then passes through any keyword arguments to
:meth:`select.select` and returns the result.
|
um_utils/lib/um_utils/tests/select/test_select.py
|
run_comparison
|
metomi/mule
| 0
|
python
|
def run_comparison(self, **kwargs):
'\n Main test function, generates a test file with varied lookup\n parameters, then passes through any keyword arguments to\n :meth:`select.select` and returns the result.\n\n '
(nx, ny, nz) = (4, 3, 5)
(x0, y0, dx, dy) = (10.0, (- 60.0), 0.1, 0.2)
stagger = 6
ff = self._minimal_valid_ff(nx, ny, nz, x0, y0, dx, dy, stagger)
for _ in range(self.N_FIELDS):
self.new_p_field(ff)
lbuser4 = 0
for (ifld, field) in enumerate(ff.fields):
lbuser4 += 100
field.lbuser4 = lbuser4
field.lbproc = (ifld // (self.N_FIELDS // 2))
return select.select(ff, **kwargs)
|
def run_comparison(self, **kwargs):
'\n Main test function, generates a test file with varied lookup\n parameters, then passes through any keyword arguments to\n :meth:`select.select` and returns the result.\n\n '
(nx, ny, nz) = (4, 3, 5)
(x0, y0, dx, dy) = (10.0, (- 60.0), 0.1, 0.2)
stagger = 6
ff = self._minimal_valid_ff(nx, ny, nz, x0, y0, dx, dy, stagger)
for _ in range(self.N_FIELDS):
self.new_p_field(ff)
lbuser4 = 0
for (ifld, field) in enumerate(ff.fields):
lbuser4 += 100
field.lbuser4 = lbuser4
field.lbproc = (ifld // (self.N_FIELDS // 2))
return select.select(ff, **kwargs)<|docstring|>Main test function, generates a test file with varied lookup
parameters, then passes through any keyword arguments to
:meth:`select.select` and returns the result.<|endoftext|>
|
0c73fef3e06084f3a98589193fdaaf0fe11373cd9f6bb8b11ac4fe397893cdf3
|
def compile_inputs(input_file_list):
'\n Puts together all inputs into one dataframe\n\n Parameters\n ----------\n input_file_list\n\n Returns\n -------\n\n '
input_dfs = []
for file in input_file_list:
input_dfs.append(model_input(file))
inputs_df = pd.concat(input_dfs)
inputs_df['z'] = [re.search('-z(.*)-', name).group()[2:(- 1)] for name in inputs_df.filenames]
return inputs_df.reset_index().drop('index', axis=1)
|
Puts together all inputs into one dataframe
Parameters
----------
input_file_list
Returns
-------
|
hoki/search.py
|
compile_inputs
|
findesgh/hoki
| 0
|
python
|
def compile_inputs(input_file_list):
'\n Puts together all inputs into one dataframe\n\n Parameters\n ----------\n input_file_list\n\n Returns\n -------\n\n '
input_dfs = []
for file in input_file_list:
input_dfs.append(model_input(file))
inputs_df = pd.concat(input_dfs)
inputs_df['z'] = [re.search('-z(.*)-', name).group()[2:(- 1)] for name in inputs_df.filenames]
return inputs_df.reset_index().drop('index', axis=1)
|
def compile_inputs(input_file_list):
'\n Puts together all inputs into one dataframe\n\n Parameters\n ----------\n input_file_list\n\n Returns\n -------\n\n '
input_dfs = []
for file in input_file_list:
input_dfs.append(model_input(file))
inputs_df = pd.concat(input_dfs)
inputs_df['z'] = [re.search('-z(.*)-', name).group()[2:(- 1)] for name in inputs_df.filenames]
return inputs_df.reset_index().drop('index', axis=1)<|docstring|>Puts together all inputs into one dataframe
Parameters
----------
input_file_list
Returns
-------<|endoftext|>
|
ac77ebcf73a17577711472dcd51e6458b7c3131194a17d5c94dbd202d6c9880a
|
def select_input_files(z_list, directory=OUTPUTS_PATH, binary=True, single=False, imf='imf135_300'):
'\n Creates list of relevant input file\n\n Parameters\n ----------\n z_list\n directory\n binary\n single\n imf\n\n Returns\n -------\n\n '
base = 'input_bpass_'
input_file_list = []
if single:
input_file_list += [((((directory + base) + z) + '_sin_') + imf) for z in z_list]
if binary:
z_list = list((set(z_list) - set(['zem4hmg', 'zem5hmg', 'z001hmg', 'z002hmg', 'z003hmg', 'z004hmg'])))
input_file_list += [((((directory + base) + z) + '_bin_') + imf) for z in z_list]
return input_file_list
|
Creates list of relevant input file
Parameters
----------
z_list
directory
binary
single
imf
Returns
-------
|
hoki/search.py
|
select_input_files
|
findesgh/hoki
| 0
|
python
|
def select_input_files(z_list, directory=OUTPUTS_PATH, binary=True, single=False, imf='imf135_300'):
'\n Creates list of relevant input file\n\n Parameters\n ----------\n z_list\n directory\n binary\n single\n imf\n\n Returns\n -------\n\n '
base = 'input_bpass_'
input_file_list = []
if single:
input_file_list += [((((directory + base) + z) + '_sin_') + imf) for z in z_list]
if binary:
z_list = list((set(z_list) - set(['zem4hmg', 'zem5hmg', 'z001hmg', 'z002hmg', 'z003hmg', 'z004hmg'])))
input_file_list += [((((directory + base) + z) + '_bin_') + imf) for z in z_list]
return input_file_list
|
def select_input_files(z_list, directory=OUTPUTS_PATH, binary=True, single=False, imf='imf135_300'):
'\n Creates list of relevant input file\n\n Parameters\n ----------\n z_list\n directory\n binary\n single\n imf\n\n Returns\n -------\n\n '
base = 'input_bpass_'
input_file_list = []
if single:
input_file_list += [((((directory + base) + z) + '_sin_') + imf) for z in z_list]
if binary:
z_list = list((set(z_list) - set(['zem4hmg', 'zem5hmg', 'z001hmg', 'z002hmg', 'z003hmg', 'z004hmg'])))
input_file_list += [((((directory + base) + z) + '_bin_') + imf) for z in z_list]
return input_file_list<|docstring|>Creates list of relevant input file
Parameters
----------
z_list
directory
binary
single
imf
Returns
-------<|endoftext|>
|
e2e6d49f71b62671ff9ab1d8a94b6933bea0627488f1eaa4157b609f51a3840d
|
def _tfrecord_path(save_dir):
'Get tfrecord path.'
data_prefix = 'data.{}'.format(FLAGS.split)
data_suffix = 'tfrecord-{:05d}-of-{:05d}'.format(FLAGS.task, FLAGS.num_task)
tfrecord_name = format_filename(prefix=data_prefix, suffix=data_suffix, uncased=FLAGS.uncased)
tfrecord_path = os.path.join(save_dir, tfrecord_name)
return tfrecord_path
|
Get tfrecord path.
|
pretrain/seq2seq_create_tfrecord.py
|
_tfrecord_path
|
laiguokun/transformer-xl
| 0
|
python
|
def _tfrecord_path(save_dir):
data_prefix = 'data.{}'.format(FLAGS.split)
data_suffix = 'tfrecord-{:05d}-of-{:05d}'.format(FLAGS.task, FLAGS.num_task)
tfrecord_name = format_filename(prefix=data_prefix, suffix=data_suffix, uncased=FLAGS.uncased)
tfrecord_path = os.path.join(save_dir, tfrecord_name)
return tfrecord_path
|
def _tfrecord_path(save_dir):
data_prefix = 'data.{}'.format(FLAGS.split)
data_suffix = 'tfrecord-{:05d}-of-{:05d}'.format(FLAGS.task, FLAGS.num_task)
tfrecord_name = format_filename(prefix=data_prefix, suffix=data_suffix, uncased=FLAGS.uncased)
tfrecord_path = os.path.join(save_dir, tfrecord_name)
return tfrecord_path<|docstring|>Get tfrecord path.<|endoftext|>
|
11f2edd2227e2de7c7bc4f02f31efa36e6f4995a6412d7114dcc7cd0aee0382f
|
def _meta_path(save_dir):
'Get meta path.'
meta_prefix = 'meta.{}'.format(FLAGS.split)
meta_suffix = 'json-{:05d}-of-{:05d}'.format(FLAGS.task, FLAGS.num_task)
meta_name = format_filename(prefix=meta_prefix, suffix=meta_suffix, uncased=FLAGS.uncased)
meta_path = os.path.join(save_dir, meta_name)
return meta_path
|
Get meta path.
|
pretrain/seq2seq_create_tfrecord.py
|
_meta_path
|
laiguokun/transformer-xl
| 0
|
python
|
def _meta_path(save_dir):
meta_prefix = 'meta.{}'.format(FLAGS.split)
meta_suffix = 'json-{:05d}-of-{:05d}'.format(FLAGS.task, FLAGS.num_task)
meta_name = format_filename(prefix=meta_prefix, suffix=meta_suffix, uncased=FLAGS.uncased)
meta_path = os.path.join(save_dir, meta_name)
return meta_path
|
def _meta_path(save_dir):
meta_prefix = 'meta.{}'.format(FLAGS.split)
meta_suffix = 'json-{:05d}-of-{:05d}'.format(FLAGS.task, FLAGS.num_task)
meta_name = format_filename(prefix=meta_prefix, suffix=meta_suffix, uncased=FLAGS.uncased)
meta_path = os.path.join(save_dir, meta_name)
return meta_path<|docstring|>Get meta path.<|endoftext|>
|
a2e3bd9a8800dfddbda4a860d53147087bb0f1df32d2483c109628208721903b
|
def _create_data(input_paths, src_tok, tgt_tok):
'Load data and call corresponding create_func.'
(num_src_tok, num_tgt_tok) = (0, 0)
num_example = 0
tfrecord_path = _tfrecord_path(FLAGS.save_dir)
record_writer = tf.io.TFRecordWriter(tfrecord_path)
tf.logging.info('Start writing tfrecord to %s.', tfrecord_path)
for input_path in input_paths:
line_cnt = 0
tf.logging.info('Start processing %s', input_path)
for line in tf.io.gfile.GFile(input_path, 'r'):
if ((line_cnt % 100000) == 0):
tf.logging.info('Loading line %d', line_cnt)
line = line.strip()
if line:
try:
(src, tgt) = line.split('\t')
except ValueError:
tf.logging.info('Skip %s', line)
continue
src_ids = src_tok.convert_text_to_ids(src)
tgt_ids = tgt_tok.convert_text_to_ids(tgt)
src_ids = (src_ids + [src_tok.eos_id])
tgt_ids = ([tgt_tok.eos_id] + tgt_ids)
num_src_tok += len(src_ids)
num_tgt_tok += len(tgt_ids)
feature = {'source': _int64_feature(src_ids), 'target': _int64_feature(tgt_ids)}
example = tf.train.Example(features=tf.train.Features(feature=feature))
record_writer.write(example.SerializeToString())
line_cnt += 1
num_example += 1
tf.logging.info('Finish %s with %d lines.', input_path, line_cnt)
record_writer.close()
tf.logging.info('[Task %d] #examples: %d; #tokens: src %d, tgt %d', FLAGS.task, num_example, num_src_tok, num_tgt_tok)
meta_info = {'filenames': [os.path.basename(tfrecord_path)], 'num_example': num_example}
meta_path = _meta_path(FLAGS.save_dir)
with tf.io.gfile.GFile(meta_path, 'w') as fp:
json.dump(meta_info, fp)
|
Load data and call corresponding create_func.
|
pretrain/seq2seq_create_tfrecord.py
|
_create_data
|
laiguokun/transformer-xl
| 0
|
python
|
def _create_data(input_paths, src_tok, tgt_tok):
(num_src_tok, num_tgt_tok) = (0, 0)
num_example = 0
tfrecord_path = _tfrecord_path(FLAGS.save_dir)
record_writer = tf.io.TFRecordWriter(tfrecord_path)
tf.logging.info('Start writing tfrecord to %s.', tfrecord_path)
for input_path in input_paths:
line_cnt = 0
tf.logging.info('Start processing %s', input_path)
for line in tf.io.gfile.GFile(input_path, 'r'):
if ((line_cnt % 100000) == 0):
tf.logging.info('Loading line %d', line_cnt)
line = line.strip()
if line:
try:
(src, tgt) = line.split('\t')
except ValueError:
tf.logging.info('Skip %s', line)
continue
src_ids = src_tok.convert_text_to_ids(src)
tgt_ids = tgt_tok.convert_text_to_ids(tgt)
src_ids = (src_ids + [src_tok.eos_id])
tgt_ids = ([tgt_tok.eos_id] + tgt_ids)
num_src_tok += len(src_ids)
num_tgt_tok += len(tgt_ids)
feature = {'source': _int64_feature(src_ids), 'target': _int64_feature(tgt_ids)}
example = tf.train.Example(features=tf.train.Features(feature=feature))
record_writer.write(example.SerializeToString())
line_cnt += 1
num_example += 1
tf.logging.info('Finish %s with %d lines.', input_path, line_cnt)
record_writer.close()
tf.logging.info('[Task %d] #examples: %d; #tokens: src %d, tgt %d', FLAGS.task, num_example, num_src_tok, num_tgt_tok)
meta_info = {'filenames': [os.path.basename(tfrecord_path)], 'num_example': num_example}
meta_path = _meta_path(FLAGS.save_dir)
with tf.io.gfile.GFile(meta_path, 'w') as fp:
json.dump(meta_info, fp)
|
def _create_data(input_paths, src_tok, tgt_tok):
(num_src_tok, num_tgt_tok) = (0, 0)
num_example = 0
tfrecord_path = _tfrecord_path(FLAGS.save_dir)
record_writer = tf.io.TFRecordWriter(tfrecord_path)
tf.logging.info('Start writing tfrecord to %s.', tfrecord_path)
for input_path in input_paths:
line_cnt = 0
tf.logging.info('Start processing %s', input_path)
for line in tf.io.gfile.GFile(input_path, 'r'):
if ((line_cnt % 100000) == 0):
tf.logging.info('Loading line %d', line_cnt)
line = line.strip()
if line:
try:
(src, tgt) = line.split('\t')
except ValueError:
tf.logging.info('Skip %s', line)
continue
src_ids = src_tok.convert_text_to_ids(src)
tgt_ids = tgt_tok.convert_text_to_ids(tgt)
src_ids = (src_ids + [src_tok.eos_id])
tgt_ids = ([tgt_tok.eos_id] + tgt_ids)
num_src_tok += len(src_ids)
num_tgt_tok += len(tgt_ids)
feature = {'source': _int64_feature(src_ids), 'target': _int64_feature(tgt_ids)}
example = tf.train.Example(features=tf.train.Features(feature=feature))
record_writer.write(example.SerializeToString())
line_cnt += 1
num_example += 1
tf.logging.info('Finish %s with %d lines.', input_path, line_cnt)
record_writer.close()
tf.logging.info('[Task %d] #examples: %d; #tokens: src %d, tgt %d', FLAGS.task, num_example, num_src_tok, num_tgt_tok)
meta_info = {'filenames': [os.path.basename(tfrecord_path)], 'num_example': num_example}
meta_path = _meta_path(FLAGS.save_dir)
with tf.io.gfile.GFile(meta_path, 'w') as fp:
json.dump(meta_info, fp)<|docstring|>Load data and call corresponding create_func.<|endoftext|>
|
dd89d699f7a1efd1cab1cd1a1fec8fb1d25522e3a4222b9c082480631f1a3479
|
def main(_):
'create pretraining data (tfrecords).'
tokenizer = get_tokenizer()
if (not tf.io.gfile.exists(FLAGS.save_dir)):
tf.io.gfile.makedirs(FLAGS.save_dir)
file_paths = sorted(tf.io.gfile.glob(FLAGS.input_glob))
tf.logging.info('Use glob: %s', FLAGS.input_glob)
tf.logging.info('Find %d files: %s', len(file_paths), file_paths)
task_file_paths = file_paths[FLAGS.task::FLAGS.num_task]
if (not task_file_paths):
tf.logging.info('Exit: task %d has no file to process.', FLAGS.task)
return
tf.logging.info('Task %d process %d files: %s', FLAGS.task, len(task_file_paths), task_file_paths)
_create_data(task_file_paths, src_tok=tokenizer, tgt_tok=tokenizer)
|
create pretraining data (tfrecords).
|
pretrain/seq2seq_create_tfrecord.py
|
main
|
laiguokun/transformer-xl
| 0
|
python
|
def main(_):
tokenizer = get_tokenizer()
if (not tf.io.gfile.exists(FLAGS.save_dir)):
tf.io.gfile.makedirs(FLAGS.save_dir)
file_paths = sorted(tf.io.gfile.glob(FLAGS.input_glob))
tf.logging.info('Use glob: %s', FLAGS.input_glob)
tf.logging.info('Find %d files: %s', len(file_paths), file_paths)
task_file_paths = file_paths[FLAGS.task::FLAGS.num_task]
if (not task_file_paths):
tf.logging.info('Exit: task %d has no file to process.', FLAGS.task)
return
tf.logging.info('Task %d process %d files: %s', FLAGS.task, len(task_file_paths), task_file_paths)
_create_data(task_file_paths, src_tok=tokenizer, tgt_tok=tokenizer)
|
def main(_):
tokenizer = get_tokenizer()
if (not tf.io.gfile.exists(FLAGS.save_dir)):
tf.io.gfile.makedirs(FLAGS.save_dir)
file_paths = sorted(tf.io.gfile.glob(FLAGS.input_glob))
tf.logging.info('Use glob: %s', FLAGS.input_glob)
tf.logging.info('Find %d files: %s', len(file_paths), file_paths)
task_file_paths = file_paths[FLAGS.task::FLAGS.num_task]
if (not task_file_paths):
tf.logging.info('Exit: task %d has no file to process.', FLAGS.task)
return
tf.logging.info('Task %d process %d files: %s', FLAGS.task, len(task_file_paths), task_file_paths)
_create_data(task_file_paths, src_tok=tokenizer, tgt_tok=tokenizer)<|docstring|>create pretraining data (tfrecords).<|endoftext|>
|
9bb1b2a9436426ad9cd5fbfb93617806c0da10b343f8b8192425d649d0a9f597
|
def close(self):
'Close the connection to the database.'
self.connection.close()
|
Close the connection to the database.
|
wikipedia_preprocess/doc_db.py
|
close
|
AkariAsai/CORA
| 43
|
python
|
def close(self):
self.connection.close()
|
def close(self):
self.connection.close()<|docstring|>Close the connection to the database.<|endoftext|>
|
96aa5b4c40ba3ccfaca6e3d7ee9ff304e79610dac86e880e1543265d3e898922
|
def get_doc_ids(self):
'Fetch all ids of docs stored in the db.'
cursor = self.connection.cursor()
cursor.execute('SELECT id FROM documents')
results = [r[0] for r in cursor.fetchall()]
cursor.close()
return results
|
Fetch all ids of docs stored in the db.
|
wikipedia_preprocess/doc_db.py
|
get_doc_ids
|
AkariAsai/CORA
| 43
|
python
|
def get_doc_ids(self):
cursor = self.connection.cursor()
cursor.execute('SELECT id FROM documents')
results = [r[0] for r in cursor.fetchall()]
cursor.close()
return results
|
def get_doc_ids(self):
cursor = self.connection.cursor()
cursor.execute('SELECT id FROM documents')
results = [r[0] for r in cursor.fetchall()]
cursor.close()
return results<|docstring|>Fetch all ids of docs stored in the db.<|endoftext|>
|
dfc6ae0efaf349bd6bca60e66e4e0ef19c59683e15c9ed0fd92f76377fac7106
|
def get_doc_ids_lang(self, lang):
'Fetch all ids of docs stored in the db.'
cursor = self.connection.cursor()
cursor.execute('SELECT id FROM documents where lang = ?', (lang,))
results = [r[0] for r in cursor.fetchall()]
cursor.close()
return results
|
Fetch all ids of docs stored in the db.
|
wikipedia_preprocess/doc_db.py
|
get_doc_ids_lang
|
AkariAsai/CORA
| 43
|
python
|
def get_doc_ids_lang(self, lang):
cursor = self.connection.cursor()
cursor.execute('SELECT id FROM documents where lang = ?', (lang,))
results = [r[0] for r in cursor.fetchall()]
cursor.close()
return results
|
def get_doc_ids_lang(self, lang):
cursor = self.connection.cursor()
cursor.execute('SELECT id FROM documents where lang = ?', (lang,))
results = [r[0] for r in cursor.fetchall()]
cursor.close()
return results<|docstring|>Fetch all ids of docs stored in the db.<|endoftext|>
|
9a9989a6b4ba74d1870c94b82d3aa39429f14e23764e25d5c52a5624ddf22da4
|
def get_doc_text(self, doc_id):
"Fetch the raw text of the doc for 'doc_id'."
cursor = self.connection.cursor()
cursor.execute('SELECT text FROM documents WHERE id = ?', (doc_id,))
result = cursor.fetchone()
cursor.close()
return (result if (result is None) else result[0])
|
Fetch the raw text of the doc for 'doc_id'.
|
wikipedia_preprocess/doc_db.py
|
get_doc_text
|
AkariAsai/CORA
| 43
|
python
|
def get_doc_text(self, doc_id):
cursor = self.connection.cursor()
cursor.execute('SELECT text FROM documents WHERE id = ?', (doc_id,))
result = cursor.fetchone()
cursor.close()
return (result if (result is None) else result[0])
|
def get_doc_text(self, doc_id):
cursor = self.connection.cursor()
cursor.execute('SELECT text FROM documents WHERE id = ?', (doc_id,))
result = cursor.fetchone()
cursor.close()
return (result if (result is None) else result[0])<|docstring|>Fetch the raw text of the doc for 'doc_id'.<|endoftext|>
|
a58e18b143c856461d960f97402fdb0b01d8d8cbe65f6431f4e17817e7c367dd
|
def get_doc_text_section_separations(self, doc_id):
'\n fetch all of the paragraphs with section level separations\n e.g., \n >>> sectioned_paragraphs = db.get_doc_text_hyper_linked_titles_for_articles("Tokyo Imperial Palace_0")\n >>> sectioned_paragraphs[0]\n {"section_name":"Early life and sumo background.", \n "parent_section_name": None:,\n "paragraphs": ["Tatsu Ryōya was born in Kanazawa, Ishikawa and is the youngest of three children. \n His father was a truck driver. He was a normal-sized baby but grew quickly so that when \n attending kindergarten he had difficulty fitting into the uniform. He first began \n practicing sumo whilst in the first grade of elementary school.",\n "By the age of thirteen, when he ended his \n first year at junior high school he stood , and weighed . \n After competing successfully in junior high school sumo he gave up formal education \n at the age of fifteen and entered the Takadagawa stable to pursue a professional career."\n "type": "child"}\n '
cursor = self.connection.cursor()
cursor.execute('SELECT text FROM documents WHERE id = ?', (doc_id,))
result = cursor.fetchone()
cursor.close()
if (result is None):
return []
else:
output_data = []
section_separated_context = result[0].split('Section::::')
parent_section = ''
for (s_idx, section) in enumerate(section_separated_context):
if ((s_idx == 0) and (len(section.split('\n\n')) > 1) and (len(section.split('\n\n')[1]) > 0)):
section_name = 'Introduction'
parent_section = 'Introduction'
output_data.append({'section_name': section_name, 'paragraphs': section.split('\n\n')[1:], 'type': 'intro', 'parent_section_name': parent_section})
else:
section_name = re.compile('(.*)\n').search(section).group(1)
section_text = re.sub('(.*)\n', '', section, 1)
if (len(section_text) == 0):
parent_section = section_name
output_data.append({'section_name': section_name, 'paragraphs': [], 'type': 'parent', 'parent_section_name': None})
else:
output_data.append({'section_name': section_name, 'paragraphs': [para for para in section_text.split('\n\n') if (len(para) > 10)], 'type': 'child', 'parent_section_name': parent_section})
return output_data
|
fetch all of the paragraphs with section level separations
e.g.,
>>> sectioned_paragraphs = db.get_doc_text_hyper_linked_titles_for_articles("Tokyo Imperial Palace_0")
>>> sectioned_paragraphs[0]
{"section_name":"Early life and sumo background.",
"parent_section_name": None:,
"paragraphs": ["Tatsu Ryōya was born in Kanazawa, Ishikawa and is the youngest of three children.
His father was a truck driver. He was a normal-sized baby but grew quickly so that when
attending kindergarten he had difficulty fitting into the uniform. He first began
practicing sumo whilst in the first grade of elementary school.",
"By the age of thirteen, when he ended his
first year at junior high school he stood , and weighed .
After competing successfully in junior high school sumo he gave up formal education
at the age of fifteen and entered the Takadagawa stable to pursue a professional career."
"type": "child"}
|
wikipedia_preprocess/doc_db.py
|
get_doc_text_section_separations
|
AkariAsai/CORA
| 43
|
python
|
def get_doc_text_section_separations(self, doc_id):
'\n fetch all of the paragraphs with section level separations\n e.g., \n >>> sectioned_paragraphs = db.get_doc_text_hyper_linked_titles_for_articles("Tokyo Imperial Palace_0")\n >>> sectioned_paragraphs[0]\n {"section_name":"Early life and sumo background.", \n "parent_section_name": None:,\n "paragraphs": ["Tatsu Ryōya was born in Kanazawa, Ishikawa and is the youngest of three children. \n His father was a truck driver. He was a normal-sized baby but grew quickly so that when \n attending kindergarten he had difficulty fitting into the uniform. He first began \n practicing sumo whilst in the first grade of elementary school.",\n "By the age of thirteen, when he ended his \n first year at junior high school he stood , and weighed . \n After competing successfully in junior high school sumo he gave up formal education \n at the age of fifteen and entered the Takadagawa stable to pursue a professional career."\n "type": "child"}\n '
cursor = self.connection.cursor()
cursor.execute('SELECT text FROM documents WHERE id = ?', (doc_id,))
result = cursor.fetchone()
cursor.close()
if (result is None):
return []
else:
output_data = []
section_separated_context = result[0].split('Section::::')
parent_section =
for (s_idx, section) in enumerate(section_separated_context):
if ((s_idx == 0) and (len(section.split('\n\n')) > 1) and (len(section.split('\n\n')[1]) > 0)):
section_name = 'Introduction'
parent_section = 'Introduction'
output_data.append({'section_name': section_name, 'paragraphs': section.split('\n\n')[1:], 'type': 'intro', 'parent_section_name': parent_section})
else:
section_name = re.compile('(.*)\n').search(section).group(1)
section_text = re.sub('(.*)\n', , section, 1)
if (len(section_text) == 0):
parent_section = section_name
output_data.append({'section_name': section_name, 'paragraphs': [], 'type': 'parent', 'parent_section_name': None})
else:
output_data.append({'section_name': section_name, 'paragraphs': [para for para in section_text.split('\n\n') if (len(para) > 10)], 'type': 'child', 'parent_section_name': parent_section})
return output_data
|
def get_doc_text_section_separations(self, doc_id):
'\n fetch all of the paragraphs with section level separations\n e.g., \n >>> sectioned_paragraphs = db.get_doc_text_hyper_linked_titles_for_articles("Tokyo Imperial Palace_0")\n >>> sectioned_paragraphs[0]\n {"section_name":"Early life and sumo background.", \n "parent_section_name": None:,\n "paragraphs": ["Tatsu Ryōya was born in Kanazawa, Ishikawa and is the youngest of three children. \n His father was a truck driver. He was a normal-sized baby but grew quickly so that when \n attending kindergarten he had difficulty fitting into the uniform. He first began \n practicing sumo whilst in the first grade of elementary school.",\n "By the age of thirteen, when he ended his \n first year at junior high school he stood , and weighed . \n After competing successfully in junior high school sumo he gave up formal education \n at the age of fifteen and entered the Takadagawa stable to pursue a professional career."\n "type": "child"}\n '
cursor = self.connection.cursor()
cursor.execute('SELECT text FROM documents WHERE id = ?', (doc_id,))
result = cursor.fetchone()
cursor.close()
if (result is None):
return []
else:
output_data = []
section_separated_context = result[0].split('Section::::')
parent_section =
for (s_idx, section) in enumerate(section_separated_context):
if ((s_idx == 0) and (len(section.split('\n\n')) > 1) and (len(section.split('\n\n')[1]) > 0)):
section_name = 'Introduction'
parent_section = 'Introduction'
output_data.append({'section_name': section_name, 'paragraphs': section.split('\n\n')[1:], 'type': 'intro', 'parent_section_name': parent_section})
else:
section_name = re.compile('(.*)\n').search(section).group(1)
section_text = re.sub('(.*)\n', , section, 1)
if (len(section_text) == 0):
parent_section = section_name
output_data.append({'section_name': section_name, 'paragraphs': [], 'type': 'parent', 'parent_section_name': None})
else:
output_data.append({'section_name': section_name, 'paragraphs': [para for para in section_text.split('\n\n') if (len(para) > 10)], 'type': 'child', 'parent_section_name': parent_section})
return output_data<|docstring|>fetch all of the paragraphs with section level separations
e.g.,
>>> sectioned_paragraphs = db.get_doc_text_hyper_linked_titles_for_articles("Tokyo Imperial Palace_0")
>>> sectioned_paragraphs[0]
{"section_name":"Early life and sumo background.",
"parent_section_name": None:,
"paragraphs": ["Tatsu Ryōya was born in Kanazawa, Ishikawa and is the youngest of three children.
His father was a truck driver. He was a normal-sized baby but grew quickly so that when
attending kindergarten he had difficulty fitting into the uniform. He first began
practicing sumo whilst in the first grade of elementary school.",
"By the age of thirteen, when he ended his
first year at junior high school he stood , and weighed .
After competing successfully in junior high school sumo he gave up formal education
at the age of fifteen and entered the Takadagawa stable to pursue a professional career."
"type": "child"}<|endoftext|>
|
694f3288807072836b2be16febbf6cf8ffd8f94673d837aaf9a366217993f902
|
def test_interface(self):
'\n Main test part\n '
self.assertTrue(ad.map.access.init('test_files/TPK.adm.txt'))
lanes = ad.map.lane.getLanes()
self.assertEqual(len(lanes), 141)
mapMatching = ad.map.match.AdMapMatching()
geoPoint = ad.map.point.GeoPoint()
geoPoint.longitude = ad.map.point.Longitude(8.4401803)
geoPoint.latitude = ad.map.point.Latitude(49.0191987)
geoPoint.altitude = ad.map.point.Altitude(0.0)
mapMatchingResults = mapMatching.getMapMatchedPositions(geoPoint, ad.physics.Distance(0.01), ad.physics.Probability(0.05))
self.assertEqual(len(mapMatchingResults), 1)
routingStart = mapMatchingResults[0].lanePoint.paraPoint
routingEnd = ad.map.point.ParaPoint()
routingEnd.laneId = routingStart.laneId
routingEnd.parametricOffset = ad.physics.ParametricValue(0.0)
routeResult = ad.map.route.planRoute(ad.map.route.createRoutingPoint(routingStart), ad.map.route.createRoutingPoint(routingEnd))
routeLength = ad.map.route.calcLength(routeResult.roadSegments[0])
self.assertEqual(int(float(routeLength)), 4)
ad.map.access.cleanup()
|
Main test part
|
ad_map_access/python/tests/interface_test.py
|
test_interface
|
woojinjjang/map-1
| 61
|
python
|
def test_interface(self):
'\n \n '
self.assertTrue(ad.map.access.init('test_files/TPK.adm.txt'))
lanes = ad.map.lane.getLanes()
self.assertEqual(len(lanes), 141)
mapMatching = ad.map.match.AdMapMatching()
geoPoint = ad.map.point.GeoPoint()
geoPoint.longitude = ad.map.point.Longitude(8.4401803)
geoPoint.latitude = ad.map.point.Latitude(49.0191987)
geoPoint.altitude = ad.map.point.Altitude(0.0)
mapMatchingResults = mapMatching.getMapMatchedPositions(geoPoint, ad.physics.Distance(0.01), ad.physics.Probability(0.05))
self.assertEqual(len(mapMatchingResults), 1)
routingStart = mapMatchingResults[0].lanePoint.paraPoint
routingEnd = ad.map.point.ParaPoint()
routingEnd.laneId = routingStart.laneId
routingEnd.parametricOffset = ad.physics.ParametricValue(0.0)
routeResult = ad.map.route.planRoute(ad.map.route.createRoutingPoint(routingStart), ad.map.route.createRoutingPoint(routingEnd))
routeLength = ad.map.route.calcLength(routeResult.roadSegments[0])
self.assertEqual(int(float(routeLength)), 4)
ad.map.access.cleanup()
|
def test_interface(self):
'\n \n '
self.assertTrue(ad.map.access.init('test_files/TPK.adm.txt'))
lanes = ad.map.lane.getLanes()
self.assertEqual(len(lanes), 141)
mapMatching = ad.map.match.AdMapMatching()
geoPoint = ad.map.point.GeoPoint()
geoPoint.longitude = ad.map.point.Longitude(8.4401803)
geoPoint.latitude = ad.map.point.Latitude(49.0191987)
geoPoint.altitude = ad.map.point.Altitude(0.0)
mapMatchingResults = mapMatching.getMapMatchedPositions(geoPoint, ad.physics.Distance(0.01), ad.physics.Probability(0.05))
self.assertEqual(len(mapMatchingResults), 1)
routingStart = mapMatchingResults[0].lanePoint.paraPoint
routingEnd = ad.map.point.ParaPoint()
routingEnd.laneId = routingStart.laneId
routingEnd.parametricOffset = ad.physics.ParametricValue(0.0)
routeResult = ad.map.route.planRoute(ad.map.route.createRoutingPoint(routingStart), ad.map.route.createRoutingPoint(routingEnd))
routeLength = ad.map.route.calcLength(routeResult.roadSegments[0])
self.assertEqual(int(float(routeLength)), 4)
ad.map.access.cleanup()<|docstring|>Main test part<|endoftext|>
|
322fb53d02657bd528e25370637b3a1ec9352699b1030c5b3e07391787766701
|
def get_model_for_problem_formulation(problem_formulation_id):
' Prepare DikeNetwork in a way it can be input in the EMA-workbench.\n Specify uncertainties, levers and problem formulation.\n '
function = DikeNetwork()
dike_model = Model('dikesnet', function=function)
Real_uncert = {'Bmax': [30, 350], 'pfail': [0, 1]}
cat_uncert_loc = {'Brate': (1.0, 1.5, 10)}
cat_uncert = {'discount rate {}'.format(n): (1.5, 2.5, 3.5, 4.5) for n in function.planning_steps}
Int_uncert = {'A.0_ID flood wave shape': [0, 132]}
dike_lev = {'DikeIncrease': [0, 10]}
rfr_lev = ['{}_RfR'.format(project_id) for project_id in range(0, 5)]
EWS_lev = {'EWS_DaysToThreat': [0, 4]}
uncertainties = []
levers = []
for uncert_name in cat_uncert.keys():
categories = cat_uncert[uncert_name]
uncertainties.append(CategoricalParameter(uncert_name, categories))
for uncert_name in Int_uncert.keys():
uncertainties.append(IntegerParameter(uncert_name, Int_uncert[uncert_name][0], Int_uncert[uncert_name][1]))
for lev_name in rfr_lev:
for n in function.planning_steps:
lev_name_ = '{} {}'.format(lev_name, n)
levers.append(IntegerParameter(lev_name_, 0, 1))
for lev_name in EWS_lev.keys():
levers.append(IntegerParameter(lev_name, EWS_lev[lev_name][0], EWS_lev[lev_name][1]))
for dike in function.dikelist:
for uncert_name in Real_uncert.keys():
name = '{}_{}'.format(dike, uncert_name)
(lower, upper) = Real_uncert[uncert_name]
uncertainties.append(RealParameter(name, lower, upper))
for uncert_name in cat_uncert_loc.keys():
name = '{}_{}'.format(dike, uncert_name)
categories = cat_uncert_loc[uncert_name]
uncertainties.append(CategoricalParameter(name, categories))
for lev_name in dike_lev.keys():
for n in function.planning_steps:
name = '{}_{} {}'.format(dike, lev_name, n)
levers.append(IntegerParameter(name, dike_lev[lev_name][0], dike_lev[lev_name][1]))
dike_model.uncertainties = uncertainties
dike_model.levers = levers
direction = ScalarOutcome.MINIMIZE
if (problem_formulation_id == 0):
variable_names = []
variable_names_ = []
for n in function.planning_steps:
variable_names.extend(['{}_{} {}'.format(dike, e, n) for e in ['Expected Annual Damage', 'Dike Investment Costs'] for dike in function.dikelist])
variable_names_.extend(['{}_{} {}'.format(dike, e, n) for e in ['Expected Number of Deaths'] for dike in function.dikelist])
variable_names.extend(['RfR Total Costs {}'.format(n)])
variable_names.extend(['Expected Evacuation Costs {}'.format(n)])
dike_model.outcomes = [ScalarOutcome('All Costs', variable_name=[var for var in variable_names], function=sum_over, kind=direction), ScalarOutcome('Expected Number of Deaths', variable_name=[var for var in variable_names_], function=sum_over, kind=direction)]
elif (problem_formulation_id == 1):
variable_names = []
variable_names_ = []
variable_names__ = []
for n in function.planning_steps:
variable_names.extend(['{}_Expected Annual Damage {}'.format(dike, n) for dike in function.dikelist])
variable_names_.extend(((['{}_Dike Investment Costs {}'.format(dike, n) for dike in function.dikelist] + ['RfR Total Costs {}'.format(n)]) + ['Expected Evacuation Costs {}'.format(n)]))
variable_names__.extend(['{}_Expected Number of Deaths {}'.format(dike, n) for dike in function.dikelist])
dike_model.outcomes = [ScalarOutcome('Expected Annual Damage', variable_name=[var for var in variable_names], function=sum_over, kind=direction), ScalarOutcome('Total Investment Costs', variable_name=[var for var in variable_names_], function=sum_over, kind=direction), ScalarOutcome('Expected Number of Deaths', variable_name=[var for var in variable_names__], function=sum_over, kind=direction)]
elif (problem_formulation_id == 2):
variable_names = []
variable_names_ = []
variable_names__ = []
variable_names___ = []
variable_names____ = []
for n in function.planning_steps:
variable_names.extend(['{}_Expected Annual Damage {}'.format(dike, n) for dike in function.dikelist])
variable_names_.extend(['{}_Dike Investment Costs {}'.format(dike, n) for dike in function.dikelist])
variable_names__.extend(['RfR Total Costs {}'.format(n)])
variable_names___.extend(['Expected Evacuation Costs {}'.format(n)])
variable_names____.extend(['{}_Expected Number of Deaths {}'.format(dike, n) for dike in function.dikelist])
dike_model.outcomes = [ScalarOutcome('Expected Annual Damage', variable_name=[var for var in variable_names], function=sum_over, kind=direction), ScalarOutcome('Dike Investment Costs', variable_name=[var for var in variable_names_], function=sum_over, kind=direction), ScalarOutcome('RfR Investment Costs', variable_name=[var for var in variable_names__], function=sum_over, kind=direction), ScalarOutcome('Evacuation Costs', variable_name=[var for var in variable_names___], function=sum_over, kind=direction), ScalarOutcome('Expected Number of Deaths', variable_name=[var for var in variable_names____], function=sum_over, kind=direction)]
elif (problem_formulation_id == 3):
outcomes = []
for dike in function.dikelist:
variable_name = []
for e in ['Expected Annual Damage', 'Dike Investment Costs']:
variable_name.extend(['{}_{} {}'.format(dike, e, n) for n in function.planning_steps])
outcomes.append(ScalarOutcome('{} Total Costs'.format(dike), variable_name=[var for var in variable_name], function=sum_over, kind=direction))
outcomes.append(ScalarOutcome('{}_Expected Number of Deaths'.format(dike), variable_name=['{}_Expected Number of Deaths {}'.format(dike, n) for n in function.planning_steps], function=sum_over, kind=direction))
outcomes.append(ScalarOutcome('RfR Total Costs', variable_name=['RfR Total Costs {}'.format(n) for n in function.planning_steps], function=sum_over, kind=direction))
outcomes.append(ScalarOutcome('Expected Evacuation Costs', variable_name=['Expected Evacuation Costs {}'.format(n) for n in function.planning_steps], function=sum_over, kind=direction))
dike_model.outcomes = outcomes
elif (problem_formulation_id == 4):
outcomes = []
for n in function.planning_steps:
for dike in function.dikelist:
outcomes.append(ScalarOutcome('Expected Annual Damage {}'.format(n), variable_name=['{}_Expected Annual Damage {}'.format(dike, n) for dike in function.dikelist], function=sum_over, kind=direction))
outcomes.append(ScalarOutcome('Dike Investment Costs {}'.format(n), variable_name=['{}_Dike Investment Costs {}'.format(dike, n) for dike in function.dikelist], function=sum_over, kind=direction))
outcomes.append(ScalarOutcome('Expected Number of Deaths {}'.format(n), variable_name=['{}_Expected Number of Deaths {}'.format(dike, n) for dike in function.dikelist], function=sum_over, kind=direction))
outcomes.append(ScalarOutcome('RfR Total Costs {}'.format(n), kind=direction))
outcomes.append(ScalarOutcome('Expected Evacuation Costs {}'.format(n), kind=direction))
dike_model.outcomes = outcomes
elif (problem_formulation_id == 5):
outcomes = []
for n in function.planning_steps:
for dike in function.dikelist:
for entry in ['Expected Annual Damage', 'Dike Investment Costs', 'Expected Number of Deaths']:
o = ScalarOutcome('{}_{} {}'.format(dike, entry, n), kind=direction)
outcomes.append(o)
outcomes.append(ScalarOutcome('RfR Total Costs {}'.format(n), kind=direction))
outcomes.append(ScalarOutcome('Expected Evacuation Costs {}'.format(n), kind=direction))
dike_model.outcomes = outcomes
else:
raise TypeError('unknownx identifier')
return (dike_model, function.planning_steps)
|
Prepare DikeNetwork in a way it can be input in the EMA-workbench.
Specify uncertainties, levers and problem formulation.
|
model/problem_formulation.py
|
get_model_for_problem_formulation
|
WKSu/Model-based-decision-making
| 1
|
python
|
def get_model_for_problem_formulation(problem_formulation_id):
' Prepare DikeNetwork in a way it can be input in the EMA-workbench.\n Specify uncertainties, levers and problem formulation.\n '
function = DikeNetwork()
dike_model = Model('dikesnet', function=function)
Real_uncert = {'Bmax': [30, 350], 'pfail': [0, 1]}
cat_uncert_loc = {'Brate': (1.0, 1.5, 10)}
cat_uncert = {'discount rate {}'.format(n): (1.5, 2.5, 3.5, 4.5) for n in function.planning_steps}
Int_uncert = {'A.0_ID flood wave shape': [0, 132]}
dike_lev = {'DikeIncrease': [0, 10]}
rfr_lev = ['{}_RfR'.format(project_id) for project_id in range(0, 5)]
EWS_lev = {'EWS_DaysToThreat': [0, 4]}
uncertainties = []
levers = []
for uncert_name in cat_uncert.keys():
categories = cat_uncert[uncert_name]
uncertainties.append(CategoricalParameter(uncert_name, categories))
for uncert_name in Int_uncert.keys():
uncertainties.append(IntegerParameter(uncert_name, Int_uncert[uncert_name][0], Int_uncert[uncert_name][1]))
for lev_name in rfr_lev:
for n in function.planning_steps:
lev_name_ = '{} {}'.format(lev_name, n)
levers.append(IntegerParameter(lev_name_, 0, 1))
for lev_name in EWS_lev.keys():
levers.append(IntegerParameter(lev_name, EWS_lev[lev_name][0], EWS_lev[lev_name][1]))
for dike in function.dikelist:
for uncert_name in Real_uncert.keys():
name = '{}_{}'.format(dike, uncert_name)
(lower, upper) = Real_uncert[uncert_name]
uncertainties.append(RealParameter(name, lower, upper))
for uncert_name in cat_uncert_loc.keys():
name = '{}_{}'.format(dike, uncert_name)
categories = cat_uncert_loc[uncert_name]
uncertainties.append(CategoricalParameter(name, categories))
for lev_name in dike_lev.keys():
for n in function.planning_steps:
name = '{}_{} {}'.format(dike, lev_name, n)
levers.append(IntegerParameter(name, dike_lev[lev_name][0], dike_lev[lev_name][1]))
dike_model.uncertainties = uncertainties
dike_model.levers = levers
direction = ScalarOutcome.MINIMIZE
if (problem_formulation_id == 0):
variable_names = []
variable_names_ = []
for n in function.planning_steps:
variable_names.extend(['{}_{} {}'.format(dike, e, n) for e in ['Expected Annual Damage', 'Dike Investment Costs'] for dike in function.dikelist])
variable_names_.extend(['{}_{} {}'.format(dike, e, n) for e in ['Expected Number of Deaths'] for dike in function.dikelist])
variable_names.extend(['RfR Total Costs {}'.format(n)])
variable_names.extend(['Expected Evacuation Costs {}'.format(n)])
dike_model.outcomes = [ScalarOutcome('All Costs', variable_name=[var for var in variable_names], function=sum_over, kind=direction), ScalarOutcome('Expected Number of Deaths', variable_name=[var for var in variable_names_], function=sum_over, kind=direction)]
elif (problem_formulation_id == 1):
variable_names = []
variable_names_ = []
variable_names__ = []
for n in function.planning_steps:
variable_names.extend(['{}_Expected Annual Damage {}'.format(dike, n) for dike in function.dikelist])
variable_names_.extend(((['{}_Dike Investment Costs {}'.format(dike, n) for dike in function.dikelist] + ['RfR Total Costs {}'.format(n)]) + ['Expected Evacuation Costs {}'.format(n)]))
variable_names__.extend(['{}_Expected Number of Deaths {}'.format(dike, n) for dike in function.dikelist])
dike_model.outcomes = [ScalarOutcome('Expected Annual Damage', variable_name=[var for var in variable_names], function=sum_over, kind=direction), ScalarOutcome('Total Investment Costs', variable_name=[var for var in variable_names_], function=sum_over, kind=direction), ScalarOutcome('Expected Number of Deaths', variable_name=[var for var in variable_names__], function=sum_over, kind=direction)]
elif (problem_formulation_id == 2):
variable_names = []
variable_names_ = []
variable_names__ = []
variable_names___ = []
variable_names____ = []
for n in function.planning_steps:
variable_names.extend(['{}_Expected Annual Damage {}'.format(dike, n) for dike in function.dikelist])
variable_names_.extend(['{}_Dike Investment Costs {}'.format(dike, n) for dike in function.dikelist])
variable_names__.extend(['RfR Total Costs {}'.format(n)])
variable_names___.extend(['Expected Evacuation Costs {}'.format(n)])
variable_names____.extend(['{}_Expected Number of Deaths {}'.format(dike, n) for dike in function.dikelist])
dike_model.outcomes = [ScalarOutcome('Expected Annual Damage', variable_name=[var for var in variable_names], function=sum_over, kind=direction), ScalarOutcome('Dike Investment Costs', variable_name=[var for var in variable_names_], function=sum_over, kind=direction), ScalarOutcome('RfR Investment Costs', variable_name=[var for var in variable_names__], function=sum_over, kind=direction), ScalarOutcome('Evacuation Costs', variable_name=[var for var in variable_names___], function=sum_over, kind=direction), ScalarOutcome('Expected Number of Deaths', variable_name=[var for var in variable_names____], function=sum_over, kind=direction)]
elif (problem_formulation_id == 3):
outcomes = []
for dike in function.dikelist:
variable_name = []
for e in ['Expected Annual Damage', 'Dike Investment Costs']:
variable_name.extend(['{}_{} {}'.format(dike, e, n) for n in function.planning_steps])
outcomes.append(ScalarOutcome('{} Total Costs'.format(dike), variable_name=[var for var in variable_name], function=sum_over, kind=direction))
outcomes.append(ScalarOutcome('{}_Expected Number of Deaths'.format(dike), variable_name=['{}_Expected Number of Deaths {}'.format(dike, n) for n in function.planning_steps], function=sum_over, kind=direction))
outcomes.append(ScalarOutcome('RfR Total Costs', variable_name=['RfR Total Costs {}'.format(n) for n in function.planning_steps], function=sum_over, kind=direction))
outcomes.append(ScalarOutcome('Expected Evacuation Costs', variable_name=['Expected Evacuation Costs {}'.format(n) for n in function.planning_steps], function=sum_over, kind=direction))
dike_model.outcomes = outcomes
elif (problem_formulation_id == 4):
outcomes = []
for n in function.planning_steps:
for dike in function.dikelist:
outcomes.append(ScalarOutcome('Expected Annual Damage {}'.format(n), variable_name=['{}_Expected Annual Damage {}'.format(dike, n) for dike in function.dikelist], function=sum_over, kind=direction))
outcomes.append(ScalarOutcome('Dike Investment Costs {}'.format(n), variable_name=['{}_Dike Investment Costs {}'.format(dike, n) for dike in function.dikelist], function=sum_over, kind=direction))
outcomes.append(ScalarOutcome('Expected Number of Deaths {}'.format(n), variable_name=['{}_Expected Number of Deaths {}'.format(dike, n) for dike in function.dikelist], function=sum_over, kind=direction))
outcomes.append(ScalarOutcome('RfR Total Costs {}'.format(n), kind=direction))
outcomes.append(ScalarOutcome('Expected Evacuation Costs {}'.format(n), kind=direction))
dike_model.outcomes = outcomes
elif (problem_formulation_id == 5):
outcomes = []
for n in function.planning_steps:
for dike in function.dikelist:
for entry in ['Expected Annual Damage', 'Dike Investment Costs', 'Expected Number of Deaths']:
o = ScalarOutcome('{}_{} {}'.format(dike, entry, n), kind=direction)
outcomes.append(o)
outcomes.append(ScalarOutcome('RfR Total Costs {}'.format(n), kind=direction))
outcomes.append(ScalarOutcome('Expected Evacuation Costs {}'.format(n), kind=direction))
dike_model.outcomes = outcomes
else:
raise TypeError('unknownx identifier')
return (dike_model, function.planning_steps)
|
def get_model_for_problem_formulation(problem_formulation_id):
    """Prepare DikeNetwork so it can be used with the EMA workbench.

    Instantiates the dike model, attaches its uncertainties and levers, and
    installs the outcome set selected by ``problem_formulation_id``:

      0 - 2 objectives: all costs aggregated vs. total expected deaths
      1 - 3 objectives: expected damage, total investment costs, deaths
      2 - 5 objectives: each cost category separately plus deaths
      3 - disaggregated over dike rings, aggregated over planning steps
      4 - aggregated over dike rings, disaggregated over planning steps
      5 - fully disaggregated: one outcome per ring, metric and step

    Parameters
    ----------
    problem_formulation_id : int
        Identifier in 0-5 selecting the outcome formulation.

    Returns
    -------
    tuple
        ``(dike_model, planning_steps)``.

    Raises
    ------
    TypeError
        If ``problem_formulation_id`` is not one of 0-5.
    """
    function = DikeNetwork()
    dike_model = Model('dikesnet', function=function)

    # Uncertainties: breach-growth parameters per ring, a discount rate per
    # planning step, and the inflow flood-wave shape id.
    Real_uncert = {'Bmax': [30, 350], 'pfail': [0, 1]}
    cat_uncert_loc = {'Brate': (1.0, 1.5, 10)}
    cat_uncert = {'discount rate {}'.format(n): (1.5, 2.5, 3.5, 4.5)
                  for n in function.planning_steps}
    Int_uncert = {'A.0_ID flood wave shape': [0, 132]}

    # Levers: dike heightening per ring/step, binary room-for-river project
    # decisions, and early-warning lead time (days).
    dike_lev = {'DikeIncrease': [0, 10]}
    rfr_lev = ['{}_RfR'.format(project_id) for project_id in range(0, 5)]
    EWS_lev = {'EWS_DaysToThreat': [0, 4]}

    uncertainties = []
    levers = []
    for uncert_name in cat_uncert.keys():
        categories = cat_uncert[uncert_name]
        uncertainties.append(CategoricalParameter(uncert_name, categories))
    for uncert_name in Int_uncert.keys():
        uncertainties.append(IntegerParameter(uncert_name,
                                              Int_uncert[uncert_name][0],
                                              Int_uncert[uncert_name][1]))
    # One binary RfR lever per project per planning step.
    for lev_name in rfr_lev:
        for n in function.planning_steps:
            lev_name_ = '{} {}'.format(lev_name, n)
            levers.append(IntegerParameter(lev_name_, 0, 1))
    for lev_name in EWS_lev.keys():
        levers.append(IntegerParameter(lev_name,
                                       EWS_lev[lev_name][0],
                                       EWS_lev[lev_name][1]))
    # Ring-specific uncertainties and heightening levers.
    for dike in function.dikelist:
        for uncert_name in Real_uncert.keys():
            name = '{}_{}'.format(dike, uncert_name)
            (lower, upper) = Real_uncert[uncert_name]
            uncertainties.append(RealParameter(name, lower, upper))
        for uncert_name in cat_uncert_loc.keys():
            name = '{}_{}'.format(dike, uncert_name)
            categories = cat_uncert_loc[uncert_name]
            uncertainties.append(CategoricalParameter(name, categories))
        for lev_name in dike_lev.keys():
            for n in function.planning_steps:
                name = '{}_{} {}'.format(dike, lev_name, n)
                levers.append(IntegerParameter(name,
                                               dike_lev[lev_name][0],
                                               dike_lev[lev_name][1]))
    dike_model.uncertainties = uncertainties
    dike_model.levers = levers

    direction = ScalarOutcome.MINIMIZE
    if problem_formulation_id == 0:
        # 2-objective formulation: lump every cost stream into 'All Costs'.
        variable_names = []
        variable_names_ = []
        for n in function.planning_steps:
            variable_names.extend(['{}_{} {}'.format(dike, e, n)
                                   for e in ['Expected Annual Damage', 'Dike Investment Costs']
                                   for dike in function.dikelist])
            variable_names_.extend(['{}_{} {}'.format(dike, e, n)
                                    for e in ['Expected Number of Deaths']
                                    for dike in function.dikelist])
            variable_names.extend(['RfR Total Costs {}'.format(n)])
            variable_names.extend(['Expected Evacuation Costs {}'.format(n)])
        dike_model.outcomes = [
            ScalarOutcome('All Costs', variable_name=list(variable_names),
                          function=sum_over, kind=direction),
            ScalarOutcome('Expected Number of Deaths', variable_name=list(variable_names_),
                          function=sum_over, kind=direction)]
    elif problem_formulation_id == 1:
        # 3-objective formulation: damage, all investment costs, deaths.
        variable_names = []
        variable_names_ = []
        variable_names__ = []
        for n in function.planning_steps:
            variable_names.extend(['{}_Expected Annual Damage {}'.format(dike, n)
                                   for dike in function.dikelist])
            variable_names_.extend((['{}_Dike Investment Costs {}'.format(dike, n)
                                     for dike in function.dikelist]
                                    + ['RfR Total Costs {}'.format(n)])
                                   + ['Expected Evacuation Costs {}'.format(n)])
            variable_names__.extend(['{}_Expected Number of Deaths {}'.format(dike, n)
                                     for dike in function.dikelist])
        dike_model.outcomes = [
            ScalarOutcome('Expected Annual Damage', variable_name=list(variable_names),
                          function=sum_over, kind=direction),
            ScalarOutcome('Total Investment Costs', variable_name=list(variable_names_),
                          function=sum_over, kind=direction),
            ScalarOutcome('Expected Number of Deaths', variable_name=list(variable_names__),
                          function=sum_over, kind=direction)]
    elif problem_formulation_id == 2:
        # 5-objective formulation: each cost category separately plus deaths.
        variable_names = []
        variable_names_ = []
        variable_names__ = []
        variable_names___ = []
        variable_names____ = []
        for n in function.planning_steps:
            variable_names.extend(['{}_Expected Annual Damage {}'.format(dike, n)
                                   for dike in function.dikelist])
            variable_names_.extend(['{}_Dike Investment Costs {}'.format(dike, n)
                                    for dike in function.dikelist])
            variable_names__.extend(['RfR Total Costs {}'.format(n)])
            variable_names___.extend(['Expected Evacuation Costs {}'.format(n)])
            variable_names____.extend(['{}_Expected Number of Deaths {}'.format(dike, n)
                                       for dike in function.dikelist])
        dike_model.outcomes = [
            ScalarOutcome('Expected Annual Damage', variable_name=list(variable_names),
                          function=sum_over, kind=direction),
            ScalarOutcome('Dike Investment Costs', variable_name=list(variable_names_),
                          function=sum_over, kind=direction),
            ScalarOutcome('RfR Investment Costs', variable_name=list(variable_names__),
                          function=sum_over, kind=direction),
            ScalarOutcome('Evacuation Costs', variable_name=list(variable_names___),
                          function=sum_over, kind=direction),
            ScalarOutcome('Expected Number of Deaths', variable_name=list(variable_names____),
                          function=sum_over, kind=direction)]
    elif problem_formulation_id == 3:
        # Disaggregated over dike rings, aggregated over planning steps.
        outcomes = []
        for dike in function.dikelist:
            variable_name = []
            for e in ['Expected Annual Damage', 'Dike Investment Costs']:
                variable_name.extend(['{}_{} {}'.format(dike, e, n)
                                     for n in function.planning_steps])
            outcomes.append(ScalarOutcome('{} Total Costs'.format(dike),
                                          variable_name=list(variable_name),
                                          function=sum_over, kind=direction))
            outcomes.append(ScalarOutcome('{}_Expected Number of Deaths'.format(dike),
                                          variable_name=['{}_Expected Number of Deaths {}'.format(dike, n)
                                                         for n in function.planning_steps],
                                          function=sum_over, kind=direction))
        outcomes.append(ScalarOutcome('RfR Total Costs',
                                      variable_name=['RfR Total Costs {}'.format(n)
                                                     for n in function.planning_steps],
                                      function=sum_over, kind=direction))
        outcomes.append(ScalarOutcome('Expected Evacuation Costs',
                                      variable_name=['Expected Evacuation Costs {}'.format(n)
                                                     for n in function.planning_steps],
                                      function=sum_over, kind=direction))
        dike_model.outcomes = outcomes
    elif problem_formulation_id == 4:
        # Aggregated over dike rings, disaggregated over planning steps.
        outcomes = []
        for n in function.planning_steps:
            # BUG FIX: these three appends previously sat inside an extra
            # `for dike in function.dikelist:` loop, so each per-step outcome
            # was appended once per ring with an identical name (duplicates).
            # The loop variable was only shadowed by the comprehensions below
            # and never used; one append per planning step is intended.
            outcomes.append(ScalarOutcome('Expected Annual Damage {}'.format(n),
                                          variable_name=['{}_Expected Annual Damage {}'.format(dike, n)
                                                         for dike in function.dikelist],
                                          function=sum_over, kind=direction))
            outcomes.append(ScalarOutcome('Dike Investment Costs {}'.format(n),
                                          variable_name=['{}_Dike Investment Costs {}'.format(dike, n)
                                                         for dike in function.dikelist],
                                          function=sum_over, kind=direction))
            outcomes.append(ScalarOutcome('Expected Number of Deaths {}'.format(n),
                                          variable_name=['{}_Expected Number of Deaths {}'.format(dike, n)
                                                         for dike in function.dikelist],
                                          function=sum_over, kind=direction))
            outcomes.append(ScalarOutcome('RfR Total Costs {}'.format(n), kind=direction))
            outcomes.append(ScalarOutcome('Expected Evacuation Costs {}'.format(n), kind=direction))
        dike_model.outcomes = outcomes
    elif problem_formulation_id == 5:
        # Fully disaggregated: one outcome per ring, metric and step.
        outcomes = []
        for n in function.planning_steps:
            for dike in function.dikelist:
                for entry in ['Expected Annual Damage', 'Dike Investment Costs',
                              'Expected Number of Deaths']:
                    o = ScalarOutcome('{}_{} {}'.format(dike, entry, n), kind=direction)
                    outcomes.append(o)
            outcomes.append(ScalarOutcome('RfR Total Costs {}'.format(n), kind=direction))
            outcomes.append(ScalarOutcome('Expected Evacuation Costs {}'.format(n), kind=direction))
        dike_model.outcomes = outcomes
    else:
        # Fixed typo in the message ('unknownx' -> 'unknown').
        raise TypeError('unknown identifier')
    return (dike_model, function.planning_steps)
Specify uncertainties, levers and problem formulation.<|endoftext|>
|
b7995936266c88cd910bf3dbcbc629e75a75aa59c322ba58205bf42c9e1e9b3c
|
def test_join_on_eq_with_abs_dt_outside_window(self):
    """Should get 0 answers because N^2 matches but 0 within dt window."""
    window = 8
    # Two argument sets whose keys match but whose time columns are too far
    # apart for an |dt| <= 8 window; both must produce empty index vectors.
    cases = [
        (self.a1, self.a1, self.t1, self.t1 * 10),
        (self.a2, self.a1, self.t1, self.t2),
    ]
    for left_keys, right_keys, left_times, right_times in cases:
        left_idx, right_idx = ak.join_on_eq_with_dt(
            left_keys, right_keys, left_times, right_times, window, 'abs_dt')
        self.assertEqual(0, left_idx.size)
        self.assertEqual(0, right_idx.size)
|
Should get 0 answers because N^2 matches but 0 within dt window
|
tests/join_test.py
|
test_join_on_eq_with_abs_dt_outside_window
|
mcdobe100/arkouda
| 0
|
python
|
def test_join_on_eq_with_abs_dt_outside_window(self):
    """Should get 0 answers because N^2 matches but 0 within dt window."""
    # dt window of 8 is too small for any candidate pair to qualify.
    dt = 8
    # Self-join on a1 with the right-hand times scaled by 10: keys match,
    # but every time difference falls outside +/- dt.
    # NOTE(review): self.a1/self.a2/self.t1/self.t2 are presumably arkouda
    # arrays built in the test fixture's setUp — confirm against the class.
    (I, J) = ak.join_on_eq_with_dt(self.a1, self.a1, self.t1, (self.t1 * 10), dt, 'abs_dt')
    self.assertEqual(0, I.size)
    self.assertEqual(0, J.size)
    # Second argument set: different key arrays and mismatched time columns,
    # again expecting empty result index vectors.
    (I, J) = ak.join_on_eq_with_dt(self.a2, self.a1, self.t1, self.t2, dt, 'abs_dt')
    self.assertEqual(0, I.size)
    self.assertEqual(0, J.size)
|
def test_join_on_eq_with_abs_dt_outside_window(self):
    """Should get 0 answers because N^2 matches but 0 within dt window."""
    dt = 8
    # Keys match but right-hand timestamps (t1 * 10) are far from t1,
    # so no pair lands inside the +/- dt window.
    res_i, res_j = ak.join_on_eq_with_dt(
        self.a1, self.a1, self.t1, self.t1 * 10, dt, 'abs_dt')
    self.assertEqual(0, res_i.size)
    self.assertEqual(0, res_j.size)
    # Different key arrays with mismatched time columns: again no joins.
    res_i, res_j = ak.join_on_eq_with_dt(
        self.a2, self.a1, self.t1, self.t2, dt, 'abs_dt')
    self.assertEqual(0, res_i.size)
    self.assertEqual(0, res_j.size)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.