index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
995,800 | daad6ab053c50a7b5eb6d893468626f78fcbae5b | """
"""
import re
from MotifAtlasBaseClass import MotifAtlasBaseClass
from models import session, MotifAnnotation
def _dedupe_clean(text):
    """Strip bracketed qualifiers (e.g. " [tag] ") from `text` and
    de-duplicate its '|'-separated entries, returning a ' | '-joined string.

    Fixes vs. original: raw strings for the regex (``'\\s?'`` in a plain
    string is an invalid escape), and splitting on the bare ``'|'`` with a
    per-part strip so entries separated by '|' or '| ' are de-duplicated
    without leaving stray double spaces in the rejoined value.

    NOTE(review): entry order is not preserved (set semantics) — this
    matches the original behaviour.
    """
    text = re.sub(r'\s?\[.+?\]\s?', '', text)
    parts = {part.strip() for part in text.split('|') if part.strip()}
    return ' | '.join(parts)


# Clean both free-text fields of every motif annotation, then persist.
for loop in session.query(MotifAnnotation).all():
    if loop.common_name:
        loop.common_name = _dedupe_clean(loop.common_name)
    if loop.annotation:
        loop.annotation = _dedupe_clean(loop.annotation)
session.commit()
995,801 | 3fcaafeac52c788ee8ebbce1d5b28eb4bbafabbb | from libtool import include_sub_folder_in_root_path
include_sub_folder_in_root_path(__file__, "approot", "libs")
|
995,802 | a4296789b90e4bb28695f51a7bfe7c24c16599d9 | """
Простейшая система проверки орфографии основана на использовании списка известных слов. Каждое слово в
проверяемом тексте ищется в этом списке и, если такое слово не найдено, оно помечается, как ошибочное.
Напишем подобную систему.
Через стандартный ввод подаётся следующая структура: первой строкой — количество dd записей в списке известных
слов, после передаётся dd строк с одним словарным словом на строку, затем — количество ll строк текста, после
чего — ll строк текста.
Напишите программу, которая выводит слова из текста, которые не встречаются в словаре. Регистр слов не
учитывается. Порядок вывода слов произвольный. Слова, не встречающиеся в словаре, не должны повторяться в
выводе программы.
Sample Input:
3
a
bb
cCc
2
a bb aab aba ccc
c bb aaa
Sample Output:
aab
aba
c
aaa
"""
# Read the dictionary: first line is the word count, then one word per line.
dictionary = {input().lower() for _ in range(int(input()))}

# Scan the given number of text lines, collecting unique words that are
# not in the dictionary (comparison is case-insensitive).
unknown_words = set()
for _ in range(int(input())):
    for token in input().lower().split():
        if token not in dictionary:
            unknown_words.add(token)

# Order of output is unspecified, matching the set-based original.
print(*unknown_words, sep='\n')
|
995,803 | 9fa43c8c19e98adb9398cb80407eb8dea853341d | from datetime import date
class Game(object):
    """A single recorded match between a red and a blue player.

    Core fields are set from the constructor; post-game bookkeeping
    (position/skill changes, achievements, soft-delete markers) is filled
    in later by external code.
    """

    __slots__ = ('redPlayer', 'redScore', 'bluePlayer', 'blueScore', 'time',
                 'skillChangeToBlue', 'redPosChange', 'redPosAfter',
                 'bluePosChange', 'bluePosAfter', 'redAchievements',
                 'blueAchievements', 'deletedBy', 'deletedAt')

    def __init__(self, redPlayer, redScore, bluePlayer, blueScore, time):
        self.redPlayer = redPlayer
        self.redScore = int(redScore)
        self.bluePlayer = bluePlayer
        self.blueScore = int(blueScore)
        self.time = int(time)
        self.skillChangeToBlue = 0
        # Remaining bookkeeping slots start out unset.
        for attr in ('redPosChange', 'redPosAfter', 'bluePosChange',
                     'bluePosAfter', 'redAchievements', 'blueAchievements',
                     'deletedBy', 'deletedAt'):
            setattr(self, attr, None)

    def isDeleted(self):
        """True if this game has been soft-deleted."""
        return self.deletedAt is not None

    def timeAsDate(self):
        """The game's unix timestamp as a local date."""
        return date.fromtimestamp(self.time)
|
995,804 | 5b1387c9d1105b789a0c7f6b1e7a27fd154e6497 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'plugin_window.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
import PyQt5.QtGui as QtGui
import PyQt5.QtWidgets as QtWidgets
import PyQt5.QtCore as QtCore
class Ui_PluginWindow(object):
    """Auto-generated (pyuic5) UI scaffold for the plugin-selection window.

    NOTE(review): generated from 'plugin_window.ui' — prefer regenerating
    over hand-editing, per the header warning.
    """

    def setupUi(self, PluginWindow):
        # Fixed-size (482x600), application-modal window.
        PluginWindow.setObjectName("PluginWindow")
        PluginWindow.setWindowModality(QtCore.Qt.ApplicationModal)
        PluginWindow.resize(482, 600)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(PluginWindow.sizePolicy().hasHeightForWidth())
        PluginWindow.setSizePolicy(sizePolicy)
        PluginWindow.setMinimumSize(QtCore.QSize(482, 600))
        PluginWindow.setMaximumSize(QtCore.QSize(482, 600))
        self.font = QtGui.QFont("Arial", 10)
        self.font_header = QtGui.QFont("Arial", 10)
        self.font_header.setBold(True)
        self.centralwidget = QtWidgets.QWidget(PluginWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        # Two buttons on the bottom row of the grid.
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setObjectName("pushButton")
        self.pushButton.setFont(self.font)
        self.gridLayout.addWidget(self.pushButton, 1, 0, 1, 1)
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setObjectName("pushButton_2")
        self.pushButton_2.setFont(self.font)
        self.gridLayout.addWidget(self.pushButton_2, 1, 1, 1, 1)
        # Plugin table spans both columns of the top row; no focus/selection.
        self.tableWidget = QtWidgets.QTableWidget(self.centralwidget)
        self.tableWidget.setFocusPolicy(QtCore.Qt.NoFocus)
        self.tableWidget.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(0)
        self.tableWidget.setRowCount(0)
        self.tableWidget.verticalHeader().setVisible(False)
        self.gridLayout.addWidget(self.tableWidget, 0, 0, 1, 2)
        PluginWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(PluginWindow)
        QtCore.QMetaObject.connectSlotsByName(PluginWindow)
        # Wire the buttons to handlers on the host window object.
        self.pushButton_2.clicked.connect(PluginWindow.exit)
        self.pushButton.clicked.connect(PluginWindow.start_plugin_window)

    def retranslateUi(self, PluginWindow):
        # Russian labels: title "Выбор плагинов" (plugin selection),
        # buttons "Запуск" (start) and "Отмена" (cancel).
        _translate = QtCore.QCoreApplication.translate
        PluginWindow.setWindowTitle(_translate("PluginWindow", "Выбор плагинов"))
        self.pushButton.setText(_translate("PluginWindow", "Запуск"))
        self.pushButton_2.setText(_translate("PluginWindow", "Отмена"))
|
995,805 | aa2be557ae188b7ce93c3343ab149de290a4f07e | """ Utilties for casting numpy values in various ways
Most routines work round some numpy oddities in floating point precision and
casting. Others work round numpy casting to and from python ints
"""
from numbers import Integral
from platform import processor, machine
import numpy as np
from .testing import setup_test # noqa
class CastingError(Exception):
    """Raised when a value cannot be cast safely (e.g. NaNs with
    ``nan2zero=False`` in :func:`float_to_int`)."""
# Test for VC truncation when casting floats to uint64
# Christoph Gohlke says this is so for MSVC <= 2010 because VC is using x87
# instructions; see:
# https://github.com/scipy/scipy/blob/99bb8411f6391d921cb3f4e56619291e91ddf43b/scipy/ndimage/tests/test_datatypes.py#L51
_test_val = 2**63 + 2**11  # Should be exactly representable in float64
# True when float64 -> uint64 casts truncate on this platform; consulted by
# shared_range() to cap the usable uint64 maximum at 2**63.
TRUNC_UINT64 = np.float64(_test_val).astype(np.uint64) != _test_val
def float_to_int(arr, int_type, nan2zero=True, infmax=False):
    """ Convert floating point array `arr` to type `int_type`

    * Rounds numbers to nearest integer
    * Clips values to prevent overflows when casting
    * Converts NaN to 0 (for `nan2zero` == True)

    Casting floats to integers is delicate because the result is undefined
    and platform specific for float values outside the range of `int_type`.
    Define ``shared_min`` to be the minimum value that can be exactly
    represented in both the float type of `arr` and `int_type`. Define
    `shared_max` to be the equivalent maximum value.  To avoid undefined
    results we threshold `arr` at ``shared_min`` and ``shared_max``.

    Parameters
    ----------
    arr : array-like
        Array of floating point type
    int_type : object
        Numpy integer type
    nan2zero : {True, False, None}
        Whether to convert NaN value to zero.  Default is True.  If False, and
        NaNs are present, raise CastingError.  If None, do not check for NaN
        values and pass through directly to the ``astype`` casting mechanism.
        In this last case, the resulting value is undefined.
    infmax : {False, True}
        If True, set np.inf values in `arr` to be `int_type` integer maximum
        value, -np.inf as `int_type` integer minimum.  If False, set +/- infs
        to be ``shared_min``, ``shared_max`` as defined above.  Therefore False
        gives faster conversion at the expense of infs that are further from
        infinity.

    Returns
    -------
    iarr : ndarray
        of type `int_type`

    Examples
    --------
    >>> float_to_int([np.nan, np.inf, -np.inf, 1.1, 6.6], np.int16)
    array([ 0, 32767, -32768, 1, 7], dtype=int16)

    Notes
    -----
    Numpy relies on the C library to cast from float to int using the standard
    ``astype`` method of the array.

    Quoting from section F4 of the C99 standard:

        If the floating value is infinite or NaN or if the integral part of the
        floating value exceeds the range of the integer type, then the
        "invalid" floating-point exception is raised and the resulting value
        is unspecified.

    Hence we threshold at ``shared_min`` and ``shared_max`` to avoid casting to
    values that are undefined.

    See: https://en.wikipedia.org/wiki/C99 . There are links to the C99
    standard from that page.
    """
    arr = np.asarray(arr)
    flt_type = arr.dtype.type
    int_type = np.dtype(int_type).type
    # Deal with scalar as input; fancy indexing needs 1D
    shape = arr.shape
    arr = np.atleast_1d(arr)
    # Widest thresholds exactly representable in both types.
    mn, mx = shared_range(flt_type, int_type)
    if nan2zero is None:
        # Caller takes responsibility: NaNs fall through to astype().
        seen_nans = False
    else:
        nans = np.isnan(arr)
        seen_nans = np.any(nans)
        if not nan2zero and seen_nans:
            raise CastingError('NaNs in array, nan2zero is False')
    # Round, clip into the shared range, then cast -- all defined behavior.
    iarr = np.clip(np.rint(arr), mn, mx).astype(int_type)
    if seen_nans:
        # NaNs clip to an arbitrary in-range value; force them to zero.
        iarr[nans] = 0
    if not infmax:
        return iarr.reshape(shape)
    # infmax: push infs out to the integer type's true extremes.
    ii = np.iinfo(int_type)
    iarr[arr == np.inf] = ii.max
    if ii.min != int(mn):
        # Only needed when the shared minimum sits above the integer minimum.
        iarr[arr == -np.inf] = ii.min
    return iarr.reshape(shape)
# Cache range values, keyed by (float type, int type).
_SHARED_RANGES = {}


def shared_range(flt_type, int_type):
    """ Min and max in float type that are >=min, <=max in integer type

    This is not as easy as it sounds, because the float type may not be able to
    exactly represent the max or min integer values, so we have to find the
    next exactly representable floating point value to do the thresholding.

    Parameters
    ----------
    flt_type : dtype specifier
        A dtype specifier referring to a numpy floating point type.  For
        example, ``f4``, ``np.dtype('f4')``, ``np.float32`` are equivalent.
    int_type : dtype specifier
        A dtype specifier referring to a numpy integer type.  For example,
        ``i4``, ``np.dtype('i4')``, ``np.int32`` are equivalent

    Returns
    -------
    mn : object
        Number of type `flt_type` that is the minumum value in the range of
        `int_type`, such that ``mn.astype(int_type)`` >= min of `int_type`
    mx : object
        Number of type `flt_type` that is the maximum value in the range of
        `int_type`, such that ``mx.astype(int_type)`` <= max of `int_type`

    Examples
    --------
    >>> shared_range(np.float32, np.int32) == (-2147483648.0, 2147483520.0)
    True
    >>> shared_range('f4', 'i4') == (-2147483648.0, 2147483520.0)
    True
    """
    flt_type = np.dtype(flt_type).type
    int_type = np.dtype(int_type).type
    key = (flt_type, int_type)
    # Use cached value if present
    try:
        return _SHARED_RANGES[key]
    except KeyError:
        pass
    ii = np.iinfo(int_type)
    fi = np.finfo(flt_type)
    # Nearest exactly-representable float >= the integer minimum ...
    mn = ceil_exact(ii.min, flt_type)
    if mn == -np.inf:
        # Integer minimum is beyond the float range; use the float minimum.
        mn = fi.min
    # ... and nearest exactly-representable float <= the integer maximum.
    mx = floor_exact(ii.max, flt_type)
    if mx == np.inf:
        mx = fi.max
    elif TRUNC_UINT64 and int_type == np.uint64:
        # MSVC x87 truncation: values above 2**63 cast wrongly to uint64.
        mx = min(mx, flt_type(2**63))
    _SHARED_RANGES[key] = (mn, mx)
    return mn, mx
# ----------------------------------------------------------------------------
# Routines to work out the next lowest representable integer in floating point
# types.
# ----------------------------------------------------------------------------

try:
    _float16 = np.float16
except AttributeError:  # float16 not present in np < 1.6
    _float16 = None


class FloatingError(Exception):
    """Raised when floating point type information cannot be determined."""
    pass
def on_powerpc():
    """ True if we are running on a Power PC platform

    Has to deal with older Macs and IBM POWER7 series among others
    """
    # Older Macs report 'powerpc' from processor(); Linux/IBM machines
    # report a machine string starting with 'ppc'.
    if processor() == 'powerpc':
        return True
    return machine().startswith('ppc')
def type_info(np_type):
    """ Return dict with min, max, nexp, nmant, width for numpy type `np_type`

    Type can be integer in which case nexp and nmant are None.

    Parameters
    ----------
    np_type : numpy type specifier
        Any specifier for a numpy dtype

    Returns
    -------
    info : dict
        with fields ``min`` (minimum value), ``max`` (maximum value), ``nexp``
        (exponent width), ``nmant`` (significand precision not including
        implicit first digit), ``minexp`` (minimum exponent), ``maxexp``
        (maximum exponent), ``width`` (width in bytes).  (``nexp``, ``nmant``,
        ``minexp``, ``maxexp``) are None for integer types.  Both ``min`` and
        ``max`` are of type `np_type`.

    Raises
    ------
    FloatingError
        for floating point types we don't recognize

    Notes
    -----
    You might be thinking that ``np.finfo`` does this job, and it does, except
    for PPC long doubles (https://github.com/numpy/numpy/issues/2669) and
    float96 on Windows compiled with Mingw.  This routine protects against such
    errors in ``np.finfo`` by only accepting values that we know are likely to
    be correct.
    """
    dt = np.dtype(np_type)
    np_type = dt.type
    width = dt.itemsize
    try:  # integer type
        info = np.iinfo(dt)
    except ValueError:
        pass
    else:
        # Integers: no exponent/significand fields.
        return dict(min=np_type(info.min), max=np_type(info.max), minexp=None,
                    maxexp=None, nmant=None, nexp=None, width=width)
    info = np.finfo(dt)
    # Trust the standard IEEE types
    nmant, nexp = info.nmant, info.nexp
    ret = dict(min=np_type(info.min),
               max=np_type(info.max),
               nmant=nmant,
               nexp=nexp,
               minexp=info.minexp,
               maxexp=info.maxexp,
               width=width)
    if np_type in (_float16, np.float32, np.float64,
                   np.complex64, np.complex128):
        return ret
    # From here on we are looking at longdouble / longcomplex variants.
    # NOTE(review): np.longcomplex was removed in NumPy 2.0; this module
    # appears to target older numpy -- confirm before upgrading.
    info_64 = np.finfo(np.float64)
    if dt.kind == 'c':
        assert np_type is np.longcomplex
        vals = (nmant, nexp, width / 2)
    else:
        assert np_type is np.longdouble
        vals = (nmant, nexp, width)
    if vals in ((112, 15, 16),  # binary128
                (info_64.nmant, info_64.nexp, 8),  # float64
                (63, 15, 12), (63, 15, 16)):  # Intel extended 80
        return ret  # these are OK without modification
    # The remaining types are longdoubles with bad finfo values.  Some we
    # correct, others we wait to hear of errors.
    # We start with float64 as basis
    ret = type_info(np.float64)
    if vals in ((52, 15, 12),  # windows float96
                (52, 15, 16)):  # windows float128?
        # On windows 32 bit at least, float96 is Intel 80 storage but operating
        # at float64 precision. The finfo values give nexp == 15 (as for intel
        # 80) but in calculations nexp in fact appears to be 11 as for float64
        ret.update(dict(width=width))
        return ret
    if vals == (105, 11, 16):  # correctly detected double double
        ret.update(dict(nmant=nmant, nexp=nexp, width=width))
        return ret
    # Oh dear, we don't recognize the type information.  Try some known types
    # and then give up. At this stage we're expecting exotic longdouble or
    # their complex equivalent.
    if np_type not in (np.longdouble, np.longcomplex) or width not in (16, 32):
        raise FloatingError('We had not expected type %s' % np_type)
    if (vals == (1, 1, 16) and on_powerpc() and
            _check_maxexp(np.longdouble, 1024)):
        # double pair on PPC.  The _check_nmant routine does not work for this
        # type, hence the powerpc platform check instead
        ret.update(dict(nmant=106, width=width))
    elif (_check_nmant(np.longdouble, 52) and
          _check_maxexp(np.longdouble, 11)):
        # Got float64 despite everything
        pass
    elif (_check_nmant(np.longdouble, 112) and
          _check_maxexp(np.longdouble, 16384)):
        # binary 128, but with some busted type information. np.longcomplex
        # seems to break here too, so we need to use np.longdouble and
        # complexify
        two = np.longdouble(2)
        # See: https://matthew-brett.github.io/pydagogue/floating_point.html
        max_val = (two ** 113 - 1) / (two ** 112) * two ** 16383
        if np_type is np.longcomplex:
            max_val += 0j
        ret = dict(min=-max_val,
                   max=max_val,
                   nmant=112,
                   nexp=15,
                   minexp=-16382,
                   maxexp=16384,
                   width=width)
    else:  # don't recognize the type
        raise FloatingError('We had not expected long double type %s '
                            'with info %s' % (np_type, info))
    return ret
def _check_nmant(np_type, nmant):
""" True if fp type `np_type` seems to have `nmant` significand digits
Note 'digits' does not include implicit digits. And in fact if there are
no implicit digits, the `nmant` number is one less than the actual digits.
Assumes base 2 representation.
Parameters
----------
np_type : numpy type specifier
Any specifier for a numpy dtype
nmant : int
Number of digits to test against
Returns
-------
tf : bool
True if `nmant` is the correct number of significand digits, false
otherwise
"""
np_type = np.dtype(np_type).type
max_contig = np_type(2 ** (nmant + 1)) # maximum of contiguous integers
tests = max_contig + np.array([-2, -1, 0, 1, 2], dtype=np_type)
return np.all(tests - max_contig == [-2, -1, 0, 0, 2])
def _check_maxexp(np_type, maxexp):
""" True if fp type `np_type` seems to have `maxexp` maximum exponent
We're testing "maxexp" as returned by numpy. This value is set to one
greater than the maximum power of 2 that `np_type` can represent.
Assumes base 2 representation. Very crude check
Parameters
----------
np_type : numpy type specifier
Any specifier for a numpy dtype
maxexp : int
Maximum exponent to test against
Returns
-------
tf : bool
True if `maxexp` is the correct maximum exponent, False otherwise.
"""
dt = np.dtype(np_type)
np_type = dt.type
two = np_type(2).reshape((1,)) # to avoid upcasting
return (np.isfinite(two ** (maxexp - 1)) and
not np.isfinite(two ** maxexp))
def as_int(x, check=True):
    """ Return python integer representation of number

    This is useful because the numpy int(val) mechanism is broken for large
    values in np.longdouble.

    It is also useful to work around a numpy 1.4.1 bug in conversion of uints
    to python ints.

    This routine will still raise an OverflowError for values that are outside
    the range of float64.

    Parameters
    ----------
    x : object
        integer, unsigned integer or floating point value
    check : {True, False}
        If True, raise error for values that are not integers

    Returns
    -------
    i : int
        Python integer

    Examples
    --------
    >>> as_int(2.0)
    2
    >>> as_int(-2.0)
    -2
    >>> as_int(2.1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    FloatingError: Not an integer: 2.1
    >>> as_int(2.1, check=False)
    2
    """
    x = np.array(x)
    if x.dtype.kind in 'iu':
        # Round-trip through str: numpy 1.4.1 could return negative values
        # from int() on large unsigned scalars.
        return int(str(x))
    as_python_int = int(x)
    if as_python_int == x:
        # int() conversion was exact -- done.
        return as_python_int
    whole = np.floor(x)
    if check and whole != x:
        raise FloatingError('Not an integer: %s' % x)
    if whole.dtype.type is not np.longdouble:
        return int(x)
    # longdouble whose int() conversion lost precision: peel off
    # float64-sized chunks and accumulate them exactly as Python ints.
    total = 0
    while whole != 0:
        chunk = np.float64(whole)
        whole -= chunk
        total += int(chunk)
    return total
def int_to_float(val, flt_type):
    """ Convert integer `val` to floating point type `flt_type`

    Why is this so complicated?

    At least in numpy <= 1.6.1, numpy longdoubles do not correctly convert to
    ints, and ints do not correctly convert to longdoubles.  Specifically, in
    both cases, the values seem to go through float64 conversion on the way, so
    to convert better, we need to split into float64s and sum up the result.

    Parameters
    ----------
    val : int
        Integer value
    flt_type : object
        numpy floating point type

    Returns
    -------
    f : numpy scalar
        of type `flt_type`
    """
    # Every type but longdouble converts correctly via the constructor.
    if flt_type is not np.longdouble:
        return flt_type(val)
    # Normalise numpy scalars to Python ints via str (numpy 1.4.1 bug:
    # int(np.uint32(2**32-1)) could come back negative).
    if not isinstance(val, Integral):
        val = int(str(val))
    # Accumulate float64-sized chunks so no longdouble precision is lost.
    result = np.longdouble(0)
    remaining = val
    while remaining != 0:
        chunk = np.float64(remaining)
        result += chunk
        remaining -= int(chunk)
    return result
def floor_exact(val, flt_type):
    """ Return nearest exact integer <= `val` in float type `flt_type`

    Parameters
    ----------
    val : int
        We have to pass val as an int rather than the floating point type
        because large integers cast as floating point may be rounded by the
        casting process.
    flt_type : numpy type
        numpy float type.

    Returns
    -------
    floor_val : object
        value of same floating point type as `val`, that is the nearest exact
        integer in this type such that `floor_val` <= `val`.  Thus if `val` is
        exact in `flt_type`, `floor_val` == `val`.

    Examples
    --------
    Obviously 2 is within the range of representable integers for float32

    >>> floor_exact(2, np.float32)
    2.0

    As is 2**24-1 (the number of significand digits is 23 + 1 implicit)

    >>> floor_exact(2**24-1, np.float32) == 2**24-1
    True

    But 2**24+1 gives a number that float32 can't represent exactly

    >>> floor_exact(2**24+1, np.float32) == 2**24
    True

    As for the numpy floor function, negatives floor towards -inf

    >>> floor_exact(-2**24-1, np.float32) == -2**24-2
    True
    """
    val = int(val)
    flt_type = np.dtype(flt_type).type
    sign = 1 if val > 0 else -1
    try:  # int_to_float deals with longdouble safely
        fval = int_to_float(val, flt_type)
    except OverflowError:
        # Beyond the float range entirely; signed infinity is the floor.
        return sign * np.inf
    if not np.isfinite(fval):
        return fval
    info = type_info(flt_type)
    # Compare exactly in integer arithmetic -- float compare could round.
    diff = val - as_int(fval)
    if diff >= 0:  # floating point value <= val
        return fval
    # Float casting made the value go up; step down by one unit in the
    # last place at this magnitude.
    biggest_gap = 2**(floor_log2(val) - info['nmant'])
    assert biggest_gap > 1
    fval -= flt_type(biggest_gap)
    return fval
def ceil_exact(val, flt_type):
    """ Return nearest exact integer >= `val` in float type `flt_type`

    Parameters
    ----------
    val : int
        We have to pass val as an int rather than the floating point type
        because large integers cast as floating point may be rounded by the
        casting process.
    flt_type : numpy type
        numpy float type.

    Returns
    -------
    ceil_val : object
        value of same floating point type as `val`, that is the nearest exact
        integer in this type such that `ceil_val` >= `val`.  Thus if `val` is
        exact in `flt_type`, `ceil_val` == `val`.

    Examples
    --------
    Obviously 2 is within the range of representable integers for float32

    >>> ceil_exact(2, np.float32)
    2.0

    As is 2**24-1 (the number of significand digits is 23 + 1 implicit)

    >>> ceil_exact(2**24-1, np.float32) == 2**24-1
    True

    But 2**24+1 gives a number that float32 can't represent exactly

    >>> ceil_exact(2**24+1, np.float32) == 2**24+2
    True

    As for the numpy ceil function, negatives ceil towards inf

    >>> ceil_exact(-2**24-1, np.float32) == -2**24
    True
    """
    # Ceiling is the mirror image of floor: negate, floor, negate back.
    floored_negation = floor_exact(-val, flt_type)
    return -floored_negation
def int_abs(arr):
    """ Absolute values of array taking care of max negative int values

    Parameters
    ----------
    arr : array-like

    Returns
    -------
    abs_arr : array
        array the same shape as `arr` in which all negative numbers have been
        changed to positive numbers with the magnitude.

    Examples
    --------
    This kind of thing is confusing in base numpy:

    >>> import numpy as np
    >>> np.abs(np.int8(-128))
    -128

    ``int_abs`` fixes that:

    >>> int_abs(np.int8(-128))
    128
    >>> int_abs(np.array([-128, 127], dtype=np.int8))
    array([128, 127], dtype=uint8)
    >>> int_abs(np.array([-128, 127], dtype=np.float32))
    array([ 128., 127.], dtype=float32)
    """
    # asarray: no-copy view when possible.  Fix: np.array(arr, copy=False)
    # raises on NumPy 2.0 whenever a copy would be required.
    arr = np.asarray(arr)
    dt = arr.dtype
    if dt.kind == 'u':
        # Unsigned values are already their own absolute values.
        return arr
    if dt.kind != 'i':
        # Floats / complex have symmetric ranges; plain absolute is safe.
        return np.absolute(arr)
    # Signed ints: write the result into the matching unsigned type so that
    # abs(int8(-128)) == 128 instead of overflowing back to -128.
    out = arr.astype(np.dtype(dt.str.replace('i', 'u')))
    return np.choose(arr < 0, (arr, arr * -1), out=out)
def floor_log2(x):
    """ floor of log2 of abs(`x`)

    Embarrassingly, from https://en.wikipedia.org/wiki/Binary_logarithm

    Parameters
    ----------
    x : int

    Returns
    -------
    L : None or int
        floor of base 2 log of `x`.  None if `x` == 0.

    Examples
    --------
    >>> floor_log2(2**9+1)
    9
    >>> floor_log2(-2**9+1)
    8
    >>> floor_log2(0.5)
    -1
    >>> floor_log2(0) is None
    True
    """
    magnitude = abs(x)
    if magnitude == 0:
        # log2(0) is undefined.
        return None
    exponent = 0
    if magnitude >= 2:
        # Repeated halving for magnitudes of 2 and above.
        while magnitude >= 2:
            magnitude //= 2
            exponent += 1
    else:
        # Repeated doubling for fractions below 1; [1, 2) stays at 0.
        while magnitude < 1:
            magnitude *= 2
            exponent -= 1
    return exponent
def best_float():
    """ Floating point type with best precision

    This is nearly always np.longdouble, except on Windows, where np.longdouble
    is Intel80 storage, but with float64 precision for calculations.  In that
    case we return float64 on the basis it's the fastest and smallest at the
    highest precision.

    SPARC float128 also proved so slow that we prefer float64.

    Returns
    -------
    best_type : numpy type
        floating point type with highest precision

    Notes
    -----
    Needs to run without error for module import, because it is called in
    ``ok_floats`` below, and therefore in setting module global ``OK_FLOATS``.
    """
    try:
        long_info = type_info(np.longdouble)
    except FloatingError:
        # Could not characterize longdouble; fall back to float64.
        return np.float64
    # Only prefer longdouble when it actually adds significand bits.
    if (long_info['nmant'] > type_info(np.float64)['nmant'] and
            machine() != 'sparc64'):  # sparc has crazy-slow float128
        return np.longdouble
    return np.float64
def longdouble_lte_float64():
    """ Return True if longdouble appears to have the same precision as float64
    """
    # 2**53 + 1 is the first integer float64 cannot represent; if
    # longdouble collapses it too, it offers no extra precision.
    big = np.longdouble(2 ** 53)
    return big == big + 1


# Record longdouble precision at import because it can change on Windows
_LD_LTE_FLOAT64 = longdouble_lte_float64()


def longdouble_precision_improved():
    """ True if longdouble precision increased since initial import

    This can happen on Windows compiled with MSVC.  It may be because libraries
    compiled with mingw (longdouble is Intel80) get linked to numpy compiled
    with MSVC (longdouble is Float64)
    """
    was_lte = _LD_LTE_FLOAT64
    now_lte = longdouble_lte_float64()
    return was_lte and not now_lte
def have_binary128():
    """ True if we have a binary128 IEEE longdouble
    """
    try:
        info = type_info(np.longdouble)
    except FloatingError:
        # Unrecognized longdouble layout -- certainly not binary128.
        return False
    # IEEE binary128: 112 explicit significand bits, maxexp 16384.
    return (info['nmant'], info['maxexp']) == (112, 16384)
def ok_floats():
    """ Return floating point types sorted by precision

    Remove longdouble if it has no higher precision than float64
    """
    # Explicit type list rather than np.sctypes['float'] -- np.sctypes was
    # removed in NumPy 2.0; this also avoids touching any numpy global.
    # _float16 is None on numpy < 1.6 (see probe above), so filter it.
    floats = [t for t in (_float16, np.float32, np.float64, np.longdouble)
              if t is not None]
    if best_float() != np.longdouble and np.longdouble in floats:
        # longdouble adds nothing (or is too slow) on this platform.
        floats.remove(np.longdouble)
    return sorted(floats, key=lambda f: type_info(f)['nmant'])


# Floating point types usable on this platform, lowest precision first.
OK_FLOATS = ok_floats()
def able_int_type(values):
    """ Find the smallest integer numpy type to contain sequence `values`

    Prefers uint to int if minimum is >= 0

    Parameters
    ----------
    values : sequence
        sequence of integer values

    Returns
    -------
    itype : None or numpy type
        numpy integer type or None if no integer type holds all `values`

    Examples
    --------
    >>> able_int_type([0, 1]) == np.uint8
    True
    >>> able_int_type([-1, 1]) == np.int8
    True
    """
    # Reject anything with a fractional part.
    if any([v % 1 for v in values]):
        return None
    mn = min(values)
    mx = max(values)
    # Explicit type tuples rather than np.sctypes (removed in NumPy 2.0);
    # ordered smallest-first so the tightest fit wins.
    if mn >= 0:
        for ityp in (np.uint8, np.uint16, np.uint32, np.uint64):
            if mx <= np.iinfo(ityp).max:
                return ityp
    for ityp in (np.int8, np.int16, np.int32, np.int64):
        info = np.iinfo(ityp)
        if mn >= info.min and mx <= info.max:
            return ityp
    return None
def ulp(val=np.float64(1.0)):
    """ Return gap between `val` and nearest representable number of same type

    This is the value of a unit in the last place (ULP), and is similar in
    meaning to the MATLAB eps function.

    Parameters
    ----------
    val : scalar, optional
        scalar value of any numpy type.  Default is 1.0 (float64)

    Returns
    -------
    ulp_val : scalar
        gap between `val` and nearest representable number of same type

    Notes
    -----
    The wikipedia article on machine epsilon points out that the term *epsilon*
    can be used in the sense of a unit in the last place (ULP), or as the
    maximum relative rounding error.  The MATLAB ``eps`` function uses the ULP
    meaning, but this function is ``ulp`` rather than ``eps`` to avoid
    confusion between different meanings of *eps*.
    """
    val = np.array(val)
    if not np.isfinite(val):
        # ULP is undefined for inf / NaN.
        return np.nan
    if val.dtype.kind in 'iu':
        # Integers are spaced exactly 1 apart.
        return 1
    aval = np.abs(val)
    info = type_info(val.dtype)
    fl2 = floor_log2(aval)
    if fl2 is None or fl2 < info['minexp']:  # subnormal
        fl2 = info['minexp']
    # 'nmant' value does not include implicit first bit
    return 2**(fl2 - info['nmant'])
|
995,806 | caffc9d81b149022e431d6ac1871215e2549b15a | # Jeff Heaton's Kaggle Utilities
# Copyright 2019 by Jeff Heaton, Open Source, Released under the Apache License
# For more information: https://github.com/jeffheaton/jh-kaggle-util
#
# Train from a SK-Learn model.
from util import *
import tensorflow as tf
import tensorflow.contrib.learn as learn
import scipy.stats
import numpy as np
import time
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostRegressor
import sklearn
from sklearn.ensemble import RandomForestRegressor
class TrainSKLearn(TrainModel):
    """Wrap a scikit-learn estimator in the TrainModel cross-validation
    harness (TrainModel and FIT_TYPE* come from ``util`` via star import).
    """

    def __init__(self, data_set, name, alg, run_single_fold):
        # alg: an unfitted sklearn estimator; its repr is recorded as the
        # run's parameter string.
        super().__init__(data_set, run_single_fold)
        self.name=name
        self.alg=alg
        self.early_stop = 50
        self.params = str(alg)

    def train_model(self, x_train, y_train, x_val, y_val):
        """Fit the estimator on the fold; validation data is unused for
        sklearn models (no early stopping)."""
        print("Training SKLearn model: {}".format(self.alg))
        x_train = x_train.values.astype(np.float32)
        y_train = y_train.values.astype(np.int32)
        #x_val = x_val.as_matrix().astype(np.float32)
        #y_val = y_val.as_matrix().astype(np.int32)
        self.alg.fit(x_train, y_train)
        self.steps = 0
        #self.classifier = clr.best_iteration
        return self.alg

    def predict_model(self, clr, x):
        """Predict with fitted estimator `clr`.

        For regression (FIT_TYPE from util) returns raw predictions; for
        classification returns the positive-class probability.
        """
        x = x.values.astype(np.float32)
        if FIT_TYPE == FIT_TYPE_REGRESSION:
            return clr.predict(x)
        else:
            pred = clr.predict_proba(x)
            # Column 1 of predict_proba is the positive class.
            pred = np.array([v[1] for v in pred])
            return pred
# "all the time" to "always"
# really short ones that are dead wrong
# [100] train-logloss:0.288795 eval-logloss:0.329036
# [598] train-logloss:0.152968 eval-logloss:0.296854
# [984] train-logloss:0.096444 eval-logloss:0.293915
# NOTE(review): n_trees / n_folds are not referenced below -- confirm unused.
n_trees = 100
n_folds = 3
# https://www.analyticsvidhya.com/blog/2015/06/tuning-random-forest-model/
# Candidate estimators, run one after another over data set "1".
alg_list = [
    ['rforest',RandomForestRegressor(n_estimators=1000, n_jobs=-1, max_depth=3, criterion='mae')],
    ['extree',ExtraTreesClassifier(n_estimators = 1000,max_depth=2)],
    ['adaboost',AdaBoostRegressor(base_estimator=None, n_estimators=600, learning_rate=1.0, random_state=20160703)],
    ['knn', sklearn.neighbors.KNeighborsRegressor(n_neighbors=5)]
]
start_time = time.time()
for name,alg in alg_list:
    # run_single_fold=False: full cross-validation per TrainModel harness.
    train = TrainSKLearn("1",name,alg,False)
    train.run()
    train = None  # release the model before the next run
elapsed_time = time.time() - start_time
print("Elapsed time: {}".format(hms_string(elapsed_time)))
995,807 | 08a74c33db1fb5fb0236fa487313183a612d7588 | from flask import Flask, send_from_directory, request, render_template, jsonify
from flask_restful import Api, Resource, reqparse
from flask_cors import CORS #comment this on deployment
from api.HelloApiHandler import HelloApiHandler
from flask_mysqldb import MySQL
from flask_sqlalchemy import SQLAlchemy
import datetime
from flask_marshmallow import Marshmallow
app = Flask(__name__)#, static_url_path='', static_folder='hackathon/build')
# Adjacent string literals concatenate: the MySQL password is empty
# ("root:@localhost") -- local development only.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:''@localhost/flask'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
ma = Marshmallow(app)
class Patients(db.Model):
    """A patient record, optionally linked to a doctor."""
    __tablename__ = 'patients'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100))
    # NOTE(review): dob defaults to row-creation time -- a date of birth
    # would not normally default to "now"; confirm intent.
    dob = db.Column(db.DateTime, default=datetime.datetime.now)
    notes = db.Column(db.Text(), default="")
    doctor_id = db.Column(db.Integer, db.ForeignKey('doctors.id'))
    # NOTE(review): delete-orphan cascade is configured from the patient
    # side with single_parent=True -- verify that deleting a patient is
    # not meant to delete its doctor.
    doctor = db.relationship('Doctors', cascade='all,delete-orphan', single_parent=True, backref=db.backref('patients', lazy='joined'))

    def __init__(self, name):
        self.name = name
class Doctors(db.Model):
    """A doctor; related patients are reachable via the 'patients' backref
    declared on Patients.doctor."""
    __tablename__ = 'doctors'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100))

    def __init__(self, name):
        self.name = name
class PatientSchema(ma.SQLAlchemyAutoSchema):
    """Marshmallow auto-schema for Patients; include_fk exposes doctor_id."""
    class Meta:
        model = Patients
        include_fk = True
class DoctorSchema(ma.SQLAlchemyAutoSchema):
    """Marshmallow auto-schema for Doctors."""
    class Meta:
        model = Doctors
# Shared (de)serializer instances for single-object and list responses.
patient_schema = PatientSchema()
patients_schema = PatientSchema(many=True)
doctor_schema = DoctorSchema()
# CORS(app) #comment this on deployment
# api = Api(app)
@app.route("/get", methods = ['GET'])
def get_patients():
    """Return all patients as a JSON list."""
    all_patients = Patients.query.all()
    results = patients_schema.dump(all_patients)
    return jsonify(results)
@app.route("/get/<id>", methods = ['GET'])
def post_details(id):
    """Return a single patient by primary key.

    NOTE(review): an unknown id serializes to a null body rather than 404.
    """
    patient = Patients.query.get(id)
    return patient_schema.jsonify(patient)
@app.route("/patients/<id>", methods = ['GET'])
def doctor_patients(id):
    """Return all patients assigned to the doctor with the given id."""
    patients = Patients.query.filter(Patients.doctor_id == id)
    results = patients_schema.dump(patients)
    return jsonify(results)
@app.route("/add", methods = ['POST'])
def add_patient():
    """Create a patient from JSON body {"name": ...} and return it."""
    name = request.json['name']
    patient = Patients(name)
    db.session.add(patient)
    db.session.commit()
    return patient_schema.jsonify(patient)
@app.route("/update/<id>", methods = ['PUT'])
def update_patient(id):
    """Rename patient <id> from JSON body {"name": ...} and return it.

    NOTE(review): no existence check -- an unknown id raises
    AttributeError (500) rather than returning 404.
    """
    patient = Patients.query.get(id)
    name = request.json['name']
    patient.name = name
    db.session.commit()
    return patient_schema.jsonify(patient)
@app.route("/delete/<id>", methods = ['DELETE'])
def article_delete(id):
    """Delete the patient with the given id and return the deleted record.

    Fix: the original returned None, which makes Flask raise
    "view function did not return a valid response" on every call.
    Returning the serialized record matches the sibling routes.
    """
    patient = Patients.query.get(id)
    db.session.delete(patient)
    db.session.commit()
    return patient_schema.jsonify(patient)
if __name__ == "__main__":
    # Debug server: local development only, never production.
    app.run(debug=True)
# @app.route('/form')
# def form():
# return render_template('form.html')
# @app.route('/login', methods = ['POST', 'GET'])
# def login():
# if request.method == 'GET':
# return "Login via the login Form"
# if request.method == 'POST':
# name = request.form['name']
# age = request.form['age']
# cursor = mysql.connection.cursor()
# cursor.execute(''' INSERT INTO info_table VALUES(%s,%s)''',(name,age))
# mysql.connection.commit()
# cursor.close()
# return f"Done!!"
# app.run(host='localhost', port=5000)
# api.add_resource(HelloApiHandler, '/flask/hello') |
995,808 | a430a1ddf11e4eae8af9617b340a374536589973 | # Escreva um programa q converta uma temperatura digitando em graus Celsius e converta para graus Fahrenheit.
# Read a Celsius temperature and report its Fahrenheit equivalent.
celsius = float(input('Informe a temperatura em ºC: '))
# Same operation order as F = C/5*9 + 32 to keep float results identical.
fahrenheit = (celsius / 5) * 9 + 32
print('A temperatura em Fahrenheit é {}ºF.'.format(fahrenheit))
|
995,809 | 3b3e521aa0cbc162facaceae37c93ab03f218eca | from __future__ import print_function
import sys, time
class ProgressBar:
    """Console progress bar that repaints a percentage in place.

    Example:
        pb = ProgressBar(n)
        for i in range(n):
            do_some_action()
            pb.advance()
    """

    def __init__(self, size, **opt):
        """size: total step count; opt: 'prompt' label, 'callback' hook."""
        label = opt.get('prompt', 'Progress: ')
        self.size = size
        self.unit = size / 100.0           # steps per percentage point
        self.percent = -1                  # last percentage printed
        self.index = 0                     # steps completed so far
        self.done = False
        self.start = time.time()
        self.text = '\r' + label + '%d%% '
        self.callback = opt.get('callback', None)

    def advance(self, n=None):
        """Advance one step (or jump to step *n*) and refresh the display."""
        if self.done:
            return
        self.index = self.index + 1 if n is None else n
        pct = int(self.index / self.unit)
        if pct > self.percent:
            # Repaint only when the integer percentage actually changed.
            sys.stdout.write(self.text % pct)
            sys.stdout.flush()
            self.percent = pct
            self.seconds = time.time() - self.start
            if self.callback:
                self.callback(self)
        if self.index >= self.size - 1:
            # Final update: 100%, newline, then the elapsed wall time.
            sys.stdout.write((self.text + '\n') % 100)
            sys.stdout.flush()
            self.done = True
            elapsed = time.time() - self.start
            print("Time: %.2f seconds" % elapsed)
|
995,810 | 0b759ad46bd5683f95e4df58acb36f450b1f70fe |
from ..handlers import action_handler
from ..buttonpress import load_state
# _______________________________________________________________________________________________________________________________
# Entity is an object that represent a single attempt by the computer to complete Sonic 3 and Knunkles
#
# Each Entity have the following attributes to them:
# action_list: This is a list of the actions, represented by an action object,
# that an entity took during its attempt at the game
# fitness: This represent the fitness of an entity and is used to determine
# how easily the entity will be able to reproduce during reproduction
# generation: This represent the generation the entity belongs to
# parents: This is a list that contains the parents to the entity.
# Will be empty if the entity belongs to Gen 0
# alive: This represent the entity's living status.
# Used to stop entities run if it dies during the game
# dnacap: This is the number of action an entities is allowed to take
# before its in game attempt is over
# master_ent: The entity that is used to play out an action sequences that
# was evaluated as being the fittess set of actions from previous
# generations' play attempts.
# _______________________________________________________________________________________________________________________________
class Entity:
    """One attempt by the computer at completing Sonic 3 & Knuckles.

    Attributes:
        action_list: actions taken during this attempt at the game
        fitness: score used to weight reproduction during evolution
        generation: generation number this entity belongs to
        parents: parent entities (empty/None for Gen 0)
        alive: whether the entity may still play
        dnacap: number of actions the entity may take per attempt
        master_ent: entity that replays the fittest known action prefix
    """

    def __init__(self, name, act_list=None, parents=None, dna_cap=7):
        self.name = name
        self.action_list = act_list or []
        self.fitness = 0
        self.generation = 0
        self.parents = parents
        self.alive = True
        self.dnacap = dna_cap
        self.master_ent = None

    def __str__(self):
        return f"""
        Entity {self.name} Overall Stats:
        Fitness Score: {self.fitness}
        Generation: {self.generation}
        Parents to this Entity: {self.parents}
        Entity's Status: {self.alive}
        """

    def play_game(self):
        """Run one attempt at the game, replaying master/previous actions first."""
        print(f' {self.name} is Training....')
        if not self.isAlive():
            print(f' A dead entity cannot play')
        else:
            load_state()
            master = self.getMasterEntity()
            # Replay the fittest known action prefix first, if one exists.
            if master is not None and master.getActionList() != []:
                action_handler.master_driver(master)
            # Then either replay this entity's own actions or generate new ones.
            if self.getActionList() != []:
                action_handler.replay_driver(self)
            else:
                action_handler.generate_driver(self)
        # Retry from scratch when the entity died before producing a full
        # DNA strand.
        if len(self.getActionList()) != self.getDNACap():
            print(' Entity did not produce enough DNA\n Giving Entity A Second Chace...')
            self.setActionList([])
            self.resurrect()
            self.play_game()
        print('')

    def isAlive(self):
        """Return True while the entity can still play."""
        return self.alive

    def died(self):
        """Mark the entity dead after an in-game death."""
        print(f' {self.name} is dead\n')
        self.alive = False

    def resurrect(self):
        """Revive the entity so it can reattempt the game."""
        self.alive = True

    # --- accessors ---------------------------------------------------------
    def getActionList(self):
        return self.action_list

    def getName(self):
        return self.name

    def getGeneration(self):
        return self.generation

    def getFitness(self):
        return self.fitness

    def getParents(self):
        return self.parents

    def getDNACap(self):
        return self.dnacap

    def getMasterEntity(self):
        return self.master_ent

    # --- mutators ----------------------------------------------------------
    def setActionList(self, new_act_list):
        self.action_list = new_act_list

    def setFitness(self, new_fitness):
        self.fitness = new_fitness

    def setParents(self, new_parents):
        self.parents = new_parents

    def setName(self, new_name):
        self.name = new_name

    def setGeneration(self, new_gen):
        self.generation = new_gen

    def setDNACap(self, new_dnacap):
        self.dnacap = new_dnacap

    def setMasterEntity(self, new_masterentity):
        self.master_ent = new_masterentity
|
995,811 | d41b111c9495029fd531ada841e4f09faccbf6c8 | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
import configparser
#读写配置文件类
class Config():
    """Thin read/write wrapper around a configparser-backed .ini file."""

    def __init__(self, file='config\config.ini'):
        # NOTE(review): the default path uses a Windows-style backslash;
        # confirm callers on other platforms pass an explicit path.
        self.file = file
        self.config = configparser.ConfigParser()
        self.config.read(file, encoding="utf-8")  # utf-8-sig for BOM files

    # 读取 (read a value)
    def get(self, section='', key=''):
        """Return the value at section/key, or False when it is missing."""
        try:
            return self.config[section][key]
        # Fixed: missing sections/keys raise KeyError, not
        # DuplicateSectionError, so the False fallback never fired before.
        except KeyError:
            return False

    # 设置 (set a value and persist)
    def set(self, section='', key='', value=''):
        """Set section/key to value, creating the section, then write to disk."""
        if not self.config.has_section(section):
            self.config.add_section(section)
        self.config.set(section, key, value)
        # Write via a context manager so the handle is always closed
        # (the original leaked the open file object).
        with open(self.file, "w", encoding="utf-8") as handle:
            self.config.write(handle)
# Module is used as a library; nothing to do when run directly.
if __name__ == '__main__':
    pass
|
995,812 | 6fe994ec528290deb7135050b56340c246d8563e | # -*- coding: utf-8 -*-
"""
Created on Thu May 9 16:00:20 2019
@author: Dinesh Prajapat
"""
# Demo: materialize the same number sequence as both a tuple and a list.
user_input = (2, 4, 7, 8, 9, 12)   # explicit parentheses around the tuple literal
empty_tuple = tuple(user_input)    # tuple copy (name kept from the original)
new_list = list(user_input)        # mutable list copy
print(new_list)
print(empty_tuple)
|
995,813 | 97735ab99719a1d00736ad63a3a6cb20448b39a3 | import argparse
import sys
import enum
import numpy as np
from typing import Tuple
from PIL import Image, ImageDraw, ImageFont
class Color(enum.IntEnum):
    """Grayscale pixel values used when drawing and reading the mask."""
    white = 255  # background / letter interior
    black = 0    # outline / blocked cells
def _create_fitting_image(text: str, font: ImageFont.ImageFont, bordersize: int, fill_color: Color = Color.white) -> Image.Image:
    """
    Create an image that fits the given text.
    :param text: Text to draw on image
    :param font: PIL.ImageFont instance
    :param bordersize: Size of the border around the text in pixels
    :param fill_color: Color of the image background
    :return: PIL.Image instance large enough to fit text on it with a distance border_size to the border of the image
             in every direction
    """
    # Start with a 1x1 canvas only to obtain a Draw object for measuring.
    img_size = (1, 1)
    img = Image.new("L", img_size, color=fill_color)
    draw = ImageDraw.Draw(img)
    # Measure the rendered text's bounding box with the actual font.
    left, top, right, bottom = draw.textbbox((0, 0), text, font)
    textwidth, textheight = right - left, bottom - top
    img_size = (textwidth + 2 * bordersize, textheight + 2 * bordersize)
    # Resizing the uniform 1x1 image yields a uniform image of the final size.
    img = img.resize(img_size)
    return img
def _draw_text_outline(img: Image.Image, font: ImageFont.ImageFont, text: str, fill_color: Color, line_color: Color, origin: Tuple[int, int]) -> None:
    """
    Draw text on image with an outline around the text.
    :param img: PIL.Image instance on which to draw
    :param font: PIL.ImageFont to use for drawing the text
    :param text: String to draw on image
    :param fill_color: Color of the text
    :param line_color: Color of the outline
    :param origin: (xy) coordinate tuple where the top left point of the text bounding box should be placed
    """
    draw = ImageDraw.Draw(img)
    # set to render text unaliased (https://mail.python.org/pipermail/image-sig/2005-August/003497.html)
    draw.fontmode = "1"
    # correct for font offset
    x_offset, y_offset, _, _ = font.getbbox(text)
    # These values were tweaked by hand to get a better centered text
    x, y = (origin[0]-2*x_offset, origin[1]-y_offset//2)
    origin = (x, y)
    # draw border: render the text four times, shifted one pixel in each
    # cardinal direction, in the outline color
    draw.text((x - 1, y), text, font=font, fill=line_color)
    draw.text((x + 1, y), text, font=font, fill=line_color)
    draw.text((x, y - 1), text, font=font, fill=line_color)
    draw.text((x, y + 1), text, font=font, fill=line_color)
    # draw text over it
    draw.text(origin, text, font=font, fill=fill_color)
def text_mask(text: str, fontsize: int, bordersize: int = 32, invert: bool = False) -> Image.Image:
    """
    Create mask image from text. A mask image is used during maze creation to mark areas where the algorithm
    won't go. Black pixels mark cells the algorithm won't move into, i.e. the represent walls.
    :param text: String to draw on image. Image will be created with the correct size to fit the text.
                 Converted to lower case before drawing.
    :param fontsize: Font size to use for the text.
    :param bordersize: Thickness of space around the text bounding box to the image border in pixels.
    :param invert: If False (default), the letters will be white inside, with a black outline,
                   so a maze can be generated inside them.
                   If true, the letters will be full black and the maze can be generated around the text.
    :return: PIL.Image instance with the text drawn as specified.
    """
    text = text.lower()
    # Inverting swaps which of text fill and outline acts as the wall color.
    if invert:
        fill_color = Color.black
        line_color = Color.white
    else:
        fill_color = Color.white
        line_color = Color.black
    try:
        # TODO add option to load other font
        font = ImageFont.truetype("fonts/Unicorn.ttf", size=fontsize)
    except IOError:
        sys.exit("Please place Unicorn.ttf, containing the Unicorn font made by Nick Curtis in the fonts folder.")
    img = _create_fitting_image(text, font, bordersize)
    _draw_text_outline(img, font, text, fill_color, line_color, origin=(bordersize, bordersize))
    return img
def text_mask_to_boolarray(img: "Image.Image", invert: bool = False):
    """Convert a grayscale mask image (or array-like) to a boolean array.

    :param img: grayscale image/array; values >= 128 count as white.
    :param invert: when True, flip the mask (white becomes False).
    :return: numpy bool array, True where the pixel is (non-inverted) white.
    """
    arr = np.array(img)
    # Compare with threshold to get bool array
    bool_mask = arr >= 128
    # Fixed: `invert` was accepted but silently ignored before.
    if invert:
        bool_mask = ~bool_mask
    return bool_mask
def find_start(img: Image):
    """Scan rightwards at mid-height for the first pixel inside the text.

    Returns an (x, y) coordinate just past the outline, inside a letter.
    NOTE(review): assumes the mid-height row crosses an outline followed by
    text; an all-white row would scan past the image edge and raise — confirm
    callers only pass masks produced by text_mask().
    """
    x, y = 0, int(img.height / 2)
    # Go right until the outline is hit
    while img.getpixel((x, y)) == Color.white:
        x += 1
    # Go over the outline into the text
    while img.getpixel((x, y)) == Color.black:
        x += 1
    return x, y
def save_image_to_disk(image: Image.Image, filename: str) -> None:
    """
    Save given image to disk; the format is inferred from the extension.
    :param image: PIL.Image instance to save.
    :param filename: Filename under which to save the image.
    :return: None
    """
    image.save(filename)
if __name__ == '__main__':
    # CLI entry point: build a mask image from a text string.
    parser = argparse.ArgumentParser(description="Generate a mask image to be used with the maze generator."
                                                 " This script creates a mask image from the text string."
                                                 "Areas in black will be treated as walls by the algorithm, acting as a"
                                                 "border of the maze generation.")
    parser.add_argument("text", type=str, help="Text to be used as mask. Will be converted to lower case")
    parser.add_argument("-f", "--filename", type=str, default="mask.png",
                        help="Filename under which the mask image will be saved. ")
    parser.add_argument("-s", "--fontsize", type=int, default=32,
                        help="Font size in points to use for text. A size <16 will be too small for letters to connect"
                             " with the default Unicorn font.")
    parser.add_argument("-b", "--bordersize", type=int, default=32,
                        help="Thickness of space around the text bounding box to the image border in pixels.")
    parser.add_argument("-i", "--invert", action="store_true",
                        help="Flag argument. If not specified (default) the letters will be white with a black border. "
                             "This means the maze can be generated within the letters. "
                             "Otherwise, if this option is specified, the letters will be the completely black and the "
                             "maze will be generated around them.")
    args = parser.parse_args()
    # Ensure a .png extension so PIL writes the expected format.
    if not args.filename.endswith(".png"):
        args.filename = f"{args.filename}.png"
    img = text_mask(args.text, args.fontsize, args.bordersize, args.invert)
    save_image_to_disk(img, args.filename)
    # TODO: add script that converts an image to the expected format: bw, 8bpp
|
995,814 | 3924627d6d08dcbbb5b675debc4528703b77d14e | #!/usr/bin/env python
"""Aligns reads in FASTQ files to reference genome."""
import argparse
import glob
import os
import subprocess
import sys
__author__ = 'Alexis Blanchet-Cohen'
__date__ = '2013-01-10'
# Parse arguments
# Parse arguments: genome build, strandedness, trimming status and CPU count.
parser = argparse.ArgumentParser(description='Aligns reads in FASTQ files to reference genome.')
parser.add_argument("-g", "--genome", help="reference genome")
parser.add_argument("-s", "--stranded", help="strandedness", default="yes")
parser.add_argument("-t", "--trimmed", help="FASTQ files trimmed", default="no")
parser.add_argument("-p", "--processors", help="number of processes")
args = parser.parse_args()
# Set the path to the genome and annotations: map each supported build to
# its reference directory (replaces the original long if/elif chain).
_GENOME_PATHS = {
    "GRCh37": "/stockage/genomes/Homo_sapiens/Ensembl/GRCh37",
    "hg19": "/stockage/genomes/Homo_sapiens/UCSC/hg19",
    "NCBIM37": "/stockage/genomes/Mus_musculus/Ensembl/NCBIM37",
    "GRCm38": "/stockage/genomes/Mus_musculus/Ensembl/GRCm38",
    "mm10": "/stockage/genomes/Mus_musculus/UCSC/mm10",
    "sacCer3": "/stockage/genomes/Saccharomyces_cerevisiae/UCSC/sacCer3",
    "dm3": "/stockage/genomes/Drosophila_melanogaster/UCSC/dm3",
    "BDGP5.25": "/stockage/genomes/Drosophila_melanogaster/Ensembl/BDGP5.25",
    "ce10": "/stockage/genomes/Caenorhabditis_elegans/UCSC/ce10",
    "umd3": "/stockage/genomes/Bos_taurus/Ensembl/UMD3.1",
}
try:
    args.genomePath = _GENOME_PATHS[args.genome]
except KeyError:
    # Same message/exit behavior as the original else branch.
    sys.exit("The genome " + args.genome + " does not exist")
# Work from a dedicated tophat output directory.
os.mkdir("tophat")
os.chdir("tophat")
# Recover the FASTQ file names and their corresponding sample names.
# Each non-empty line of sampleNames.txt is "<fastq-file>\t<sample-name>".
fileNames = []
sampleNames = []
with open("../sampleNames.txt") as sampleNamesFile:
    for line in sampleNamesFile:
        line = line.rstrip()  # Remove trailing whitespace.
        if line:              # Only process non-empty lines.
            fields = line.split('\t')
            fileNames.append(fields[0])
            # Fixed: original had an undefined `samplesNames` name and a
            # missing closing parenthesis (syntax error) on this line.
            sampleNames.append(fields[1])
##########
# TopHat #
##########
# FASTQ files come in R1/R2 pairs; each pair shares one sample name.
for i in range(0, len(fileNames) // 2):
    sampleName = sampleNames[i * 2]
    fileNameR1 = fileNames[i * 2]
    fileNameR2 = fileNames[i * 2 + 1]
    # Make the sample directory for the TopHat output.
    os.makedirs("../../tophat/" + sampleName)
    # NOTE(review): `tophatScriptName` was undefined in the original; derive
    # a per-sample script name here — confirm the expected naming scheme.
    tophatScriptName = sampleName + "_tophat.sh"
    tophatScript = open(tophatScriptName, "w")
    tophatScript.write("tophat2 --rg-library \"L\" --rg-platform \"ILLUMINA\" --rg-platform-unit \"X\" --rg-sample \"" + sampleName + "\" --rg-id \"runX\"")
    if (args.genome == "ce10"):
        tophatScript.write(" --min-intron-length 30 --max-intron-length 30000")
    tophatScript.write(" --no-novel-juncs -p " + args.processors)
    # NOTE(review): original compared an undefined name `stranded`; assuming
    # the CLI flag was intended. Its default is "yes", not "stranded" — confirm
    # which value signals a stranded library.
    if (args.stranded == "stranded"):
        tophatScript.write(" --library-type fr-firststrand")
    # Fixed: args.genomePath was embedded literally inside the strings below
    # and never expanded.
    tophatScript.write(" -G " + args.genomePath + "/Annotation/Genes/genes.gtf")
    tophatScript.write(" -o ../../tophat/" + sampleName)
    tophatScript.write(" " + args.genomePath + "/Sequence/Bowtie2Index/genome")
    # NOTE(review): --trimmed defaults to "no"/"yes"; "untrimmed" never
    # matches the default — confirm the intended sentinel values.
    if (args.trimmed == "untrimmed"):
        tophatScript.write(" ../../../FASTQ/untrimmed/" + fileNameR1)
        tophatScript.write(" ../../../FASTQ/untrimmed/" + fileNameR2)
    else:
        tophatScript.write(" ../../../FASTQ/trimmed/" + fileNameR1)
        tophatScript.write(" ../../../FASTQ/trimmed/" + fileNameR2)
    # Fixed: `tophatScript.write(TOPHAT " &> " ...)` was a syntax error with
    # an undefined TOPHAT token; append the output redirection directly.
    tophatScript.write(" &> " + tophatScriptName + ".sh_output")
    tophatScript.close()
# Hand the generated job scripts to the scheduler.
subprocess.call("submitJobs.py")
|
995,815 | 4f0c56ea84d8fa76b4872151f5bc48905df4aa08 | from pymysql import escape_string
from app import mysql
def lookup_beer(search_term, is_id):
    """Queries the MySQL database for a specific beer by name or id"""
    # Match on the id column only when is_id is literally True;
    # any other value falls back to a name lookup.
    if is_id is True:
        column = 'id'
    else:
        column = 'name'
    # Column name is one of two hard-coded values; the term is escaped.
    sql = """
    SELECT `id`, `name`, `styleid`, `abv`
    FROM `beers`
    WHERE `%s` = '%s'
    """ % (column, escape_string(search_term))
    return mysql.query(sql)
|
995,816 | 0d3cc66d381d0ea4e45f65b6e42ce5ca96087a56 | import itertools
import os
# import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
# Report library versions and GPU availability up front.
print("TF version:", tf.__version__)
print("Hub version:", hub.__version__)
# NOTE(review): tf.test.is_gpu_available() is deprecated in newer TF;
# tf.config.list_physical_devices('GPU') is the replacement — confirm version.
print("GPU is", "available" if tf.test.is_gpu_available() else "NOT AVAILABLE")
# (architecture family, input edge length in pixels) for the TF-Hub module.
module_selection = ("efficientnet", 224)
handle_base, pixels = module_selection
# URL of the pre-trained EfficientNet-B0 classifier on TF-Hub.
MODULE_HANDLE ="https://tfhub.dev/google/{}/b0/classification/1".format(handle_base)
IMAGE_SIZE = (pixels, pixels)
print("Using {} with input size {}".format(MODULE_HANDLE, IMAGE_SIZE))
from config import BATCH_SIZE
def prepare_data():
    """Download the TF flowers dataset and build train/validation generators.

    Returns a (train_generator, valid_generator) pair of Keras
    ImageDataGenerator flows resized to IMAGE_SIZE, rescaled to [0, 1],
    with an 80/20 train/validation split.
    """
    data_dir = tf.keras.utils.get_file(
        'flower_photos',
        'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
        untar=True)
    datagen_kwargs = dict(rescale=1./255, validation_split=.20)
    dataflow_kwargs = dict(target_size=IMAGE_SIZE, batch_size=BATCH_SIZE,
                       interpolation="bilinear")
    valid_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        **datagen_kwargs)
    valid_generator = valid_datagen.flow_from_directory(
        data_dir, subset="validation", shuffle=True, **dataflow_kwargs)
    # Augmentation is hard-disabled here; flip to True to train with random
    # rotations, shifts, shears, zooms and flips.
    do_data_augmentation = False
    if do_data_augmentation:
        train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
            rotation_range=40,
            horizontal_flip=True,
            width_shift_range=0.2, height_shift_range=0.2,
            shear_range=0.2, zoom_range=0.2,
            **datagen_kwargs)
    else:
        train_datagen = valid_datagen
    train_generator = train_datagen.flow_from_directory(
        data_dir, subset="training", shuffle=True, **dataflow_kwargs)
    return train_generator, valid_generator
def build_model(n_classes, do_fine_tuning = False):
    """Build and compile a classifier: TF-Hub feature body + dense head.

    :param n_classes: number of output classes
    :param do_fine_tuning: if True, the hub layer's weights are trainable
    :return: compiled tf.keras.Sequential model
    """
    model = tf.keras.Sequential([
        # Explicitly define the input shape so the model can be properly
        # loaded by the TFLiteConverter
        tf.keras.layers.InputLayer(input_shape=IMAGE_SIZE + (3,)),
        hub.KerasLayer(MODULE_HANDLE, trainable=do_fine_tuning),
        tf.keras.layers.Dropout(rate=0.2),
        tf.keras.layers.Dense(n_classes,
                              kernel_regularizer=tf.keras.regularizers.l2(0.0001))
    ])
    model.build((None,)+IMAGE_SIZE+(3,))
    # model.summary()
    # NOTE(review): SGD's `lr` argument is deprecated in newer TF/Keras in
    # favour of `learning_rate` — confirm against the pinned TF version.
    model.compile(
        optimizer=tf.keras.optimizers.SGD(lr=0.005, momentum=0.9),
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True, label_smoothing=0.1),
        metrics=['accuracy'])
    return model
def train(model, train_generator, valid_generator, do_fine_tuning = False):
    """Fit *model* on the generators for 5 epochs.

    :param model: compiled tf.keras model
    :param train_generator: generator yielding training batches
    :param valid_generator: generator yielding validation batches
    :param do_fine_tuning: unused here; kept for interface compatibility
    :return: the Keras training history dict (per-epoch loss/metrics)
    """
    print("Building model with", MODULE_HANDLE)
    steps_per_epoch = train_generator.samples // train_generator.batch_size
    validation_steps = valid_generator.samples // valid_generator.batch_size
    hist = model.fit(
        train_generator,
        epochs=5, steps_per_epoch=steps_per_epoch,
        validation_data=valid_generator,
        validation_steps=validation_steps).history
    # Fixed: `hist` was computed but never returned, so callers could not
    # inspect the training curves.
    return hist
from utils import save_model
def get_class_string_from_index(index):
    """Map a class index back to its label string (or None if unknown).

    NOTE(review): reads the module-global `valid_generator` created in the
    __main__ block below — passing the generator in explicitly would be
    safer; confirm no other caller exists.
    """
    for class_string, class_index in valid_generator.class_indices.items():
        if class_index == index:
            return class_string
def inference(model, valid_generator):
    """Predict the class of one validation image and print true vs predicted.

    :param model: trained tf.keras model
    :param valid_generator: generator yielding (images, one-hot labels) batches
    """
    x, y = next(valid_generator)
    image = x[0, :, :, :]
    true_index = np.argmax(y[0])
    # plt.imshow(image)
    # plt.axis('off')
    # plt.show()
    # Expand the validation image to (1, 224, 224, 3) before predicting the label
    prediction_scores = model.predict(np.expand_dims(image, axis=0))
    predicted_index = np.argmax(prediction_scores)
    print("True label: " + get_class_string_from_index(true_index))
    print("Predicted label: " + get_class_string_from_index(predicted_index))
if __name__ == "__main__":
    # End-to-end: load data, build the classifier, save it, and run one
    # sample prediction. (The training call is intentionally commented out.)
    train_generator, valid_generator = prepare_data()
    model = build_model(n_classes=train_generator.num_classes)
    # train(model, train_generator, valid_generator)
    save_model(model)
    inference(model, valid_generator)
|
995,817 | 43a4c3e05754ee0951ebc0acdf03fafde4b4cac8 | from os import path
import sqlite3
import csv
import glob
# Path to an unmodified FitNotes backup (a SQLite database file).
fitnotes_db_path = '~/Downloads/unmodified.fitnotes'
# Glob pattern for the Withings weight-export CSV.
data_csv = '~/Downloads/withings.csv'
# "2015/02/01" (legacy, unused variant for slash-separated dates)
def date_format_(input_date):
    """Normalize a slash-separated date to 'Y-m-d 12:00:00'."""
    m, d, y = input_date.split('/')
    # zfill pads single-digit month/day to two characters.
    return f'{y}-{m.zfill(2)}-{d.zfill(2)} 12:00:00'
# "2014-08-29 6:20 PM" (date, clock time, AM/PM marker)
def date_format(input_date):
    """Normalize a Withings timestamp to 'Y-m-d 12:00:00', dropping the time."""
    date_part, _clock, _am_pm = input_date.split(' ')
    m, d, y = date_part.split('-')
    # zfill pads single-digit month/day to two characters.
    return f'{y}-{m.zfill(2)}-{d.zfill(2)} 12:00:00'
# Target schema:
# CREATE TABLE BodyWeight
# (
#     _id INTEGER PRIMARY KEY AUTOINCREMENT,
#     date TEXT NOT NULL,
#     body_weight_metric REAL NOT NULL,
#     body_fat REAL NOT NULL,
#     comments TEXT
# )
def new_data(path):
    """Yield one BodyWeight INSERT statement per row of the Withings CSV."""
    with open(path, 'r') as file:
        for row in csv.DictReader(file):
            pounds = float(row['Weight'])
            when = date_format(row['Date'])
            # Pounds -> kilograms.
            kilos = pounds * 0.45359290943563975
            yield ('INSERT INTO BodyWeight (date, body_weight_metric, body_fat, comments)'
                   ' VALUES ("%s" , %.14f, 0.0, "")' % (when, kilos))
# Resolve the CSV glob (expanding '~') and take the first match.
csv_files = glob.glob(path.expanduser(data_csv))
data_path = csv_files[0]
dbname = path.expanduser(fitnotes_db_path)
conn = sqlite3.connect(dbname)
c = conn.cursor()
# Insert every weight reading from the CSV into the FitNotes database.
for command in new_data(data_path):
    c.execute(command)
conn.commit()
conn.close()
|
995,818 | 00f92c618753214f3e1ac3fdd6d0bc18f1e9c22e | from solutions import is_number_palindrome
import unittest
class TestIsNumberPalindrome(unittest.TestCase):
    """Unit tests for is_number_palindrome covering digit parity and edges."""

    def test_positive_even(self):
        # Palindrome with an even number of digits.
        self.assertEqual(is_number_palindrome(123321), True)

    def test_positive_odd(self):
        # Palindrome with an odd number of digits (middle pivot).
        self.assertEqual(is_number_palindrome(123454321), True)

    def test_negative_even(self):
        # Non-palindrome with an even number of digits.
        self.assertEqual(is_number_palindrome(1234), False)

    def test_negative_odd(self):
        # Non-palindrome with an odd number of digits.
        self.assertEqual(is_number_palindrome(123), False)

    def test_one_digit(self):
        # Every one-digit number is a palindrome.
        self.assertEqual(is_number_palindrome(3), True)

    def test_0(self):
        # Zero is a palindrome too.
        self.assertEqual(is_number_palindrome(0), True)
# Discover and run the tests above when the file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
995,819 | 6035decd2dc1320b50c3b598144d881aafb76bb3 | import json
import pytest
from app import app
from app.tests.user_test import login_as_head_company
from app.api.department.apiview import department_read_by_pk, department_update_by_pk
@pytest.fixture
def client():
    """Yield a Flask test client with TESTING enabled, inside an app context."""
    app.config['TESTING'] = True
    with app.app_context():
        with app.test_client() as client:
            yield client
def test_login(client):
    """Logging in as the head of company yields a JSON response."""
    payload = login_as_head_company()
    response_login = client.post('/user/login/', headers={"Content-Type": "application/json"}, data=payload)
    assert response_login.content_type == 'application/json'
def test_read(client):
    """Listing departments succeeds for an authenticated head-of-company."""
    creds = login_as_head_company()
    login_resp = client.post('/user/login/', headers={"Content-Type": "application/json"}, data=creds)
    token = json.loads(login_resp.data)['auth_token']
    read_resp = client.get('/department/read/',
                           headers={'Authorization': 'Bearer ' + token})
    assert login_resp.content_type == 'application/json'
    assert isinstance(token, str)
    assert login_resp.status_code == 200
    assert read_resp.status_code == 200
def test_read_one(client):
    """Reading department 1 matches the direct DB helper's record."""
    creds = login_as_head_company()
    login_resp = client.post('/user/login/', headers={"Content-Type": "application/json"}, data=creds)
    token = json.loads(login_resp.data)['auth_token']
    read_resp = client.get('/department/read/1/',
                           headers={'Authorization': 'Bearer ' + token})
    expected = {'department': department_read_by_pk(1)}
    assert login_resp.content_type == 'application/json'
    assert isinstance(token, str)
    assert login_resp.status_code == 200
    assert read_resp.json == expected
    assert read_resp.status_code == 200
def test_update_by_pk(client):
    """PUT /department/update/1/ renames the department and reports success."""
    creds = login_as_head_company()
    login_resp = client.post('/user/login/', headers={"Content-Type": "application/json"}, data=creds)
    token = json.loads(login_resp.data)['auth_token']
    update_resp = client.put('/department/update/1/',
                             data=json.dumps({"name": "Department1", "office_id": 1}),
                             content_type='application/json',
                             headers={'Authorization': 'Bearer ' + token})
    assert login_resp.content_type == 'application/json'
    assert isinstance(token, str)
    assert login_resp.status_code == 200
    assert update_resp.json == {'message': 'department updated successfully'}
    assert update_resp.status_code == 200
|
995,820 | 315928f7b2d2330c40318a01b29eda624cb82ea1 | #!/usr/bin/python
"""
This program to take the messages from Kafka and do read repair for Scylla.
"""
__author__ = "Anshita Saxena"
__copyright__ = "(c) Copyright IBM 2020"
__contributors__ = "Benu Mohta, Meghnath Saha"
__credits__ = ["BAT DMS IBM Team"]
__email__ = "anshita333saxena@gmail.com"
__status__ = "Production"
# Import the required libraries
# Import the sys library to parse the arguments
import sys
# Import the parsing library
import configparser
# Import the json library
import json
# Import the logging library
import logging
import logging.config
# Import traceback library
import traceback
# Import the time library
import time
# Import requests for slack access
import requests
# Import the required concurrency libraries
from six.moves import queue
# Import the Scylla libraries
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
from cassandra import ConsistencyLevel
# Import the Kafka libraries
from kafka import KafkaConsumer
from kafka import TopicPartition
# Initialising the configparser object to parse the properties file
CONFIG = configparser.ConfigParser()
# NOTE(review): `global` at module scope is a no-op; failed_msg only exists
# once some function actually assigns it — confirm intended usage.
global failed_msg
# Set the logging criteria for the generated logs
LOGFILENAME = '/root/kafka-read-repair-automation/logs/read_repair_automation.log'
logging.config.fileConfig(fname='/root/kafka-read-repair-automation/conf/log_config.conf',
                          defaults={'logfilename': LOGFILENAME},
                          disable_existing_loggers=False)
# Get the logger specified in the file
logger = logging.getLogger(__name__)
def set_env(p_app_config_file):
    """Read all application parameters from the given .ini file.

    :param p_app_config_file: path to the parameter .ini file
    :return: (user, password, node, port, topic_name, kafka_servers, group_id,
        enable_auto_commit, auto_offset_reset, slack_token,
        slack_channel_name, keyspace, control_connection_timeout,
        connect_timeout, max_partition_fetch_bytes, concurency_level)
    :raises Exception: if any parameter is missing or has the wrong type
    """
    try:
        # Reading configuration parameters from .ini file.
        CONFIG.read(p_app_config_file)
        params = CONFIG['ApplicationParams']
        # Username / password for the Scylla DB database
        user = str(params['user'])
        password = str(params['password'])
        # Comma-separated Scylla contact points -> list of node addresses
        node = str(params['node']).split(",")
        # Port for the Scylla DB connection
        port = int(params['port'])
        # Kafka topic / broker / consumer-group settings
        topic_name = str(params['topic_name'])
        kafka_servers = str(params['bootstrap_servers'])
        group_id = str(params['group_id'])
        # NOTE(review): eval() on a config value executes arbitrary code if
        # the .ini file is attacker-controlled; CONFIG.getboolean() would be
        # safer — kept for behavior compatibility.
        enable_auto_commit = eval(params['enable_auto_commit'])
        # Always process from the configured offset-reset policy
        auto_offset_reset = str(params['auto_offset_reset'])
        # Slack notification settings
        slack_token = str(params['slack_token'])
        slack_channel_name = str(params['slack_channel_name'])
        # Scylla DB keyspace name (schema name)
        keyspace = str(params['keyspace'])
        # Fixed: the original split these keys with a backslash line
        # continuation INSIDE the string literal, which embedded the
        # indentation spaces into the key name and always raised KeyError.
        control_connection_timeout = float(params['control_connection_timeout'])
        connect_timeout = int(params['connect_timeout'])
        # Maximum bytes retrieved from the Kafka topic per partition
        max_partition_fetch_bytes = int(params['max_partition_fetch_bytes'])
        # Number of queries that can be executed concurrently
        concurency_level = int(params['concurency_level'])
    except Exception as e:
        # Chain the original failure so the root cause stays in the traceback.
        raise Exception('Exception encountered in set_env() while '
                        'setting up application configuration parameters.') from e
    return \
        user, password, node, port, topic_name, kafka_servers, group_id, \
        enable_auto_commit, auto_offset_reset, slack_token, \
        slack_channel_name, keyspace, control_connection_timeout, \
        connect_timeout, max_partition_fetch_bytes, concurency_level
def extract_command_params(arguments):
    """Return the config-file path from the command-line argument vector.

    :param arguments: sys.argv-style list (script name + one config path)
    :raises Exception: when the argument count is not exactly two
    """
    if len(arguments) != 2:
        raise Exception('Illegal number of arguments. '
                        'Usage: '
                        'python read_repair_utility.py conf/parameter.ini')
    return arguments[1]
def scylla_connection(user, password, node, port, keyspace,
                      control_connection_timeout, connect_timeout):
    """Open a Scylla/Cassandra session and prepare per-table SELECT statements.

    Populates the module globals (cluster, session, table_*_stmt) used later
    by scylla_operation(). All prepared statements run at consistency ALL so
    each read touches (and repairs) every replica.

    :param user: DB username; falsy value skips authentication entirely
    :param password: DB password
    :param node: list of contact-point addresses
    :param port: CQL port
    :param keyspace: schema owning the queried tables
    :param control_connection_timeout: driver control-connection timeout
    :param connect_timeout: socket connect timeout
    """
    global auth_provider, cluster, session, table_a_stmt, table_b_stmt, \
        table_c_stmt, table_d_stmt, table_e_stmt
    # Creating a ScyllaDB connection (authenticated only when a user is set)
    if user:
        auth_provider = PlainTextAuthProvider(username=user,
                                              password=password)
        cluster = Cluster(
            auth_provider=auth_provider,
            contact_points=node, port=port,
            connect_timeout=connect_timeout,
            control_connection_timeout=control_connection_timeout)
    else:
        cluster = Cluster(
            contact_points=node, port=port,
            connect_timeout=connect_timeout,
            control_connection_timeout=control_connection_timeout)
    session = cluster.connect()
    # NOTE(review): this assigns a *local* that is never used; the module
    # global failed_msg is not affected — confirm whether it was meant to
    # reset the global.
    failed_msg = 0
    # Creating the prepared statements and setting the consistency level
    session.default_consistency_level = ConsistencyLevel.ALL
    # table_a_stmt is the table name in Scylla DB
    # columna is the primary key column in table_a_stmt
    table_a_stmt = session.prepare(
        "SELECT * FROM " + keyspace + ".tablea WHERE columna=?")
    table_a_stmt.consistency_level = ConsistencyLevel.ALL
    # table_b_stmt is the table name in Scylla DB
    # columnb is the primary key column in table_b_stmt
    table_b_stmt = session.prepare(
        "SELECT * FROM " + keyspace + ".tableb WHERE columnb=?")
    table_b_stmt.consistency_level = ConsistencyLevel.ALL
    # table_c_stmt is the table name in Scylla DB
    # columnc is the primary key column in table_c_stmt
    table_c_stmt = session.prepare(
        "SELECT * FROM " + keyspace + ".tablec WHERE columnc=?")
    table_c_stmt.consistency_level = ConsistencyLevel.ALL
    # table_d_stmt is the table name in Scylla DB
    # columnd is the primary key column in table_d_stmt
    table_d_stmt = session.prepare(
        "SELECT * FROM " + keyspace + ".tabled WHERE columnd=?")
    table_d_stmt.consistency_level = ConsistencyLevel.ALL
    # table_e_stmt is the table name in Scylla DB
    # columne is the primary key column in table_e_stmt
    table_e_stmt = session.prepare(
        "SELECT * FROM " + keyspace + ".tablee WHERE columne =?")
    table_e_stmt.consistency_level = ConsistencyLevel.ALL
    logging.info('Cluster Connection setup is done!')
def scylla_operation(items, concurency_level):
    """Build the Scylla lookup statements for one Kafka message and execute
    them asynchronously, at most `concurency_level` queries in flight.

    :param items: a consumed Kafka message; ``items.value`` is expected to be
        a JSON document with 'columna', 'columnc', 'condition_a_field' and
        'condition_b_field' keys ('condition_c_field' when applicable).
    :param concurency_level: maximum number of in-flight async queries.
    :return: list containing "passed" or "failed" for this message.
    """
    # Bound statement for the Async Messages
    execution_stmt = []
    # Loading message in JSON object
    query_status = []
    # print('Message processing: ' + items.value)
    message_json_info = 'Message processing: ' + items.value
    logging.info(message_json_info)
    json_message = json.loads(items.value)
    # Loading IDs and bind them in the statement
    columnas = json_message['columna']
    columna_info = 'Number of IDs: ' + str(len(columnas))
    logging.info(columna_info)
    if len(columnas) > 0:
        for columna in columnas:
            # Id values are same for tablea and tableb
            tablea_lookup_stmt = table_a_stmt.bind([columna])
            execution_stmt.append(tablea_lookup_stmt)
            table_a_b_lookup_stmt = table_b_stmt.bind([columna])
            execution_stmt.append(table_a_b_lookup_stmt)
    # Loading AUIs and bind them in the statement
    columncs = json_message['columnc']
    columnc_info = 'Number of IDs: ' + str(len(columncs))
    logging.info(columnc_info)
    condition_a_field = json_message['condition_a_field']
    condition_b_field = json_message['condition_b_field']
    if len(columncs) > 0:
        for columnc in columncs:
            tablec_lookup_stmt = table_c_stmt.bind([columnc])
            execution_stmt.append(tablec_lookup_stmt)
            # Id values are same for tableb and tablec
            table_b_c_lookup_stmt = table_b_stmt.bind([columnc])
            execution_stmt.append(table_b_c_lookup_stmt)
            # NOTE(review): parses as "truthy(condition_b_field) OR
            # (condition_a_field == 'field_value_a')" — confirm the left-hand
            # side was not meant to be compared against a value as well.
            if condition_b_field or condition_a_field == 'field_value_a':
                tabled_lookup_stmt = table_d_stmt.bind([columnc])
                execution_stmt.append(tabled_lookup_stmt)
    # Loading Principal ID and bind it into the statement
    if condition_a_field == 'field_value_b':
        principal_id = json_message['condition_c_field']
        tablee_lookup_stmt = table_e_stmt.bind([principal_id])
        execution_stmt.append(tablee_lookup_stmt)
    """
    Clear Queue Function to clear the queries from the object
    once the desired CONCURRENCY LEVEL reached
    """
    def clear_queue():
        # Drain the futures queue, blocking on each result; queue.Empty
        # signals that every future queued so far has been consumed.
        while True:
            try:
                futures.get_nowait().result()
            except queue.Empty:
                # NOTE(review): this assignment creates a variable local to
                # clear_queue; the enclosing function's failed_msg (which is
                # itself never read) is not updated by it.
                logger.debug(traceback.format_exc())
                failed_msg = 1
                break
    # To check the start time of query execution
    start = time.time()
    # Execute queries with user provided concurrency level
    futures = queue.Queue(maxsize=concurency_level)
    try:
        for stmt in execution_stmt:
            try:
                # Execute asynchronously
                future = session.execute_async(stmt)
                logging.info(stmt)
            except Exception as e:
                # NOTE(review): if this fires on the very first statement,
                # 'future' below is unbound and put_nowait raises NameError
                # (caught by the outer except as a "failed" message).
                failed_msg = 1
                logging.error(
                    'Exception occurred at executing the asynchronous query')
                logger.debug(traceback.format_exc())
            try:
                futures.put_nowait(future)
            except queue.Full:
                # Concurrency cap reached: wait for outstanding queries first.
                clear_queue()
                futures.put_nowait(future)
        clear_queue()
        query_status.append("passed")
    except Exception as e:
        query_status.append("failed")
        failed_msg = 1
        logging.error('Exception occurred at executing the message queries')
        logger.debug(traceback.format_exc())
    # To check the end time of query execution
    end = time.time()
    logging.info("Query Execution Finished for the Message!")
    query_finish_info = 'Finished executing queries with a concurrency level' \
                        ' of ' + str(concurency_level) + ' in ' \
                        + str(end - start)
    logging.info(query_finish_info)
    return query_status
def process_messages(user, password, node, port, topic_name, kafka_servers,
                     group_id, enable_auto_commit, auto_offset_reset,
                     slack_token, slack_channel_name, keyspace,
                     control_connection_timeout, connect_timeout,
                     max_partition_fetch_bytes, concurency_level):
    """
    Connect to Scylla and Kafka, compute the consumer-group lag for every
    partition of `topic_name`, replay each lagging message through
    scylla_operation(), and finally post a success/failure summary to Slack.

    :param max_partition_fetch_bytes:
    :param concurency_level:
    :param connect_timeout:
    :param control_connection_timeout:
    :param slack_channel_name:
    :param slack_token:
    :param keyspace:
    :param auto_offset_reset:
    :param enable_auto_commit:
    :param group_id:
    :param kafka_servers:
    :param topic_name:
    :param port:
    :param node:
    :param password:
    :param user
    :return: None
    """
    # Creating a Scylla Connection
    scylla_connection(
        user, password, node, port, keyspace,
        control_connection_timeout, connect_timeout)
    # Creating the Kafka Consumer Group
    kafka_consumer = KafkaConsumer(
        bootstrap_servers=(kafka_servers),
        group_id=group_id,
        enable_auto_commit=enable_auto_commit,
        auto_offset_reset=auto_offset_reset,
        max_partition_fetch_bytes=max_partition_fetch_bytes)
    logging.info('Kafka Consumer setup is done!')
    total_lag = 0
    failed_msg = 0
    total_message_processed = 0
    lag = 0
    try:
        # print(kafka_consumer)
        partition_lag = {}
        # Checking the Kafka lag
        for partition in kafka_consumer.partitions_for_topic(topic_name):
            tp = TopicPartition(topic_name, partition)
            kafka_consumer.assign([tp])
            committed = kafka_consumer.committed(tp)
            logging.info(
                'Committed: ' + str(committed) + ' for partition: ' +
                str(partition))
            """
            When topic received the first message.
            In kafka-python library, if there is no prior message committed
            then it assigns NONE instead of 0."""
            if committed is None:
                logging.info('Executed Committed None!')
                committed = 0
                logging.info(
                    'Committed: ' + str(committed) + ' for partition: ' +
                    str(partition))
            # seek_to_end + position yields the partition's end offset.
            kafka_consumer.seek_to_end(tp)
            last_offset = kafka_consumer.position(tp)
            logging.info(
                'End offset: ' + str(last_offset) + ' for partition: ' +
                str(partition))
            # Calculate the Kafka Topic lag here
            if last_offset is not None and committed is not None:
                lag = last_offset - committed
            partition_lag[partition] = lag
            partition_info = 'group: ' + str(group_id) \
                             + ' topic: ' + str(topic_name) \
                             + ' partition: ' + str(partition) \
                             + ' lag: ' + str(lag)
            logging.info(partition_info)
            if lag > 0:
                total_lag += lag
        # Starting message processing partition by partition
        logging.info('Message processing started partition by partition!')
        for partition, lag in partition_lag.items():
            partition_lag_info = 'Processing on partition: ' \
                                 + str(partition) \
                                 + ' having lag: ' \
                                 + str(lag)
            logging.info(partition_lag_info)
            if lag > 0:
                tp = TopicPartition(topic_name, partition)
                kafka_consumer.assign([tp])
                committed = kafka_consumer.committed(tp)
                kafka_consumer.position(tp)
                logging.info(
                    'Committed: ' + str(committed) + ' for partition: ' +
                    str(partition))
                """
                When topic received the first message.
                In kafka-python library, if there is no prior message committed
                then it assigns NONE instead of 0.
                """
                if committed is None:
                    logging.info('Executed Committed None!')
                    committed = 0
                # print(current_position)
                committed_current_info = 'Message Committed: ' + str(committed)
                logging.info(committed_current_info)
                message_count = 0
                # Stop once the consumer reaches the offset that was the end
                # of the partition when the lag was measured.
                end_offset = committed + lag
                """
                Run the following loop for every message to read (process) them
                in Scylla DB.
                """
                for msg in kafka_consumer:
                    start = time.time()
                    current_position = kafka_consumer.position(tp)
                    message_count_lag_position_info = 'Message Count: ' \
                                                      + str(message_count) \
                                                      + ', lag: ' \
                                                      + str(lag) \
                                                      + 'and current ' \
                                                        'position: ' \
                                                      + str(current_position)
                    logging.info(message_count_lag_position_info)
                    if message_count >= lag:
                        break
                    elif message_count < lag:
                        message_start_info = 'Message processing started for' \
                                             ' partition= ' + str(partition) \
                                             + ' at position= ' \
                                             + str(current_position)
                        logging.info(message_start_info)
                        """Sending message to the Scylla DB function to
                        create queries and fire them at the provided
                        concurrency level.
                        """
                        query_status = scylla_operation(msg, concurency_level)
                        if "failed" not in query_status:
                            logging.info("Message Processed!!!")
                            # Commit only after the queries succeeded so a
                            # failed message is retried on the next run.
                            kafka_consumer.commit()
                            total_message_processed += 1
                        else:
                            logging.info("Message not processed!!!")
                            failed_msg = 1
                        end = time.time()
                        message_end_info = 'Message processing ended for ' \
                                           'partition= ' + str(partition) \
                                           + ' at position= ' \
                                           + str(current_position) \
                                           + ' with total time= ' \
                                           + str(end - start)
                        logging.info(message_end_info)
                        message_count += 1
                        if end_offset == current_position:
                            break
        logging.info('Message processing ended partition by partition!')
    except Exception as e:
        print("Exception:::: ", e)
        failed_msg = 1
        logging.error('Exception occurred at executing the message queries')
        logger.debug(traceback.format_exc())
    finally:
        # Always report the outcome to Slack and release both connections.
        print("Message Failed or not:- ", failed_msg)
        if total_message_processed > 0 and total_lag > 0 and failed_msg == 0:
            # Create a message to be posted in Slack
            message_on_slack = 'Read Replication done on ' \
                               + str(total_message_processed) \
                               + ' messages in Production Environment in FRA'
            # Create a body that needs to be delivered in Slack
            data = {
                'token': slack_token,
                'channel': slack_channel_name,
                'text': message_on_slack
            }
            logging.info("Process Ran Successfully!!!")
            """
            Post the message in Slack for Success where there are messages
            processed
            """
            requests.post(
                url='https://slack.com/api/chat.postMessage',
                data=data)
        if total_message_processed == 0 and total_lag == 0 and failed_msg == 0:
            message_on_slack = 'Read Replication triggered and found zero \
            messages for processing at FRA DC Production'
            data = {
                'token': slack_token,
                'channel': slack_channel_name,
                'text': message_on_slack
            }
            """
            Post the message in Slack for Success where there is no message
            processed
            """
            requests.post(
                url='https://slack.com/api/chat.postMessage',
                data=data)
        if failed_msg == 1:
            message_on_slack = 'Read Replication failed in FRA DC Production\
            Environment'
            data = {
                'token': slack_token,
                'channel': slack_channel_name,
                'text': message_on_slack
            }
            logging.info("Process Failed!!!!")
            """
            Post the message in Slack for Success where there is a failure
            in functionality
            """
            requests.post(
                url='https://slack.com/api/chat.postMessage',
                data=data)
        # Shut down consumer
        kafka_consumer.close()
        # Shut down cassandra cluster
        cluster.shutdown()
def main():
    """Entry point: parse CLI arguments, load configuration and run the
    read-repair message-processing pipeline.

    Usage: python read_repair_utility.py conf/parameter.ini

    :raises Exception: wrapping the original error (chained as ``__cause__``)
        when any pipeline step fails.
    """
    try:
        logging.info("==== Processing Started ====")
        # Extract command line parameters
        p_app_config_file = extract_command_params(sys.argv)
        # Set environment (all connection/consumer settings from the ini file)
        user, password, node, port, topic_name, kafka_servers, group_id, \
            enable_auto_commit, auto_offset_reset, slack_token, \
            slack_channel_name, keyspace, control_connection_timeout, \
            connect_timeout, max_partition_fetch_bytes, \
            concurency_level = set_env(p_app_config_file)
        # Process Messages
        process_messages(user, password, node, port, topic_name, kafka_servers,
                         group_id, enable_auto_commit, auto_offset_reset,
                         slack_token, slack_channel_name, keyspace,
                         control_connection_timeout, connect_timeout,
                         max_partition_fetch_bytes, concurency_level)
        logging.info("==== Processing Ended ====")
    except Exception as e:
        logging.error('Exception message in main thread::::')
        logging.error(e)
        # BUG FIX: chain the original exception so its traceback is preserved
        # as the cause of the re-raised error.
        raise Exception('Exception message in main thread::::', e) from e
if __name__ == '__main__':
    # Run the read-repair pipeline, then flush and close all log handlers.
    main()
    logging.shutdown()
|
995,821 | bfa93c0a4632170846ee10ba43030cd14a9c8d8d | import torch
from os import path
class L2Loss(torch.nn.Module):
    """Mean squared error between prediction and target."""

    def forward(self, output, target):
        """
        L2 Loss
        @output: torch.Tensor((B,C))
        @target: torch.Tensor((B,C))
        @return: torch.Tensor((,))
        """
        residual = output - target
        return (residual * residual).mean()
class MLPModel(torch.nn.Module):
    """Multi-layer perceptron: flattens each sample, then applies a stack of
    Linear/ReLU hidden layers followed by a final Linear projection.

    :param input_dim: number of features per sample after flattening.
    :param output_dim: size of the output vector.
    :param hidden_dims: sizes of the hidden layers (None/empty -> one Linear).
    """

    def __init__(self, input_dim, output_dim, hidden_dims=None):
        super().__init__()
        # BUG FIX: the default used to be a mutable list literal ([]), which
        # is shared across all calls; use the None sentinel instead.
        hidden_dims = [] if hidden_dims is None else hidden_dims
        c = input_dim
        layers = []
        for dim in hidden_dims:
            layers.append(torch.nn.Linear(c, dim))
            layers.append(torch.nn.ReLU())
            c = dim
        layers.append(torch.nn.Linear(c, output_dim))
        self.seq = torch.nn.Sequential(*layers)

    def forward(self, x):
        """
        Flatten each sample to a vector and run it through the layer stack.

        @x: torch.Tensor((B,3,64,64))
        @return: torch.Tensor((B,6))
        """
        return self.seq(x.view(x.shape[0], -1))
# Registry of model name -> constructor; the key doubles as the checkpoint
# filename stem used by save_model()/load_model() below.
model_factory = {
    'mlp': MLPModel,
}
def save_model(model):
    """Save *model*'s weights as '<name>.th' next to this file, where <name>
    is the model_factory key whose class matches *model*.

    :raises ValueError: if the model's type is not registered in model_factory.
    """
    for n, m in model_factory.items():
        if isinstance(model, m):
            return torch.save(model.state_dict(), path.join(path.dirname(path.abspath(__file__)), '%s.th' % n))
    raise ValueError("model type '%s' not supported!" % str(type(model)))
def load_model(model):
    """Instantiate the model registered under *model* (a model_factory key)
    and load its weights from '<model>.th' next to this file, mapped to CPU.

    NOTE(review): assumes the registered constructor is callable with no
    arguments — confirm for models whose __init__ requires parameters.
    """
    r = model_factory[model]()
    r.load_state_dict(torch.load(path.join(path.dirname(path.abspath(__file__)), '%s.th' % model), map_location='cpu'))
    return r
|
995,822 | a063c1d7cae5c34f791f976ad2fc5b1be26d92db | # $language = "python"
# $interface = "1.0"
import os
import sys
import logging
import csv
# Add script directory to the PYTHONPATH so we can import our modules (only if run from SecureCRT)
if 'crt' in globals():
    # Running inside SecureCRT: the host injects 'crt' into globals.
    script_dir, script_name = os.path.split(crt.ScriptFullName)
    if script_dir not in sys.path:
        sys.path.insert(0, script_dir)
else:
    # Running standalone (debug/simulation): derive paths from this file.
    script_dir, script_name = os.path.split(os.path.realpath(__file__))
# Now we can import our custom modules
from securecrt_tools import scripts
from securecrt_tools import utilities
# Create global logger so we can write debug messages from any function (if debug mode setting is enabled in settings).
logger = logging.getLogger("securecrt")
logger.debug("Starting execution of {0}".format(script_name))
# ################################################ SCRIPT LOGIC ###################################################
def script_main(session):
    """
    | SINGLE device script
    | Morphed: Gordon Rogier grogier@cisco.com
    | Framework: Jamie Caesar jcaesar@presidio.com

    This script will capture the WLC AireOS mobility summary and returns an output list

    :param session: A subclass of the sessions.Session object that represents this particular script session (either
        SecureCRTSession or DirectSession)
    :type session: sessions.Session
    """
    # Get script object that owns this session, so we can check settings, get textfsm templates, etc
    # NOTE(review): 'script' is assigned but not used in this function.
    script = session.script
    # Start session with device, i.e. modify term parameters for better interaction (assuming already connected)
    session.start_cisco_session()
    # Validate device is running a supported OS
    session.validate_os(["AireOS"])
    # Get additional information we'll need (also writes the CSV output)
    get_mobility_group(session, to_cvs=True)
    # Return terminal parameters back to the original state.
    session.end_cisco_session()
def get_mobility_group(session, to_cvs=False):
    """
    Capture the WLC AireOS mobility summary and return it as a parsed list.

    :param session: The script object that represents this script being executed
    :type session: session.Session
    :param to_cvs: when True, also write the parsed output to a CSV file.

    :return: A list of mobility group peers
    :rtype: list of lists
    """
    raw_output = session.get_command_output("show mobility summary")
    # TextFSM template for parsing "show ap summary" output
    template_path = session.script.get_template("cisco_aireos_show_mobility_summary.template")
    parsed_rows = utilities.textfsm_parse_to_list(raw_output, template_path, add_header=True)
    if to_cvs:
        csv_filename = session.create_output_filename("mobility-group", ext=".csv")
        utilities.list_of_lists_to_csv(parsed_rows, csv_filename)
    return parsed_rows
# ################################################ SCRIPT LAUNCH ###################################################
# If this script is run from SecureCRT directly, use the SecureCRT specific class
# (SecureCRT executes scripts with __name__ set to "__builtin__")
if __name__ == "__builtin__":
    # Initialize script object
    crt_script = scripts.CRTScript(crt)
    # Get session object for the SecureCRT tab that the script was launched from.
    crt_session = crt_script.get_main_session()
    # Run script's main logic against our session
    try:
        script_main(crt_session)
    except Exception:
        # Restore terminal settings before letting the error surface.
        crt_session.end_cisco_session()
        raise
    # Shutdown logging after
    logging.shutdown()
# If the script is being run directly, use the simulation class
elif __name__ == "__main__":
    # Initialize script object
    direct_script = scripts.DebugScript(os.path.realpath(__file__))
    # Get a simulated session object to pass into the script.
    sim_session = direct_script.get_main_session()
    # Run script's main logic against our session
    script_main(sim_session)
    # Shutdown logging after
    logging.shutdown()
|
995,823 | 1ca858d8e28f640fce3a04cf554d3493890d0031 | import scrapy
import json
from datetime import datetime
from pymongo import MongoClient
from bson.objectid import ObjectId
from datetime import datetime
from blockchain.spiders.parse_tx import satoshi_to_btc, insert_tx
# Module-level MongoDB handle shared by the spider below ('btc' database).
client = MongoClient('mongodb://localhost:27017')
db = client.btc
class BlockSpider(scrapy.Spider):
    """Fetch block-listing pages from URLs queued in MongoDB and store one
    pointer document per block for later detail scraping."""
    name = "blockchaininfo_blocks"

    def start_requests(self):
        # Only scrape URLs that have not been successfully retrieved yet.
        print('NUM URLS TO SCRAPE', db.block_pointers_url.find({'retrieved': False}).count())
        for url in list(db.block_pointers_url.find({'retrieved': False})):
            yield scrapy.Request(url=url['url'], callback=self.parse, meta={'obj': url})

    def parse(self, response):
        """Insert each block from the JSON response and mark the listing URL
        as retrieved."""
        blocks = json.loads(response.body)
        orig_obj = response.meta['obj']
        for obj in blocks['blocks']:
            obj['dt'] = datetime.fromtimestamp(obj['time'])
            obj['retrieved_details'] = False
            try:
                db.block_pointers.insert_many([obj])
            except Exception:
                # BUG FIX: was a bare 'except:', which also swallowed
                # KeyboardInterrupt/SystemExit. Write errors (typically
                # duplicate keys on re-scrape) are intentionally ignored so
                # one bad block does not abort the whole page.
                pass
        # Mark the listing URL as done so start_requests skips it next run.
        orig_obj['retrieved'] = True
        orig_obj['last_scraped'] = datetime.now()
        db.block_pointers_url.replace_one({'_id': ObjectId(orig_obj['_id'])}, orig_obj, upsert=True)
995,824 | d6ab7dc33f2ae69c622eb0646a530a092abe9248 | from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import LSTM
from keras.layers import Conv1D, GlobalMaxPooling1D, BatchNormalization
from gensim.models import KeyedVectors
from keras.callbacks import *
import numpy as np
from linguistic_style_transfer.linguistic_style_transfer_model.utils import data_processor
# Hyper-parameters and data locations for this training run.
options = {
    "text_file_path": '/home/mdomrachev/work_rep/content_vs_style_problem/linguistic_style_transfer/dataset_forming/data_set/train.txt',
    "label_file_path": '/home/mdomrachev/work_rep/content_vs_style_problem/linguistic_style_transfer/dataset_forming/data_set/train_labels.txt',
    "vocab_size": 40000,
    "training_epochs": 100,
    "fastext": "/home/mdomrachev/Data/cc.ru.300.vec"
}
# Pre-trained fastText word vectors (text format, 300-dim per the zeros below).
w2v_vectors = KeyedVectors.load_word2vec_format(options['fastext'], binary=False)
[word_index, x, _, _, _] = \
    data_processor.get_text_sequences(
        options['text_file_path'],
        options['vocab_size'],
        '/home/mdomrachev/work_rep/content_vs_style_problem/linguistic_style_transfer/author_identification/vocab')
# Sort vocabulary by index so embedding row i matches word with index i.
vocabulary_index_sorted = sorted([(w, word_index[w]) for w in word_index], key= lambda x: x[1], reverse=False)
vectors = []
cover_voc = 0
base_vector = np.zeros(300)
# Build the embedding matrix; unknown words fall back to the zero vector.
for t in vocabulary_index_sorted:
    try:
        vectors.append(w2v_vectors[t[0]])
        cover_voc += 1
    except KeyError:
        vectors.append(base_vector)
vectors = np.array(vectors)
print('create matrix: %s; cover_voc: %s' % (vectors.shape, cover_voc), '\n')
x = np.asarray(x)
[y, _] = data_processor.get_labels(options['label_file_path'], store_labels=False, one_hot_encode=False)
# Shuffle samples and labels with the same permutation.
shuffle_indices = np.random.permutation(np.arange(len(x)))
# shuffle_indices = [i for i in shuffle_indices if i != max(shuffle_indices)]
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]
# Split train/test set (last 1% of the shuffled data is the dev set)
dev_sample_index = -1 * int(0.01 * float(len(y)))
x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
del x, y, x_shuffled, y_shuffled
print("Vocabulary Size: {:d}".format(options['vocab_size']))
print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
# CNN text classifier: frozen pre-trained embeddings -> Conv1D -> dense head.
model = Sequential()
model.add(Embedding(input_dim=len(word_index),
                    output_dim=300,
                    input_length=15,
                    weights=[vectors],
                    mask_zero=False,
                    trainable=False))
model.add(Dropout(0.5))
model.add(Conv1D(filters=1024,
                 kernel_size=5,
                 padding='valid',
                 activation='relu',
                 strides=1))
model.add(GlobalMaxPooling1D())
model.add(BatchNormalization())
model.add(Dense(256, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(BatchNormalization())
# Binary classification output.
model.add(Dense(1, activation='sigmoid'))
# NOTE(review): layers[1] is the Dropout layer here; the Embedding (layers[0])
# is already constructed with trainable=False, so this line looks misplaced —
# confirm which layer was meant to be frozen.
model.layers[1].trainable=False
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Stop training when val_loss has not improved for 15 epochs.
early_stopping = EarlyStopping(monitor='val_loss',
                               patience=15,
                               verbose=0,
                               mode='auto')
print('Train...')
model.fit(x_train, y_train,
          batch_size=128,
          epochs=50,
          validation_data=(x_dev, y_dev),
          shuffle=True,
          callbacks=[early_stopping])
# score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
# print('Test score:', score)
# print('Test accuracy:', acc)
|
995,825 | 31fee1a4f67cf9778ff97503263c1ab856e531c7 | import pika
class Publish(object):
    """Thin wrapper around a blocking pika connection for publishing a single
    message to a named queue."""

    def __init__(self, host, msg, queue):
        self.host = host
        # A 30s heartbeat keeps the broker from dropping idle connections.
        connection_params = pika.ConnectionParameters(host=self.host, heartbeat=30)
        self.comm = pika.BlockingConnection(connection_params)
        self.msg = msg
        self.queue = queue

    def send(self):
        """Declare the queue (idempotent) and publish the stored message."""
        self.channel = self.comm.channel()
        self.channel.queue_declare(queue=self.queue)
        self.channel.basic_publish(exchange='', routing_key=self.queue, body=self.msg)
        print("["+self.queue+"] : message: "+self.msg)

    def close_comm(self):
        """Close the underlying AMQP connection."""
        self.comm.close()
|
995,826 | 660fe2506a679700deb8ae44ecabeed04aede1bf | #Desenvolva um programa que pergunte a distância de uma viagem em Km. Calcule o preço da passagem, cobrando R$0,50 por Km para viagens de até 200Km e R$0,45 parta viagens mais longas.
viagem = float(input('Digite a distancia da viagem: '))
if viagem <= 200:
passagem = 0.50 * viagem
print('Sua viagem curta custou {} R$'.format(passagem))
else:
passagem = 0.45 * viagem
print('Sua viagem longa custou {} R$'.format(passagem))
|
995,827 | 8867551e9e17bbb830e28277d502fa19e4f9e23d | #!/usr/bin/env python
#
from __future__ import print_function
import rospy
from sensor_msgs.msg import JointState
from markers import *
from functions import *
import numpy as np
# Initialize the node
# Initialize the node
rospy.init_node("testKineControlPosition")
print('starting motion ... ')
# Publisher: publish to the joint_states topic
pub = rospy.Publisher('joint_states', JointState, queue_size=10)
# Files for the logs
fxcurrent = open("/home/user/proyecto_ws/src/proy/graphs/xcurrent.txt", "w")
fxdesired = open("/home/user/proyecto_ws/src/proy/graphs/xdesired.txt", "w")
fq = open("/home/user/proyecto_ws/src/proy/graphs/q.txt", "w")
# Markers for the current and desired positions
bmarker_current = BallMarker(color['RED'])
bmarker_desired = BallMarker(color['GREEN'])
# Joint names
jnames = ["base_joint", "hombro_joint", "antebrazo_joint", "ante_brazo_joint", "brazo_joint", "brazo_mano_joint", "mano_joint"]
# Desired position
xd = np.array([0.34, 0.225, 0.1491])
# Initial configuration (all seven joints at pi/4)
q0 = np.array([pi/4, pi/4, pi/4, pi/4, pi/4, pi/4, pi/4])
# Resulting initial position (end effector with respect to the base link)
T = fkine(q0)
x0 = T[0:3,3]
# Red marker shows the achieved position
bmarker_current.xyz(x0)
# Green marker shows the desired position
bmarker_desired.xyz(xd)
# Instance of the JointState message
jstate = JointState()
# Values of the message
jstate.header.stamp = rospy.Time.now()
jstate.name = jnames
# Add the head joint value (with value 0) to the joints
jstate.position = q0
# Frequency (in Hz) and control period
freq = 200
dt = 1.0/freq
rate = rospy.Rate(freq)
# Initial joint configuration
q = copy(q0)
# Control Gain
k = .9
# Termination tolerance on the Cartesian position error
epsilon = 0.002
# Main loop
while not rospy.is_shutdown():
    # Current time (needed for ROS)
    jstate.header.stamp = rospy.Time.now()
    # Kinematic control law for position (complete here)
    # -----------------------------
    x = fkine(q)[0:3,3]
    e = x-xd
    print(np.linalg.norm(e))
    # Proportional control in task space: drive the position error to zero.
    de = -k*e
    J = jacobian_position(q)
    # NOTE(review): both branches below are identical; the rank-deficient
    # case (rank < 3) presumably intended a damped/regularized pseudo-inverse
    # to handle singularities — confirm.
    if np.linalg.matrix_rank(J)<3:
        dq = np.linalg.pinv(J).dot(de)
    else:
        dq = np.linalg.pinv(J).dot(de)
    # Integrate the joint velocity over one control period.
    q = q+dt*dq
    # -----------------------------
    # Log values
    fxcurrent.write(str(x[0])+' '+str(x[1]) +' '+str(x[2])+'\n')
    fxdesired.write(str(xd[0])+' '+str(xd[1])+' '+str(xd[2])+'\n')
    fq.write(str(q[0])+" "+str(q[1])+" "+str(q[2])+" "+str(q[3])+" "+
             str(q[4])+" "+str(q[5])+" "+str(q[6])+"\n")
    # Publish the message
    jstate.position = q
    pub.publish(jstate)
    bmarker_desired.xyz(xd)
    bmarker_current.xyz(x)
    if np.linalg.norm(e) < epsilon:
        break
    # Wait for the next iteration
    rate.sleep()
print('ending motion ...')
fxcurrent.close()
fxdesired.close()
fq.close()
|
995,828 | f05b88949c1984783f4bf656ce6e619f5fabe5ed | from django.apps import AppConfig
class UpsFrontendConfig(AppConfig):
    """Django application configuration for the 'ups_frontend' app."""
    # Dotted path under which Django registers this application.
    name = 'ups_frontend'
995,829 | eca5b52a239d539795a68ddd6eca8c4daa0c85df | """
EXPLICIT STRING CASTING
b'foo bar' : results bytes in Python 3
'foo bar' : results str
r'foo bar' : results so called raw string, where escaping special characters is
not necessary, everything is taken verbatim as you typed
"""
normal_string = 'pawel \n loves \t coding'
print(normal_string)
##pawel
## loves coding
raw_string = r'pawel \n loves \t coding'
print(raw_string)
##pawel \n loves \t coding
|
995,830 | 7d8e030cc81cae524b980e8c20fb9cf14d65fbaa | #!/usr/bin/python
'''
(C) 2010-2013 ICM UW. All rights reserved.
'''
import shutil
import time
import re
import os
import sys
###############################################
######## DEFINITION BLOCK #####################
###############################################
def copyFilesNeeded(propsDir,directory,compilation_time, copylist):
    """Stage the build directory: copy the chrum properties file, write the
    project properties file prefixed with COMPILATION_TIME, create the
    'results' directory and copy every *copylist* entry into *directory*.

    NOTE(review): reads sys.argv[1..3] directly; the *propsDir* parameter is
    unused — confirm this coupling to the command line is intended.
    """
    shutil.copy(sys.argv[1], directory)
    proj_props = ''
    # Extract the bare filename of the project properties file.
    idx = sys.argv[2].rfind('/')
    if idx !=-1:
        proj_props = sys.argv[2][idx+1:]
    else:
        proj_props = sys.argv[2]
    fr = open(sys.argv[2],'r')
    fw = open(directory+'/'+proj_props,'w')
    # Prepend the compilation timestamp to the copied project properties.
    fw.write('COMPILATION_TIME='+compilation_time+'\n'+fr.read())
    fw.close();fr.close()
    os.makedirs(directory+'/results')
    shutil.copy(sys.argv[3], directory)
    currDir = os.getcwd()
    # Copy-list paths are resolved relative to the properties file's folder.
    os.chdir(sys.argv[2][:idx])
    for di,fils in copylist.items():
        fulldi = directory+di+'/'
        for fi in fils:
            a = os.path.realpath(fi);b = fulldi
            if os.path.isdir(a):
                shutil.copytree(a, b)
            else:
                os.makedirs(fulldi)
                shutil.copy(a,b)
    os.chdir(currDir)
def readFileToString(path):
    """Return the full contents of the file at *path* as a string.

    :param path: path of the file to read.
    :return: the file's text.
    """
    # BUG FIX: this used to open sys.argv[1] and ignore the *path* argument.
    # (The only visible caller passes args[1], so when run as a script the two
    # coincided and hid the bug.) 'with' also guarantees the handle is closed.
    with open(path, 'r') as file_:
        return file_.read()
def removeComments(text):
    """Strip '#' line comments and triple-single-quoted blocks from *text*."""
    without_line_comments = re.sub('#[^\n]*', '', text)
    triple_quoted = re.compile("'''.*?'''", re.DOTALL)
    return re.sub(triple_quoted, '', without_line_comments)
def getCommandLineProps(allParams):
    """Collect '-Dname=value' overrides from the CLI args after the first four."""
    overrides = {}
    for param in allParams[4:]:
        # Accept only '-Dname=value' with a non-empty name
        # (an '=' at index 2 would mean '-D=...').
        if param.startswith('-D') and param.find('=') not in (-1, 2):
            name_value = param[2:].split('=')
            overrides[name_value[0]] = name_value[1]
    return overrides
def readChrumProps(chrumPropsText,otherPropsDict):
    """Parse the chrum properties text into (properties, copylist, keywords).

    'KEY=VALUE' lines become properties, with ${NAME} interpolation from
    previously-defined properties; the reserved names HDFS/LOCAL/PROJECT/
    OOZIE_SERVER/OOZIE_PORT go into *keywords* instead. Lines of the form
    'dest <- a, b' populate *copylist* (dest -> list of stripped sources).

    :param chrumPropsText: comment-free properties text.
    :param otherPropsDict: seed properties (mutated in place and returned).
    :return: (otherPropsDict, copylist, keywords)
    :raises Exception: when a ${NAME} reference is undefined.
    """
    copylist = {}
    keywords = {}
    for line in chrumPropsText.split('\n'):
        line = line.strip()
        if line.find('=') != -1:
            # Interpolate ${NAME} references using already-known properties.
            for match in re.finditer('\$\{([^\}]+)\}',line,re.IGNORECASE):
                # BUG FIX: dict.has_key() was removed in Python 3; the 'in'
                # operator behaves identically on Python 2 and 3.
                if match.group(1) in otherPropsDict:
                    line = line.replace('${'+match.group(1)+'}',otherPropsDict[match.group(1)])
                else:
                    raise Exception('Undefined parameter \''+match.group(1)+'\' in chrum-properties-file. \
                    \nFirst provide parameter value, then use it.')
            if any((line.split('=')[0] == 'HDFS',
                    line.split('=')[0]=='LOCAL',
                    line.split('=')[0]=='PROJECT',
                    line.split('=')[0]=='OOZIE_SERVER',
                    line.split('=')[0]=='OOZIE_PORT')):
                keywords[line.split('=')[0]]=line.split('=')[1]
            else:
                otherPropsDict[line.split('=')[0]]=line.split('=')[1]
        elif line.find('<-') != -1:
            to = line.split('<-')[0].strip()
            which = line.split('<-')[1].split(',')
            # BUG FIX: on Python 3, map() returns a lazy iterator; materialize
            # a list so copylist values stay list-typed as on Python 2.
            which = [entry.strip() for entry in which]
            copylist[to]=which
    return otherPropsDict, copylist, keywords
def main(args):
    """Parse the chrum properties file named in args[1], stage the project
    files into a fresh /tmp/chrum/<timestamp> build directory and return
    (compilation_time, keywords, chrumprops, root_dir).

    :param args: sys.argv-style list; args[1] is the chrum properties file,
        extra '-Dname=value' entries after args[3] override properties.
    """
    otherprops = {}
    # Command-line -D overrides take effect before the file is parsed.
    if len(args) > 4:
        otherprops = getCommandLineProps(args)
    chrumpropstext = removeComments(readFileToString(args[1]))
    chrumprops, copylist, keywords = readChrumProps(chrumpropstext,otherprops)
    # Parent directory of the folder containing the properties file.
    propsDir = os.path.dirname(os.path.realpath(args[1]))
    propsDir = propsDir[:propsDir.rindex('/')]
    # Timestamp doubles as the unique build id in the staging path.
    compilation_time = str(time.time())
    root_dir = '/tmp/chrum/'+compilation_time+'/'+keywords['PROJECT']+'/'+compilation_time+'/'
    directory = root_dir+'/default/'
    os.makedirs(directory)
    copyFilesNeeded(propsDir,directory,compilation_time,copylist)
    return compilation_time, keywords, chrumprops, root_dir
|
995,831 | 2a79d3c141eba52295394dfb740774a0351dafa6 | # -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
"""Function implementation"""
import logging
from algosec.models import ChangeRequestTrafficLine, ChangeRequestAction
from resilient_circuits import function, FunctionResult, StatusMessage
from algosec_resilient.components.algosec_base_component import AlgoSecComponent
logger = logging.getLogger(__name__)
class AlgoSecIsolateHostFromNetwork(AlgoSecComponent):
    """Component that implements Resilient function 'algosec_isolate_host_from_network"""
    @function("algosec_isolate_host_from_network")
    def _algosec_isolate_host_from_network_function(self, event, *args, **kwargs):
        """
        Function: Create a traffic change request with AlgoSec's FireFlow to isolate a host from the network.
        Then AlgoSec's ActiveChange then automatically implements rule changes across all firewalls
        in the network to isolate the host completely.
        """
        return self.run_login(kwargs)
    def _logic(self, algosec_hostname):
        """The @function decorator offerend by resilient circuits is impossible to unit test..."""
        logger.info("algosec_hostname: %s", algosec_hostname)
        # PUT YOUR FUNCTION IMPLEMENTATION CODE HERE
        yield StatusMessage("starting...")
        # Two DROP lines: block all traffic FROM the host and all traffic TO it.
        isolate_traffic_lines = [
            ChangeRequestTrafficLine(
                action=ChangeRequestAction.DROP,
                sources=[algosec_hostname],
                destinations=['*'],
                services=['*'],
            ),
            ChangeRequestTrafficLine(
                action=ChangeRequestAction.DROP,
                sources=['*'],
                destinations=[algosec_hostname],
                services=['*'],
            )
        ]
        try:
            yield StatusMessage("creating isolation change request...")
            change_request_url = self.algosec.fire_flow().create_change_request(
                subject=self.options['isolation_request_subject'].format(algosec_hostname),
                requestor_name=self.options['isolation_request_requestor'],
                email=self.options['isolation_request_requestor_email'],
                traffic_lines=isolate_traffic_lines,
                description=self.options['isolation_request_description'],
                template=self.options.get('isolation_request_template') or None,
            )
        except Exception:
            # NOTE(review): the original exception is discarded here; consider
            # 'raise ... from err' so the root cause survives for debugging.
            raise Exception(
                "Error occured while trying to create the isolation change request for {}".format(algosec_hostname)
            )
        yield StatusMessage("done...")
        # FireFlow returns a URL ending in '...=<id>'; extract the numeric id.
        change_request_id = int(change_request_url.split('=')[1])
        result = {
            'id': change_request_id,
            'hostname': algosec_hostname,
            'url': '<a href="{}">Change Request #{}</a>'.format(change_request_url, change_request_id),
        }
        # Produce a FunctionResult with the result
        yield FunctionResult(result)
|
995,832 | 0fb94ff1b9cc51b80248564cb6190369ebcfc2eb | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals, absolute_import
import os,sys
from io import open
import re
# Python Compatability
if sys.version_info[0] >= 3:
unicode = str
xrange = range
from bottle import HTTPError
from . import utils
from . import main
from .nbconfig import NBCONFIG
# Incomplete todo:
# - [X] Home page / blog
# - [X] Link to homepage
# - [C] Reroute ?map=true to _sitemap
# - [X] Fix bug with all page on sitemap (and maybe original home?)
# - [X] Special pages
# - [X] todo
# - [X] tags
# - [X] random
# - [X] Photo Galleries
def offline_copy(_export_path):
    """
    Build a static, offline copy of the whole site under ``_export_path``.

    This is the main tool for the offline copy. It has to do some dirty
    tricks to get it to work:

    * Monkey-patches the live config/app so no page is protected and no
      login is required.
    * Walks the source tree, rendering every directory index, every page,
      and the ``_all`` views, then rewrites the HTML for offline use via
      ``process_page``.
    * Renders the special pages (blog, sitemap, tags, todo, random,
      galleries) and drops empty ``index.html`` files so no directory
      listing is ever exposed.

    It is not designed to be efficient and will make a full copy on each run.
    """
    global export_path
    export_path = _export_path

    # First, monkey patch the original config so nothing is protected.
    # NOTE(review): 'protectect_dirs' looks misspelled, but it must match
    # whatever attribute `main` reads -- confirm before renaming.
    main.NBCONFIG.protectect_dirs = []
    main.NBCONFIG.protected_users = {}
    main.NBCONFIG.edit_users = {}

    # Now monkey patch NBweb itself.
    main.REQUIRELOGIN = False

    pages = []

    # Copy and work all source files
    for dirpath,dirnames,filenames in os.walk(NBCONFIG.source):
        for dirname in dirnames[:]: # Iterate a copy since we will delete in place
            if any(dirname.startswith(i) for i in ['.']):
                dirnames.remove(dirname) # So we do not parse it later
                continue
            if dirname == '_scratch':
                dirnames.remove(dirname) # So we do not parse it later
                continue

            # Names
            src_systemname = os.path.join(dirpath,dirname)
            rootname = os.path.relpath(src_systemname,NBCONFIG.source) # No leading / though
            dest_systemname = os.path.join(export_path,rootname)
            mkdir(rootname,isfile=False) # Will make the dir no matter what

            # Index
            dest = os.path.join(export_path,rootname, 'index.html')

            # Exclusions: excluded directories get an empty index so they
            # exist but expose nothing.
            if main.exclusion_check(utils.join('/',rootname +'/')):
                with open(dest,'w',encoding='utf8') as FF:
                    FF.write('')
                continue

            try:
                html = main.main_route('/' + rootname + '/')
            except HTTPError:
                # Likely some additional resource in _NBweb; expose it as a
                # symlink instead of a rendered page.
                try:
                    os.rmdir(dest_systemname) # Should be empty
                except OSError:
                    pass
                os.symlink(src_systemname,dest_systemname)
                continue

            html = process_page(html,dest)
            with open(dest,'w',encoding='utf8') as FF:
                FF.write(html)

            # _all view of the same directory
            dest = os.path.join(export_path,'_all',rootname, 'index.html')
            mkdir(dest,isfile=True,isfull=True)
            html = main.allpage('/'+ rootname +'/')
            html = process_page(html,dest)
            with open(dest,'w',encoding='utf8') as FF:
                FF.write(html)

        # Loop each file
        for filename in filenames:
            if os.path.splitext(filename)[0] == 'index':
                continue # Already made above

            # Names
            src_systemname = os.path.join(dirpath,filename)
            rootname = os.path.relpath(src_systemname,NBCONFIG.source) # No leading / though
            dest_systemname = os.path.join(export_path,rootname)
            mkdir(rootname,isfile=True) # Will make the dir no matter what

            # Always expose the raw source file; replace a stale link if needed.
            try:
                os.symlink(src_systemname,dest_systemname)
            except OSError:
                os.remove(dest_systemname)
                os.symlink(src_systemname,dest_systemname)

            rootbasename,ext = os.path.splitext(rootname)

            if ext in NBCONFIG.extensions:
                dest = os.path.join(export_path,rootbasename + '.html')
                try:
                    html = main.main_route(rootbasename + '.html')
                except Exception:
                    # BUG FIX: the original bare `except:` printed the issue
                    # but then fell through and wrote the *previous* page's
                    # html (or raised NameError on the first failure). Skip
                    # the broken page instead.
                    print('Issue with: {}'.format(rootname))
                    continue
                html = process_page(html,dest)
                with open(dest,'w',encoding='utf8') as FF:
                    FF.write(html)
                pages.append(rootbasename)

    ## Index pages

    # Home page w/o blog (map view)
    dest_systemname = os.path.join(export_path,'')
    dest = os.path.join(export_path,'index.html')
    html0 = main.main_route('/',map_view=True)
    html = process_page(html0,dest)
    with open(dest,'w',encoding='utf8') as FF:
        FF.write(html)

    # Also write the same content as the sitemap
    dest = os.path.join(export_path,'_sitemap/index.html')
    mkdir('/_sitemap',isfile=False)
    html = process_page(html0,dest)
    with open(dest,'w',encoding='utf8') as FF:
        FF.write(html)

    # _all for the root
    dest = os.path.join(export_path,'_all','index.html')
    html = main.allpage('/')
    html = process_page(html,dest)
    with open(dest,'w',encoding='utf8') as FF:
        FF.write(html)

    ## Blog Pages: render numbered pages until the app 404s.
    if len(NBCONFIG.blog_dirs) > 0:
        blog_num = 0
        while True:
            dest = os.path.join(export_path,'_blog',unicode(blog_num),'index.html')
            try:
                html = main.main_route('/',map_view=False,blog_num=blog_num)
            except HTTPError:
                break # At the last one
            mkdir(dest,isfile=True,isfull=True)
            html = process_page(html,dest)
            with open(dest,'w',encoding='utf8') as FF:
                FF.write(html)
            blog_num += 1

        # Make the home page point at blog page 0 (overwrites the map view
        # written above).
        dest = os.path.join(export_path,'index.html')
        html = main.main_route('/',map_view=False,blog_num=0)
        html = process_page(html,dest)
        with open(dest,'w',encoding='utf8') as FF:
            FF.write(html)

    ## Special Pages
    make_random_forward(pages)

    # Tags
    dest = os.path.join(export_path,'_tags/index.html')
    mkdir(dest,isfile=True,isfull=True)
    html = main.return_tags()
    html = process_page(html,dest)
    with open(dest,'w',encoding='utf8') as FF:
        FF.write(html)

    # ToDos (html view plus the raw todo.txt)
    dest = os.path.join(export_path,'_todo/index.html')
    mkdir(dest,isfile=True,isfull=True)
    html = main.return_todo()
    html = process_page(html,dest)
    with open(dest,'w',encoding='utf8') as FF:
        FF.write(html)

    txt = main.return_todo_txt()
    dest = os.path.join(export_path,'_todo/todo.txt')
    with open(dest,'w',encoding='utf8') as FF:
        FF.write(txt)

    # Galleries: symlink-mirror the pre-rendered gallery tree
    cpsym( utils.join(NBCONFIG.scratch_path,'_galleries'),utils.join(export_path,'_galleries'))

    ## Clean up: never ship config/template internals
    for F in [utils.join(export_path,'_NBweb',a) for a in ['NBCONFIG.py','NBCONFIG.pyc','template.html']]:
        try:
            os.remove(F)
        except OSError:
            pass

    # Make sure there are never any directory listings
    for dirpath,dirnames,filenames in os.walk(export_path):
        if 'index.html' not in filenames:
            with open(utils.join(dirpath,'index.html'),'w',encoding='utf8') as F:
                F.write('')
def make_random_forward(pages):
    """Write ``_random/index.html``: a page whose JS redirects the browser
    to a randomly chosen page from ``pages`` (root-relative base names)."""
    template = """\
    <script type="text/javascript">
    var urls = new Array(PAGES);
    function redirect()
    {
    window.location = urls[Math.floor(urls.length*Math.random())];
    }
    redirect()
    </script>
    """.replace('    ','')

    # Quote each page as a JS string literal relative to /_random/.
    links = ('"./../' + name + '.html"' for name in pages)
    links = (utils.to_unicode(name) for name in links)

    out_file = utils.join(export_path,'_random/index.html')
    mkdir(out_file,isfile=True,isfull=True)

    payload = template.replace('PAGES',','.join(links)).encode('utf8')
    with open(out_file,'wb') as fh:
        fh.write(payload)
# Patterns used by process_page() to rewrite internal links for offline use.
# Group 1 is the attribute (href/src); group 2 is the link target.
re_dirlinks = re.compile('(href|src)=\"/(.*?)/\"') # Starts with /, Ends in /
re_all = re.compile('(href|src)=\"/_all/(.*?)\"') # starts with /_all
re_intlinks = re.compile('(href|src)=\"/(.*?)\"') # Starts with /
def process_page(html,dest):
    """
    Fix a rendered page for offline use and return the new HTML.

    * Fix internal links to be relative to ``dest``.
    * Fix links to directories (and extension-less special pages) to end
      in ``index.html``.
    * Strip the ``<!-- search --> ... <!-- /search -->`` region (search
      cannot work offline).

    Notes:
        * all internal links will, by previous processing, start with '/' and
          pages will end in .html IF they are in the main content. We have to
          work around special pages that do not have a directory name
    """
    to_root = os.path.relpath(export_path,dest)
    to_root = to_root[1:]# Change '../' or '..' to '.' or './'

    # Fix links to directories first since that is easier to find.
    # (Was subn with unused match counts; plain sub is equivalent.)
    html = re_dirlinks.sub(r'\1="/\2/index.html"',html)

    # all pages links
    html = re_all.sub(r'\1="/_all/\2/index.html"',html)

    # Add index.html for any other internal links. NOTE: by preprocessing,
    # all internal links from the main content already end in .html so this
    # only touches special pages.
    # (Loop variable renamed from `dest`, which shadowed the parameter.)
    for match in re_intlinks.finditer(html):
        target = match.groups()[-1]
        ext = os.path.splitext(target)[-1]
        if ext == '':
            old = r'{}="/{}"'.format(*match.groups())
            new = r'{}="/{}"'.format(match.groups()[0], os.path.join(match.groups()[1],'index.html') )
            html = html.replace(old,new)

    # Now make all links relative to the root
    html = re_intlinks.sub(r'\1="{}/\2"'.format(to_root),html)

    # Remove the search stuff (everything between the marker comments,
    # markers included).
    out = []
    inside_search = False
    for line in html.split('\n'):
        if not inside_search and '<!-- search -->' not in line:
            out.append(line)
            continue
        if '<!-- search -->' in line:
            inside_search = True
        if inside_search and '<!-- /search -->' in line:
            inside_search = False
    html = '\n'.join(out)

    return html
def cpsym(src,dest):
    """
    Mirror the directory tree under ``src`` into ``dest``, creating the
    directories and symlinking every file. A missing ``src`` is a no-op.
    """
    src = os.path.normpath(src)
    dest = os.path.normpath(dest)

    if not os.path.exists(src):
        return

    for dirpath,dirnames,filenames in os.walk(src):
        # Recreate this directory level under dest.
        rel_dir = os.path.relpath(dirpath,src)
        mkdir(os.path.join(dest,rel_dir),isfull=True)

        for name in filenames:
            source_file = os.path.join(dirpath,name)
            target_file = os.path.join(dest,os.path.relpath(source_file,src))
            try:
                os.symlink(source_file,target_file)
            except OSError:
                # Link already exists from a previous export; keep it.
                pass
def mkdir(path,isfile=False,isfull=False):
    """
    Create a directory (with parents), silently ignoring OSError (e.g. when
    it already exists).

    path   : directory to create, or a file path when ``isfile`` is True
             (then its parent directory is created).
    isfile : treat ``path`` as a file and create its containing directory.
    isfull : ``path`` is absolute/complete; otherwise it is taken relative
             to the module-level ``export_path`` (one leading '/' stripped).
    """
    target = os.path.split(path)[0] if isfile else path

    if isfull:
        full = target
    else:
        if target.startswith('/'):
            target = target[1:]
        full = os.path.join(export_path,target)

    try:
        os.makedirs(full)
    except OSError:
        pass
|
995,833 | fd1e33434b79b2f2ad3e7b10f926f737d21397a9 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import mako
import json
import os
from girder import constants, events
from girder.utility.model_importer import ModelImporter
from girder.utility.webroot import Webroot
from girder.api.rest import Resource, loadmodel, RestException
from girder.api.describe import Description
from girder.api import access
from solr import IndexUploadedFilesText, QueryText, IndexLocationName, QueryLocationName, IndexLatLon, QueryPoints
from tika import parser
from geograpy import extraction
from geopy.geocoders import Nominatim
geolocator = Nominatim()
class GeoParserJobs(Resource):
    """Girder REST resource exposing the GeoParser pipeline as four jobs:
    text extraction (Tika), location-name extraction (geograpy),
    geocoding (GeoPy/Nominatim), and point retrieval -- all backed by Solr.
    """
    def __init__(self):
        self.resourceName = 'geoparser_jobs'
        self.route('GET', ("extract_text",), self.extractText)
        self.route('GET', ("find_location",), self.findLocation)
        self.route('GET', ("find_lat_lon",), self.findLatlon)
        self.route('GET', ("get_points",), self.getPoints)

    @access.public
    def extractText(self, params):
        '''
        Using Tika to extract text from given file
        and return the text content.
        '''
        file_name = params['file_name']
        parsed = parser.from_file(file_name)
        # IndexUploadedFilesText returns (ok, error_message)
        status = IndexUploadedFilesText(file_name, parsed["content"])
        if status[0]:
            return {'job':'text_extraction', 'status': 'successful', 'comment':'Text extracted and indexed to Solr.'}
        else:
            return {'job':'text_extraction', 'status': 'unsuccessful', 'comment':status[1]}
    extractText.description = (
        Description('Extract text')
    )

    @access.public
    def findLocation(self, params):
        '''
        Find location name from extracted text using Geograpy.
        '''
        file_name = params['file_name']
        text_content = QueryText(file_name)
        if text_content:
            e = extraction.Extractor(text=text_content)
            e.find_entities()
            status = IndexLocationName(file_name, e.places)
            if status[0]:
                return {'job':'find_location', 'status': 'successful', 'comment':'Location/s found and indexed to Solr.'}
            else:
                return {'job':'find_location', 'status': 'unsuccessful', 'comment':status[1]}
        else:
            return {'job':'find_location', 'status': 'unsuccessful', 'comment':'Cannot extract text.'}
    findLocation.description = (
        Description('Find location name')
    )

    @access.public
    def findLatlon(self, params):
        '''
        Find latitude and longitude from location name using GeoPy.
        '''
        file_name = params['file_name']
        location_names = QueryLocationName(file_name)
        if location_names:
            points = []
            for location in location_names:
                # Best-effort geocoding: locations Nominatim cannot resolve
                # (or network failures) are skipped, not fatal.
                try:
                    geolocation = geolocator.geocode(location)
                    points.append(
                        {'loc_name': location,
                        'position':{
                            'x': geolocation.latitude,
                            'y': geolocation.longitude
                            }
                        }
                    )
                except Exception:
                    # Was a bare `except:`; narrowed so KeyboardInterrupt etc.
                    # still propagate.
                    pass
            status = IndexLatLon(file_name, points)
            if status[0]:
                return {'job':'find_lat_lon', 'status': 'successful', 'comment':'Latitude and Longitude found and indexed to Solr.'}
            else:
                return {'job':'find_lat_lon', 'status': 'unsuccessful', 'comment':status[1]}
        else:
            return {'job':'find_lat_lon', 'status': 'unsuccessful', 'comment':'Cannot find location name.'}
    findLatlon.description = (
        Description('Find latitude and longitude')
    )

    @access.public
    def getPoints(self, params):
        '''
        Return geopoints for given filename
        '''
        file_name = params['file_name']
        points = QueryPoints(file_name)
        if points:
            return {'job':'getPoints', 'status': 'successful', 'comment':'Points returned sucessfuly', 'points':points}
        else:
            return {'job':'getPoints', 'status': 'unsuccessful', 'comment':'Cannot find location name.', 'points':""}
    # BUG FIX: this description was attached to the imported Solr helper
    # `QueryPoints` instead of the REST handler `getPoints`.
    getPoints.description = (
        Description('Return geo points for given file name.')
    )
class CustomAppRoot(object):
    """
    The webroot endpoint simply serves the main index HTML file of GeoParser,
    rendered through mako with the template variables below.
    """
    exposed = True

    # Path of this module's directory, then up to the bundled template.
    file_dir = os.path.realpath(__file__).split("__init__.py")[0]
    index_html_path = "{0}/../../../templates/index.html".format(file_dir)

    # Variables handed to the mako template on every request.
    vars = {
        'plugins': [],
        'apiRoot': '/api/v1',
        'staticRoot': '/static',
        'title': 'Memex GeoParser'
    }

    def GET(self):
        with open(self.index_html_path, 'r') as fh:
            template_text = fh.read()
        return mako.template.Template(template_text).render(**self.vars)
def load(info):
    """Girder plugin entry point: mount the GeoParser REST resource, move the
    stock girder app under /girder, and serve GeoParser's page from /."""
    info['apiRoot'].geoparser_jobs = GeoParserJobs()

    girder_root = info['serverRoot']
    info['serverRoot'] = CustomAppRoot()
    info['serverRoot'].girder = girder_root
    info['serverRoot'].api = girder_root.api
|
995,834 | f99baca36e9ace88b3bffd2387e5d584cdee8e2b | from network import shallow_resnet
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
import argparse
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.examples.tutorials.mnist import mnist
import cPickle
import numpy as np
class Learner(object):
    """Train/evaluate/validate a shallow-ResNet image classifier (TF1 graph mode).

    The graph is built once in __init__ via build_graph(); train(), eval()
    and validate() each open their own Session and restore weights according
    to `task`.

    NOTE(review): eval() and validate() restore hard-coded checkpoint paths
    ('./saved_model-10.ckpt' / './saved_model-100.ckpt') instead of
    `saved_model` / the `task` suffix -- these look like debug leftovers;
    confirm before relying on them.
    """
    def __init__(self,dataset,
            learning_rate = 0.001,
            num_classes = 10,
            batch_size = 50,
            img_height = 32,
            img_width = 32,
            num_epoches = 20,
            task='finetune',
            saved_model = 'trained_model.ckpt'):
        # dataset: object exposing .train/.validation/.test with
        # num_examples and next_batch() (mnist-style) -- TODO confirm.
        # task: 'finetune', 'continue_training[:...]', 'validation:<ckpt>',
        # or anything else for training from scratch.
        self.dataset = dataset
        self.learning_rate = learning_rate
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.img_height = img_height
        self.img_width = img_width
        self.num_epoches = num_epoches
        self.task = task
        self.saved_model = saved_model
        self.build_graph()

    def build_graph(self):
        """Build placeholders, network, loss, metrics, optimizer and savers."""
        # Put placeholders
        self.images = tf.placeholder(tf.float32,
                [self.batch_size, self.img_height, self.img_width, 3],name = 'images')
        # images = tf.random_crop(self.images,
        #       [self.batch_size, 64, 64, 3])
        # images = tf.image.resize_images(images, [128,128])
        # Randomly flip the image horizontally.
        # images = tf.image.random_flip_left_right(images)
        # # Because these operations are not commutative, consider randomizing
        # # the order their operation.
        # images = tf.image.random_brightness(images, max_delta=63)
        # images = tf.image.random_contrast(images, lower=0.2, upper=1.8)
        self.labels = tf.placeholder(tf.int64,[self.batch_size],name = 'labels')
        one_hot_labels = tf.one_hot(self.labels,self.num_classes)
        # train_phase toggles batch-norm/dropout behavior inside the network.
        self.train_phase = tf.placeholder(tf.bool, name='train_phase')
        # Lay down graph for the loss.
        # logits = shallow_resnet(images,
        #         self.num_classes,
        #         is_training = self.train_phase)
        logits = shallow_resnet(self.images,
                self.num_classes,
                is_training = self.train_phase)
        self.logits = tf.reshape(logits,[self.batch_size,self.num_classes])
        self.loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_labels,
                    logits=self.logits))
        self.predictions = tf.argmax(self.logits,1)
        correct_predictions = tf.equal(self.labels,
                self.predictions)
        # top-5 predictions, used by eval() for submission output.
        self.top5 = tf.nn.top_k(self.logits,k=5, sorted = True)
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions,tf.float32))
        self.optimum = \
                tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
        # Lay down computational graph for network.
        if self.task == 'finetune':
            # Get list of variables to restore: skip the classifier head and
            # the last two blocks so they are trained from scratch.
            variables_to_restore = []
            exclusions = ["resnet_v1_50/logits","biases",
                    "resnet_v1_50/block3","resnet_v1_50/block4"]
            for var in slim.get_model_variables():
                excluded = False
                for exclusion in exclusions:
                    if var.op.name.startswith(exclusion) or \
                            var.op.name.endswith(exclusion):
                        excluded = True
                        break
                if not excluded:
                    variables_to_restore.append(var)
        elif str.startswith(self.task,'continue_training'):
            variables_to_restore = slim.get_model_variables()
        elif str.startswith(self.task,'validation'):
            variables_to_restore = slim.get_model_variables()
        else:
            variables_to_restore = slim.get_model_variables()
        # restorer loads the (possibly partial) checkpoint; saver writes full
        # checkpoints during/after training.
        self.restorer = tf.train.Saver(variables_to_restore)
        self.saver = tf.train.Saver()

    def train(self):
        """Run the training loop, logging per-batch and per-epoch metrics to
        training_log.txt / epoch_log.txt and checkpointing every 10 epochs."""
        with tf.Session() as sess:
            init_op = tf.global_variables_initializer()
            sess.run(init_op)
            # Restore variables from disk according to the task.
            if self.task == 'finetune':
                self.restorer.restore(sess, "./resnet_v1_50.ckpt")
                print("Model restored.")
            elif str.startswith(self.task,'continue_training'):
                print('haha')
                model_tobe_restored = './saved_model-100.ckpt'
                self.restorer.restore(sess, model_tobe_restored)
            elif str.startswith(self.task,'validation'):
                model_tobe_restored = self.task.split(':')[1]
                self.restorer.restore(sess, model_tobe_restored)
            n_samples = self.dataset.train.num_examples
            num_batches = int(n_samples / self.batch_size)
            n_val_samples = self.dataset.validation.num_examples
            num_val_batches = \
                    int(n_val_samples / self.batch_size)
            # Training
            print 'The training stage...'
            for epoch in xrange(self.num_epoches):
                avg_loss_value = 0.
                avg_train_acc = 0.0
                for b in xrange(num_batches):
                    batch_images,batch_labels = self.dataset.train.next_batch(batch_size=self.batch_size)
                    # Grayscale batches are tiled to 3 channels to match the
                    # RGB input placeholder.
                    if batch_images.shape[-1] == 1:
                        batch_images = np.tile(batch_images,(1,1,1,3))
                    _,loss_val,train_acc = sess.run([self.optimum,
                        self.loss,self.accuracy],
                        feed_dict = {
                            self.images: batch_images,
                            self.labels: batch_labels,
                            self.train_phase: True})
                    avg_loss_value += loss_val / n_samples * self.batch_size
                    avg_train_acc += train_acc / n_samples * self.batch_size
                    print loss_val, train_acc
                    # current_ratio rescales the running averages so the log
                    # reflects the mean over batches seen so far.
                    current_ratio = float(n_samples)/(b * self.batch_size + self.batch_size)
                    with open("training_log.txt", "a") as f:
                        f.write(str(avg_loss_value * current_ratio)+','+
                                str(avg_train_acc * current_ratio) +'\n')
                avg_val_acc = 0.0
                for b in xrange(num_val_batches):
                    val_images,val_labels = self.dataset.validation.next_batch(self.batch_size)
                    if val_images.shape[-1] == 1:
                        val_images = np.tile(val_images,(1,1,1,3))
                    val_acc = sess.run(self.accuracy,
                            feed_dict = {self.images:val_images,
                                self.labels: val_labels,
                                self.train_phase: False})
                    print val_acc
                    avg_val_acc += val_acc / n_val_samples * self.batch_size
                # Periodic checkpoint every 10 epochs.
                if (epoch+1) % 10 == 0:
                    self.saver.save(sess,
                            self.saved_model + "-{}.ckpt".format(epoch+1))
                log = "Epoch: %s Training Loss: %s Train Accuracy: %s Val Accuracy: %s"%(epoch,avg_loss_value,
                        avg_train_acc,avg_val_acc)
                with open("epoch_log.txt", "a") as f:
                    f.write(log+'\n')
                print log
            # Save the model.
            saver = tf.train.Saver()
            saver.save(sess, self.saved_model)

    def eval(self):
        """Run the test set and return an (n_samples, 5) int array of top-5
        predicted class labels per sample."""
        with tf.Session() as sess:
            init_op = tf.global_variables_initializer()
            sess.run(init_op)
            # model_tobe_restored = self.task.split(':')[1]
            # NOTE(review): checkpoint path is hard-coded -- confirm.
            self.restorer.restore(sess,"./saved_model-10.ckpt")
            print 'hehe'
            n_samples = self.dataset.test.num_examples
            num_batches = int(n_samples / self.batch_size)
            # Test
            output = np.zeros((n_samples,5),dtype = int)
            print 'The test stage...'
            for b in xrange(num_batches):
                test_images,_ = self.dataset.test.next_batch(self.batch_size)
                if test_images.shape[-1] == 1:
                    test_images = np.tile(test_images,(1,1,1,3))
                test_values, test_labels = sess.run(self.top5,
                        feed_dict = {self.images:test_images,
                            self.train_phase: False})
                print test_labels
                output[b*self.batch_size: (b+1)*self.batch_size]=\
                        test_labels
            return output

    def validate(self):
        """Compute and print the average accuracy over the validation set."""
        with tf.Session() as sess:
            init_op = tf.global_variables_initializer()
            sess.run(init_op)
            # NOTE(review): model_tobe_restored is computed but unused; the
            # restored checkpoint is hard-coded below -- confirm.
            model_tobe_restored = self.task.split(':')[1]
            self.restorer.restore(sess, "./saved_model-100.ckpt")
            avg_val_acc = 0.0
            n_val_samples = self.dataset.validation.num_examples
            num_val_batches = int(n_val_samples / self.batch_size)
            for b in xrange(num_val_batches):
                val_images,val_labels = self.dataset.validation.next_batch(self.batch_size)
                if val_images.shape[-1] == 1:
                    val_images = np.tile(val_images,(1,1,1,3))
                val_acc = sess.run(self.accuracy,
                        feed_dict = {self.images:val_images,
                            self.labels: val_labels,
                            self.train_phase: False})
                print val_acc
                avg_val_acc += val_acc / n_val_samples * self.batch_size
            print 'Val Accuracy: {}'.format(avg_val_acc)
|
995,835 | 16a02a7c7f7b517857afa114f64099220a485238 | import sys, pprint
import maya.standalone
maya.standalone.initialize()
from pysideuic import compileUi
# Script: compile a Qt Designer .ui file to a .py module with pysideuic,
# then strip the generated resource import so the module loads without a
# compiled resource file. Requires a Maya standalone session (initialized
# above) before pysideuic is usable.
#Print what we're running
print "Running " + sys.argv[0]
filepath = ''
#Check for a passed in filepath or ask for one
if(len(sys.argv) < 2):
    filepath = raw_input("Need a filepath argument:\n")
else:
    filepath = sys.argv[1]
#Print
print "Filepath: " + filepath
#Remove the file extension
# NOTE(review): strips the last 3 characters unconditionally -- assumes the
# argument ends in ".ui"; any other extension produces a wrong output name.
filepath = filepath[:(len(filepath) - 3)]
#Open a new py file to put stuff in
pyfile = open(filepath + ".py", 'w')
#Use pyside to compile the ui
compileUi(filepath + ".ui", pyfile, False, 4,False)
#Close the file
pyfile.close()
#Now open it for reading
f = open(filepath + ".py", 'r')
#Get the file lines
lines = f.readlines()
#Close the file
f.close()
#Open again, this time for writing (truncates the file we just read)
f = open(filepath + ".py", 'w')
#Compile adds 'import <name_of_resource>_rc' to the end of the pyfile
#so iterate through the lines and only store the ones without the
#resource in them
for line in lines:
    if "_rc\n" not in line:
        f.write(line)
#Finally close
f.close()
|
995,836 | 209effd35b5791f239a60501f71328022fddbc0c | from .geometry import rmsd, dihedral
from .plots import sequential, histogram, ramachandran
from .general import trj_data
|
995,837 | 5583958217b8cc8ce61821eb09bd38a231c4f715 | from github import Github
import boto3
import os
# GitHub personal-access token is read from the environment; boto3 resolves
# AWS credentials/region through its standard configuration chain.
GITHUB_API_TOKEN = os.getenv('GITHUB_API_TOKEN')
github_client = Github(GITHUB_API_TOKEN)
codecommit_client = boto3.client('codecommit')
class bcolors:
    """ANSI escape sequences used to colorize console output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'   # reset to default style
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def clone_repo(repo_name):
    """Mirror-clone the GitHub repository `repo_name` into ./<repo_name>."""
    print(f"{bcolors.OKGREEN}--> Cloning repository {repo_name} to local storage {bcolors.ENDC}")
    command = 'git clone --mirror https://github.com/rribeiro1/{}.git {}'.format(repo_name, repo_name)
    os.system(command)
def delete_repo_local(repo_name):
    """Remove the local mirror clone of `repo_name` from the working directory."""
    print(f"{bcolors.OKGREEN}--> Deleting repository {repo_name} from local storage {bcolors.ENDC}")
    command = 'rm -Rf {}'.format(repo_name)
    os.system(command)
def is_repo_exists_on_aws(repo_name):
    """Return True if a CodeCommit repository named `repo_name` exists.

    Any failure of get_repository (not found, auth, network) is treated as
    "does not exist" -- deliberately best-effort.
    """
    try:
        codecommit_client.get_repository(repositoryName=repo_name)
    except Exception:
        return False
    return True
def create_repo_code_commit(repo_name):
    """Create the backup repository `repo_name` on AWS CodeCommit."""
    print(f"{bcolors.OKBLUE}--> Creating repository {repo_name} on AWS CodeCommit {bcolors.ENDC}")
    description = 'Backup repository for {}'.format(repo_name)
    codecommit_client.create_repository(
        repositoryName=repo_name,
        repositoryDescription=description,
        tags={'name': repo_name}
    )
def sync_code_commit_repo(repo_name):
    """Push the local mirror clone of `repo_name` to its CodeCommit counterpart."""
    print(f"{bcolors.OKGREEN}--> Pushing changes from repository {repo_name} to AWS CodeCommit {bcolors.ENDC}")
    os.system('cd {} && git remote add sync ssh://git-codecommit.eu-central-1.amazonaws.com/v1/repos/{}'.format(repo_name, repo_name))
    # BUG FIX: was `repo.name`, which silently read the module-level loop
    # variable `repo` instead of this function's argument.
    os.system('cd {} && git push sync --mirror'.format(repo_name))
# Driver: mirror every repository of the authenticated GitHub user into AWS
# CodeCommit. Runs at import time (network + filesystem side effects).
for repo in github_client.get_user().get_repos():
    if repo.archived:
        print(f"{bcolors.WARNING}> Skipping repository {repo.name}, it is archived on github {bcolors.ENDC}")
    else:
        print(f"{bcolors.HEADER}> Processing repository: {repo.name} {bcolors.ENDC}")
        clone_repo(repo.name)
        if is_repo_exists_on_aws(repo.name):
            sync_code_commit_repo(repo.name)
        else:
            # First backup of this repo: create the CodeCommit repo, then push.
            create_repo_code_commit(repo.name)
            sync_code_commit_repo(repo.name)
        # The local mirror clone is only a staging area; always remove it.
        delete_repo_local(repo.name)
|
995,838 | 6549df7be885e76d37a10ba2c174316fdad0983c | from .tile_data import TILE_DATA, SPRITE_DATA
from .map_data import MAP_DATA
from .sound_data import SOUND_DATA
from .music_data import MUSIC_DATA
|
995,839 | 3ea2d3fcd32fc85c0e3763ad03f4584f39e05263 | # Generated by Django 3.1.4 on 2021-01-19 14:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-add `sabre_token` to the `pnr` model as a JSONField."""

    dependencies = [
        ('pnr', '0015_remove_pnr_sabre_token'),
    ]

    operations = [
        migrations.AddField(
            model_name='pnr',
            name='sabre_token',
            # BUG FIX: `default={}` is a mutable literal shared by every row
            # (Django check fields.E010); a callable producing a fresh dict
            # per instance is required.
            field=models.JSONField(default=dict, verbose_name='sabre token'),
        ),
    ]
|
995,840 | 1e3ef80f0ca579872f9e09d9d2a9227633f8ccf9 | """
Implement Selection Sort
"""
def selection_sort(arr):
    """Sort `arr` in place using selection sort (O(n^2) comparisons).

    Prints the list before and after sorting (preserved behavior) and
    returns None. Handles empty and single-element lists trivially.
    """
    n = len(arr)
    print (arr)
    for i in range(n):
        # Find the index of the smallest element in arr[i:].
        smallest = i
        for j in range(i + 1, n):
            if arr[j] < arr[smallest]:
                smallest = j
        # Move it into position i (self-swap is a harmless no-op).
        arr[i], arr[smallest] = arr[smallest], arr[i]
    print (arr)
# Demo: runs at import time and prints the list before and after sorting.
arr = [99, 44, 6, 2, 1, 5, 63, 87, 283, 4, 0]
selection_sort(arr)
|
995,841 | 9665395afbe8feda082d5daa83c5dd1aa4e15a45 | from collections import namedtuple
import torch
from torchvision import models
from utils.constants import SupportedPretrainedWeights
class GoogLeNet(torch.nn.Module):
    """Feature extractor wrapping torchvision's GoogLeNet.

    Only those layers are exposed which have already proven to work nicely:
    forward() returns a namedtuple of the inception3b/4c/4d/4e activations.
    Gradients are frozen by default (requires_grad=False) since the model is
    used as a fixed feature extractor.
    """
    def __init__(self, pretrained_weights, requires_grad=False, show_progress=False):
        super().__init__()
        if pretrained_weights == SupportedPretrainedWeights.IMAGENET:
            googlenet = models.googlenet(pretrained=True, progress=show_progress).eval()
        else:
            raise Exception(f'Pretrained weights {pretrained_weights} not yet supported for {self.__class__.__name__} model.')

        self.layer_names = ['inception3b', 'inception4c', 'inception4d', 'inception4e']

        self.conv1 = googlenet.conv1
        self.maxpool1 = googlenet.maxpool1
        self.conv2 = googlenet.conv2
        self.conv3 = googlenet.conv3
        self.maxpool2 = googlenet.maxpool2

        self.inception3a = googlenet.inception3a
        self.inception3b = googlenet.inception3b
        self.maxpool3 = googlenet.maxpool3

        self.inception4a = googlenet.inception4a
        self.inception4b = googlenet.inception4b
        self.inception4c = googlenet.inception4c
        self.inception4d = googlenet.inception4d
        self.inception4e = googlenet.inception4e

        # Set these to False so that PyTorch won't be including them in it's autograd engine - eating up precious memory
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    # todo: not sure why they are using this additional processing - made an issue
    # https://discuss.pytorch.org/t/why-does-googlenet-additionally-process-input-via-transform-input/88865
    def transform_input(self, x):
        """Re-normalize the input the way torchvision's GoogLeNet does.

        BUG FIX: the original guard ``if self.transform_input:`` tested the
        bound method itself (always truthy), so the transform ran
        unconditionally anyway; the vacuous check is removed to make that
        explicit. Behavior is unchanged.
        """
        x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
        x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
        x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
        x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
        return x

    def forward(self, x):
        """Return a namedtuple of intermediate activations (see layer_names)."""
        x = self.transform_input(x)
        # N x 3 x 224 x 224
        x = self.conv1(x)
        conv1 = x
        # N x 64 x 112 x 112
        x = self.maxpool1(x)
        mp1 = x
        # N x 64 x 56 x 56
        x = self.conv2(x)
        conv2 = x
        # N x 64 x 56 x 56
        x = self.conv3(x)
        conv3 = x
        # N x 192 x 56 x 56
        x = self.maxpool2(x)
        mp2 = x
        # N x 192 x 28 x 28
        x = self.inception3a(x)
        inception3a = x
        # N x 256 x 28 x 28
        x = self.inception3b(x)
        inception3b = x
        # N x 480 x 28 x 28
        x = self.maxpool3(x)
        mp3 = x
        # N x 480 x 14 x 14
        x = self.inception4a(x)
        inception4a = x
        # N x 512 x 14 x 14
        x = self.inception4b(x)
        inception4b = x
        # N x 512 x 14 x 14
        x = self.inception4c(x)
        inception4c = x
        # N x 512 x 14 x 14
        x = self.inception4d(x)
        inception4d = x
        # N x 528 x 14 x 14
        x = self.inception4e(x)
        inception4e = x
        # The intermediate taps above (conv1, mp1, ...) are intentionally kept
        # so other layers can be swapped into the output easily.
        # Feel free to experiment with different layers.
        net_outputs = namedtuple("GoogLeNetOutputs", self.layer_names)
        out = net_outputs(inception3b, inception4c, inception4d, inception4e)

        return out
995,842 | d0cc7d0daafe8d42db165f37227245529379b98d | #!/usr/bin/env python
## \file state.py
# \brief python package for state
# \author T. Lukaczyk, F. Palacios
# \version 4.1.2 "Cardinal"
#
# SU2 Lead Developers: Dr. Francisco Palacios (Francisco.D.Palacios@boeing.com).
# Dr. Thomas D. Economon (economon@stanford.edu).
#
# SU2 Developers: Prof. Juan J. Alonso's group at Stanford University.
# Prof. Piero Colonna's group at Delft University of Technology.
# Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# Prof. Alberto Guardone's group at Polytechnic University of Milan.
# Prof. Rafael Palacios' group at Imperial College London.
#
# Copyright (C) 2012-2016 SU2, the open-source CFD code.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import os, sys, shutil, copy, time
from ..io import expand_part, expand_time, get_adjointSuffix, add_suffix, \
get_specialCases, Config
from ..util import bunch
from ..util import ordered_bunch
# ----------------------------------------------------------------------
# State Factory
# ----------------------------------------------------------------------
def State_Factory(state=None,config=None):
    """ state = SU2.io.State()

        Build a new State (an extension of ordered_bunch) with empty
        FUNCTIONS, GRADIENTS, VARIABLES, FILES and HISTORY sections.
        If an existing State is passed in, it is returned unchanged.
        If a Config is given (either as the keyword or as the sole
        positional argument), existing mesh/solution files are located
        via State.find_files(config).

        Fields can be accessed by item or attribute,
        ie: state['FUNCTIONS'] or state.FUNCTIONS

        Methods:
            update()        - updates self with another state
            pullnlink()     - returns files to pull and link
            design_vector() - vectorizes design variables
            find_files()    - finds existing mesh and solutions
    """

    # Allow State_Factory(config): a Config as the sole positional argument.
    if isinstance(state,Config) and not config:
        config = state
        state = None

    # Pass an already-built State straight through.
    if state is not None:
        assert isinstance(state,State) , 'input is must be a state instance'
        return state

    new_state = State()
    for section in ('FUNCTIONS','GRADIENTS','VARIABLES','FILES','HISTORY'):
        new_state[section] = ordered_bunch()

    if config:
        new_state.find_files(config)

    return new_state
# ----------------------------------------------------------------------
# State Class
# ----------------------------------------------------------------------
class State(ordered_bunch):
""" state = SU2.io.state.State()
This is the State class that should be generated with the
Factory Function SU2.io.state.State_Factory()
Parameters:
none, should be loaded with State_Factory()
Methods:
update() - updates self with another state
pullnlink() - returns files to pull and link
design_vector() - vectorizes design variables
find_files() - finds existing mesh and solutions
"""
_timestamp = 0
def update(self,ztate):
""" Updates self given another state
"""
if not ztate: return
assert isinstance(ztate,State) , 'must update with another State-type'
for key in self.keys():
if isinstance(ztate[key],dict):
self[key].update( ztate[key] )
elif ztate[key]:
self[key] = ztate[key]
self.set_timestamp()
def __repr__(self):
return self.__str__()
    def __str__(self):
        # Render as an indented listing: section name, then one line per
        # entry (dict sections get a second level of indentation).
        output = 'STATE:'
        for k1,v1 in self.iteritems():
            output += '\n %s:' % k1
            if isinstance(v1,dict):
                for k2,v2 in v1.iteritems():
                    output += '\n %s: %s' % (k2,v2)
            else:
                output += '\n %s' % v1
        return output
def pullnlink(self,config):
""" pull,link = SU2.io.State.pullnlink(config)
returns lists pull and link of files for folder
redirection, based on a given config
"""
pull = []; link = []
# choose files to pull and link
for key,value in self.FILES.iteritems():
# link big files
if key == 'MESH':
# mesh (merged or partitioned)
value = expand_part(value,config)
link.extend(value)
elif key == 'DIRECT':
# direct solution
value = expand_time(value,config)
link.extend(value)
elif 'ADJOINT_' in key:
# adjoint solution
value = expand_time(value,config)
link.extend(value)
#elif key == 'STABILITY':
#pass
# copy all other files
else:
pull.append(value)
#: for each filename
return pull,link
def design_vector(self):
""" vectorizes State.VARIABLES
"""
vector = []
for value in self.VARIABLES.values():
if isinstance(value,dict):
for v in value.values():
vector.append(v)
elif not isinstance(value,list):
value = [value]
vector.extend(value)
return vector
def find_files(self,config):
""" SU2.io.State.find_files(config)
finds mesh and solution files for a given config.
updates state.FILES with filenames.
files already logged in state are not overridden.
will ignore solutions if config.RESTART_SOL == 'NO'.
"""
files = self.FILES
mesh_name = config.MESH_FILENAME
direct_name = config.SOLUTION_FLOW_FILENAME
adjoint_name = config.SOLUTION_ADJ_FILENAME
targetea_name = 'TargetEA.dat'
targetcp_name = 'TargetCp.dat'
targetheatflux_name = 'TargetHeatFlux.dat'
adj_map = get_adjointSuffix()
restart = config.RESTART_SOL == 'YES'
special_cases = get_specialCases(config)
def register_file(label,filename):
if not files.has_key(label):
if os.path.exists(filename):
files[label] = filename
print 'Found: %s' % filename
else:
assert os.path.exists(files[label]) , 'state expected file: %s' % filename
#: register_file()
# mesh
register_file('MESH',mesh_name)
# direct solution
if restart:
register_file('DIRECT',direct_name)
# adjoint solutions
if restart:
for obj,suff in adj_map.iteritems():
ADJ_LABEL = 'ADJOINT_' + obj
adjoint_name_suffixed = add_suffix(adjoint_name,suff)
register_file(ADJ_LABEL,adjoint_name_suffixed)
# equivalent area
if 'EQUIV_AREA' in special_cases:
register_file('TARGET_EA',targetea_name)
# pressure inverse design
if 'INV_DESIGN_CP' in special_cases:
register_file('TARGET_CP',targetcp_name)
# heat flux inverse design
if 'INV_DESIGN_HEATFLUX' in special_cases:
register_file('TARGET_HEATFLUX',targetheatflux_name)
return
def __setitem__(self,k,v):
if self._initialized:
self.set_timestamp()
super(State,self).__setitem__(k,v)
def set_timestamp(self):
self._timestamp = time.time()
def tic(self):
""" timestamp = State.tic()
returns the time that this state was last modified
"""
return self._timestamp
def toc(self,timestamp):
""" updated = State.toc(timestamp)
returns True if state was modified since last timestamp
"""
return self._timestamp > timestamp
#: def State
|
995,843 | 875e2e5534dcca89c1712287d4cbd65b082913a6 | total = 0
for i in range(1, 10):
    total += i
print(total)

total2 = 0
i1 = 0
while i1 < 5:
    total2 += i1
    i1 += 1
print(total2)

my_list = [6, 4, 5, 2, 9, -2, -3, -1, 15]  # create the list
# task: keep adding the numbers until we hit a negative one; never add negatives
print(my_list[0])  # square brackets index into the list; 0 is the first position
total3 = 0  # accumulator variable
i2 = 0  # counter variable
while my_list[i2] > 0:  # the condition starts at the first number of the list
    total3 += my_list[i2]  # add the current number to the accumulator
    i2 += 1  # move on to the next number in the list
print(total3)  # once the condition is false, print what the accumulator holds

total4 = 0  # same task, just with a different variable
for element in my_list:  # examine the variable element while walking over my_list
    if element > 0:  # if the number in the list is greater than 0...
        total4 += element  # ...add that number to the accumulator
print(total4)  # unlike the first variant, this checks the whole list instead of stopping at a negative number

total5 = 0  # same again, but skipping out at the negatives entirely (e.g. a million of them would just waste time)
for element1 in my_list:
    if element1 <= 0:  # when we see a negative number...
        break  # ...stop the loop
    total5 += element1  # otherwise keep adding numbers into the accumulator
print(total5)

total6 = 0
# stop accumulating once the running total reaches 11 or more
for element2 in my_list:
    if total6 >= 11:
        break
    total6 += element2
print(total6)

my_list = [6, 3, 5, 2, 9, -2, -3, -1, 15]
total7 = 0
i7 = 0
# stop when the total reaches 10 OR the current number is no longer positive
while total7 < 10 and my_list[i7] > 0:
    total7 += my_list[i7]
    i7 += 1
print(total7)

#my_list = [6, 3, 5, 2, 9]
#total8 = 0
#i8 = 0
#while my_list[i2] > 0:
#    total8 += my_list[i8]
#    i8 += 1
#print(total8)
# the same example, but it raises an error: nothing stops the loop, so it reaches
# the last number, adds 1 to i8 and steps past the end of the list.
# here is how to fix it:
my_list = [6, 3, 5, 2, 9]
total9 = 0
i9 = 0
while i9 < len(my_list) and my_list[i9] > 0:  # bounds check prevents IndexError
    total9 += my_list[i9]
    i9 += 1
print(total9)
995,844 | c30e0164c82974d5ad1116f18b4ed4f18048ce15 | # -*- coding: utf-8 -*-
# @Author: JinZhang
# @Date: 2018-03-22 13:06:51
# @Last Modified by: JinZhang
# @Last Modified time: 2018-03-23 13:30:03
# Module-level registry of application-wide "global" values.
_G = {}


def setG(key, value):
    """Register *value* under *key*; existing keys are never overwritten
    (first-write-wins, matching the original behavior)."""
    if key in _G:
        # Key already registered — warn instead of clobbering.
        print("The global var is existed !")
    else:
        _G[key] = value


def getG(key):
    """Return the value registered under *key*.

    Raises:
        KeyError: if *key* was never registered.
    """
    try:
        return _G[key]
    except KeyError as e:
        # Bug fix: a missing dict key raises KeyError, not NameError as the
        # original caught, so this handler never fired.
        print("The global var is not exist !")
        raise e
|
995,845 | d4228813febed2572fbab155f0039174cc3f3e8f | import argparse
import bz2
import gzip
import json
import io
import os
from datetime import datetime, timedelta
from typing import Any, Callable, List, Optional, Text
from urllib.request import urlopen
try:
import zstandard
except ImportError:
zstandard = None
from .utils import git
from . import log
here = os.path.dirname(__file__)
wpt_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir))
logger = log.get_logger()
def abs_path(path: Text) -> Text:
    """Expand a leading ``~`` and return the absolute form of *path*."""
    expanded = os.path.expanduser(path)
    return os.path.abspath(expanded)
def should_download(manifest_path: Text, rebuild_time: timedelta = timedelta(days=5)) -> bool:
    """Return True when the manifest is missing or older than *rebuild_time*."""
    if not os.path.exists(manifest_path):
        return True
    cutoff = datetime.now() - rebuild_time
    if datetime.fromtimestamp(os.path.getmtime(manifest_path)) < cutoff:
        return True
    logger.info("Skipping manifest download because existing file is recent")
    return False
def merge_pr_tags(repo_root: Text, max_count: int = 50) -> List[Text]:
    """Collect up to *max_count* recent ``merge_pr_*`` tag names from git history."""
    gitfunc = git(repo_root)
    tags: List[Text] = []
    if gitfunc is None:
        # No usable git checkout — nothing to report.
        return tags
    log_output = gitfunc("log", "--format=%D", "--max-count=%s" % max_count)
    for line in log_output.split("\n"):
        # Each line lists refs like "tag: merge_pr_123, origin/master".
        tags.extend(ref[5:] for ref in line.split(", ")
                    if ref.startswith("tag: merge_pr_"))
    return tags
def score_name(name: Text) -> Optional[int]:
    """Score how much we like each filename, lower wins, None rejects."""
    # Accept both ways of naming the manifest asset, even though
    # there's no longer a reason to include the commit sha.
    if not (name.startswith("MANIFEST-") or name.startswith("MANIFEST.")):
        return None
    if zstandard and name.endswith("json.zst"):
        return 1  # best: zstd, but only when the optional module is present
    if name.endswith(".json.bz2"):
        return 2
    if name.endswith(".json.gz"):
        return 3
    return None
def github_url(tags: List[Text]) -> Optional[List[Text]]:
    """Return manifest asset URLs for the first tag with a fetchable release.

    Tags are tried in order; the first GitHub release that responds 200 is
    used, and its asset URLs are returned best-first (per score_name()).
    Returns None when no tag yields a usable release.
    """
    for tag in tags:
        url = "https://api.github.com/repos/web-platform-tests/wpt/releases/tags/%s" % tag
        try:
            resp = urlopen(url)
        except Exception:
            logger.warning("Fetching %s failed" % url)
            continue
        if resp.code != 200:
            logger.warning("Fetching %s failed; got HTTP status %d" % (url, resp.code))
            continue
        try:
            release = json.load(resp.fp)
        except ValueError:
            # NOTE(review): invalid JSON aborts the whole search instead of
            # trying the next tag — confirm this short-circuit is intended.
            logger.warning("Response was not valid JSON")
            return None
        candidates = []
        for item in release["assets"]:
            score = score_name(item["name"])
            if score is not None:
                candidates.append((score, item["browser_download_url"]))
        # sorted() orders by score, so callers get the preferred asset first.
        return [item[1] for item in sorted(candidates)]
    return None
def download_manifest(
    manifest_path: Text,
    tags_func: Callable[[], List[Text]],
    url_func: Callable[[List[Text]], Optional[List[Text]]],
    force: bool = False
) -> bool:
    """Fetch a pregenerated manifest into *manifest_path*.

    *tags_func* supplies candidate merge_pr tags; *url_func* maps them to
    download URLs.  The first URL that downloads and decompresses cleanly
    wins.  Returns True only when a manifest file was actually written.
    """
    if not force and not should_download(manifest_path):
        return False

    tags = tags_func()

    urls = url_func(tags)
    if not urls:
        logger.warning("No generated manifest found")
        return False

    for url in urls:
        logger.info("Downloading manifest from %s" % url)
        try:
            resp = urlopen(url)
        except Exception:
            logger.warning("Downloading pregenerated manifest failed")
            continue

        if resp.code != 200:
            logger.warning("Downloading pregenerated manifest failed; got HTTP status %d" %
                           resp.code)
            continue

        # Pick the decompressor from the URL extension; any failure falls
        # through ("continue") to the next candidate URL.
        if url.endswith(".zst"):
            if not zstandard:
                # Optional dependency missing — cannot handle this asset.
                continue
            try:
                dctx = zstandard.ZstdDecompressor()
                decompressed = dctx.decompress(resp.read())
            except OSError:
                logger.warning("Failed to decompress downloaded file")
                continue
        elif url.endswith(".bz2"):
            try:
                decompressed = bz2.decompress(resp.read())
            except OSError:
                logger.warning("Failed to decompress downloaded file")
                continue
        elif url.endswith(".gz"):
            fileobj = io.BytesIO(resp.read())
            try:
                with gzip.GzipFile(fileobj=fileobj) as gzf:
                    data = gzf.read()
                decompressed = data
            except OSError:
                logger.warning("Failed to decompress downloaded file")
                continue
        else:
            logger.warning("Unknown file extension: %s" % url)
            continue
        break  # success — stop trying further URLs
    else:
        # Every candidate URL failed (loop exhausted without break).
        return False

    try:
        with open(manifest_path, "wb") as f:
            f.write(decompressed)
    except Exception:
        logger.warning("Failed to write manifest")
        return False
    logger.info("Manifest downloaded")
    return True
def create_parser() -> argparse.ArgumentParser:
    """Build the command-line parser for the manifest download tool."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--path", type=abs_path,
                        help="Path to manifest file.")
    parser.add_argument("--tests-root", type=abs_path, default=wpt_root,
                        help="Path to root of tests.")
    parser.add_argument("--force", action="store_true",
                        help="Always download, even if the existing manifest is recent")
    return parser
def download_from_github(path: Text, tests_root: Text, force: bool = False) -> bool:
    """Download a pregenerated manifest for *tests_root* into *path*."""
    tags_func = lambda: merge_pr_tags(tests_root)
    return download_manifest(path, tags_func, github_url, force=force)
def run(**kwargs: Any) -> int:
    """CLI entry point; returns a process exit status (0 on success)."""
    path = kwargs["path"]
    if path is None:
        # Default to MANIFEST.json at the tests root.
        path = os.path.join(kwargs["tests_root"], "MANIFEST.json")
    ok = download_from_github(path, kwargs["tests_root"], kwargs["force"])
    return 0 if ok else 1
|
995,846 | 55624f8ae37a9503f570837e00e2d3a23743c0df | """Sample code demonstrating management of Azure web apps.
This script expects that the following environment vars are set:
AZURE_TENANT_ID: with your Azure Active Directory tenant id or domain
AZURE_CLIENT_ID: with your Azure Active Directory Application Client ID
AZURE_CLIENT_SECRET: with your Azure Active Directory Application Secret
AZURE_SUBSCRIPTION_ID: with your Azure Subscription Id
"""
import os
from haikunator import Haikunator
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.web import WebSiteManagementClient
from azure.mgmt.web.models import AppServicePlan, SkuDescription, Site
WEST_US = 'westus'  # Azure region used for every resource in this sample
GROUP_NAME = 'azure-sample-group'  # resource group created (and deleted) by run_example()
SERVER_FARM_NAME = 'sample-server-farm'  # App Service plan name
SITE_NAME = Haikunator().haikunate()  # random human-readable site name (must be globally unique)
def run_example():
    """Web Site management example.

    Creates a resource group, an App Service plan and a web app, lists and
    fetches the site, then (after a prompt) tears everything down again.
    """
    #
    # Create the Resource Manager Client with an Application (service principal) token provider
    #
    subscription_id = os.environ['AZURE_SUBSCRIPTION_ID']
    credentials = ServicePrincipalCredentials(
        client_id=os.environ['AZURE_CLIENT_ID'],
        secret=os.environ['AZURE_CLIENT_SECRET'],
        tenant=os.environ['AZURE_TENANT_ID']
    )
    resource_client = ResourceManagementClient(credentials, subscription_id)
    web_client = WebSiteManagementClient(credentials, subscription_id)

    # Create Resource group
    print('Create Resource Group')
    resource_group_params = {'location': 'westus'}
    print_item(resource_client.resource_groups.create_or_update(GROUP_NAME, resource_group_params))

    #
    # Create an App Service plan for your WebApp
    #
    print('Create an App Service plan for your WebApp')
    service_plan_async_operation = web_client.app_service_plans.create_or_update(
        GROUP_NAME,
        SERVER_FARM_NAME,
        AppServicePlan(
            app_service_plan_name=SERVER_FARM_NAME,
            location=WEST_US,
            sku=SkuDescription(
                name='S1',  # Standard tier, one instance
                capacity=1,
                tier='Standard'
            )
        )
    )
    service_plan = service_plan_async_operation.result()  # block until provisioned
    print_item(service_plan)

    #
    # Create a Site to be hosted on the App Service plan
    #
    print('Create a Site to be hosted on the App Service plan')
    site_async_operation = web_client.web_apps.create_or_update(
        GROUP_NAME,
        SITE_NAME,
        Site(
            location=WEST_US,
            server_farm_id=service_plan.id
        )
    )
    site = site_async_operation.result()
    print_item(site)

    #
    # List Sites by Resource Group
    #
    print('List Sites by Resource Group')
    for site in web_client.web_apps.list_by_resource_group(GROUP_NAME):
        print_item(site)

    #
    # Get a single Site
    #
    print('Get a single Site')
    site = web_client.web_apps.get(GROUP_NAME, SITE_NAME)
    print_item(site)

    print("Your site and server farm have been created. " \
          "You can now go and visit at http://{}/".format(site.default_host_name))
    input("Press enter to delete the site and server farm.")

    #
    # Delete a Site
    #
    print('Deleting the Site')
    web_client.web_apps.delete(GROUP_NAME, SITE_NAME)

    #
    # Delete the Resource Group
    #
    print('Deleting the resource group')
    delete_async_operation = resource_client.resource_groups.delete(GROUP_NAME)
    delete_async_operation.wait()  # block until deletion completes
def print_item(group):
    """Print name/id/location/tags (plus status and properties, when present)
    of an Azure model object."""
    for label, value in (("Name", group.name), ("Id", group.id),
                         ("Location", group.location), ("Tags", group.tags)):
        print("\t{}: {}".format(label, value))
    if hasattr(group, 'status'):
        print("\tStatus: {}".format(group.status))
    if hasattr(group, 'state'):  # Site objects expose .state instead of .status
        print("\tStatus: {}".format(group.state))
    if hasattr(group, 'properties'):
        print_properties(group.properties)
    print("\n\n")
def print_properties(props):
    """Print some properties of a Site (no-op when absent)."""
    if not props or not props.provisioning_state:
        return
    print("\tProperties:")
    print("\t\tProvisioning State: {}".format(props.provisioning_state))
if __name__ == "__main__":
    # Run the sample only when executed directly (not on import).
    run_example()
|
995,847 | c6b8f712964f2d440d1412f85440d231affb718e | from __future__ import print_function, division
import torch
import torch.optim as optim
from networks.simplenet import SimpleANN
from core.simul import DQNSimulator
from agents.dqn import DQN
from utils.env_wrapper import BasicEnv
from utils.memory import ReplayMemory
TOTAL_STEPS = 50000          # total environment steps to train for
TARGET_UPDATE_STEPS = 300    # sync the target network every N steps
EXPLORATION_STEPS = 2000     # warm-up steps before learning begins
def make_cuda(model):
    """Move *model* to the GPU when CUDA is available; otherwise a no-op."""
    if not torch.cuda.is_available():
        return
    model.cuda()
def main():
    """Train a double-DQN agent on CartPole-v1, then run a short test."""
    # set up environment
    env = BasicEnv('CartPole-v1', seed=123456)
    # set up network model
    input_size = 4  # CartPole observation dimension
    output_size = env.num_actions
    model = SimpleANN(input_size, output_size)
    make_cuda(model)
    # Separate target network gives stable Q-learning targets.
    target_model = SimpleANN(input_size, output_size)
    make_cuda(target_model)
    optimizer = optim.Adam(model.parameters())
    dqn = DQN(model, optimizer, target_model=target_model, gamma = 0.99,
              double_q_learning=True, eps_start=1.0, eps_end=0.05,
              eps_decay=10000)
    replay = ReplayMemory(10000, history_length=1)  # uniform experience replay
    simul = DQNSimulator(dqn, env, replay)
    simul.train(TOTAL_STEPS,
                target_update_steps=TARGET_UPDATE_STEPS,
                batch_size=32,
                exploration_steps=EXPLORATION_STEPS,
                save_path='./trained_models/',
                save_steps=200)
    simul.test(5, batch_size=32)  # 5 evaluation episodes
    env.close()
if __name__ == '__main__':
    # Script entry point.
    main()
|
995,848 | b0e2c65f4a7e46790b7a473cee0b6faf809f7438 | import numpy as np
import pandas as pd
import ComputeGrad
from sklearn import datasets
def softmax(a):
    """Numerically stable softmax.

    1-D input returns a probability vector; 2-D input normalizes each row.
    Subtracting the (global) maximum before exponentiating leaves the
    result mathematically unchanged while preventing overflow.
    """
    shifted = np.exp(a - np.max(a))
    if a.ndim == 1:
        return shifted / np.sum(shifted)
    row_sums = np.sum(shifted, 1)
    return shifted / row_sums.reshape(row_sums.shape[0], 1)
def cross_entropy_loss(y, t):
    """Mean cross-entropy between predictions *y* and one-hot targets *t*.

    NOTE(review): log(0) yields -inf when y contains exact zeros — callers
    are expected to pass softmax outputs, which are strictly positive.
    """
    if y.ndim == 1:
        # Promote a single sample to a 1-row batch.
        y = y.reshape(1, y.size)
        t = t.reshape(1, t.size)
    return -np.sum(t * np.log(y)) / y.shape[0]
# --- Demo: softmax (multinomial logistic) regression on the Iris dataset ---
iris = datasets.load_iris()
x = iris.data
# NOTE(review): divides by the single global mean rather than per-feature
# statistics — presumably a crude rescaling; confirm against ComputeGrad.
x /= x.mean()
y = iris.target
print(x.shape, y.shape)

# One-hot encode the integer class labels -> shape (150, 3).
one_hot = np.zeros((y.shape[0], y.max() + 1))
one_hot[np.arange(y.shape[0]), y] = 1
y = one_hot
print(y.shape)

num_classes = 3
# Random weight init (4 features x 3 classes); zero bias.
w = np.random.uniform(-1, 1, (x.shape[1], num_classes))
b = np.zeros(num_classes)

# Train with the project's gradient-descent helper
# (learning rate 0.1, 10000 iterations, batch size 128).
w, b = ComputeGrad.SoftmaxGD(x, y, w, b, 0.1, 10000, 128)

# Report training accuracy.
pred = x.dot(w) + b
pred = softmax(pred)
print("ACC : ", (pred.argmax(1) == y.argmax(1)).mean())
|
995,849 | a1442af73e484d3952931856c140da779adba2ef | import os
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.cors import CORSMiddleware
from starlette.requests import Request
from starlette.responses import JSONResponse
from starlette.routing import Mount, Route
from starlette.staticfiles import StaticFiles
from starlette.templating import Jinja2Templates
from gsheet_service import (
media_views,
oauth_views,
scheduler_views,
settings,
sheet_views,
spell_check_views,
)
# NOTE(review): abspath(__name__) resolves the *module name string* relative
# to the current working directory — abspath(__file__) is the usual choice;
# confirm the CWD-dependence is intended.
BASE_DIR = os.path.dirname(os.path.abspath(__name__))
templates = Jinja2Templates(
    directory=os.path.join(BASE_DIR, "gsheet_service", "templates")
)
def oauth_config():
    """Shared OAuth settings passed to every oauth_service call.

    Returns a dict with ``link`` (spreadsheet), ``sheet`` (sheet name) and
    ``redirect_uri`` keys.
    """
    # Idiom fix: an explicit dict replaces the fragile ``return locals()``
    # trick, whose shape silently changes if a temporary variable is added.
    return {
        "link": settings.OAUTH_SPREADSHEET,
        "sheet": settings.OAUTH_SHEET_NAME,
        "redirect_uri": f"{settings.HOST_PROVIDER}/redirect",
    }
async def home(request: Request):
    """Render the landing page with a Zoho OAuth authorization link."""
    result = await oauth_views.oauth_service.get_authorization_url(
        "zoho", **oauth_config()
    )
    return templates.TemplateResponse(
        "index.html",
        {
            "request": request,
            "authorization_url": result.data["authorization_url"],
        },
    )
async def fetch_access_token(request: Request):
    """Exchange an authorization response (``redirect_uri``) for Zoho tokens.

    Responds 400 with ``{"status": False}`` when the JSON body lacks a
    redirect_uri or when the provider reports an error.
    """
    body = await request.json()
    authorization_response = body.get("redirect_uri")
    if not authorization_response:
        return JSONResponse(
            {"status": False, "msg": "No redirect_uri passed"}, status_code=400
        )
    result = await oauth_views.oauth_service.get_access_and_refresh_token(
        "zoho",
        authorization_response=authorization_response,
        **oauth_config(),
    )
    if result.error:
        return JSONResponse({"status": False, "msg": result.error}, status_code=400)
    return JSONResponse({"status": True, "data": result.data})
async def get_emails(request: Request):
    """List emails for the path's ``{provider}`` matching ``search_config``.

    Requires ``refresh_token`` and ``search_config`` in the JSON body;
    responds 400 with ``{"status": False}`` on missing input or provider error.
    """
    body = await request.json()
    refresh_token = body.get("refresh_token")
    search_config = body.get("search_config")
    provider = request.path_params["provider"]
    if not refresh_token:
        return JSONResponse(
            {"status": False, "msg": "No refresh_token sent"}, status_code=400
        )
    if not search_config:
        return JSONResponse(
            {"status": False, "msg": "No search_config sent"}, status_code=400
        )
    result = await oauth_views.oauth_service.get_emails(
        provider,
        search_config=search_config,
        refresh_token=refresh_token,
        **oauth_config(),
    )
    if result.error:
        return JSONResponse({"status": False, "msg": result.error}, status_code=400)
    return JSONResponse({"status": True, "data": result.data})
async def get_email_content(request: Request):
    """Fetch the full content of one email for the path's ``{provider}``.

    Requires ``refresh_token`` and ``email_data`` in the JSON body;
    responds 400 with ``{"status": False}`` on missing input or provider error.
    """
    body = await request.json()
    refresh_token = body.get("refresh_token")
    email_data = body.get("email_data")
    provider = request.path_params["provider"]
    if not refresh_token:
        return JSONResponse(
            {"status": False, "msg": "No refresh_token sent"}, status_code=400
        )
    if not email_data:
        return JSONResponse(
            {"status": False, "msg": "No email_data content sent"}, status_code=400
        )
    result = await oauth_views.oauth_service.get_email_content(
        provider, email_data, refresh_token=refresh_token, **oauth_config()
    )
    if result.error:
        return JSONResponse({"status": False, "msg": result.error}, status_code=400)
    return JSONResponse({"status": True, "data": result.data})
def redirect_page(request: Request):
    """Serve the OAuth redirect landing page."""
    context = {"request": request}
    return templates.TemplateResponse("redirect.html", context)
# async def secrets(request: Request):
# return JSONResponse(service.config)
async def oauth_callback(request: Request):
    """Echo the OAuth provider's callback query parameters back as JSON."""
    return JSONResponse(dict(request.query_params))
# Wide-open CORS: this API is meant to be called from arbitrary origins.
middlewares = [
    Middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_headers=["*"],
        allow_methods=["*"],
        allow_credentials=True,
    )
]

routes = [
    Route("/", home),
    Route("/redirect", redirect_page),
    Route("/get-access-token", fetch_access_token, methods=["POST"]),
    Route("/{provider}/get-emails", get_emails, methods=["POST"]),
    Route("/{provider}/get-email-content", get_email_content, methods=["POST"]),
    Mount(
        "/static",
        StaticFiles(directory=os.path.join(BASE_DIR, "gsheet_service", "static")),
        name="static",
    ),
    Route("/oauth-callback", oauth_callback, methods=["GET"]),
    Mount("/oauth", routes=oauth_views.routes),
    Mount("/media", routes=media_views.routes),
    Mount("/scheduler", routes=scheduler_views.routes),
    Mount("/sc", routes=spell_check_views.routes),
    # Catch-all sheet routes are mounted last so they don't shadow the above.
    Mount("", routes=sheet_views.routes),
    # Route("/secrets", secrets),
]

# Lifecycle hooks are contributed by the sheet views module.
on_startup = [] + sheet_views.on_startup
on_shutdown = [] + sheet_views.on_shutdown

app = Starlette(
    middleware=middlewares,
    routes=routes,
    on_startup=on_startup,
    on_shutdown=on_shutdown,
)
|
995,850 | e3dbf466a6844e9db7192195067a1bb7e2f7e157 | import time
import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from sqlalchemy import create_engine
# Fix: @pytest.yield_fixture is deprecated — plain @pytest.fixture has
# supported yield-style teardown for years and behaves identically.
@pytest.fixture()
def driver():
    """Chrome driver pointed at the locally running NPS app."""
    driver = webdriver.Chrome(
        executable_path='chromedriver2.43'
    )
    driver.set_window_size(1024, 800)
    driver.implicitly_wait(3)
    driver.set_page_load_timeout(5)
    driver.get('http://localhost:58001/')
    yield driver
    driver.quit()


@pytest.fixture()
def wait(driver):
    """Explicit 5-second WebDriverWait bound to the driver fixture."""
    wait = WebDriverWait(driver, 5)
    yield wait
    del wait


@pytest.fixture()
def psql_connect():
    """Connection to the local Postgres test database, closed on teardown."""
    psql = create_engine('postgresql://postgres:devpass@localhost:5433/core')
    db_connect = psql.connect()
    yield db_connect
    db_connect.close()
def test_window_visible(wait):
    """The NPS widget is clickable for a first-time visitor."""
    wait.until(EC.element_to_be_clickable((By.XPATH, '//div[@class="NPS"]')))


def test_window_not_visible(driver, wait):
    """With the NPS_sended cookie set, the widget is not shown again."""
    driver.add_cookie(
        {
            "name" : 'NPS_sended',
            'value' : '1',
            'domain' : 'localhost'
        }
    )
    driver.refresh()
    wait.until_not(EC.element_to_be_clickable((By.XPATH, '//div[@class="NPS"]')))


def test_send_hight_opinion(driver, wait):
    """Clicking score 10 submits the survey and sets the NPS_sended cookie."""
    button_10 = wait.until(
        EC.element_to_be_clickable(
            (By.CSS_SELECTOR, 'div.NPS__button.n10')
        )
    )
    button_10.click()
    assert driver.get_cookie('NPS_sended')['value'] == '1'


def test_send_low_opinion(driver, wait):
    """A low score opens the feedback form; submitting sets the cookie."""
    button_0 = wait.until(
        EC.element_to_be_clickable(
            (By.CSS_SELECTOR, 'div.NPS__button.n0')
        )
    )
    button_0.click()
    comment_textarea = wait.until(
        EC.element_to_be_clickable(
            (By.ID, 'feedbackTextarea')
        )
    )
    comment_textarea.send_keys('time_now')
    send_btn = wait.until(
        EC.element_to_be_clickable(
            (By.CSS_SELECTOR, 'button.NPS__feedback-send')
        )
    )
    send_btn.click()
    assert driver.get_cookie('NPS_sended')['value'] == '1'


def test_db_save_low_result(wait, psql_connect):
    """Low-score feedback text ends up in the t_feedback_models table."""
    # Unique comment so this run's row can be located in the database.
    time_now = str(time.time())
    button_0 = wait.until(
        EC.element_to_be_clickable(
            (By.CSS_SELECTOR, 'div.NPS__button.n0')
        )
    )
    button_0.click()
    comment_textarea = wait.until(
        EC.element_to_be_clickable(
            (By.ID, 'feedbackTextarea')
        )
    )
    comment_textarea.send_keys(time_now)
    send_btn = wait.until(
        EC.element_to_be_clickable(
            (By.CSS_SELECTOR, 'button.NPS__feedback-send')
        )
    )
    send_btn.click()
    # NOTE(review): the value is interpolated straight into the SQL text —
    # acceptable for this generated timestamp, but prefer bound parameters.
    req = 'select result from t_feedback_models where feedback = \'{}\''.format(time_now)
    res = psql_connect.execute(req)
    assert res.returns_rows
|
995,851 | 4af6cb9c7bb537ff2703ef7687f1192daf9d87d3 | class Solution(object):
def rotate(self, nums, k):
length = len(nums)
k %= length
last = nums[-k:]
for i in reversed(range(length - k)):
nums[i+k] = nums[i]
for i in range(len(last)):
nums[i] = last[i]
|
995,852 | fd543a7d33efc6e0942e1b1b1dedf1d8d527c550 | import sys
from Project import app, db
from Project import views
def main():
    """Create all database tables, then run the Flask dev server.

    Returns 0 so the result can be used directly as a process exit status.
    """
    db.create_all()  # idempotent: only creates missing tables
    app.run(debug=True)  # blocking; returns when the server stops
    return 0


if __name__=='__main__':
    sys.exit(main())
995,853 | b27e85b86c13c33a7ec3b8b1f895ae62f8a9f825 | import random
import time
class Car:
    """A dispatchable car with a grid position and preferred regions."""

    def __init__(self, available, now_region, preferred_region, name):
        self.available = available                # truthy when the car is free
        self.now_region = now_region              # current (x, y) grid coordinates
        self.preferred_region = preferred_region  # regions the driver will accept
        self.name = name
        self.to_go = -1                           # -1 means "no assignment yet"

    def whether_can_go(self, wanted_region):
        """Return True when this car may be dispatched to *wanted_region*.

        Bug fix: the original computed ``abs(self.now_region - wanted_region)``
        on coordinate *tuples* (a TypeError) and summed the same term twice;
        the intended check is the Manhattan distance between the two cells.
        """
        if not self.available:
            return False
        distance = (abs(self.now_region[0] - wanted_region[0])
                    + abs(self.now_region[1] - wanted_region[1]))
        if distance > 4:
            return False
        if wanted_region not in self.preferred_region:
            return False
        # Only unassigned cars may be dispatched.
        return self.to_go == -1
class Region:
    """A grid cell with a demand forecast and the cars currently in it."""

    def __init__(self, predicted_car, region1, now_car=None):
        # Bug fix: the mutable default argument ([]) was shared by every
        # Region created without a car list; use None and build a fresh list.
        self.predicted_car = predicted_car  # forecast demand (int)
        self.now_car = [] if now_car is None else now_car
        self.need_car = abs(predicted_car - len(self.now_car))
        self.region = region1               # (x, y) coordinates

    def lack_or_more(self):
        """Return (True, shortage) when cars are needed, else (False, surplus).

        Bug fix: the original compared ``predicted_car < self.now_car``
        (an int against a list — a TypeError in Python 3); the intended
        comparison is against the *number* of cars present.
        """
        present = len(self.now_car)
        if self.predicted_car < present:
            return False, present - self.predicted_car
        return True, self.predicted_car - present

    def add_car(self, car):
        """Place *car* in this region and decrement the outstanding need."""
        self.now_car.append(car)
        self.need_car -= 1
class ZHeap:
    """Binary heap of Region objects keyed on their ``need_car`` attribute.

    NOTE(review): despite the MIN_* method names, every comparison uses
    ``>``, so this behaves as a *max*-heap — DELETE() yields the region
    with the largest ``need_car``.  Confirm the naming is only historical.
    """

    def __init__(self, item=[]):
        # NOTE(review): mutable default argument — every heap created
        # without an explicit list shares this single one; pass a fresh
        # list to avoid cross-instance state.
        self.items = item
        self.heapsize = len(self.items)

    def LEFT(self, i):
        # Index of the left child of node i.
        return 2 * i + 1

    def RIGHT(self, i):
        # Index of the right child of node i.
        return 2 * i + 2

    def PARENT(self, i):
        # Index of the parent of node i.
        return (i - 1) // 2

    def MIN_HEAPIFY(self, i):
        # Sift node i down until the (max-)heap property holds again.
        l = self.LEFT(i)
        r = self.RIGHT(i)
        if l < self.heapsize and self.items[l].need_car > self.items[i].need_car:
            largest = l
        else:
            largest = i
        if r < self.heapsize and self.items[r].need_car > self.items[largest].need_car:
            largest = r
        if largest != i:
            self.items[i], self.items[largest] = self.items[largest], self.items[i]
            self.MIN_HEAPIFY(largest)

    def INSERT(self, val):
        # Append val, then bubble it up toward the root while it outranks
        # its parent.
        self.items.append(val)
        idx = len(self.items) - 1
        parIdx = self.PARENT(idx)
        while parIdx >= 0:
            if self.items[parIdx].need_car < self.items[idx].need_car:
                self.items[parIdx], self.items[idx] = self.items[idx], self.items[parIdx]
                idx = parIdx
                parIdx = self.PARENT(parIdx)
            else:
                break
        self.heapsize += 1

    def DELETE(self):
        # Pop and return the root (largest need_car); None when empty.
        last = len(self.items) - 1
        if last < 0:
            return None
        self.items[0], self.items[last] = self.items[last], self.items[0]
        val = self.items.pop()
        self.heapsize -= 1
        self.MIN_HEAPIFY(0)
        return val

    def BUILD_MIN_HEAP(self):
        # Heapify bottom-up over all internal nodes.
        i = self.PARENT(len(self.items) - 1)
        while i >= 0:
            self.MIN_HEAPIFY(i)
            i -= 1

    def SHOW(self):
        # Debug helper: dump the raw backing list.
        print(self.items)
class ZPriorityQ(ZHeap):
    """Thin priority-queue facade over ZHeap (largest ``need_car`` first)."""

    def __init__(self, item=None):
        # Bug fix: the shared mutable default ([]) made every queue created
        # without arguments share one backing list; build a fresh one.
        ZHeap.__init__(self, [] if item is None else item)

    def insert(self, val):
        """Push *val* onto the queue."""
        ZHeap.INSERT(self, val)

    def pop(self):
        """Remove and return the item with the largest ``need_car``."""
        return ZHeap.DELETE(self)
# ---- Dispatch simulation driver ----
region = []
# NOTE(review): predited_number is left empty, so Region(predited_number[i],
# ...) below raises IndexError until it is populated with forecasts.
predited_number = []
car_array = []  # expected rows: (name, region_index, preferred_regions)
width = 4
height = 5
total = width*height

for i in range(total):
    # Grid index i -> (x, y) coordinates.
    region_ = Region(predited_number[i], (i % width, i//width))
    region.append(region_)

for i in range(len(car_array)):
    car_ = Car(random.randint(0,1),(car_array[i][1] % width, car_array[i][1]//width),car_array[i][2],car_array[i][0])
    region[car_array[i][1]].add_car(car_)

# Split regions into those that need cars and those with a surplus.
need_car = ZPriorityQ()
more_car = ZPriorityQ()
for i in range(total):
    a,b = region[i].lack_or_more()
    if (a):
        need_car.insert(region[i])
    else:
        more_car.insert(region[i])

while (1):
    # Repeatedly pair the neediest region with the most oversupplied one.
    a = need_car.pop()
    b = more_car.pop()
    if (abs(a.region[0]-b.region[0])+abs(a.region[1]-b.region[1]))>=4:
        # Too far apart: try the next surplus region and requeue this one.
        c = b
        b = more_car.pop()
        more_car.insert(c)
    for i in range(len(b.now_car)):
        if (b.now_car[i].whether_can_go(a.region)):
            # NOTE(review): the driver's reply is read but never checked.
            input1 = input("Whether you want to go to Region (%d,%d)"%(a.region[0],a.region[1]))
            a.add_car(b.now_car[i])
            # NOTE(review): list.remove(i) removes by *value*, not index —
            # pop(i) is almost certainly what was meant here.
            b.now_car.remove(i)
            b.need_car += 1
    time.sleep(10)
|
995,854 | 16007d237d05d5ecd7f032bb1c13b45b67fd26a9 | """
===========
Description
===========
The popular video games Fallout 3 and Fallout: New Vegas has a computer hacking mini game
This game requires the player to correctly guess a password from a list of same length words.
Your challenge is to implement this game yourself.
The game works like the classic game of Mastermind.
The player has only 4 guesses and on each incorrect guess,
the computer will indicate how many letter positions are correct.
For example, if the password is MIND and the player guesses MEND,
the game will indicate that 3 out of 4 positions are correct (M_ND).
If the password is COMPUTE and the player guesses PLAYFUL, the game will report 0/7.
While some of the letters match, they're in the wrong position.
Ask the player for a difficulty (very easy, easy, average, hard, very hard),
then present the player with 5 to 15 words of the same length. The length can be 4 to 15 letters.
More words and letters make for a harder puzzle.
The player then has 4 guesses, and on each incorrect guess indicate the number of correct positions.
Here's an example game:
Difficulty (1-5)? 3
SCORPION
FLOGGING
CROPPERS
MIGRAINE
FOOTNOTE
REFINERY
VAULTING
VICARAGE
PROTRACT
DESCENTS
Guess (4 left)? migraine
0/8 correct
Guess (3 left)? protract
2/8 correct
Guess (2 left)? croppers
8/8 correct
You win!
========
Solution
========
"""
import random
# Difficulty level -> required word length.
difficulties = {1: 4,
                2: 5,
                3: 7,
                4: 10,
                5: 15}


def wordlist(difficulty):
    """Return every word in the ``enable1`` file whose length matches
    the given *difficulty* level.

    Bug fixes: the file is now closed deterministically (``with``), and
    lines are stripped *before* measuring length — the original compared
    ``len(word)`` with the trailing newline still attached, silently
    dropping a final line that lacked one.
    """
    target_len = difficulties[difficulty]
    with open('enable1') as fh:
        return [w for w in (line.strip() for line in fh) if len(w) == target_len]
def get_words(word_amount, difficulty):
    """Return *word_amount* randomly chosen words of the given difficulty.

    Performance fix: the original re-read and re-filtered the entire word
    file once per word; the candidate list is now loaded a single time.
    """
    candidates = wordlist(difficulty)
    return [random.choice(candidates) for _ in range(word_amount)]
def print_words(words):
    """Print the candidate words between separator rules."""
    rule = '-' * 10
    print(rule)
    for word in words:
        print(word)
    print(rule)
def check_word(word, answer):
    """Return the count of positions where *word* and *answer* agree."""
    matches = 0
    for idx, letter in enumerate(word):
        if letter == answer[idx]:
            matches += 1
    return matches
def main_loop(num_of_guesses, difficulty_words_count, difficulty=None):
    """(signature unchanged below) Run the hacking mini-game."""
    # -- real definition --
def main_loop(num_of_guesses,amount_of_words,difficulty):
    """Run the hacking mini-game until the player wins or runs out of guesses.

    num_of_guesses  -- attempts allowed
    amount_of_words -- candidate words shown to the player
    difficulty      -- key into ``difficulties`` (controls word length)
    """
    guesses = num_of_guesses
    words = get_words(amount_of_words,difficulty)
    answer = random.choice(words)
    while guesses:
        print_words(words)
        word = input('Take a guess: ')
        print()
        if check_word(word,answer) == len(answer):
            print('YOU WON')
            break
        print(check_word(word,answer),'were in the correct position')
        guesses-=1
    # Shown on both win and loss.
    print('The answer was',answer)
    input('Press enter to exit')


# Start a 10-guess game with 10 candidate words at difficulty 4.
main_loop(10,10,4)
|
995,855 | 770f20943a5348e1e4b01e91ea579509986227db | def processStream(stream):
ignore_garbage = False
result = 0
open_data = 0
garbage_count = 0
open_garbage = False
for c in stream:
if open_garbage:
if ignore_garbage:
ignore_garbage = False
elif c == '>':
open_garbage = False
elif c == '!':
ignore_garbage = True
else:
garbage_count += 1
else:
if c == '{':
open_data += 1
elif c == '<':
open_garbage = True
elif c == '}':
result += open_data
open_data -= 1
return garbage_count
# opens file with name of "test.txt"
f = open("e:\Personal\Advent of Code\Day 9\input.txt", "r")
wholeString = f.readline()
result = processStream(wholeString)
print(result)
|
995,856 | 31284d45852fe71ecbacb3ea2d7b1b74f85719af | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
# Create the VNX scenario.
def crear():
    print("Creamos el escenario")
    os.system("sudo vnx -f /home/upm/pfinal/pfinal.xml --create")

# Stop the VNX scenario and discard all configuration.
def borrar():
    print("Destruimos el escenario")
    os.system("sudo vnx -f /home/upm/pfinal/pfinal.xml --destroy")

# Stop the VNX scenario, saving its state.
def parar():
    print("Paramos el escenario")
    os.system("sudo vnx -f /home/upm/pfinal/pfinal.xml --shutdown")

# Restart the VNX scenario with the previously saved state.
def arrancar():
    print("Rearrancamos el escenario")
    os.system("sudo vnx -f /home/upm/pfinal/pfinal.xml --start")
# The action to run is passed as the first command-line argument.
f = sys.argv
args = len(f)
if args > 1:
    metodo = f[1]
    print("El argurmento 1 es :"+ str(metodo))
    if metodo == "crear":
        crear()
    elif metodo == "borrar":
        borrar()
    elif metodo == "parar":
        parar()
    elif metodo == "arrancar":
        arrancar()
    elif metodo == "ayuda":
        print("\n\n######## AYUDA #######\n")
        print("* './Confi_inicial.py' ejecuta diferentes funciones segun la opcion que se añada detras: \n ")
        print("* './Confi_inicial.py crear' creamos el escenario vnx\n")
        print("* './Confi_inicial.py borrar' borra el escenario vnx\n ")
        print("* './Confi_inicial.py parar' para el escenario vnx y guarda los cambios\n ")
        print("* './Confi_inicial.py arrancar' arranca el escenario vnx con los cambios guardados\n")
        print("\n######## FIN DE LA AYUDA #######\n\n")
    else:
        # Consistency fix: this was the file's only Python-2-style ``print``
        # statement; parenthesising the single argument prints the same text
        # under Python 2 and matches every other call in the file.
        print("Las opciones son erroneas. Introduzca './Confi_Inicial.py ayuda' para mas informacion")
|
995,857 | fd0b6e7e51685b0e3f342ebefa12d4b1d9572459 | class Game:
def __init__(self, date, type, time, time_elapsed, host, guest, index1, pankou, index2, gameId):
self.date = date
self.type = type
self.time = time
self.time_elapsed = time_elapsed
self.host = host
self.guest = guest
self.index1 = index1
self.pankou = pankou
self.index2 = index2
self.gameId = gameId
|
995,858 | 65fc1d6b80e6c4fbdf4a09a3ef3c1bbca4697b5e | import os
import urlparse
from sqlalchemy import create_engine
from werkzeug.wrappers import Request, Response
from werkzeug.routing import Map, Rule
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.wsgi import SharedDataMiddleware
from werkzeug.utils import redirect
from jinja2 import Environment, FileSystemLoader
from controllers.home import index
from controllers.book import add,view_book
from types import MethodType
import models
from sqlalchemy.orm import sessionmaker
class Bazaar(object):
    """Minimal Werkzeug WSGI application for the book bazaar.

    Controllers are registered with bind_route(), which stores them as bound
    methods named ``on_<controller>`` and maps the URL rule to that endpoint.
    """

    # This function does some magic, pass in a route and a function, and
    # it handles the werkzeug endpoint crap for you.
    # Note: Any function bound using this will be treated as a method on the
    # Bazaar object, and thus be passed self as a parameter.
    def bind_route(self, route, controller):
        self.url_map.add(Rule(route, endpoint=controller.__name__))
        setattr(self, 'on_' + controller.__name__, MethodType(controller, self))

    def __init__(self, config):
        """Connect to MySQL, prepare Jinja templates, and register routes.

        config: dict with 'host' (SQLAlchemy URL prefix) and 'db' (database name).
        """
        engine = create_engine(config['host'] + config['db'])
        self.mysql = engine.connect()
        template_path = os.path.join(os.path.dirname(__file__), 'templates')
        self.jinja_env = Environment(loader=FileSystemLoader(template_path),
                                     autoescape=True)
        models.metadata.create_all(self.mysql)
        Session = sessionmaker()
        Session.configure(bind=engine)
        self.mysql_session = Session()
        self.url_map = Map()
        self.bind_route('/', index)
        self.bind_route('/add', add)
        self.bind_route('/add_book', add)
        self.bind_route('/book/<book_id>', view_book)

    def render_template(self, template_name, **context):
        """Render a Jinja template to an HTML Response."""
        t = self.jinja_env.get_template(template_name)
        return Response(t.render(context), mimetype='text/html')

    def dispatch_request(self, request):
        """Match the request URL and call the bound on_<endpoint> handler."""
        adapter = self.url_map.bind_to_environ(request.environ)
        try:
            endpoint, values = adapter.match()
            return getattr(self, 'on_' + endpoint)(request, **values)
        # Fixed: "except HTTPException, e" is Python-2-only syntax; the "as"
        # form works on Python 2.6+ and Python 3. HTTPExceptions double as
        # WSGI responses, so returning the exception renders the error page.
        except HTTPException as e:
            return e

    def wsgi_app(self, environ, start_response):
        """WSGI entry point: wrap the environ, dispatch, stream the response."""
        request = Request(environ)
        response = self.dispatch_request(request)
        return response(environ, start_response)

    def __call__(self, environ, start_response):
        return self.wsgi_app(environ, start_response)
def create_app(mysql_host='mysql://root@localhost/', mysql_db='book_bazaar', with_static=True):
    """Build a Bazaar application; optionally serve /static via middleware."""
    config = {'host': mysql_host, 'db': mysql_db}
    app = Bazaar(config)
    if with_static:
        static_dir = os.path.join(os.path.dirname(__file__), 'static')
        app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {'/static': static_dir})
    return app
if __name__ == '__main__':
    from werkzeug.serving import run_simple
    # Development server with interactive debugger and auto-reloader.
    run_simple('127.0.0.1', 5000, create_app(), use_debugger=True, use_reloader=True)
995,859 | 2d630137ef3dcd8ba17a372bdc932c459340373a | """
Main flask app
"""
from flask import Flask, jsonify, request, render_template
app = Flask(__name__)

# In-memory "database": a list of stores, each shaped as
# {"name": str, "items": [{"name": str, "price": number}, ...]}.
# NOTE(review): module-level mutable state — contents reset on restart and
# access is not thread-safe under a multi-threaded server.
STORES = [
    {
        "name": "Nike town San Francisco",
        "items": [
            {
                "name": "Metcon 3",
                "price": 130
            }
        ]
    }
]
@app.route("/")
def home():
    """Serve the landing page."""
    rendered = render_template("index.html")
    return rendered
@app.route("/store")
def get_stores():
    """Return every store as JSON under the "data" key."""
    return jsonify(data=STORES)
@app.route("/store", methods=["POST"])
def create_store():
    """Create a new, empty store from the JSON request body.

    Expects {"name": <store name>}. Responds with the full store list.
    Returns HTTP 400 when the body is missing or lacks a "name" key
    (previously this raised KeyError and produced a 500).
    """
    body = request.get_json()
    if not body or "name" not in body:
        return jsonify({"message": "Request body must contain a 'name' field"}), 400
    new_store = {
        "name": body["name"],
        "items": []
    }
    STORES.append(new_store)
    return jsonify({
        "data": STORES
    })
@app.route("/store/<string:name>")
def get_store_by_name(name):
    """Look up a store by its URL slug (dashes are spaces in store names)."""
    formatted_name = name.replace("-", " ")
    matches = [s for s in STORES if s["name"] == formatted_name]
    if not matches:
        return jsonify({"message": "Store {} not found".format(formatted_name)})
    return jsonify({"data": matches})
@app.route("/store/<string:name>/item")
def get_store_items_by_name(name):
    """Return the item list of the store matching the URL slug."""
    formatted_name = name.replace("-", " ")
    matching = [s["items"] for s in STORES if s["name"] == formatted_name]
    if not matching:
        return jsonify({"message": "Store {} not found".format(formatted_name)})
    return jsonify({"data": matching[0]})
@app.route("/store/<string:name>/item", methods=["POST"])
def create_store_item_by_name(name):
    """Append the JSON request body as a new item of the named store."""
    new_item = request.get_json()
    formatted_name = name.replace("-", " ")
    matches = [s for s in STORES if s["name"] == formatted_name]
    if not matches:
        return jsonify({"message": "Store {} not found".format(formatted_name)})
    matches[0]["items"].append(new_item)
    return jsonify({"data": matches})
app.run(port=5000) |
995,860 | ef147863281933f5a9ab5a7a3495c4f942ef4f68 | import torch
from hem.models.inverse_module import InverseImitation
from hem.models import Trainer
from hem.models.discrete_logistic import DiscreteMixLogistic
import numpy as np
import matplotlib.pyplot as plt
from hem.datasets.util import MEAN, STD
import cv2
if __name__ == '__main__':
    trainer = Trainer('bc_inv', "Trains Behavior Clone w/ inverse + goal on input data")
    config = trainer.config
    # build Imitation Module and MDN Loss
    repeat_last = config.get('repeat_last', False)
    pnt_weight = config.get('pnt_weight', 0.1)
    goal_loss, goal_margin = config.get('goal_loss', False), config.get('goal_margin', -1)
    action_model = InverseImitation(**config['policy'])
    inv_loss_mult = config.get('inv_loss_mult', 1.0)
    def forward(m, device, context, traj, append=True):
        """Compute the combined BC + inverse-model + goal loss for one batch.

        m: the InverseImitation model; traj: dict of batched tensors from the
        dataset; context: dict holding the demonstration 'video' tensor.
        Returns (loss, stats) for trainer.train().
        NOTE(review): `append` is never used in the body — confirm it can go.
        """
        states, actions = traj['states'].to(device), traj['actions'].to(device)
        images = traj['images'].to(device)
        context = context['video'].to(device)
        if repeat_last:
            # Replace the whole context clip with its last frame repeated.
            old_T = context.shape[1]
            context = context[:,-1:].repeat((1, old_T, 1, 1, 1))
        # compute predictions and action LL
        out = m(states, images, context, ret_dist=False)
        mu_bc, scale_bc, logit_bc = out['bc_distrib']
        # Drop the final timestep so predictions align with the action targets.
        action_distribution = DiscreteMixLogistic(mu_bc[:,:-1], scale_bc[:,:-1], logit_bc[:,:-1])
        l_bc = torch.mean(-action_distribution.log_prob(actions))
        # compute inverse model density
        inv_distribution = DiscreteMixLogistic(*out['inverse_distrib'])
        l_inv = inv_loss_mult * torch.mean(-inv_distribution.log_prob(actions))
        # compute goal embedding
        if not goal_loss:
            l_goal, goal_stat = 0, 0
        elif goal_margin < 0:
            # L2 regression of the predicted goal onto the final image embedding.
            l_goal = torch.mean(torch.sum((out['pred_goal'][:,0] - out['img_embed'][:,-1].detach()) ** 2, 1))
            goal_stat = l_goal.item()
        else:
            # Margin/contrastive variant: the goal should be closer to the last
            # frame's embedding than to any earlier frame's, by goal_margin.
            cos_sims = torch.matmul(out['pred_goal'], out['img_embed'].transpose(1, 2))
            goal_sim, other_sim = cos_sims[:,:,-1], cos_sims[:,0,:-1]
            l_goal = torch.mean(torch.nn.functional.relu(other_sim - goal_sim + goal_margin))
            goal_stat = l_goal.item()
        loss = l_goal + l_inv + l_bc
        stats = {'inverse_loss':l_inv.item(), 'bc_loss': l_bc.item(), 'goal_loss': goal_stat}
        if 'point_ll' in out:
            # Optional keypoint head: negative log-likelihood of the labelled point.
            pnts = traj['points'].to(device).long()
            l_point = torch.mean(-out['point_ll'][range(pnts.shape[0]), pnts[:,-1,0], pnts[:,-1,1]])
            loss = loss + pnt_weight * l_point
            stats['point_loss'] = l_point.item()
            if trainer.is_img_log_step:
                # Build a heat-map visualization blended over the target image,
                # with a green square marking the ground-truth point.
                points_img = torch.exp(out['point_ll'].detach())
                maxes = points_img.reshape((points_img.shape[0], -1)).max(dim=1)[0] + 1e-3
                stats['point_img'] = (points_img[:,None] / maxes.reshape((-1, 1, 1, 1))).repeat((1, 3, 1, 1))
                stats['point_img'] = 0.7 * stats['point_img'] + 0.3 * traj['target_images'][:,0].to(device)
                pnt_color = torch.from_numpy(np.array([0,1,0])).float().to(stats['point_img'].device).reshape((1, 3))
                for i in range(-5, 5):
                    for j in range(-5, 5):
                        h = torch.clamp(pnts[:,-1,0] + i, 0, images.shape[3] - 1)
                        w = torch.clamp(pnts[:,-1,1] + j, 0, images.shape[4] - 1)
                        stats['point_img'][range(pnts.shape[0]),:,h,w] = pnt_color
        # Per-dimension L1 diagnostics for both action heads (clipped to [-1, 1]).
        mean_ac = np.clip(action_distribution.mean.detach().cpu().numpy(), -1, 1)
        mean_inv = np.clip(inv_distribution.mean.detach().cpu().numpy(), -1, 1)
        for d in range(actions.shape[2]):
            a_d = actions.cpu().numpy()[:,:,d]
            stats['bc_l1_{}'.format(d)] = np.mean(np.abs(mean_ac[:,:,d] - a_d))
            stats['inv_l1_{}'.format(d)] = np.mean(np.abs(mean_inv[:,:,d] - a_d))
        return loss, stats
    trainer.train(action_model, forward)
|
995,861 | 3e13d0a2ed65b0ccc1d44b249b0d0eb911dcbf58 | import sys, os, pygame, time
from pygame.locals import *
from math import *
import random
pygame.init()
####------Colours------####
BLACK = ( 0, 0, 0)
BLUE = ( 0, 0, 255)
DARKBLUE = ( 0, 0, 64)
DARKGREY = ( 64, 64, 64)
DARKRED = ( 64, 0, 0)
WHITE = (255, 255, 255)
RED = (255,0,0)
GREEN = (0,200,0)
BRIGHT_RED = (255,0,0)
BRIGHT_GREEN = (0,255,0)
####-------------------####
block_color = (53,115,255)
# Window geometry; the minimap size scales with the window width.
WIDTH = 1360
HEIGHT = 768
map_size = int((WIDTH / 680) * 64)
CLOCK = pygame.time.Clock()
FPS = 60
SCREEN = pygame.display.set_mode((WIDTH,HEIGHT))
pygame.display.set_caption("Raycaster")
# Scene colours and player movement tuning.
map_colour = BLUE
floor_colour = BLACK
ceiling_colour = DARKRED
rotate_speed = 0.03
move_speed = 0.15
strafe_speed = 0.04
wall_height = 1.27
resolution = 6 #Pixels per line
# Wall texture, loaded once; texArray allows per-pixel colour reads.
texture = pygame.image.load('pacman.jpeg')
texWidth = texture.get_width()
texHeight = texture.get_height()
texArray = pygame.PixelArray(texture)
old = 0
# Sprites for the player's hand and the crosshair overlay.
hand = pygame.image.load('./player.png')
mira = pygame.image.load('./mira.png')
class Raycaster(object):
    """Textured raycasting renderer (Wolfenstein-style DDA) with a minimap,
    simple menu screens, and WASD/QE player controls. Renders directly to the
    module-level SCREEN surface."""
    def __init__(self, width, heigth):
        # width/heigth: map dimensions in cells used when drawing the minimap.
        # (Note: "heigth" spelling kept — it is part of the public interface.)
        self.width = width
        self.heigth = heigth
    def point(self, x, y, c = None):
        """Set a single pixel on the screen to colour c."""
        SCREEN.set_at((x, y), c)
    def draw_player(self, player, xi, yi, w = 256, h = 256, size=32):
        """Blit a sprite pixel-by-pixel at (xi, yi), scaled from a size x size
        source to w x h, skipping the transparent/colour-key pixels."""
        for x in range(xi, xi + w):
            for y in range(yi, yi + h):
                tx = int((x - xi) * size/w)
                ty = int((y - yi) * size/h)
                c = player.get_at((tx, ty))
                # (152, 0, 136) is the colour key; fully transparent also skipped.
                if c != (152, 0, 136, 255) and c!= (0, 0, 0, 0):
                    self.point(x, y, c)
    def button(self,msg,x,y,w,h,ic,ac,action=None):
        """Draw a clickable button; run `action` while the mouse button is held
        over it. ic/ac are the idle/hover colours."""
        mouse = pygame.mouse.get_pos()
        click = pygame.mouse.get_pressed()
        if x+w > mouse[0] > x and y+h > mouse[1] > y:
            pygame.draw.rect(SCREEN, ac,(x,y,w,h))
            if click[0] == 1 and action != None:
                action()
        else:
            pygame.draw.rect(SCREEN, ic,(x,y,w,h))
        smallText = pygame.font.SysFont("comicsansms",20)
        textSurf, textRect = self.text_objects(msg, smallText)
        textRect.center = ( (x+(w/2)), (y+(h/2)) )
        SCREEN.blit(textSurf, textRect)
    def text_objects(self,text, font, color=BLACK):
        """Render `text` and return (surface, rect)."""
        textSurface = font.render(text, True, color)
        return textSurface, textSurface.get_rect()
    def message(self, message, font_size, x, y, color):
        """Draw centered text at (x, y).
        NOTE(review): pygame.font.SysFont's third positional argument is the
        `bold` flag, not a colour — passing `color` there looks unintended;
        the actual text colour comes from text_objects(). Confirm."""
        largeText = pygame.font.SysFont("comicsansms",font_size, color)
        TextSurf, TextRect = self.text_objects(message, largeText, color)
        TextRect.center = (x,y)
        SCREEN.blit(TextSurf, TextRect)
    def game_intro(self, game,quit):
        """Title screen loop: GO! starts `game`, Quit calls `quit`."""
        intro = True
        while intro:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    quit()
            SCREEN.fill(BLACK)
            self.message("PAINT IT FIRST", 115,(WIDTH/2),(HEIGHT/2), WHITE)
            self.button("GO!",550,450,100,50,GREEN,BRIGHT_GREEN,game)
            self.button("Quit",700,450,100,50,RED,BRIGHT_RED,quit)
            pygame.display.update()
            CLOCK.tick(15)
    def success(self):
        """Victory screen loop; only exit is the Quit button or window close."""
        success = True
        while success:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    quit()
            SCREEN.fill(BLACK)
            self.message("SUCCESS", 115,(WIDTH/2),(HEIGHT/2), WHITE)
            self.button("Quit",625,450,100,50,RED,BRIGHT_RED,self.Quit)
            pygame.display.update()
            CLOCK.tick(15)
    def create_level(self,file):
        """Parse a text map file into (rows, cols, 2D int grid).
        Spaces become 0 (empty); any other character is parsed as an int wall id."""
        if file[-4:] != '.txt': file += '.txt'
        f = open(file, 'r')
        file = f.readlines()
        for i, line in enumerate(file):
            file[i] = list(line.rstrip('\n'))
            for j, char in enumerate(file[i]):
                if char == ' ': file[i][j] = 0
                else: file[i][j] = int(char)
        f.close()
        map_x = len(file)
        map_y = len(file[0])
        map_buffer = []
        for i, line in enumerate(file):
            map_buffer.append([])
            for j, char in enumerate(file[i]):
                if char != 0:
                    map_buffer[i].append(char)
                else:
                    map_buffer[i].append(0)
        return map_x, map_y, map_buffer
    def Quit(self):
        """Shut pygame down and exit the process."""
        pygame.quit()
        sys.exit()
    def draw_rectangle(self, x, y, texture):
        """Blit a 50x50 rectangle sampled from a 128x128 texture at (x, y)."""
        for cx in range(x, x + 50):
            for cy in range(y, y + 50):
                tx = int((cx - x)*128 / 50)
                ty = int((cy - y)*128 / 50)
                c = texture.get_at((tx, ty))
                self.point(cx, cy, c)
    def load_map(self, filename):
        """Append each line of `filename` to self.map.
        NOTE(review): self.map is never initialized in __init__, so calling this
        raises AttributeError; it appears unused (game() uses create_level)."""
        with open(filename) as f:
            for line in f.readlines():
                self.map.append(list(line))
    def game(self):
        """Main game loop: DDA raycast every screen column, draw the textured
        wall slice with distance shading, then the minimap, then handle input."""
        map_x, map_y, map_buffer = self.create_level('map')
        # Hard-coded start position/orientation and camera plane.
        position_x, position_y = 7.4442835833842285, 10.119874124901372
        direction_x, direction_y = -0.005715065212402226, 1.2806120950661122
        plane_x, plane_y = -0.5135283099460273, -0.41459459099700735
        SCREEN.fill(BLACK)
        while True:
            # Win condition: walk far enough along the y axis.
            if(position_y>12.5):
                self.success()
            difference = 0
            for event in pygame.event.get():
                if event.type == QUIT:
                    self.Quit()
                    return
                if event.type == KEYDOWN:
                    if event.key == K_ESCAPE:
                        self.Quit()
                        return
            # Ceiling and floor halves of the 3D viewport.
            pygame.draw.rect(SCREEN, ceiling_colour, (0, 0, WIDTH, (HEIGHT - map_size) / 2))
            pygame.draw.rect(SCREEN, floor_colour, (0, (HEIGHT - map_size) / 2, WIDTH, (HEIGHT - map_size) / 2))
            # Cast one ray per `resolution`-wide column.
            for x in range(0, WIDTH, resolution):
                camera_x = 2 * x / WIDTH - 1
                rayposition_x = position_x
                rayposition_y = position_y
                mapX = int(rayposition_x)
                mapY = int(rayposition_y)
                # Tiny epsilon avoids division by zero for axis-aligned rays.
                raydirection_x = direction_x + plane_x * camera_x + 0.000000000000001
                raydirection_y = direction_y + plane_y * camera_x + 0.000000000000001
                deltaDistX = sqrt(1 + raydirection_y ** 2 / raydirection_x ** 2)
                deltaDistY = sqrt(1 + raydirection_x ** 2 / raydirection_y ** 2)
                # NOTE(review): zBuffer is re-created per column and never used.
                zBuffer = []
                if raydirection_x < 0:
                    stepX = -1
                    sideDistX = (rayposition_x - mapX) * deltaDistX
                else:
                    stepX = 1
                    sideDistX = (mapX + 1 - rayposition_x) * deltaDistX
                if raydirection_y < 0:
                    stepY = -1
                    sideDistY = (rayposition_y- mapY) * deltaDistY
                else:
                    stepY = 1
                    sideDistY = (mapY + 1 - rayposition_y) * deltaDistY
                # DDA walk until a wall cell (or the map edge) is hit.
                while True:
                    if sideDistX < sideDistY:
                        sideDistX += deltaDistX
                        mapX += stepX
                        side = 0
                    else:
                        sideDistY += deltaDistY
                        mapY += stepY
                        side = 1
                    if mapX >= map_x or mapY >= map_y or mapX < 0 or mapY < 0 or map_buffer[mapX][mapY] > 0:
                        break
                # Perpendicular wall distance and on-screen slice extents.
                if side == 0: rayLength = (mapX - rayposition_x + (1 - stepX) / 2) / raydirection_x
                else: rayLength = (mapY - rayposition_y+ (1 - stepY) / 2) / raydirection_y
                lineHeight = (HEIGHT / rayLength) * wall_height
                drawStart = -lineHeight / 2 + (HEIGHT - map_size) / 2
                drawEnd = lineHeight / 2 + (HEIGHT - map_size) / 2
                # Texture column selection from the exact wall hit position.
                if side == 0: wallX = rayposition_y+ rayLength * raydirection_y
                else: wallX = rayposition_x + rayLength * raydirection_x
                wallX = abs((wallX - floor(wallX)) - 1)
                texX = int(wallX * texWidth)
                if side == 0 and raydirection_x > 0: texX = texWidth - texX - 1
                if side == 1 and raydirection_y < 0: texX = texWidth - texX - 1
                # Draw the slice one texel-row at a time with distance shading.
                for y in range(texHeight):
                    if drawStart + (lineHeight / texHeight) * (y + 1) < 0: continue
                    if drawStart + (lineHeight / texHeight) * y > HEIGHT - map_size: break
                    colour = pygame.Color(texArray[texX][y])
                    c = 255.0 - abs(int(rayLength * 32)) * 0.85
                    if c < 1: c = 1
                    if c > 255: c = 255
                    # y-side walls are darkened for a simple lighting effect.
                    if side == 1: c = c * 0.5
                    new_colour = []
                    for i, value in enumerate(colour):
                        if i == 0: continue
                        new_colour.append(value * (c / 255))
                    colour = tuple(new_colour)
                    pygame.draw.line(SCREEN, colour, (x, drawStart + (lineHeight / texHeight) * y), (x, drawStart + (lineHeight / texHeight) * (y + 1)), resolution)
            # Minimap: walls, player marker, and the camera-plane triangle.
            for x in range(self.width):
                for y in range(self.heigth):
                    if map_buffer[y][x] != 0: pygame.draw.rect(SCREEN, map_colour, ((x * (map_size / map_x) + WIDTH) - map_size, y * (map_size / map_y) + HEIGHT - map_size, (map_size / map_x), (map_size / map_y)))
            myPosition = (position_y* (map_size / map_y) + WIDTH - map_size, position_x * (map_size / map_x) + HEIGHT - map_size)
            pygame.draw.rect(SCREEN, ( 0, 255, 0), myPosition + (2, 2))
            pygame.draw.line(SCREEN, ( 0, 170, 170), myPosition, ((direction_y + position_y+ plane_y) * (map_size / map_y) + WIDTH - map_size, (direction_x + position_x + plane_x) * (map_size / map_x) + HEIGHT - map_size))
            pygame.draw.line(SCREEN, ( 0, 170, 170), myPosition, ((direction_y + position_y- plane_y) * (map_size / map_y) + WIDTH - map_size, (direction_x + position_x - plane_x) * (map_size / map_y) + HEIGHT - map_size))
            pygame.draw.line(SCREEN, ( 0, 170, 170), ((direction_y + position_y+ plane_y) * (map_size / map_y) + WIDTH - map_size, (direction_x + position_x + plane_x) * (map_size / map_x) + HEIGHT - map_size), ((direction_y + position_y- plane_y) * (map_size / map_y) + WIDTH - map_size, (direction_x + position_x - plane_x) * (map_size / map_y) + HEIGHT - map_size))
            # Movement (WASD) with per-axis collision checks; Q/E rotates.
            keys = pygame.key.get_pressed()
            if keys[K_w]:
                if not map_buffer[int(position_x + direction_x * move_speed)][int(position_y)]: position_x += direction_x * move_speed
                if not map_buffer[int(position_x)][int(position_y+ direction_y * move_speed)]: position_y+= direction_y * move_speed
            if keys[K_a]:
                if not map_buffer[int(position_x + direction_y * strafe_speed)][int(position_y)]: position_x += direction_y * strafe_speed
                if not map_buffer[int(position_x)][int(position_y- direction_x * strafe_speed)]: position_y-= direction_x * strafe_speed
            if keys[K_s]:
                if not map_buffer[int(position_x - direction_x * move_speed)][int(position_y)]: position_x -= direction_x * move_speed
                if not map_buffer[int(position_x)][int(position_y- direction_y * move_speed)]: position_y-= direction_y * move_speed
            if keys[K_d]:
                if not map_buffer[int(position_x - direction_y * strafe_speed)][int(position_y)]: position_x -= direction_y * strafe_speed
                if not map_buffer[int(position_x)][int(position_y+ direction_x * strafe_speed)]: position_y+= direction_x * strafe_speed
            if keys[K_q]: difference = -5
            if keys[K_e]: difference = 5
            if difference != 0:
                # 2D rotation of both the direction vector and the camera plane.
                cosrot = cos(difference * rotate_speed)
                sinrot = sin(difference * rotate_speed)
                old = direction_x
                direction_x = direction_x * cosrot - direction_y * sinrot
                direction_y = old * sinrot + direction_y * cosrot
                old = plane_x
                plane_x = plane_x * cosrot - plane_y * sinrot
                plane_y = old * sinrot + plane_y * cosrot
            # HUD text and overlay sprites, then present the frame.
            self.message("PAINT IT FIRST LAB 1", 50,(WIDTH/2),HEIGHT-100, WHITE)
            self.message("USE w to move up, s to move down, a to move left, d to move right.", 30,(WIDTH/2-50),HEIGHT-70, WHITE)
            self.message("USE q to turn the head left, e to turn the head right.", 30,(WIDTH/2-50),HEIGHT-45, WHITE)
            self.message("MOVE FORWARD TO WIN", 30,(WIDTH/2-50),HEIGHT-20, RED)
            self.draw_player(hand,1000 - 256 - 128, 650 - 256)
            self.draw_player(mira,int(WIDTH/2 - 80 ),int(HEIGHT/2 - 100),120,120,512)
            pygame.display.update()
            CLOCK.tick(FPS)
def main():
r = Raycaster(21, 22)
r.game_intro(r.game,r.Quit)
r.game()
pygame.quit()
r.quit()
if __name__=="__main__": main() |
995,862 | 186473b852ab78faa9457b43c7cb210e295da999 | # -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponse
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from booki.utils import log
from booki.editor import models
import os, tempfile
from django import forms
class UploadForm(forms.Form):
    """Upload form: an EPUB file plus an optional replacement book title."""
    # Optional; when given, it overrides the title embedded in the EPUB.
    title = forms.CharField(required=False,
                            label='Book title',
                            widget=forms.TextInput(attrs={'placeholder': 'Only if you want to rename it'}))
    # The EPUB file to import (required).
    file = forms.FileField(required=True,
                           label='Your EPUB file')
from booktype.utils.misc import import_book_from_file
class ImporterView(FormView):
    """Class-based upload view: saves the EPUB and imports it for the user."""
    template_name = 'importer/frontpage.html'
    form_class = UploadForm
    success_url = '/importer/'

    def form_valid(self, form):
        """Write the uploaded file to disk in chunks, then import the book.

        Fixed: the file handle was opened with open()/close() and leaked if a
        chunk write raised; `with` guarantees it is closed.
        NOTE(review): the fixed '/tmp/acika.epub' path is shared between
        concurrent uploads — consider tempfile.NamedTemporaryFile.
        """
        fil = self.request.FILES['file']
        with open('/tmp/acika.epub', 'wb+') as f:
            for chunk in fil.chunks():
                f.write(chunk)
        import_book_from_file('/tmp/acika.epub', self.request.user)
        return super(ImporterView, self).form_valid(form)
from django.db import transaction
#@transaction.commit_manually
def frontpage(request):
    """Handle the EPUB upload page.

    GET renders the empty upload form; a valid POST saves the uploaded file,
    imports it (optionally renamed), and redirects to the new book's page.
    """
    from django.http import HttpResponseRedirect
    if request.method == 'POST': # If the form has been submitted...
        form = UploadForm(request.POST, request.FILES) # A form bound to the POST data
        if form.is_valid(): # All validation rules pass
            fil = request.FILES['file']
            # Fixed: use `with` so the handle is closed even if a write raises.
            # NOTE(review): hard-coded shared path — races between uploads.
            with open('/tmp/acika.epub', 'wb+') as f:
                for chunk in fil.chunks():
                    f.write(chunk)
            try:
                title = None
                if form.cleaned_data['title'].strip() != '':
                    title = form.cleaned_data['title']
                book = import_book_from_file('/tmp/acika.epub', request.user, book_title=title)
            except:
                # Roll back the DB transaction, then re-raise the original error.
                transaction.rollback()
                raise
            else:
                transaction.commit()
            from django.core.urlresolvers import reverse
            res = HttpResponseRedirect(reverse('book_info', kwargs={'bookid': book.url_title})) # Redirect after POST
            return res
    else:
        form = UploadForm() # An unbound form
    resp = render(request, 'importer/frontpage.html', {'request': request,
                                                       'form': form})
    return resp
|
995,863 | 077996763e30f3c4344655e34de8736b42841829 | import curses
import sys
def display(copyright="declaration.md"):
    """Show the contents of the `copyright` file in a bordered curses window.

    NOTE(review): the window is created 4 rows x 2 cols at (10, 10), yet text
    is written at (3, 3) — outside a 2-column window — and every line is
    written at the same coordinates, so only the last line could remain
    visible. Confirm the intended geometry before relying on this.
    """
    copyright_win = curses.newwin(4, 2, 10, 10)
    copyright_win.border(0)
    try:
        with open(copyright) as declaration:
            for lines in declaration:
                copyright_win.addstr(3, 3, lines)
                copyright_win.refresh()
    except OSError:
        # Propagate missing/unreadable-file errors to the caller.
        raise
def action(self):
    """Placeholder callback — not implemented yet. (Takes `self` although it
    is defined at module level; presumably meant to become a method.)"""
    pass
if __name__ == '__main__':
    screen = curses.initscr()
    try:
        screen.clear()
        screen.border(0)
        screen.refresh()
        display("testing.md")
    finally:
        # Fixed: endwin() was skipped when display() raised, leaving the
        # terminal in curses raw mode; always restore it.
        curses.endwin()
|
995,864 | a35d6655de94bee39c3af5f18dba2869ca6043fb | from random import randrange
import traceback, sys
import json
from msilib.schema import Class
from flask import Flask
from flask import render_template, redirect, url_for
from flask import request, jsonify
from gensim.models import Word2Vec
from pymystem3 import Mystem
from gensim.models import KeyedVectors
import gensim
import pymorphy2
import time
app = Flask(__name__)
# Keep non-ASCII (Cyrillic) characters readable in JSON responses.
app.config['JSON_AS_ASCII'] = False
# NOTE(review): the full Tayga vector model is loaded eagerly at import time;
# this can take a long time and significant RAM.
model = gensim.models.KeyedVectors.load_word2vec_format(
    "local/tayga_1_2.vec", binary=False)
morph = pymorphy2.MorphAnalyzer()
@app.route('/get_vectors/', methods=['POST'])
def get_vector():
    """Return the embedding vector for each requested word ([] when unknown)."""
    start = time.perf_counter()
    query = json.loads(request.data)
    words = query.get("words")
    print("Запрос получен", time.perf_counter() - start)
    result = []
    for word in words:
        try:
            result.append(model[word].tolist())
        except Exception as e:
            # Best effort: log the failure and emit an empty vector placeholder.
            print(e)
            print(f'Ошибка при обработке слов "{word}".')
            result.append([])
    return jsonify({"vectors": result})
# TODO: Return an HTTP status code (or otherwise propagate error messages) to the client instead of logging them only on the server.
@app.route('/most_similar/', methods=['POST'])
def most_similar():
    """Return word2vec nearest neighbours for the requested positive/negative words.

    Request JSON: {"pos_words": [...], "neg_words": [...], "topn": int}.
    Words may be plain strings (lemmatized here) or [word, POS] pairs
    (used verbatim as "word_POS" keys). On any failure an empty list is returned.
    """
    start = time.perf_counter()
    query = json.loads(request.data)
    def key_for_tayga(word: str):
        """Convert a raw word into the Tayga model's "lemma_POS" key format."""
        # Map pymorphy POS tags to the tag set the Tayga model uses.
        pos_map = {
            'INFN': "VERB"
        }
        def pos_priority(parse):
            # Prefer content-word readings; ties broken by the parse score.
            priority = ['NOUN', 'ADJF', 'VERB', 'ADVB', 'PRTF']
            try:
                return priority.index(pos_map.get(parse.tag.POS, parse.tag.POS)) + parse.score
            except ValueError:
                return len(priority) + 1 # Lowest possible priority.
        # Already in "lemma_POS" form — use as-is.
        if word.find('_') >= 0:
            return word
        p = morph.parse(word)
        p = sorted(p, key=pos_priority)[0]
        return "{normal_form}_{POS}".format(normal_form=p.normal_form, POS=p.tag.POS)
    def to_keys(words):
        """Normalize a mixed list of strings / [word, pos] pairs into model keys."""
        keys = []
        for word in words:
            if isinstance(word, list):
                keys.append('{w}_{pos}'.format(w=word[0], pos=word[1]))
            elif isinstance(word, str):
                keys.append(key_for_tayga(word))
            else:
                raise ValueError(f"Unexpected word encoding: {word}")
        return keys
    try:
        positive = list(to_keys(query["pos_words"]))
        # NOTE(review): query.get("neg_words") is None when absent, and
        # to_keys(None) raises — a default of [] may have been intended.
        negative = list(to_keys(query.get("neg_words")))
        topn=query.get("topn", 15)
        # print("positive\n", positive)
        # print("negative\n", negative)
        # print("topn\n", topn)
        sim_list = model.most_similar(positive=positive, negative=negative, topn=topn)
    except Exception:
        # Best effort: log the traceback and respond with an empty result list.
        traceback.print_exc(file=sys.stdout)
        sim_list = []
    print("Запрос обработан.", time.perf_counter() - start)
    j = jsonify(sim_list)
    print("Запрос закодирован.", time.perf_counter() - start)
    return j
if __name__ == "__main__":
    # Development server on port 5100 with the debugger enabled.
    app.run(port=5100, debug=True)
|
995,865 | ea1955b37bfbaf1de5767a848d23acd3642431f2 | import os
import random
import spacy
from spacy.util import minibatch, compounding
import pandas as pd
# Sample review used by test_model() when no input text is supplied.
TEST_REVIEW = """
Transcendently beautiful in moments outside the office, it seems almost
sitcom-like in those scenes. When Toni Colette walks out and ponders
life silently, it's gorgeous.<br /><br />The movie doesn't seem to decide
whether it's slapstick, farce, magical realism, or drama, but the best of it
doesn't matter. (The worst is sort of tedious - like Office Space with less
humor.)
"""
# NOTE(review): nothing in this file appends to eval_list, so the DataFrame
# built from it in __main__ will be empty — confirm whether per-iteration
# metrics were meant to be collected here.
eval_list = []
def train_model(
    training_data: list, test_data: list, iterations: int = 20
) -> None:
    """Train a spaCy text-classification ("textcat") pipe on labelled reviews.

    training_data/test_data: lists of (text, {"cats": {"pos": bool, "neg": bool}})
    as produced by load_training_data(). Prints loss/precision/recall/F-score
    per iteration and saves the trained pipeline to ./model_artifacts.
    """
    # Build pipeline
    nlp = spacy.load("en_core_web_sm")
    if "textcat" not in nlp.pipe_names:
        textcat = nlp.create_pipe(
            "textcat", config={"architecture": "simple_cnn"}
        )
        nlp.add_pipe(textcat, last=True)
    else:
        textcat = nlp.get_pipe("textcat")
    textcat.add_label("pos")
    textcat.add_label("neg")
    # Train only textcat — every other pipe is disabled during training.
    training_excluded_pipes = [
        pipe for pipe in nlp.pipe_names if pipe != "textcat"
    ]
    with nlp.disable_pipes(training_excluded_pipes):
        optimizer = nlp.begin_training()
        # Training loop
        print("Beginning training")
        print("Loss\tPrecision\tRecall\tF-score")
        batch_sizes = compounding(
            4.0, 32.0, 1.001
        )  # A generator that yields infinite series of input numbers
        for i in range(iterations):
            print(f"Training iteration {i}")
            loss = {}
            random.shuffle(training_data)
            batches = minibatch(training_data, size=batch_sizes)
            for batch in batches:
                text, labels = zip(*batch)
                nlp.update(text, labels, drop=0.2, sgd=optimizer, losses=loss)
            # Evaluate with the averaged weights for a smoother estimate.
            with textcat.model.use_params(optimizer.averages):
                evaluation_results = evaluate_model(
                    tokenizer=nlp.tokenizer,
                    textcat=textcat,
                    test_data=test_data,
                )
                print(
                    f"{loss['textcat']}\t{evaluation_results['precision']}"
                    f"\t{evaluation_results['recall']}"
                    f"\t{evaluation_results['f-score']}"
                )
    # Save model
    with nlp.use_params(optimizer.averages):
        nlp.to_disk("model_artifacts")
def evaluate_model(tokenizer, textcat, test_data: list) -> dict:
    """Compute precision, recall, and F-score of `textcat` on `test_data`.

    test_data: list of (text, {"cats": {"pos": bool, "neg": bool}}) pairs.
    Only the "pos" score decides the outcome; 0.5 is the decision threshold.
    """
    texts, gold = zip(*test_data)
    docs = (tokenizer(text) for text in texts)
    # Tiny epsilons keep the precision/recall denominators non-zero.
    tp = 0
    fp = 1e-8
    tn = 0
    fn = 1e-8
    for predicted, labels in zip(textcat.pipe(docs), gold):
        truth = labels["cats"]
        for category, score in predicted.cats.items():
            if category == "neg":
                # Both labels are present in every cats dict; "pos" alone
                # carries all the information we need.
                continue
            if score >= 0.5:
                if truth["pos"]:
                    tp += 1
                elif truth["neg"]:
                    fp += 1
            else:
                if truth["neg"]:
                    tn += 1
                elif truth["pos"]:
                    fn += 1
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    if precision + recall == 0:
        f_score = 0
    else:
        f_score = 2 * (precision * recall) / (precision + recall)
    return {"precision": precision, "recall": recall, "f-score": f_score}
def test_model(input_data: str = TEST_REVIEW):
    """Load the saved model from ./model_artifacts, classify `input_data`,
    and print the predicted sentiment with its score."""
    loaded_model = spacy.load("model_artifacts")
    parsed_text = loaded_model(input_data)
    pos_score = parsed_text.cats["pos"]
    neg_score = parsed_text.cats["neg"]
    # Whichever label scored higher wins.
    if pos_score > neg_score:
        prediction, score = "Positive", pos_score
    else:
        prediction, score = "Negative", neg_score
    print(
        f"Review text: {input_data}\nPredicted sentiment: {prediction}"
        f"\tScore: {score}"
    )
def load_training_data(
    data_directory: str = "aclImdb/train", split: float = 0.8, limit: int = 0
) -> tuple:
    """Load labelled reviews from <data_directory>/{pos,neg} and split them.

    Each *.txt file becomes (text, {"cats": {"pos": bool, "neg": bool}});
    "<br />" markers are turned into blank lines and empty files are skipped.
    The reviews are shuffled, optionally capped at `limit`, and split into
    (training, test) with `split` as the training fraction.

    Fixed: files are now opened with an explicit UTF-8 encoding (the previous
    platform-default encoding could raise UnicodeDecodeError on Windows), and
    paths are built with os.path.join instead of f-strings.
    """
    reviews = []
    for label in ("pos", "neg"):
        labeled_directory = os.path.join(data_directory, label)
        for review in os.listdir(labeled_directory):
            if not review.endswith(".txt"):
                continue
            with open(os.path.join(labeled_directory, review), encoding="utf-8") as f:
                text = f.read().replace("<br />", "\n\n")
            if text.strip():
                spacy_label = {
                    "cats": {
                        "pos": "pos" == label,
                        "neg": "neg" == label,
                    }
                }
                reviews.append((text, spacy_label))
    random.shuffle(reviews)
    if limit:
        reviews = reviews[:limit]
    cut = int(len(reviews) * split)
    return reviews[:cut], reviews[cut:]
if __name__ == "__main__":
    # Tiny limit keeps this demo run fast; raise it for a real training run.
    train, test = load_training_data(limit=25)
    print("Training model")
    train_model(train, test)
    # NOTE(review): eval_list is never appended to in this file, so this
    # DataFrame is empty and the plot will be blank — confirm intended.
    df = pd.DataFrame(eval_list)
    pd.DataFrame.plot(df)
    print("Testing model")
    test_model()
|
995,866 | 2ba07f7c4dbf00cce06c2dff5079c9b0a37c4632 | from modeltranslation.translator import TranslationOptions, translator
from .models import MetaTag
class MetaTagTranslationOptions(TranslationOptions):
    """Declares which MetaTag fields get per-language translated columns."""
    fields = ('title', 'keywords', 'description')

# Register the model so modeltranslation creates the translation fields.
translator.register(MetaTag, MetaTagTranslationOptions)
|
995,867 | fa110ea5594611b8a70c33167773f8ab264168b0 | import marshal
import numpy as np
import cv2
from collections import namedtuple
from pathlib import Path
from FPS import FPS
import depthai as dai
import time
from math import gcd
from string import Template
SCRIPT_DIR = Path(__file__).resolve().parent
# Default MoveNet blobs: "lightning" is the faster model, "thunder" the
# more accurate one (used by MovenetDepthai when a model name is given).
MOVENET_LIGHTNING_MODEL = SCRIPT_DIR / "models/movenet_singlepose_lightning_U8_transpose.blob"
MOVENET_THUNDER_MODEL = SCRIPT_DIR / "models/movenet_singlepose_thunder_U8_transpose.blob"
# Dictionary that maps from joint names to keypoint indices.
KEYPOINT_DICT = {
    'nose': 0,
    'left_eye': 1,
    'right_eye': 2,
    'left_ear': 3,
    'right_ear': 4,
    'left_shoulder': 5,
    'right_shoulder': 6,
    'left_elbow': 7,
    'right_elbow': 8,
    'left_wrist': 9,
    'right_wrist': 10,
    'left_hip': 11,
    'right_hip': 12,
    'left_knee': 13,
    'right_knee': 14,
    'left_ankle': 15,
    'right_ankle': 16
}
class Body:
    """Result container for one single-pose MoveNet inference."""

    def __init__(self, scores=None, keypoints_norm=None, keypoints=None, score_thresh=None, crop_region=None, next_crop_region=None):
        # scores: confidence value per keypoint.
        # keypoints_norm: (x, y) normalized to [0, 1] within the square crop.
        # keypoints: (x, y) in pixels of the source image.
        # score_thresh: confidence threshold that was applied.
        # crop_region: region the current inference ran on.
        # next_crop_region: region computed from these keypoints for the next frame.
        self.scores = scores
        self.keypoints_norm = keypoints_norm
        self.keypoints = keypoints
        self.score_thresh = score_thresh
        self.crop_region = crop_region
        self.next_crop_region = next_crop_region

    def print(self):
        """Dump every attribute as "name: value", one per line."""
        for attr_name, attr_value in vars(self).items():
            print("%s: %s" % (attr_name, attr_value))
CropRegion = namedtuple('CropRegion',['xmin', 'ymin', 'xmax', 'ymax', 'size']) # All values are in pixel. The region is a square of size 'size' pixels
def find_isp_scale_params(size):
    """Pick the internal frame size closest to `size` that the ISP can produce.

    Valid sizes are multiples of 16 expressible as 1080 * n/d with n <= 16 and
    d <= 63 (the ISP scaling constraints). Sizes below 288 are clamped to 288.
    Returns (chosen_size, (n, d)); ties between equally-close candidates go to
    the larger size.
    """
    target = 288 if size < 288 else size
    # Enumerate every admissible size together with its n/d scaling fraction.
    candidates = {}
    for s in range(288, 1080, 16):
        f = gcd(1080, s)
        n, d = s // f, 1080 // f
        if n <= 16 and d <= 63:
            candidates[s] = (n, d)
    # Candidates iterate in ascending order, so the distance to `target`
    # decreases then increases; stop as soon as it starts growing.
    best = None
    best_dist = None
    for s in candidates:
        dist = abs(target - s)
        if best_dist is not None and dist > best_dist:
            break
        best, best_dist = s, dist
    return best, candidates[best]
class MovenetDepthai:
    def __init__(self, input_src="rgb",
                 model=None,
                 score_thresh=0.2,
                 crop=False,
                 internal_fps=None,
                 internal_frame_size=640,
                 stats=True):
        """Configure the MoveNet pipeline and start it on the OAK device.

        input_src: "rgb" or "rgb_laconic" (laconic = camera frames stay on-device).
        model: "lightning", "thunder", or a path to a custom blob.
        score_thresh: keypoint confidence threshold.
        internal_fps: camera FPS; defaults depend on the model when None.
        internal_frame_size: requested square frame size (snapped to ISP limits).
        """
        # Resolve the model blob path and its expected square input size.
        self.model = model
        if model == "lightning":
            self.model = str(MOVENET_LIGHTNING_MODEL)
            self.pd_input_length = 192
        elif model == "thunder":
            self.model = str(MOVENET_THUNDER_MODEL)
            self.pd_input_length = 256
        else:
            self.model = model
            if "lightning" in str(model):
                self.pd_input_length = 192
            else: # Thunder
                self.pd_input_length = 256
        print(f"Using blob file : {self.model}")
        print(f"MoveNet imput size : {self.pd_input_length}x{self.pd_input_length}x3")
        self.score_thresh = score_thresh
        self.crop = crop
        self.internal_fps = internal_fps
        self.stats = stats
        if input_src is None or input_src == "rgb" or input_src == "rgb_laconic":
            self.input_type = "rgb" # OAK* internal color camera
            self.laconic = "laconic" in input_src # Camera frames are not sent to the host
            # Default FPS depends on the model: thunder is heavier than lightning.
            if internal_fps is None:
                if "thunder" in str(model):
                    self.internal_fps = 12
                else:
                    self.internal_fps = 26
            else:
                self.internal_fps = internal_fps
            print(f"Internal camera FPS set to: {self.internal_fps}")
            # NOTE(review): video_fps is set from the raw internal_fps argument,
            # which may still be None even though self.internal_fps got a
            # default above — confirm this is intended.
            self.video_fps = internal_fps # Used when saving the output in a video file. Should be close to the real fps
            self.crop = True # Temp
            if self.crop:
                # Snap the requested size to what the ISP scaler can produce.
                self.frame_size, self.scale_nd = find_isp_scale_params(internal_frame_size)
                self.img_h = self.img_w = self.frame_size
                print(f"Internal camera image size: {self.frame_size} x {self.frame_size}")
            else:
                self.img_w = 1920
                self.img_h = 1080
        else:
            print(f"Input source '{input_src}' is not supported in edge mode !")
            print("Valid input sources: 'rgb', 'rgb_laconic'")
            import sys
            sys.exit()
        # Defines the default crop region (pads the full image from both sides to make it a square image)
        # Used when the algorithm cannot reliably determine the crop region from the previous frame.
        box_size = max(self.img_w, self.img_h)
        x_min = (self.img_w - box_size) // 2
        y_min = (self.img_h - box_size) // 2
        self.init_crop_region = CropRegion(x_min, y_min, x_min+box_size, y_min+box_size, box_size)
        self.crop_region = self.init_crop_region
        # Build the pipeline and boot it on the device.
        self.device = dai.Device(self.create_pipeline())
        print("Pipeline started")
        # Define data queues
        if not self.laconic:
            self.q_video = self.device.getOutputQueue(name="cam_out", maxSize=1, blocking=False)
        self.q_processing_out = self.device.getOutputQueue(name="processing_out", maxSize=4, blocking=False)
        # Runtime statistics.
        self.fps = FPS()
        self.nb_frames = 0
        self.nb_pd_inferences = 0
def create_pipeline(self):
    """Build the DepthAI pipeline: ColorCamera -> crop ImageManip -> MoveNet
    NeuralNetwork -> on-device Script node, plus XLink outputs for the video
    stream (unless laconic) and for the processing results.

    Returns:
        dai.Pipeline: the fully wired pipeline, ready for dai.Device().
    """
    print("Creating pipeline...")
    # Start defining a pipeline
    pipeline = dai.Pipeline()
    pipeline.setOpenVINOVersion(dai.OpenVINO.Version.VERSION_2021_3)
    # ColorCamera
    print("Creating Color Camera...")
    cam = pipeline.create(dai.node.ColorCamera)
    cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    cam.setInterleaved(False)
    # if self.crop:
    # Crop video to square shape (palm detection takes square image as input)
    cam.setIspScale(self.scale_nd[0], self.scale_nd[1])
    cam.setVideoSize(self.frame_size, self.frame_size)
    cam.setPreviewSize(self.frame_size, self.frame_size)
    # else: # Letterboxing
    #     cam.setPreviewSize(*cam.getVideoSize())
    #     # Define pose detection pre processing (resize preview from (self.frame_size, self.frame_size) to (self.pd_input_length, self.pd_input_length))
    #     print("Creating letterboxing image manip...")
    #     letterboxing_manip = pipeline.create(dai.node.ImageManip)
    #     self.frame_size = max(cam.getVideoSize()) // 16 * 16
    #     letterboxing_manip.initialConfig.setResizeThumbnail(self.frame_size, self.frame_size)
    #     letterboxing_manip.setMaxOutputFrameSize(3*self.frame_size**2)
    #     letterboxing_manip.inputImage.setQueueSize(1)
    #     letterboxing_manip.inputImage.setBlocking(False)
    #     cam.preview.link(letterboxing_manip.inputImage)
    cam.setFps(self.internal_fps)
    cam.setBoardSocket(dai.CameraBoardSocket.RGB)
    cam.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
    # ImageManip for cropping
    # Waits for a config message (sent by the on-device script) before producing a frame.
    manip = pipeline.create(dai.node.ImageManip)
    manip.setMaxOutputFrameSize(self.pd_input_length*self.pd_input_length*3)
    manip.setWaitForConfigInput(True)
    manip.inputImage.setQueueSize(1)
    manip.inputImage.setBlocking(False)
    cam.preview.link(manip.inputImage)
    if not self.laconic:
        # Full-resolution video stream to the host, only when frames are wanted.
        cam_out = pipeline.create(dai.node.XLinkOut)
        cam_out.setStreamName("cam_out")
        cam.video.link(cam_out.input)
    # Define pose detection model
    print("Creating Pose Detection Neural Network...")
    pd_nn = pipeline.create(dai.node.NeuralNetwork)
    pd_nn.setBlobPath(str(Path(self.model).resolve().absolute()))
    # pd_nn.input.setQueueSize(1)
    # Increase threads for detection
    # pd_nn.setNumInferenceThreads(2)
    if self.crop:
        manip.out.link(pd_nn.input)
    # else:
    #     letterboxing_manip.out.link(pd_nn.input)
    # Define processing script
    # The script post-processes NN output on-device and feeds the next crop
    # region back to the ImageManip node, closing the tracking loop.
    processing_script = pipeline.create(dai.node.Script)
    processing_script.setScriptData(self.build_processing_script())
    pd_nn.out.link(processing_script.inputs['from_pd_nn'])
    processing_script.outputs['to_manip_cfg'].link(manip.inputConfig)
    # Define link to send result to host
    processing_out = pipeline.create(dai.node.XLinkOut)
    processing_out.setStreamName("processing_out")
    processing_script.outputs['to_host'].link(processing_out.input)
    print("Pipeline created.")
    return pipeline
def build_processing_script(self):
    """
    Generate the source code of the on-device Script node.

    The code depends on the NN model (thunder or lightning), the score
    threshold and the video frame shape, so it is produced by filling in
    the python template stored in processing_script.py.
    """
    # Load the template once per call; placeholders use $ syntax.
    with open('template_processing_script.py', 'r') as template_file:
        template = Template(template_file.read())
    # Inject the runtime parameters into the template placeholders.
    # The OrderedDict repr is rewritten to a plain dict() call so the
    # generated script does not need collections.
    return template.substitute(
        _init_crop_region=str(self.init_crop_region._asdict()).replace("OrderedDict", "dict"),
        _pd_input_length=self.pd_input_length,
        _score_thresh=self.score_thresh,
        _img_w=self.img_w,
        _img_h=self.img_h,
    )
def pd_postprocess(self, inference):
    """Decode the marshalled payload of the on-device script into a Body."""
    payload = marshal.loads(inference.getData())
    confidence = np.array(payload["scores"])
    # Rebuild the keypoint arrays as (N, 2) coordinate pairs.
    kps_norm = np.array(list(zip(payload["xnorm"], payload["ynorm"])))
    kps = np.array(list(zip(payload["x"], payload["y"])))
    region = CropRegion(**payload["next_crop_region"])
    return Body(confidence, kps_norm, kps, self.score_thresh, self.crop_region, region)
def next_frame(self):
    """Fetch the next camera frame (real or placeholder) and the decoded body.

    Returns:
        (frame, body): the BGR frame and the post-processed detection result.
    """
    self.fps.update()
    # In laconic mode the device does not stream video, so synthesize a black frame.
    if self.laconic:
        frame = np.zeros((self.frame_size, self.frame_size, 3), dtype=np.uint8)
    else:
        frame = self.q_video.get().getCvFrame()
    # Blocking read of the on-device processing output, then decode it.
    body = self.pd_postprocess(self.q_processing_out.get())
    # Remember the crop region the device will use for the next inference.
    self.crop_region = body.next_crop_region
    # Statistics
    if self.stats:
        self.nb_frames += 1
        self.nb_pd_inferences += 1
    return frame, body
def exit(self):
    """Report run statistics, but only when stats collection is enabled."""
    if not self.stats:
        return
    print(f"FPS : {self.fps.global_duration():.1f} f/s (# frames = {self.fps.nb_frames()})")
|
995,868 | d4753adeca62e5ea00888a0c0e9f3150cb6be192 | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the balancedSums function below.
def balancedSums(arr):
    """Return 'YES' if some index i satisfies sum(arr[:i]) == sum(arr[i+1:]),
    otherwise 'NO'.

    The previous two-pointer version broke ties (l_sum == r_sum) by always
    moving the right pointer, which mis-handles zero elements — e.g.
    [2, 0, 0, 0, 2] is balanced at index 2 but was reported 'NO' — and it
    raised IndexError on an empty array. A single O(n) pass comparing the
    running left-hand sum against the remainder is correct for any integers.
    """
    total = sum(arr)
    left = 0
    for x in arr:
        # x is the candidate pivot: elements before it sum to `left`,
        # elements after it sum to total - left - x.
        if left == total - left - x:
            return 'YES'
        left += x
    return 'NO'
if __name__ == '__main__':
    # HackerRank harness: results are written to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    T = int(input().strip())
    for T_itr in range(T):
        # n is the declared array length; it is consumed but not otherwise used.
        n = int(input().strip())
        arr = list(map(int, input().rstrip().split()))
        result = balancedSums(arr)
        fptr.write(result + '\n')
    fptr.close()
|
995,869 | 1a33b75a9770446fe0ee604923b33a19adf4d8cb | def MoreThanHalfNum_Solution(numbers):
    # write code here
    # Boyer-Moore majority vote: return the element occurring more than
    # len(numbers)/2 times, or 0 if there is none.
    res = None
    cnt = 0
    for i in numbers:  # keep the value that survives pairwise cancellation
        # NOTE(review): `if not res` also triggers when the candidate is 0,
        # so an array whose majority element is 0 resets the count wrongly.
        if not res:
            res = i
            cnt = 1
        else:
            if i == res:
                cnt += 1
            else:
                cnt -= 1
                if cnt == 0:
                    res = None
    # Second pass: verify the surviving candidate really exceeds half the length.
    cnt = 0
    for i in numbers:
        if i == res:
            cnt += 1
    if cnt > len(numbers) / 2:
        return res
    else:
        return 0
print(MoreThanHalfNum_Solution([1,2,3,2,2,2,5,4,2]))
My solutions

# Solution 1: time complexity O(n), space complexity O(n)
class Solution:
    def MoreThanHalfNum_Solution(self, numbers):
        """Return the element occurring more than len(numbers)//2 times, else 0.

        Counts every element with a dict (O(n) time, O(n) space)."""
        # write code here
        if not numbers:
            return 0
        d=dict()
        c = numbers
        # First pass: tally occurrences of each element.
        for i in c:
            d[i]=d.get(i,0)+1
        # Second pass: check each element's occurrence count.
        for i in c:
            if d[i] > len(c) //2 : #O(1)
                return i
        return 0
# Solution 2: aiming for O(1) space and O(n) time
# Idea: cancel out pairs of differing values; the survivor is the only possible majority element
def MoreThanHalfNum_Solution(self, numbers):
    """Boyer-Moore cancellation variant: O(n) time, O(1) extra space.

    Returns the element occurring more than len(numbers)//2 times, else 0."""
    numLen = len(numbers)
    last = 0
    lastCount = 0
    for num in numbers:
        if lastCount == 0:
            # No current candidate: adopt this element.
            last = num
            lastCount = 1
        else:
            if num == last:
                lastCount += 1
            else:
                lastCount -= 1
    if lastCount == 0:
        return 0
    else:
        # In this case `last` may be the element occurring more than half the time;
        # recount to confirm.
        lastCount = 0
        for num in numbers:
            if num == last:
                lastCount += 1
        if lastCount > (numLen >> 1):
            return last
    return 0
note:
Python's sorted() uses Timsort, with average time complexity O(n*log n).
An element occurring more than half the array length must occupy the middle position of the sorted array.
Method: take the middle element after sorting and count its occurrences; return it if the count exceeds half the length, otherwise return 0.
# -*- coding:utf-8 -*-\
class Solution:
    def MoreThanHalfNum_Solution(self, numbers):
        """Return the element occurring more than len(numbers)//2 times, else 0.

        The majority element (if any) must occupy the middle slot once sorted,
        so sort, take the middle, and confirm its count.
        """
        n = len(numbers)
        candidate = sorted(numbers)[n // 2]
        if numbers.count(candidate) > n // 2:
            return candidate
        return 0
|
995,870 | 378118936d76bfaca89a2fcbb18648ed2cc8d1f5 | import numpy as np
from snc.agents.hedgehog.params import BigStepLayeredPolicyParams, BigStepPenaltyPolicyParams
from snc.agents.hedgehog.policies.big_step_layered_policy import BigStepLayeredPolicy
from snc.agents.hedgehog.policies.big_step_policy import BigStepPolicy
from snc.agents.hedgehog.workload import workload
import snc.environments.scenarios as scenarios
def get_simple_link_constrained_model():
    """Build the 'simple_link_constrained_model' scenario with fixed arrival/
    service rates and buffer costs, and attach its workload matrix and nu
    vector to the returned env (read by the policy factory helpers below)."""
    cost_per_buffer = np.array([3, 1, 3, 1.5, 3]).reshape(-1, 1)
    param_overrides = dict(alpha1=4.8, mu12=2., mu13=4., mu25=2., mu32=4.5, mu34=1.8,
                           mu35=2., mu45=1., mu5=7.,
                           cost_per_buffer=cost_per_buffer)
    _, env = scenarios.load_scenario('simple_link_constrained_model', 0, param_overrides)
    # Keep the 6 leading workload vectors and stash them on the env.
    _, workload_mat, nu = workload.compute_load_workload_matrix(env,6)
    env.workload_mat = workload_mat
    env.nu = nu
    return env
def get_layered_policy_object_for_simple_link_constrained_model(env):
    """Create a CPLEX-backed BigStepLayeredPolicy for the given env."""
    # NOTE(review): the results of this call are unused — the policy below is
    # built from env.workload_mat / env.nu, which were already set by
    # get_simple_link_constrained_model. Confirm whether this recomputation
    # can be dropped.
    _, workload_mat, nu = workload.compute_load_workload_matrix(env, 6)
    policy_params = BigStepLayeredPolicyParams('cvx.CPLEX')
    policy_obj = BigStepLayeredPolicy(env.cost_per_buffer,
                                      env.constituency_matrix,
                                      env.job_generator.demand_rate,
                                      env.job_generator.buffer_processing_matrix,
                                      env.workload_mat,
                                      env.nu,
                                      env.list_boundary_constraint_matrices,
                                      env.ind_surplus_buffers,
                                      policy_params)
    return policy_obj
def get_penalty_policy_object_for_simple_link_constrained_model(env, kappa_w, kappa):
    """Create a CPLEX-backed BigStepPolicy with the given nonidling (kappa_w)
    and safety-stock (kappa) penalty coefficients."""
    # NOTE(review): as in the layered factory, this recomputation is unused;
    # env.workload_mat / env.nu are what get passed to the policy.
    _, workload_mat, nu = workload.compute_load_workload_matrix(env, 6)
    policy_params = BigStepPenaltyPolicyParams('cvx.CPLEX', False, kappa_w, kappa)
    policy_obj = BigStepPolicy(env.cost_per_buffer,
                               env.constituency_matrix,
                               env.job_generator.demand_rate,
                               env.job_generator.buffer_processing_matrix,
                               env.workload_mat,
                               env.nu,
                               env.list_boundary_constraint_matrices,
                               env.ind_surplus_buffers,
                               policy_params)
    return policy_obj
def test_no_safety_stocks_stealing_layered_policy():
    """From a high initial state, the layered policy (no tunable penalties)
    must keep the critical safety stock in buffer 2, matching the behaviour of
    the penalty policy when its safety-stock penalty dominates."""
    env = get_simple_link_constrained_model()
    # system is at high initial state, critical safety stock level in buffer 2 should been
    # maintained
    state = np.array([0, 1000, 0, 0, 0]).reshape(-1, 1)
    horizon = 100
    safety_stocks_vec = np.array([10, 10, 10, 10, 10, 10, 10, 10]).reshape(-1, 1)
    # safety stock penalty is higher than nonidling one
    policy_obj = get_penalty_policy_object_for_simple_link_constrained_model(env=env,
                                                                             kappa_w=1e3,
                                                                             kappa=1e6)
    z_star, _ = policy_obj.get_policy(state=state,
                                      safety_stocks_vec=safety_stocks_vec,
                                      k_idling_set=np.array([]),
                                      draining_bottlenecks=set([0]),
                                      horizon=horizon)
    # Forward-simulate the fluid dynamics over the horizon.
    new_state = state + (policy_obj.buffer_processing_matrix @ z_star
                         + policy_obj.demand_rate) * horizon
    # critical safety stock is maintained
    np.testing.assert_almost_equal(new_state[2], 10)
    # safety stock penalty is lower than nonidling one
    policy_obj = get_penalty_policy_object_for_simple_link_constrained_model(env=env,
                                                                             kappa_w=1e6,
                                                                             kappa=1e3 )
    z_star, _ = policy_obj.get_policy(state=state,
                                      safety_stocks_vec=safety_stocks_vec,
                                      k_idling_set=np.array([]),
                                      draining_bottlenecks=set([0]),
                                      horizon=horizon)
    new_state = state + (policy_obj.buffer_processing_matrix @ z_star
                         + policy_obj.demand_rate) * horizon
    # critical safety stock level is not maintained
    assert new_state[2] < 5
    # now create layered policy which has no penalty coefficients
    policy_obj = get_layered_policy_object_for_simple_link_constrained_model(env=env)
    z_star, _ = policy_obj.get_policy(state=state,
                                      safety_stocks_vec=safety_stocks_vec,
                                      k_idling_set=np.array([]),
                                      draining_bottlenecks=set([0]),
                                      horizon=horizon)
    new_state = state + (policy_obj.buffer_processing_matrix @ z_star
                         + policy_obj.demand_rate) * horizon
    # critical safety stock is maintained
    np.testing.assert_almost_equal(new_state[2], 10)
def test_full_draining_layered_policy():
    """From a fully drained state, the layered policy must keep the system
    drained (not build up safety stocks), matching the penalty policy when
    its nonidling penalty dominates."""
    env = get_simple_link_constrained_model()
    # system is in drained state and fluid policy should maintain it drained
    state = np.array([0, 0, 0, 0, 0]).reshape(-1, 1)
    horizon = 100
    safety_stocks_vec = np.array([10, 10, 10, 10, 10, 10, 10, 10]).reshape(-1, 1)
    # safety stock penalty is lower than nonidling one
    policy_obj = get_penalty_policy_object_for_simple_link_constrained_model(env=env,
                                                                             kappa_w=1e6,
                                                                             kappa=1e3 )
    z_star,_ = policy_obj.get_policy(state=state,
                                     safety_stocks_vec=safety_stocks_vec,
                                     k_idling_set=np.array([]),
                                     draining_bottlenecks=set([0]),
                                     horizon=horizon)
    # Forward-simulate the fluid dynamics over the horizon.
    new_state = state + (policy_obj.buffer_processing_matrix @ z_star
                         + policy_obj.demand_rate) * horizon
    # safety stock level is drained
    np.testing.assert_almost_equal(new_state[2], 0)
    # safety stock penalty is higher than nonidling one
    policy_obj = get_penalty_policy_object_for_simple_link_constrained_model(env=env,
                                                                             kappa_w=1e3,
                                                                             kappa=1e6 )
    z_star,_ = policy_obj.get_policy(state=state,
                                     safety_stocks_vec=safety_stocks_vec,
                                     k_idling_set=np.array([]),
                                     draining_bottlenecks=set([0]),
                                     horizon=horizon)
    new_state = state + (policy_obj.buffer_processing_matrix @ z_star
                         + policy_obj.demand_rate) * horizon
    # safety stock is not drained
    np.testing.assert_almost_equal(new_state[2], 10)
    # now create layered policy which has no penalty coeficients
    policy_obj = get_layered_policy_object_for_simple_link_constrained_model(env=env)
    z_star,_ = policy_obj.get_policy(state=state,
                                     safety_stocks_vec=safety_stocks_vec,
                                     k_idling_set=np.array([]),
                                     draining_bottlenecks=set([0]),
                                     horizon=horizon)
    new_state = state + (policy_obj.buffer_processing_matrix @ z_star
                         + policy_obj.demand_rate) * horizon
    # safety stock is drained
    np.testing.assert_almost_equal(new_state[2], 0)
def test_backward_fluid_rates_feasibility_drained_state():
    """Starting drained, the layered policy's z_star must remain feasible for
    every layer's LP (cost, draining, safety-stock, nonidling) over 10
    consecutive fluid steps."""
    env = get_simple_link_constrained_model()
    # system is in drained state and fluid policy should maintain it drained
    state = np.array([0,0,0,0,0]).reshape(-1,1)
    horizon = 100
    safety_stocks_vec = np.array([10,10,10,10,10,10,10,10]).reshape(-1,1)
    policy_obj = get_layered_policy_object_for_simple_link_constrained_model(env=env)
    for _ in range(10):
        z_star,_ = policy_obj.get_policy(state=state,
                                         safety_stocks_vec=safety_stocks_vec,
                                         k_idling_set=np.array([]),
                                         draining_bottlenecks=set([0]),
                                         horizon=horizon)
        # Substitute z_star back into each layer and check every constraint holds.
        assert np.all([c.value() for c in policy_obj._cost_lp_constraints])
        policy_obj._z_drain.value = z_star
        assert np.all([c.value() for c in policy_obj._draining_lp_constraints])
        policy_obj._z_safe.value = z_star
        assert np.all([c.value() for c in policy_obj._safety_stocks_lp_constraints])
        policy_obj._z_nonidle.value = z_star
        assert np.all([c.value() for c in policy_obj._nonidling_lp_constraints])
        state = state + (policy_obj.buffer_processing_matrix @ z_star
                         + policy_obj.demand_rate) * horizon
def test_backward_fluid_rates_feasibility_effective_state():
    """Same layer-feasibility check as the drained-state test, but starting
    from a high initial state in buffer 2."""
    env = get_simple_link_constrained_model()
    # system is in drained state and fluid policy should maintain it drained
    state = np.array([0, 1000, 0, 0, 0]).reshape(-1, 1)
    horizon = 100
    safety_stocks_vec = np.array([10, 10, 10, 10, 10, 10, 10, 10]).reshape(-1, 1)
    policy_obj = get_layered_policy_object_for_simple_link_constrained_model(env=env)
    for _ in range(10):
        z_star, _ = policy_obj.get_policy(state=state,
                                          safety_stocks_vec=safety_stocks_vec,
                                          k_idling_set=np.array([]),
                                          draining_bottlenecks=set([0]),
                                          horizon=horizon)
        # Substitute z_star back into each layer and check every constraint holds.
        assert np.all([c.value() for c in policy_obj._cost_lp_constraints])
        policy_obj._z_drain.value = z_star
        assert np.all([c.value() for c in policy_obj._draining_lp_constraints])
        policy_obj._z_safe.value = z_star
        assert np.all([c.value() for c in policy_obj._safety_stocks_lp_constraints])
        policy_obj._z_nonidle.value = z_star
        assert np.all([c.value() for c in policy_obj._nonidling_lp_constraints])
        state = state + (policy_obj.buffer_processing_matrix @ z_star
                         + policy_obj.demand_rate) * horizon
|
995,871 | f69a6e76604e6cc2a9c773c23f26203eb96ba6db | #!/usr/bin/env python3
from src import omnibus
if __name__ == '__main__':
    print('WARNING: PARAMETERS ARE SET FOR A LOCAL ENVIRONMENT')
    # Bind to loopback only; the debug server is not suitable for production.
    omnibus.run(host='127.0.0.1', port = 5001, debug=True)
995,872 | 12b8761443356aeb81f03541c06c5957fc16d48d | nucleotides = {"A": 0, "C": 0, "G": 0, "T": 0}
def count(file):
    """Tally A/C/G/T occurrences over the characters of `file` (a DNA string)
    into the module-level `nucleotides` dict and return that dict.

    Note: counts accumulate across calls because the dict is a global.
    """
    for x in file:
        # Membership test on the dict itself; the previous `.keys()` call was redundant.
        if x in nucleotides:
            nucleotides[x] += 1
    return nucleotides
if __name__ == '__main__':
    # NOTE(review): hard-coded absolute path — only valid on the author's machine.
    # readlines()[0] takes just the first line of the Rosalind dataset.
    file = open('/Users/nadiabey/PycharmProjects/rosalind/rosalind_dna.txt', 'r').readlines()[0]
    count(file)
    print(nucleotides['A'], nucleotides['C'], nucleotides['G'], nucleotides['T'])
|
995,873 | 869b2bc59722bab5d6538250d191a96ed51b4c67 | from abc import ABC, abstractmethod
from collections import Counter
from typing import Callable, Iterable
from checklisting.result import BaseTaskResult
from checklisting.result.status import TaskResultStatus
class BaseTaskResultStatusValidator(ABC):
    """Strategy interface that folds many task-result statuses into one."""

    def of_results(self, results: Iterable[BaseTaskResult]) -> TaskResultStatus:
        """Convenience wrapper: validate the statuses of full result objects."""
        return self.validate([result.status for result in results])

    @abstractmethod
    def validate(self, task_result_statuses: Iterable[TaskResultStatus]) -> TaskResultStatus:
        """Reduce the given statuses to a single aggregate status."""
        pass
class FallbackTaskResultStatusValidator(BaseTaskResultStatusValidator):
    """Delegates to an inner validator; when that is inconclusive (UNKNOWN),
    returns whatever the `fallback` factory produces instead."""

    def __init__(self, fallback: Callable[[], TaskResultStatus],
                 inner_validator: BaseTaskResultStatusValidator) -> None:
        super().__init__()
        self._fallback = fallback
        self._inner_validator = inner_validator

    def validate(self, task_result_statuses: Iterable[TaskResultStatus]) -> TaskResultStatus:
        result = self._inner_validator.validate(task_result_statuses)
        if result != TaskResultStatus.UNKNOWN:
            return result
        # Inner validator could not decide: ask the fallback for a status.
        return self._fallback()
class AllOfSameTypeTaskResultStatusValidator(BaseTaskResultStatusValidator):
    """Resolves to the common status when every entry agrees, else UNKNOWN."""

    def validate(self, task_result_statuses: Iterable[TaskResultStatus]) -> TaskResultStatus:
        distinct = set(task_result_statuses) if task_result_statuses else None
        if distinct is not None and len(distinct) == 1:
            return distinct.pop()
        # Empty input or more than one distinct status: inconclusive.
        return TaskResultStatus.UNKNOWN
class MostCommonTaskResultStatusValidator(BaseTaskResultStatusValidator):
    """Resolves to the most frequent status; UNKNOWN on empty input or on a
    tie for first place."""

    def validate(self, task_result_statuses: Iterable[TaskResultStatus]) -> TaskResultStatus:
        if not task_result_statuses:
            return TaskResultStatus.UNKNOWN
        statuses = Counter(task_result_statuses)
        # Ask for the top two so a tie for first place can be detected.
        most_common_statuses = statuses.most_common(2)
        try:
            (most_common, second_most_common) = most_common_statuses
            if most_common[1] == second_most_common[1]:
                return TaskResultStatus.UNKNOWN
        except ValueError:
            # Only one distinct status exists, so the 2-tuple unpack failed.
            most_common = most_common_statuses.pop()
        return most_common[0]
class AvailableStatusTaskResultStatusValidator(BaseTaskResultStatusValidator):
    """Resolves to a fixed status whenever it appears among the inputs."""

    def __init__(self, expected_status: TaskResultStatus) -> None:
        super().__init__()
        self._expected_status = expected_status

    def validate(self, task_result_statuses: Iterable[TaskResultStatus]) -> TaskResultStatus:
        # Empty input is inconclusive; otherwise report the expected status
        # if (and only if) it occurs at least once.
        if task_result_statuses and self._expected_status in task_result_statuses:
            return self._expected_status
        return TaskResultStatus.UNKNOWN
class AggregatedTaskResultStatusValidator(BaseTaskResultStatusValidator):
    """Tries each inner validator in order and returns the first conclusive
    (non-UNKNOWN) status; UNKNOWN if none decides."""

    # With *args the annotation applies per argument, so the element type
    # (not Iterable[...]) is the correct annotation here.
    def __init__(self, *inner_validators: BaseTaskResultStatusValidator) -> None:
        super().__init__()
        self._inner_validators = inner_validators

    def validate(self, task_result_statuses: Iterable[TaskResultStatus]) -> TaskResultStatus:
        for validator in self._inner_validators:
            result = validator.validate(task_result_statuses)
            if result != TaskResultStatus.UNKNOWN:
                return result
        return TaskResultStatus.UNKNOWN
class PrioritizedTaskResultStatusValidator(AggregatedTaskResultStatusValidator):
    """Picks the highest-priority status present, worst first:
    FAILURE > WARNING > SUCCESS > INFO > UNKNOWN."""

    def __init__(self):
        super().__init__(
            AvailableStatusTaskResultStatusValidator(TaskResultStatus.FAILURE),
            AvailableStatusTaskResultStatusValidator(TaskResultStatus.WARNING),
            AvailableStatusTaskResultStatusValidator(TaskResultStatus.SUCCESS),
            AvailableStatusTaskResultStatusValidator(TaskResultStatus.INFO),
            AvailableStatusTaskResultStatusValidator(TaskResultStatus.UNKNOWN),
        )
class DefaultTaskResultStatusValidator(AggregatedTaskResultStatusValidator):
    """Default policy: unanimous agreement wins; otherwise fall back to the
    prioritized (worst-status-first) rule."""

    def __init__(self):
        super().__init__(
            AllOfSameTypeTaskResultStatusValidator(),
            PrioritizedTaskResultStatusValidator(),
        )
|
995,874 | 69ca8d38370f31b4a4505e09b10fc76307287094 | import pygame
import sys
from TileMap import TileMap
from Ant import Ant
import AntBootCamp
# Initialize pygame
pygame.init()
pygame.mixer.init()
pygame.font.init()
# Query the user as to the number of ants to start with.
# Constrain it to 1-100 to prevent ridiculousness.
numAnts = -1
while numAnts == -1:
    try:
        i=int(raw_input('Enter starting number of ants:'))
        if i > 0 and i <= 100:
            numAnts = i
        else:
            print "Number is limited to 1-100 to prevent overloading"
    except ValueError:
        print "Not a number"
# Set the window size
size = width, height = 32*16, 32*16
# Create the pygame window
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Ants!")
# Font for rendering text
font = pygame.font.SysFont("consolas", 10)
# Create the map of tiles
tileMap = TileMap()
# Store all ants, living or dead, here.
ants = []
paused = False
# Create the user-requested number of ants.
for i in range(0, numAnts):
    ant = Ant(tileMap.tiles)
    # Train the ant!
    AntBootCamp.train(ant)
    ants.append(ant)
# Main loop (Python 2 script): one iteration per rendered frame.
while True:
    # Count the number of living or dead ants
    alive = 0
    dead = 0
    for ant in ants:
        if ant.alive == False:
            dead += 1
        else:
            alive += 1
    # Show the number of living ants in the window caption
    pygame.display.set_caption(str(len(ants)) + " Ants! " + str(alive) + " alive " + str(dead) + " dead ")
    # Clear the screen to black
    screen.fill([0,0,0])
    # Quit if the user hits the X button
    for event in pygame.event.get():
        if event.type == pygame.QUIT: sys.exit()
        # Any mouse click toggles pause.
        if event.type == pygame.MOUSEBUTTONUP:
            paused = not paused
    # Render all tiles
    tileMap.draw(screen, font)
    if not paused:
        # update each ant
        # NOTE(review): `ants` is appended to while being iterated, so ants
        # spawned this frame are also updated (and may themselves spawn)
        # within the same pass — confirm this cascade is intended.
        for ant in ants:
            # If the ant is telling u to spawn a new ant, do so, and set spawnNew to False again.
            if ant.spawnNew:
                ant.spawnNew = False
                newAnt = Ant(tileMap.tiles)
                # Train the ant!
                AntBootCamp.train(newAnt)
                ants.append(newAnt)
            ant.update()
    # Draw the ants
    for ant in ants:
        ant.draw(screen, font)
    pygame.display.flip()
    # ~3 updates per second.
    pygame.time.wait(300)
995,875 | ff3cba0535668303a6162970f00be968aa54488c | """Seed file to make sample data for flask_feedback db."""
from models import User, Feedback, db
from app import app
# Create all tables
# Reset the schema: drop everything, then recreate all tables from the models.
db.drop_all()
db.create_all()
995,876 | 10734c9e529ade925c45b2e611af71e1ec817f04 | # Generated by Django 3.1.1 on 2020-10-04 13:39
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: replaces qna's FK-like `sub` field with a plain
    CharField `subject`, refreshes classsj.date's default, and deletes the
    now-unused `subject` model."""

    dependencies = [
        ('dash', '0015_auto_20201004_1905'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='qna',
            name='sub',
        ),
        migrations.AddField(
            model_name='qna',
            name='subject',
            field=models.CharField(max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='classsj',
            name='date',
            # NOTE(review): this default is a fixed timestamp baked in at
            # makemigrations time, not "now" — presumably a model default of
            # datetime.now() evaluated eagerly; verify against the model.
            field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 10, 4, 19, 9, 10, 221826)),
        ),
        migrations.DeleteModel(
            name='subject',
        ),
    ]
|
995,877 | bf8ce3290780280a97b81ac3e1cff12b1a981d34 | from types import MappingProxyType
from typing import Mapping, Union, Any
from django.contrib.auth.hashers import make_password
# Read-only default kwargs for creating the primary test user.
# (Union[Any] collapses to Any, so the annotations use Any directly.)
DEFAULT_USER_PARAMS: Mapping[str, Any] = MappingProxyType(
    {
        "email": "member@messages.com",
        "username": "member",
        # Pre-hashed so fixtures can be inserted without the auth pipeline.
        "password": make_password("member_password"),
        "is_staff": False,
        "is_superuser": False,
    }
)
# Read-only default kwargs for a second, distinct test user.
DEFAULT_USER_2_PARAMS: Mapping[str, Any] = MappingProxyType(
    {
        "email": "member2@messages.com",
        "username": "member2",
        "password": make_password("member_password"),
        "is_staff": False,
        "is_superuser": False,
    }
)
# Read-only default form payload for the registration view (plain-text
# password fields, as submitted by a browser).
DEFAULT_REGISTER_PARAMS: Mapping[str, str] = MappingProxyType(
    {
        "email": "member@messages.com",
        "username": "member",
        "password1": "member_password",
        "password2": "member_password",
    }
)
|
995,878 | 09f62b31cfe8676ca55b264f1b1a63bc41af8aef | from django.conf.urls import url
from . import views
# URL routes for the notifications/negotiation views.
urlpatterns = [
    url(r'^pushgovtnotification/$', views.pushgovtnotification, name='pushgovtnotification'),
    url(r'^getgovtnotifications/$', views.getgovtnotifications, name='getgovtnotifications'),
    # The four patterns below were missing the '^' anchor; Django matches URL
    # patterns with re.search, so unanchored patterns would also fire on any
    # path merely *ending* with the suffix. Anchored for consistency with the
    # first two routes.
    url(r'^raiseinterest/$', views.raiseinterest, name='raiseinterest'),
    url(r'^getnotifications/$', views.getnotifications, name='getnotifications'),
    url(r'^negotiate/$', views.negotiate, name='negotiate'),
    url(r'^endnegotiation/$', views.endnegotiation, name='endnegotiation'),
]
995,879 | 09f3a7ffa72e9d8513f323edcc6fb22ef619c4ba | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Parses a XML file and counts the node types
"""
import xml.etree.ElementTree as ET
import pprint
def count_tags(filename):
    """Stream-parse the OSM XML file and return a dict {tag_name: count}.

    Uses iterparse so the whole document never has to fit in memory; the root
    element's children are cleared after each 'end' event.
    NOTE(review): the file handle is never closed — a `with` block would fix it.
    This is Python 2 code (print statement, context.next()).
    """
    osm_file = open(filename, "r")
    tags = {}
    # get an iterable
    context = ET.iterparse(osm_file, events=("start", "end"))
    # turn it into an iterator
    context = iter(context)
    # get the root element
    event, root = context.next()
    i = 1
    for event, node in context:
        if event == "start":
            i = i + 1
            if not(node.tag in tags):
                tags[node.tag] = 1
            else:
                tags[node.tag] = tags[node.tag] + 1
            #Print the value of tags every 10000 nodes
            # (i is reset after printing, so progress is reported periodically)
            if (i % 10000) == 0:
                print tags
                i = 1
        #Clear the root node to decrease memory usage
        if event == "end":
            root.clear()
    return tags
def test():
    """Ad-hoc check: count the tags of a local OSM extract and pretty-print them."""
    # NOTE(review): hard-coded Windows path — only valid on the author's machine.
    tags = count_tags('C:/Users/Erni/Downloads/venice_italy.osm')
    pprint.pprint(tags)
if __name__ == "__main__":
    test()
995,880 | 22b35ac5c6419a940e901a7201a78bacb43280e6 | # -*- encoding: UTF-8 -*-
# Pick a random target value in 1-100 for the guessing game.
import random
goalNum = random.randint(1,100)
print('目标值:', goalNum)
num = -1
# Keep prompting until the user's guess equals the target.
while num != goalNum:
    num = int(input('请输入1-100内的随机数:'))
    if num > goalNum:
        print('大了')
    elif num < goalNum:
        print('小了')
print('Congratulations!')
995,881 | 31a356964d66761a0ac889d5539bf9c6476862ce | """
Django settings for ktlweb project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import locale
import sys
import logging
import dj_database_url
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# Strings (lower-cased) that count as "true" for env-var flags.
TRUTH = ('true', '1', 'yes', 'on')
# DEBUG defaults to False; any TRUTH string in WAGTAIL_DEBUG enables it.
DEBUG = os.getenv('WAGTAIL_DEBUG', 'False').lower() in TRUTH
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
'home',
'gcal',
'news',
'search',
'wagtail.contrib.forms',
'wagtail.contrib.redirects',
'wagtail.embeds',
'wagtail.sites',
'wagtail.users',
'wagtail.snippets',
'wagtail.documents',
'wagtail.images',
'wagtail.search',
'wagtail.admin',
'wagtail.core',
'wagtail.contrib.postgres_search',
'modelcluster',
'compressor',
'taggit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'storages',
'wagtail.contrib.modeladmin'
]
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'wagtail.contrib.redirects.middleware.RedirectMiddleware',
]
ROOT_URLCONF = 'ktlweb.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
os.path.join(BASE_DIR, 'home', 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages'
]
}
}
]
WSGI_APPLICATION = 'ktlweb.wsgi.application'
# Database set by dj_database_url before EOF
DATABASES = {}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'nb'
TIME_ZONE = 'Europe/Oslo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Module-import side effect: requires the 'nb_NO.UTF-8' locale to be installed
# on the host, otherwise settings import fails with locale.Error.
locale.setlocale(locale.LC_ALL, 'nb_NO.UTF-8')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
]
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, 'static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# STATIC_URL = '/static/'
# MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# MEDIA_URL = '/media/'
# compressor settings
COMPRESS_ENABLED = False
# Wagtail settings
WAGTAIL_SITE_NAME = 'Karma Tashi Ling buddhistsamfunn'
# Usage stats from images and documents
WAGTAIL_USAGE_COUNT_ENABLED = True
DATABASES['default'] = dj_database_url.config()
KTLWEB_LOGGER = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'detailed': {
'format': '[%(levelname)s] - (%(pathname)s:%(lineno)d) %(message)s'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'stream': sys.stdout,
'formatter': 'detailed'
}
},
'loggers': {
'gcal': {
'handlers': ['console'],
'level': logging.DEBUG if DEBUG else logging.INFO
}
}
}
# Search
WAGTAILSEARCH_BACKENDS = {
'default': {
'BACKEND': 'wagtail.contrib.postgres_search.backend'
}
}
|
995,882 | 51c50b7258dbcc77369174f0f442dbb506d6dabf | # Copyright 2020 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, Callable, Iterable, List
import cirq
import pandas as pd
class ConvertingSampler(cirq.Sampler):
    """Sampler delegate which converts circuit before sampling.
    """
    def __init__(self,
                 sampler: cirq.Sampler,
                 convert_func: Union[
                     Callable[[cirq.Circuit], cirq.Circuit],
                     Iterable[Callable[[cirq.Circuit], cirq.Circuit]]]
                 ) -> None:
        """Initializes the sampler with conversion.
        Args:
            sampler: Delegate sampler where all invocations are forwarded to.
            convert_func: Either a function that converts the circuit, or list
                of the converting functions which are applied one by one before
                delegating to the target sampler.
        """
        self._sampler = sampler
        # NOTE(review): a single converter object that happens to be iterable
        # would be (mis)treated as a sequence of converters here — confirm
        # callers only pass plain callables or real sequences.
        if isinstance(convert_func, Iterable):
            self._converters = convert_func
        else:
            # Wrap the lone callable in a 1-tuple so _convert can always iterate.
            self._converters = convert_func,

    def _convert(self, program: cirq.Circuit) -> cirq.Circuit:
        """Apply every converter, in order, to the circuit."""
        for converter in self._converters:
            program = converter(program)
        return program

    def run(
            self,
            program: cirq.Circuit,
            param_resolver: cirq.ParamResolverOrSimilarType = None,
            repetitions: int = 1,
    ) -> cirq.Result:
        """Convert, then forward to the delegate's run()."""
        program = self._convert(program)
        return self._sampler.run(program,
                                 param_resolver,
                                 repetitions)

    def sample(
            self,
            program: cirq.Circuit,
            *,
            repetitions: int = 1,
            params: cirq.Sweepable = None,
    ) -> pd.DataFrame:
        """Convert, then forward to the delegate's sample()."""
        program = self._convert(program)
        return self._sampler.sample(program,
                                    repetitions=repetitions,
                                    params=params)

    def run_sweep(
            self,
            program: cirq.Circuit,
            params: cirq.Sweepable,
            repetitions: int = 1,
    ) -> List[cirq.Result]:
        """Convert, then forward to the delegate's run_sweep()."""
        program = self._convert(program)
        return self._sampler.run_sweep(program, params, repetitions)

    async def run_async(self, program: cirq.Circuit, *,
                        repetitions: int) -> cirq.Result:
        """Convert (synchronously), then await the delegate's run_async()."""
        program = self._convert(program)
        return await self._sampler.run_async(program,
                                             repetitions=repetitions)

    async def run_sweep_async(
            self,
            program: cirq.Circuit,
            params: cirq.Sweepable,
            repetitions: int = 1,
    ) -> List[cirq.Result]:
        """Convert (synchronously), then await the delegate's run_sweep_async()."""
        program = self._convert(program)
        return await self._sampler.run_sweep_async(program, params, repetitions)
995,883 | 8e3fe72d0804cff95ac47f0a90bc8eaded315f9c | from torchtext.datasets import Multi30k
from torchtext.data import Field, BucketIterator, Dataset
import spacy
import numpy as np
import random
import math
import time
import dill as pickle
from transformer import Constants
def tokenize_de(text):
    """
    Tokenizes German text from a string into a list of strings (tokens) and reverses it
    """
    # Loading a spaCy model is expensive; cache it on the function object after
    # the first call instead of reloading it for every sentence. Token output
    # is unchanged.
    spacy_de = getattr(tokenize_de, "_nlp", None)
    if spacy_de is None:
        spacy_de = tokenize_de._nlp = spacy.load('de')
    return [tok.text for tok in spacy_de.tokenizer(text)][::-1]
def tokenize_en(text):
    """
    Tokenizes English text from a string into a list of strings (tokens)
    """
    # Same caching scheme as tokenize_de: load the spaCy model once and reuse it.
    spacy_en = getattr(tokenize_en, "_nlp", None)
    if spacy_en is None:
        spacy_en = tokenize_en._nlp = spacy.load('en')
    return [tok.text for tok in spacy_en.tokenizer(text)]
def get_dataloader(dataset_name, batch_size, device='cpu'):
    """Build train/valid/test BucketIterators plus the SRC/TRG fields.

    Only 'Multi30k' (German->English) is supported; any other name raises
    NotImplementedError before the summary prints below are reached.
    """
    if dataset_name == 'Multi30k':
        # German source is tokenized reversed (see tokenize_de); both sides get
        # <sos>/<eos> markers and are lower-cased.
        SRC = Field(tokenize=tokenize_de,
                    init_token='<sos>',
                    eos_token='<eos>',
                    lower=True)
        TRG = Field(tokenize=tokenize_en,
                    init_token='<sos>',
                    eos_token='<eos>',
                    lower=True)
        # SRC = Field(tokenize="spacy",
        #             tokenizer_language="de",
        #             init_token='<sos>',
        #             eos_token='<eos>',
        #             lower=True)
        #
        # TRG = Field(tokenize="spacy",
        #             tokenizer_language="en",
        #             init_token='<sos>',
        #             eos_token='<eos>',
        #             lower=True)
        train_data, valid_data, test_data = Multi30k.splits(exts=('.de', '.en'), fields=(SRC, TRG))
        # Vocabulary from the training split only; tokens seen <2 times map to <unk>.
        SRC.build_vocab(train_data, min_freq=2)
        TRG.build_vocab(train_data, min_freq=2)
        trainloader, validloader, testloader = BucketIterator.splits(
            (train_data, valid_data, test_data),
            batch_size=batch_size,
            device=device)
    else:
        raise NotImplementedError
    print(f"Number of training examples: {len(train_data.examples)}")
    print(f"Number of validation examples: {len(valid_data.examples)}")
    print(f"Number of testing examples: {len(test_data.examples)}")
    print(f"Unique tokens in source vocabulary: {len(SRC.vocab)}")
    print(f"Unique tokens in target vocabulary: {len(TRG.vocab)}")
    return trainloader, validloader, testloader, SRC, TRG
def prepare_dataloaders(opt, device):
    """Load a preprocessed dataset pickle and build train/validation iterators.

    Side effect: fills several attributes on `opt` (max_token_seq_len, pad
    indices, vocab sizes) that the caller reads when constructing the model.
    The pickle layout is assumed to contain 'settings', 'vocab', 'train' and
    'valid' keys -- TODO confirm against the preprocessing script.
    """
    batch_size = opt.batch_size
    data = pickle.load(open(opt.data_pkl, 'rb'))  # NOTE(review): file handle is never closed

    opt.max_token_seq_len = data['settings'].max_len
    opt.src_pad_idx = data['vocab']['src'].vocab.stoi[Constants.PAD_WORD]
    opt.trg_pad_idx = data['vocab']['trg'].vocab.stoi[Constants.PAD_WORD]

    opt.src_vocab_size = len(data['vocab']['src'].vocab)
    opt.trg_vocab_size = len(data['vocab']['trg'].vocab)

    #========= Preparing Model =========#
    # if opt.embs_share_weight:
    #     assert data['vocab']['src'].vocab.stoi == data['vocab']['trg'].vocab.stoi, \
    #         'To sharing word embedding the src/trg word2idx table shall be the same.'

    fields = {'src': data['vocab']['src'], 'trg':data['vocab']['trg']}

    train = Dataset(examples=data['train'], fields=fields)
    val = Dataset(examples=data['valid'], fields=fields)

    train_iterator = BucketIterator(train, batch_size=batch_size, device=device, train=True)
    val_iterator = BucketIterator(val, batch_size=batch_size, device=device)

    return train_iterator, val_iterator
if __name__ == '__main__':
trainloader, validloader, testloader, SRC, TRG = get_dataloader(dataset_name='Multi30k', batch_size=10)
for batch in trainloader:
break |
995,884 | 5950f187ba73da3fdbe9ded868609285cb654f7d | # port definitions from course_vocab to vocab
from connect import connect
def port_definition(cur, course_id, vocab_id):
    """Copy one definition from course_vocab into the vocab table.

    Reads the definition stored for (course_id, vocab_id) in course_vocab and
    writes it into vocab.definition for that vocab_id. Does not commit; the
    caller owns the transaction.

    Raises:
        LookupError: if no matching course_vocab row exists. (The original
        crashed with an opaque IndexError from fetchall()[0][0] instead.)
    """
    DEFCOM = """SELECT definition FROM course_vocab
                WHERE course_id=%s AND vocab_id=%s
             """
    cur.execute(DEFCOM, (course_id, vocab_id))
    # fetchone() avoids materialising all rows just to take the first one.
    row = cur.fetchone()
    if row is None:
        raise LookupError(
            "no course_vocab row for course_id=%r, vocab_id=%r" % (course_id, vocab_id)
        )
    definition = row[0]
    COMM = """UPDATE vocab
              SET definition=%s
              WHERE id=%s
           """
    cur.execute(COMM, (definition, vocab_id))
def port_all_definitions(cur):
    """Port every (course_id, vocab_id) definition from course_vocab to vocab."""
    cur.execute("SELECT course_id, vocab_id FROM course_vocab")
    rows = cur.fetchall()
    print(rows)
    for course_id, vocab_id in rows:
        print(course_id)
        port_definition(cur, course_id, vocab_id)
conn, cur = connect()
port_all_definitions(cur)
cur.close()
conn.commit()
conn.close() |
995,885 | c4691bfb93a98281d262f96a301c7df55e5c15ab | # -*- coding: UTF-8 -*-
import unittest
from eolas.lexer import lex
from eolas.parser import parse
class LexTest(unittest.TestCase):
    """Unit tests for the eolas lexer."""

    def test_identifiers(self):
        # Identifiers (including space-separated compounds) keep their full
        # text as the token value; only the first token of each input is checked.
        idents = ("a", "foo", "___x", "with spaces", "a b c d", "if true")
        for ident in idents:
            self.assertEqual(ident, next(lex(ident)).value)

    def test_reserved_identifiers(self):
        # Reserved words lex to dedicated token types, not plain identifiers.
        self.assertEqual("IF", next(lex("if")).type)
        self.assertEqual("RETURN", next(lex("return")).type)

    def test_ints(self):
        # Integer literals are converted to int token values.
        self.assertEqual(42, next(lex("42")).value)
class ParseTest(unittest.TestCase):
    """Smoke tests: every syntactically valid program must parse without error."""

    def test_valid_programs(self):
        valid_programs = (
            "{ return 42 }",
            "{ return 42; }",
            "{ a = 42 }",
            "{ a = b = c = d = e }",
            "{ IF (a = a) THEN (Result = 1) ELSE (Result = 2) ; }"
        )
        for prg in valid_programs:
            try:
                parse(prg)
            except SyntaxError as e:
                # BUG FIX: the original used self.assertIsNone(e), which does fail
                # but with a confusing "SyntaxError(...) is not None" message and
                # no indication of which program broke. Fail explicitly instead.
                self.fail("valid program failed to parse: %r (%s)" % (prg, e))
if __name__ == "__main__":
unittest.main()
|
995,886 | 8325094ebe012a6c6a1599e9dfa2ba45fc0b8ede | import tensorflow as tf
# Import TensorFlow Datasets
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
# Helper libraries
import math
import numpy as np
import matplotlib.pyplot as plt
import logging
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
dataset, metadata = tfds.load('fashion_mnist', as_supervised=True, with_info=True)
train_dataset, test_dataset = dataset['train'], dataset['test']
class_names = metadata.features['label'].names
print("Class names: {}".format(class_names))
num_train_examples = metadata.splits['train'].num_examples
num_test_examples = metadata.splits['test'].num_examples
print("Number of training examples: {}".format(num_train_examples))
print("Number of test examples: {}".format(num_test_examples))
def normalize(images, labels):
    """Scale uint8 pixel values into [0, 1] float32; labels pass through untouched."""
    scaled = tf.cast(images, tf.float32) / 255
    return scaled, labels
# The map function applies the normalize function to each element in the train
# and test datasets
train_dataset = train_dataset.map(normalize)
test_dataset = test_dataset.map(normalize)
# The first time you use the dataset, the images will be loaded from disk
# Caching will keep them in memory, making training faster
train_dataset = train_dataset.cache()
test_dataset = test_dataset.cache()
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, (3,3), padding='same', activation=tf.nn.relu,
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D((2, 2), strides=2),
tf.keras.layers.Conv2D(64, (3,3), padding='same', activation=tf.nn.relu),
tf.keras.layers.MaxPooling2D((2, 2), strides=2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=['accuracy'])
BATCH_SIZE = 32
train_dataset = train_dataset.cache().repeat().shuffle(num_train_examples).batch(BATCH_SIZE)
test_dataset = test_dataset.cache().batch(BATCH_SIZE)
model.fit(train_dataset, epochs=5, steps_per_epoch=math.ceil(num_train_examples/BATCH_SIZE))
test_loss, test_accuracy = model.evaluate(test_dataset, steps=math.ceil(num_test_examples/32))
print('Accuracy on test dataset:', test_accuracy)
# Make predictions
for test_images, test_labels in test_dataset.take(1):
test_images = test_images.numpy()
test_labels = test_labels.numpy()
predictions = model.predict(test_images)
print(np.argmax(predictions[0]))
# Grab an image from the test dataset
img = test_images[0]
print(img.shape)
# Add the image to a batch where it's the only member.
img = np.array([img])
print(img.shape)
# Predict the image
predictions_single = model.predict(img)
print(predictions_single)
print(np.argmax(predictions_single[0]))
print('End here')
|
995,887 | 279af42b0381bb3ef68d1a336f6368f2c10d7466 | from .caffael_exception import CaffaelException
class MissingInformationError(CaffaelException):
    """Raised when required information is absent.

    Part of the package exception hierarchy rooted at CaffaelException, so
    callers may catch this specific error or the common base class.
    """
    pass
|
995,888 | 23988794e6c3749687d245bd148674f6132d7e51 | """Tests for the Markdown application create API."""
from django.test import TestCase, override_settings
from marsha.core import factories as core_factories
from marsha.core.factories import (
OrganizationAccessFactory,
PlaylistAccessFactory,
PlaylistFactory,
)
from marsha.core.models import ADMINISTRATOR
from marsha.core.simple_jwt.factories import (
InstructorOrAdminLtiTokenFactory,
StudentLtiTokenFactory,
UserAccessTokenFactory,
)
from marsha.markdown.factories import MarkdownDocumentFactory
from marsha.markdown.models import MarkdownDocument
# We don't enforce arguments documentation in tests
# pylint: disable=unused-argument
@override_settings(MARKDOWN_ENABLED=True)
class MarkdownCreateAPITest(TestCase):
"""Test for the Markdown document create API."""
maxDiff = None
def test_api_document_create_anonymous(self):
"""An anonymous should not be able to create a Markdown document."""
response = self.client.post("/api/markdown-documents/")
self.assertEqual(response.status_code, 401)
def test_api_document_create_student(self):
"""A student should not be able to create a Markdown document."""
markdown_document = MarkdownDocumentFactory()
jwt_token = StudentLtiTokenFactory(
playlist=markdown_document.playlist,
permissions__can_update=True,
)
response = self.client.post(
"/api/markdown-documents/", HTTP_AUTHORIZATION=f"Bearer {jwt_token}"
)
self.assertEqual(response.status_code, 403)
def test_api_document_create_instructor(self):
"""An instructor should not be able to create a Markdown document."""
playlist = core_factories.PlaylistFactory()
jwt_token = InstructorOrAdminLtiTokenFactory(playlist=playlist)
self.assertEqual(MarkdownDocument.objects.count(), 0)
response = self.client.post(
"/api/markdown-documents/",
{
"lti_id": "document_one",
"playlist": str(playlist.id),
"title": "Some document",
},
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(MarkdownDocument.objects.count(), 1)
self.assertEqual(response.status_code, 201)
document = MarkdownDocument.objects.first()
self.assertEqual(
response.json(),
{
"id": str(document.id),
"images": [],
"is_draft": True,
"playlist": {
"id": str(playlist.id),
"lti_id": playlist.lti_id,
"title": playlist.title,
},
"position": 0,
"rendering_options": {},
"translations": [
{
"content": "",
"language_code": "en",
"rendered_content": "",
"title": "Some document",
}
],
},
)
def test_api_document_create_instructor_no_title(self):
"""An instructor should not be able to create a Markdown document without title."""
playlist = core_factories.PlaylistFactory()
jwt_token = InstructorOrAdminLtiTokenFactory(playlist=playlist)
self.assertEqual(MarkdownDocument.objects.count(), 0)
response = self.client.post(
"/api/markdown-documents/",
{
"lti_id": "document_one",
"playlist": str(playlist.id),
"title": "",
},
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(MarkdownDocument.objects.count(), 1)
self.assertEqual(response.status_code, 201)
document = MarkdownDocument.objects.first()
self.assertEqual(
response.json(),
{
"id": str(document.id),
"images": [],
"is_draft": True,
"playlist": {
"id": str(playlist.id),
"lti_id": playlist.lti_id,
"title": playlist.title,
},
"position": 0,
"rendering_options": {},
"translations": [
{
"content": "",
"language_code": "en",
"rendered_content": "",
"title": "",
}
],
},
)
def test_api_document_create_user_access_token(self):
"""A user with UserAccessToken should not be able to create a Markdown document."""
organization_access = OrganizationAccessFactory()
jwt_token = UserAccessTokenFactory(user=organization_access.user)
response = self.client.post(
"/api/markdown-documents/", HTTP_AUTHORIZATION=f"Bearer {jwt_token}"
)
self.assertEqual(response.status_code, 403)
def test_api_document_create_user_access_token_organization_admin(self):
"""An organization administrator should be able to create a Markdown document."""
organization_access = OrganizationAccessFactory(role=ADMINISTRATOR)
playlist = PlaylistFactory(organization=organization_access.organization)
jwt_token = UserAccessTokenFactory(user=organization_access.user)
self.assertEqual(MarkdownDocument.objects.count(), 0)
response = self.client.post(
"/api/markdown-documents/",
{
"lti_id": "document_one",
"playlist": str(playlist.id),
"title": "Some document",
},
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(MarkdownDocument.objects.count(), 1)
self.assertEqual(response.status_code, 201)
document = MarkdownDocument.objects.first()
self.assertEqual(
response.json(),
{
"id": str(document.id),
"images": [],
"is_draft": True,
"playlist": {
"id": str(playlist.id),
"lti_id": playlist.lti_id,
"title": playlist.title,
},
"position": 0,
"rendering_options": {},
"translations": [
{
"content": "",
"language_code": "en",
"rendered_content": "",
"title": "Some document",
}
],
},
)
response2 = self.client.post(
"/api/markdown-documents/",
{
"lti_id": "document_two",
"playlist": str(playlist.id),
"title": "Document two",
},
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(MarkdownDocument.objects.count(), 2)
self.assertEqual(response2.status_code, 201)
document2 = MarkdownDocument.objects.latest("created_on")
self.assertEqual(
response2.json(),
{
"id": str(document2.id),
"images": [],
"is_draft": True,
"playlist": {
"id": str(playlist.id),
"lti_id": playlist.lti_id,
"title": playlist.title,
},
"position": 0,
"rendering_options": {},
"translations": [
{
"content": "",
"language_code": "en",
"rendered_content": "",
"title": "Document two",
}
],
},
)
def test_api_document_create_user_access_token_playlist_admin(self):
"""A playlist administrator should be able to create a Markdown document."""
playlist_access = PlaylistAccessFactory(role=ADMINISTRATOR)
jwt_token = UserAccessTokenFactory(user=playlist_access.user)
self.assertEqual(MarkdownDocument.objects.count(), 0)
response = self.client.post(
"/api/markdown-documents/",
{
"lti_id": "document_one",
"playlist": str(playlist_access.playlist.id),
"title": "Some document",
},
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(MarkdownDocument.objects.count(), 1)
self.assertEqual(response.status_code, 201)
document = MarkdownDocument.objects.first()
self.assertEqual(
response.json(),
{
"id": str(document.id),
"images": [],
"is_draft": True,
"playlist": {
"id": str(playlist_access.playlist.id),
"lti_id": playlist_access.playlist.lti_id,
"title": playlist_access.playlist.title,
},
"position": 0,
"rendering_options": {},
"translations": [
{
"content": "",
"language_code": "en",
"rendered_content": "",
"title": "Some document",
}
],
},
)
response2 = self.client.post(
"/api/markdown-documents/",
{
"lti_id": "document_two",
"playlist": str(playlist_access.playlist.id),
"title": "Document two",
},
HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
)
self.assertEqual(MarkdownDocument.objects.count(), 2)
self.assertEqual(response2.status_code, 201)
document2 = MarkdownDocument.objects.latest("created_on")
self.assertEqual(
response2.json(),
{
"id": str(document2.id),
"images": [],
"is_draft": True,
"playlist": {
"id": str(playlist_access.playlist.id),
"lti_id": playlist_access.playlist.lti_id,
"title": playlist_access.playlist.title,
},
"position": 0,
"rendering_options": {},
"translations": [
{
"content": "",
"language_code": "en",
"rendered_content": "",
"title": "Document two",
}
],
},
)
|
995,889 | 049fb10c68ae49cdd1a5b58a44242643c9d174e1 | from django.apps import AppConfig
class ProjectsAwardsConfig(AppConfig):
name = 'projects_awards'
|
995,890 | be1a1b49cc7a7f9daf0ecbe7c3ce5afc4ccd8813 |
import tweepy, re, twitterKeys, tag, datetime
RE_EMOJI = re.compile(
u'(\u00a9|\u00ae|[\u2000-\u3300]|\ud83c[\ud000-\udfff]|\ud83d[\ud000-\udfff]|\ud83e[\ud000-\udfff])')
TWEET_COUNT = 2500
PAGE_COUNT = TWEET_COUNT / 100
DIV_BY = 4
auth = tweepy.OAuthHandler(twitterKeys.consumer_key, twitterKeys.consumer_secret)
auth.set_access_token(twitterKeys.access_token, twitterKeys.access_token_secret)
api = tweepy.API(auth)
unRepeatedList = []
LATEST_ID = 0
# PERFORMANCE FIX: the original compiled this large character-class regex on
# every call, and remove_emoji() runs once per tweet in the cleaning loop.
# Compile once at import time instead.
_EMOJI_PATTERN = re.compile("["
                            u"\U0001F600-\U0001F64F"  # emoticons
                            u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                            u"\U0001F680-\U0001F6FF"  # transport & map symbols
                            u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                            u"\U00002702-\U000027B0"
                            u"\U000024C2-\U0001F251"
                            u"\U0001F900-\U0001F9FF"
                            "]+", flags=re.UNICODE)


def remove_emoji(string):
    """Return *string* with characters in the emoji ranges above removed."""
    return _EMOJI_PATTERN.sub(r'', string)
def appendToList(newString):
unRepeatedList.append(newString)
def cleanDuplicatedTweets(resultObject, array, string):
global LATEST_ID
if (string not in array):
appendToList(string)
LATEST_ID = resultObject.id
def cleantext(resultObject):
    """Normalize a tweet's text and hand it to cleanDuplicatedTweets.

    Strips, in order: hashtags, the RT/@mention prefix, emoji, punctuation,
    and https links. Each regex step collapses whitespace via split/join.
    Order matters: hashtag/mention patterns must run before punctuation
    removal so they still see their leading '#'/'@' markers.
    """
    newString = ' '.join(re.sub(r'(\#[A-Za-z0-9ğüşöçİĞÜŞÖÇ]*)(?!;)', '', resultObject.full_text).split())  # cleans Hashtags
    newString = ' '.join(re.sub(r'((RT)*( )*\@[a-zA-Z0-9ğüşöçİĞÜŞÖÇ_]*:?)(?!;)', '', newString).split())  # cleans RT
    newString = remove_emoji(newString)  # cleans Emoji
    newString = newString.replace(',','')
    newString = newString.replace('.','')
    newString = newString.replace('-',' ')
    # NOTE(review): this line is dead code -- '.' was already removed above,
    # so '...' can never still be present.
    newString = newString.replace('...','')
    newString = newString.replace('\'',' ')
    newString = ' '.join(re.sub(r'(https:\/\/[a-zA-Z0-9.\/]*\b)', '', newString).split())  # cleans links
    cleanDuplicatedTweets(resultObject, unRepeatedList, newString)
x = datetime.datetime.now()
f = open('tweets/'+x.strftime("%d%m%y%H%m%S")+'.txt', "a")
counter = 0
while (counter<TWEET_COUNT):
public_tweets = api.search(q='edirne', tweet_mode='extended', count=TWEET_COUNT, max_id = LATEST_ID)
# tag.twitArray(' Sn. Selim Ak ı ziyaret ettik. Misafirperverliklerinden dolayı…')
# tag.twitArray('Sn Şeref Tütüncü ile birlikte Sn.Pakize Uz, Sn Birsen Özgür ve Sn Soner Polat Gazi m…')
# tag.twitArray('Eski Edirne Asfaltı Bolluca sapağı sonrası Arnavutköy yönünde yaralanmalı kaza! Ayrıntılar 104.2 Radyo Trafik')
# tag.twitArray('Aile tanışmamız ısıfırikiikinbinyirmi sinankrg Uzunköprü Edirne')
for tweetIndex in range(len(public_tweets)):
cleantext(public_tweets[tweetIndex])
for tweet in unRepeatedList:
print(tweet)
f.write(tweet)
tag.twitArray(tweet, f)
counter += 1
print('Tweet Count = '+ str(counter))
f.write('Tweet Count = '+ str(counter))
f.close()
|
995,891 | 5a29113efd0d7f395fc9fc17a8040cdcb71a0b88 | from datetime import datetime
from asyncpg import UniqueViolationError
from aiohttp import web
import hashlib
from gino import Gino
db = Gino()
class BaseModel:
@classmethod
async def get_or_404(cls, id):
instance = await cls.get(id)
if instance:
return instance
raise web.HTTPNotFound()
@classmethod
async def delete_or_404(cls, id):
instance = await cls.get(id)
if instance:
await instance.delete()
return id
raise web.HTTPNotFound()
@classmethod
async def create_instance(cls, **kwargs):
try:
instance = await cls.create(**kwargs)
except UniqueViolationError:
raise web.HTTPBadRequest()
return instance
class User(db.Model, BaseModel):
    """Application user stored in the "user" table."""

    __tablename__ = "user"

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64))
    email = db.Column(db.String(120), index=True, unique=True)
    # Holds the hex MD5 digest of the password (set in create_instance).
    password = db.Column(db.String(120))

    def to_dict(self):
        # Never expose the password hash in API responses.
        dict_user = super().to_dict()
        dict_user.pop("password")
        return dict_user

    @classmethod
    async def create_instance(cls, **kwargs):
        # SECURITY: unsalted MD5 is not a safe password hash; a real deployment
        # should use bcrypt/argon2. Flagged only -- changing the algorithm here
        # would invalidate every already-stored hash.
        kwargs['password'] = hashlib.md5(kwargs['password'].encode()).hexdigest()
        return await super().create_instance(**kwargs)
class Post(db.Model, BaseModel):
    """Blog post belonging to a User, stored in the "post" table."""

    __tablename__ = "post"

    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(50))
    text = db.Column(db.String(1000))
    # BUG FIX: the original used default=datetime.isoformat(datetime.utcnow()),
    # which is evaluated ONCE at import time -- every post would be stamped with
    # the server start time. A callable default is evaluated per inserted row.
    created_date = db.Column(
        db.String(50), default=lambda: datetime.utcnow().isoformat()
    )
    user_id = db.Column(db.Integer, db.ForeignKey(User.id))

    def to_dict(self):
        """Serialize the post into a plain JSON-friendly dict."""
        posts = {
            "id": self.id,
            "title": self.title,
            "text": self.text,
            "created_date": str(self.created_date),
            "user_id": self.user_id
        }
        return posts
|
995,892 | 863210ff5fb3a498ee29f000684a0c6393c55586 | import re
from typing import Tuple
re_class_name = re.compile(r"([A-Z]*[a-z]*)")
def convert_class_name(name: str) -> str:
    """
    >>> convert_class_name('ClassName')
    'class_name'
    >>> convert_class_name('ABClassName')
    'abclass_name'
    """
    # Split on capitalised word boundaries, drop empty matches, lower-case each.
    pieces = [token.lower() for token in re_class_name.findall(name) if token]
    return "_".join(pieces)
def class_ref(cls: type) -> str:
    """
    >>> class_ref(int)
    'builtins.int'
    """
    return "{}.{}".format(cls.__module__, cls.__name__)
def expand_class_ref(cls_ref: str) -> Tuple[str, str]:
    """
    >>> expand_class_ref('test.test.Test')
    ('test.test', 'Test')
    """
    # rpartition splits on the LAST dot; the module path may itself contain dots.
    module_path, _, class_name = cls_ref.rpartition(".")
    return module_path, class_name
|
995,893 | 0b6a385e583272d4865dc21fd6751b335e10dd5b | #! /usr/bin/env python3
# f(f(x)) = f(x)
def add_ten(num):
    """Return *num* plus ten (deliberately NOT idempotent: f(f(x)) != f(x))."""
    return 10 + num
# non idempotent example
print(add_ten(add_ten(10)))
print(abs(-10))
# idempotent example
print(abs(abs(-10)) == 10)
|
995,894 | 4bcaf9ed5db498cd2c5d61744ca2276d87cbf6dd | # Camera communication over the internet
# Used to send / receive information about cameras on a network
import socket
import sys
import time
import raspcam.database
import threading
broadcast_message = 'iccom_raspcam_broadcast'
class ICCom:
    """Inter-camera communication over UDP broadcast.

    A hub periodically broadcasts a discovery message on the LAN; cameras that
    hear it reply with their own details, which the hub collects in
    self.foundCameras.
    """

    def __init__(self, isHub, port):
        self.isHub = isHub
        self.port = port
        self.foundCameras = []
        self.foundHub = False
        self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.s.settimeout(5)
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.localCam = raspcam.database.getCamera(raspcam.database.getSetting("localCamera"))

    def broadcast(self, msg):
        """Broadcast *msg* (str) to every host on the local network."""
        self.s.sendto(msg.encode('utf-8'), ('255.255.255.255', self.port))

    def listen(self):
        """Block until one UDP datagram arrives; return (payload_bytes, sender_addr)."""
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # BUG FIX: the original bound to ' ' (a single space), which is not a
        # valid wildcard address; '' binds to all interfaces.
        s.bind(('', self.port))
        print("Listening...")
        return s.recvfrom(1024)

    def send(self, msg, host):
        """Send *msg* (str) directly to *host* on self.port."""
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.sendto(msg.encode("utf-8"), (host, self.port))

    def beginCom(self):
        """Main loop: as hub, collect camera replies; as camera, wait for a hub."""
        if self.isHub:
            # BUG FIX: the broadcast thread was created but never started.
            threading.Thread(target=self.getCameraLoop, daemon=True).start()
            while 1:
                # BUG FIX: listen() returns a (bytes, addr) tuple; the original
                # called .split(",") directly on that tuple. Decode the payload.
                payload, addr = self.listen()
                try:
                    cd = payload.decode('utf-8').split(",")
                    # NOTE(review): raspcam.models is never imported in this file;
                    # the except below silently hid the resulting NameError.
                    # Confirm the intended import.
                    camera = raspcam.models.Camera(cd[0], cd[1], cd[2], cd[3])
                    # BUG FIX: the attribute is foundCameras -- the original
                    # appended to the non-existent self.foundCamera.
                    self.foundCameras.append(camera)
                    print("Camera found on network, adding to system")
                except Exception:
                    print("Failed to gather camera information")
        else:
            print("Waiting for hub...")
            while 1:
                payload, addr = self.listen()
                # BUG FIX: the original compared raw bytes (actually the whole
                # tuple element) against a str, which could never be equal.
                if payload.decode('utf-8') == broadcast_message:
                    # Send our camera data to the hub asking for it.
                    self.foundHub = True
                    # BUG FIX: send() expects a host string; addr is (host, port).
                    self.send(str(self.localCam), addr[0])
                    print("Found hub, sending camera data")

    def getCameraLoop(self):
        """Periodically re-broadcast the discovery message."""
        print("Beginning camera search...")
        while 1:
            time.sleep(5)
            self.broadcast(broadcast_message)
|
995,895 | 3d73a83cfb7b3e52d4f7a56f653c4aea0d8956e0 | import sys
def gcd(a, b):
    """Greatest common divisor via Euclid's algorithm.

    ROBUSTNESS FIX: the original assumed two positive integers and raised
    ZeroDivisionError when the smaller argument was 0. This version handles
    zero and negative inputs and always returns a non-negative result;
    results for positive inputs are unchanged.
    """
    while b:
        a, b = b, a % b
    return abs(a)
def solve():
    """Read N and then N integers from stdin; print the GCD of all of them."""
    input = sys.stdin.readline  # shadow builtin input() with a faster line reader
    N = int(input())  # NOTE(review): N is read but never checked against len(A)
    A = [int(i) for i in input().split()]
    gcdA = A[0]
    # Fold gcd over the list; the first step is gcd(A[0], A[0]) == A[0].
    for a in A: gcdA = gcd(gcdA, a)
    print(gcdA)
    return 0
if __name__ == "__main__":
solve() |
995,896 | 7322c154dd4f48a158856e20b7418c4224607e6d | import shutil
shutil.copy("country.txt","data.txt")
|
995,897 | 65f62bbf5f5fa5e31621e88b89a97b8537474fb2 | from django.apps import AppConfig
class MosiauthConfig(AppConfig):
name = 'apps.mosiauth'
|
995,898 | d99686bbba3788b8dcc496e8e7e301d1d95e4e10 | # coding: utf-8
import os
import pandas as pd
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# funções para mostrar as imagens
import numpy as np
import matplotlib.pyplot as plt
import torchvision
import torchvision.transforms as transforms
ARQUIVO_REDE = 'conv_digitos.pth'
PASTA_IMAGENS = 'digitos'
def carrega_imagem(fname):
img = pd.read_csv(fname, header=None).transpose().squeeze().values
return torch.tensor(img).view(16, 16)
def carrega_imagens(pasta = PASTA_IMAGENS):
pares = []
for digito in range(10):
subpasta = os.path.join(pasta, str(digito))
for arquivo in os.listdir(subpasta):
imagem = carrega_imagem(os.path.join(subpasta, arquivo))
# converte a imagem para o formato de entrada de uma Conv2d
# o tensor tem que ser do tipo float
# no formato [batch_size, input_channels, input_height, input_width]
# unsqueeze(0) cria uma nova dimensão e coloca o conteúdo dentro dela
imagem = imagem.float().unsqueeze(0).unsqueeze(0)
pares.append((imagem, digito))
return pares
# Criação do modelo da rede neural
class CcnModel(nn.Module):
    """Small CNN classifying 16x16 single-channel digit images into 10 classes.

    conv1(5x5) -> maxpool2 -> conv2(5x5) -> maxpool2 -> linear(20 -> 10).
    With a 16x16 input the feature map shrinks to 20 channels of 1x1,
    hence the 20-wide fully-connected layer.
    """

    def __init__(self):
        super(CcnModel, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.mp = nn.MaxPool2d(2)
        self.fc = nn.Linear(20, 10)

    def forward(self, x):
        """x: float tensor [batch, 1, 16, 16] -> log-probabilities [batch, 10]."""
        in_size = x.size(0)
        x = F.relu(self.mp(self.conv1(x)))
        x = F.relu(self.mp(self.conv2(x)))
        x = x.view(in_size, -1)
        x = self.fc(x)
        #return F.relu(x)
        # BUG FIX: log_softmax without dim= is deprecated and guesses the axis;
        # class scores live on dim 1.
        return F.log_softmax(x, dim=1)
net = CcnModel() # Criamos uma instância da rede neural
# Critério para cálculo das perdas
criterion = nn.CrossEntropyLoss()
def treina(dados, max_epochs = 100):
    """Train the module-level `net` on (image, label) pairs, one sample at a time.

    dados: list of (tensor[1, 1, 16, 16], int) pairs from carrega_imagens().
    Uses the module-level `criterion`; stops early once an epoch hits 100%
    accuracy.
    """
    optimizer = optim.Adam(net.parameters(), 0.001)
    for epoch in range(max_epochs):
        total = 0
        for i, d in enumerate(dados):
            # NOTE(review): unpacking via iter() works here but is unusual;
            # it is equivalent to `entrada, label = d`.
            entrada, label = iter(d)
            entrada = Variable(entrada, requires_grad=True)
            label = torch.tensor([label])
            optimizer.zero_grad()
            outputs = net(entrada)
            loss = criterion(outputs, label)
            loss.backward()
            optimizer.step()
            _, predicted = torch.max(outputs.data, 1)
            correct = (predicted == label).sum().item()
            total += correct
        acuracia = (total / len(dados)) * 100
        # `i` and `loss` deliberately carry the values of the LAST sample
        # of the epoch into this summary line.
        print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Accuracy: {:.2f}%'.format(
            epoch + 1, max_epochs, i, len(dados), loss.item(), acuracia
        ))
        if acuracia == 100.0:
            break
def img_show(img):
to_pil = torchvision.transforms.ToPILImage()
img = to_pil(img)
plt.imshow(img)
plt.show()
def carrega_img(caminho):
entrada = carrega_imagem(caminho)
entrada = entrada.float().unsqueeze(0)
input = entrada.unsqueeze(0)
features = net.mp(net.conv2(F.relu(net.mp(net.conv1(input)))))
f_img = features.squeeze()
return f_img
def exibe_imagem():
images = [carrega_img('digitnet/digitos/0/imagem_0.csv'),
carrega_img('digitnet/digitos/1/imagem_0.csv'),
carrega_img('digitnet/digitos/2/imagem_0.csv'),
carrega_img('digitnet/digitos/3/imagem_0.csv'),
carrega_img('digitnet/digitos/4/imagem_0.csv'),
carrega_img('digitnet/digitos/5/imagem_0.csv'),
carrega_img('digitnet/digitos/6/imagem_0.csv'),
carrega_img('digitnet/digitos/7/imagem_0.csv'),
carrega_img('digitnet/digitos/8/imagem_0.csv'),
carrega_img('digitnet/digitos/9/imagem_0.csv')]
# for i in range(20):
# images.append(f_img[i].unsqueeze(0))
img_show(torchvision.utils.make_grid(images, nrow=10))
if __name__ == '__main__':
while True:
print('MENU DE OPÇÕES')
print('(T)reinar a rede')
print('(S)alvar a rede')
print('(E)xibir imagem')
print('(X) sair')
opcao = input('Digite sua opção: ').upper()
if opcao == 'T':
treina(carrega_imagens())
print('rede treinada com sucesso')
elif opcao == 'S':
torch.save(net.state_dict(), ARQUIVO_REDE)
print('rede salva com sucesso')
elif opcao == 'E':
exibe_imagem()
elif opcao == 'X':
break
else:
print('Digite uma opção válida')
|
995,899 | 438e462224e8289184983601aecd51c3e62029c3 | from _typeshed import NoneType
import requests
from bs4 import BeautifulSoup
import pandas as pd
from datetime import date
import sqlite3
def data_entry(con, code, name, price, date):
    """Record *price* for product *code* on *date*.

    Maintains a registry table "Product Code Details" of (name, code) pairs and
    one per-product price-history table named after the product code, created
    on first sight of a code. Commits after each statement, like the original.
    """
    # SECURITY: the per-product table name is interpolated into SQL, because
    # placeholders cannot be used for identifiers. The scraped code must be a
    # plain identifier or this is SQL injection. (Assumes real product codes
    # are alphanumeric starting with a letter -- TODO confirm.)
    if not str(code).isidentifier():
        raise ValueError("unsafe product code for table name: %r" % (code,))

    cur = con.cursor()
    cur.execute(""" SELECT * FROM "Product Code Details" """)
    known = cur.fetchall()

    if (name, code) not in known:
        # First time we see this product: register it and create its table.
        con.cursor().execute(""" INSERT INTO "Product Code Details" VALUES(?,?)""", (name, code))
        con.commit()
        con.cursor().execute(""" CREATE TABLE {}("Date" DATE,"Product Price" INTEGER) """.format(code))
        con.commit()

    # Both branches of the original ended with the same INSERT; do it once.
    con.cursor().execute(""" INSERT INTO {} VALUES(?,?)""".format(code), (date, price))
    con.commit()
def dacorator_for_productname(name):
    """Split a scraped product title into (code, display name).

    The last comma-separated chunk is treated as the model code (its '-'
    separators are dropped); everything before it is re-joined with spaces
    as the product name.
    """
    parts = str(name).replace("\n", "").split(",")
    code = "".join(str(piece) for piece in parts[-1].split("-"))
    product_name = " ".join(str(piece) for piece in parts[:-1])
    return code, product_name
def dacorator_for_productprice(price):
    """Parse a scraped price string into a float amount in rupees.

    Handles single prices and "low-high" ranges in either rupees (returned
    as-is) or dollars (converted at the hard-coded 74.19 INR/USD rate);
    a range yields the midpoint. Returns None for strings matching neither
    currency (the original fell off the end implicitly; now explicit).
    """
    USD_TO_INR = 74.19
    parts = str(price).split("-")
    if len(parts) == 1:
        if "₹" in parts[0]:
            return float(parts[0].replace("₹", "").replace(",", ""))
        if "$" in parts[0]:
            # BUG FIX: the original did not strip thousands separators in the
            # dollar branch, so "$1,299" crashed float().
            return USD_TO_INR * float(parts[0].replace("$", "").replace(",", ""))
    elif len(parts) == 2:
        if "₹" in parts[0] and "₹" in parts[1]:
            low = float(parts[0].replace("₹", "").replace(",", ""))
            high = float(parts[1].replace("₹", "").replace(",", ""))
            return (low + high) / 2
        if "$" in parts[0]:
            # BUG FIX: the original read parts[0] twice, so a dollar range's
            # "midpoint" was always just the lower price.
            low = float(parts[0].replace("$", "").replace(",", ""))
            high = float(parts[1].replace("$", "").replace(",", ""))
            return USD_TO_INR * (low + high) / 2
    return None
def product_price_ids():
    """Return the element id of the first price block present on the page, or None.

    BUG FIX: the original tested `price is not NoneType`, comparing a string
    against the NoneType *type* (imported from _typeshed, which does not even
    exist at runtime) -- always true, so the first id was always returned, and
    `.get_text()` crashed with AttributeError whenever that element was absent.
    Now the element itself is checked for existence before returning its id.
    """
    ids = ["priceblock_ourprice", "priceblock_dealprice"]
    for ID in ids:
        if soup.find(id=ID) is not None:
            return str(ID)
    return None
# --- scraper entry point -----------------------------------------------------
URL = "https://www.amazon.in/ASUS-i7-10750H-ScreenPad-Celestial-UX581LV-XS74T/dp/B08D941WH6/ref=pd_sbs_7/258-1968671-9530502?pd_rd_w=zTJNT&pf_rd_p=18688541-e961-44b9-b86a-bd9b8fa83027&pf_rd_r=8S1D8FT6AVF007ZE0M4W&pd_rd_r=d138c222-d7ff-47b7-ae3c-df5f12848aa8&pd_rd_wg=GpR5H&pd_rd_i=B08D941WH6&psc=1"
Headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36"}
page = requests.get(URL, headers=Headers)
soup = BeautifulSoup(page.content, "html.parser")
con = sqlite3.connect("C:\\Users\\evil1\\Desktop\\customer\\product data vise price details.db")

product_name = soup.find(id="productTitle").get_text()
# BUG FIX: the original passed the *function object* product_price_ids as the
# id to search for (soup.find(id=product_price_ids)), which never matches the
# intended element. Call it to obtain the actual id, then fetch that element.
product_price = soup.find(id=product_price_ids()).get_text()
todaydates = date.today()

product_code, product_name = dacorator_for_productname(product_name)
product_price = dacorator_for_productprice(product_price)
data_entry(con, product_code, product_name, product_price, todaydates)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.