blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9a79b658c9c411eb6db1123df6286153aa342245 | ca020020c60ce4c65b3ceba8c9b405ef5d4bbac2 | /manage.py | 0dc0a269164d3d0c0f98c51cd976b1da2b9d2ce6 | [] | no_license | akononen/filmswithfriends | 623140460b57b52fc5061bbde4a2d379704c794c | e90d97d974c0f3c3fcab1500dd1f724264b2524d | refs/heads/master | 2020-04-21T02:23:39.011541 | 2019-05-03T06:06:06 | 2019-05-03T06:06:06 | 169,252,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless the caller already
    # configured a settings module via the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "filmswithfriends.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint; `from exc` keeps the original
        # traceback chained for debugging.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line
    # (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| [
"aleksi.kononen@student.tut.fi"
] | aleksi.kononen@student.tut.fi |
533952e656f8071b071330c312ac496fb79bd4c4 | e5c20767e48e579c3ab8e45c581476e1c4575243 | /3DSLICER/4.2.2/binaries/lib/Slicer-4.2/qt-scripted-modules/RSNA2012ProstateDemo.py | 9478223e572335f16981170686d96f4cf4cfb418 | [] | no_license | leoliuf/Neuroimaging | f8e01bf49026e4a2acc95e1c78219c25ea27a46f | 391777650c10449f56ecd32707c03263b98391db | refs/heads/master | 2023-03-01T22:05:57.745322 | 2015-06-19T02:18:32 | 2015-06-19T02:18:32 | 67,168,526 | 0 | 1 | null | 2016-09-01T21:44:50 | 2016-09-01T21:44:50 | null | UTF-8 | Python | false | false | 7,589 | py | import os
import unittest
from __main__ import vtk, qt, ctk, slicer
#
# RSNA2012ProstateDemo
#
class RSNA2012ProstateDemo:
  """Scripted-module hook class.

  Slicer instantiates this with the module's parent object and reads the
  metadata attributes assigned here (title, categories, help text, ...).
  It also registers the module's self test with the SelfTests framework.
  """
  def __init__(self, parent):
    # Metadata displayed in Slicer's module selector / about panels.
    parent.title = "RSNA2012ProstateDemo" # TODO make this more human readable by adding spaces
    parent.categories = ["Testing.TestCases"]
    parent.dependencies = []
    parent.contributors = ["Steve Pieper (Isomics)"] # replace with "Firstname Lastname (Org)"
    parent.helpText = """
    This module was developed as a self test to perform the operations needed for the RSNA 2012 Prostate Demo
    """
    parent.acknowledgementText = """
    This file was originally developed by Steve Pieper, Isomics, Inc. and was partially funded by NIH grant 3P41RR013218-12S1.
""" # replace with organization, grant and thanks.
    self.parent = parent

    # Add this test to the SelfTest module's list for discovery when the module
    # is created. Since this module may be discovered before SelfTests itself,
    # create the list if it doesn't already exist.
    try:
      slicer.selfTests
    except AttributeError:
      slicer.selfTests = {}
    slicer.selfTests['RSNA2012ProstateDemo'] = self.runTest

  def runTest(self):
    # Entry point invoked by the SelfTests framework.
    tester = RSNA2012ProstateDemoTest()
    tester.runTest()
#
# qRSNA2012ProstateDemoWidget
#
class RSNA2012ProstateDemoWidget:
  """GUI for the RSNA2012ProstateDemo module.

  The panel only hosts developer conveniences (Reload / Reload and Test);
  the actual demo runs through RSNA2012ProstateDemoTest.
  """
  def __init__(self, parent = None):
    # With no parent the widget runs stand-alone: build a top-level MRML
    # widget, attach the scene and show it immediately after setup().
    if not parent:
      self.parent = slicer.qMRMLWidget()
      self.parent.setLayout(qt.QVBoxLayout())
      self.parent.setMRMLScene(slicer.mrmlScene)
    else:
      self.parent = parent
    self.layout = self.parent.layout()
    if not parent:
      self.setup()
      self.parent.show()

  def setup(self):
    """Instantiate and connect the widgets shown in the module panel."""
    # reload button
    # (use this during development, but remove it when delivering
    # your module to users)
    self.reloadButton = qt.QPushButton("Reload")
    self.reloadButton.toolTip = "Reload this module."
    # NOTE: this object name is also how onReload() locates the widget's
    # parent in the Qt hierarchy (via findChildren below).
    self.reloadButton.name = "RSNA2012ProstateDemo Reload"
    self.layout.addWidget(self.reloadButton)
    self.reloadButton.connect('clicked()', self.onReload)

    # reload and test button
    # (use this during development, but remove it when delivering
    # your module to users)
    self.reloadAndTestButton = qt.QPushButton("Reload and Test")
    self.reloadAndTestButton.toolTip = "Reload this module and then run the self tests."
    self.layout.addWidget(self.reloadAndTestButton)
    self.reloadAndTestButton.connect('clicked()', self.onReloadAndTest)

    # Add vertical spacer
    self.layout.addStretch(1)

  def onReload(self,moduleName="RSNA2012ProstateDemo"):
    """Generic reload method for any scripted module.
    ModuleWizard will substitute correct default moduleName.
    """
    import imp, sys, os, slicer

    widgetName = moduleName + "Widget"

    # reload the source code
    # - set source file path
    # - load the module to the global space
    filePath = eval('slicer.modules.%s.path' % moduleName.lower())
    p = os.path.dirname(filePath)
    if not sys.path.__contains__(p):
      sys.path.insert(0,p)
    fp = open(filePath, "r")
    globals()[moduleName] = imp.load_module(
        moduleName, fp, filePath, ('.py', 'r', imp.PY_SOURCE))
    fp.close()

    # rebuild the widget
    # - find and hide the existing widget
    # - create a new widget in the existing parent
    parent = slicer.util.findChildren(name='%s Reload' % moduleName)[0].parent()
    for child in parent.children():
      try:
        child.hide()
      except AttributeError:
        # Non-widget children (layouts, spacers) have no hide(); skip them.
        pass
    # Remove spacer items
    item = parent.layout().itemAt(0)
    while item:
      parent.layout().removeItem(item)
      item = parent.layout().itemAt(0)
    # create new widget inside existing parent
    globals()[widgetName.lower()] = eval(
        'globals()["%s"].%s(parent)' % (moduleName, widgetName))
    globals()[widgetName.lower()].setup()

  def onReloadAndTest(self,moduleName="RSNA2012ProstateDemo"):
    # Reload the module's source, then instantiate and run its test case.
    self.onReload()
    evalString = 'globals()["%s"].%sTest()' % (moduleName, moduleName)
    tester = eval(evalString)
    tester.runTest()
#
# RSNA2012ProstateDemoLogic
#
class RSNA2012ProstateDemoLogic:
  """This class should implement all the actual
  computation done by your module.  The interface
  should be such that other python code can import
  this class and make use of the functionality without
  requiring an instance of the Widget
  """
  def __init__(self):
    pass

  def hasImageData(self, volumeNode):
    """Return True when `volumeNode` is a valid node carrying image data.

    Parameters
    ----------
    volumeNode : object or None
        Presumably a vtkMRMLVolumeNode; any object exposing
        GetImageData() works.

    Returns
    -------
    bool
        True if the node exists and GetImageData() is not None.
    """
    if not volumeNode:
      print('no volume node')
      return False
    # Identity check: `is None` instead of `== None` (the original code).
    # `==` could be fooled by objects overriding __eq__; `is` cannot.
    if volumeNode.GetImageData() is None:
      print('no image data')
      return False
    return True
class RSNA2012ProstateDemoTest(unittest.TestCase):
  """
  This is the test case for your scripted module.
  """

  def delayDisplay(self,message,msec=100):
    """This utility method displays a small dialog and waits.
    This does two things: 1) it lets the event loop catch up
    to the state of the test so that rendering and widget updates
    have all taken place before the test continues and 2) it
    shows the user/developer/tester the state of the test
    so that we'll know when it breaks.
    """
    print(message)
    self.info = qt.QDialog()
    self.infoLayout = qt.QVBoxLayout()
    self.info.setLayout(self.infoLayout)
    self.label = qt.QLabel(message,self.info)
    self.infoLayout.addWidget(self.label)
    # Auto-close after `msec` so the modal dialog doesn't stall the test.
    qt.QTimer.singleShot(msec, self.info.close)
    self.info.exec_()

  def setUp(self):
    # Start each run from an empty MRML scene.
    slicer.mrmlScene.Clear(0)

  def runTest(self):
    # Entry point used by the module's Reload-and-Test button.
    self.runTest()  # placeholder removed; see below
| [
"harald.waxenegger@gmail.com"
] | harald.waxenegger@gmail.com |
58c55c37a28dfaf4b6268d6b6d9d66081dbce2b3 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/numpy/2017/8/legendre.py | 5128643cd8480ed49db001f54918bc6838354373 | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 57,260 | py | """
Legendre Series (:mod: `numpy.polynomial.legendre`)
===================================================
.. currentmodule:: numpy.polynomial.legendre
This module provides a number of objects (mostly functions) useful for
dealing with Legendre series, including a `Legendre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
.. autosummary::
:toctree: generated/
legdomain Legendre series default domain, [-1,1].
legzero Legendre series that evaluates identically to 0.
legone Legendre series that evaluates identically to 1.
legx Legendre series for the identity map, ``f(x) = x``.
Arithmetic
----------
.. autosummary::
:toctree: generated/
legmulx multiply a Legendre series in P_i(x) by x.
legadd add two Legendre series.
legsub subtract one Legendre series from another.
legmul multiply two Legendre series.
legdiv divide one Legendre series by another.
legpow raise a Legendre series to a positive integer power
legval evaluate a Legendre series at given points.
legval2d evaluate a 2D Legendre series at given points.
legval3d evaluate a 3D Legendre series at given points.
leggrid2d evaluate a 2D Legendre series on a Cartesian product.
leggrid3d evaluate a 3D Legendre series on a Cartesian product.
Calculus
--------
.. autosummary::
:toctree: generated/
legder differentiate a Legendre series.
legint integrate a Legendre series.
Misc Functions
--------------
.. autosummary::
:toctree: generated/
legfromroots create a Legendre series with specified roots.
legroots find the roots of a Legendre series.
legvander Vandermonde-like matrix for Legendre polynomials.
legvander2d Vandermonde-like matrix for 2D power series.
legvander3d Vandermonde-like matrix for 3D power series.
leggauss Gauss-Legendre quadrature, points and weights.
legweight Legendre weight function.
legcompanion symmetrized companion matrix in Legendre form.
legfit least-squares fit returning a Legendre series.
legtrim trim leading coefficients from a Legendre series.
legline Legendre series representing given straight line.
leg2poly convert a Legendre series to a polynomial.
poly2leg convert a polynomial to a Legendre series.
Classes
-------
Legendre A Legendre series class.
See also
--------
numpy.polynomial.polynomial
numpy.polynomial.chebyshev
numpy.polynomial.laguerre
numpy.polynomial.hermite
numpy.polynomial.hermite_e
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
from . import polyutils as pu
from ._polybase import ABCPolyBase
# Public API re-exported by `from numpy.polynomial.legendre import *`.
__all__ = [
    'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd',
    'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder',
    'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander',
    'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d',
    'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion',
    'leggauss', 'legweight']

# Trimming small trailing coefficients is basis-independent, so the
# generic polyutils implementation is used directly.
legtrim = pu.trimcoef
def poly2leg(pol):
    """
    Convert a polynomial to a Legendre series.

    Converts an array of coefficients relative to the "standard" (power)
    basis, ordered from lowest degree to highest, into the coefficients
    of the equivalent Legendre series, also ordered from lowest to
    highest degree.

    Parameters
    ----------
    pol : array_like
        1-D array containing the polynomial coefficients

    Returns
    -------
    c : ndarray
        1-D array containing the coefficients of the equivalent Legendre
        series.

    See Also
    --------
    leg2poly

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.
    """
    [pol] = pu.as_series([pol])
    # Horner's scheme carried out in the Legendre basis: walk the power
    # coefficients from highest degree down, multiplying the accumulated
    # series by x and adding the next coefficient at each step.
    result = 0
    for coef in pol[::-1]:
        result = legadd(legmulx(result), coef)
    return result
def leg2poly(c):
    """
    Convert a Legendre series to a polynomial.

    Converts an array of Legendre series coefficients, ordered from
    lowest degree to highest, into the coefficients of the equivalent
    polynomial relative to the "standard" (power) basis, also ordered
    from lowest to highest degree.

    Parameters
    ----------
    c : array_like
        1-D array containing the Legendre series coefficients, ordered
        from lowest order term to highest.

    Returns
    -------
    pol : ndarray
        1-D array containing the coefficients of the equivalent polynomial
        (relative to the "standard" basis) ordered from lowest order term
        to highest.

    See Also
    --------
    poly2leg

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.
    """
    from .polynomial import polyadd, polysub, polymulx

    [c] = pu.as_series([c])
    n = len(c)
    # Degree < 2 series have identical coefficients in both bases.
    if n < 3:
        return c
    # Run the three-term Legendre recurrence backwards (Clenshaw-style),
    # accumulating power-basis polynomials; deg is the current degree of c1.
    c0, c1 = c[-2], c[-1]
    for deg in range(n - 1, 1, -1):
        c0, c1 = (polysub(c[deg - 2], (c1*(deg - 1))/deg),
                  polyadd(c0, (polymulx(c1)*(2*deg - 1))/deg))
    return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#

# Legendre series default domain.
legdomain = np.array([-1, 1])

# Legendre coefficients representing zero.
legzero = np.array([0])

# Legendre coefficients representing one.
legone = np.array([1])

# Legendre coefficients representing the identity x.
legx = np.array([0, 1])
def legline(off, scl):
    """
    Legendre series whose graph is a straight line.

    Parameters
    ----------
    off, scl : scalars
        The specified line is given by ``off + scl*x``.

    Returns
    -------
    y : ndarray
        This module's representation of the Legendre series for
        ``off + scl*x``.

    See Also
    --------
    polyline, chebline

    Examples
    --------
    >>> import numpy.polynomial.legendre as L
    >>> L.legline(3,2)
    array([3, 2])
    >>> L.legval(-3, L.legline(3,2)) # should be -3
    -3.0
    """
    # P_0(x) = 1 and P_1(x) = x, so the line has coefficients [off, scl].
    # Drop the degree-1 term when the slope is zero so the result is trimmed.
    if scl == 0:
        return np.array([off])
    return np.array([off, scl])
def legfromroots(roots):
    """
    Generate a Legendre series with given roots.

    Returns the coefficients, in Legendre form, of the monic-up-to-scaling
    polynomial

    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),

    where the `r_n` are the roots given in `roots`.  A root of
    multiplicity n must appear n times.  Roots may be listed in any
    order.  Note the coefficient of the highest Legendre term is not
    generally 1.

    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.

    Returns
    -------
    out : ndarray
        1-D array of coefficients.  If all roots are real then `out` is a
        real array, if some of the roots are complex, then `out` is complex
        even if all the coefficients in the result are real (see Examples
        below).

    See Also
    --------
    polyfromroots, chebfromroots, lagfromroots, hermfromroots,
    hermefromroots.

    Examples
    --------
    >>> import numpy.polynomial.legendre as L
    >>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis
    array([ 0. , -0.4,  0. ,  0.4])
    >>> j = complex(0,1)
    >>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis
    array([ 1.33333333+0.j,  0.00000000+0.j,  0.66666667+0.j])
    """
    if len(roots) == 0:
        return np.ones(1)
    [roots] = pu.as_series([roots], trim=False)
    roots.sort()
    # Build the degree-1 factors (x - r), then combine them pairwise,
    # halving the number of series each pass (balanced product tree
    # keeps the intermediate degrees roughly equal).
    factors = [legline(-r, 1) for r in roots]
    while len(factors) > 1:
        half, odd = divmod(len(factors), 2)
        paired = [legmul(factors[i], factors[i + half]) for i in range(half)]
        if odd:
            paired[0] = legmul(paired[0], factors[-1])
        factors = paired
    return factors[0]
def legadd(c1, c2):
    """
    Add one Legendre series to another.

    Returns the sum of two Legendre series `c1` + `c2`.  The arguments
    are sequences of coefficients ordered from lowest order term to
    highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Legendre series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the Legendre series of their sum.

    See Also
    --------
    legsub, legmul, legdiv, legpow

    Notes
    -----
    The sum of two Legendre series is again a Legendre series (no
    reprojection onto the basis is required), so addition is simply
    component-wise on the coefficient arrays.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> c1 = (1,2,3)
    >>> c2 = (3,2,1)
    >>> L.legadd(c1,c2)
    array([ 4.,  4.,  4.])
    """
    # Work on trimmed copies; accumulate the shorter array into the longer.
    [c1, c2] = pu.as_series([c1, c2])
    if len(c2) > len(c1):
        c1, c2 = c2, c1
    c1[:c2.size] += c2
    return pu.trimseq(c1)
def legsub(c1, c2):
    """
    Subtract one Legendre series from another.

    Returns the difference of two Legendre series `c1` - `c2`.  The
    sequences of coefficients are from lowest order term to highest, i.e.,
    [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Legendre series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Legendre series coefficients representing their difference.

    See Also
    --------
    legadd, legmul, legdiv, legpow

    Notes
    -----
    The difference of two Legendre series is again a Legendre series (no
    reprojection onto the basis is required), so subtraction is simply
    component-wise on the coefficient arrays.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> c1 = (1,2,3)
    >>> c2 = (3,2,1)
    >>> L.legsub(c1,c2)
    array([-2.,  0.,  2.])
    >>> L.legsub(c2,c1) # -C.legsub(c1,c2)
    array([ 2.,  0., -2.])
    """
    # Work on trimmed copies.
    [c1, c2] = pu.as_series([c1, c2])
    if len(c1) >= len(c2):
        # Subtract in place into the longer (or equal-length) array.
        c1[:c2.size] -= c2
        diff = c1
    else:
        # Negate the longer array, then add the shorter one into it.
        diff = -c2
        diff[:c1.size] += c1
    return pu.trimseq(diff)
def legmulx(c):
    """Multiply a Legendre series by x.

    Multiply the Legendre series `c` by x, where x is the independent
    variable.

    Parameters
    ----------
    c : array_like
        1-D array of Legendre series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the result of the multiplication.

    Notes
    -----
    The multiplication uses the recursion relationship for Legendre
    polynomials in the form

    .. math::

      xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1)
    """
    [c] = pu.as_series([c])
    # The zero series maps to itself (special case so the result stays
    # length 1 rather than growing by a degree).
    if len(c) == 1 and c[0] == 0:
        return c

    out = np.empty(len(c) + 1, dtype=c.dtype)
    out[0] = c[0]*0
    out[1] = c[0]
    # Each coefficient c[i] contributes to degrees i+1 and i-1 per the
    # recursion above.
    for i in range(1, len(c)):
        denom = 2*i + 1
        out[i + 1] = (c[i]*(i + 1))/denom
        out[i - 1] += (c[i]*i)/denom
    return out
def legmul(c1, c2):
    """
    Multiply one Legendre series by another.

    Returns the product of two Legendre series `c1` * `c2`.  The arguments
    are sequences of coefficients, from lowest order "term" to highest,
    e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Legendre series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Legendre series coefficients representing their product.

    See Also
    --------
    legadd, legsub, legdiv, legpow

    Notes
    -----
    In general, the (polynomial) product of two C-series results in terms
    that are not in the Legendre polynomial basis set.  Thus, to express
    the product as a Legendre series, it is necessary to "reproject" the
    product onto said basis set, which may produce "unintuitive" (but
    correct) results; see Examples section below.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> c1 = (1,2,3)
    >>> c2 = (3,2)
    >>> L.legmul(c1,c2) # multiplication requires "reprojection"
    array([  4.33333333,  10.4       ,  11.66666667,   3.6       ])
    """
    # s1, s2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])

    # Let the shorter series drive the recurrence below and scale the
    # longer one (xs) by its coefficients.
    if len(c1) > len(c2):
        c = c2
        xs = c1
    else:
        c = c1
        xs = c2

    if len(c) == 1:
        # Constant times series.
        c0 = c[0]*xs
        c1 = 0
    elif len(c) == 2:
        c0 = c[0]*xs
        c1 = c[1]*xs
    else:
        # Clenshaw-style downward recurrence built on the Legendre
        # recursion (n+1)P_{n+1} = (2n+1)xP_n - nP_{n-1}; nd tracks the
        # current degree associated with c1.  NOTE: c0/c1 here shadow the
        # parameters; they hold coefficient *arrays* in the Legendre basis.
        nd = len(c)
        c0 = c[-2]*xs
        c1 = c[-1]*xs
        for i in range(3, len(c) + 1):
            tmp = c0
            nd = nd - 1
            c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd)
            c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd)
    # Combine the two running series: result = c0 + x*c1 (reprojected).
    return legadd(c0, legmulx(c1))
def legdiv(c1, c2):
    """
    Divide one Legendre series by another.

    Returns the quotient-with-remainder of two Legendre series
    `c1` / `c2`.  The arguments are sequences of coefficients from lowest
    order "term" to highest, e.g., [1,2,3] represents the series
    ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Legendre series coefficients ordered from low to
        high.

    Returns
    -------
    quo, rem : ndarrays
        Of Legendre series coefficients representing the quotient and
        remainder.

    See Also
    --------
    legadd, legsub, legmul, legpow

    Notes
    -----
    In general, the (polynomial) division of one Legendre series by another
    results in quotient and remainder terms that are not in the Legendre
    polynomial basis set.  Thus, to express these results as a Legendre
    series, it is necessary to "reproject" the results onto the Legendre
    basis set, which may produce "unintuitive" (but correct) results; see
    Examples section below.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> c1 = (1,2,3)
    >>> c2 = (3,2,1)
    >>> L.legdiv(c1,c2) # quotient "intuitive," remainder not
    (array([ 3.]), array([-8., -4.]))
    >>> c2 = (0,1,2,3)
    >>> L.legdiv(c2,c1) # neither "intuitive"
    (array([-0.07407407,  1.66666667]), array([-1.03703704, -2.51851852]))
    """
    # Work on trimmed copies.
    [c1, c2] = pu.as_series([c1, c2])
    if c2[-1] == 0:
        raise ZeroDivisionError()

    len1 = len(c1)
    len2 = len(c2)
    if len1 < len2:
        # Numerator degree below denominator degree: quotient is zero.
        return c1[:1]*0, c1
    if len2 == 1:
        # Division by a constant series.
        return c1/c2[-1], c1[:1]*0

    # Long division from the highest degree down: at each step lift the
    # divisor by i degrees (P_i * c2 via legmul), cancel the current
    # leading coefficient of the remainder and record the quotient term.
    quo = np.empty(len1 - len2 + 1, dtype=c1.dtype)
    rem = c1
    for i in range(len1 - len2, -1, -1):
        shifted = legmul([0]*i + [1], c2)
        factor = rem[-1]/shifted[-1]
        rem = rem[:-1] - factor*shifted[:-1]
        quo[i] = factor
    return quo, pu.trimseq(rem)
def legpow(c, pow, maxpower=16):
    """Raise a Legendre series to a power.

    Returns the Legendre series `c` raised to the power `pow`.  The
    argument `c` is a sequence of coefficients ordered from low to high,
    i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``

    Parameters
    ----------
    c : array_like
        1-D array of Legendre series coefficients ordered from low to
        high.
    pow : integer
        Power to which the series will be raised
    maxpower : integer, optional
        Maximum power allowed.  This is mainly to limit growth of the
        series to unmanageable size.  Default is 16

    Returns
    -------
    coef : ndarray
        Legendre series of power.

    See Also
    --------
    legadd, legsub, legmul, legdiv
    """
    [c] = pu.as_series([c])
    power = int(pow)
    if power != pow or power < 0:
        raise ValueError("Power must be a non-negative integer.")
    if maxpower is not None and power > maxpower:
        raise ValueError("Power is too large")
    if power == 0:
        return np.array([1], dtype=c.dtype)
    if power == 1:
        return c
    # Simple left fold of legmul; kept as repeated multiplication (rather
    # than square-and-multiply) so rounding matches successive products.
    prd = c
    for _ in range(power - 1):
        prd = legmul(prd, c)
    return prd
def legder(c, m=1, scl=1, axis=0):
    """
    Differentiate a Legendre series.

    Returns the Legendre series coefficients `c` differentiated `m` times
    along `axis`.  At each iteration the result is multiplied by `scl` (the
    scaling factor is for use in a linear change of variable).  The
    coefficients run from low to high degree along each axis, e.g.,
    [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2``.

    Parameters
    ----------
    c : array_like
        Array of Legendre series coefficients.  If c is multidimensional
        the different axis correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Number of derivatives taken, must be non-negative. (Default: 1)
    scl : scalar, optional
        Each differentiation is multiplied by `scl`.  The end result is
        multiplication by ``scl**m``.  This is for use in a linear change
        of variable. (Default: 1)
    axis : int, optional
        Axis over which the derivative is taken. (Default: 0).

        .. versionadded:: 1.7.0

    Returns
    -------
    der : ndarray
        Legendre series of the derivative.

    See Also
    --------
    legint

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> c = (1,2,3,4)
    >>> L.legder(c)
    array([  6.,   9.,  20.])
    >>> L.legder(c, 3)
    array([ 60.])
    """
    c = np.array(c, ndmin=1, copy=1)
    # Promote boolean/integer coefficients to double so division and
    # in-place accumulation behave correctly.
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)

    cnt = int(m)
    iaxis = int(axis)
    if cnt != m:
        raise ValueError("The order of derivation must be integer")
    if cnt < 0:
        raise ValueError("The order of derivation must be non-negative")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    iaxis = normalize_axis_index(iaxis, c.ndim)

    if cnt == 0:
        return c

    # Bring the working axis to the front for the recurrence below.
    c = np.moveaxis(c, iaxis, 0)
    n = len(c)
    if cnt >= n:
        # Differentiating degree-(n-1) data n or more times gives zero.
        c = c[:1]*0
    else:
        for _ in range(cnt):
            n = n - 1
            c *= scl
            deriv = np.empty((n,) + c.shape[1:], dtype=c.dtype)
            # Derivative recurrence: L'_{k} contributes (2k-1)*c[k] at
            # degree k-1 and folds c[k] into c[k-2] for lower terms.
            for k in range(n, 2, -1):
                deriv[k - 1] = (2*k - 1)*c[k]
                c[k - 2] += c[k]
            if n > 1:
                deriv[1] = 3*c[2]
            deriv[0] = c[1]
            c = deriv
    c = np.moveaxis(c, 0, iaxis)
    return c
def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
    """
    Integrate a Legendre series.

    Returns the Legendre series coefficients `c` integrated `m` times from
    `lbnd` along `axis`.  At each iteration the resulting series is
    **multiplied** by `scl` and an integration constant, `k`, is added.
    The scaling factor is for use in a linear change of variable.  ("Buyer
    beware": note that, depending on what one is doing, one may want `scl`
    to be the reciprocal of what one might expect; for more information,
    see the Notes section below.)  The argument `c` is an array of
    coefficients from low to high degree along each axis, e.g., [1,2,3]
    represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]]
    represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) +
    2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.

    Parameters
    ----------
    c : array_like
        Array of Legendre series coefficients. If c is multidimensional the
        different axis correspond to different variables with the degree in
        each axis given by the corresponding index.
    m : int, optional
        Order of integration, must be positive. (Default: 1)
    k : {[], list, scalar}, optional
        Integration constant(s).  The value of the first integral at
        ``lbnd`` is the first value in the list, the value of the second
        integral at ``lbnd`` is the second value, etc.  If ``k == []`` (the
        default), all constants are set to zero.  If ``m == 1``, a single
        scalar can be given instead of a list.
    lbnd : scalar, optional
        The lower bound of the integral. (Default: 0)
    scl : scalar, optional
        Following each integration the result is *multiplied* by `scl`
        before the integration constant is added. (Default: 1)
    axis : int, optional
        Axis over which the integral is taken. (Default: 0).

        .. versionadded:: 1.7.0

    Returns
    -------
    S : ndarray
        Legendre series coefficient array of the integral.

    Raises
    ------
    ValueError
        If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
        ``np.isscalar(scl) == False``.

    See Also
    --------
    legder

    Notes
    -----
    Note that the result of each integration is *multiplied* by `scl`.
    Why is this important to note?  Say one is making a linear change of
    variable :math:`u = ax + b` in an integral relative to `x`.  Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
    :math:`1/a` - perhaps not what one would have first thought.

    Also note that, in general, the result of integrating a C-series needs
    to be "reprojected" onto the C-series basis set.  Thus, typically,
    the result of this function is "unintuitive," albeit correct; see
    Examples section below.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> c = (1,2,3)
    >>> L.legint(c)
    array([ 0.33333333,  0.4       ,  0.66666667,  0.6       ])
    >>> L.legint(c, 3)
    array([  1.66666667e-02,  -1.78571429e-02,   4.76190476e-02,
            -1.73472348e-18,   1.90476190e-02,   9.52380952e-03])
    >>> L.legint(c, k=3)
    array([ 3.33333333,  0.4       ,  0.66666667,  0.6       ])
    >>> L.legint(c, lbnd=-2)
    array([ 7.33333333,  0.4       ,  0.66666667,  0.6       ])
    >>> L.legint(c, scl=2)
    array([ 0.66666667,  0.8       ,  1.33333333,  1.2       ])
    """
    c = np.array(c, ndmin=1, copy=1)
    # Promote boolean/integer coefficients to double for the divisions below.
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    # Allow a single scalar constant when m == 1.
    if not np.iterable(k):
        k = [k]
    cnt, iaxis = [int(t) for t in [m, axis]]
    if cnt != m:
        raise ValueError("The order of integration must be integer")
    if cnt < 0:
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt:
        raise ValueError("Too many integration constants")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    iaxis = normalize_axis_index(iaxis, c.ndim)

    if cnt == 0:
        return c

    # Bring the working axis to the front; pad constants with zeros so
    # there is one per integration pass.
    c = np.moveaxis(c, iaxis, 0)
    k = list(k) + [0]*(cnt - len(k))
    for i in range(cnt):
        n = len(c)
        c *= scl
        if n == 1 and np.all(c[0] == 0):
            # Integrating the zero series just adds the constant.
            c[0] += k[i]
        else:
            tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
            # Antiderivative recurrence: integral of L_j spreads c[j]
            # onto degrees j+1 and j-1 with weight 1/(2j+1).
            tmp[0] = c[0]*0
            tmp[1] = c[0]
            if n > 1:
                tmp[2] = c[1]/3
            for j in range(2, n):
                t = c[j]/(2*j + 1)
                tmp[j + 1] = t
                tmp[j - 1] -= t
            # Fix the constant term so the integral equals k[i] at lbnd.
            tmp[0] += k[i] - legval(lbnd, tmp)
            c = tmp
    c = np.moveaxis(c, 0, iaxis)
    return c
def legval(x, c, tensor=True):
    """Evaluate a Legendre series at points x.

    For coefficients ``c`` of length ``n + 1`` this computes
    ``c[0]*L_0(x) + c[1]*L_1(x) + ... + c[n]*L_n(x)`` using Clenshaw
    recursion (synthetic division), which is numerically stable.

    Parameters
    ----------
    x : array_like, compatible object
        Evaluation points. Lists/tuples are converted to ndarrays; any
        other object is treated as a scalar and must support addition and
        multiplication with itself and with the coefficients.
    c : array_like
        Coefficients ordered from degree 0 upward; extra dimensions
        enumerate multiple series.
    tensor : bool, optional
        When True (default) the coefficient array is broadcast against
        every element of `x`; when False, `x` is broadcast over the
        trailing dimensions of `c`.

    Returns
    -------
    values : ndarray, algebra_like
        Evaluated series; shape as described above.
    """
    coef = np.array(c, ndmin=1, copy=0)
    # Promote boolean/integer coefficient arrays to double precision.
    if coef.dtype.char in '?bBhHiIlLqQpP':
        coef = coef.astype(np.double)
    if isinstance(x, (tuple, list)):
        x = np.asarray(x)
    if tensor and isinstance(x, np.ndarray):
        # Append one axis per dimension of x so coef broadcasts over it.
        coef = coef.reshape(coef.shape + (1,) * x.ndim)

    nterms = len(coef)
    if nterms == 1:
        lo, hi = coef[0], 0
    elif nterms == 2:
        lo, hi = coef[0], coef[1]
    else:
        # Clenshaw downward recursion using the Legendre three-term
        # relation (n+1)L_{n+1} = (2n+1) x L_n - n L_{n-1}.
        deg = nterms
        lo, hi = coef[-2], coef[-1]
        for k in range(3, nterms + 1):
            prev = lo
            deg = deg - 1
            lo = coef[-k] - (hi * (deg - 1)) / deg
            hi = prev + (hi * x * (2 * deg - 1)) / deg
    return lo + hi * x
def legval2d(x, y, c):
    """Evaluate a 2-D Legendre series at points (x, y).

    Computes ``p(x, y) = sum_{i,j} c[i, j] * L_i(x) * L_j(y)`` where the
    points are formed from pairs of corresponding elements of `x` and `y`,
    which must be broadcast-compatible.

    Parameters
    ----------
    x, y : array_like, compatible objects
        Evaluation points; must share a shape after conversion.
    c : array_like
        Coefficient array with ``c[i, j]`` multiplying ``L_i(x)*L_j(y)``;
        trailing dimensions enumerate multiple series.

    Returns
    -------
    values : ndarray, compatible object
        Values of the 2-D series at the paired points.

    Raises
    ------
    ValueError
        If `x` and `y` cannot be combined into a single array.
    """
    try:
        x, y = np.array((x, y), copy=0)
    except Exception:
        raise ValueError('x, y are incompatible')
    # Evaluate along the first index, then collapse the second without
    # re-broadcasting (tensor=False).
    vals = legval(x, c)
    return legval(y, vals, tensor=False)
def leggrid2d(x, y, c):
    """Evaluate a 2-D Legendre series on the Cartesian product of x and y.

    Computes ``p(a, b) = sum_{i,j} c[i, j] * L_i(a) * L_j(b)`` for every
    pair ``(a, b)`` with ``a`` drawn from `x` and ``b`` from `y`. The
    result forms a grid with `x` varying along the first axis and `y`
    along the second.

    Parameters
    ----------
    x, y : array_like, compatible objects
        Grid coordinates; lists/tuples are converted to ndarrays, other
        non-ndarray objects are treated as scalars.
    c : array_like
        Coefficient array with ``c[i, j]`` multiplying ``L_i(a)*L_j(b)``.

    Returns
    -------
    values : ndarray, compatible object
        Series values on the Cartesian product grid.
    """
    # Two successive tensor evaluations: the second call appends the y
    # axes after the x axes, producing the product grid.
    return legval(y, legval(x, c))
def legval3d(x, y, z, c):
    """Evaluate a 3-D Legendre series at points (x, y, z).

    Computes ``p(x, y, z) = sum_{i,j,k} c[i, j, k] * L_i(x) * L_j(y) * L_k(z)``
    at points formed from triples of corresponding elements of `x`, `y`,
    and `z`, which must be broadcast-compatible.

    Parameters
    ----------
    x, y, z : array_like, compatible object
        Evaluation points; must share a shape after conversion.
    c : array_like
        Coefficient array with ``c[i, j, k]`` multiplying
        ``L_i(x)*L_j(y)*L_k(z)``; extra dimensions enumerate multiple
        series.

    Returns
    -------
    values : ndarray, compatible object
        Values of the 3-D series at the paired points.

    Raises
    ------
    ValueError
        If `x`, `y`, and `z` cannot be combined into a single array.
    """
    try:
        x, y, z = np.array((x, y, z), copy=0)
    except Exception:
        raise ValueError('x, y, z are incompatible')
    # Fold in one coordinate at a time; tensor=False keeps the later
    # evaluations aligned with the axes already consumed.
    vals = legval(x, c)
    vals = legval(y, vals, tensor=False)
    return legval(z, vals, tensor=False)
def leggrid3d(x, y, z, c):
    """Evaluate a 3-D Legendre series on the Cartesian product of x, y, z.

    Computes ``p(a, b, c) = sum_{i,j,k} c[i, j, k] * L_i(a) * L_j(b) * L_k(c)``
    over all triples with ``a`` from `x`, ``b`` from `y`, and ``c`` from
    `z`. The result is a grid with `x` along the first axis, `y` along
    the second, and `z` along the third.

    Parameters
    ----------
    x, y, z : array_like, compatible objects
        Grid coordinates; lists/tuples are converted to ndarrays, other
        non-ndarray objects are treated as scalars.
    c : array_like
        Coefficient array with ``c[i, j, k]`` multiplying
        ``L_i(a)*L_j(b)*L_k(c)``.

    Returns
    -------
    values : ndarray, compatible object
        Series values on the Cartesian product grid.
    """
    # Successive tensor evaluations append each coordinate's axes in turn.
    return legval(z, legval(y, legval(x, c)))
def legvander(x, deg):
    """Pseudo-Vandermonde matrix of given degree.

    Builds the matrix ``V[..., i] = L_i(x)`` for ``0 <= i <= deg``. With a
    1-D coefficient array ``c`` of length ``n + 1`` and
    ``V = legvander(x, n)``, the product ``np.dot(V, c)`` matches
    ``legval(x, c)`` up to roundoff — handy for least squares fits and for
    evaluating many series at the same sample points.

    Parameters
    ----------
    x : array_like
        Sample points; converted to float64 or complex128 and promoted to
        at least 1-D.
    deg : int
        Degree of the resulting matrix; must be a non-negative integer.

    Returns
    -------
    vander : ndarray
        Matrix of shape ``x.shape + (deg + 1,)`` whose last index is the
        Legendre degree; dtype matches the converted `x`.

    Raises
    ------
    ValueError
        If `deg` is not a non-negative integer.
    """
    order = int(deg)
    if order != deg:
        raise ValueError("deg must be integer")
    if order < 0:
        raise ValueError("deg must be non-negative")

    pts = np.array(x, copy=0, ndmin=1) + 0.0
    vmat = np.empty((order + 1,) + pts.shape, dtype=pts.dtype)
    # Fill by forward recursion: slightly less accurate than backward
    # recursion here, but cheaper.
    vmat[0] = pts * 0 + 1
    if order > 0:
        vmat[1] = pts
        for k in range(2, order + 1):
            vmat[k] = (vmat[k - 1] * pts * (2 * k - 1) - vmat[k - 2] * (k - 1)) / k
    return np.moveaxis(vmat, 0, -1)
def legvander2d(x, y, deg):
    """Pseudo-Vandermonde matrix of given degrees for 2-D sample points.

    Builds ``V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y)`` for
    ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The columns of `V`
    match a flattened 2-D coefficient array, so ``np.dot(V, c.flat)``
    agrees with ``legval2d(x, y, c)`` up to roundoff.

    Parameters
    ----------
    x, y : array_like
        Point coordinates of identical shape; converted to float64 or
        complex128.
    deg : list of ints
        Maximum degrees ``[x_deg, y_deg]``.

    Returns
    -------
    vander2d : ndarray
        Matrix of shape ``x.shape + ((deg[0]+1)*(deg[1]+1),)`` with dtype
        matching the converted coordinates.

    Raises
    ------
    ValueError
        If either degree is not a non-negative integer.
    """
    degrees = [int(d) for d in deg]
    checks = [n == d and n >= 0 for n, d in zip(degrees, deg)]
    if checks != [1, 1]:
        raise ValueError("degrees must be non-negative integers")
    degx, degy = degrees
    x, y = np.array((x, y), copy=0) + 0.0

    vx = legvander(x, degx)
    vy = legvander(y, degy)
    # Outer product over the two degree axes, then flatten them together.
    outer = vx[..., None] * vy[..., None, :]
    return outer.reshape(outer.shape[:-2] + (-1,))
def legvander3d(x, y, z, deg):
    """Pseudo-Vandermonde matrix of given degrees for 3-D sample points.

    With degrees ``l, m, n`` in ``x, y, z``, builds
    ``V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z)``. The
    columns of `V` match a flattened 3-D coefficient array, so
    ``np.dot(V, c.flat)`` agrees with ``legval3d(x, y, z, c)`` up to
    roundoff.

    Parameters
    ----------
    x, y, z : array_like
        Point coordinates of identical shape; converted to float64 or
        complex128.
    deg : list of ints
        Maximum degrees ``[x_deg, y_deg, z_deg]``.

    Returns
    -------
    vander3d : ndarray
        Matrix of shape ``x.shape + ((deg[0]+1)*(deg[1]+1)*(deg[2]+1),)``
        with dtype matching the converted coordinates.

    Raises
    ------
    ValueError
        If any degree is not a non-negative integer.
    """
    degrees = [int(d) for d in deg]
    checks = [n == d and n >= 0 for n, d in zip(degrees, deg)]
    if checks != [1, 1, 1]:
        raise ValueError("degrees must be non-negative integers")
    degx, degy, degz = degrees
    x, y, z = np.array((x, y, z), copy=0) + 0.0

    vx = legvander(x, degx)
    vy = legvander(y, degy)
    vz = legvander(z, degz)
    # Triple outer product over the degree axes, flattened into one axis.
    outer = vx[..., None, None] * vy[..., None, :, None] * vz[..., None, None, :]
    return outer.reshape(outer.shape[:-3] + (-1,))
def legfit(x, y, deg, rcond=None, full=False, w=None):
    """
    Least squares fit of Legendre series to data.
    Return the coefficients of a Legendre series of degree `deg` that is the
    least squares fit to the data values `y` given at points `x`. If `y` is
    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
    fits are done, one for each column of `y`, and the resulting
    coefficients are stored in the corresponding columns of a 2-D return.
    The fitted polynomial(s) are in the form
    .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
    where `n` is `deg`.
    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int or 1-D array_like
        Degree(s) of the fitting polynomials. If `deg` is a single integer
        all terms up to and including the `deg`'th term are included in the
        fit. For NumPy versions >= 1.11.0 a list of integers specifying the
        degrees of the terms to include may be used instead.
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        this relative to the largest singular value will be ignored. The
        default value is len(x)*eps, where eps is the relative precision of
        the float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When it is False (the
        default) just the coefficients are returned, when True diagnostic
        information from the singular value decomposition is also returned.
    w : array_like, shape (`M`,), optional
        Weights. If not None, the contribution of each point
        ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
        weights are chosen so that the errors of the products ``w[i]*y[i]``
        all have the same variance. The default value is None.
        .. versionadded:: 1.5.0
    Returns
    -------
    coef : ndarray, shape (M,) or (M, K)
        Legendre coefficients ordered from low to high. If `y` was
        2-D, the coefficients for the data in column k of `y` are in
        column `k`. If `deg` is specified as a list, coefficients for
        terms not included in the fit are set equal to zero in the
        returned `coef`.
    [residuals, rank, singular_values, rcond] : list
        These values are only returned if `full` = True
        resid -- sum of squared residuals of the least squares fit
        rank -- the numerical rank of the scaled Vandermonde matrix
        sv -- singular values of the scaled Vandermonde matrix
        rcond -- value of `rcond`.
        For more details, see `linalg.lstsq`.
    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if `full` = False. The
        warnings can be turned off by
        >>> import warnings
        >>> warnings.simplefilter('ignore', RankWarning)
    See Also
    --------
    chebfit, polyfit, lagfit, hermfit, hermefit
    legval : Evaluates a Legendre series.
    legvander : Vandermonde matrix of Legendre series.
    legweight : Legendre weight function (= 1).
    linalg.lstsq : Computes a least-squares fit from the matrix.
    scipy.interpolate.UnivariateSpline : Computes spline fits.
    Notes
    -----
    The solution is the coefficients of the Legendre series `p` that
    minimizes the sum of the weighted squared errors
    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
    where :math:`w_j` are the weights. This problem is solved by setting up
    as the (typically) overdetermined matrix equation
    .. math:: V(x) * c = w * y,
    where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
    coefficients to be solved for, `w` are the weights, and `y` are the
    observed values. This equation is then solved using the singular value
    decomposition of `V`.
    If some of the singular values of `V` are so small that they are
    neglected, then a `RankWarning` will be issued. This means that the
    coefficient values may be poorly determined. Using a lower order fit
    will usually get rid of the warning. The `rcond` parameter can also be
    set to a value smaller than its default, but the resulting fit may be
    spurious and have large contributions from roundoff error.
    Fits using Legendre series are usually better conditioned than fits
    using power series, but much can depend on the distribution of the
    sample points and the smoothness of the data. If the quality of the fit
    is inadequate splines may be a good alternative.
    References
    ----------
    .. [1] Wikipedia, "Curve fitting",
        http://en.wikipedia.org/wiki/Curve_fitting
    Examples
    --------
    """
    # Force floating point so integer inputs don't truncate downstream.
    x = np.asarray(x) + 0.0
    y = np.asarray(y) + 0.0
    deg = np.asarray(deg)
    # check arguments.
    if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
        raise TypeError("deg must be an int or non-empty 1-D array of int")
    if deg.min() < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")
    if deg.ndim == 0:
        # Scalar degree: fit every term 0..deg.
        lmax = deg
        order = lmax + 1
        van = legvander(x, lmax)
    else:
        # Explicit term list: build the full Vandermonde matrix and keep
        # only the requested degree columns.
        deg = np.sort(deg)
        lmax = deg[-1]
        order = len(deg)
        van = legvander(x, lmax)[:, deg]
    # set up the least squares matrices in transposed form
    lhs = van.T
    rhs = y.T
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
        # apply weights. Don't use inplace operations as they
        # can cause problems with NA.
        lhs = lhs * w
        rhs = rhs * w
    # set rcond
    if rcond is None:
        rcond = len(x)*np.finfo(x.dtype).eps
    # Determine the norms of the design matrix columns.
    if issubclass(lhs.dtype.type, np.complexfloating):
        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
    else:
        scl = np.sqrt(np.square(lhs).sum(1))
    # Guard against division by zero for all-zero columns.
    scl[scl == 0] = 1
    # Solve the least squares problem, normalizing columns to improve the
    # conditioning of the system, then undo the scaling on the solution.
    c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
    c = (c.T/scl).T
    # Expand c to include non-fitted coefficients which are set to zero
    if deg.ndim > 0:
        if c.ndim == 2:
            cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
        else:
            cc = np.zeros(lmax+1, dtype=c.dtype)
        cc[deg] = c
        c = cc
    # warn on rank reduction
    if rank != order and not full:
        msg = "The fit may be poorly conditioned"
        warnings.warn(msg, pu.RankWarning, stacklevel=2)
    if full:
        return c, [resids, rank, s, rcond]
    else:
        return c
def legcompanion(c):
    """Return the scaled companion matrix of c.
    The basis polynomials are scaled so that the companion matrix is
    symmetric when `c` is an Legendre basis polynomial. This provides
    better eigenvalue estimates than the unscaled case and for basis
    polynomials the eigenvalues are guaranteed to be real if
    `numpy.linalg.eigvalsh` is used to obtain them.
    Parameters
    ----------
    c : array_like
        1-D array of Legendre series coefficients ordered from low to high
        degree.
    Returns
    -------
    mat : ndarray
        Scaled companion matrix of dimensions (deg, deg).
    Raises
    ------
    ValueError
        If the trimmed series has degree < 1 (no companion matrix exists).
    Notes
    -----
    .. versionadded::1.7.0
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) < 2:
        raise ValueError('Series must have maximum degree of at least 1.')
    if len(c) == 2:
        # Degree-1 series: the single root is solved directly.
        return np.array([[-c[0]/c[1]]])
    n = len(c) - 1
    mat = np.zeros((n, n), dtype=c.dtype)
    # Symmetrizing scale factors 1/sqrt(2k+1) for each basis polynomial.
    scl = 1./np.sqrt(2*np.arange(n) + 1)
    # `top` and `bot` are strided views onto the super- and sub-diagonal of
    # `mat`; writing through them fills the matrix in place.
    top = mat.reshape(-1)[1::n+1]
    bot = mat.reshape(-1)[n::n+1]
    top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n]
    bot[...] = top
    # Fold the monic-normalized coefficients into the last column.
    mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1))
    return mat
def legroots(c):
    """
    Compute the roots of a Legendre series.

    Returns the zeros of ``p(x) = sum_i c[i] * L_i(x)``.

    Parameters
    ----------
    c : 1-D array_like
        1-D array of coefficients.

    Returns
    -------
    out : ndarray
        Sorted roots of the series. Real if all roots are real, complex
        otherwise.

    Notes
    -----
    Roots are obtained as eigenvalues of the scaled companion matrix.
    Roots far from the origin, or with multiplicity greater than one, may
    carry larger numerical error; isolated roots near the origin can be
    polished with a few Newton iterations. Because Legendre basis
    polynomials are not powers of ``x``, the coefficients may look
    unintuitive relative to the roots.

    Examples
    --------
    >>> import numpy.polynomial.legendre as leg
    >>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots
    array([-0.85099543, -0.11407192,  0.51506735])
    """
    # Trim trailing zeros so the degree reflects the true leading term.
    [c] = pu.as_series([c])
    degree = len(c) - 1
    if degree < 1:
        # Constant (or empty) series: no roots.
        return np.array([], dtype=c.dtype)
    if degree == 1:
        # Linear series: solve directly.
        return np.array([-c[0]/c[1]])
    eigs = la.eigvals(legcompanion(c))
    eigs.sort()
    return eigs
def leggauss(deg):
    """
    Gauss-Legendre quadrature.
    Computes the sample points and weights for Gauss-Legendre quadrature.
    These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
    the weight function :math:`f(x) = 1`.
    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.
    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.
    Raises
    ------
    ValueError
        If `deg` is not an integer or is < 1.
    Notes
    -----
    .. versionadded::1.7.0
    The results have only been tested up to degree 100, higher degrees may
    be problematic. The weights are determined by using the fact that
    .. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
    where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
    is the k'th root of :math:`L_n`, and then scaling the results to get
    the right value when integrating 1.
    """
    ideg = int(deg)
    if ideg != deg or ideg < 1:
        # Bug fix: the check rejects deg < 1 (including 0), so the message
        # must ask for a *positive* integer; it previously said
        # "non-negative", which contradicted the actual requirement.
        raise ValueError("deg must be a positive integer")
    # first approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    c = np.array([0]*deg + [1])
    m = legcompanion(c)
    x = la.eigvalsh(m)
    # improve roots by one application of Newton
    dy = legval(x, c)
    df = legval(x, legder(c))
    x -= dy/df
    # compute the weights. We scale the factor to avoid possible numerical
    # overflow. c[1:] drops the leading zero, so fm holds L_{n-1}(x).
    fm = legval(x, c[1:])
    fm /= np.abs(fm).max()
    df /= np.abs(df).max()
    w = 1/(fm * df)
    # for Legendre we can also symmetrize
    w = (w + w[::-1])/2
    x = (x - x[::-1])/2
    # scale w to get the right value (the weights must integrate 1 to 2)
    w *= 2. / w.sum()
    return x, w
def legweight(x):
    """
    Weight function of the Legendre polynomials.

    The weight function is :math:`1` on the interval :math:`[-1, 1]`;
    Legendre polynomials are orthogonal (but not normalized) with respect
    to it.

    Parameters
    ----------
    x : array_like
        Values at which the weight function will be computed.

    Returns
    -------
    w : ndarray
        The weight function at `x` — an array of ones with the shape
        (and broadcast behavior) of `x`.

    Notes
    -----
    .. versionadded::1.7.0
    """
    # x*0.0 + 1.0 yields ones while preserving x's shape/dtype promotion.
    return x*0.0 + 1.0
#
# Legendre series class
#
class Legendre(ABCPolyBase):
    """A Legendre series class.
    The Legendre class provides the standard Python numerical methods
    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
    attributes and methods listed in the `ABCPolyBase` documentation.
    Parameters
    ----------
    coef : array_like
        Legendre coefficients in order of increasing degree, i.e.,
        ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``.
    domain : (2,) array_like, optional
        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
        to the interval ``[window[0], window[1]]`` by shifting and scaling.
        The default value is [-1, 1].
    window : (2,) array_like, optional
        Window, see `domain` for its use. The default value is [-1, 1].
        .. versionadded:: 1.6.0
    """
    # Virtual Functions
    # Hooks that ABCPolyBase dispatches to for arithmetic, evaluation,
    # calculus, fitting, and root finding in the Legendre basis.
    _add = staticmethod(legadd)
    _sub = staticmethod(legsub)
    _mul = staticmethod(legmul)
    _div = staticmethod(legdiv)
    _pow = staticmethod(legpow)
    _val = staticmethod(legval)
    _int = staticmethod(legint)
    _der = staticmethod(legder)
    _fit = staticmethod(legfit)
    _line = staticmethod(legline)
    _roots = staticmethod(legroots)
    _fromroots = staticmethod(legfromroots)
    # Virtual properties
    nickname = 'leg'
    domain = np.array(legdomain)
    # NOTE(review): window is initialized from `legdomain`; upstream NumPy
    # uses a separate `legwindow` constant here. Both are documented as
    # [-1, 1], so the value should be identical — confirm intent.
    window = np.array(legdomain)
| [
"rodrigosoaresilva@gmail.com"
] | rodrigosoaresilva@gmail.com |
5e81c170611b6892c281a43dccde965414035113 | c7cf3ea88e562a641a89545b82300368cbd106f1 | /image_rotation.py | 86eee779543101c993b63968695c07b93c31c7e8 | [] | no_license | garimellahoney2/OpencvLab | 6c5fafd0bde443e078edd445d18b9cf2231735ec | 65f34834496855fc638c7ce331e3275a6a731cec | refs/heads/master | 2022-12-13T06:54:31.931345 | 2020-09-16T12:09:58 | 2020-09-16T12:09:58 | 274,964,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | #https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.html#transformations
import cv2

# Load the source image in grayscale (flag 0); the rotation matrix below
# assumes a single-channel image so img.shape unpacks to exactly two values.
img = cv2.imread(r"C:\Users\Personal\Documents\3rdyear\lab_manuals\noised_image.jpg", 0)
rows, cols = img.shape

# Spin the image continuously, one degree per frame, until 'q' is pressed.
quit_requested = False
while not quit_requested:
    for angle in range(1, 361):
        # Affine rotation about the image centre, no scaling (scale = 1).
        rot = cv2.getRotationMatrix2D((cols / 2, rows / 2), angle, 1)
        frame = cv2.warpAffine(img, rot, (cols, rows))
        cv2.imshow('hello', frame)
        # waitKey returns 32 bits; mask to the low byte before comparing.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            quit_requested = True
            break
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | noreply@github.com |
b3ab80fc9ff47764f6c0bf07ebfada6f13074ce2 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_355/ch131_2020_04_01_17_55_42_755784.py | 4c58703c96d166274d0324a80fa7cc166fe51e65 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | import random
# Guessing game: the player buys guesses and tries to bracket the secret sum
# of two random numbers between 1 and 10.
a = random.randint(1, 10)
b = random.randint(1, 10)
s = a + b
contador = 0
print("Vc tem 10 dinheiros")
# Bug fix: input() returns str; without int() the comparisons below raised
# TypeError (str vs int) on Python 3.
chutes = int(input("quantos chutes quer comprar?"))
while contador < chutes:
    pri_numero = int(input("aposte 1 numero"))
    # Bug fix: this was assigned to `seg_numeros` but read as `seg_numero`,
    # raising NameError on the first comparison.
    seg_numero = int(input("aposte outro numero, este deve ser maior ou igual ao anterior"))
    if s < pri_numero:
        print("Soma menor")
    if s > seg_numero:
        print("Soma maior")
    if s == pri_numero or s == seg_numero:
        # Winnings: unspent money plus a 3x bonus on it.
        h = 10 - chutes
        g = h + (h * 3)
        print("acertou")
        # Bug fix: `return g` at module level is a SyntaxError; report the
        # winnings and end the game instead.
        print(g)
        break
    else:
        print("Soma no meio")
    contador += 1
| [
"you@example.com"
] | you@example.com |
3442d2ddf3b9c68f0ee4fcc3d22e673e983a2d03 | 3dde7ed023f2fb7b4ad3996894e82a72794565ad | /src/vlab/asgi.py | 955e0e87ce3806684dc90bf1162996b84773017d | [] | no_license | harshit37/vlab | 9fc038ca24757ba402949c592153c1e51010fa75 | 7e400dd7089cb9512bd01ddb7b07e1882a14809f | refs/heads/main | 2023-05-01T19:44:22.973221 | 2021-05-14T18:44:40 | 2021-05-14T18:44:40 | 366,010,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
ASGI config for vlab project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'vlab.settings')
application = get_asgi_application()
| [
"root@Dell-PC.localdomain"
] | root@Dell-PC.localdomain |
7836a38150fdd249abc279976af5f7e338ec908d | 24924453c07e6002d66e9a951f8523fc1feaf83d | /Lectures/Lecture 4 - Introduction to Python/leapyear.py | 503b0095d5a079ecfd3cd2a966fbd61dfff588b0 | [] | no_license | ShonnyAIO/Code-In-Place-Stanford-Univeristy-2021 | e1fc87b38641b252e2493e5cc5c1be9c829a7f09 | c372465cc8a8b2fc0f40ec00c3f38e24e8def6c9 | refs/heads/main | 2023-05-08T21:55:28.787768 | 2021-05-29T03:23:00 | 2021-05-29T03:23:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | def main():
year = int(input("Give your year: "))
if(not year % 4):
if(not year % 100):
if(not year % 400):
print("That's leap year!")
else:
print("That's not a leap year")
else:
print("That's leap year!")
else:
print("That's not a leap year")
pass
if __name__ == "__main__":
main()
| [
"exodiakah@gmail.com"
] | exodiakah@gmail.com |
47dbfd79684e1c1de391ab0f088f07468a32f0a0 | 761442df47845984e777a342a9c17beaff112835 | /config/settings/stg.py | fbf974ecc65e790e24b4fc9a5684defe6db3689e | [
"MIT"
] | permissive | SportySpots/seedorf | 8dcd80c0b3bd794cd09354740dbf15628f20dfee | 3f09c720ea8df0d1171022b68b494c2758f75d44 | refs/heads/master | 2023-09-03T16:08:18.586144 | 2019-12-21T22:32:36 | 2019-12-21T22:32:36 | 106,041,744 | 3 | 0 | MIT | 2023-09-04T20:46:33 | 2017-10-06T19:17:40 | CSS | UTF-8 | Python | false | false | 288 | py | from .prd import * # noqa
# Staging overrides on top of the production settings (see the star import
# from .prd at the top of this file).
MEDIA_URL = "https://sportyspots-stg.s3.amazonaws.com/"
# Extra origins allowed by django-cors-headers, prepended to the production
# whitelist. Bug fix: the two 127.0.0.1 entries were separated by a space
# instead of a comma, so implicit string concatenation fused them into one
# invalid origin ("http://127.0.0.1:8000http://127.0.0.1:8080").
CORS_ORIGIN_WHITELIST = [
    "https://training.sportyspots.com",
    "http://localhost:8000",
    "http://localhost:8080",
    "http://127.0.0.1:8000",
    "http://127.0.0.1:8080",
] + CORS_ORIGIN_WHITELIST
| [
"ashutoshb@gmail.com"
] | ashutoshb@gmail.com |
8b68148df57be36fa8e9062b4bf609759566fc8a | 66310d12b0afd748bb40aa5bc5fce8d9625838bc | /devtest/physics/numberdict.py | 8f1b3ac1a37fafd671d162a4daf0f7c142a161e8 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | chaulaode1257/devtest | 02fd8c31a801c0dfba28ff3e19be89d8d185d558 | 9ec93045ba4bab5b20ce99dc61cebd5b5a234d01 | refs/heads/master | 2023-03-19T17:39:05.775529 | 2020-12-21T01:11:12 | 2020-12-21T01:11:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,894 | py | # Dictionary containing numbers
#
# These objects are meant to be used like arrays with generalized
# indices. Non-existent elements default to zero. Global operations
# are addition, subtraction, and multiplication/division by a scalar.
#
# Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
# last revision: 1999-7-23
#
# Modified by Keith Dart to work with new physical_quantities module, and
# modern Python.
from devtest.core import types
class NumberDict(types.AttrDictDefault):
    """Dictionary holding numeric values with array-like semantics.

    Acts like an array indexed by arbitrary (non-integer) keys; entries
    that were never assigned default to the base class's default value
    (zero).  Instances support element-wise addition and subtraction
    with other NumberDict instances, plus multiplication and division
    by a scalar.
    """

    def __coerce__(self, other):
        # Promote a plain dict operand to a NumberDict sharing our default.
        # NOTE(review): __coerce__ is a Python 2 protocol and is ignored by
        # Python 3 operators — confirm whether anything still calls it.
        if isinstance(other, dict):
            return self, self.__class__(other, self._default)

    def __add__(self, other):
        result = self.copy()
        for k in other:
            result[k] = result[k] + other[k]
        return result

    __radd__ = __add__  # element-wise addition is commutative

    def __sub__(self, other):
        result = self.copy()
        for k in other:
            result[k] = result[k] - other[k]
        return result

    def __rsub__(self, other):
        result = self.copy()
        for k in other:
            result[k] = other[k] - self[k]
        return result

    def __mul__(self, other):
        product = self.__class__(default=self._default)
        for k in self:
            product[k] = other * self[k]
        return product

    __rmul__ = __mul__  # scalar multiplication is commutative

    def __truediv__(self, other):
        quotient = self.__class__(default=self._default)
        for k in self:
            quotient[k] = self[k] / other
        return quotient

    # Historical aliases kept from the original implementation.
    # NOTE(review): __floordiv__ aliases *true* division, so `//` does not
    # floor — verify this is intended before relying on it.
    __div__ = __floordiv__ = __truediv__
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
| [
"dart@google.com"
] | dart@google.com |
eb0af1cae0abc9f558c4b2dbf6c5134c2923070c | b5b3642dd43599f375baac26de6fe72aacaa0a38 | /8/a.py | bc1ad96065ee82d8eb2eb62b494fd4cf2d49a7c8 | [] | no_license | the-glu/aoc2019 | 61eea9f4c8fab86786f89d1545a0c4301767431f | 5590294e9d4f040d7c17792ac02efb52b0674387 | refs/heads/master | 2020-09-30T15:10:55.719241 | 2019-12-20T09:50:16 | 2019-12-20T09:50:16 | 227,313,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,864 | py | d = """112222222222222212222122220212222222202222222222222122222222202100222222120222101222222122222222221222220102222222222020222222212221222222222222221222102222222222222212222222220222222222202222222222222122222222222100222222120222121222222122222222221222221002222222222222222222202221222222222222222222022222222222222212222222220222222222202222222222222222222222222201222222120222010222222022222222221222220012222222222021222222212222222222222222221222112222222222222222222022222212222222222222222222222022222222202000222222021222021222222222222222221222220202222222222222222222202221222222222222222222212222222222222202222222222222222222222222222222222222222222212021222222021222001222222222222222222222222122222222222222222222222221222222222222221222222222222222222202222122222222222222202222212222222122222202212110222222222222220222222022222222220222221012222222222020222222222221222222222222220222022222222222222202222222221222222222222222212222202122222202202012222222222222220222222122222222222222222122222222222220222222222222222222222212220222112222222222222222222122221212222222222222202222212222222222222111222222121222121222222122222222220222222100222222222221222222212221222222222222221222022222222222222212222022222212222222212222202222222222222212122011222222222222002222222122222222222222220012222222222021222222222220222222222222220222022222222222220212222022222202222222202222212222201122222202222212222222222222102222222222222222222222222212222222222222222222212220222222222202221222002222222222222222222022220212222222212222
21222222012222221202220122222212122210022222212222222222022222212222222222212222222220222222222222221222222200222222222222222222212222221222222222222222222222222222222210211022222222122210122222222222222222022222121222222222202122222220222222222222222222122220222022222222021222222222222222222221222220222221102222220202222122222222222200122222212222222202122222001222222222212222222221222122222222222222222221222222222222120222202222122222222222222221222220022222221201210122222202122220022222202222222212022222221022222222202122222221222222222222222222022200222222222222121222222222021222222220222222222221222222222220220222222222122210022222202222222222022222200022222222212122222221222022222222221222122211222022222222122222222222222222222221222221222222212222221212221221222222122201022222202222222202222222200222222222212022222220222222222222220222222222222222222222122222202222122222222221222220222221212222222212220121222212022210122222222222222222222222121222222222212022222222222022222222220222022202222122222222221222212222022222222221222221222221202222220222201222222212022200022222222222222212122222222122202222222022222222222222222222220222122200222122222222222222212222221222222220222201222221122222221211211121222212222202022222202222222202222222101122212222222022222221222122222022221122122211222222222222122222212222122222222222222210222221122222221200210120222202022211122222222202222212222222202222222222212222222222222222222022222022222220222222222222022222212222121222222220222222222222202222222220220022222102122222022222122212222212222222101122202222222222222220222022222022220122222211222222222222120222212222222222222222222201222221012222221201200020222212122201122222102222022202022222000222212222222222222222222022222222220202022200222022222222222222212222102222222220222222222222012222221210220122222102222222122222102202222222222222010020222222222022222220222022222120220122122202222122222222022222202222000222222220222220222221022222220202210120222222022212022222122002
12220212222210002121222222202222222122222222202222111222220022222222222202022222222212122222222222221022222212222222120020012022220212220112222200211212222222222202012121222220212222222122202222222222212202200222202222222212122221222222222222222222221022222220222222021120002220210222220112222221212202221212222201012220222221222222222122212222222122121222220222202222221212222222222202022222222222220122222102222222122221202222202212222222222211211212222202222200222122222222222222222222202222202022200202222022212122220202222222222210222222222122221122222020222222122120212022220212222022222201222212221202222200201121222220222222222002222222222222101202220222222022220212122220222220022022222222222222222211222222021122012122210202222002202210201202220222222200122120222221202222222122222222222122111222220122222122220202222221222210022122222122221022222222222222100020102122212212222112222212201212222222222202111021222220222222222202212222212222201222212222212122220222022221222221222222222022220122222110222222022221102221211222221122212212222222221212222202021220222220222222222202202222222122000202212022202222222212022222222202022222222222220222222111222222222221202120222212221202222222211202221222222221002220222220212222222012222222202222202202220222222122221202112221222202222122222022222222222022222222122122212022212212220212212200212202220222222220021121222220212222222112202222212222001222222120212022221222002221222200222022222022222222222112222222001221222121222202221122212201221222221202222222221020222221222022222202212222212022022202221022222122220222002221122202222122222122222122222200222222120120202021211202222122212220202202221202222211011222222220222122222102202222212022112222200220212022222222202221202222222022222022220122222102222222102120212120211202221022202222222202221202222210001221222222202022222220202222222122220212221120222122222212022220212221022202222122220222222210222222222021022120211212221012222221200212221222222222121221222220212022222100222222222222
12220222112021202222122201222101222212202222202222102222211022222210012010212120222222121221222121021222022222222002112022222222212202200222222222222210222220012121212222120202222012221102212222201222212222212122222220122101212221222222212222222020222222122222221122212022222121212222211221222222222220120222222220222222120221222200221112211222212222112222211122222220222010222121220222211220220120021222022222220021222222222020212212210221222221202222122222012220202222221211222212220202211222201222112222212122222202112222222121122222220222222120120222021222222001212122222021212212200121222221212200022222222020202222020222222100220112221222200222022222221222222200012010212221120222200222222220121222221222222211022222222222202222221122222221212210221220222120212222121200222100220202211222200222022122212122222222122100212121120122212221221021122222020222222102202222222022202222210121222220212200220222122221222222222200222110222012201222222222002022211122222211222001222021022122101220220221120222021222222200112122222020212212210020222220222212122221222220202222020212222010221122202222220222022222220022222221122221212120021122222221222220020222021222220010012222222022202212221121222220222201122220212221212222020211122202220112200222201222112022201222222202102022212021020122021222220222221222021222221000102022222120212222220021222221222222121220102022202222020202222000220002211222221222002022220022222201022112222122121222102220221120121222120222221201212122222222212202220022222222221200022222222222222222022212122211221112222222222222012022222222222220122120212122021022202220222220220222122222222210212122222122212222212021222200220201021222202120222222022221222212220222210222212222202122211222222212002001202022222022200220220222120222021222221222002122222122212222221221222221201211221221012220222221220200122212212102210222210222212122221122222212122101212122022022211221221122122222222222222200012102222020212212221221222202210211221221022021202220022220222101202222211222221222
00212221122222220212210021222112202200122022212222122212122222102021200222212222221220222222220122221022122000122121222102222122220121122221122220222212222221122222222212220221212112112201022022122212122202122222020101221222222021221221122022221121220222022222102220222222022012210120202222122222122212222220102222220000220221222002122200022222202122022210222222220220210222212022221221122222220121121002122221112022222102122112212022022222022221022201222220102222222120212122202012212220122002112022122221122222012221212222222021222220122122221020021122022211102021222122221112200022000221022221222220200220122222220122220222222202022201122102112022222221122222112000222222202220222221022222220121020112222000122222222102020212202020202221222220120200201220222222220000210121212212122201222002202212022212022222011010200222212221220221102222222021020022022122212122222222021212202021211222022222121210210221202222221021222120212222102202122002102122122201022222201110222222222120221221202022222220220122122121122220222102020102212020210220122221021220222221212222220122201121212122212220122212022022022212122222121000211222222022220222202222221121222112122210112122222212020022220220101222222220121200201221022222222100211120202122112200022222002012122202022222012220202222202022221221112112221122222222022202022020222112022122211121202222122220121211212222202222222102210022212212212222222212202222022222222222000220221222222221220222122202221022122202222021212122222202222212220122212221222220220221220222112222221110211220222202002200222212122102202220022022102001120222212121221220012112220021020222222211122022222022220002211221200221122220022212220221102222221212021122222122002202022002222102122201122222112001100222202121221222222222221022022112122102022221222102222012200020210220122222121200212221212222200101212120212222202200122222002212102220122202020021211222202020222221002112221221221022122200022122222122220202212220022220022222122222221222022222220000001221212002112221022202012022
12222022210210000221022222202121122011220222222022010222222112222122222212221222212121122222222202121122122120222221122120112221222201221202222212200220221102210201110001122221202022222020210222212022000222200002202222202222211221222220022222222122120221122211222222011122102122201222222122220202221122220222211221220102222221202220122201211222112122001222202122222022200202020222102120222102222012222221222220222220110020112021202221222112212212222111021012211212022201022221222122122212221222102002022202210101222102200202010220202121222022222122022222122222222220010222012120220202221022200211222012210112212222120100022222202020022000212222012102211212201102212102210202112220122021022012222002121022122221222222122210012020210221222112220201221112211102200212122220122222212122122100202222122012001222212002222112221202121221222012222102222022222220022022222222220012222221212210220202222210221210211102201222110001222221212220112110222222212211110202220111202202200202210220202222122122222222120022122000222222210022212122200210220212222200222220002002220201002020022222002120022002200222202022012202222220212222202212101221012221022202222022122121022011222220020200122220222222020122212212221221211202210211122212122120012220002112200222222120021212200101212122202202012220202200122122222102221020222210222222211010212120210211221222221212222221011012221201101101122120101021112202222222022120110202222122221112222212101220202212122202222112021100222212222221020111202121200201021202212222221111212202221200111120022020101222122222202222012001120212001121211202200202010221102000022022222112222000222201222222002201112120211220120122212212220002110122210212011222122220220220122120220222002020122222122010222102212202110221022101122122222202222000122012222220111201012120221210220022201221212121220022211221011102022121101120122101220222012122101122121101212212220212120222002011022002222112022000222202222220002102012020200210100022220200212011012202201222012202022221021020212121201222112010
01112212020122110221022220122122220012212222211202200212212022222022202222222121221202122221220222011000012222221212020212202222212202200221022201221111000200000220002220200210122221222012202222211212000202222222222021201101202122122020222220120221112200121210020210101122212201012021210221122200211102212200012020200220220222022111212202211222222222222212202122222111202022222222121021111220220120001220120222021211200002222000022000202221222210210220101220102021110220112200221122210212212222220222012112220122222112020110222220121121211202020022111000100211022112020212212121102022212222122202222220201212002022012222002202020010222212212202202202222212212022222010210101202122020002101222022122202022121222120220100102212120012020220020222201201000211211120120112222202212222021021112220202202202120022212022222021111220202222022222121201022120201010212211122121202002212121222222222022022202221021010201111201200220112201122220010222210222202222102012222222222112020002222020121220221221021221211212021202121021100022212020212210212122122201220120000211202201121222101222220202122002210222210202221222202222222221210001212022120221222211020022022001002220220222000012202021002020200222022210221011220200101202012220220202221201020122220222222202202112220222222102121221212122021100121210020122112201210220021120012002202101002002220220222212201022100221122220011210011222021020201022211222200212121112200022022002210100212222022122000200021120110212011200220001020202212022122122202220222211002022220200122020122200001212021200110202212222212212001212222022222000200101222010221210201211021022111100001221120221201212212222202211220121022221001121111221222212011200110202120121001202200212222202212202212122022100010012222211121110001202022222120022020221222002202002222011102212200220022210111202010210122212200211000102021001010202220222202202100222220122022100112200212211122012202011020220102021210202020221000222202020212201210022122202012211212211010011200211002020122110021022202202222222
211112211222222000102010222222221112210211122122022112122222020001002212222101102221201021222210201010220221011022102202110221221022010222222222221212022102221222022120111002202210022121121100221121221000120212022212121112212220112010212222222211020222010202112201010212110001220102211222220222210202210112221122212201222200212112221120220212020221201100210210120111020112212100112201211020122222020211220200002101012201211122221022201012201202212222102112221022102021100112212201021102002011222120220200200202022110012122202000212202220221122211020011012210022000121200121200122202011212220212212212222222202022022000220022212102221100111101221120002111210000122201220212202100002202202022022201110201021222110120202222000012021211021122212221220202201022201122122001201022212210220121002000020121212020022021221222111012202210102211201121022210002120001220212020012210020222021120121222220221201222102002202222022000201001222122220222210010222022102200120202220002201102212001112012210200022220101122102202111121121210100220221020012212200210212202111212201022012201010020202201020202110002122220211200202212021001110022202001022022220201122212012010111210101002020222011112222111210122200220012220220002210222222001012120222112121012001112021121202212121112121012112112212210002010210222222202111200210211202121012002102000101211111010100120221012202200121110120121110120020121100021222100011001001200112000212100001000010102120220120002000122100001220102"""
# --- Advent of Code 2019 day 8 (Space Image Format) ---
# The digit stream `d` above encodes a 25x6 image split into layers of
# 25*6 digits each; '0' = black, '1' = white, '2' = transparent.
max_nb_0 = 9999999999999999999  # sentinel: fewest zeros seen in any layer
max_layer = 0  # part-1 candidate answer: nb_1 * nb_2 of that layer
layer = 0  # running layer counter
z_layer = 0  # zero-count of the current layer
# NOTE(review): z_layer is never incremented below, so `z_layer < max_nb_0`
# succeeds only on the first layer — verify the part-1 answer is correct.
nb_1 = 0  # '1' digits in the current layer
nb_2 = 0  # '2' digits in the current layer
image = {}  # (x, y) -> rendered pixel; first opaque digit per pixel wins
pos_x = 0
pos_y = 0
for p, x in enumerate(d):
    # Part 2: '2' is transparent, so only record the first '0'/'1' seen
    # at each coordinate (the topmost opaque pixel).
    if (x == "0" or x == "1") and (pos_x, pos_y) not in image:
        image[(pos_x, pos_y)] = "X" if x == "1" else " "
    if x == "1":
        nb_1 += 1
    if x == "2":
        nb_2 += 1
    # Shift to a 1-based position so the layer-boundary modulo test below
    # fires on the last digit of a layer (does not affect enumerate).
    p += 1
    pos_x += 1
    if (pos_x == 25):
        pos_y += 1
        pos_x = 0
        if pos_y == 6:
            pos_y = 0
            print("L")  # debug: finished a full layer
        else:
            print("NL")  # debug: finished a row within a layer
    if p % (25 * 6) == 0:
        # End of a layer: remember it if it has the fewest zeros so far.
        if z_layer < max_nb_0:
            max_nb_0 = z_layer
            max_layer = nb_1 * nb_2
        layer += 1
        z_layer = 0
        nb_1 = 0
        nb_2 = 0
print(max_nb_0)
print(max_layer)
# Render the decoded image: 'X' = white pixel, space = black.
for y in range(0, 6):
    for x in range(0, 25):
        print(image[(x, y)], end='')
    print("")
| [
"maximilien@theglu.org"
] | maximilien@theglu.org |
37b16598f173de07ea41f4d67978f031034c90e7 | ee838d827f128b6d651675fbc11c6127be58280a | /scipy_341_ex3.py | 2c29670607e0bd65654f760b0260cb4d971ce5e0 | [] | no_license | CodedQuen/Scipy-and-Numpy | 80a4b2d6792ba4702634849d583e0ce86e4a2820 | 1b333d5f7cf2c6310c64523f9de80718c6a84cb4 | refs/heads/master | 2022-11-09T23:12:17.624938 | 2020-06-27T04:38:01 | 2020-06-27T04:38:01 | 275,300,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | import numpy as np
from scipy.stats import geom
# Set up the parameter for the geometric distribution:
# p is the probability of success on a single trial.
p = 0.5
dist = geom(p)
# Set up the sample range over which to evaluate the distribution.
x = np.linspace(0, 5, 1000)
# Evaluate geom's PMF and CDF at the sample points.
pmf = dist.pmf(x)
cdf = dist.cdf(x)
# Draw 500 random variates from the distribution.
sample = dist.rvs(500) | [
"noreply@github.com"
] | noreply@github.com |
cb0905fda3cb7d3af65e15d93cfc3e24e05aac02 | 8d1c63af604114d488dea3f0ab55c148b8f2de43 | /EMRClusterWithHbase/emr_cluster_with_hbase/emr_cluster_with_hbase_stack.py | 2ea002afac6c8f61910bcb2aaa6ae1944d4bef08 | [] | no_license | G1Yara/AWSCDKProjects | 9ebe87b689934484c68b036d466d1e9c5b752e94 | bd7781ba6c0666f91cd94b4550140ed0b3f0e55c | refs/heads/main | 2023-07-04T03:48:05.239215 | 2021-08-08T11:48:41 | 2021-08-08T11:48:41 | 382,611,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,829 | py | from aws_cdk import core as cdk
from aws_cdk import aws_emr as _emr
from aws_cdk import aws_ec2 as _ec2
# For consistency with other languages, `cdk` is the preferred import name for
# the CDK's core module. The following line also imports it as `core` for use
# with examples from the CDK Developer's Guide, which are in the process of
# being updated to use `cdk`. You may delete this import if you don't need it.
from aws_cdk import core
class EmrClusterWithHbaseStack(cdk.Stack):
    """CDK stack that provisions an EMR cluster running HBase.

    All environment-specific values (VPC id, subnets, instance sizing,
    IAM roles, S3 paths, ...) are read from the CDK context for the given
    deployment ``stage`` via ``self.node.try_get_context(stage)``.
    """

    def __init__(self, scope: cdk.Construct, construct_id: str, stage: str, **kwargs) -> None:
        # `stage` selects the context block (e.g. dev/prod) holding all settings.
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        # Setting up EMR Cluster
        # look up for existing default vpc
        vpc = _ec2.Vpc.from_lookup(self,
                                   id="vpc",
                                   vpc_id=self.node.try_get_context(stage)["vpc_id"])

        security_group_name = self.node.try_get_context(stage)["security_group_name"]
        # creating new security group for emr cluster
        emr_security_group = _ec2.SecurityGroup(self,
                                                id=security_group_name,
                                                security_group_name=security_group_name,
                                                vpc=vpc)

        acadia_services_sg = self.node.try_get_context(stage)["acadia_services_security_groups"]
        # look up for existing acadia services security group
        # (only the first id in the context list is used)
        launch_wizard_sg = _ec2.SecurityGroup.from_lookup(self,
                                                          id=acadia_services_sg[0],
                                                          security_group_id=acadia_services_sg[0])

        # add existing acadia services security group as inbound rule to emr security group
        # NOTE(review): all_traffic() opens every port/protocol to that SG — confirm intended.
        emr_security_group.add_ingress_rule(peer=launch_wizard_sg,
                                            connection=_ec2.Port.all_traffic(),
                                            description="existing services sg")

        # creating emr cluster instance type
        # setting up instance type & count
        # setting up storage size
        # setting up storage type
        master_instance_group = _emr.CfnCluster.InstanceGroupConfigProperty(
            instance_count=self.node.try_get_context(stage)["emr_cluster_instance_count"],
            instance_type=self.node.try_get_context(stage)["emr_cluster_instance_type"],
            name=self.node.try_get_context(stage)["emr_cluster_instance_name"],
            ebs_configuration=_emr.CfnCluster.EbsConfigurationProperty(
                ebs_block_device_configs=[_emr.CfnCluster.EbsBlockDeviceConfigProperty(
                    volume_specification=_emr.CfnCluster.VolumeSpecificationProperty(
                        size_in_gb=self.node.try_get_context(stage)["emr_cluster_instance_size_in_gbs"],
                        volume_type=self.node.try_get_context(stage)["emr_cluster_instance_volume_type"]))]
            )
        )

        # creating job flow config
        # setting up subnet
        # setting up pem key name
        # setting up hadoop version
        # setting up security group id
        # NOTE(review): the same instance-group config is reused for both the
        # master and core groups — confirm identical sizing is intended.
        emr_instance = _emr.CfnCluster.JobFlowInstancesConfigProperty(
            master_instance_group=master_instance_group,
            additional_master_security_groups=[emr_security_group.security_group_id],
            additional_slave_security_groups=[emr_security_group.security_group_id],
            core_instance_group=master_instance_group,
            ec2_subnet_id=self.node.try_get_context(stage)["default_vpc_subnets"][0],
            ec2_key_name=self.node.try_get_context(stage)["ec2_key_name"],
            hadoop_version=self.node.try_get_context(stage)["hadoop_version"]
        )

        # creating required application list
        # required applications are zookeeper, hbase, etc
        emr_required_apps_list = list()
        for app in self.node.try_get_context(stage)["emr_cluster_required_applications"]:
            app_property = _emr.CfnCluster.ApplicationProperty(
                name=app
            )
            emr_required_apps_list.append(app_property)

        emr_cluster_name = self.node.try_get_context(stage)["emr_cluster_name"]

        # HBase configuration: storage mode plus the S3 root directory
        # used when storage mode is s3.
        configurations_list = list()
        config_property = _emr.CfnCluster.ConfigurationProperty(
            classification="hbase",
            configuration_properties={"hbase.emr.storageMode": self.node.try_get_context(stage)["emr_cluster_hbase_storage"]}
        )
        config_property_1 = _emr.CfnCluster.ConfigurationProperty(
            classification="hbase-site",
            configuration_properties={"hbase.rootdir": self.node.try_get_context(stage)["emr_cluster_hbase_s3_path"]}
        )
        configurations_list.append(config_property)
        configurations_list.append(config_property_1)

        # creating emr cluster
        _emr.CfnCluster(self,
                        id=emr_cluster_name + stage,
                        name=emr_cluster_name,
                        instances=emr_instance,
                        release_label=self.node.try_get_context(stage)["emr_cluster_version"],
                        job_flow_role=self.node.try_get_context(stage)["emr_cluster_job_role"],
                        service_role=self.node.try_get_context(stage)["emr_cluster_service_role"],
                        visible_to_all_users=True,
                        ebs_root_volume_size=self.node.try_get_context(stage)["emr_cluster_volume_size"],
                        applications=emr_required_apps_list,
                        log_uri=self.node.try_get_context(stage)["emr_cluster_log_s3_bucket"],
                        configurations=configurations_list
                        )
| [
"jeevan.yara@costrategix.com"
] | jeevan.yara@costrategix.com |
aabe87fa211f6a080703506ca296b4e66a5a0250 | daf4d684435b7f68c9801b53ebd2843927cb8f7b | /blog/migrations/0001_initial.py | ddda976f0c73a18442d002f9cd9151557e7a27a5 | [] | no_license | f-vitorio/projeto_fvblog | 97225e930e302260f6ff2d58ee571c03c49447dd | d4ff00235ed464507a7bd59bd4c65a9962468a40 | refs/heads/master | 2023-04-30T15:58:43.730839 | 2021-05-16T15:37:11 | 2021-05-16T15:37:11 | 365,357,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | # Generated by Django 3.2.2 on 2021-05-08 22:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the blog ``Post`` model."""

    # First migration of this app.
    initial = True

    # Post.author references the user model, so depend on whatever model
    # AUTH_USER_MODEL is swapped to.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('content', models.TextField()),
                # Defaults to the creation time (timezone-aware).
                ('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
                # Deleting the user cascades to their posts.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"fabio.vitorios@gmail.com"
] | fabio.vitorios@gmail.com |
2fa9a4345cb0ce267a252e6a85ac0ed094801074 | d220b1ce312153f70e776eafbb44bf4fadb50870 | /translation/transformer/mindspore/train.py | 43565da0bbabfd8306f19dde6a88862a14bf5366 | [] | no_license | luweizheng/nn-bench | 1012a6e84759cf7b6392ed5b138a6961b4c30b50 | 5644d22257b4e1e4418b66914940acab99deaba3 | refs/heads/master | 2023-04-11T12:39:18.969953 | 2021-04-22T12:02:10 | 2021-04-22T12:02:10 | 343,982,095 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,042 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Transformer training script."""
import os
import time
import argparse
import ast
from mindspore import dtype as mstype
from mindspore.common.tensor import Tensor
from mindspore.nn.optim import Adam
from mindspore.train.model import Model
from mindspore.train.loss_scale_manager import DynamicLossScaleManager
from mindspore.train.callback import CheckpointConfig, ModelCheckpoint
from mindspore.train.callback import Callback, TimeMonitor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
import mindspore.communication.management as D
from mindspore.communication.management import get_rank
from mindspore.context import ParallelMode
from mindspore import context
from mindspore.common import set_seed
from src.transformer_for_train import TransformerTrainOneStepCell, TransformerNetworkWithLoss, \
TransformerTrainOneStepWithLossScaleCell
from src.config import cfg, transformer_net_cfg
from src.dataset import create_transformer_dataset
from src.lr_schedule import create_dynamic_lr
# Fix the global RNG seed so training runs are reproducible.
set_seed(1)
def get_ms_timestamp():
    """Return the current wall-clock time as an integer count of milliseconds."""
    return round(time.time() * 1000)
# Module-level state shared with LossCallBack: time_stamp_first records the
# wall-clock time (ms) when the first callback instance was created and is
# used as the zero point for logged timestamps; time_stamp_init guards it.
time_stamp_init = False
time_stamp_first = 0
class LossCallBack(Callback):
    """
    Monitor the loss in training.

    After every step, prints the elapsed time (ms since the first instance
    of this class was created), the epoch, the step and the network outputs,
    and appends the same record to ``./loss_<rank_id>.log``.

    Args:
        per_print_times (int): Print loss every times. Default: 1.
        rank_id (int): Device rank used to name the per-device log file.
            Default: 0.
    """
    def __init__(self, per_print_times=1, rank_id=0):
        super(LossCallBack, self).__init__()
        if not isinstance(per_print_times, int) or per_print_times < 0:
            raise ValueError("print_step must be int and >= 0.")
        # NOTE(review): _per_print_times is validated and stored but never
        # consulted in step_end — every step is logged regardless.
        self._per_print_times = per_print_times
        self.rank_id = rank_id
        # Record the construction time of the *first* instance once,
        # process-wide; later timestamps are reported relative to it.
        global time_stamp_init, time_stamp_first
        if not time_stamp_init:
            time_stamp_first = get_ms_timestamp()
            time_stamp_init = True

    def step_end(self, run_context):
        """Print and append per-step loss information at the end of a step."""
        global time_stamp_first
        time_stamp_current = get_ms_timestamp()
        cb_params = run_context.original_args()
        print("time: {}, epoch: {}, step: {}, outputs are {}".format(time_stamp_current - time_stamp_first,
                                                                     cb_params.cur_epoch_num, cb_params.cur_step_num,
                                                                     str(cb_params.net_outputs)))
        # Append the same record to a per-rank log file in the working directory.
        with open("./loss_{}.log".format(self.rank_id), "a+") as f:
            f.write("time: {}, epoch: {}, step: {}, outputs are {}".format(time_stamp_current - time_stamp_first,
                                                                           cb_params.cur_epoch_num,
                                                                           cb_params.cur_step_num,
                                                                           str(cb_params.net_outputs)))
            f.write('\n')
def argparse_init():
    """Build and return the command-line argument parser for Transformer training.

    Boolean-like switches are expressed as the strings 'true'/'false' to match
    the launch scripts; bucket boundaries are parsed with ast.literal_eval.
    """
    arg_parser = argparse.ArgumentParser(description='transformer')
    arg_parser.add_argument("--distribute", type=str, default="false", choices=['true', 'false'],
                            help="Run distribute, default is false.")
    arg_parser.add_argument("--epoch_size", type=int, default=52, help="Epoch size, default is 52.")
    arg_parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
    arg_parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.")
    arg_parser.add_argument("--enable_lossscale", type=str, default="true", choices=['true', 'false'],
                            help="Use lossscale or not, default is true.")
    arg_parser.add_argument("--do_shuffle", type=str, default="true", choices=['true', 'false'],
                            help="Enable shuffle for dataset, default is true.")
    arg_parser.add_argument("--checkpoint_path", type=str, default="", help="Checkpoint file path")
    arg_parser.add_argument("--enable_save_ckpt", type=str, default="true", choices=['true', 'false'],
                            help="Enable save checkpoint, default is true.")
    arg_parser.add_argument("--save_checkpoint_steps", type=int, default=2500,
                            help="Save checkpoint steps, default is 2500.")
    arg_parser.add_argument("--save_checkpoint_num", type=int, default=30,
                            help="Save checkpoint numbers, default is 30.")
    arg_parser.add_argument("--save_checkpoint_path", type=str, default="./", help="Save checkpoint file path")
    arg_parser.add_argument("--data_path", type=str, default="", help="Data path, it is better to use absolute path")
    arg_parser.add_argument("--bucket_boundaries", type=ast.literal_eval, default=[16, 32, 48, 64, 128],
                            help="sequence length for different bucket")
    return arg_parser
def run_transformer_train():
    """
    Transformer training.

    Parses command-line arguments, configures the MindSpore context for an
    Ascend device (optionally data-parallel over several devices), builds the
    dataset, network, learning-rate schedule and optimizer, then launches
    training with loss logging, optional checkpointing and optional dynamic
    loss scaling.
    """
    parser = argparse_init()
    args, _ = parser.parse_known_args()
    # Graph-mode execution on the chosen Ascend device.
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=args.device_id)
    context.set_context(reserve_class_name_in_scope=False, enable_auto_mixed_precision=False)

    if args.distribute == "true":
        # Data-parallel training: one process per device, gradients averaged.
        device_num = args.device_num
        context.reset_auto_parallel_context()
        context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
                                          device_num=device_num)
        D.init()
        rank_id = args.device_id % device_num
        # Each rank writes checkpoints into its own subdirectory.
        save_ckpt_path = os.path.join(args.save_checkpoint_path, 'ckpt_' + str(get_rank()) + '/')
    else:
        device_num = 1
        rank_id = 0
        save_ckpt_path = os.path.join(args.save_checkpoint_path, 'ckpt_0/')

    # Bucketed dataset: samples are grouped by sequence length boundaries.
    dataset = create_transformer_dataset(epoch_count=1, rank_size=device_num,
                                         rank_id=rank_id, do_shuffle=args.do_shuffle,
                                         dataset_path=args.data_path,
                                         bucket_boundaries=args.bucket_boundaries)

    netwithloss = TransformerNetworkWithLoss(transformer_net_cfg, True)

    # Optionally warm-start from a previously saved checkpoint.
    if args.checkpoint_path:
        parameter_dict = load_checkpoint(args.checkpoint_path)
        load_param_into_net(netwithloss, parameter_dict)

    # Learning-rate schedule: linear warmup followed by rsqrt decay,
    # scaled by the model's hidden size.
    lr = Tensor(create_dynamic_lr(schedule="constant*rsqrt_hidden*linear_warmup*rsqrt_decay",
                                  training_steps=dataset.get_dataset_size()*args.epoch_size,
                                  learning_rate=cfg.lr_schedule.learning_rate,
                                  warmup_steps=cfg.lr_schedule.warmup_steps,
                                  hidden_size=transformer_net_cfg.hidden_size,
                                  start_decay_step=cfg.lr_schedule.start_decay_step,
                                  min_lr=cfg.lr_schedule.min_lr), mstype.float32)
    optimizer = Adam(netwithloss.trainable_params(), lr)

    callbacks = [TimeMonitor(dataset.get_dataset_size()), LossCallBack(rank_id=rank_id)]
    # Only rank 0 writes checkpoints when running distributed.
    if args.enable_save_ckpt == "true":
        if device_num == 1 or (device_num > 1 and rank_id == 0):
            ckpt_config = CheckpointConfig(save_checkpoint_steps=args.save_checkpoint_steps,
                                           keep_checkpoint_max=args.save_checkpoint_num)
            ckpoint_cb = ModelCheckpoint(prefix='transformer', directory=save_ckpt_path, config=ckpt_config)
            callbacks.append(ckpoint_cb)

    # Wrap the network for single-step training, with or without dynamic loss scaling.
    if args.enable_lossscale == "true":
        scale_manager = DynamicLossScaleManager(init_loss_scale=cfg.init_loss_scale_value,
                                                scale_factor=cfg.scale_factor,
                                                scale_window=cfg.scale_window)
        update_cell = scale_manager.get_update_cell()
        netwithgrads = TransformerTrainOneStepWithLossScaleCell(netwithloss, optimizer=optimizer,
                                                                scale_update_cell=update_cell)
    else:
        netwithgrads = TransformerTrainOneStepCell(netwithloss, optimizer=optimizer)

    netwithgrads.set_train(True)
    model = Model(netwithgrads)
    model.train(args.epoch_size, dataset, callbacks=callbacks, dataset_sink_mode=False)
if __name__ == '__main__':
run_transformer_train() | [
"luweizheng36@hotmail.com"
] | luweizheng36@hotmail.com |
1c58e39e99b670be9e8d2f6c4131c0c5d37638b3 | 6a1595e33051ebbd098f78cb0ff7d09cfc0a57dc | /day3/work_day3.py | 64838662dc94e940f40402a3a1643dba55a2d0ff | [] | no_license | MannixZ/Python_1-100 | f3514ef1255ca27b656209716bdb27d3821df46e | 740c3d2800f8d55fd2bcd8f789486253e01c9d53 | refs/heads/master | 2020-07-04T03:18:10.125091 | 2019-11-18T16:45:11 | 2019-11-18T16:45:11 | 202,135,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/7/29 15:05
# @Author : Mannix
# @File : work_day3.py
# @Software: PyCharm
def work_1():
    """Convert a length between inches (imperial) and centimeters (metric).

    Reads the value and the unit from stdin and prints the converted value.
    """
    length = float(input('请输入长度: '))
    unit_name = input('请输入单位: ')
    if unit_name in ('in', '英寸'):
        print('%f英寸 = %f厘米' % (length, length * 2.54))
    elif unit_name in ('cm', '厘米'):
        print('%f厘米 = %f英寸' % (length, length / 2.54))
    else:
        print('请输入有效的单位')
def work_2():
    """Roll a six-sided die and print the forfeit matching the face rolled."""
    from random import randint
    # Map each die face to its forfeit; same pairs as the original chain.
    forfeits = {
        1: '唱首歌',
        2: '跳个舞',
        3: '学狗叫',
        4: '做俯卧撑',
        5: '念绕口令',
        6: '讲冷笑话',
    }
    print(forfeits[randint(1, 6)])
# Run both exercises when executed as a script.
if __name__ == '__main__':
    work_1()
    work_2()
"noreply@github.com"
] | noreply@github.com |
2cdf52486711ebe99c6646a833bcf3b370fd8337 | d6c9c730ca514af81307018c669bd2f7e5de51c6 | /Stack_20190722/stack_class.py | fea59faa2fbd2e6bd8c2179dd296a356f5911880 | [] | no_license | itbullet/python_projects | a1a56d070a6a70b0814cdc2a83cbd1ce9bc0dab8 | 06d171f1cab7f45c704944e40ffb0b7a175c1d2d | refs/heads/master | 2020-06-22T15:07:40.768297 | 2019-09-09T13:49:02 | 2019-09-09T13:49:02 | 197,734,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | class Stack:
    def __init__(self):
        # Backing store; the end of the list is the top of the stack.
        self.items = []
def is_empty(self):
return self.items == []
    def push(self, item):
        """Place `item` on top of the stack."""
        self.items.append(item)
    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.items.pop()
def peek(self):
last = len(self.items) - 1
return self.items[last]
    def size(self):
        """Return the number of items currently on the stack."""
        return len(self.items)
"eduard.shapirov@gmail.com"
] | eduard.shapirov@gmail.com |
d16669265ce1d3d275559219e2fae8d5d8e1a567 | dd673e10e2b3e6da88971e2d428b89aeb019efa9 | /setup.py | 25c07b4c89bb8f05b9ca7626ebe27400aabfb3d8 | [] | no_license | JaFro96/helloworld-cli | 5cbe47b95d8b9d62ba16d3fe2d16b424b1fbd03c | a151f3d8cf5b75363dea8c26c1abcf74ff89e9a1 | refs/heads/master | 2020-04-07T23:36:36.161841 | 2018-11-23T14:14:04 | 2018-11-23T14:14:04 | 158,819,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | from setuptools import setup
# Distribution metadata for the helloworld-cli package.
setup(
    name = 'helloworld-cli',
    version = '0.1.0',
    packages = ['helloworld'],
    # Installs a `helloworld` console command that runs helloworld.__main__:main.
    entry_points = {
        'console_scripts': [
            'helloworld = helloworld.__main__:main'
        ]
    })
"jannis.froehlking@gmail.com"
] | jannis.froehlking@gmail.com |
83865e2461c7fdd4cb466554d9f685060a332d9a | 345b37bd2d062f4d020b3c974854e016e727afd7 | /black.py | 24c57ca4aee1d54ae862e8e879a4d27f9577f029 | [
"MIT"
] | permissive | AfolabiOlaoluwa/black | 04d458de4aa841e12bdc1e0b577fcee09392915e | 4c086b137e8869166282765a8242808785605278 | refs/heads/master | 2021-04-06T02:26:23.245318 | 2018-03-14T21:38:33 | 2018-03-14T21:38:33 | 125,282,966 | 1 | 1 | MIT | 2018-03-14T22:43:03 | 2018-03-14T22:43:02 | null | UTF-8 | Python | false | false | 47,591 | py | #!/usr/bin/env python3
import asyncio
from asyncio.base_events import BaseEventLoop
from concurrent.futures import Executor, ProcessPoolExecutor
from functools import partial
import keyword
import os
from pathlib import Path
import tokenize
from typing import (
Dict, Generic, Iterable, Iterator, List, Optional, Set, Tuple, TypeVar, Union
)
from attr import attrib, dataclass, Factory
import click
# lib2to3 fork
from blib2to3.pytree import Node, Leaf, type_repr
from blib2to3 import pygram, pytree
from blib2to3.pgen2 import driver, token
from blib2to3.pgen2.parse import ParseError
__version__ = "18.3a0"
# Default maximum line length when --line-length is not given.
DEFAULT_LINE_LENGTH = 88
# types
syms = pygram.python_symbols  # grammar symbol table used throughout
FileContent = str  # full text of a source file
Encoding = str  # codec name reported by tokenize.open()
Depth = int  # bracket nesting depth
NodeType = int  # lib2to3 node/leaf type id
LeafID = int  # id() of a Leaf, used as a dict key
Priority = int  # delimiter split priority (see *_PRIORITY constants below)
LN = Union[Leaf, Node]  # any element of the lib2to3 tree
# Console helpers: `out` for normal bold messages, `err` for red error text.
out = partial(click.secho, bold=True, err=True)
err = partial(click.secho, fg='red', err=True)
class NothingChanged(UserWarning):
    """Raised by `format_file` when the reformatted code is the same as source.

    Callers treat this as "no rewrite needed" rather than as an error.
    """
class CannotSplit(Exception):
    """A readable split that fits the allotted line length is impossible.

    Raised by the split strategies (`left_hand_split()`, `right_hand_split()`,
    `delimiter_split()`) and handled in `split_line()`, which then tries the
    next strategy or gives up and yields the line unchanged.
    """
@click.command()
@click.option(
    '-l',
    '--line-length',
    type=int,
    default=DEFAULT_LINE_LENGTH,
    # Fixed typo in user-visible --help text ("character" -> "characters").
    help='How many characters per line to allow.',
    show_default=True,
)
@click.option(
    '--fast/--safe',
    is_flag=True,
    help='If --fast given, skip temporary sanity checks. [default: --safe]',
)
@click.version_option(version=__version__)
@click.argument(
    'src',
    nargs=-1,
    type=click.Path(exists=True, file_okay=True, dir_okay=True, readable=True),
)
@click.pass_context
def main(ctx: click.Context, line_length: int, fast: bool, src: List[str]) -> None:
    """The uncompromising code formatter."""
    # Expand directory arguments into the Python files they contain; explicit
    # file arguments are taken as-is regardless of extension.
    sources: List[Path] = []
    for s in src:
        p = Path(s)
        if p.is_dir():
            sources.extend(gen_python_files_in_dir(p))
        elif p.is_file():
            # if a file was explicitly given, we don't care about its extension
            sources.append(p)
        else:
            err(f'invalid path: {s}')
    if len(sources) == 0:
        ctx.exit(0)
    elif len(sources) == 1:
        # Single file: format synchronously in this process.
        p = sources[0]
        report = Report()
        try:
            changed = format_file_in_place(p, line_length=line_length, fast=fast)
            report.done(p, changed)
        except Exception as exc:
            # Top-level boundary: record the failure in the report instead of
            # crashing; the exit code reflects it.
            report.failed(p, str(exc))
        ctx.exit(report.return_code)
    else:
        # Many files: fan the work out over a process pool driven by asyncio.
        loop = asyncio.get_event_loop()
        executor = ProcessPoolExecutor(max_workers=os.cpu_count())
        return_code = 1
        try:
            return_code = loop.run_until_complete(
                schedule_formatting(sources, line_length, fast, loop, executor)
            )
        finally:
            loop.close()
        ctx.exit(return_code)
async def schedule_formatting(
    sources: List[Path],
    line_length: int,
    fast: bool,
    loop: BaseEventLoop,
    executor: Executor,
) -> int:
    """Run `format_file_in_place` on every path in `sources` via `executor`.

    Waits for all tasks, cancels any that did not complete, aggregates the
    outcomes into a `Report`, prints it, and returns the report's exit code.
    """
    tasks = {
        src: loop.run_in_executor(
            executor, format_file_in_place, src, line_length, fast
        )
        for src in sources
    }
    await asyncio.wait(tasks.values())
    cancelled = []
    report = Report()
    for src, task in tasks.items():
        if not task.done():
            report.failed(src, 'timed out, cancelling')
            task.cancel()
            cancelled.append(task)
        elif task.exception():
            report.failed(src, str(task.exception()))
        else:
            # task.result() is the bool returned by format_file_in_place.
            report.done(src, task.result())
    if cancelled:
        # Give cancelled tasks a moment to unwind before reporting.
        await asyncio.wait(cancelled, timeout=2)
    out('All done! ✨ 🍰 ✨')
    click.echo(str(report))
    return report.return_code
def format_file_in_place(src: Path, line_length: int, fast: bool) -> bool:
    """Reformat `src` on disk. Return True if the file was changed."""
    try:
        new_contents, encoding = format_file(src, line_length=line_length, fast=fast)
    except NothingChanged:
        return False

    with open(src, "w", encoding=encoding) as dst_buffer:
        dst_buffer.write(new_contents)
    return True
def format_file(
    src: Path, line_length: int, fast: bool
) -> Tuple[FileContent, Encoding]:
    """Reformat `src` and return the new contents plus the file's encoding.

    Raises NothingChanged when the file is blank or already formatted.
    Unless `fast` is given, sanity-checks the result before returning it.
    """
    with tokenize.open(src) as src_buffer:
        src_contents = src_buffer.read()
    if not src_contents.strip():
        raise NothingChanged(src)

    dst_contents = format_str(src_contents, line_length=line_length)
    if dst_contents == src_contents:
        raise NothingChanged(src)

    if not fast:
        # Verify the reformat is AST-equivalent and idempotent.
        assert_equivalent(src_contents, dst_contents)
        assert_stable(src_contents, dst_contents, line_length=line_length)
    return dst_contents, src_buffer.encoding
def format_str(src_contents: str, line_length: int) -> FileContent:
    """Reformat a string of Python source code and return the result."""
    tree = lib2to3_parse(src_contents)
    output = ""
    pending_comments: List[Line] = []
    generator = LineGenerator()
    tracker = EmptyLineTracker()
    empty = Line()
    after = 0
    for current_line in generator.visit(tree):
        # Emit blank lines owed from the previous line, then any owed
        # before the current one.
        output += str(empty) * after
        before, after = tracker.maybe_empty_lines(current_line)
        output += str(empty) * before
        if current_line.is_comment:
            # Hold standalone comments until the next non-comment line.
            pending_comments.append(current_line)
        else:
            for comment_line in pending_comments:
                output += str(comment_line)
            pending_comments = []
            for line in split_line(current_line, line_length=line_length):
                output += str(line)
    for comment_line in pending_comments:
        output += str(comment_line)
    return output
def lib2to3_parse(src_txt: str) -> Node:
    """Given a string with source, return the lib2to3 Node.

    Always feeds the driver a trailing newline (matching the file's dominant
    newline style) because the grammar requires one.

    Raises ValueError with the offending line when the source cannot be parsed.
    """
    grammar = pygram.python_grammar_no_print_statement
    drv = driver.Driver(grammar, pytree.convert)
    # Bug fix: the previous `src_txt[-1] != '\n'` check raised IndexError for
    # an empty string; `endswith` handles "" and is otherwise equivalent.
    if not src_txt.endswith('\n'):
        nl = '\r\n' if '\r\n' in src_txt[:1024] else '\n'
        src_txt += nl
    try:
        result = drv.parse_string(src_txt, True)
    except ParseError as pe:
        lineno, column = pe.context[1]
        lines = src_txt.splitlines()
        try:
            faulty_line = lines[lineno - 1]
        except IndexError:
            faulty_line = "<line number missing in source>"
        raise ValueError(f"Cannot parse: {lineno}:{column}: {faulty_line}") from None

    if isinstance(result, Leaf):
        # A bare leaf (e.g. a single NEWLINE) still needs a file_input root.
        result = Node(syms.file_input, [result])
    return result
def lib2to3_unparse(node: Node) -> str:
    """Given a lib2to3 node, return its string representation."""
    return str(node)
T = TypeVar('T')  # generic type yielded by Visitor subclasses
class Visitor(Generic[T]):
    """Basic lib2to3 visitor that yields things while walking a tree."""

    def visit(self, node: LN) -> Iterator[T]:
        """Dispatch to `visit_<type name>` when defined, else `visit_default`."""
        # Token ids live below 256; larger values are grammar symbols.
        name = (
            token.tok_name[node.type]
            if node.type < 256
            else type_repr(node.type)
        )
        handler = getattr(self, f'visit_{name}', self.visit_default)
        yield from handler(node)

    def visit_default(self, node: LN) -> Iterator[T]:
        """By default recurse into children; leaves yield nothing."""
        if isinstance(node, Node):
            for child in node.children:
                yield from self.visit(child)
@dataclass
class DebugVisitor(Visitor[T]):
    """Visitor that pretty-prints the tree (types, prefixes, values) in color."""
    # current indentation level of the printed tree
    tree_depth: int = attrib(default=0)
    def visit_default(self, node: LN) -> Iterator[T]:
        indent = ' ' * (2 * self.tree_depth)
        if isinstance(node, Node):
            _type = type_repr(node.type)
            out(f'{indent}{_type}', fg='yellow')
            self.tree_depth += 1
            for child in node.children:
                yield from self.visit(child)
            self.tree_depth -= 1
            # Closing marker mirrors the opening one, XML-style.
            out(f'{indent}/{_type}', fg='yellow', bold=False)
        else:
            _type = token.tok_name.get(node.type, str(node.type))
            out(f'{indent}{_type}', fg='blue', nl=False)
            if node.prefix:
                # We don't have to handle prefixes for `Node` objects since
                # that delegates to the first child anyway.
                out(f' {node.prefix!r}', fg='green', bold=False, nl=False)
            out(f' {node.value!r}', fg='blue', bold=False)
# All Python keywords.
KEYWORDS = set(keyword.kwlist)
# Token types the line generator treats as pure layout, not content.
WHITESPACE = {token.DEDENT, token.INDENT, token.NEWLINE}
# Keywords after which an extra empty line is emitted (see EmptyLineTracker).
FLOW_CONTROL = {'return', 'raise', 'break', 'continue'}
# Grammar symbols for compound statements whose keywords start new lines.
STATEMENT = {
    syms.if_stmt,
    syms.while_stmt,
    syms.for_stmt,
    syms.try_stmt,
    syms.except_clause,
    syms.with_stmt,
    syms.funcdef,
    syms.classdef,
}
# Fake token type for comments that occupy a line of their own
# (see `generate_comments()`).
STANDALONE_COMMENT = 153
LOGIC_OPERATORS = {'and', 'or'}
COMPARATORS = {
    token.LESS,
    token.GREATER,
    token.EQEQUAL,
    token.NOTEQUAL,
    token.LESSEQUAL,
    token.GREATEREQUAL,
}
MATH_OPERATORS = {
    token.PLUS,
    token.MINUS,
    token.STAR,
    token.SLASH,
    token.VBAR,
    token.AMPER,
    token.PERCENT,
    token.CIRCUMFLEX,
    token.LEFTSHIFT,
    token.RIGHTSHIFT,
    token.DOUBLESTAR,
    token.DOUBLESLASH,
}
# Delimiter split priorities; higher numbers are preferred split points
# (returned by `is_delimiter()`, consumed by `delimiter_split()`).
COMPREHENSION_PRIORITY = 20
COMMA_PRIORITY = 10
LOGIC_PRIORITY = 5
STRING_PRIORITY = 4
COMPARATOR_PRIORITY = 3
MATH_PRIORITY = 1
@dataclass
class BracketTracker:
    """Tracks bracket nesting and delimiter priorities across one line."""
    # current bracket nesting depth
    depth: int = attrib(default=0)
    # open brackets waiting for their counterpart, keyed by (depth, close type)
    bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = attrib(default=Factory(dict))
    # split priority per leaf id, for depth-0 delimiters only
    delimiters: Dict[LeafID, Priority] = attrib(default=Factory(dict))
    # the leaf seen on the previous `mark()` call
    previous: Optional[Leaf] = attrib(default=None)
    def mark(self, leaf: Leaf) -> None:
        """Record `leaf`, pairing brackets and noting split priorities.

        Closing brackets get `opening_bracket` set to their counterpart; every
        non-comment leaf gets `bracket_depth`.  At depth 0, delimiter
        priorities are recorded for later use by `delimiter_split()`.
        """
        if leaf.type == token.COMMENT:
            return
        if leaf.type in CLOSING_BRACKETS:
            self.depth -= 1
            opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
            leaf.opening_bracket = opening_bracket  # type: ignore
        leaf.bracket_depth = self.depth  # type: ignore
        if self.depth == 0:
            delim = is_delimiter(leaf)
            if delim:
                self.delimiters[id(leaf)] = delim
            elif self.previous is not None:
                # Implicit delimiters: the split point is the *previous* leaf
                # (between adjacent strings, or before comprehension keywords).
                if leaf.type == token.STRING and self.previous.type == token.STRING:
                    self.delimiters[id(self.previous)] = STRING_PRIORITY
                elif (
                    leaf.type == token.NAME and
                    leaf.value == 'for' and
                    leaf.parent and
                    leaf.parent.type in {syms.comp_for, syms.old_comp_for}
                ):
                    self.delimiters[id(self.previous)] = COMPREHENSION_PRIORITY
                elif (
                    leaf.type == token.NAME and
                    leaf.value == 'if' and
                    leaf.parent and
                    leaf.parent.type in {syms.comp_if, syms.old_comp_if}
                ):
                    self.delimiters[id(self.previous)] = COMPREHENSION_PRIORITY
        if leaf.type in OPENING_BRACKETS:
            self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf
            self.depth += 1
        self.previous = leaf
    def any_open_brackets(self) -> bool:
        """Returns True if there is an yet unmatched open bracket on the line."""
        return bool(self.bracket_match)
    def max_priority(self, exclude: Iterable[LeafID] = ()) -> int:
        """Returns the highest priority of a delimiter found on the line.

        Values are consistent with what `is_delimiter()` returns.
        Raises ValueError when no delimiters remain after `exclude`.
        """
        return max(v for k, v in self.delimiters.items() if k not in exclude)
@dataclass
class Line:
    """One logical line of output: a list of leaves plus formatting state."""
    # indentation depth of this line
    depth: int = attrib(default=0)
    leaves: List[Leaf] = attrib(default=Factory(list))
    # trailing comments keyed by id() of the leaf they follow
    comments: Dict[LeafID, Leaf] = attrib(default=Factory(dict))
    bracket_tracker: BracketTracker = attrib(default=Factory(BracketTracker))
    # True when this line lives inside brackets of a line being split
    inside_brackets: bool = attrib(default=False)
    def append(self, leaf: Leaf, preformatted: bool = False) -> None:
        """Add `leaf` to the line, computing its whitespace prefix unless
        `preformatted` is given (used when rebuilding lines during splits)."""
        has_value = leaf.value.strip()
        if not has_value:
            return
        if self.leaves and not preformatted:
            # Note: at this point leaf.prefix should be empty except for
            # imports, for which we only preserve newlines.
            leaf.prefix += whitespace(leaf)
        if self.inside_brackets or not preformatted:
            self.bracket_tracker.mark(leaf)
            self.maybe_remove_trailing_comma(leaf)
            if self.maybe_adapt_standalone_comment(leaf):
                return
        if not self.append_comment(leaf):
            self.leaves.append(leaf)
    @property
    def is_comment(self) -> bool:
        # True when the line is a standalone comment.
        return bool(self) and self.leaves[0].type == STANDALONE_COMMENT
    @property
    def is_decorator(self) -> bool:
        return bool(self) and self.leaves[0].type == token.AT
    @property
    def is_import(self) -> bool:
        return bool(self) and is_import(self.leaves[0])
    @property
    def is_class(self) -> bool:
        return (
            bool(self) and
            self.leaves[0].type == token.NAME and
            self.leaves[0].value == 'class'
        )
    @property
    def is_def(self) -> bool:
        """Also returns True for async defs."""
        try:
            first_leaf = self.leaves[0]
        except IndexError:
            return False
        try:
            second_leaf: Optional[Leaf] = self.leaves[1]
        except IndexError:
            second_leaf = None
        return (
            (first_leaf.type == token.NAME and first_leaf.value == 'def') or
            (
                first_leaf.type == token.NAME and
                first_leaf.value == 'async' and
                second_leaf is not None and
                second_leaf.type == token.NAME and
                second_leaf.value == 'def'
            )
        )
    @property
    def is_flow_control(self) -> bool:
        # return / raise / break / continue
        return (
            bool(self) and
            self.leaves[0].type == token.NAME and
            self.leaves[0].value in FLOW_CONTROL
        )
    @property
    def is_yield(self) -> bool:
        return (
            bool(self) and
            self.leaves[0].type == token.NAME and
            self.leaves[0].value == 'yield'
        )
    def maybe_remove_trailing_comma(self, closing: Leaf) -> bool:
        """Drop a trailing comma before `closing` when it is safe to do so.

        Returns True if a comma was removed.
        """
        if not (
            self.leaves and
            self.leaves[-1].type == token.COMMA and
            closing.type in CLOSING_BRACKETS
        ):
            return False
        if closing.type == token.RSQB or closing.type == token.RBRACE:
            self.leaves.pop()
            return True
        # For parens let's check if it's safe to remove the comma. If the
        # trailing one is the only one, we might mistakenly change a tuple
        # into a different type by removing the comma.
        depth = closing.bracket_depth + 1  # type: ignore
        commas = 0
        opening = closing.opening_bracket  # type: ignore
        for _opening_index, leaf in enumerate(self.leaves):
            if leaf is opening:
                break
        else:
            return False
        for leaf in self.leaves[_opening_index + 1:]:
            if leaf is closing:
                break
            bracket_depth = leaf.bracket_depth  # type: ignore
            if bracket_depth == depth and leaf.type == token.COMMA:
                commas += 1
        if commas > 1:
            self.leaves.pop()
            return True
        return False
    def maybe_adapt_standalone_comment(self, comment: Leaf) -> bool:
        """Hack a standalone comment to act as a trailing comment for line splitting.

        If this line has brackets and a standalone `comment`, we need to adapt
        it to be able to still reformat the line.

        This is not perfect, the line to which the standalone comment gets
        appended will appear "too long" when splitting.
        """
        if not (
            comment.type == STANDALONE_COMMENT and
            self.bracket_tracker.any_open_brackets()
        ):
            return False
        comment.type = token.COMMENT
        comment.prefix = '\n' + '    ' * (self.depth + 1)
        return self.append_comment(comment)
    def append_comment(self, comment: Leaf) -> bool:
        """Attach `comment` as trailing the last non-delimiter leaf.

        Returns True when the comment was consumed; False means the caller
        should handle the (possibly re-typed) leaf itself.
        """
        if comment.type != token.COMMENT:
            return False
        try:
            after = id(self.last_non_delimiter())
        except LookupError:
            # No leaf to trail: turn it back into a standalone comment.
            comment.type = STANDALONE_COMMENT
            comment.prefix = ''
            return False
        else:
            if after in self.comments:
                # Merge consecutive trailing comments for the same leaf.
                self.comments[after].value += str(comment)
            else:
                self.comments[after] = comment
            return True
    def last_non_delimiter(self) -> Leaf:
        """Return the rightmost leaf that is not a delimiter (LookupError if none)."""
        for i in range(len(self.leaves)):
            last = self.leaves[-i - 1]
            if not is_delimiter(last):
                return last
        raise LookupError("No non-delimiters found")
    def __str__(self) -> str:
        """Render the line, including indentation and trailing comments."""
        if not self:
            return '\n'
        indent = '    ' * self.depth
        leaves = iter(self.leaves)
        first = next(leaves)
        res = f'{first.prefix}{indent}{first.value}'
        for leaf in leaves:
            res += str(leaf)
        for comment in self.comments.values():
            res += str(comment)
        return res + '\n'
    def __bool__(self) -> bool:
        # A line is truthy when it carries any leaves or comments.
        return bool(self.leaves or self.comments)
@dataclass
class EmptyLineTracker:
    """Provides a stateful method that returns the number of potential extra
    empty lines needed before and after the currently processed line.

    Note: this tracker works on lines that haven't been split yet.
    """
    previous_line: Optional[Line] = attrib(default=None)
    # empty lines already emitted after the previous line
    previous_after: int = attrib(default=0)
    # depths of def/class blocks currently open
    previous_defs: List[int] = attrib(default=Factory(list))
    def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
        """Returns the number of extra empty lines before and after the `current_line`.

        This is for separating `def`, `async def` and `class` with extra empty lines
        (two on module-level), as well as providing an extra empty line after flow
        control keywords to make them more prominent.
        """
        before, after = self._maybe_empty_lines(current_line)
        self.previous_after = after
        self.previous_line = current_line
        return before, after
    def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]:
        before = 0
        depth = current_line.depth
        # Pop defs we have dedented out of; leaving one owes blank lines.
        while self.previous_defs and self.previous_defs[-1] >= depth:
            self.previous_defs.pop()
            before = (1 if depth else 2) - self.previous_after
        is_decorator = current_line.is_decorator
        if is_decorator or current_line.is_def or current_line.is_class:
            if not is_decorator:
                self.previous_defs.append(depth)
            if self.previous_line is None:
                # Don't insert empty lines before the first line in the file.
                return 0, 0
            if self.previous_line and self.previous_line.is_decorator:
                # Don't insert empty lines between decorators.
                return 0, 0
            # Two blank lines at module level, one when nested; discount
            # blank lines already emitted after the previous line.
            newlines = 2
            if current_line.depth:
                newlines -= 1
            newlines -= self.previous_after
            return newlines, 0
        if current_line.is_flow_control:
            return before, 1
        # Separate a run of imports from the code that follows it.
        if (
            self.previous_line and
            self.previous_line.is_import and
            not current_line.is_import and
            depth == self.previous_line.depth
        ):
            return (before or 1), 0
        if (
            self.previous_line and
            self.previous_line.is_yield and
            (not current_line.is_yield or depth != self.previous_line.depth)
        ):
            return (before or 1), 0
        return before, 0
@dataclass
class LineGenerator(Visitor[Line]):
    """Generates reformatted Line objects. Empty lines are not emitted.

    Note: destroys the tree it's visiting by mutating prefixes of its leaves
    in ways that will no longer stringify to valid Python code on the tree.
    """
    current_line: Line = attrib(default=Factory(Line))
    # standalone comments held until the next non-whitespace node
    standalone_comments: List[Leaf] = attrib(default=Factory(list))
    def line(self, indent: int = 0) -> Iterator[Line]:
        """Generate a line.

        If the line is empty, only emit if it makes sense.
        If the line is too long, split it first and then generate.

        If any lines were generated, set up a new current_line.
        """
        if not self.current_line:
            # An empty line is never emitted; just adjust its depth.
            self.current_line.depth += indent
            return  # Line is empty, don't emit. Creating a new one unnecessary.
        complete_line = self.current_line
        self.current_line = Line(depth=complete_line.depth + indent)
        yield complete_line
    def visit_default(self, node: LN) -> Iterator[Line]:
        if isinstance(node, Leaf):
            for comment in generate_comments(node):
                if self.current_line.bracket_tracker.any_open_brackets():
                    # any comment within brackets is subject to splitting
                    self.current_line.append(comment)
                elif comment.type == token.COMMENT:
                    # regular trailing comment
                    self.current_line.append(comment)
                    yield from self.line()
                else:
                    # regular standalone comment, to be processed later (see
                    # docstring in `generate_comments()`
                    self.standalone_comments.append(comment)
            normalize_prefix(node)
            if node.type not in WHITESPACE:
                # Flush held standalone comments, each on its own line,
                # before appending the content-bearing leaf.
                for comment in self.standalone_comments:
                    yield from self.line()
                    self.current_line.append(comment)
                    yield from self.line()
                self.standalone_comments = []
                self.current_line.append(node)
        yield from super().visit_default(node)
    def visit_suite(self, node: Node) -> Iterator[Line]:
        """Body of a statement after a colon."""
        children = iter(node.children)
        # Process newline before indenting. It might contain an inline
        # comment that should go right after the colon.
        newline = next(children)
        yield from self.visit(newline)
        yield from self.line(+1)
        for child in children:
            yield from self.visit(child)
        yield from self.line(-1)
    def visit_stmt(self, node: Node, keywords: Set[str]) -> Iterator[Line]:
        """Visit a statement.

        The relevant Python language keywords for this statement are NAME leaves
        within it.
        """
        for child in node.children:
            if child.type == token.NAME and child.value in keywords:  # type: ignore
                yield from self.line()
            yield from self.visit(child)
    def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
        """A statement without nested statements."""
        is_suite_like = node.parent and node.parent.type in STATEMENT
        if is_suite_like:
            # e.g. `if x: pass` — the body gets its own indented line.
            yield from self.line(+1)
            yield from self.visit_default(node)
            yield from self.line(-1)
        else:
            yield from self.line()
            yield from self.visit_default(node)
    def visit_async_stmt(self, node: Node) -> Iterator[Line]:
        # Emit `async` plus the following keyword on one line, then visit
        # the wrapped statement's children directly.
        yield from self.line()
        children = iter(node.children)
        for child in children:
            yield from self.visit(child)
            if child.type == token.NAME and child.value == 'async':  # type: ignore
                break
        internal_stmt = next(children)
        for child in internal_stmt.children:
            yield from self.visit(child)
    def visit_decorators(self, node: Node) -> Iterator[Line]:
        # Each decorator goes on its own line.
        for child in node.children:
            yield from self.line()
            yield from self.visit(child)
    def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
        # A semicolon ends the current logical line.
        yield from self.line()
    def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]:
        yield from self.visit_default(leaf)
        yield from self.line()
    def __attrs_post_init__(self) -> None:
        """You are in a twisty little maze of passages.

        Installs `visit_*` aliases that route compound statements to
        `visit_stmt` with the right keyword sets.
        """
        v = self.visit_stmt
        self.visit_if_stmt = partial(v, keywords={'if', 'else', 'elif'})
        self.visit_while_stmt = partial(v, keywords={'while', 'else'})
        self.visit_for_stmt = partial(v, keywords={'for', 'else'})
        self.visit_try_stmt = partial(v, keywords={'try', 'except', 'else', 'finally'})
        self.visit_except_clause = partial(v, keywords={'except'})
        self.visit_funcdef = partial(v, keywords={'def'})
        self.visit_with_stmt = partial(v, keywords={'with'})
        self.visit_classdef = partial(v, keywords={'class'})
        self.visit_async_funcdef = self.visit_async_stmt
        self.visit_decorated = self.visit_decorators
# Matching open -> close bracket token types, plus the derived sets used
# by BracketTracker and the split functions.
BRACKET = {token.LPAR: token.RPAR, token.LSQB: token.RSQB, token.LBRACE: token.RBRACE}
OPENING_BRACKETS = set(BRACKET.keys())
CLOSING_BRACKETS = set(BRACKET.values())
BRACKETS = OPENING_BRACKETS | CLOSING_BRACKETS
def whitespace(leaf: Leaf) -> str:
    """Return whitespace prefix if needed for the given `leaf`.

    Chooses between no space, a single space, and a double space (the latter
    only ever before a trailing comment) based on the leaf's token type, its
    parent's grammar symbol, and the leaf that precedes it.
    """
    NO = ''
    SPACE = ' '
    DOUBLESPACE = '  '
    t = leaf.type
    p = leaf.parent
    # Leaf-type-only rules, independent of context.
    if t == token.COLON:
        return NO
    if t == token.COMMA:
        return NO
    if t == token.RPAR:
        return NO
    if t == token.COMMENT:
        return DOUBLESPACE
    if t == STANDALONE_COMMENT:
        return NO
    assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
    # Context-sensitive rules based on the parent grammar symbol.
    if p.type in {syms.parameters, syms.arglist}:
        # untyped function signatures or calls
        if t == token.RPAR:
            return NO
        prev = leaf.prev_sibling
        if not prev or prev.type != token.COMMA:
            return NO
    if p.type == syms.varargslist:
        # lambdas
        if t == token.RPAR:
            return NO
        prev = leaf.prev_sibling
        if prev and prev.type != token.COMMA:
            return NO
    elif p.type == syms.typedargslist:
        # typed function signatures
        prev = leaf.prev_sibling
        if not prev:
            return NO
        if t == token.EQUAL:
            if prev.type != syms.tname:
                return NO
        elif prev.type == token.EQUAL:
            # A bit hacky: if the equal sign has whitespace, it means we
            # previously found it's a typed argument.  So, we're using that, too.
            return prev.prefix
        elif prev.type != token.COMMA:
            return NO
    elif p.type == syms.tname:
        # type names
        prev = leaf.prev_sibling
        if not prev:
            prevp = preceding_leaf(p)
            if not prevp or prevp.type != token.COMMA:
                return NO
    elif p.type == syms.trailer:
        # attributes and calls
        if t == token.LPAR or t == token.RPAR:
            return NO
        prev = leaf.prev_sibling
        if not prev:
            if t == token.DOT:
                prevp = preceding_leaf(p)
                if not prevp or prevp.type != token.NUMBER:
                    return NO
            elif t == token.LSQB:
                return NO
        elif prev.type != token.COMMA:
            return NO
    elif p.type == syms.argument:
        # single argument
        if t == token.EQUAL:
            return NO
        prev = leaf.prev_sibling
        if not prev:
            prevp = preceding_leaf(p)
            if not prevp or prevp.type == token.LPAR:
                return NO
        elif prev.type == token.EQUAL or prev.type == token.DOUBLESTAR:
            return NO
    elif p.type == syms.decorator:
        # decorators
        return NO
    elif p.type == syms.dotted_name:
        prev = leaf.prev_sibling
        if prev:
            return NO
        prevp = preceding_leaf(p)
        if not prevp or prevp.type == token.AT:
            return NO
    elif p.type == syms.classdef:
        if t == token.LPAR:
            return NO
        prev = leaf.prev_sibling
        if prev and prev.type == token.LPAR:
            return NO
    elif p.type == syms.subscript:
        # indexing
        if t == token.COLON:
            return NO
        prev = leaf.prev_sibling
        if not prev or prev.type == token.COLON:
            return NO
    elif p.type in {
        syms.test,
        syms.not_test,
        syms.xor_expr,
        syms.or_test,
        syms.and_test,
        syms.arith_expr,
        syms.shift_expr,
        syms.yield_expr,
        syms.term,
        syms.power,
        syms.comparison,
    }:
        # various arithmetic and logic expressions
        prev = leaf.prev_sibling
        if not prev:
            prevp = preceding_leaf(p)
            if not prevp or prevp.type in OPENING_BRACKETS:
                return NO
            if prevp.type == token.EQUAL:
                if prevp.parent and prevp.parent.type in {
                    syms.varargslist, syms.parameters, syms.arglist, syms.argument
                }:
                    return NO
        return SPACE
    elif p.type == syms.atom:
        if t in CLOSING_BRACKETS:
            return NO
        prev = leaf.prev_sibling
        if not prev:
            prevp = preceding_leaf(p)
            if not prevp:
                return NO
            if prevp.type in OPENING_BRACKETS:
                return NO
            if prevp.type == token.EQUAL:
                # keyword-argument `=` gets no surrounding whitespace
                if prevp.parent and prevp.parent.type in {
                    syms.varargslist, syms.parameters, syms.arglist, syms.argument
                }:
                    return NO
            if prevp.type == token.DOUBLESTAR:
                # `**kwargs` unpacking gets no space after the stars
                if prevp.parent and prevp.parent.type in {
                    syms.varargslist, syms.parameters, syms.arglist, syms.dictsetmaker
                }:
                    return NO
        elif prev.type in OPENING_BRACKETS:
            return NO
        elif t == token.DOT:
            # dots, but not the first one.
            return NO
    elif (
        p.type == syms.listmaker or
        p.type == syms.testlist_gexp or
        p.type == syms.subscriptlist
    ):
        # list interior, including unpacking
        prev = leaf.prev_sibling
        if not prev:
            return NO
    elif p.type == syms.dictsetmaker:
        # dict and set interior, including unpacking
        prev = leaf.prev_sibling
        if not prev:
            return NO
        if prev.type == token.DOUBLESTAR:
            return NO
    elif p.type == syms.factor or p.type == syms.star_expr:
        # unary ops
        prev = leaf.prev_sibling
        if not prev:
            prevp = preceding_leaf(p)
            if not prevp or prevp.type in OPENING_BRACKETS:
                return NO
            prevp_parent = prevp.parent
            assert prevp_parent is not None
            if prevp.type == token.COLON and prevp_parent.type in {
                syms.subscript, syms.sliceop
            }:
                return NO
            elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
                return NO
        elif t == token.NAME or t == token.NUMBER:
            return NO
    elif p.type == syms.import_from and t == token.NAME:
        prev = leaf.prev_sibling
        if prev and prev.type == token.DOT:
            return NO
    elif p.type == syms.sliceop:
        return NO
    # Default: a single space between tokens.
    return SPACE
def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]:
    """Return the leaf that textually precedes `node`, or None."""
    current = node
    while current:
        sibling = current.prev_sibling
        if not sibling:
            # No earlier sibling at this level; climb up and retry.
            current = current.parent
            continue

        if isinstance(sibling, Leaf):
            return sibling

        # The sibling is a Node: its last leaf is the preceding one.
        try:
            return list(sibling.leaves())[-1]
        except IndexError:
            return None

    return None
def is_delimiter(leaf: Leaf) -> int:
    """Return the split priority of `leaf`, or 0 when it is not a delimiter.

    Higher numbers are higher priority.
    """
    if leaf.type == token.COMMA:
        return COMMA_PRIORITY

    if leaf.type == token.NAME and leaf.value in LOGIC_OPERATORS:
        return LOGIC_PRIORITY

    if leaf.type in COMPARATORS:
        return COMPARATOR_PRIORITY

    # Math operators count only outside unary/star expressions.
    is_math_delim = (
        leaf.type in MATH_OPERATORS and
        leaf.parent and
        leaf.parent.type not in {syms.factor, syms.star_expr}
    )
    return MATH_PRIORITY if is_math_delim else 0
def generate_comments(leaf: Leaf) -> Iterator[Leaf]:
    """Clean the prefix of the `leaf` and yield comment leaves parsed from it.

    Comments in lib2to3 are shoved into the whitespace prefix. This happens
    in `pgen2/driver.py:Driver.parse_tokens()`. This was a brilliant implementation
    move because it does away with modifying the grammar to include all the
    possible places in which comments can be placed.

    The sad consequence for us though is that comments don't "belong" anywhere.
    This is why this function generates simple parentless Leaf objects for
    comments. We simply don't know what the correct parent should be.

    No matter though, we can live without this. We really only need to
    differentiate between inline and standalone comments. The latter don't
    share the line with any code.

    Inline comments are emitted as regular token.COMMENT leaves. Standalone
    are emitted with a fake STANDALONE_COMMENT token identifier.
    """
    prefix = leaf.prefix
    if '#' not in prefix:
        # Also covers an empty prefix: nothing to extract.
        return

    leading_ws, content = prefix.split('#', 1)
    content = content.rstrip()
    if content and content[0] not in {' ', '!', '#'}:
        # Normalize `#comment` to `# comment` (shebangs and `##` untouched).
        content = ' ' + content
    standalone = (
        '\n' in leading_ws or '\n' in content or leaf.type == token.DEDENT
    )
    if not standalone:
        # simple trailing comment
        yield Leaf(token.COMMENT, value='#' + content)
        return

    for comment_line in ('#' + content).split('\n'):
        comment_line = comment_line.lstrip()
        if comment_line.startswith('#'):
            yield Leaf(STANDALONE_COMMENT, comment_line)
def split_line(line: Line, line_length: int, inner: bool = False) -> Iterator[Line]:
    """Splits a `line` into potentially many lines.

    They should fit in the allotted `line_length` but might not be able to.
    `inner` signifies that there were a pair of brackets somewhere around the
    current `line`, possibly transitively. This means we can fallback to splitting
    by delimiters if the LHS/RHS don't yield any results.
    """
    line_str = str(line).strip('\n')
    if len(line_str) <= line_length and '\n' not in line_str:
        # Already fits on one line; emit unchanged.
        yield line
        return

    # Pick the split strategies to try, in order.
    if line.is_def:
        split_funcs = [left_hand_split]
    elif line.inside_brackets:
        split_funcs = [delimiter_split]
        if '\n' not in line_str:
            # Only attempt RHS if we don't have multiline strings or comments
            # on this line.
            split_funcs.append(right_hand_split)
    else:
        split_funcs = [right_hand_split]
    for split_func in split_funcs:
        # We are accumulating lines in `result` because we might want to abort
        # mission and return the original line in the end, or attempt a different
        # split altogether.
        result: List[Line] = []
        try:
            for l in split_func(line):
                if str(l).strip('\n') == line_str:
                    raise CannotSplit("Split function returned an unchanged result")

                # Recursively split the parts; `inner` allows delimiter splits.
                result.extend(split_line(l, line_length=line_length, inner=True))
        # Fixed lint issue: the bound exception (`as cs`) was never used.
        except CannotSplit:
            continue

        else:
            yield from result
            break
    else:
        # Every strategy failed; emit the line unchanged.
        yield line
def left_hand_split(line: Line) -> Iterator[Line]:
    """Split line into many lines, starting with the first matching bracket pair.

    Produces up to three lines: head (up to and including the opening
    bracket), body (bracket contents, indented), and tail (the closing
    bracket onward).

    Note: this usually looks weird, only use this for function definitions.
    Prefer RHS otherwise.
    """
    head = Line(depth=line.depth)
    body = Line(depth=line.depth + 1, inside_brackets=True)
    tail = Line(depth=line.depth)
    tail_leaves: List[Leaf] = []
    body_leaves: List[Leaf] = []
    head_leaves: List[Leaf] = []
    current_leaves = head_leaves
    matching_bracket = None
    # Single forward pass: route leaves into head, then body, then tail.
    for leaf in line.leaves:
        if (
            current_leaves is body_leaves and
            leaf.type in CLOSING_BRACKETS and
            leaf.opening_bracket is matching_bracket  # type: ignore
        ):
            current_leaves = tail_leaves
        current_leaves.append(leaf)
        if current_leaves is head_leaves:
            if leaf.type in OPENING_BRACKETS:
                matching_bracket = leaf
                current_leaves = body_leaves
    # Since body is a new indent level, remove spurious leading whitespace.
    if body_leaves:
        normalize_prefix(body_leaves[0])
    # Build the new lines.
    for result, leaves in (
        (head, head_leaves), (body, body_leaves), (tail, tail_leaves)
    ):
        for leaf in leaves:
            result.append(leaf, preformatted=True)
            comment_after = line.comments.get(id(leaf))
            if comment_after:
                result.append(comment_after, preformatted=True)
    # Check if the split succeeded.
    # NOTE(review): unlike right_hand_split, tail_len here is measured
    # without strip('\n') — confirm this asymmetry is intentional.
    tail_len = len(str(tail))
    if not body:
        if tail_len == 0:
            raise CannotSplit("Splitting brackets produced the same line")

        elif tail_len < 3:
            raise CannotSplit(
                f"Splitting brackets on an empty body to save "
                f"{tail_len} characters is not worth it"
            )

    for result in (head, body, tail):
        if result:
            yield result
def right_hand_split(line: Line) -> Iterator[Line]:
    """Split line into many lines, starting with the last matching bracket pair."""
    head = Line(depth=line.depth)
    body = Line(depth=line.depth + 1, inside_brackets=True)
    tail = Line(depth=line.depth)
    tail_leaves: List[Leaf] = []
    body_leaves: List[Leaf] = []
    head_leaves: List[Leaf] = []
    # Walk the line right-to-left: collect the tail first, then the body,
    # then the head, and reverse each list afterwards.
    current_leaves = tail_leaves
    opening_bracket = None  # opener matching the last closing bracket seen
    for leaf in reversed(line.leaves):
        if current_leaves is body_leaves:
            if leaf is opening_bracket:
                # Reached the opener of the last bracket pair; the opener
                # itself and everything before it belong to the head.
                current_leaves = head_leaves
        current_leaves.append(leaf)
        if current_leaves is tail_leaves:
            if leaf.type in CLOSING_BRACKETS:
                # The last closing bracket ends the tail; remember its opener
                # so we know where the body stops.
                opening_bracket = leaf.opening_bracket  # type: ignore
                current_leaves = body_leaves
    # Restore left-to-right order after the reversed traversal.
    tail_leaves.reverse()
    body_leaves.reverse()
    head_leaves.reverse()
    # Since body is a new indent level, remove spurious leading whitespace.
    if body_leaves:
        normalize_prefix(body_leaves[0])
    # Build the new lines.
    for result, leaves in (
        (head, head_leaves), (body, body_leaves), (tail, tail_leaves)
    ):
        for leaf in leaves:
            result.append(leaf, preformatted=True)
            # Re-attach any comment that trailed this leaf on the original line.
            comment_after = line.comments.get(id(leaf))
            if comment_after:
                result.append(comment_after, preformatted=True)
    # Check if the split succeeded.
    tail_len = len(str(tail).strip('\n'))
    if not body:
        if tail_len == 0:
            raise CannotSplit("Splitting brackets produced the same line")
        elif tail_len < 3:
            raise CannotSplit(
                f"Splitting brackets on an empty body to save "
                f"{tail_len} characters is not worth it"
            )
    for result in (head, body, tail):
        if result:
            yield result
def delimiter_split(line: Line) -> Iterator[Line]:
    """Split according to delimiters of the highest priority.
    This kind of split doesn't increase indentation.
    """
    try:
        last_leaf = line.leaves[-1]
    except IndexError:
        raise CannotSplit("Line empty")
    delimiters = line.bracket_tracker.delimiters
    try:
        # Exclude the trailing leaf so a delimiter at the very end of the
        # line (e.g. a trailing comma) doesn't count towards the priority.
        delimiter_priority = line.bracket_tracker.max_priority(exclude={id(last_leaf)})
    except ValueError:
        raise CannotSplit("No delimiters found")
    current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
    for leaf in line.leaves:
        current_line.append(leaf, preformatted=True)
        # Re-attach any comment that trailed this leaf on the original line.
        comment_after = line.comments.get(id(leaf))
        if comment_after:
            current_line.append(comment_after, preformatted=True)
        leaf_priority = delimiters.get(id(leaf))
        if leaf_priority == delimiter_priority:
            # Emit a new line right after every highest-priority delimiter.
            normalize_prefix(current_line.leaves[0])
            yield current_line
            current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets)
    if current_line:
        if (
            delimiter_priority == COMMA_PRIORITY and
            current_line.leaves[-1].type != token.COMMA
        ):
            # When splitting on commas, add the "magic" trailing comma.
            current_line.append(Leaf(token.COMMA, ','))
        normalize_prefix(current_line.leaves[0])
        yield current_line
def is_import(leaf: Leaf) -> bool:
    """Returns True if the given leaf starts an import statement."""
    parent = leaf.parent
    # Only a NAME token with a known parent can open an import statement.
    if leaf.type != token.NAME or not parent:
        return False

    return (
        (leaf.value == 'import' and parent.type == syms.import_name)
        or (leaf.value == 'from' and parent.type == syms.import_from)
    )
def normalize_prefix(leaf: Leaf) -> None:
    """Leave existing extra newlines for imports. Remove everything else."""
    if not is_import(leaf):
        leaf.prefix = ''
        return

    before_comment = leaf.prefix.split('#', 1)
    newlines = before_comment[0].count('\n')
    if len(before_comment) > 1:
        # Skip one newline since it was for a standalone comment.
        newlines -= 1
    leaf.prefix = '\n' * newlines
PYTHON_EXTENSIONS = {'.py'}
BLACKLISTED_DIRECTORIES = {
    'build', 'buck-out', 'dist', '_build', '.git', '.hg', '.mypy_cache', '.tox', '.venv'
}


def gen_python_files_in_dir(path: Path) -> Iterator[Path]:
    """Recursively yield Python source files under `path`, skipping build and
    VCS directories listed in BLACKLISTED_DIRECTORIES."""
    for entry in path.iterdir():
        if entry.is_dir():
            if entry.name not in BLACKLISTED_DIRECTORIES:
                yield from gen_python_files_in_dir(entry)
        elif entry.suffix in PYTHON_EXTENSIONS:
            yield entry
@dataclass
class Report:
    """Provides a reformatting counter."""
    # NOTE(review): fields are declared with attr's `attrib` under @dataclass —
    # confirm this mixture is intended; a plain `= 0` default would be the
    # idiomatic dataclass form.
    change_count: int = attrib(default=0)
    same_count: int = attrib(default=0)
    failure_count: int = attrib(default=0)

    def done(self, src: Path, changed: bool) -> None:
        """Increment the counter for successful reformatting. Write out a message."""
        if changed:
            out(f'reformatted {src}')
            self.change_count += 1
        else:
            out(f'{src} already well formatted, good job.', bold=False)
            self.same_count += 1

    def failed(self, src: Path, message: str) -> None:
        """Increment the counter for failed reformatting. Write out a message."""
        err(f'error: cannot format {src}: {message}')
        self.failure_count += 1

    @property
    def return_code(self) -> int:
        """Which return code should the app use considering the current state."""
        # Any failure makes the whole run report an error status.
        return 1 if self.failure_count else 0

    def __str__(self) -> str:
        """A color report of the current state.
        Use `click.unstyle` to remove colors.
        """
        report = []
        if self.change_count:
            # Pluralize only when more than one file is affected.
            s = 's' if self.change_count > 1 else ''
            report.append(
                click.style(f'{self.change_count} file{s} reformatted', bold=True)
            )
        if self.same_count:
            s = 's' if self.same_count > 1 else ''
            report.append(f'{self.same_count} file{s} left unchanged')
        if self.failure_count:
            s = 's' if self.failure_count > 1 else ''
            report.append(
                click.style(
                    f'{self.failure_count} file{s} failed to reformat', fg='red'
                )
            )
        return ', '.join(report) + '.'
def assert_equivalent(src: str, dst: str) -> None:
    """Raises AssertionError if `src` and `dst` aren't equivalent.
    This is a temporary sanity check until Black becomes stable.
    """
    import ast
    import traceback

    def _v(node: ast.AST, depth: int = 0) -> Iterator[str]:
        """Simple visitor generating strings to compare ASTs by content."""
        yield f"{' ' * depth}{node.__class__.__name__}("
        # Fields are sorted so the textual dump is deterministic and
        # comparable between the two trees.
        for field in sorted(node._fields):
            try:
                value = getattr(node, field)
            except AttributeError:
                continue
            yield f"{' ' * (depth+1)}{field}="
            if isinstance(value, list):
                for item in value:
                    if isinstance(item, ast.AST):
                        yield from _v(item, depth + 2)
            elif isinstance(value, ast.AST):
                yield from _v(value, depth + 2)
            else:
                # Leaf value: dump its repr together with its type name.
                yield f"{' ' * (depth+2)}{value!r}, # {value.__class__.__name__}"
        yield f"{' ' * depth}) # /{node.__class__.__name__}"
    # The source must parse; if it doesn't, the problem is the input, not us.
    try:
        src_ast = ast.parse(src)
    except Exception as exc:
        raise AssertionError(f"cannot parse source: {exc}") from None
    # The formatted output failing to parse is an internal error; keep the
    # offending output in a log file for bug reports.
    try:
        dst_ast = ast.parse(dst)
    except Exception as exc:
        log = dump_to_file(''.join(traceback.format_tb(exc.__traceback__)), dst)
        raise AssertionError(
            f"INTERNAL ERROR: Black produced invalid code: {exc}. "
            f"Please report a bug on https://github.com/ambv/black/issues. "
            f"This invalid output might be helpful: {log}",
        ) from None
    # Compare the two ASTs by their textual dumps; any difference means the
    # reformatting changed program semantics.
    src_ast_str = '\n'.join(_v(src_ast))
    dst_ast_str = '\n'.join(_v(dst_ast))
    if src_ast_str != dst_ast_str:
        log = dump_to_file(diff(src_ast_str, dst_ast_str, 'src', 'dst'))
        raise AssertionError(
            f"INTERNAL ERROR: Black produced code that is not equivalent to "
            f"the source. "
            f"Please report a bug on https://github.com/ambv/black/issues. "
            f"This diff might be helpful: {log}",
        ) from None
def assert_stable(src: str, dst: str, line_length: int) -> None:
    """Raises AssertionError if `dst` reformats differently the second time.
    This is a temporary sanity check until Black becomes stable.
    """
    second_pass = format_str(dst, line_length=line_length)
    if dst == second_pass:
        return

    # Keep both diffs in a log file so the instability can be reported.
    log = dump_to_file(
        diff(src, dst, 'source', 'first pass'),
        diff(dst, second_pass, 'first pass', 'second pass'),
    )
    raise AssertionError(
        f"INTERNAL ERROR: Black produced different code on the second pass "
        f"of the formatter. "
        f"Please report a bug on https://github.com/ambv/black/issues. "
        f"This diff might be helpful: {log}",
    ) from None
def dump_to_file(*output: str) -> str:
    """Dumps `output` to a temporary file. Returns path to the file."""
    import tempfile

    # delete=False: the file must survive so the caller can point users at it.
    log_file = tempfile.NamedTemporaryFile(
        mode='w', prefix='blk_', suffix='.log', delete=False
    )
    with log_file:
        for chunk in output:
            log_file.write(chunk)
            log_file.write('\n')
    return log_file.name
def diff(a: str, b: str, a_name: str, b_name: str) -> str:
    """Returns a udiff string between strings `a` and `b`."""
    import difflib

    def as_lines(text: str) -> List[str]:
        # unified_diff expects newline-terminated lines.
        return [line + '\n' for line in text.split('\n')]

    delta = difflib.unified_diff(
        as_lines(a), as_lines(b), fromfile=a_name, tofile=b_name, n=5
    )
    return ''.join(delta)
if __name__ == '__main__':
main()
| [
"lukasz@langa.pl"
] | lukasz@langa.pl |
f349f95dd59e3f2da5fac2f55430ae681541b0d6 | 70f52a9fb99c992c9ee092a6f6be516ec210c7e3 | /app/projects/service.py | b7bff8218065f55559a22898eb6d6fc8dbb20851 | [] | no_license | LilaKIM/arborator-backend | c92634165b52af8af70928ad3662dd399248e864 | ac900ee8df0a4a9d792f12401d7e64e06e2e3d69 | refs/heads/main | 2023-05-30T20:43:40.676374 | 2021-06-21T13:54:27 | 2021-06-21T13:54:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,547 | py | from typing import Dict, List
import json
import base64
from app import db
from flask import abort, current_app
from flask_login import current_user
from .interface import ProjectExtendedInterface, ProjectInterface
from .model import Project, ProjectAccess, ProjectFeature, ProjectMetaFeature, DefaultUserTrees
from ..samples.model import SampleRole
class ProjectService:
    """CRUD helpers for :class:`Project` rows.

    All methods are static and commit their own transaction.
    (A ~50-line commented-out `get_settings_infos` draft was removed as dead code.)
    """

    @staticmethod
    def get_all() -> List[Project]:
        """Return every project stored in the database."""
        return Project.query.all()

    @staticmethod
    def create(new_attrs: ProjectInterface) -> Project:
        """Persist and return a new project built from `new_attrs`."""
        new_project = Project(**new_attrs)
        db.session.add(new_project)
        db.session.commit()
        return new_project

    @staticmethod
    def get_by_name(project_name: str) -> Project:
        """Return the project named `project_name`, or None when absent."""
        return Project.query.filter(Project.project_name == project_name).first()

    @staticmethod
    def update(project: Project, changes) -> Project:
        """Apply the mapping `changes` to `project`, commit, and return it."""
        project.update(changes)
        db.session.commit()
        return project

    @staticmethod
    def delete_by_name(project_name: str) -> str:
        """Delete the project named `project_name`.

        Returns the deleted name, or an empty string when no such project exists.
        """
        project = Project.query.filter(
            Project.project_name == project_name).first()
        if not project:
            return ""
        db.session.delete(project)
        db.session.commit()
        return project_name

    @staticmethod
    def change_image(project_name, value):
        """Set a project image (blob base64) and return the updated project."""
        project = Project.query.filter(
            Project.project_name == project_name).first()
        project.image = value
        db.session.commit()
        return project

    @staticmethod
    def check_if_project_exist(project: Project) -> None:
        """Abort the current request with a 404 when `project` is falsy."""
        if not project:
            message = "There was no such project stored on arborator backend"
            abort(404, {"message": message})
class ProjectAccessService:
    """Manage per-user access rights on projects (1 = guest, 2 = admin)."""

    @staticmethod
    def create(new_attrs) -> ProjectAccess:
        """Persist and return a new project access row."""
        new_project_access = ProjectAccess(**new_attrs)
        db.session.add(new_project_access)
        db.session.commit()
        return new_project_access

    @staticmethod
    def update(project_access: ProjectAccess, changes):
        """Apply `changes` to an existing access row, commit, and return it."""
        project_access.update(changes)
        db.session.commit()
        return project_access

    @staticmethod
    def delete(user_id: str, project_id: int):
        """Delete every access row linking `user_id` to `project_id`.

        Returns `[(project_id, user_id)]` when something was deleted, else `[]`.
        """
        project_access_list = ProjectAccess.query.filter_by(
            user_id=user_id, project_id=project_id
        ).all()
        if not project_access_list:
            return []
        for project_access in project_access_list:
            db.session.delete(project_access)
        db.session.commit()
        return [(project_id, user_id)]

    # TODO : Rename this as `get_by_username` because we are not fetching the user_id
    # ... but the username
    @staticmethod
    def get_by_user_id(user_id: str, project_id: str) -> ProjectAccess:
        """Return the access row for (`user_id`, `project_id`), or None."""
        return ProjectAccess.query.filter_by(
            project_id=project_id, user_id=user_id
        ).first()

    @staticmethod
    def get_admins(project_id: str) -> List[str]:
        """Return the user ids that have admin access (level 2) on the project."""
        # .all() materializes the query; a plain Query object is always truthy,
        # which made the previous `if` check a no-op.
        project_access_list = ProjectAccess.query.filter_by(
            project_id=project_id, access_level=2
        ).all()
        return [project_access.user_id for project_access in project_access_list]

    @staticmethod
    def get_guests(project_id: str) -> List[str]:
        """Return the user ids that have guest access (level 1) on the project."""
        project_access_list = ProjectAccess.query.filter_by(
            project_id=project_id, access_level=1
        ).all()
        return [project_access.user_id for project_access in project_access_list]

    @staticmethod
    def get_users_role(project_id: str) -> Dict[str, List[str]]:
        """Return a mapping with the project's admin and guest user ids."""
        admins = ProjectAccessService.get_admins(project_id)
        guests = ProjectAccessService.get_guests(project_id)
        return {
            "admins": admins,
            "guests": guests,
        }

    @staticmethod
    def require_access_level(project_id, required_access_level) -> None:
        """Abort the request with 403 unless the current user reaches
        `required_access_level` on the project.

        Super admins always pass; anonymous users have access level 0.
        """
        if current_user.is_authenticated and current_user.super_admin:
            # BUG FIX: super admins previously fell through with access_level
            # still 0 and were denied; they must bypass the per-project check.
            return

        access_level = 0
        if current_user.is_authenticated:
            project_access = ProjectAccessService.get_by_user_id(
                current_user.id, project_id
            )
            # BUG FIX: a missing access row used to raise AttributeError
            # (HTTP 500); treat it as "no access" instead.
            if project_access is not None:
                access_level = project_access.access_level.code

        if access_level < required_access_level:
            abort(403)
class ProjectFeatureService:
    """Manage the per-project list of features shown in the annotation UI."""

    @staticmethod
    def create(new_attrs) -> ProjectFeature:
        """Persist and return a new shown-feature row."""
        feature_row = ProjectFeature(**new_attrs)
        db.session.add(feature_row)
        db.session.commit()
        return feature_row

    @staticmethod
    def get_by_project_id(project_id: str) -> List[str]:
        """Return the values of every shown feature of the given project."""
        rows = ProjectFeature.query.filter_by(project_id=project_id).all()
        return [row.value for row in rows] if rows else []

    @staticmethod
    def delete_by_project_id(project_id: str) -> str:
        """Delete every shown-feature row of the project and return its id.

        One row per feature is a heavy representation; a single text field on
        the project settings could replace this table.
        """
        rows = ProjectFeature.query.filter_by(project_id=project_id).all()
        for row in rows:
            db.session.delete(row)
        db.session.commit()
        return project_id
class ProjectMetaFeatureService:
    """Manage the per-project list of metadata features shown in the UI."""

    @staticmethod
    def create(new_attrs) -> ProjectMetaFeature:
        """Persist and return a new shown-metadata-feature row."""
        meta_row = ProjectMetaFeature(**new_attrs)
        db.session.add(meta_row)
        db.session.commit()
        return meta_row

    @staticmethod
    def get_by_project_id(project_id: str) -> List[str]:
        """Return the values of all shown metadata features of the project."""
        rows = ProjectMetaFeature.query.filter_by(project_id=project_id).all()
        return [row.value for row in rows] if rows else []

    @staticmethod
    def delete_by_project_id(project_id: str) -> str:
        """Delete every shown-metadata-feature row of the project and return its id.

        One row per feature is a heavy representation; a single text field on
        the project settings could replace this table.
        """
        rows = ProjectMetaFeature.query.filter_by(project_id=project_id).all()
        for row in rows:
            db.session.delete(row)
        db.session.commit()
        return project_id
| [
"kirianguiller@gmail.com"
] | kirianguiller@gmail.com |
07633b13bd1cf0f0286c52bae03096144bf0adb2 | 868cd4895a8da17a7e3e2c8da0ec9e139f8d0c30 | /keras/keras35_lstm_sequences.py | 13dc52d0ca3c1cfc6a2f3bdc6e3f021efc2c58f9 | [] | no_license | inJAJA/Study | 35d4e410df7b476a4c298664bb99ce9b09bf6296 | c2fd9a1e1f3a31cb3737cbb4891d848cc802f1d4 | refs/heads/master | 2022-12-21T11:41:15.396610 | 2020-09-20T23:51:45 | 2020-09-20T23:51:45 | 263,212,524 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,440 | py |
## LSTM_Sequence : LSTM을 2개 연결하기
from numpy import array
from keras.models import Model
from keras.layers import Dense, LSTM, Input
# 1. 데이터
x = array([[1,2,3],[2,3,4],[3,4,5],[4,5,6],
[5,6,7],[6,7,8],[7,8,9],[8,9,10],
[9,10,11],[11,12,13],
[20,30,40],[30,40,50],[40,50,60],
])
y = array([4,5,6,7,8,9,10,11,12,13,50,60,70]) # (13, ) 벡터
x_predict = array([50, 60, 70]) # (3, )
print('x.shape : ',x.shape) # (13, 3)
print('y.shape : ',y.shape) # (13, ) != (13, 1)
# 벡터 행렬
# x = x.reshape(13, 3, 1)
x = x.reshape(x.shape[0], x.shape[1], 1) # x.shape[0] = 13 / x.shape[1] = 3 / data 1개씩 작업 하겠다.
print(x.shape) # (13, 3, 1)
#2. 모델구성
input1 = Input(shape = (3, 1))
LSTM1 = LSTM(100, return_sequences= True)(input1)
# LSTM2 = LSTM(10)(LSTM1, return_sequences= True)(LSTM1) # return_sequences를 썼으면 무조건 LSTM사용
LSTM2 = LSTM(100)(LSTM1)
dense1 = Dense(50)(LSTM2)
dense2 = Dense(50)(dense1)
dense3 = Dense(50)(dense2)
output1 = Dense(1)(dense3)
model = Model(inputs = input1, outputs = output1)
model.summary()
'''
LSTM = ( , , ) : 3 차원
Dense = ( , ) : 2 차원
# return_sequences : 들어온 원래 차원으로 output
ex) x.shape = (13, 3, 1)
LSTM1 = LSTM( 10 )(dense1)
' 2 '차원으로 output
LSTM1 = LSTM( 10, return_sequence = True )(LSTM2)
(받아 들인) ' 3 '차원으로 output
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 3, 1) 0
_________________________________________________________________
lstm_1 (LSTM) (None, 3, 10) 480
_________________________________________________________________
lstm_2 (LSTM) (None, 10) 840
_________________________________________________________________
dense_1 (Dense) (None, 5) 55
_________________________________________________________________
dense_2 (Dense) (None, 1) 6
=================================================================
# 앞에 output_node가 input_dim(feature)가 된다.
# LSTM_sequences_parameter
:num_param = 4 * ( num_units + input_dim + bias) * num_units
= 4 * (LSTM2_output + LSTM1_output + 1 ) * LSTM2_output
= 4 * ( 10 + 10 + 1 ) * 10
= 840
'''
# EarlyStopping
from keras.callbacks import EarlyStopping
es = EarlyStopping(monitor = 'loss', patience=100, mode = 'min')
#3. 실행
model.compile(optimizer='adam', loss = 'mse')
model.fit(x, y, epochs =10000, batch_size = 13,callbacks = [es]
)
#4. 예측
x_predict = x_predict.reshape(1, 3, 1) # x값 (13, 3, 1)와 동일한 shape로 만들어 주기 위함
# (1, 3, 1) : 확인 1 * 3 * 1 = 3
# x_predict = x_predict.reshape(1, x_predict.shape[0], 1)
print(x_predict)
y_predict = model.predict(x_predict)
print(y_predict)
| [
"zaiin4050@gmail.com"
] | zaiin4050@gmail.com |
4b3016bfcd3071fffd9aa21d51d55071e41cbcdd | 117250ff85300373ee6f51be278563a05527fef7 | /jurivoc/Scripts/test-word2vec.py | ae1084ff60d888380d70c5eca8e9cf1a2f0fa20e | [] | no_license | xianfanZ/thesaurus_juridique | e216686f1e16fd128d3e0c4d2976a1aa1501b615 | 32704986cff712cb40696b6c576760a0a59c01dd | refs/heads/master | 2021-01-21T06:55:26.816080 | 2017-04-14T23:58:49 | 2017-04-14T23:58:49 | 83,296,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | # -*- coding: utf-8 -*-
from gensim.models import word2vec
import logging

# Main program: train word2vec on a legal-domain corpus and probe similarities.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
sentences = word2vec.Text8Corpus("../../Corpus/types/ABUS_DE_CONFIANCE.txt")  # load corpus
model = word2vec.Word2Vec(sentences, size=200)  # 200-dimensional embeddings

# Calculate the similarity of two words.
y1 = model.similarity("somme", "tribunal")
print(u"similarity:", y1)
print("--------\n")

# The 5 most similar words to "l'action" (topn=5).
y2 = model.most_similar("l'action", topn=5)
print(u"similarity words with tribunal\n")
for w in y2:
    print(w[0],w[1])
print("--------\n")

# Find words related to "dossier" by vector similarity.
y3 = model.most_similar(["dossier"])
"zhangxianfan.cheryl@gmail.com"
] | zhangxianfan.cheryl@gmail.com |
a1114a9398ef20a6155681319bce9f66984d1d74 | df7551294967bf85d759545271fe75bd57ad3adf | /Multiclass classification.py | bf49c01595a2860addedd98a55b0c44555a97266 | [] | no_license | raghavkabra/Machine-learninig | c02f3bd8b4cbbab0680109dc43fd69c73eb07280 | ca9b1969be8a89344c16d8d7413404b34f7b6c21 | refs/heads/master | 2021-01-20T15:50:37.477218 | 2016-06-27T09:30:12 | 2016-06-27T09:30:12 | 61,787,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,937 | py | ## 1. Introduction to the data
## 1. Introduction to the data
import pandas as pd
import numpy as np  # FIX: np was used below (np.random.permutation) but never imported
from sklearn.linear_model import LogisticRegression

cars = pd.read_csv("auto.csv")
print(cars.head())
unique_regions = cars["origin"].unique()
print(unique_regions)

## 2. Dummy variables: one-hot encode cylinders and year
dummy_cylinders = pd.get_dummies(cars["cylinders"], prefix="cyl")
cars = pd.concat([cars, dummy_cylinders], axis=1)
print(cars.head())
dummy_years = pd.get_dummies(cars["year"], prefix="year")
cars = pd.concat([cars, dummy_years], axis=1)
cars = cars.drop("year", axis=1)
cars = cars.drop("cylinders", axis=1)
print(cars.head())

## 3. Multiclass classification: shuffle rows, then 70/30 train/test split
shuffled_rows = np.random.permutation(cars.index)
shuffled_cars = cars.iloc[shuffled_rows]
highest_train_row = int(cars.shape[0] * .70)
train = shuffled_cars.iloc[0:highest_train_row]
test = shuffled_cars.iloc[highest_train_row:]

## 4. Training one binary logistic regression per origin (one-vs-all)
unique_origins = cars["origin"].unique()
unique_origins.sort()

models = {}
features = [c for c in train.columns if c.startswith("cyl") or c.startswith("year")]
for origin in unique_origins:
    model = LogisticRegression()
    X_train = train[features]
    y_train = train["origin"] == origin  # binary target: "is this origin?"
    model.fit(X_train, y_train)
    models[origin] = model

## 5. Testing the models: probability of each test row belonging to each origin
# (a duplicate DataFrame initialization was removed here)
testing_probs = pd.DataFrame(columns=unique_origins)
for origin in unique_origins:
    # Select testing features.
    X_test = test[features]
    # Compute probability of observation being in the origin.
    testing_probs[origin] = models[origin].predict_proba(X_test)[:, 1]

## 6. Choose the origin with the highest predicted probability
# (the redundant "Conclusion" section repeating this line was removed)
predicted_origins = testing_probs.idxmax(axis=1)
print(predicted_origins)
"noreply@github.com"
] | noreply@github.com |
723c56869dcbe51563a60e055a7706f3999667c7 | 43c24c890221d6c98e4a45cd63dba4f1aa859f55 | /test/cpython/test_shelve.py | 8f8bff4866c04207c68433d618ae4b4cbfeb0b4c | [
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | jmgc/pyston | c8e4df03c33c6b81d20b7d51a781d9e10148238e | 9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f | refs/heads/master | 2020-12-11T07:51:58.968440 | 2020-09-11T14:38:38 | 2020-09-11T14:38:38 | 39,242,644 | 0 | 0 | NOASSERTION | 2020-09-11T14:38:39 | 2015-07-17T08:09:31 | Python | UTF-8 | Python | false | false | 42 | py | ../../from_cpython/Lib/test/test_shelve.py | [
"kmod@dropbox.com"
] | kmod@dropbox.com |
385482e1ff987a86454cad4be76b6a7843637fba | 3e393878c487013c793a7b7e623359e9dea4670e | /adversarial_robustness_toolbox/art/classifiers/tensorflow.py | ac7d9ed57dff29739f0d705382f20e61978e6df5 | [] | no_license | mattgorb/detecting_erroneous_inputs | 241b25194c53f177259213f0149836eca96b7b5c | 6e191c920a06437703aa46d4a5aa8b3875d90b81 | refs/heads/master | 2021-01-09T19:55:21.082681 | 2020-02-28T06:33:39 | 2020-02-28T06:33:39 | 242,439,913 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 39,283 | py | # MIT License
#
# Copyright (C) IBM Corporation 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements the classifier `TensorFlowClassifier` for TensorFlow models.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import random
import numpy as np
import six
from ..classifiers.classifier import Classifier, ClassifierNeuralNetwork, ClassifierGradients
logger = logging.getLogger(__name__)
class TensorFlowClassifier(ClassifierNeuralNetwork, ClassifierGradients, Classifier):
"""
This class implements a classifier with the TensorFlow framework.
"""
    def __init__(self, input_ph, output, labels_ph=None, train=None, loss=None, learning=None, sess=None,
                 channel_index=3, clip_values=None, defences=None, preprocessing=(0, 1)):
        """
        Initialization specific to TensorFlow models implementation.

        :param input_ph: The input placeholder.
        :type input_ph: `tf.Placeholder`
        :param output: The output layer of the model. This can be logits, probabilities or anything else. Logits
               output should be preferred where possible to ensure attack efficiency.
        :type output: `tf.Tensor`
        :param labels_ph: The labels placeholder of the model. This parameter is necessary when training the model and
               when computing gradients w.r.t. the loss function.
        :type labels_ph: `tf.Tensor`
        :param train: The train tensor for fitting, including an optimizer. Use this parameter only when training the
               model.
        :type train: `tf.Tensor`
        :param loss: The loss function for which to compute gradients. This parameter is necessary when training the
               model and when computing gradients w.r.t. the loss function.
        :type loss: `tf.Tensor`
        :param learning: The placeholder to indicate if the model is training.
        :type learning: `tf.Placeholder` of type bool.
        :param sess: Computation session.
        :type sess: `tf.Session`
        :param channel_index: Index of the axis in data containing the color channels or features.
        :type channel_index: `int`
        :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` representing the minimum and
               maximum values allowed for features. If floats are provided, these will be used as the range of all
               features. If arrays are provided, each value will be considered the bound for a feature, thus
               the shape of clip values needs to match the total number of features.
        :type clip_values: `tuple`
        :param defences: Defences to be activated with the classifier.
        :type defences: `str` or `list(str)`
        :param preprocessing: Tuple of the form `(subtractor, divider)` of floats or `np.ndarray` of values to be
               used for data preprocessing. The first value will be subtracted from the input. The input will then
               be divided by the second one.
        :type preprocessing: `tuple`
        """
        # pylint: disable=E0401
        import tensorflow as tf
        # This class builds a static graph; on a TF2 runtime fall back to the
        # v1 compatibility API and disable eager execution.
        if tf.__version__[0] == '2':
            import tensorflow.compat.v1 as tf
            tf.disable_eager_execution()
        super(TensorFlowClassifier, self).__init__(clip_values=clip_values, channel_index=channel_index,
                                                   defences=defences,
                                                   preprocessing=preprocessing)
        # Number of classes is taken from the last axis of the output tensor;
        # input shape drops the leading batch dimension.
        self._nb_classes = int(output.get_shape()[-1])
        self._input_shape = tuple(input_ph.get_shape().as_list()[1:])
        self._input_ph = input_ph
        self._output = output
        self._labels_ph = labels_ph
        self._train = train
        self._loss = loss
        self._learning = learning
        # Extra feed entries merged into every session run (e.g. dropout flags).
        self._feed_dict = {}
        # Assign session
        if sess is None:
            raise ValueError("A session cannot be None.")
        self._sess = sess
        # Get the internal layers
        self._layer_names = self._get_layers()
        # Get the loss gradients graph
        # Built once here so later gradient queries only run the session.
        if self._loss is not None:
            self._loss_grads = tf.gradients(self._loss, self._input_ph)[0]
def predict(self, x, batch_size=128, **kwargs):
"""
Perform prediction for a batch of inputs.
:param x: Test set.
:type x: `np.ndarray`
:param batch_size: Size of batches.
:type batch_size: `int`
:return: Array of predictions of shape `(num_inputs, nb_classes)`.
:rtype: `np.ndarray`
"""
# Apply preprocessing
x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)
# Run prediction with batch processing
results = np.zeros((x_preprocessed.shape[0], self.nb_classes()), dtype=np.float32)
num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size)))
for m in range(num_batch):
# Batch indexes
begin, end = m * batch_size, min((m + 1) * batch_size, x_preprocessed.shape[0])
# Create feed_dict
feed_dict = {self._input_ph: x_preprocessed[begin:end]}
feed_dict.update(self._feed_dict)
# Run prediction
results[begin:end] = self._sess.run(self._output, feed_dict=feed_dict)
return results
def fit(self, x, y, batch_size=128, nb_epochs=10, **kwargs):
"""
Fit the classifier on the training set `(x, y)`.
:param x: Training data.
:type x: `np.ndarray`
:param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape
(nb_samples,).
:type y: `np.ndarray`
:param batch_size: Size of batches.
:type batch_size: `int`
:param nb_epochs: Number of epochs to use for training.
:type nb_epochs: `int`
:param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for
TensorFlow and providing it takes no effect.
:type kwargs: `dict`
:return: `None`
"""
# Check if train and output_ph available
if self._train is None or self._labels_ph is None:
raise ValueError("Need the training objective and the output placeholder to train the model.")
# Apply preprocessing
x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True)
num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size)))
ind = np.arange(len(x_preprocessed))
# Start training
for _ in range(nb_epochs):
# Shuffle the examples
random.shuffle(ind)
# Train for one epoch
for m in range(num_batch):
i_batch = x_preprocessed[ind[m * batch_size:(m + 1) * batch_size]]
o_batch = y_preprocessed[ind[m * batch_size:(m + 1) * batch_size]]
# Create feed_dict
feed_dict = {self._input_ph: i_batch, self._labels_ph: o_batch}
feed_dict.update(self._feed_dict)
# Run train step
self._sess.run(self._train, feed_dict=feed_dict)
    def fit_generator(self, generator, nb_epochs=20, **kwargs):
        """
        Fit the classifier using the generator that yields batches as specified.

        :param generator: Batch generator providing `(x, y)` for each epoch. If the generator can be used for native
                          training in TensorFlow, it will.
        :type generator: :class:`.DataGenerator`
        :param nb_epochs: Number of epochs to use for training.
        :type nb_epochs: `int`
        :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for
               TensorFlow and providing it takes no effect.
        :type kwargs: `dict`
        :return: `None`
        """
        from art.data_generators import TFDataGenerator

        # Train directly in TensorFlow when the generator is TF-native and no
        # label/input-transforming defences are active.
        if isinstance(generator, TFDataGenerator) and not (
                hasattr(self, 'label_smooth') or hasattr(self, 'feature_squeeze')):
            for _ in range(nb_epochs):
                for _ in range(int(generator.size / generator.batch_size)):
                    i_batch, o_batch = generator.get_batch()

                    # Create feed_dict
                    feed_dict = {self._input_ph: i_batch, self._labels_ph: o_batch}
                    feed_dict.update(self._feed_dict)

                    # Run train step
                    self._sess.run(self._train, feed_dict=feed_dict)

        # NOTE(review): the generic training loop below runs unconditionally,
        # so a TFDataGenerator appears to be trained twice (natively above and
        # again here) — confirm whether an `else:` was intended.
        super(TensorFlowClassifier, self).fit_generator(generator, nb_epochs=nb_epochs, **kwargs)
def class_gradient(self, x, label=None, **kwargs):
    """
    Compute per-class derivatives w.r.t. `x`.

    :param x: Sample input with shape as expected by the model.
    :type x: `np.ndarray`
    :param label: Index of a specific per-class derivative. If an integer is provided, the gradient of that class
                  output is computed for all samples. If multiple values are provided, the first dimension should
                  match the batch size of `x`, and each value will be used as target for its corresponding sample.
                  If `None`, gradients for all classes are computed for each sample.
    :type label: `int` or `list`
    :return: Gradients of shape `(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise
             `(batch_size, 1, input_shape)` when `label` is specified.
    :rtype: `np.ndarray`
    """
    # Check value of label for computing gradients
    if not (label is None or (isinstance(label, (int, np.integer)) and label in range(self.nb_classes()))
            or (isinstance(label, np.ndarray) and len(label.shape) == 1 and (label < self.nb_classes()).all()
                and label.shape[0] == x.shape[0])):
        raise ValueError('Label %s is out of range.' % label)

    # Lazily build the per-class gradient graphs that are needed
    self._init_class_grads(label=label)

    # Apply preprocessing
    x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)

    # Create feed_dict
    feed_dict = {self._input_ph: x_preprocessed}
    feed_dict.update(self._feed_dict)

    # Compute the gradient and return
    if label is None:
        # Compute the gradients w.r.t. all classes
        grads = self._sess.run(self._class_grads, feed_dict=feed_dict)
        grads = np.swapaxes(np.array(grads), 0, 1)
    elif isinstance(label, (int, np.integer)):
        # Compute the gradients only w.r.t. the provided label
        grads = self._sess.run(self._class_grads[label], feed_dict=feed_dict)
        grads = grads[None, ...]
        grads = np.swapaxes(np.array(grads), 0, 1)
    else:
        # For each sample, compute the gradients w.r.t. the indicated target class (possibly distinct)
        unique_label = list(np.unique(label))
        grads = self._sess.run([self._class_grads[l] for l in unique_label], feed_dict=feed_dict)
        grads = np.swapaxes(np.array(grads), 0, 1)
        lst = [unique_label.index(i) for i in label]
        grads = np.expand_dims(grads[np.arange(len(grads)), lst], axis=1)

    grads = self._apply_preprocessing_gradient(x, grads)
    return grads
def loss_gradient(self, x, y, **kwargs):
    """
    Compute the gradient of the loss function w.r.t. `x`.

    :param x: Sample input with shape as expected by the model.
    :type x: `np.ndarray`
    :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape
              (nb_samples,).
    :type y: `np.ndarray`
    :return: Array of gradients of the same shape as `x`.
    :rtype: `np.ndarray`
    """
    # Apply preprocessing
    x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=False)

    # Check if loss available
    if not hasattr(self, '_loss_grads') or self._loss_grads is None or self._labels_ph is None:
        raise ValueError("Need the loss function and the labels placeholder to compute the loss gradient.")

    # Create feed_dict
    feed_dict = {self._input_ph: x_preprocessed, self._labels_ph: y_preprocessed}
    feed_dict.update(self._feed_dict)

    # Compute gradients
    grads = self._sess.run(self._loss_grads, feed_dict=feed_dict)
    grads = self._apply_preprocessing_gradient(x, grads)
    assert grads.shape == x_preprocessed.shape
    return grads
def _init_class_grads(self, label=None):
    """Lazily build `tf.gradients` graphs of the class outputs w.r.t. the input placeholder.

    :param label: `None` to build gradients for all classes, an `int` for one class, or an array of labels.
    """
    # pylint: disable=E0401
    import tensorflow as tf
    if tf.__version__[0] == '2':
        import tensorflow.compat.v1 as tf
        tf.disable_eager_execution()

    if not hasattr(self, '_class_grads'):
        self._class_grads = [None for _ in range(self.nb_classes())]

    # Construct the class gradients graph; only missing entries are (re)built.
    if label is None:
        if None in self._class_grads:
            self._class_grads = [tf.gradients(self._output[:, i], self._input_ph)[0]
                                 for i in range(self.nb_classes())]
    elif isinstance(label, int):
        if self._class_grads[label] is None:
            self._class_grads[label] = tf.gradients(self._output[:, label], self._input_ph)[0]
    else:
        for unique_label in np.unique(label):
            if self._class_grads[unique_label] is None:
                self._class_grads[unique_label] = tf.gradients(self._output[:, unique_label], self._input_ph)[0]
def _get_layers(self):
    """
    Return the hidden layers in the model, if applicable.

    :return: The hidden layers in the model, input and output layers excluded.
    :rtype: `list`
    """
    # pylint: disable=E0401
    import tensorflow as tf
    if tf.__version__[0] == '2':
        import tensorflow.compat.v1 as tf
        tf.disable_eager_execution()

    # Get the computational graph
    with self._sess.graph.as_default():
        graph = tf.get_default_graph()

    # Get the list of operators and heuristically filter them: keep batch-shaped tensors
    # (leading dim None, second dim known) that are not gradients, loss internals or placeholders.
    tmp_list = []
    for op in graph.get_operations():
        values = op.values()
        if not values:
            continue
        shape = values[0].get_shape()
        if shape is None or shape.ndims is None:
            continue
        dims = shape.as_list()
        if (len(dims) > 1 and dims[0] is None and dims[1] is not None
                and not values[0].name.startswith("gradients")
                and not values[0].name.startswith("softmax_cross_entropy_loss")
                and op.type != "Placeholder"):
            tmp_list.append(values[0].name)

    # Shorten the list: keep only the final tensor produced in each top-level name scope.
    if not tmp_list:
        return tmp_list

    result = [tmp_list[-1]]
    for name in reversed(tmp_list[:-1]):
        if result[0].split("/")[0] != name.split("/")[0]:
            result = [name] + result
    logger.info('Inferred %i hidden layers on TensorFlow classifier.', len(result))
    return result
@property
def layer_names(self):
    """
    Return the hidden layers in the model, if applicable.

    :return: The hidden layers in the model, input and output layers excluded.
    :rtype: `list`

    .. warning:: `layer_names` tries to infer the internal structure of the model.
                 This feature comes with no guarantees on the correctness of the result.
                 The intended order of the layers tries to match their order in the model, but this is not
                 guaranteed either.
    """
    return self._layer_names
def get_activations(self, x, layer, batch_size=128):
    """
    Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and
    `nb_layers - 1`) or by name. The number of layers can be determined by counting the results returned by
    calling `layer_names`.

    :param x: Input for computing the activations.
    :type x: `np.ndarray`
    :param layer: Layer for computing the activations.
    :type layer: `int` or `str`
    :param batch_size: Size of batches.
    :type batch_size: `int`
    :return: The output of `layer`, where the first dimension is the batch size corresponding to `x`.
    :rtype: `np.ndarray`
    """
    # pylint: disable=E0401
    import tensorflow as tf
    if tf.__version__[0] == '2':
        import tensorflow.compat.v1 as tf
        tf.disable_eager_execution()

    # Get the computational graph
    with self._sess.graph.as_default():
        graph = tf.get_default_graph()

    if isinstance(layer, six.string_types):  # basestring for Python 2 (str, unicode) support
        if layer not in self._layer_names:
            raise ValueError("Layer name %s is not part of the graph." % layer)
        layer_tensor = graph.get_tensor_by_name(layer)
    elif isinstance(layer, (int, np.integer)):
        layer_tensor = graph.get_tensor_by_name(self._layer_names[layer])
    else:
        raise TypeError("Layer must be of type `str` or `int`. Received %s" % layer)

    # Apply preprocessing
    x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)

    # Run prediction with batch processing
    results = []
    num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size)))
    for m in range(num_batch):
        # Batch indexes
        begin, end = m * batch_size, min((m + 1) * batch_size, x_preprocessed.shape[0])

        # Create feed_dict
        feed_dict = {self._input_ph: x_preprocessed[begin:end]}
        feed_dict.update(self._feed_dict)

        # Run prediction for the current batch
        layer_output = self._sess.run(layer_tensor, feed_dict=feed_dict)
        results.append(layer_output)

    return np.concatenate(results)
def set_learning_phase(self, train):
    """
    Set the learning phase for the backend framework.

    :param train: True to set the learning phase to training, False to set it to prediction.
    :type train: `bool`
    """
    # Non-bool values are silently ignored, preserving the previous learning phase.
    if isinstance(train, bool):
        self._learning_phase = train
        self._feed_dict[self._learning] = train
def nb_classes(self):
    """
    Return the number of output classes.

    :return: Number of classes in the data.
    :rtype: `int`
    """
    return self._nb_classes
def save(self, filename, path=None):
    """
    Save a model to file in the format specific to the backend framework. For TensorFlow, a SavedModel
    directory is written.

    :param filename: Name of the file where to store the model.
    :type filename: `str`
    :param path: Path of the folder where to store the model. If no path is specified, the model will be stored in
                 the default data location of the library `DATA_PATH`.
    :type path: `str`
    :return: None
    """
    # pylint: disable=E0611
    import os
    import shutil
    from tensorflow.python import saved_model
    from tensorflow.python.saved_model import tag_constants
    from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def

    if path is None:
        from art import DATA_PATH
        full_path = os.path.join(DATA_PATH, filename)
    else:
        full_path = os.path.join(path, filename)

    # SavedModelBuilder refuses to write into an existing directory, so clear any previous export.
    if os.path.exists(full_path):
        shutil.rmtree(full_path)

    builder = saved_model.builder.SavedModelBuilder(full_path)
    signature = predict_signature_def(inputs={'SavedInputPhD': self._input_ph},
                                      outputs={'SavedOutput': self._output})
    builder.add_meta_graph_and_variables(sess=self._sess, tags=[tag_constants.SERVING],
                                         signature_def_map={'predict': signature})
    builder.save()
    logger.info('Model saved in path: %s.', full_path)
def __getstate__(self):
    """
    Use to ensure `TensorFlowClassifier` can be pickled.

    :return: State dictionary with instance parameters.
    :rtype: `dict`
    """
    import time

    state = self.__dict__.copy()

    # Remove the unpicklable entries; live graph tensors/ops are replaced by their names so they
    # can be re-resolved from the restored graph in `__setstate__`.
    del state['_sess']
    del state['_input_ph']
    state['_output'] = self._output.name

    if self._labels_ph is not None:
        state['_labels_ph'] = self._labels_ph.name

    if self._loss is not None:
        state['_loss'] = self._loss.name

    if hasattr(self, '_loss_grads'):
        state['_loss_grads'] = self._loss_grads.name
    else:
        state['_loss_grads'] = False

    if self._learning is not None:
        state['_learning'] = self._learning.name

    if self._train is not None:
        state['_train'] = self._train.name

    if hasattr(self, '_class_grads'):
        state['_class_grads'] = [ts if ts is None else ts.name for ts in self._class_grads]
    else:
        state['_class_grads'] = False

    # Persist the underlying graph as a SavedModel keyed by a unique timestamp-based name.
    model_name = str(time.time())
    state['model_name'] = model_name
    self.save(model_name)
    return state
def __setstate__(self, state):
    """
    Use to ensure `TensorFlowClassifier` can be unpickled.

    :param state: State dictionary with instance parameters to restore.
    :type state: `dict`
    """
    self.__dict__.update(state)

    # Load and update all functionality related to TensorFlow
    # pylint: disable=E0611, E0401
    import os
    import tensorflow as tf
    if tf.__version__[0] == '2':
        import tensorflow.compat.v1 as tf
        tf.disable_eager_execution()
    from tensorflow.python.saved_model import tag_constants
    from art import DATA_PATH

    full_path = os.path.join(DATA_PATH, state['model_name'])
    graph = tf.Graph()
    sess = tf.Session(graph=graph)
    loaded = tf.saved_model.loader.load(sess, [tag_constants.SERVING], full_path)

    # Recover session
    self._sess = sess

    # Recover input placeholder via the saved signature
    input_tensor_name = loaded.signature_def['predict'].inputs['SavedInputPhD'].name
    self._input_ph = graph.get_tensor_by_name(input_tensor_name)

    # Recover output layer
    self._output = graph.get_tensor_by_name(state['_output'])

    # Recover labels' placeholder if any
    if state['_labels_ph'] is not None:
        self._labels_ph = graph.get_tensor_by_name(state['_labels_ph'])

    # Recover loss if any
    if state['_loss'] is not None:
        self._loss = graph.get_tensor_by_name(state['_loss'])

    # Recover loss gradients if any (stored as `False` when absent at pickling time)
    if state['_loss_grads']:
        self._loss_grads = graph.get_tensor_by_name(state['_loss_grads'])
    else:
        self.__dict__.pop('_loss_grads', None)

    # Recover learning-phase placeholder if any
    if state['_learning'] is not None:
        self._learning = graph.get_tensor_by_name(state['_learning'])

    # Recover training op if any
    if state['_train'] is not None:
        self._train = graph.get_operation_by_name(state['_train'])

    # Recover class gradients if any
    if state['_class_grads']:
        self._class_grads = [ts if ts is None else graph.get_tensor_by_name(ts) for ts in state['_class_grads']]
    else:
        self.__dict__.pop('_class_grads', None)

    self.__dict__.pop('model_name', None)
def __repr__(self):
    """Return an unambiguous constructor-style representation of the classifier."""
    repr_ = "%s(input_ph=%r, output=%r, labels_ph=%r, train=%r, loss=%r, learning=%r, " \
            "sess=%r, channel_index=%r, clip_values=%r, defences=%r, preprocessing=%r)" \
            % (self.__module__ + '.' + self.__class__.__name__,
               self._input_ph, self._output, self._labels_ph, self._train, self._loss, self._learning, self._sess,
               self.channel_index, self.clip_values, self.defences, self.preprocessing)
    return repr_
# Backward-compatibility alias: ART v0.10 and earlier exposed this class as ``TFClassifier``.
TFClassifier = TensorFlowClassifier
class TensorFlowV2Classifier(ClassifierNeuralNetwork, ClassifierGradients, Classifier):
    """
    This class implements a classifier with the TensorFlow v2 framework.
    """

    def __init__(self, model, nb_classes, loss_object=None, train_step=None, channel_index=3, clip_values=None,
                 defences=None, preprocessing=(0, 1)):
        """
        Initialization specific to TensorFlow v2 models.

        :param model: A python function or callable class defining the model and providing its prediction as output.
        :type model: `function` or `callable class`
        :param nb_classes: The number of classes in the classification task.
        :type nb_classes: `int`
        :param loss_object: The loss function for which to compute gradients; applied for training the model and
                            computing gradients of the loss w.r.t. the input.
        :type loss_object: `tf.keras.losses`
        :param train_step: A function that applies a gradient update to the trainable variables.
        :type train_step: `function`
        :param channel_index: Index of the axis in data containing the color channels or features.
        :type channel_index: `int`
        :param clip_values: Tuple of the form `(min, max)` of floats or `np.ndarray` bounding feature values.
        :type clip_values: `tuple`
        :param defences: Defences to be activated with the classifier.
        :type defences: `str` or `list(str)`
        :param preprocessing: Tuple `(subtractor, divider)` applied to inputs before inference.
        :type preprocessing: `tuple`
        """
        super(TensorFlowV2Classifier, self).__init__(clip_values=clip_values, channel_index=channel_index,
                                                     defences=defences, preprocessing=preprocessing)
        self._model = model
        self._nb_classes = nb_classes
        self._loss_object = loss_object
        self._train_step = train_step

    def predict(self, x, batch_size=128, **kwargs):
        """
        Perform prediction for a batch of inputs.

        :param x: Test set.
        :type x: `np.ndarray`
        :param batch_size: Size of batches.
        :type batch_size: `int`
        :return: Array of predictions of shape `(nb_inputs, nb_classes)`.
        :rtype: `np.ndarray`
        """
        # Apply preprocessing
        x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)

        # Run prediction with batch processing
        results = np.zeros((x_preprocessed.shape[0], self.nb_classes()), dtype=np.float32)
        num_batch = int(np.ceil(len(x_preprocessed) / float(batch_size)))
        for m in range(num_batch):
            begin, end = m * batch_size, min((m + 1) * batch_size, x_preprocessed.shape[0])
            results[begin:end] = self._model(x_preprocessed[begin:end])
        return results

    def fit(self, x, y, batch_size=128, nb_epochs=10, **kwargs):
        """
        Fit the classifier on the training set `(x, y)`.

        :param x: Training data.
        :type x: `np.ndarray`
        :param y: Labels, one-vs-rest encoding.
        :type y: `np.ndarray`
        :param batch_size: Size of batches.
        :type batch_size: `int`
        :param nb_epochs: Number of epochs to use for training.
        :type nb_epochs: `int`
        :param kwargs: Framework-specific arguments; currently unused for TensorFlow.
        :type kwargs: `dict`
        :return: `None`
        """
        import tensorflow as tf

        # Apply preprocessing
        x_preprocessed, y_preprocessed = self._apply_preprocessing(x, y, fit=True)

        train_ds = tf.data.Dataset.from_tensor_slices(
            (x_preprocessed, y_preprocessed)).shuffle(10000).batch(batch_size)
        for _ in range(nb_epochs):
            for images, labels in train_ds:
                self._train_step(images, labels)

    def fit_generator(self, generator, nb_epochs=20, **kwargs):
        """
        Fit the classifier using a batch generator. Not implemented for TensorFlow v2.

        :raises NotImplementedError: Always.
        """
        raise NotImplementedError

    def class_gradient(self, x, label=None, **kwargs):
        """
        Compute per-class derivatives w.r.t. `x`.

        :param x: Sample input with shape as expected by the model.
        :type x: `np.ndarray`
        :param label: `None` for all classes, an `int` for one class applied to every sample, or an array with one
                      target class per sample (first dimension matching the batch size of `x`).
        :type label: `int` or `list`
        :return: Gradients of shape `(batch_size, nb_classes, input_shape)` when computing for all classes,
                 otherwise `(batch_size, 1, input_shape)` when `label` is specified.
        :rtype: `np.ndarray`
        """
        import tensorflow as tf

        # Apply preprocessing
        x_preprocessed, _ = self._apply_preprocessing(x, y=None, fit=False)

        # Gradients are computed with GradientTape, which requires eager execution.
        if not tf.executing_eagerly():
            raise ValueError('Expecting eager execution.')

        if label is None:
            # Compute the gradients w.r.t. all classes
            class_gradients = list()
            for i in range(self.nb_classes()):
                with tf.GradientTape() as tape:
                    x_preprocessed_tf = tf.convert_to_tensor(x_preprocessed)
                    tape.watch(x_preprocessed_tf)
                    predictions = self._model(x_preprocessed_tf)
                    prediction = predictions[:, i]
                    tape.watch(prediction)
                class_gradient = tape.gradient(prediction, x_preprocessed_tf).numpy()
                class_gradients.append(class_gradient)
            gradients = np.swapaxes(np.array(class_gradients), 0, 1)
        elif isinstance(label, (int, np.integer)):
            # Compute the gradients only w.r.t. the provided label
            with tf.GradientTape() as tape:
                x_preprocessed_tf = tf.convert_to_tensor(x_preprocessed)
                tape.watch(x_preprocessed_tf)
                predictions = self._model(x_preprocessed_tf)
                prediction = predictions[:, label]
                tape.watch(prediction)
            class_gradient = tape.gradient(prediction, x_preprocessed_tf).numpy()
            gradients = np.expand_dims(class_gradient, axis=1)
        else:
            # For each sample, compute the gradients w.r.t. the indicated target class (possibly distinct)
            class_gradients = list()
            unique_labels = list(np.unique(label))
            for unique_label in unique_labels:
                with tf.GradientTape() as tape:
                    x_preprocessed_tf = tf.convert_to_tensor(x_preprocessed)
                    tape.watch(x_preprocessed_tf)
                    predictions = self._model(x_preprocessed_tf)
                    prediction = predictions[:, unique_label]
                    tape.watch(prediction)
                class_gradient = tape.gradient(prediction, x_preprocessed_tf).numpy()
                class_gradients.append(class_gradient)
            gradients = np.swapaxes(np.array(class_gradients), 0, 1)
            lst = [unique_labels.index(i) for i in label]
            gradients = np.expand_dims(gradients[np.arange(len(gradients)), lst], axis=1)

        return gradients

    def loss_gradient(self, x, y, **kwargs):
        """
        Compute the gradient of the loss function w.r.t. `x`.

        :param x: Sample input with shape as expected by the model.
        :type x: `np.ndarray`
        :param y: Correct labels, one-vs-rest encoding.
        :type y: `np.ndarray`
        :return: Array of gradients of the same shape as `x`.
        :rtype: `np.ndarray`
        """
        import tensorflow as tf

        # Apply preprocessing
        x_preprocessed, _ = self._apply_preprocessing(x, y, fit=False)

        if not tf.executing_eagerly():
            raise ValueError('Expecting eager execution.')

        with tf.GradientTape() as tape:
            x_preprocessed_tf = tf.convert_to_tensor(x_preprocessed)
            tape.watch(x_preprocessed_tf)
            predictions = self._model(x_preprocessed_tf)
            # The loss object expects sparse labels, so the one-hot encoding is collapsed first.
            loss = self._loss_object(np.argmax(y, axis=1), predictions)
        gradients = tape.gradient(loss, x_preprocessed_tf).numpy()

        # Apply preprocessing gradients
        gradients = self._apply_preprocessing_gradient(x, gradients)
        return gradients

    def _get_layers(self):
        """
        Return the hidden layers in the model, if applicable. Not implemented for TensorFlow v2.

        :raises NotImplementedError: Always.
        """
        raise NotImplementedError

    @property
    def layer_names(self):
        """
        Return the hidden layers in the model, if applicable. Not implemented for TensorFlow v2.

        :raises NotImplementedError: Always.
        """
        raise NotImplementedError

    def get_activations(self, x, layer, batch_size=128):
        """
        Return the output of the specified layer for input `x`. Not implemented for TensorFlow v2.

        :raises NotImplementedError: Always.
        """
        raise NotImplementedError

    def set_learning_phase(self, train):
        """
        Set the learning phase for the backend framework. Not implemented for TensorFlow v2.

        :raises NotImplementedError: Always.
        """
        raise NotImplementedError

    def nb_classes(self):
        """
        Return the number of output classes.

        :return: Number of classes in the data.
        :rtype: `int`
        """
        return self._nb_classes

    def save(self, filename, path=None):
        """
        Save a model to file in the format specific to the backend framework. Not implemented for TensorFlow v2.

        :raises NotImplementedError: Always.
        """
        raise NotImplementedError

    def __repr__(self):
        """Return an unambiguous constructor-style representation of the classifier."""
        # `_learning` is never assigned by __init__, so getattr guards against AttributeError in repr().
        repr_ = "%s(model=%r, nb_classes=%r, loss_object=%r, learning=%r, train_step=%r, " \
                "channel_index=%r, clip_values=%r, defences=%r, preprocessing=%r)" \
                % (self.__module__ + '.' + self.__class__.__name__,
                   self._model, self._nb_classes, self._loss_object, getattr(self, '_learning', None),
                   self._train_step, self.channel_index, self.clip_values, self.defences, self.preprocessing)
        return repr_
| [
"Idontknow8112@@@"
] | Idontknow8112@@@ |
2e5b8495a8a6785b298450a12c2423c57bffeb2c | 968932c0550f7a0e19dfe354114a5196f9d1aee6 | /manage.py | 36d568ccb31ba38d01aa135ef41751d8bde463d1 | [
"MIT"
] | permissive | kaphie/InstaClone | 4589bf8de0b747c0297e03becb1e23eed6403d38 | 801853615b99071d8eb31498144cac039fa0fa2c | refs/heads/master | 2023-08-02T12:13:20.815748 | 2020-06-04T08:32:08 | 2020-06-04T08:32:08 | 267,879,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | #!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings module before dispatching the CLI command.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "instaClone.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
| [
"bkafrika144@gmail.com"
] | bkafrika144@gmail.com |
1d95b787262c812df63310329f558d1fb2407a77 | 8bee8da678b972dc038a458bdb4a71ffa1935e8b | /Scripts_MT_Structure/Reflectivity_test_BK.py | e4304282747d401e51baeea77dd86c8bba0a5c27 | [
"BSD-3-Clause",
"MIT"
] | permissive | nienkebrinkman/SS_MTI | f00674cafe92d5459d16d9a64265fb68b981e24c | 2632214f7df9caaa53d33432193ba0602470d21a | refs/heads/master | 2023-04-11T10:11:29.346463 | 2021-08-04T14:37:41 | 2021-08-04T14:37:41 | 274,366,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,396 | py | import erzsol3Py.erzsol3Py as erz
""" 1. Create csv-model file """
import numpy as np
# depth_model = np.arange(0, 3450, 50)
# dz = (depth_model[1] - depth_model[0]) / 1000 # convert to km
# Vp_model = np.linspace(5, 5, len(depth_model))
# Vs_model = Vp_model / 2
dz = np.array([1.0])
Vp_model = np.array([5.0])
Vs_model = np.array([2.6])
rho_model = np.ones_like(Vp_model) * 2.7
qa = 0.001
qb = 0.002
# """ 2. Generating a .mod model """
folder = "/home/nienke/Documents/Research/SS_MTI/External_packages/erzsol3/Test_Files/"
mod_file_name = folder + "Test.mod"
file = open(mod_file_name, "w+")
model_name = "Test_model\n"
nLayers_thickness = " {} {}\n".format(len(Vp_model), 0)
L = [model_name, nLayers_thickness]
file.writelines(L)
for i, (vp, vs, rho) in enumerate(zip(Vp_model, Vs_model, rho_model)):
L = [
"3 {:.3f} {:.3f} {:.2f} {:.3f} {:.3f} {:.3f}\n".format(
vp, vs, rho, dz[i], qa, qb
)
]
file.writelines(L)
file.close()
""" Write .dst file """
epis = [22.0]
azis = [45.0]
dst_file_name = folder + "Test.dst"
file = open(dst_file_name, "w")
n_rec = len(epis)
L = [" {} # of distances /distances\n".format(n_rec)]
file.writelines(L)
for i, (epi, azi) in enumerate(zip(epis, azis)):
L = [" {:.2f} {:.2f}\n".format(epi, azi)]
file.writelines(L)
file.close()
| [
"nienke.brinkman@erdw.ethz.ch"
] | nienke.brinkman@erdw.ethz.ch |
b03c6bdfebfbe7358825863a45d6d3a2564c5301 | 608a922d4ff844589ae529e5b1d6acafc9d8f655 | /core/migrations/0004_auto_20210218_0351.py | 7ac7f39650efef114771948908e05f48794cc639 | [
"MIT"
] | permissive | Agyey/Restaurant-API | 14dd0629208283fbd48203fad0c0e8d586e1ca97 | ecef12da1dde087e545bb89d132309b5c91943c6 | refs/heads/main | 2023-03-03T16:05:04.888811 | 2021-02-18T00:31:13 | 2021-02-18T00:31:13 | 339,881,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | # Generated by Django 3.1.6 on 2021-02-17 22:21
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering the default of `restaurant.established`."""

    dependencies = [
        ("core", "0003_auto_20210218_0341"),
    ]

    operations = [
        migrations.AlterField(
            model_name="restaurant",
            name="established",
            field=models.DateField(
                # Fixed datetime captured when makemigrations ran (not evaluated per-row).
                default=datetime.datetime(2021, 2, 18, 3, 51, 28, 49289)
            ),
        ),
    ]
| [
"agyey27@yahoo.co.in"
] | agyey27@yahoo.co.in |
e945933bb1181d2976332caa4371b920cbec54b0 | ec56cadfe20834b9e6536ce075f0d807f509c967 | /VGG/utils/vgg_utils.py | 21f45b5a57e6cf7d9b9a6845f1aa69e9ab35d02d | [] | no_license | joao-afonso-pereira/Fingerprint-Antispoofing-Domain-Adaptation-and-Adversarial-Learning | 3ecb2f795b63490b76c0427f3262af254dd118ea | 5607c3d1f02efa285d04b933b9d1f601903e6535 | refs/heads/master | 2022-12-07T20:53:29.842211 | 2020-09-02T20:41:23 | 2020-09-02T20:41:23 | 284,818,782 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | import numpy as np
def _nanargmin(arr):
    """Return the index of the minimum of `arr` ignoring NaNs, or `np.nan` when
    `arr` is empty or all-NaN (cases where `np.nanargmin` raises ValueError)."""
    try:
        return np.nanargmin(arr)
    except ValueError:
        return np.nan
def frange(start, stop, step):
    """Return a list of values from `start` to `stop` (inclusive) in increments of `step`.

    Float steps accumulate rounding error, so `stop` may be skipped or overshot
    for non-exactly-representable steps — mirrors the original behavior.
    """
    num = start
    _list = []
    while num <= stop:
        _list.append(num)
        num = num + step
    return _list
def set_parameter_requires_grad(model, feature_extracting):
    """Freeze all parameters of `model` when `feature_extracting` is truthy.

    When `feature_extracting` is falsy the parameters are left untouched
    (they keep whatever `requires_grad` state they already have).
    """
    if feature_extracting:
        for param in model.parameters():
            param.requires_grad = False
"joaoappereira@gmail.com"
] | joaoappereira@gmail.com |
a20838320324c0d78d3f26c32300498cc1369aba | e3cacb42b3eaced8ad68b5d9c871cb212a9026a5 | /profile_project/urls.py | 0f97233a86f963449ce82de8653625c658cf24a9 | [
"MIT"
] | permissive | Vishal2602/profiles-rest-api | fb0673356f0d45002a3062ef499c22bfb89428b1 | d869ce3f5bd2ad7b87655cc2fa691ccba5b9f4fa | refs/heads/main | 2022-12-29T10:32:47.528189 | 2020-10-14T09:01:46 | 2020-10-14T09:01:46 | 303,087,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | """profile_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include

urlpatterns = [
    path('admin/', admin.site.urls),
    # All API endpoints are delegated to the profiles_api app's URLconf.
    path('api/', include("profiles_api.urls")),
]
| [
"vishals2602@gmail.com"
] | vishals2602@gmail.com |
3c45d1ba786cee4c5a59c34859e5c387fea12955 | 17c7ca173a8619ec6b3138d1cf96dad5fe8916da | /make_kdd.py | e9d464a8017767a15b2aa055e8272fae8f24956d | [] | no_license | rob-n3ikc/continuous-Boltzmann-Machine | 81adcf6f630b526bb38e27faad863419bd9e2e65 | b0a239ae57804832034cb05270e5a671549beb97 | refs/heads/master | 2021-06-25T20:43:04.394980 | 2021-01-15T19:52:38 | 2021-01-15T19:52:38 | 203,423,404 | 0 | 0 | null | 2021-01-15T19:52:39 | 2019-08-20T17:29:58 | C++ | UTF-8 | Python | false | false | 1,374 | py | #!/usr/bin/python
# nominally python2, but python3 should work as it isn't anything fancy
import math
import sys
def square(nsample, nwide, start):
    """Return `nsample` samples of a square wave of half-period `nwide`,
    starting at phase offset `start`; values are 1.0 / -1.0.

    Uses floor division: the original `(i/nwide)%2` relied on Python 2 integer
    division and produced wrong bands under Python 3's true division.
    """
    retval = []
    for i in range(start, nsample + start):
        if (i // nwide) % 2 == 0:
            retval.append(1.)
        else:
            retval.append(-1.)
    return retval
def sin_wave(nsample, nwide, start):
    """Return `nsample` samples of sin(pi*i/nwide) for i starting at `start`,
    i.e. a sine wave with half-period `nwide` samples."""
    retval = []
    for i in range(start, nsample + start):
        x = math.pi / nwide * i
        retval.append(math.sin(x))
    return retval
def to_str(numeric_list):
    """Return a list with every element of `numeric_list` converted via str()."""
    return [str(i) for i in numeric_list]
# TBD add_noise

# MAIN: emit a simple wave dataset (header + rows) on stdout.
if len(sys.argv) < 3:
    print('USAGE kdd_gen length width')
    sys.exit(0)

nsample = int(sys.argv[1])
nwid = int(sys.argv[2])

# Header: attribute count, one attribute per sample index, then a class attribute.
sys.stdout.write(str(nsample + 1))
sys.stdout.write('\n')
for i in range(0, nsample):
    o = '@attribute ' + str(i) + '\n'
    sys.stdout.write(o)
o = '@attribute class category 0 \n@data \n'
sys.stdout.write(o)

# One row per phase offset: first all square waves, then all sine waves; class label 0.
for i in range(0, nwid):
    o = square(nsample, nwid, i)
    o.append(0)
    sys.stdout.write(','.join(to_str(o)))
    sys.stdout.write('\n')
for i in range(0, nwid):
    o = sin_wave(nsample, nwid, i)
    o.append(0)
    sys.stdout.write(','.join(to_str(o)))
    sys.stdout.write('\n')
"noreply@github.com"
] | noreply@github.com |
26864c7ca7f2c60cdd522839d715e9bc97910ed5 | 1c2fdf5e2c02ac497e0054d7f6b171c97ab9d669 | /myapp/urls.py | 6e7764972545ca9612a5f1c0383d5d7a0bb46a68 | [] | no_license | sachingowda1996/p10 | 3bf5ced5fbf10b2903c7b85a4af6b9bcabac236a | 8e1dcaa6228d528a1bdb6a12e6a7bd74481f63bc | refs/heads/master | 2022-11-29T09:18:55.164044 | 2020-08-10T16:42:24 | 2020-08-10T16:42:24 | 286,528,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | from django.urls import path
from myapp import views
app_name="myapp"
urlpatterns = [
path('trail/',views.trail,name="Trail"),
path('profile/',views.profile,name="profile"),
path('get_demo/',views.get_demo,name="get_demo"),
path('post_demo/',views.post_demo,name="post_demo"),
]
| [
"sachinkn2019@gmail.com"
] | sachinkn2019@gmail.com |
547e6a3a571c9e2c706f867b40ebd19184612a68 | 4b64dd47fa9321b50875e96298a5f0766ffe97c9 | /adventofcode/2020/day7/run.py | 9046f4736d837a0d56f7717eedabdfc086788e75 | [] | no_license | choupi/puzzle | 2ce01aa85201660da41378c6df093036fa2d3a19 | 736964767717770fe786197aecdf7b170d421c8e | refs/heads/master | 2021-07-23T13:17:45.086526 | 2021-07-20T11:06:28 | 2021-07-20T11:06:28 | 13,580,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | def dfs(bag):
if bag == 'shiny gold':
bag_result[bag] = 1
return 1
if bag in bag_result:
return bag_result[bag]
if bag not in bag_dict or not bag_dict[bag]:
#print(bag)
bag_result[bag] = 0
return 0
for b in bag_dict[bag]:
if b in bag_result and bag_result[b] == 1:
bag_result[bag] = 1
return 1
if dfs(b):
bag_result[bag] = 1
return 1
return 0
bag_dict = {}
with open('input.txt') as f:
#with open('inp') as f:
for l in f:
if 'no other bags.' in l:
continue
bag, contains = l.strip().split(' contain ', 1)
bag = bag[:bag.rindex(' ')]
#print(bag)
contains = [' '.join(c.split(' ')[1:-1]) for c in contains.split(', ')]
#print(bag, contains)
bag_dict[bag] = contains
#print(len(bag_dict))
bag_result = {}
for bag in bag_dict:
if bag in bag_result:
continue
dfs(bag)
print(sum([v for b,v in bag_result.items()])-1)
| [
"chromosome460@gmail.com"
] | chromosome460@gmail.com |
3eeafad473611f2134280a3625dfe10dba05c18c | 9d28b8d5e2416f0f2037c7bc27266a017c201d8f | /manage.py | f3998254019c831f1fe8b08b3f189249a4762e97 | [] | no_license | njs03332/soup | ac39451c447ef3de178884eb3987cffc18e1a3af | aab45edeb99dd0ad3014018d834bf655b71e1b80 | refs/heads/master | 2021-10-27T22:21:43.632550 | 2019-04-20T08:26:07 | 2019-04-20T08:26:07 | 181,488,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mynewsdiary.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"njs03332@gmail.com"
] | njs03332@gmail.com |
76f55e7c8955a43c6f2ee8943674c16a479335be | 3898aadbdee48ba30e7186a381b3949445df1f34 | /build/catkin_generated/generate_cached_setup.py | 9d114b92d0cf4c89d80e5db389b9e36644434892 | [] | no_license | AdrianKlei/Studienarbeit | 968f632b9ab1860e6a92e10d1de864a3c2b66a6a | 4f6cec57d2bd43a2e5d249857bd77fd95639dde2 | refs/heads/master | 2022-12-10T00:26:42.068418 | 2020-09-09T11:58:12 | 2020-09-09T11:58:12 | 293,834,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/adrian/fremen_ws/devel;/home/adrian/catkin_ws/devel;/opt/ros/kinetic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/adrian/fremen_ws/devel/env.sh')
output_filename = '/home/adrian/fremen_ws/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"a.kleimeier9@gmail.com"
] | a.kleimeier9@gmail.com |
2fd8f4c115292d74b608708a12cf0e05d045b84f | fb8666ba2f687441fb462482ddc2465669ab815f | /my_funcs.py | f661f67cc3dd41b03181ba1e5b4f1dc6385a14c6 | [
"Apache-2.0"
] | permissive | wchunhao2000/pyneta | 407315bcabdb5c95e277ad6d8f9e2b6f9650f212 | 3c03dff486e3a777e3e6cca4e0de5b823cf64256 | refs/heads/main | 2023-06-14T04:40:39.348164 | 2021-07-12T19:20:56 | 2021-07-12T19:20:56 | 359,228,154 | 0 | 0 | Apache-2.0 | 2021-07-12T19:20:56 | 2021-04-18T18:54:37 | Python | UTF-8 | Python | false | false | 473 | py | import yaml
def yaml_load_devices(filename="arista_devices.yml"):
with open(filename, "r") as f:
return yaml.safe_load(f)
raise ValueError("Reading YAML file failed")
def output_printer(arp_list):
print()
print("-" * 40)
for arp_entry in arp_list:
mac_address = arp_entry["hwAddress"]
ip_address = arp_entry["address"]
print("{:^15}{:^5}{:^15}".format(ip_address, "-->", mac_address))
print("-" * 40)
print()
| [
"wchunhao2000@hotmail.com"
] | wchunhao2000@hotmail.com |
ce12c3bac2fa1e50590db1267dd69ad54d66dae2 | 1bfca35cb83842000a3e37f81a69627535a12bf6 | /examples/testWhile3.py | 78d24d522215b5cd084dafeb3a2c0b6ab0f53bc6 | [] | no_license | scar86/python_scripts | 4a8a51f15d21f3b71fa8f0cd2131f75612c40613 | 686b1229c6736147b7cfcd2d0bf31e5f12e85e00 | refs/heads/master | 2021-01-11T00:28:31.989712 | 2016-11-04T18:52:21 | 2016-11-04T18:52:21 | 70,526,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | '''Test yourself again: what happens?'''
nums = list()
i = 4
while (i < 9):
nums.append(i)
i = i+2
print(nums)
| [
"gogs@fake.local"
] | gogs@fake.local |
52ce23e7f3882bef68452fb5455b78d398f419b7 | 335852dcc77acd702a7b62bb32f1773888f9c524 | /src/finetune_eval_config.py | 446973721ed341b598caf3369dc59c86fcfc6ee8 | [
"MIT"
] | permissive | lvyufeng/emotect_mindspore | 107c6cdd8cc9eac1b53b67915de08ec60b3bef22 | d36172969c3adce018f97e6adaeb46d9ec9e241c | refs/heads/main | 2023-03-12T10:58:56.355157 | 2021-02-25T23:40:15 | 2021-02-25T23:40:15 | 340,881,320 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,332 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
config settings, will be used in finetune.py
"""
from easydict import EasyDict as edict
import mindspore.common.dtype as mstype
from .bert_model import BertConfig
optimizer_cfg = edict({
'learning_rate': 2e-5
})
bert_net_cfg = BertConfig(
seq_length=512,
vocab_size=18000,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="relu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=513,
type_vocab_size=2,
initializer_range=0.02,
use_relative_positions=False,
dtype=mstype.float32,
compute_type=mstype.float16,
)
| [
"lvyufeng@cqu.edu.cn"
] | lvyufeng@cqu.edu.cn |
abf926933f8f8d4dabf73aa3ae493088c66fad10 | 13e4a2e3014777f8be4cf169e51665b4da385ee2 | /bin/ewf2mattock | 27955c3654901730c73d02c7a4aadfb216909046 | [] | no_license | robklpd/MattockFS | 3c249f07ab982454eda5b8ad8209d26f16f29b2f | 38146dd4390314e79380f2571db9b253cc336b16 | refs/heads/master | 2021-01-17T18:45:50.186072 | 2016-08-21T00:38:50 | 2016-08-21T00:38:50 | 66,283,336 | 0 | 0 | null | 2016-08-22T15:13:50 | 2016-08-22T15:13:50 | null | UTF-8 | Python | false | false | 1,897 | #!/usr/bin/python
# NOTE: This script is meant mainly for testing purposes.
# It does not respect mattock throttling considerations
from mattock.api import MountPoint
import pyewf
import sys
import json
ewffiles = sys.argv[1:]
if len(ewffiles) == 0:
print "Please specify EWF file to use."
else:
mp = MountPoint("/var/mattock/mnt/0")
context = mp.register_worker("ewf2mattock","K")
handle = pyewf.handle()
handle.open(ewffiles)
kickjob = context.poll_job()
meta = {}
meta["hashes"] = handle.get_hash_values()
meta["header"] = handle.get_header_values()
meta["size"] = handle.get_media_size()
metajson = json.dumps(meta)
mutable = kickjob.childdata(len(metajson))
with open(mutable, "r+") as f:
f.seek(0)
f.write(metajson)
meta_carvpath = kickjob.frozen_childdata()
kickjob.childsubmit(carvpath=meta_carvpath,
nextactor="dsm",
routerstate="",
mimetype="mattock-meta/ewf",
extension="meta")
print "Meta forwarded to dsm as", meta_carvpath
remaining = handle.get_media_size()
mutable = kickjob.childdata(remaining)
with open(mutable, "r+") as f:
f.seek(0)
while remaining > 0:
if remaining > 1048576:
chunk = 1048576
remaining -= chunk
else:
chunk = remaining
remaining = 0
data = handle.read(chunk)
f.write(data)
handle.close()
img_carvpath = kickjob.frozen_childdata()
kickjob.childsubmit(carvpath=img_carvpath,
nextactor="mmls",
routerstate="",
mimetype="application/disk-img",
extension="dd")
print "Image forwarded to mmls as", img_carvpath
kickjob.done()
| [
"pibara@gmail.com"
] | pibara@gmail.com | |
97f5234c19b1e8eed56e1695c2261be1efff2775 | 87a3082f6de154aa29b9ca94859940bd21d5a174 | /1_code/2_ensembles.py | 281274d852eb58560448f1d5368f9e424177c103 | [] | no_license | purrlab/ENHANCE | c4b9dc1750bbeebdcbe679cfe0fba9bf8f98d09b | 61b4176c4e8b90e6670c5fde67e98dc6babc4308 | refs/heads/main | 2023-07-27T04:06:29.234375 | 2021-09-13T10:47:54 | 2021-09-13T10:47:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,884 | py | # coding: utf-8
# Introduction
"""
We based the ensembles on the predictions of three available multi-task models: asymmetry, border and color.
Per annotation source (student, crowd and automated), we calculate using averaging ensemble technique
the class prediction (abnormal or healthy).
"""
from statistics import mean
import pandas
from numpy import std
from sklearn.ensemble import VotingClassifier
NAME = '2_ensembles'
PROJECT = 'HINTS'
PYTHON_VERSION = '3.8.2'
KERAS_VERSION = '2.3.1'
# Preamble
## Imports
from constants import *
import os, re
from keras.models import model_from_json
from get_data import get_multi_task_data
from generate_data import generate_data_2
from report_results import report_auc
from sklearn.metrics import roc_auc_score
from get_data import HINTS_TYPE
import numpy as np
from scipy import optimize
import keras
import sys
import pandas as pd
## Settings
## Set working directory
workdir = re.sub("(?<={})[\w\W]*".format(PROJECT), "", os.getcwd())
os.chdir(workdir)
## Set up pipeline folder if missing
pipeline = os.path.join('empirical', '2_pipeline', NAME)
if not os.path.exists(pipeline):
os.makedirs(pipeline)
for folder in ['out', 'store', 'tmp']:
os.makedirs(os.path.join(pipeline, folder))
def getPredictionsPath(currentSeed, architecture, annotationType, annotationSource):
return os.path.join('empirical', '2_pipeline', '1_multi_task', 'out', architecture, annotationType,
annotationSource, str(currentSeed) + 'predictions.csv')
def getPredictionsPathsForArchitecture(seed, architecture):
studAPath = getPredictionsPath(seed, architecture, 'asymmetry', 'student')
studBPath = getPredictionsPath(seed, architecture, 'border', 'student')
studCPath = getPredictionsPath(seed, architecture, 'color', 'student')
crowdAPath = getPredictionsPath(seed, architecture, 'asymmetry', 'mturk')
crowdBPath = getPredictionsPath(seed, architecture, 'border', 'mturk')
crowdCPath = getPredictionsPath(seed, architecture, 'color', 'mturk')
autoAPath = getPredictionsPath(seed, architecture, 'asymmetry', 'automated')
autoBPath = getPredictionsPath(seed, architecture, 'border', 'automated')
autoCPath = getPredictionsPath(seed, architecture, 'color', 'automated')
predictionsPaths = {
"studentA": studAPath,
"studentB": studBPath,
"studentC": studCPath,
"mturkA": crowdAPath,
"mturkB": crowdBPath,
"mturkC": crowdCPath,
"automatedA": autoAPath,
"automatedB": autoBPath,
"automatedC": autoCPath
}
return predictionsPaths
architectures = ['vgg16', 'resnet', 'inception']
annotationSources = ['student', 'mturk', 'automated']
seeds = [1970, 1972, 2008, 2019, 2020]
for architecture in architectures:
for annotationSource in annotationSources:
aucs = pd.DataFrame(columns=['seed', 'auc'])
for seed in seeds:
pathToPredictions = getPredictionsPathsForArchitecture(seed, architecture)
predictionsAsymmetryModel = pd.read_csv(pathToPredictions[annotationSource + 'A'])
predictionsBorderModel = pd.read_csv(pathToPredictions[annotationSource + 'B'])
predictionsColorModel = pd.read_csv(pathToPredictions[annotationSource + 'C'])
# USE AVERAGING
df = pandas.DataFrame()
df['A'] = predictionsAsymmetryModel['prediction']
df['B'] = predictionsBorderModel['prediction']
df['C'] = predictionsColorModel['prediction']
probabilities = (df['A'] + df['B'] + df['C']) / 3.0
auc = roc_auc_score(predictionsAsymmetryModel['true_label'], probabilities)
aucs = aucs.append({'seed': seed, 'auc': auc}, ignore_index=True)
report_auc(aucs, os.path.join(pipeline, 'out', 'aucs_' + architecture + '_' + annotationSource + '.csv'))
| [
"noreply@github.com"
] | noreply@github.com |
b592bfd26e518c213f887d4d3836f718c8a09754 | 4234dc363d0599e93abc1d9a401540ad67702b3b | /clients/client/python/test/test_ui_container.py | c3e92fd3828cd09328d2c9a7225f247880fd3b55 | [
"Apache-2.0"
] | permissive | ninjayoto/sdk | 8065d3f9e68d287fc57cc2ae6571434eaf013157 | 73823009a416905a4ca1f9543f1a94dd21e4e8da | refs/heads/master | 2023-08-28T03:58:26.962617 | 2021-11-01T17:57:24 | 2021-11-01T17:57:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | """
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.21
Contact: support@ory.sh
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_client
from ory_client.model.ui_nodes import UiNodes
from ory_client.model.ui_texts import UiTexts
globals()['UiNodes'] = UiNodes
globals()['UiTexts'] = UiTexts
from ory_client.model.ui_container import UiContainer
class TestUiContainer(unittest.TestCase):
"""UiContainer unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUiContainer(self):
"""Test UiContainer"""
# FIXME: construct object with mandatory attributes with example values
# model = UiContainer() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"3372410+aeneasr@users.noreply.github.com"
] | 3372410+aeneasr@users.noreply.github.com |
1430f7e16fa54fc01134cf993d1d04dd603472f9 | f185d465b4cfb87d1d6b909566299218ac81e0f9 | /hw6/solutions/ukf_slam.py | 3c406bf137f1a4ec30eccfebb74a7de1f41ef20a | [] | no_license | handsomeboy/FreiburgSLAM | 1d10d7762e041742ee235b88f56d47d43ef2528b | 16d4bbe6c9bcc0cf3c7a7acb02bd87409fb62d00 | refs/heads/master | 2020-04-16T09:42:55.623665 | 2019-01-12T10:17:42 | 2019-01-12T10:17:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,944 | py | import scipy
import main
def prediction(mu, sigma, u):
"""Updates the belief concerning the robot pose according to
the motion model (From the original MATLAB code: Use u.r1,
u.t, and u.r2 to access the rotation and translation values)
In this case u['r1'], u['t'] and u['r2'] to access values
Args:
mu ((2N+3, 1) numpy float array): state mean matrix
mean. N in this case is the number of landmarks
sigma ((2N+3, 2N+3) numpy float array): covariance matrix
u (dictionary): odometry reading (r1, t, r2)
Returns:
mu (numpy float array): updated mu by u
sigma (numpy float array): updated sigma by u
"""
# TODO: Compute the new mu based on the noise-free (odometry-based) motion model
# Remember to normalize theta after the update (hint: use the function normalize_angle available in tools)
# TODO: Compute the 3x3 Jacobian Gx of the motion model
# TODO: Construct the full Jacobian G
# Motion noise
motionNoise = 0.1
R3 = scipy.array([[motionNoise, 0., 0.],
[0., motionNoise, 0.],
[0., 0., motionNoise/10.]])
R = scipy.zeros((sigma.shape[0],sigma.shape[0]))
R[0:3,0:3] = R3
# TODO: Compute the predicted sigma after incorporating the motion
return mu, sigma
def correction(mu, sigma, z, mapout, scale):
"""Updates the belief, i. e., mu and sigma after observing
landmarks, according to the sensor model. The employed sensor
model measures the range and bearing of a landmark.
Args:
mu ((2N+3, 1) numpy float array): state mean matrix
The first 3 components of mu correspond to the current
estimate of the robot pose [x, y, theta] The current
pose estimate of the landmark with id = j is:
[mu[2*j+2], mu[2*j+3]]
sigma ((2N+3, 2N+3) numpy float array): covariance matrix
z: landmark observations.
Each observation z(i) has an id z(i).id, a range z(i).
range, and a bearing z(i).bearing. The vector observed
Landmarks indicates which landmarks have been observed
at some point by the robot.
observedLandmarks (boolean numpy array): new landmark
signifier. False if the landmark with id = j has never
been observed before.
Returns:
mu (numpy float array): updated mu
sigma (numpy float array): updated sigma
observedLandmarks (boolean numpy array): updated landmark
signifier.
"""
# For computing sigma
#global scale;
# Number of measurements in this time step
m = len(z['id'])
# Measurement noise
Q = 0.01*scipy.eye(2)
i = 0
lm = len(m)
while i < lm:#1:m
# If the landmark is observed for the first time:
if (scipy.sum(mapout == z[i].id) == 0):
# Add new landmark to the map
[mu, sigma, mapout] = main.add_landmark_to_map(mu,
sigma,
z[i],
mapout,
Q,
scale)
# The measurement has been incorporated so we quit the correction step
i = lm
else:
# Compute sigma points from the predicted mean and covariance
# This corresponds to line 6 on slide 32
sigma_points = main.compute_sigma_points(mu, sigma, scale)
# Normalize!
sigma_points[2,:] = main.normalize_angle(sigma_points[2,:])
# Compute lambda
n = len(mu)
num_sig = sigma_points.shape[1]
lam = scale - n
# extract the current location of the landmark for each sigma point
# Use this for computing an expected measurement, i.e., applying the h function
landmarkIndex = mapout == z[i].id
landmarkXs = sigma_points[2*landmarkIndex + 1, :]
landmarkYs = sigma_points[2*landmarkIndex + 2, :]
# TODO: Compute z_points (2x2n+1), which consists of predicted measurements from all sigma points
# This corresponds to line 7 on slide 32
# setup the weight vector for mean and covariance
wm = [lam/scale, scipy.tile(1/(2*scale), (1, 2*n))]
# TODO: Compute zm, line 8 on slide 32
# zm is the recovered expected measurement mean from z_points.
# It will be a 2x1 vector [expected_range; expected_bearing].
# For computing the expected_bearing compute a weighted average by
# summing the sines/cosines of the angle
# TODO: Compute the innovation covariance matrix S (2x2), line 9 on slide 32
# Remember to normalize the bearing after computing the difference
# TODO: Compute Sigma_x_z, line 10 on slide 32
# (which is equivalent to sigma times the Jacobian H transposed in EKF).
# sigma_x_z is an nx2 matrix, where n is the current dimensionality of mu
# Remember to normalize the bearing after computing the difference
# TODO: Compute the Kalman gain, line 11 on slide 32
# Get the actual measurement as a vector (for computing the difference to the observation)
z_actual = [z[i]['range'], z[i]['bearing']]
# TODO: Update mu and sigma, line 12 + 13 on slide 32
# normalize the relative bearing
# TODO: Normalize the robot heading mu(3)
# Don't touch this iterator
i = i + 1
return mu, sigma, observedLandmarks
| [
"icfaust@gmail.com"
] | icfaust@gmail.com |
e9ebc7aaca1f90e2f3771a9aa5a6dcfda029d314 | 762de1c66746267e05d53184d7854934616416ee | /tools/MolSurfGenService/MolSurfaceGen32/chimera/share/AddAttr/gui.py | 6e64524fe36c39941dcf1ec1fd3c40af7584d9e7 | [] | no_license | project-renard-survey/semanticscience | 6e74f5d475cf0ebcd9bb7be6bb9522cf15ed8677 | 024890dba56c3e82ea2cf8c773965117f8cda339 | refs/heads/master | 2021-07-07T21:47:17.767414 | 2017-10-04T12:13:50 | 2017-10-04T12:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,341 | py | # --- UCSF Chimera Copyright ---
# Copyright (c) 2000 Regents of the University of California.
# All rights reserved. This software provided pursuant to a
# license agreement containing restrictions on its disclosure,
# duplication and use. This notice must be embedded in or
# attached to all copies, including partial copies, of the
# software or any revisions or derivations thereof.
# --- UCSF Chimera Copyright ---
#
# $Id: gui.py 26655 2009-01-07 22:02:30Z gregc $
import chimera
from chimera import replyobj
from chimera.baseDialog import ModelessDialog
import Tkinter, Pmw
from OpenSave import OpenModeless
from AddAttr import addAttributes
class AddAttrDialog(OpenModeless):
title = "Define Attribute"
provideStatus = True
name = "add/change attrs"
help = "ContributedSoftware/defineattrib/defineattrib.html"
def __init__(self):
OpenModeless.__init__(self, clientPos='s', clientSticky='nsew',
historyID="AddAttr")
def Apply(self):
mols = self.molListBox.getvalue()
if not mols:
self.enter()
replyobj.error("No models chosen in dialog\n")
return
for path in self.getPaths():
setAttrs = addAttributes(path, models=mols,
log=self.doLog.get(),
raiseAttrDialog=self.openDialog.get())
if setAttrs == []:
replyobj.error("No attributes were set from"
" file %s\n" % path)
def fillInUI(self, parent):
OpenModeless.fillInUI(self, parent)
from chimera.widgets import MoleculeScrolledListBox
self.molListBox = MoleculeScrolledListBox(self.clientArea,
listbox_selectmode="extended",
labelpos="w", label_text="Restrict to models:")
self.molListBox.grid(row=0, column=0, sticky="nsew")
self.clientArea.rowconfigure(0, weight=1)
self.clientArea.columnconfigure(0, weight=1)
checkButtonFrame = Tkinter.Frame(self.clientArea)
checkButtonFrame.grid(row=1, column=0)
self.openDialog = Tkinter.IntVar(parent)
self.openDialog.set(True)
Tkinter.Checkbutton(checkButtonFrame, variable=self.openDialog,
text="Open Render/Select by Attribute").grid(
row=0, column=0, sticky='w')
self.doLog = Tkinter.IntVar(parent)
self.doLog.set(False)
Tkinter.Checkbutton(checkButtonFrame,
text="Send match info to Reply Log",
variable=self.doLog).grid(row=1, column=0, sticky='w')
from chimera import dialogs
dialogs.register(AddAttrDialog.name, AddAttrDialog)
| [
"alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5"
] | alex.gawronski@d60594c4-dda9-11dd-87d8-31aa04531ed5 |
bb18aefd362885d2dd59a4b42d6c8feec042ccd5 | 879677f51fed955b6f4c3c735546d43472540d4b | /parse_input.py | e69e689105c5f403364a853e07fcf8610febd008 | [] | no_license | bhaktipriya/Vanilla-Neural-Nets | 5a79f9bc2a213d5032c28cd4add28ec2255a8154 | 430fdc901a6b782073583e3be20d9ca1d48354f4 | refs/heads/master | 2021-01-11T15:20:10.180002 | 2017-01-29T08:08:43 | 2017-01-29T08:08:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,190 | py | import numpy as np
import scipy.misc
import copy
import scipy.ndimage.interpolation
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def parse(filename):
content = [line.rstrip('\n') for line in open(filename)]
digits = {'0':[],'1':[],'2':[],'3':[],'4':[],'5':[],'6':[],'7':[],'8':[],'9':[]}
for ll in xrange(0,len(content),33):
#print ll
digit_and_label = copy.deepcopy(content[ll:ll+33])
digit_ = copy.deepcopy(np.zeros([32,32]))
digit_ = copy.deepcopy([list(line) for line in digit_])
for i in xrange(32):
for j in xrange(32):
if digit_and_label[i][j]=='1':
digit_[i][j] = int(255)
num = int(digit_and_label[32])
a = copy.deepcopy(np.array(digit_))
#print num
# print a.shape
res = copy.deepcopy(scipy.misc.imresize(a,(8,8),interp='nearest'))
#print res
for i in xrange(8):
for j in xrange(8):
if res[i][j]<=128:
res[i][j] = 0
else:
res[i][j] = 1
#print num, "====================="
res = copy.deepcopy(np.insert(res.flatten(),0,1))
#scipy.misc.imshow(res)
res=copy.deepcopy(res.tolist())
#print res
digits[str(num)].append(copy.deepcopy(res))
return digits
#print parse('train')
| [
"bhaktipriya96@gmail.com"
] | bhaktipriya96@gmail.com |
29131f57a53f289fa1acbf453e12bd04d8254414 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03971/s324739743.py | cf8d957b84e9ee6c1e59d6f5affdedeba9c06742 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | N, A, B = map(int, input().split())
S = input()
ac_count = 0
ac_b_count = 0
for s in S:
if ac_count < A + B:
if s == 'a':
print('Yes')
ac_count += 1
elif s == 'b' and ac_b_count < B:
print('Yes')
ac_count += 1
ac_b_count += 1
else:
print('No')
else:
print('No') | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0e99241442ef4baba926c3f67fdd64a90cfc019f | d47be173a3ad7bdc3839fbefdafae8235fb2542f | /components/equipment.py | 430f8fdcdba1b58ce7a0f2d6b9d1b30626c6613f | [] | no_license | ignaoya/caster | ce4ccefc69bbc4a08412414c8ace1df20a427f9c | e64276707dbe85315571486494be8f48964b8dc1 | refs/heads/master | 2021-07-12T16:20:14.567858 | 2020-07-28T11:03:44 | 2020-07-28T11:03:44 | 177,807,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,801 | py | from equipment_slots import EquipmentSlots
class Equipment:
def __init__(self, main_hand=None, off_hand=None):
self.main_hand = main_hand
self.off_hand = off_hand
@property
def max_hp_bonus(self):
bonus = 0
if self.main_hand and self.main_hand.equippable:
bonus += self.main_hand.equippable.max_hp_bonus
if self.off_hand and self.off_hand.equippable:
bonus += self.main_hand.equippable.max_hp_bonus
return bonus
@property
def power_bonus(self):
bonus = 0
if self.main_hand and self.main_hand.equippable:
bonus += self.main_hand.equippable.power_bonus
if self.off_hand and self.off_hand.equippable:
bonus += self.off_hand.equippable.power_bonus
return bonus
@property
def defense_bonus(self):
bonus = 0
if self.main_hand and self.main_hand.equippable:
bonus += self.main_hand.equippable.defense_bonus
if self.off_hand and self.off_hand.equippable:
bonus += self.off_hand.equippable.defense_bonus
return bonus
@property
def max_mana_bonus(self):
bonus = 0
if self.main_hand and self.main_hand.equippable:
bonus += self.main_hand.equippable.max_mana_bonus
if self.off_hand and self.off_hand.equippable:
bonus += self.main_hand.equippable.max_mana_bonus
return bonus
@property
def max_focus_bonus(self):
bonus = 0
if self.main_hand and self.main_hand.equippable:
bonus += self.main_hand.equippable.max_focus_bonus
if self.off_hand and self.off_hand.equippable:
bonus += self.main_hand.equippable.max_focus_bonus
return bonus
def toggle_equip(self, equippable_entity):
results = []
slot = equippable_entity.equippable.slot
if slot == EquipmentSlots.MAIN_HAND:
if self.main_hand == equippable_entity:
self.main_hand = None
results.append({'dequipped': equippable_entity})
else:
if self.main_hand:
results.append({'dequipped': self.main_hand})
self.main_hand = equippable_entity
results.append({'equipped': equippable_entity})
elif slot == EquipmentSlots.OFF_HAND:
if self.off_hand == equippable_entity:
self.off_hand = None
results.append({'dequipped': equippable_entity})
else:
if self.off_hand:
results.append({'dequipped': self.off_hand})
self.off_hand = equippable_entity
results.append({'equipped': equippable_entity})
return results
| [
"ignaoya93@gmail.com"
] | ignaoya93@gmail.com |
b0ebcf408ec96db2f5de565245fba1fe6890b293 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/6e9ea2f74da3868e106375d8efe39de34707e2ee-<_check_result>-bug.py | 7dbf8d31b95766ecf041fb78095a44679e970643 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,414 | py |
def _check_result(x, fun, status, slack, con, lb, ub, tol, message):
'\n Check the validity of the provided solution.\n\n A valid (optimal) solution satisfies all bounds, all slack variables are\n negative and all equality constraint residuals are strictly non-zero.\n Further, the lower-bounds, upper-bounds, slack and residuals contain\n no nan values.\n\n Parameters\n ----------\n x : 1D array\n Solution vector to original linear programming problem\n fun: float\n optimal objective value for original problem\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n slack : 1D array\n The (non-negative) slack in the upper bound constraints, that is,\n ``b_ub - A_ub @ x``\n con : 1D array\n The (nominally zero) residuals of the equality constraints, that is,\n ``b - A_eq @ x``\n lb : 1D array\n The lower bound constraints on the original variables\n ub: 1D array\n The upper bound constraints on the original variables\n message : str\n A string descriptor of the exit status of the optimization.\n tol : float\n Termination tolerance; see [1]_ Section 4.5.\n\n Returns\n -------\n status : int\n An integer representing the exit status of the optimization::\n\n 0 : Optimization terminated successfully\n 1 : Iteration limit reached\n 2 : Problem appears to be infeasible\n 3 : Problem appears to be unbounded\n 4 : Serious numerical difficulties encountered\n\n message : str\n A string descriptor of the exit status of the optimization.\n '
tol = (np.sqrt(tol) * 10)
contains_nans = (np.isnan(x).any() or np.isnan(fun) or np.isnan(slack).any() or np.isnan(con).any())
if contains_nans:
is_feasible = False
else:
invalid_bounds = ((x < (lb - tol)).any() or (x > (ub + tol)).any())
invalid_slack = ((status != 3) and (slack < (- tol)).any())
invalid_con = ((status != 3) and (np.abs(con) > tol).any())
is_feasible = (not (invalid_bounds or invalid_slack or invalid_con))
if ((status == 0) and (not is_feasible)):
status = 4
message = 'The solution does not satisfy the constraints, yet no errors were raised and there is no certificate of infeasibility or unboundedness. This is known to occur if the `presolve` option is False and the problem is infeasible. If you encounter this under different circumstances, please submit a bug report. Otherwise, please enable presolve.'
elif ((status == 0) and contains_nans):
status = 4
message = "Numerical difficulties were encountered but no errors were raised. This is known to occur if the 'presolve' option is False, 'sparse' is True, and A_eq includes redundant rows. If you encounter this under different circumstances, please submit a bug report. Otherwise, remove linearly dependent equations from your equality constraints or enable presolve."
elif ((status == 2) and is_feasible):
raise ValueError(message)
return (status, message)
| [
"dg1732004@smail.nju.edu.cn"
] | dg1732004@smail.nju.edu.cn |
c2dc614ebb35d37b1f02d60a7a2b4379aa756714 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/eventgrid/v20200601/list_domain_shared_access_keys.py | 1f55623514778ec7b92005163dffba4572484403 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 2,637 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListDomainSharedAccessKeysResult',
'AwaitableListDomainSharedAccessKeysResult',
'list_domain_shared_access_keys',
]
@pulumi.output_type
class ListDomainSharedAccessKeysResult:
"""
Shared access keys of the Domain.
"""
def __init__(__self__, key1=None, key2=None):
if key1 and not isinstance(key1, str):
raise TypeError("Expected argument 'key1' to be a str")
pulumi.set(__self__, "key1", key1)
if key2 and not isinstance(key2, str):
raise TypeError("Expected argument 'key2' to be a str")
pulumi.set(__self__, "key2", key2)
@property
@pulumi.getter
def key1(self) -> Optional[str]:
"""
Shared access key1 for the domain.
"""
return pulumi.get(self, "key1")
@property
@pulumi.getter
def key2(self) -> Optional[str]:
"""
Shared access key2 for the domain.
"""
return pulumi.get(self, "key2")
class AwaitableListDomainSharedAccessKeysResult(ListDomainSharedAccessKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListDomainSharedAccessKeysResult(
key1=self.key1,
key2=self.key2)
def list_domain_shared_access_keys(domain_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDomainSharedAccessKeysResult:
"""
Use this data source to access information about an existing resource.
:param str domain_name: Name of the domain.
:param str resource_group_name: The name of the resource group within the user's subscription.
"""
__args__ = dict()
__args__['domainName'] = domain_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:eventgrid/v20200601:listDomainSharedAccessKeys', __args__, opts=opts, typ=ListDomainSharedAccessKeysResult).value
return AwaitableListDomainSharedAccessKeysResult(
key1=__ret__.key1,
key2=__ret__.key2)
| [
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
d4d226e3bce3c4641da843ae4ccd09546e6dfbed | 7e3ddf4db2cc7007aec900894bf5a1fa26af683d | /mictnet/utils/metrics.py | 658c6b9642943493706eb12cb19aafdaab903adc | [
"Apache-2.0"
] | permissive | scenarios/MiCT-Net-PyTorch | b7a1524add0854332ae4d1ba3db976aa77d57288 | fdd72270f70cfe81c24e5cb1daa9df65ecc5b035 | refs/heads/master | 2022-04-07T09:37:42.459989 | 2020-02-18T15:44:32 | 2020-02-18T15:44:32 | 295,692,958 | 1 | 0 | Apache-2.0 | 2020-09-15T10:30:15 | 2020-09-15T10:30:14 | null | UTF-8 | Python | false | false | 1,153 | py | import torch
class AverageMeter(object):
    """Tracks the most recent value and the running mean of a series.

    `update(val, n)` folds in `val` observed `n` times; the statistics are
    exposed as the plain attributes `val`, `sum`, `count` and `avg`.
    """

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Zero out all running statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += n * val
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. with fmt=':.2f' -> "name 1.23 (4.56)"
        template = ''.join(['{name} {val', self.fmt, '} ({avg', self.fmt, '})'])
        return template.format(**vars(self))
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores/logits.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values for which to report top-k accuracy.

    Returns:
        List of 1-element tensors, one per k, each holding the fraction of
        samples whose true class is among the k highest-scored predictions.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # Indices of the maxk highest scores per sample; transposed to
        # (maxk, batch) so row slicing selects the top-k ranks.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # FIX: reshape(-1) instead of view(-1) — a row slice of the
            # transposed tensor is non-contiguous, and view() raises a
            # RuntimeError on it in PyTorch >= 1.5 when k > 1.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.div_(batch_size))
        return res
| [
"noreply@github.com"
] | noreply@github.com |
aa0eb817d55c9b1f59e8368528f940d9451cafe1 | a5279a284802084e694be1d419dd816abfd87be4 | /C-IDK.py | d9522a87e45c2dc37f9c71645fad257be56dae16 | [] | no_license | raghunandan15102009/MOVE | ffbafdca0d341a5a1a3943063aa3c99c03503f41 | efb2f86bbe0cedfdbb050d6c7bb4e99308b0a566 | refs/heads/main | 2023-07-06T00:15:17.227518 | 2021-08-09T07:35:48 | 2021-08-09T07:35:48 | 394,195,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | import dropbox
class TransferData:
    """Uploads local files to Dropbox using a pre-issued OAuth access token."""
    def __init__(self, access_token):
        self.access_token = access_token
    def upload_file(self, file_from, file_to):
        """Upload the local file `file_from` to the Dropbox path `file_to`.

        FIX: the original opened the file and never closed it; the context
        manager now guarantees the handle is released even if the upload raises.
        """
        dbx = dropbox.Dropbox(self.access_token)
        with open(file_from, 'rb') as f:
            dbx.files_upload(f.read(), file_to)
def main():
    """Prompt for a source and destination path, then upload via Dropbox."""
    # SECURITY(review): a live-looking Dropbox OAuth token is hard-coded and
    # committed to source control — it should be revoked and loaded from the
    # environment or a secrets store instead.
    access_token = 'sl.A2M0jU7ix4SP8kH1Ix3w2-eyvOuJoxwQzZhxVMSZDmnqbuD6miY7_F7KSvl2ZJQ_49c8-q4ajM_IlnXxbfIDgccQXMeuCPcxeohDMzxN7LjafVhaxdhM2OUz35Ity4P6LiX0-Tk'
    transferData = TransferData(access_token)
    # NOTE(review): the string passed to input() is its on-screen prompt, not
    # a default value — these Windows paths are displayed verbatim, and their
    # backslash sequences (e.g. \1) are interpreted as string escapes.
    file_from = input("C:\1. Office\Hack\Module 3\projects\C-98")
    file_to = input("C:\1. Office\Hack\Module 3\projects\C-IDK ")
    transferData.upload_file(file_from, file_to)
    print("Just a sample")
# Runs unconditionally on import (no __main__ guard in the original).
main()
| [
"noreply@github.com"
] | noreply@github.com |
782df01ee7388692ea2870c9a5f8b636234f32e9 | 6413fe58b04ac2a7efe1e56050ad42d0e688adc6 | /tempenv/lib/python3.7/site-packages/plotly/validators/isosurface/colorbar/title/font/_color.py | 3d45a80274dd4a6a4d2f5e5279d0ca5accc9ccbe | [
"MIT"
] | permissive | tytechortz/Denver_temperature | 7f91e0ac649f9584147d59193568f6ec7efe3a77 | 9d9ea31cd7ec003e8431dcbb10a3320be272996d | refs/heads/master | 2022-12-09T06:22:14.963463 | 2019-10-09T16:30:52 | 2019-10-09T16:30:52 | 170,581,559 | 1 | 0 | MIT | 2022-06-21T23:04:21 | 2019-02-13T21:22:53 | Python | UTF-8 | Python | false | false | 497 | py | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the `isosurface.colorbar.title.font.color` property.

    Thin, generated-style subclass: it pins the plotly property name, its
    parent path, and default edit/role metadata, then delegates all actual
    validation to the base ColorValidator.
    """
    def __init__(
        self,
        plotly_name='color',
        parent_name='isosurface.colorbar.title.font',
        **kwargs
    ):
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # pop() so explicit caller overrides win over these defaults
            edit_type=kwargs.pop('edit_type', 'calc'),
            role=kwargs.pop('role', 'style'),
            **kwargs
        )
| [
"jmswank7@gmail.com"
] | jmswank7@gmail.com |
237b5db6e779a7de6c8b385bcac3bf982604e07e | 931aa9c6a44f86e86440c17de62801b26b66fce8 | /constance/LV/getLineUnbalanceAndLosses.py | f92c4871027b8e1d87960321b14354a1e8ea4bb7 | [] | no_license | Constancellc/epg-psopt | 3f1b4a9f9dcaabacf0c7d2a5dbc10947ac0e0510 | 59bdc7951bbbc850e63e813ee635474012a873a4 | refs/heads/master | 2021-06-08T11:33:57.467689 | 2020-04-01T13:19:18 | 2020-04-01T13:19:18 | 96,895,185 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,785 | py | import csv
import random
import copy
import numpy as np
import matplotlib.pyplot as plt
from lv_optimization_new import LVTestFeeder
import pickle
#outfile = '../../../Documents/simulation_results/LV/voltages.csv'
stem = '../../../Documents/ccModels/eulv/'
alpha = 0.328684513701
g = open(stem+'lnsYprims.pkl','rb')
data = pickle.load(g)
g.close()
# first get phases
lds = np.load('../../../Documents/ccModels/loadBuses/eulvLptloadBusesCc-24.npy')
lds = lds.flatten()[0]
phase = []
for i in range(len(lds)):
bus = lds['load'+str(i+1)]
if bus[-1] == '1':
phase.append('A')
elif bus[-1] == '2':
phase.append('B')
elif bus[-1] == '3':
phase.append('C')
# data is a dictionary where the key is the line number and it points to
# [bus a, bus b, Yprim]
# so we need to build up a dictionary of the voltages
a = np.load(stem+'eulvLptaCc060.npy')
My = np.load(stem+'eulvLptMyCc060.npy')
v0 = np.load(stem+'eulvLptV0Cc060.npy')
Y = np.load(stem+'eulvLptYbusCc060.npy')
Y = Y.flatten()[0]
Y = Y.conj()
YNodeOrder = np.load(stem+'eulvNmtYNodeOrderCc060.npy')
buses = []
for node in YNodeOrder:
buses = buses+[node.split('.')[0]]
def get_losses(Vtot):
    """Return {line: [bus1, bus2, real power loss]} for every line in `data`.

    Relies on the module-level `data` (line -> [bus1, bus2, Yprim]) and
    `buses` (node ordering) tables; `Vtot` is the full complex node-voltage
    vector in the same order as `buses`.
    """
    losses = {}
    for line, (bus1, bus2, Yprim) in data.items():
        # Collect the voltage-vector positions belonging to each terminal bus.
        node_idx = [i for i, b in enumerate(buses) if b == bus1]
        node_idx += [i for i, b in enumerate(buses) if b == bus2]
        V_line = Vtot[node_idx]
        # Complex power injected at the line terminals; its sum is the loss.
        S_inj = V_line * Yprim.dot(V_line).conj()
        losses[line] = [bus1, bus2, sum(S_inj).real]
    return losses
def get_unbalance(Vtot):
    """Return {line: [bus1, bus2, |I0|, |I1|, |I2|]} sequence-current magnitudes.

    Applies the symmetrical-component (Fortescue) transform to the first
    three phase currents of each line in the module-level `data` table.
    NOTE(review): the rotation operator and the 1/3 factor are truncated to
    0.866 / 0.333 rather than exact values — reproduced as-is so the output
    matches the original implementation bit-for-bit.
    """
    rot = complex(-0.5, 0.866)  # truncated e^(j*120deg)
    fortescue = np.array([[complex(1, 0), complex(1, 0), complex(1, 0)],
                          [complex(1, 0), rot, rot * rot],
                          [complex(1, 0), rot * rot, rot]]) * 0.333
    unbalance = {}
    for line, (bus1, bus2, Yprim) in data.items():
        node_idx = [i for i, b in enumerate(buses) if b == bus1]
        node_idx += [i for i, b in enumerate(buses) if b == bus2]
        I_phase = Yprim.dot(Vtot[node_idx])
        I_seq = np.matmul(fortescue, I_phase[:3])
        unbalance[line] = [bus1, bus2, abs(I_seq[0]), abs(I_seq[1]), abs(I_seq[2])]
    return unbalance
fdr = LVTestFeeder('manc_models/1',1)
fdr.set_households_NR('../../../Documents/netrev/TC2a/03-Dec-2013.csv')
fdr.set_evs_MEA('../../../Documents/My_Electric_Avenue_Technical_Data/'+
'constance/ST1charges/')
voltages = fdr.get_all_voltages(My,a,alpha,v0)
losses_no_evs = {}
ub_no_evs = {}
print(fdr.predict_losses())
for t in voltages:
ls = get_losses(voltages[t])
ub = get_unbalance(voltages[t])
for l in ls:
if l not in losses_no_evs:
losses_no_evs[l] = 0
ub_no_evs[l] = [0]*3
losses_no_evs[l] += ls[l][2]
for i in range(3):
ub_no_evs[l][i] += ub[l][2+i]
fdr.uncontrolled()
voltages = fdr.get_all_voltages(My,a,alpha,v0)
losses_unc = {}
ub_unc = {}
print(fdr.predict_losses())
for t in voltages:
ls = get_unbalance(voltages[t])
ub = get_unbalance(voltages[t])
for l in ls:
if l not in losses_unc:
losses_unc[l] = 0
ub_unc[l] = [0]*3
losses_unc[l] += ls[l][2]
for i in range(3):
ub_unc[l][i] += ub[l][2+i]
fdr.load_flatten()
voltages = fdr.get_all_voltages(My,a,alpha,v0)
losses_lf = {}
ub_lf = {}
print(fdr.predict_losses())
for t in voltages:
ls = get_unbalance(voltages[t])
ub = get_unbalance(voltages[t])
for l in ls:
if l not in losses_lf:
losses_lf[l] = 0
ub_lf[l] = [0]*3
losses_lf[l] += ls[l][2]
for i in range(3):
ub_lf[l][i] += ub[l][2+i]
fdr.loss_minimise()
voltages = fdr.get_all_voltages(My,a,alpha,v0)
losses_lm = {}
ub_lm = {}
print(fdr.predict_losses())
for t in voltages:
ls = get_unbalance(voltages[t])
ub = get_unbalance(voltages[t])
for l in ls:
if l not in losses_lm:
losses_lm[l] = 0
ub_lm[l] = [0]*3
losses_lm[l] += ls[l][2]
for i in range(3):
ub_lm[l][i] += ub[l][2+i]
fdr.balance_phase2(phase)
voltages = fdr.get_all_voltages(My,a,alpha,v0)
losses_p = {}
ub_p = {}
print(fdr.predict_losses())
for t in voltages:
ls = get_unbalance(voltages[t])
ub = get_unbalance(voltages[t])
for l in ls:
if l not in losses_p:
losses_p[l] = 0
ub_p[l] = [0]*3
losses_p[l] += ls[l][2]
for i in range(3):
ub_p[l][i] += ub[l][2+i]
for i in range(3):
with open('lv test/branch_'+str(i)+'.csv','w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['line','no evs','unc','lf','lm','p'])
for l in losses_unc:
writer.writerow([l,ub_no_evs[l][i],ub_unc[l][i],ub_lf[l][i],
ub_lm[l][i],ub_p[l][i]])
with open('lv test/branch_losses.csv','w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['line','no evs','unc','lf','lm','p'])
for l in losses_unc:
writer.writerow([l,losses_no_evs[l],losses_unc[l],losses_lf[l],
losses_lm[l],losses_p[l]])
'''
busV = {}
for i in range(907):
busV[i+1] = [complex(0,0)]*3
for i in range(3):
busV[1][i] = v0[i]
for i in range(len(voltages)):
bn = int(i/3)+2
pn = i%3
busV[bn][pn] = voltages[i]
lineI = {}
for l in data:
b1 = data[l][0]
b2 = data[l][1]
Yp = data[l][2]
v_ = np.hstack((busV[int(b1)],busV[int(b2)]))
i = np.matmul(Yp,v_)[:3]
iT = 0
for ii in range(3):
iT += abs(i[ii]/1000)
lineI[l] = iT
with open('lv test/no_evs.csv','w') as csvfile:
writer = csv.writer(csvfile)
for l in lineI:
writer.writerow([l,lineI[l]])
busV = {}
for i in range(907):
busV[i+1] = [complex(0,0)]*3
for i in range(3):
busV[1][i] = v0[i]
for i in range(len(voltages)):
bn = int(i/3)+2
pn = i%3
busV[bn][pn] = voltages[i]
lineI = {}
for l in data:
b1 = data[l][0]
b2 = data[l][1]
Yp = data[l][2]
v_ = np.hstack((busV[int(b1)],busV[int(b2)]))
i = np.matmul(Yp,v_)[:3]
iT = 0
for ii in range(3):
iT += abs(i[ii]/1000)
lineI[l] = iT
with open('lv test/uncontrolled.csv','w') as csvfile:
writer = csv.writer(csvfile)
for l in lineI:
writer.writerow([l,lineI[l]])
busV = {}
for i in range(907):
busV[i+1] = [complex(0,0)]*3
for i in range(3):
busV[1][i] = v0[i]
for i in range(len(voltages)):
bn = int(i/3)+2
pn = i%3
busV[bn][pn] = voltages[i]
lineI = {}
for l in data:
b1 = data[l][0]
b2 = data[l][1]
Yp = data[l][2]
v_ = np.hstack((busV[int(b1)],busV[int(b2)]))
i = np.matmul(Yp,v_)[:3]
iT = 0
for ii in range(3):
iT += abs(i[ii]/1000)
lineI[l] = iT
with open('lv test/lf.csv','w') as csvfile:
writer = csv.writer(csvfile)
for l in lineI:
writer.writerow([l,lineI[l]])
busV = {}
for i in range(907):
busV[i+1] = [complex(0,0)]*3
for i in range(3):
busV[1][i] = v0[i]
for i in range(len(voltages)):
bn = int(i/3)+2
pn = i%3
busV[bn][pn] = voltages[i]
lineI = {}
for l in data:
b1 = data[l][0]
b2 = data[l][1]
Yp = data[l][2]
v_ = np.hstack((busV[int(b1)],busV[int(b2)]))
i = np.matmul(Yp,v_)[:3]
iT = 0
for ii in range(3):
iT += abs(i[ii]/1000)
lineI[l] = iT
with open('lv test/lm.csv','w') as csvfile:
writer = csv.writer(csvfile)
for l in lineI:
writer.writerow([l,lineI[l]])
# now I need to work out the line flows from the current injections
'''
| [
"constancellc@gmail.com"
] | constancellc@gmail.com |
b8709100989393624db5f2ddeddd1a9db4dfe293 | 1eaef7b2f8d138cfd377abc4427aa8dfeffca2ff | /QuickFTP.py | 4b5895298b2ba302b3c80db2293f3b788b5771df | [] | no_license | ratchet-Inc/QuickFTP_UE4_PY | 2ad326d41c0937cc0fec64c1f0bee6d97cf034d5 | 1f9d0f092297b74d68a12c6e584111ffc7155c4a | refs/heads/master | 2023-03-16T19:20:44.921600 | 2021-03-09T08:23:08 | 2021-03-09T08:23:08 | 333,596,278 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | import SocketController
class QuickFTP(object):
    """Simple helper for sending binary files over a socket connection.

    Holds the directory files are read from, the file currently being
    transferred (none at construction), and the socket controller used
    for transport.
    """

    def __init__(self, dir: str, socket: SocketController.SocketController):
        self.sock = socket
        self.filesDirectory = dir
        self.curFile = None
| [
"deniroturboman@hotmail.com"
] | deniroturboman@hotmail.com |
1e877888ec765a400293dfc038262acb74aba999 | 3baad9ca9756a8dbe6463df6e7f535aa2e0bffa3 | /{{ cookiecutter.site_name }}/{{ cookiecutter.main_module }}.py | 31d3b2b7913d472088c2dc695f0841b9d91b3e82 | [
"MIT"
] | permissive | brettcannon/python-azure-web-app-cookiecutter | 7fcaece747e7cef6d584c236aad4b842b63fa2f0 | e7a3fbc3a724b7bbde43eb5904881d2e0cc07c42 | refs/heads/master | 2023-07-12T07:10:22.594048 | 2017-02-27T20:00:29 | 2017-02-27T20:00:29 | 63,901,465 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | {% if cookiecutter.site_type == "socket" %}
"""An example HTTP server using sockets on Azure Web Apps."""
try:
from http.server import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import os
import sys
class PythonVersionHandler(BaseHTTPRequestHandler):
    """Responds to every GET with the interpreter's version string as text."""
    def do_GET(self):
        charset = "utf-8"
        # FIX: encode once so Content-Length reflects the BYTE length of the
        # body — len(sys.version) counts characters, which is wrong whenever
        # the version string contains non-ASCII text.
        body = sys.version.encode(charset)
        self.send_response(200)
        self.send_header("Content-type", "text/plain; charset={}".format(charset))
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)
if __name__ == "__main__":
server_address = "127.0.0.1", int(os.environ.get("PORT", 5555))
server = HTTPServer(server_address, PythonVersionHandler)
server.serve_forever()
{% else %}
"""An example WSGI server on Azure Web Apps."""
import sys
def wsgi_app(environ, start_response):
    """WSGI application that yields the interpreter version as plain text."""
    # Generator-style WSGI app: start_response fires on first iteration.
    start_response('200 OK', [('Content-type', 'text/plain')])
    yield sys.version.encode()
if __name__ == '__main__':
from wsgiref.simple_server import make_server
httpd = make_server('localhost', 5555, wsgi_app)
httpd.serve_forever()
{% endif %}
| [
"brett@python.org"
] | brett@python.org |
233772e33dc528ca05905cbb759c6e51a5b29fe8 | 0cf55907660c0ea25079f6019cc98af12ccf4073 | /EditDistance.py | bf7d68c24e36ac66ef690c4a9543dfe5e00a5e2b | [] | no_license | WeiTang1/DailyCoding | 85c6276572acd501e1093c2e6258189078389935 | 2b6e5a0d974152562d82fff59232448fc5e32280 | refs/heads/master | 2020-04-04T11:02:17.982293 | 2019-02-11T04:11:04 | 2019-02-11T04:11:04 | 155,876,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | def editDistDP(str1, str2):
# Create a table to store results of subproblems
dp = [[0 for i in range(len(str2) + 1)] for _ in range(len(str1) + 1)]
# Fill d[][] in bottom up manner
for i in range(len(str1) + 1):
for j in range(len(str2) + 1):
# If first string is empty, only option is to
# insert all characters of second string
if i == 0:
dp[i][j] = j # Min. operations = j
# If second string is empty, only option is to
# remove all characters of second string
elif j == 0:
dp[i][j] = i # Min. operations = i
# If last characters are same, ignore last char
# and recur for remaining string
elif str1[i - 1] == str2[j - 1]:
dp[i][j] = dp[i - 1][j - 1]
# If last character are different, consider all
# possibilities and find minimum
else:
dp[i][j] = 1 + min(dp[i][j - 1], # Insert
dp[i - 1][j], # Remove
dp[i - 1][j - 1]) # Replace
for d in dp:
print d
return dp[len(str1)][len(str2)]
print editDistDP("ab","acd")
| [
"donatshanghai@gmail.com"
] | donatshanghai@gmail.com |
d9dfa81dcdef58d13ec70d0d1b2dbd983f6dba11 | 8e6be7cac8eb2c95f2d659e6a2a78774cafe5826 | /InsaneLearner.py | 40c3b6da39f0f724b170a8d5371028561b3f4ac2 | [] | no_license | Neil621/Decision_Tree | bffa7baefa7b0100f98142a244a0f0405d594737 | 40dc2f425f5a20df7d50bfa82fe66b83590694bb | refs/heads/master | 2020-07-31T02:42:49.157094 | 2019-09-30T18:56:12 | 2019-09-30T18:56:12 | 210,456,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py |
import BagLearner as bl
import LinRegLearner as lrl
class InsaneLearner(object):
    """Ensemble of 20 bags, each itself a 20-bag of linear-regression learners.

    Wraps a BagLearner whose base learner is another BagLearner over
    LinRegLearner instances (20 x 20 regressors in total).
    """

    def __init__(self, verbose=False):
        inner_cfg = {
            "learner": lrl.LinRegLearner,
            "kwargs": {},
            "bags": 20,
            "boost": False,
            "verbose": verbose,
        }
        self.learner = bl.BagLearner(
            learner=bl.BagLearner,
            kwargs=inner_cfg,
            bags=20,
            boost=False,
            verbose=verbose,
        )

    def author(self):
        """Georgia Tech user name of the author."""
        return "nwatt3"

    def addEvidence(self, dataX, dataY):
        """Train every sub-learner on the supplied data."""
        self.learner.addEvidence(dataX, dataY)

    def query(self, points):
        """Return the ensemble prediction for `points`."""
        return self.learner.query(points)
"neil.watt@ymail.com"
] | neil.watt@ymail.com |
a89d0a7db49b9c97787f5713a000415bb2870f84 | a97db7d2f2e6de010db9bb70e4f85b76637ccfe6 | /leetcode/743-Network-Delay-Time.py | 89a0689140f2f23f05b225a027399d92382c2f3c | [] | no_license | dongxiaohe/Algorithm-DataStructure | 34547ea0d474464676ffffadda26a92c50bff29f | a9881ac5b35642760ae78233973b1608686730d0 | refs/heads/master | 2020-05-24T20:53:45.689748 | 2019-07-19T03:46:35 | 2019-07-19T03:46:35 | 187,463,938 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | class Solution(object):
def networkDelayTime(self, times, N, K):
routes, seen, minHeap = collections.defaultdict(list), {}, []
for u, v, w in times:
routes[u].append([v, w])
heapq.heappush(minHeap, [0, K])
while minHeap:
time, tmpNode = heapq.heappop(minHeap)
if tmpNode not in seen:
seen[tmpNode] = time
for v, w in routes[tmpNode]:
heapq.heappush(minHeap, [time + w, v])
return max(seen.values()) if N == len(seen) else -1
| [
"ddong@zendesk.com"
] | ddong@zendesk.com |
f68f3963d1b07205e946987a8cdae6983f09b17b | b32fa26f60e71311a51055122a21fc908d4e9566 | /0x04-python-more_data_structures/3-common_elements.py | 4d0ebf389400a9e00ea67c424e45d465d8bc12a8 | [] | no_license | felipeserna/holbertonschool-higher_level_programming | 3ac4fdc91bf70477285994a1d41a72cd6987a277 | 9529bcdd50834569e25f1e0407922b3703807d45 | refs/heads/master | 2023-06-30T04:34:49.806549 | 2021-08-04T02:42:35 | 2021-08-04T02:42:35 | 259,475,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | #!/usr/bin/python3
def common_elements(set_1, set_2):
    """Return the set of elements present in both set_1 and set_2."""
    return set_1.intersection(set_2)
| [
"feserna86@gmail.com"
] | feserna86@gmail.com |
8cd013a5cfbea88a36682c33babb0f3b7dae5129 | b0c39c21ea63904d3e3c610a06c1e11b0a0c80d9 | /setup.py | 3998246ca1e02d4c827786524e5a89b7b902ab42 | [
"Apache-2.0"
] | permissive | kevenli/FeedIn | d9893d6f7c29d818460da875d5abcb5b9f25b958 | 9b45ba9090d279834ac59887a24154e6ac7f4593 | refs/heads/master | 2021-01-23T00:48:30.404336 | 2015-05-26T06:33:05 | 2015-05-26T06:33:05 | 27,056,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,450 | py | from distutils.core import setup
from setuptools import find_packages
setup(name='FeedIn',
version='0.1',
author='Keven Li',
author_email='kevenli@users.noreply.github.com',
url='https://github.com/kevenli/FeedIn',
download_url='https://github.com/kevenli/FeedIn',
description='Web data fetching engine.',
long_description='A web data fetching engine which can be used in \
easy configuration and has multiple build-in modules.',
packages=find_packages(exclude=('tests', 'tests.*')),
provides=['feedin'],
keywords='web data python fetching',
license='Apache License, Version 2.0',
classifiers=['Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
'lxml',
'BeautifulSoup',
],
) | [
"pbleester@gmail.com"
] | pbleester@gmail.com |
4cef21cb04ce0dbc44850ee4541694bc74724d45 | beae8c1491857a48c1d0cc4d0248f64eb43bd575 | /quest_data.py | 29feb4c61e3b4b23b0c3ec1d37496ed1a3d44e92 | [] | no_license | qazlal/Runescape_Quest_Manager | 838b852d3affbd5d18655da6425178b17a176432 | b53ca8f65c7ecba58ee2511a2d2176261ba14d77 | refs/heads/master | 2021-01-10T04:49:34.952778 | 2016-02-05T07:20:05 | 2016-02-05T07:20:05 | 50,892,538 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 15,689 | py | # quest ordering is mainly based off release date. the information was taken from
# http://runescape.wikia.com/wiki/List_of_quest_release_dates
# quests are owned by Jagex
''' 0: number
1: name
2: skill req
3: skill rec
4: quest req
5: quest rec
'''
quests = []
#this is an empty entry so that the indexing can start at 1 instead of 0
quests.append([0,'',{},{},[],[]])
quests.append([1,"Cook's Assistant",{},{},[],[]])
quests.append([2,'Demon Slayer',{},{},[],[]])
quests.append([3,'The Restless Ghost',{},{},[],[]])
quests.append([4,"Gunnar's Ground",{'CRAFTING':5},{},[],[]]) #used to be Romeo and Juliet
#note: this quest was released in between quest 166 and 167, but the indexing
#(at least on rs wiki) got messed up because sheep shearer and witch's potion
#became miniquests. because of that, i'm going to put these first two quests in
#the index where these discontinued quests would have gone, but this is
#arbitrary and is just to make sure the numbers turn out okay
# quests.append([5,'Discontinued: Sheep Shearer',{},{},[],[]])#now a miniquest
quests.append([5,'Quiet Before the Swarm',{'ATTACK':35,'STRENGTH':42},{},[9,88],[]])
quests.append([6,'Shield of Arrav',{},{},[],[]])
quests.append([7,'Ernest the Chicken',{},{},[],[]])
quests.append([8,'Vampyre Slayer',{},{},[],[]])
quests.append([9,'Imp Catcher',{},{},[],[]])
quests.append([10,'Stolen Hearts',{},{},[],[]])
quests.append([11,"What's Mine is Yours",{'SMITHING':5},{},[],[]])
quests.append([12,'The Death of Chivalry',{},{},[],[]])
#note: this quest was released in between quest 166 and 167, but the indexing
#(at least on rs wiki) got messed up because sheep shearer and witch's potion
#became miniquests. because of that, i'm going to put these first two quests in
#the index where these discontinued quests would have gone, but this is
#arbitrary and is just to make sure the numbers turn out okay
# quests.append([13,'Discontinued: Witch's Potion',{},{},[],[]])#now a miniquest
quests.append([13,'Love Story',{'MAGIC':77,'CONSTRUCTION':68,'SMITHING':68,'CRAFTING':67},{},[104,100],[]])
quests.append([14,"The Knight's Sword",{'MINING':10},{},[],[]])
quests.append([15,'Goblin Diplomacy',{},{},[],[]])
quests.append([16,"Pirate's Treasure",{},{},[],[]])
quests.append([17,'Dragon Slayer',{'QUEST POINTS':33},{'MAGIC':33,'PRAYER':43},[],[]])
quests.append([18,'Druidic Ritual',{},{},[],[]])
quests.append([19,'Lost City',{'CRAFTING':31,'WOODCUTTING':36},{},[],[]])
quests.append([20,"Witch's House",{},{},[],[]])
quests.append([21,"Merlin's Crystal",{},{},[],[]])
quests.append([22,"Heroes' Quest",{'COOKING':53,'FISHING':53,'HERBLORE':25,'MINING':50,'DEFENCE':25,
'QUEST POINTS':56},{},[6,19,17,21,18],[]])
quests.append([23,'Scorpion Catcher',{'PRAYER':31},{},[],[]])
quests.append([24,'Family Crest',{'CRAFTING':40,'SMITHING':40,'MINING':40,'MAGIC':59},{},[],[]])
quests.append([25,'Tribal Totem',{'THIEVING':21},{},[],[]])
quests.append([26,'Fishing Contest',{'FISHING':10},{},[],[]])
quests.append([27,"Monk's Friend",{},{},[],[]])
quests.append([28,'Temple of Ikov',{'THIEVING':42,'RANGED':40},{},[],[]])
quests.append([29,'Clock Tower',{},{},[],[]])
quests.append([30,'Holy Grail',{'ATTACK':30},{},[21],[]])
quests.append([31,'Tree Gnome Village',{},{},[],[]])
quests.append([32,'Fight Arena',{},{},[],[]])
quests.append([33,'Hazeel Cult',{},{},[],[]])
quests.append([34,'Sheep Herder',{},{},[],[]])
quests.append([35,'Plague City',{},{},[],[]])
quests.append([36,'Sea Slug',{'FIREMAKING':30},{},[],[]])
quests.append([37,'Waterfall Quest',{},{},[],[]])
quests.append([38,'Biohazard',{},{},[35],[]])
quests.append([39,'Jungle Potion',{'HERBLORE':3},{},[18],[]])
quests.append([40,'The Grand Tree',{},{},[],[31]])
quests.append([41,'Shilo Village',{'CRAFTING':20,'AGILITY':32},{'PRAYER':43},[39],[]])
quests.append([42,'Underground Pass',{'RANGED':25},{'MAGIC':34,'AGILITY':50,'PRAYER':43,'THIEVING':50},[38],[]])
quests.append([43,'Observatory Quest',{},{},[],[]])
quests.append([44,'The Tourist Trap',{'FLETCHING':10,'SMITHING':20},{},[],[]])
quests.append([45,'Watchtower',{'HERBLORE':14,'MAGIC':14,'TTHIEVING':15,'AGILITY':25,'MINING':40},{},[],[]])
quests.append([46,'Dwarf Cannon',{},{},[],[]])
quests.append([47,'Murder Mystery',{},{},[],[]])
quests.append([48,'The Dig Site',{'THIEVING':25,'AGILITY':10,'HERBLORE':10},{},[],[]])
quests.append([49,"Gertrude's Cat",{},{},[],[]])
quests.append([50,"Legends' Quest",{'AGILITY':50,'CRAFTING':50,'HERBLORE':45,'MINING':52,'PRAYER':42,
'SMITHING':50,'STRENGTH':50,'THIEVING':50,'WOODCUTTING':50,'QUEST POINTS':108},{'PRAYER':43},[24,22,41,42,37],[]])
quests.append([51,'Rune Mysteries',{},{},[],[]])
quests.append([52,'Big Chompy Bird Hunting',{'COOKING':30,'RANGED':30,'FLETCHING':5},{},[],[]])
quests.append([53,'Elemental Workshop I',{'MINING':20,'SMITHING':20,'CRAFTING':20},{},[],[]])
quests.append([54,'Priest in Peril',{},{},[],[]])
quests.append([55,'Nature Spirit',{},{'CRAFTING':18,'PRAYER':10},[3,54],[]])
quests.append([56,'Death Plateau',{},{},[],[]])
quests.append([57,'Troll Stronghold',{'AGILITY':15,'THIEVING':30},{},[56],[]])
quests.append([58,'Tai Bwo Wannai Trio',{'AGILITY':15,'FISHING':5,'COOKING':30},{},[39],[]])
quests.append([59,'Regicide',{'AGILITY':56,'CRAFTING':10},{},[42],[]])
quests.append([60,"Eadgar's Ruse",{'HERBLORE':31,'AGILITY':15},{},[57,18],[]])
quests.append([61,"Shades of Mort'ton",{'CRAFTING':20,'FIREMAKING':5,'HERBLORE':15},{},[],[]])
quests.append([62,'The Fremennik Trials',{'CRAFTING':40,'FLETCHING':25,'WOODCUTTING':40},{},[],[]])
quests.append([63,'Horror from the Deep',{'AGILITY':35},{'PRAYER':43,'CONSTITUTION':50},[],[]])
quests.append([64,'Throne of Miscellania',{'HERBLORE':35,'FARMING':10,'WOODCUTTING':45,'FISHING':40,'MINING':30},{},[22,62],[]])
quests.append([65,'Monkey Madness',{},{'PRAYER':43,'THIEVING':10},[40,31],[]])
quests.append([66,'Haunted Mine',{'CRAFTING':35},{'AGILITY':15},[54,55],[]])#need to double check that priest in peril is required
quests.append([67,'Troll Romance',{'AGILITY':28},{'MAGIC':61,'PRAYER':43},[57],[]])
quests.append([68,'In Search of the Myreque',{'AGILITY':25},{},[55],[]])
quests.append([69,'Creature of Fenkenstrain',{'CRAFTING':20,'THIEVING':25},{'SMITHING':20},[],[]])
quests.append([70,'Roving Elves',{},{},[37,59],[]])
quests.append([71,'Ghosts Ahoy',{'AGILITY':25,'COOKING':20},{},[54,3],[]])
quests.append([72,'One Small Favour',{'HERBLORE':18,'CRAFTING':25,'SMITHING':30,'AGILITY':36},{},[51,41],[]])
quests.append([73,'Mountain Daughter',{'AGILITY':20},{},[],[]])
quests.append([74,'Between a Rock...',{'DEFENCE':30,'MINING':40,'SMITHING':50},{},[46,26],[]])
quests.append([75,'The Feud',{'THIEVING':30},{},[],[]])
quests.append([76,'The Golem',{'CRAFTING':20,'THIEVING':25},{},[],[]])
quests.append([77,'Desert Treasure',{'SLAYER':10,'FIREMAKING':50,'MAGIC':50,'THIEVING':53,'MINING':50},{'AGILITY':47},[48,44,28,54,57,37],[]])
quests.append([78,"Icthlarin's Little Helper",{},{},[3,49,185],[]])
quests.append([79,'Tears of Guthix',{'FIREMAKING':49,'MINING':20,'CRAFTING':20,'QUEST POINTS':44},{'SMITHING':49,'CRAFTING':49},[],[]])
quests.append([80,'Zogre Flesh Eaters',{'SMITHING':4,'HERBLORE':8,'STRENGTH':20,'RANGED':30},{'FLETCHING':30},[39,52],[]])
quests.append([81,'The Lost Tribe',{'AGILITY':13,'MINING':17,'THIEVING':13},{},[15],[]])
quests.append([82,'The Giant Dwarf',{'CRAFTING':12,'FIREMAKING':16,'MAGIC':33,'THIEVING':14},{},[],[]])
quests.append([83,'Recruitment Drive',{},{},[18],[]])
quests.append([84,"Mourning's Ends Part I",{'RANGED':60,'THIEVING':50},{},[52,34,70],[]])
quests.append([85,'Forgettable Tale of a Drunken Dwarf ',{'COOKING':22,'FARMING':17},{},[82,26],[]])
quests.append([86,'Garden of Tranquillity',{'FARMING':25},{},[69],[]])
quests.append([87,'A Tail of Two Cats',{},{},[78],[]])
quests.append([88,'Wanted!',{'QUEST POINTS':33},{},[54,81,83],[71]])
quests.append([89,"Mourning's Ends Part II",{},{'PRAYER':43,'AGILITY':60},[84],[]])
quests.append([90,'Rum Deal',{'FARMING':40,'CRAFTING':42,'PRAYER':47,'FISHING':50,'SLAYER':42},{},[54,80],[]])
quests.append([91,'Shadow of the Storm',{'CRAFTING':30},{},[2,76],[]])
quests.append([92,'Making History',{},{'CRAFTING':24,'SMITHING':40,'MINING':40},[3,54],[]])#not sure about priest in peril
quests.append([93,'Ratcatchers',{},{'HERBLORE':54},[78],[]])
quests.append([94,'Spirits of the Elid',{'MAGIC':33,'RANGED':37,'MINING':37,'THIEVING':37},{},[],[]])
quests.append([95,'Devious Minds',{'SMITHING':65,'RUNECRAFTING':50,'FLETCHING':50},{},[88,11,57],[]])
quests.append([96,'The Hand in the Sand',{'THIEVING':17,'CRAFTING':49},{},[],[]])
quests.append([97,"Enakhra's Lament",{'CRAFTING':50,'FIREMAKING':45,'MAGIC':13},{'MINING':45},[],[]])#wiki for details on skills
quests.append([98,'Cabin Fever',{'AGILITY':42,'CRAFTING':45,'SMITHING':50,'RANGED':40},{},[16,90],[]])
quests.append([99,'Fairy Tale I - Growing Pains',{},{},[19,55],[39]])
quests.append([100,'Recipe for Disaster',{'COOKING':70,'MAGIC':59,'THIEVING':53,'FISHING':53,'MINING':52,
'CRAFTING':50,'FIREMAKING':50,'WOODCUTTING':50,'AGILITY':50,'RANGED':40,'HERBLORE':45,'FLETCHING':10,
'SLAYER':10,'SMITHING':40,'QUEST POINTS':176},{},[1,15,26,49,91,52,38,2,47,55,20,19,50,65,77,63],[]])#dont know how to deal with this one
# quests.append([101,'In Aid of the Myreque',{},{},[],[]])
# quests.append([102,"A Soul's Bane",{},{},[],[]])
# quests.append([103,'Rag and Bone Man',{},{},[],[]])
# quests.append([104,'Swan Song',{},{},[],[]])
# quests.append([105,'Royal Trouble',{},{},[],[]])
quests.append([106,'Death to the Dorgeshuun',{'THIEVING':23,'AGILITY':23},{'COMBAT':80},[81],[]])
# quests.append([107,'A Fairy Tale Part II',{},{},[],[]])
# quests.append([108,'Lunar Diplomacy',{},{},[],[]])
# quests.append([109,'The Eyes of Glouphrie',{},{},[],[]])
# quests.append([110,'Darkness of Hallowvale',{},{},[],[]])
quests.append([111,'The Slug Menace',{'CRAFTING':30,'RUNECRAFTING':30,'SLAYER':30,'THIEVING':30},{},[36,88],[]])
# quests.append([112,'Elemental Workshop II',{},{},[],[]])
# quests.append([113,"My Arm's Big Adventure",{},{},[],[]])
# quests.append([114,'Enlightened Journey',{},{},[],[]])
# quests.append([115,"Eagles' Peak",{},{},[],[]])
# quests.append([116,'Animal Magnetism',{},{},[],[]])
# quests.append([117,'Contact!',{},{},[],[]])
# quests.append([118,'Cold War',{},{},[],[]])
# quests.append([119,'The Fremennik Isles',{},{},[],[]])
# quests.append([120,'Tower of Life',{},{},[],[]])
quests.append([121,'The Great Brain Robbery',{'CRAFTING':16,'CONSTRUCTION':30,'PRAYER':50},{},[69,100,98],[]])
# quests.append([122,'What Lies Below',{},{},[],[]])
# quests.append([123,"Olaf's Quest",{},{},[],[]])
quests.append([124,'Another Slice of H.A.M.',{'ATTACK':15,'PRAYER':25},{},[48,82,106],[]])
# quests.append([125,'Dream Mentor',{},{},[],[]])
# quests.append([126,'Grim Tales',{},{},[],[]])
# quests.append([127,"King's Ransom",{},{},[],[]])
# quests.append([128,'The Path of Glouphrie',{},{},[],[]])
# quests.append([129,'Back to my Roots',{},{},[],[]])
quests.append([130,'Land of the Goblins',{'PRAYER':30,'AGILITY':36,'FISHING':36,'THIEVING':36,'HERBLORE':37},{},[124,26],[]])
# quests.append([131,'Dealing with Scabaras',{},{},[],[]])
# quests.append([132,'Wolf Whistle',{},{},[],[]])
# quests.append([133,'As a First Resort',{},{},[],[]])
# quests.append([134,'Catapult Construction',{},{},[],[]])
# quests.append([135,"Kennith's Concerns",{},{},[],[]])
# quests.append([136,'Legacy of Seergaze',{},{},[],[]])
# quests.append([137,'Perils of Ice Mountain',{},{},[],[]])
# quests.append([138,'TokTz-Ket-Dill',{},{},[],[]])
# quests.append([139,'Smoking Kills',{},{},[],[]])
quests.append([140,'Rocking Out',{'AGILITY':60,'THIEVING':63,'CRAFTING':66,'SMITHING':69},{},[121],[]])
# quests.append([141,'Spirit of Summer',{},{},[],[]])
# quests.append([142,'Meeting History',{},{},[],[]])
# quests.append([143,'All Fired Up',{},{},[],[]])
# quests.append([144,"Summer's End",{},{},[],[]])
# quests.append([145,'Defender of Varrock',{},{},[],[]])
# quests.append([146,'Swept Away',{},{},[],[]])
# quests.append([147,'While Guthix Sleeps',{},{},[],[]])#
# quests.append([148,'Myths of the White Lands',{},{},[],[]])
# quests.append([149,'In Pyre Need',{},{},[],[]])
quests.append([150,'The Chosen Commander',{'AGILITY':46,'STRENGTH':46,'THIEVING':46},{},[130],[]])
# quests.append([151,'Glorious Memories',{},{},[],[]])
# quests.append([152,'The Tale of the Muspah',{},{},[],[]])
# quests.append([153,'Missing My Mummy',{},{},[],[]])
# quests.append([154,'Hunt for Red Raktuber',{},{},[],[]])
# quests.append([155,'The Curse of Arrav',{},{},[],[]])
# quests.append([156,"Fur 'n' Seek",{},{},[],[]])
# quests.append([157,'Forgiveness of a Chaos Dwarf ',{},{},[],[]])
# quests.append([158,'Within the Light',{},{},[],[]])
# quests.append([159,'The Temple at Senntisten',{},{},[],[]])#
# quests.append([160,'Blood Runs Deep',{},{},[],[]])
# quests.append([161,"Nomad's Requiem",{},{},[],[]])
# quests.append([162,'Rune Mechanics',{},{},[],[]])
# quests.append([163,'The Blood Pact',{},{},[],[]])
# quests.append([164,'Buyers and Cellars',{},{},[],[]])
# quests.append([165,'Fairy Tale III - Orks Rift',{},{},[],[]])#
# quests.append([166,'Elemental Workshop III',{},{},[],[]])
# quests.append([167,'A Void Dance',{},{},[],[]])#see entries 5 and 13
# quests.append([168,'The Void Stares Back',{},{},[],[]])#see entires 5 and 13
# quests.append([169,'Do No Evil',{},{},[],[]])
# quests.append([170,'King of the Dwarves',{},{},[],[]])
# quests.append([171,'The Prisoner of Glouphrie',{},{},[],[]])
# quests.append([172,'Elemental Workshop IV',{},{},[],[]])
# quests.append([173,'A Clockwork Syringe',{},{},[],[]])
# quests.append([174,'Deadliest Catch',{},{},[],[]])
# quests.append([175,'Salt in the Wound',{},{},[],[]])
# quests.append([176,'The Branches of Darkmeyer',{},{},[],[]])
quests.append([177,'Ritual of the Mahjarrat',{'CRAFTING':76,'AGILITY':77,'MINING':76},{},[147,159,87,111,140,33,32,165,97],[]])
# quests.append([178,'One Piercing Note',{},{},[],[]])
# quests.append([179,"The Firemaker's Curse",{},{},[],[]])#
# quests.append([180,'Let Them Eat Pie',{},{},[],[]])
# quests.append([181,'The Elder Kiln',{},{},[],[]])
# quests.append([182,'Song from the Depths',{},{},[],[]])
# quests.append([183,'Carnillean Rising',{},{},[],[]])
# quests.append([184,'Some Like it Cold',{},{},[],[]])
quests.append([185,'Diamond in the Rough',{},{},[10],[]])#isn't this just a remake??
# quests.append([186,'Rune Memories',{},{},[],[]])
# quests.append([187,'The Brink of Extinction',{},{},[],[]])
# quests.append([188,'The World Wakes',{},{},[],[]])#
# quests.append([189,'Bringing Home the Bacon',{},{},[],[]])
# quests.append([190,'Birthright of the Dwarves',{},{},[],[]])
quests.append([191,'Missing, Presumed Death',{},{},[],[188,150]])
# quests.append([192,'One of a Kind',{},{},[],[]])
quests.append([193,'Fate of the Gods',{'SUMMONING':67,'AGILITY':73,'DIVINATION':75,'SLAYER':76,'MAGIC':79},{},[191],[188,179,177]])
# quests.append([194,'A Shadow over Ashdale',{},{},[],[]])
# quests.append([195,'The Mighty Fall',{},{},[],[]])
# quests.append([196,"Plague's End",{},{},[],[]])
# quests.append([197,'Broken Home',{},{},[],[]])
# quests.append([198,'Heart of Stone',{},{},[],[]])
# quests.append([199,'Dishonour among Thieves',{},{},[],[]])
# quests.append([200,'Dimension of Disaster',{},{},[],[]])
# quests.append([201,"Hero's Welcome",{},{},[],[]])
# quests.append([202,'The Light Within',{},{},[],[]])
# quests.append([203,'The Lord of Vampyrium',{},{},[],[]])
# quests.append([204,'Call of the Ancestors',{},{},[],[]])
# quests.append([205,'Beneath Cursed Tides',{},{},[],[]])
# quests.append([206,'',{},{},[],[]])
# quests.append([0,'',{},{},[],[]])
| [
"qazlal.the.shark@gmail.com"
] | qazlal.the.shark@gmail.com |
2e17e9328da49803d12a9e44b705c42f7ddaded9 | 6eca5db3070fb8d0b21bf9a627983fa1448ab020 | /kata/5kyu/pagination_helper.py | de8a5ba3549beaff4b4e912d115c8afb84d430ca | [] | no_license | stevenedixon/codewars-python | 9f1e20e8afa064da82d2492a8e93fd736b78c58f | 8bf393751a4417ed6a0f2d24542b58ccb3897067 | refs/heads/master | 2021-02-03T21:56:26.896727 | 2020-03-11T15:17:59 | 2020-03-11T15:17:59 | 243,552,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,585 | py | # For this exercise you will be strengthening your page-fu mastery. You will complete the PaginationHelper class,
# which is a utility class helpful for querying paging information related to an array.
#
# The class is designed to take in an array of values and an integer indicating how many items will be allowed per
# each page. The types of values contained within the collection/array are not relevant.
#
# The following are some examples of how this class is used:
#
# helper = PaginationHelper(['a','b','c','d','e','f'], 4)
# helper.page_count # should == 2
# helper.item_count # should == 6
# helper.page_item_count(0) # should == 4
# helper.page_item_count(1) # last page - should == 2
# helper.page_item_count(2) # should == -1 since the page is invalid
#
# # page_ndex takes an item index and returns the page that it belongs on
# helper.page_index(5) # should == 1 (zero based index)
# helper.page_index(2) # should == 0
# helper.page_index(20) # should == -1
# helper.page_index(-10) # should == -1 because negative indexes are invalid
import math
class PaginationHelper:
    """Utility for querying paging information about a collection.

    The collection is stored as-is; ``items_per_page`` controls how many
    items each zero-based page holds (the last page may be partial).
    """

    def __init__(self, collection, items_per_page):
        self.collection = collection
        self.items_per_page = items_per_page

    def item_count(self):
        """Return the total number of items in the collection."""
        return len(self.collection)

    def page_count(self):
        """Return the number of pages needed to hold the collection."""
        return math.ceil(len(self.collection) / self.items_per_page)

    def page_item_count(self, page_index):
        """Return the number of items on page ``page_index`` (zero based),
        or -1 when the page index is out of range.

        Bug fix: negative page indexes are invalid too — the original
        fell through both checks and returned a full page count for them.
        """
        if page_index < 0 or page_index >= self.page_count():
            return -1
        if page_index == self.page_count() - 1:
            # Last page: the remainder of the division, or a full page
            # when the collection divides evenly (remainder 0 is falsy).
            return len(self.collection) % self.items_per_page or self.items_per_page
        return self.items_per_page

    def page_index(self, item_index):
        """Return the zero-based page that ``item_index`` falls on, or -1
        for indexes outside the collection (including negatives)."""
        if item_index < 0 or item_index >= len(self.collection):
            return -1
        return math.floor(item_index / self.items_per_page)
# Basic smoke test (runs at import time; prints, no assertions).
collection = range(1, 25)
helper = PaginationHelper(collection, 10)
print(helper.page_count()) # 3 pages: two full pages of 10, one page of 4
print(helper.page_index(23)) # 2 -- the last item (index 23) sits on page 2
print(helper.item_count()) # 24 items in total
| [
"stevenedixon@outlook.com"
] | stevenedixon@outlook.com |
37c24b3960134c61b5a8710012b9ad3ebf8a62fe | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Python/Scripts/Auto py to exe/build/lib/auto_py_to_exe/dialogs.py | 08ced7a66201b6e9c57607cc3cabb9a7329be462 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:17632a1084b74f79b082631a021c864a01bee63a94b1fb5768945e30f05a405b
size 2899
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
7461b94a60fcbe15ed116a2853262476e06aaafd | c06d18ac5b87b3b82fc486454c422b119d6c1ee9 | /src/demo/_tensorflow/linear/linear.py | 70f197e8d2ad5074603c813b803127c0355fe803 | [
"MIT"
] | permissive | tangermi/nlp | b3a4c9612e6049463bf12bc9abb7aff06a084ace | aa36b8b20e8c91807be73a252ff7799789514302 | refs/heads/master | 2022-12-09T12:33:15.009413 | 2020-04-03T04:03:24 | 2020-04-03T04:03:24 | 252,056,010 | 0 | 0 | null | 2022-12-08T07:26:55 | 2020-04-01T02:55:05 | Jupyter Notebook | UTF-8 | Python | false | false | 1,092 | py | # -*- coding: utf-8 -*-
import tensorflow as tf
class Linear(tf.keras.Model):
    """Single-output linear model implemented as one Dense layer.

    Kernel and bias are zero-initialised, so the untrained model
    outputs 0 for every input.
    """
    def __init__(self):
        super().__init__()
        self.dense = tf.keras.layers.Dense(
            units=1,
            activation=None,
            kernel_initializer=tf.zeros_initializer(),
            bias_initializer=tf.zeros_initializer()
        )
    def call(self, input):
        """Forward pass: apply the dense layer to `input`."""
        output = self.dense(input)
        return output
if __name__ == '__main__':
    # Toy regression data: two samples with three features each.
    X = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    y = tf.constant([[10.0], [20.0]])
    model = Linear()
    optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
    # 100 steps of plain gradient descent on mean-squared error.
    for i in range(100):
        with tf.GradientTape() as tape:
            y_pred = model(X) # call the model instead of writing y_pred = a * X + b explicitly
            loss = tf.reduce_mean(tf.square(y_pred - y))
        grads = tape.gradient(loss, model.variables) # model.variables collects every variable of the model
        optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))
    print(model.variables)
| [
"n10057862@qut.edu.au"
] | n10057862@qut.edu.au |
4750d23eda0e90ffef0ab83bf69fdc986388f472 | 9901588400e1755fb050421ceb1a25ddcd78ac50 | /PythonSS/pdfminer/encodingdb.py | a6d4c3abd46779ce954a119f021ef928f5d6bd81 | [] | no_license | COMP3001D/Python | 95b4f3338f6ef9578008bd2cc23631231d0e09ad | abd542998564a7fa2396cd7f5e41da8f23c1e7a0 | refs/heads/master | 2019-01-21T23:40:27.124220 | 2013-01-09T13:27:32 | 2013-01-09T13:27:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | #!/usr/bin/env python2
import re
from psparser import PSLiteral
from glyphlist import glyphname2unicode
from latin_enc import ENCODING
## name2unicode
##
STRIP_NAME = re.compile(r'[0-9]+')
def name2unicode(name):
    """Converts Adobe glyph names to Unicode numbers."""
    # Exact hit in the Adobe Glyph List mapping first.
    if name in glyphname2unicode:
        return glyphname2unicode[name]
    # Fallback: take the first run of digits embedded in the name and
    # treat it as a decimal code point (Python 2 `unichr`; the file's
    # shebang targets python2).
    m = STRIP_NAME.search(name)
    if not m: raise KeyError(name)
    return unichr(int(m.group(0)))
## EncodingDB
##
class EncodingDB(object):
    """Lookup tables mapping 8-bit font encodings to Unicode characters.

    The four tables are built once at class-definition time from the
    ENCODING rows of (glyph name, Standard, MacRoman, WinAnsi, PDFDoc)
    code points; a falsy entry means the glyph has no code in that
    encoding.
    """

    std2unicode = {}
    mac2unicode = {}
    win2unicode = {}
    pdf2unicode = {}
    for (name,std,mac,win,pdf) in ENCODING:
        c = name2unicode(name)
        if std: std2unicode[std] = c
        if mac: mac2unicode[mac] = c
        if win: win2unicode[win] = c
        if pdf: pdf2unicode[pdf] = c

    encodings = {
        'StandardEncoding': std2unicode,
        'MacRomanEncoding': mac2unicode,
        'WinAnsiEncoding': win2unicode,
        'PDFDocEncoding': pdf2unicode,
    }

    @classmethod
    def get_encoding(klass, name, diff=None):
        """Return the cid->unicode map for encoding `name` (falling back
        to StandardEncoding), optionally patched by a PDF /Differences
        array: an integer sets the current code, and each following
        PSLiteral assigns its glyph to consecutive codes from there.

        Bug fix: the code counter used to advance after integer entries
        as well, so the first glyph following a base code landed one
        position too high; per the PDF spec the counter only advances
        after a name has been consumed.
        """
        cid2unicode = klass.encodings.get(name, klass.std2unicode)
        if diff:
            # Copy before patching so the shared class-level table
            # stays untouched.
            cid2unicode = cid2unicode.copy()
            cid = 0
            for x in diff:
                if isinstance(x, int):
                    cid = x
                elif isinstance(x, PSLiteral):
                    try:
                        cid2unicode[cid] = name2unicode(x.name)
                    except KeyError:
                        # Unknown glyph name: leave this code unmapped.
                        pass
                    cid += 1
        return cid2unicode
| [
"amrinderjit9211@yahoo.co.uk"
] | amrinderjit9211@yahoo.co.uk |
32ce5b17ea9bcb319e977bff38ba4db7022d77ea | a4aa9c52276db10001a54881fc2644e1872038ad | /the_wall_app/migrations/0002_comment_message.py | 4c05fd55c91e5b91235494a6b0ff1ce4ed9f4f07 | [] | no_license | ANDREW-LIVINGSTON/the_wall | 2549e6115abe65d43c87e04e2c7e35771deffcd4 | 9d41278ee48e9b4c9400f975968453e9807a1862 | refs/heads/main | 2023-03-29T19:34:32.287966 | 2021-03-26T02:22:17 | 2021-03-26T02:22:17 | 351,636,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | # Generated by Django 2.2.4 on 2021-03-25 18:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django; adds the Message and Comment models,
    # both owned by a User through CASCADE foreign keys (Comment also
    # belongs to a parent Message). Schema history depends on this
    # file -- do not hand-edit the operations.
    dependencies = [
        ('the_wall_app', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messages', to='the_wall_app.User')),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('message', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='the_wall_app.Message')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='the_wall_app.User')),
            ],
        ),
    ]
| [
"andrewlivingston@MacBook-Pro.attlocal.net"
] | andrewlivingston@MacBook-Pro.attlocal.net |
16570936b50459644a2f3a4f4eef94df97b616a0 | e198950478bea292ba0ed44c07278ea1c7e8ddcd | /show.py | cf57e226061215b08349e810c4d663ffca2eac02 | [] | no_license | ric-bianchi/radiation | 9ec1fc7d3dcfc27927e98ec9679afca67ccba4e3 | 9c4f14cef27d28d1579652e405c93c1c633c1556 | refs/heads/master | 2021-05-12T06:55:02.858656 | 2016-07-08T16:14:34 | 2016-07-08T16:14:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | from astro_pi import AstroPi
# One shared Astro Pi handle for the whole module.
ap = AstroPi()
def draw(rate,subtracted):
    """Scroll the count rate (red) and then the subtracted faulty/noisy
    pixel count (blue) across the Astro Pi LED matrix."""
    ap.show_message(str(rate), scroll_speed=1, text_colour=[255,0,0]) # red for count rate
    ap.show_message(str(subtracted), scroll_speed=1, text_colour=[0,0,255]) #blue for faulty/noisy pixels
def calibrateMessage():
    """Display a static 'C' to signal that calibration is running."""
    ap.show_letter("C")
"david.honess+github@gmail.com"
] | david.honess+github@gmail.com |
ef3126368dbc5fb7408a2d35f7fc575b6e8fb814 | 5aee5e9274aad752f4fc1940030e9844ef8be17d | /HeavyIonsAnalysis/JetAnalysis/python/jets/akPu7CaloJetSequence_pPb_jec_cff.py | d5e8f0b11759a74be3f22036f437b49b4dd08852 | [] | no_license | jiansunpurdue/5316_dmesonreco_hiforest | 1fb65af11ea673646efe1b25bd49e88de9bf3b44 | a02224ad63160d91aab00ed2f92d60a52f0fd348 | refs/heads/master | 2021-01-22T02:53:43.471273 | 2014-04-26T16:10:12 | 2014-04-26T16:10:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,574 | py |
import FWCore.ParameterSet.Config as cms
from PhysicsTools.PatAlgos.patHeavyIonSequences_cff import *
from HeavyIonsAnalysis.JetAnalysis.inclusiveJetAnalyzer_cff import *
akPu7Calomatch = patJetGenJetMatch.clone(
src = cms.InputTag("akPu7CaloJets"),
matched = cms.InputTag("ak7HiGenJetsCleaned")
)
akPu7Caloparton = patJetPartonMatch.clone(src = cms.InputTag("akPu7CaloJets"),
matched = cms.InputTag("genParticles")
)
akPu7Calocorr = patJetCorrFactors.clone(
useNPV = False,
# primaryVertices = cms.InputTag("hiSelectedVertex"),
levels = cms.vstring('L2Relative','L3Absolute'),
src = cms.InputTag("akPu7CaloJets"),
payload = "AKPu7Calo_HI"
)
akPu7CalopatJets = patJets.clone(jetSource = cms.InputTag("akPu7CaloJets"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akPu7Calocorr")),
genJetMatch = cms.InputTag("akPu7Calomatch"),
genPartonMatch = cms.InputTag("akPu7Caloparton"),
jetIDMap = cms.InputTag("akPu7CaloJetID"),
addBTagInfo = False,
addTagInfos = False,
addDiscriminators = False,
addAssociatedTracks = False,
addJetCharge = False,
addJetID = False,
getJetMCFlavour = False,
addGenPartonMatch = True,
addGenJetMatch = True,
embedGenJetMatch = True,
embedGenPartonMatch = True,
embedCaloTowers = False,
embedPFCandidates = False
)
akPu7CaloJetAnalyzer = inclusiveJetAnalyzer.clone(jetTag = cms.InputTag("akPu7CalopatJets"),
genjetTag = 'ak7HiGenJetsCleaned',
rParam = 0.7,
matchJets = cms.untracked.bool(False),
matchTag = 'akPu7PFpatJets',
pfCandidateLabel = cms.untracked.InputTag('particleFlowTmp'),
trackTag = cms.InputTag("generalTracks"),
fillGenJets = True,
isMC = True,
genParticles = cms.untracked.InputTag("genParticles"),
eventInfoTag = cms.InputTag("generator")
)
akPu7CaloJetSequence_mc = cms.Sequence(
akPu7Calomatch
*
akPu7Caloparton
*
akPu7Calocorr
*
akPu7CalopatJets
*
akPu7CaloJetAnalyzer
)
akPu7CaloJetSequence_data = cms.Sequence(akPu7Calocorr
*
akPu7CalopatJets
*
akPu7CaloJetAnalyzer
)
akPu7CaloJetSequence_jec = akPu7CaloJetSequence_mc
akPu7CaloJetSequence_mix = akPu7CaloJetSequence_mc
akPu7CaloJetSequence = cms.Sequence(akPu7CaloJetSequence_jec)
akPu7CaloJetAnalyzer.genPtMin = cms.untracked.double(1)
| [
"sun229@purdue.edu"
] | sun229@purdue.edu |
4c7db02e19474a523a3347f94899ab002bba224e | f36ddb95c743c89bfc4bdca3507a5f02337343ba | /Test/lib/data_process.py | 318255c6a8c14cc313a3d30d49521d8f27947d19 | [] | no_license | nanahebao/Test | b6b151cbc7de0347b70179eb8aee87dec25ff5b5 | c031caaa3a096e0ecab49233954258f50b41b75a | refs/heads/master | 2020-06-06T07:56:48.886521 | 2019-06-19T07:47:11 | 2019-06-19T07:48:39 | 192,681,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | # encoding=utf-8
import json
from lib.file_process import read_file
def to_json(content):
    """Parse a JSON document supplied as str or bytes.

    Bytes are decoded as UTF-8 first. Any other type is rejected with a
    printed notice and an implicit None return.
    """
    if isinstance(content, bytes):
        content = content.decode('utf-8')
    if isinstance(content, str):
        return json.loads(content)
    print("don't support this type:", type(content))
if __name__ == '__main__':
    # Manual check: read a local JSON fixture and parse it.
    file = '/Users/yangcaihua/Documents/Dev/Test/personas/test_positive.json'
    rs = read_file(file)
    print(to_json(rs))
"liuna@nexbrio.com"
] | liuna@nexbrio.com |
ee15d8adddc7d8636aa76e3443668b9c91ffc9b0 | fdaed901af8cbc0347f53146b53f021869e9872d | /documentum_app/admin.py | 5f10ea5ae4f9f4ef2dfd39cb1771838d5ebf788a | [] | no_license | surajprasadgupta/Documentum | 92e29220ac51ae6c2fbe96c751d6cd8e2cfc4edd | f1931ddf05c83ed0602b02d483cec158d123072b | refs/heads/master | 2022-11-30T12:24:01.591843 | 2019-11-26T10:25:43 | 2019-11-26T10:25:43 | 211,777,874 | 0 | 0 | null | 2022-11-22T04:37:14 | 2019-09-30T04:53:08 | CSS | UTF-8 | Python | false | false | 188 | py | from django.contrib import admin
from documentum_app.models import *
# Register the app's models so they appear in the Django admin site.
admin.site.register(DocumentumFiles)
admin.site.register(Tags)
admin.site.register(Post)
| [
"spgupta082@gmail.com"
] | spgupta082@gmail.com |
9c541ff8948b8d049f61e4e3e61cfa30a9bb0056 | 33170e7fc26b6af2ab61b67aa520c307bbd0e118 | /py/trash/947_predict_0228-4.py | 09ef21e955ea5f5f8ebc8ba007660cc1fa85d498 | [
"MIT"
] | permissive | alaskaw/Microsoft-Malware-Prediction | 26e56adb803184328d1a8f5a3423d5edda7fc400 | 103cbf7c4fc98ae584e1aa9d1c220bb79ddbbd80 | refs/heads/master | 2020-04-28T21:22:06.403542 | 2019-03-14T04:36:01 | 2019-03-14T04:36:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,407 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 28 16:52:33 2019
@author: kazuki.onodera
"""
import numpy as np
import pandas as pd
import os, gc
from glob import glob
from tqdm import tqdm
import sys
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from sklearn.externals import joblib
from sklearn.metrics import roc_auc_score
import utils , utils_cat
utils.start(__file__)
#==============================================================================
SUBMIT_FILE_PATH = '../output/0228-4.csv.gz'
COMMENT = 'nejumi + f009 f014 top50(f019)'
EXE_SUBMIT = True
SEED = np.random.randint(9999)
print('SEED:', SEED)
param = {
'boosting_type': 'gbdt',
'class_weight': None,
'colsample_bytree': 0.71,
'learning_rate': 0.05,
'max_depth': -1,
'min_child_samples': 10,
'min_child_weight': 5,
'min_split_gain': 0,
# 'n_estimators': n_estimators,
'n_jobs': -1,
'num_leaves': 64,
'objective': 'binary',
# 'random_state': seed,
'reg_alpha': 0,
'reg_lambda': 0,
'subsample': 0.71,
'subsample_for_bin': 50000,
'subsample_freq': 1,
'max_bin': 255,
'metric': 'auc',
'nthread': -1,
'verbose': -1,
# 'seed': seed,
# 'device': 'gpu',
# 'gpu_use_dp': False
}
NROUND = 19999
NFOLD = 5
VERBOSE_EVAL = 100
ESR = 100
col_drop = [
'Census_SystemVolumeTotalCapacity',
]
USE_PREF_f019 = ['f019']
feature_f019 = pd.read_csv('LOG/imp_f019.csv').head(50).feature.tolist()
USE_PREF_all = ['f009', 'f014']
RESULT_DICT = {}
RESULT_DICT['file'] = SUBMIT_FILE_PATH
# =============================================================================
# def
# =============================================================================
def get_files(search: str, prefs: list) -> list:
    """Glob for files matching `search` and keep only those whose path
    contains at least one of the substrings in `prefs`.

    The surviving paths are printed (index and path) for logging, then
    returned in sorted order.

    Fixes: the filter was a manual nested loop, and the logging used a
    side-effect list comprehension that built a throwaway list of Nones.
    """
    files = [f for f in sorted(glob(search)) if any(p in f for p in prefs)]
    for i, f in enumerate(files):
        print(i, f)
    return files
# =============================================================================
# load
# =============================================================================
files_tr_f019 = get_files('../data/train_f*.f', USE_PREF_f019)
X_train_f019 = pd.concat([
pd.read_feather(f) for f in tqdm(files_tr_f019, mininterval=30)
], axis=1)[feature_f019]
files_tr_all = get_files('../data/train_f*.f', USE_PREF_all)
X_train_all = pd.concat([
pd.read_feather(f) for f in tqdm(files_tr_all, mininterval=30)
], axis=1)
X_train = pd.concat([X_train_f019, X_train_all, joblib.load('../external/X_train_nejumi.pkl.gz')],
axis=1)
del X_train_f019, X_train_all; gc.collect()
y_train = utils.load_target()['HasDetections']
# drop
if len(col_drop) > 0:
X_train.drop(col_drop, axis=1, inplace=True)
if X_train.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { X_train.columns[X_train.columns.duplicated()] }')
print('no dup :) ')
print(f'X_train.shape {X_train.shape}')
gc.collect()
CAT = list( set(X_train.columns)&set(utils_cat.ALL))
print(f'CAT: {CAT}')
COL = X_train.columns.tolist()
RESULT_DICT['feature size'] = len(COL)
RESULT_DICT['category feature size'] = len(CAT)
# =============================================================================
# all sample
# =============================================================================
dtrain = lgb.Dataset(X_train, y_train.values,
categorical_feature=CAT,
free_raw_data=False)
gc.collect()
#models = []
#for i in range(LOOP):
# param['seed'] = np.random.randint(9999)
# model = lgb.train(params=param, train_set=dtrain,
# num_boost_round=NROUND,
# )
# model.save_model(f'../data/lgb{i}.model')
# models.append(model)
# CV
param['seed'] = np.random.randint(9999)
ret, models = lgb.cv(param, dtrain, NROUND,
nfold=NFOLD,
stratified=True, shuffle=True,
feval=ex.eval_auc,
early_stopping_rounds=ESR,
verbose_eval=VERBOSE_EVAL,
categorical_feature=CAT,
seed=SEED)
for i, model in enumerate(models):
model.save_model(f'../data/lgb{i}.model')
#models = []
#for i in range(LOOP):
# model = lgb.Booster(model_file=f'../data/lgb{i}.model')
# models.append(model)
imp = ex.getImp(models)
imp['split'] /= imp['split'].max()
imp['gain'] /= imp['gain'].max()
imp['total'] = imp['split'] + imp['gain']
imp.sort_values('total', ascending=False, inplace=True)
imp.reset_index(drop=True, inplace=True)
imp.to_csv(f'LOG/imp_{__file__}.csv', index=False)
utils.savefig_imp(imp, f'LOG/imp_{__file__}.png', x='total')
RESULT_DICT['nfold'] = NFOLD
RESULT_DICT['seed'] = SEED
RESULT_DICT['eta'] = param['learning_rate']
RESULT_DICT['NROUND'] = NROUND
RESULT_DICT['train AUC'] = ret['auc-mean'][-1]
del dtrain, X_train, y_train; gc.collect()
# =============================================================================
# test
# =============================================================================
files_te = get_files('../data/test_f*.f', USE_PREF_f019+USE_PREF_all)
X_test = pd.concat([
pd.read_feather(f) for f in tqdm(files_te, mininterval=30)
]+[joblib.load('../external/X_test_nejumi.pkl.gz')], axis=1)[COL]
gc.collect()
if X_test.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { X_test.columns[X_test.columns.duplicated()] }')
print('no dup :) ')
print(f'X_test.shape {X_test.shape}')
y_pred = pd.Series(0, index=X_test.index)
for model in tqdm(models):
y_pred += pd.Series(model.predict(X_test)).rank()
y_pred /= y_pred.max()
sub = pd.read_csv('../input/sample_submission.csv.zip')
sub['HasDetections'] = y_pred.values
print('corr with best')
sub_best = pd.read_csv(utils.SUB_BEST)
print('with mybest:', sub['HasDetections'].corr( sub_best['HasDetections'], method='spearman') )
sub_best['HasDetections'] = np.load(utils.SUB_nejumi)
print('with nejumi:', sub['HasDetections'].corr( sub_best['HasDetections'], method='spearman') )
print("""
# =============================================================================
# write down these info to benchmark.xlsx
# =============================================================================
""")
[print(f'{k:<25}: {RESULT_DICT[k]}') for k in RESULT_DICT]
print("""
# =============================================================================
""")
# save
sub.to_csv(SUBMIT_FILE_PATH, index=False, compression='gzip')
#utils.to_pkl_gzip(sub[['HasDetections']], SUBMIT_FILE_PATH.replace('.csv.gz', f'_{SEED}.pkl'))
# =============================================================================
# submission
# =============================================================================
if EXE_SUBMIT:
print('submit')
utils.submit(SUBMIT_FILE_PATH, COMMENT)
#==============================================================================
utils.end(__file__)
#utils.stop_instance()
| [
"luvsic02@gmail.com"
] | luvsic02@gmail.com |
7c147996d68715789c74b3b96e8b0caa93ce93b5 | 84ef17f09c47fdef4619c40242f15e96ceff693e | /programas_ramificados.py | 4a38e1c147355f5acc3b14192e7252065086b693 | [] | no_license | nicolo0312/curso-pensamiento-computacional | c3bc4782677f28c6730a808652b43d02cd5dd971 | 4d3af6adbe3cacbf924319b34a3743a256894fd3 | refs/heads/main | 2023-03-31T23:37:17.402616 | 2021-04-09T15:15:52 | 2021-04-09T15:15:52 | 356,312,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | numero_1 = int(input('Escoge un entero: '))
numero_2 = int(input('Escoge otro entero: '))
if numero_1 < numero_2:
print('El primer numero es menor que el segundo')
elif numero_1 > numero_2:
print('El primer numero es mayor que el segundo')
elif numero_1 == numero_2:
print('El primer numero es igual al segundo')
| [
"moranni@globalhitss.com"
] | moranni@globalhitss.com |
8771d96e92a6351aa1051fd247148c3df97ae325 | f27996a45d59afbd9619f2cb92639e088e6bea3c | /python/geodjango/fishtracking_receivers/manage.py | 5718fae7f83fc3162c599c88b338320c96e1adb6 | [] | no_license | bopopescu/snippets | d7e689b5c74207f716b0f9c57a342b86662f39a5 | 1924cd8c7938dc32b6c1a50137cc7f053d4aafb2 | refs/heads/master | 2021-05-31T12:04:26.588555 | 2016-05-04T14:05:26 | 2016-05-04T14:05:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings before importing anything
    # that needs configuration.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fishtracking_receivers.settings")
    from django.core.management import execute_from_command_line
    # Dispatch the command-line arguments (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| [
"bart.aelterman@gmail.com"
] | bart.aelterman@gmail.com |
ec54451f6f85a1afd92fbcec989020e9e73f4419 | 875e1271fe2cdf87545bc24da31851088da58418 | /tests/caffe_test_runner.py | ad002885401bd3f7f9eba88f86697e4311f43cec | [
"Apache-2.0"
] | permissive | zhen8838/nncase | 2278d245560f5b9e6736784dff73bb9908da3df7 | 5d2e0223e4c1a8e54883db3b9e7b46bb9c7d4f58 | refs/heads/master | 2022-05-28T13:33:01.676950 | 2021-11-29T13:32:57 | 2021-11-29T13:32:57 | 377,750,282 | 2 | 1 | Apache-2.0 | 2021-07-27T12:54:36 | 2021-06-17T07:56:40 | C++ | UTF-8 | Python | false | false | 2,522 | py | import caffe
from test_runner import *
import os
import shutil
import numpy as np
# from typing import Dict, List, Tuple, Union
class CaffeTestRunner(TestRunner):
    """TestRunner backend for Caffe models.

    A model travels as a two-element list: [prototxt_path,
    caffemodel_path] (see cpu_infer and import_model).
    """
    def __init__(self, case_name, targets=None, overwrite_configs: dict = None):
        super().__init__(case_name, targets, overwrite_configs)
        self.model_type = "caffe"
    def run(self, model_file_list):
        # model_file_list: [prototxt, caffemodel]; the base class drives
        # the whole test flow.
        super().run(model_file_list)
    def parse_model_input_output(self, model_path: Union[List[str], str]):
        """Populate self.inputs/calibs/dump_range_data from the net's
        explicit Input layers, and self.outputs from top blobs that no
        layer consumes as a bottom."""
        caffe_model = caffe.Net(model_path[0], model_path[1], caffe.TEST)
        # Every layer of type "Input" contributes one input descriptor.
        for i, name in enumerate(caffe_model._layer_names):
            if (caffe_model.layers[i].type == "Input"):
                input_dict = {}
                input_dict['name'] = name
                input_dict['dtype'] = np.float32
                input_dict['model_shape'] = list(caffe_model.blobs[name].data.shape)
                self.inputs.append(input_dict)
                self.calibs.append(copy.deepcopy(input_dict))
                self.dump_range_data.append(copy.deepcopy(input_dict))
        # A top blob never used as any layer's bottom is a graph output;
        # seen_outputs guards against listing the same blob twice.
        used_inputs = set([name for _, l in caffe_model.bottom_names.items() for name in l])
        seen_outputs = set()
        for n in [name for _, l in caffe_model.top_names.items() for name in l]:
            if not n in used_inputs and not n in seen_outputs:
                seen_outputs.add(n)
                output_dict = {}
                output_dict['name'] = n
                output_dict['dtype'] = np.float32
                output_dict['model_shape'] = list(caffe_model.blobs[n].data.shape)
                self.outputs.append(output_dict)
    def cpu_infer(self, case_dir: str, model_file_list, type: str):
        """Run the model with Caffe on CPU and dump every output to
        cpu_result_<i>.bin/.txt under case_dir as reference results."""
        caffe_model = caffe.Net(model_file_list[0], model_file_list[1], caffe.TEST)
        for input in self.inputs:
            # NOTE(review): input['data'] is presumably filled by the
            # base TestRunner before cpu_infer runs -- confirm there.
            caffe_model.blobs[input['name']].data[...] = self.transform_input(
                self.data_pre_process(input['data']), "float32", "CPU")
        outputs = caffe_model.forward()
        for i, output in enumerate(self.outputs):
            result = outputs[output['name']]
            self.output_paths.append((
                os.path.join(case_dir, f'cpu_result_{i}.bin'),
                os.path.join(case_dir, f'cpu_result_{i}.txt')))
            result.tofile(self.output_paths[-1][0])
            self.totxtfile(self.output_paths[-1][1], result)
    def import_model(self, compiler, model_content, import_options):
        # Compiler wants caffemodel first, prototxt second -- the
        # reverse of caffe.Net's argument order above.
        compiler.import_caffe(model_content[1], model_content[0])
| [
"noreply@github.com"
] | noreply@github.com |
f1e12a8157807f471f58de6ec2c92fe27718cbc3 | a7f0b22a58d3b4a192a33c4e61f6cdb48bf3c3e0 | /restapi/urls.py | d59f31e35bdd866ffffb2b03b846de756152e8eb | [] | no_license | dishantr16/django-restapi | c704ee078d754e5ff02db646b50a031ee223feb3 | 4bc27d935ab3910c72dc68b420f388b12020ca6a | refs/heads/main | 2023-03-14T09:13:39.990762 | 2021-03-03T05:12:57 | 2021-03-03T05:12:57 | 343,438,888 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | """restapi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    path('', include('updates.urls')),  # 'updates' app routes served at the site root
]
# Serve user-uploaded media files via Django (intended for development;
# in production the front-end web server should serve MEDIA_ROOT instead).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
"dishant16rathi@gmail.com"
] | dishant16rathi@gmail.com |
f13ea54a8025edf6aefe70cbdd3ef7f3eb03266a | b9e58a0be125f8daa265b9d45f18641145bed723 | /Dictionaries and Sets/key_match.py | 3752fca2476542a1433dceeb307c07b3121039d9 | [] | no_license | CharlesMontgomery2/Python-Class-Exercises | 4bba7d5981be0fbc5f78625610b9091d0bdfeefe | b2e4b80558a68558e615ea8cac40f6f49fff46e7 | refs/heads/main | 2023-01-29T11:08:18.140052 | 2020-12-17T06:59:16 | 2020-12-17T06:59:16 | 322,202,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | ############## Match key values in two dictionaries #####################################
print("Dogs")  # title banner for the demo output
# Two sample dictionaries that share some (key, value) pairs.
d1 = {"Name": "Bruno", "Age": 5, "Breed": "Bulldog"}
d2 = {"Name": "Bruno", "Age": 2, "Breed": "Pitbull"}
# dict.items() pairs are hashable, so turning both into sets lets us take the
# intersection of the exact (key, value) pairs present in both dictionaries.
common_pairs = set(d1.items()) & set(d2.items())
for (key, value) in common_pairs:
    # The %s placeholders behave like "{0}: {1}".format(key, value).
    print("%s: %s is present in both d1 and d2" % (key, value))
"charles.montgomery2@outlook.com"
] | charles.montgomery2@outlook.com |
2a6303f5265df9a0d6b5ecd491ed8110ad0aed64 | 4b2d84b6e423e8435a8cd800970d75720e362763 | /gestionVotos/forms.py | 9295d10b406310d34353f52e0f73d943d93879d6 | [
"Apache-2.0"
] | permissive | rmatico1306/rmatico | f23a938edb4867e3bef1af3d1f76e472a16849ab | bf2a54cb18728c0e6f1e85055a06c85751883ae1 | refs/heads/main | 2023-05-10T04:30:06.860205 | 2021-06-09T17:22:13 | 2021-06-09T17:22:13 | 372,636,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | from django import forms
class FormularioContacto(forms.Form):
    """Contact form: subject (asunto), sender e-mail and message body (mensaje)."""
    asunto= forms.CharField()
    email=forms.EmailField()
    mensaje= forms.CharField()
class FormularioActa(forms.Form):
    """Single-field form to search vote tallies by 4-character section code."""
    seccion=forms.CharField(label='BUSQUEDA POR SECCION', max_length=4)
"rmatico13@gmail.com"
] | rmatico13@gmail.com |
450649f150b276a33ad258ee1acecf89541b6c41 | 1b2ea153866cc11c47ed43cf0d0cb6c7ad3006b6 | /Verenicy_na_kosmicheskih_snimkax/kontur.py | 1e2ed2abd3dbfdea0930ba398f3ee2a61cb012a6 | [] | no_license | irina19951908/project | c69d02dabbe599d8ff1289bd05225b5cc698ce3b | 97fac91e98146d2cb532b2d385085599208bb89e | refs/heads/master | 2020-06-24T13:10:28.388285 | 2019-07-26T08:23:26 | 2019-07-26T08:23:26 | 198,970,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 2 15:49:55 2018
@author: User
"""
# Contour extraction (Sobel edge detection) over a folder of binarized images.
# (Original comment in Russian: "контурирование".)
#import random
import glob
from PIL import Image, ImageDraw # Import the required libraries (Image/ImageDraw are unused below).
import numpy
import scipy
from scipy import ndimage
#image = Image.open("C:/Users/User/Desktop/image/original.jpg") # Open the image (disabled).
path="C:/Users/User/Desktop/galaxy/2/binary"
images=glob.glob(path + "/*.jpg")
number=0
# NOTE(review): scipy.misc.imread/imsave were removed in SciPy >= 1.2; this
# script presumably runs against an old SciPy with Pillow — confirm the pinned version.
for image in images:
    #img = Image.open(image)
    im = scipy.misc.imread(image)
    im = im.astype('int32')
    dx = ndimage.sobel(im, 1) # horizontal derivative
    dy = ndimage.sobel(im, 0) # vertical derivative
    mag = numpy.hypot(dx, dy) # gradient magnitude
    mag *= 255.0 / numpy.max(mag) # normalize to 0-255 (Q&D; fails if the image is all zeros)
    number=number+1
    scipy.misc.imsave(path + "/kontur/" + str(number) + ".jpg",mag)
#del draw
# Save the result (original comment in Russian: "сохраняем результат").
#image.save("C:/Users/User/Desktop/image/result3factor10.jpg", "JPEG")
#del draw
"noreply@github.com"
] | noreply@github.com |
e90dcd78bc4629be7d9cf48e3f2d6f93f21ae201 | cdcbe6ea97dd870357998b17f0cdedec0636781d | /extra_apps/xadmin/views/dashboard.py | a6b43c1b335ff64a990a360ffc76b0dbe1ea8264 | [] | no_license | supermanfeng/eduplatform | 8815fad056ac9d1206f219220453f9f7e7382128 | a4288c7af7f4dd980a3f4f2e337899cdf9d15b43 | refs/heads/master | 2022-12-09T23:04:57.239321 | 2018-04-10T11:11:11 | 2018-04-10T11:11:11 | 128,721,691 | 1 | 0 | null | 2022-12-08T00:51:44 | 2018-04-09T05:57:55 | Python | UTF-8 | Python | false | false | 23,644 | py | from django import forms
from django.apps import apps
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse, NoReverseMatch
from django.template.context_processors import csrf
from django.db.models.base import ModelBase
from django.forms.forms import DeclarativeFieldsMetaclass
from django.forms.utils import flatatt
from django.template import loader
from django.http import Http404
from django.test.client import RequestFactory
from django.utils.encoding import force_unicode, smart_unicode
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.http import urlencode, urlquote
from django.views.decorators.cache import never_cache
from xadmin import widgets as exwidgets
from xadmin.layout import FormHelper
from xadmin.models import UserSettings, UserWidget
from xadmin.plugins.utils import get_context_dict
from xadmin.sites import site
from xadmin.views.base import CommAdminView, ModelAdminView, filter_hook, csrf_protect_m
from xadmin.views.edit import CreateAdminView
from xadmin.views.list import ListAdminView
from xadmin.util import unquote
import copy
class WidgetTypeSelect(forms.Widget):
    """Form widget rendering the available dashboard widget types as a
    Bootstrap nav list; the chosen type is carried by a hidden input."""

    def __init__(self, widgets, attrs=None):
        super(WidgetTypeSelect, self).__init__(attrs)
        # Widget classes to offer as choices (as returned by the widget manager).
        self._widgets = widgets

    def render(self, name, value, attrs=None):
        # Render a <ul> of selectable widget types plus a hidden <input> that
        # submits the currently selected widget_type back with the form.
        if value is None:
            value = ''
        final_attrs = self.build_attrs(attrs, name=name)
        final_attrs['class'] = 'nav nav-pills nav-stacked'
        output = [u'<ul%s>' % flatatt(final_attrs)]
        options = self.render_options(force_unicode(value), final_attrs['id'])
        if options:
            output.append(options)
        output.append(u'</ul>')
        output.append('<input type="hidden" id="%s_input" name="%s" value="%s"/>' %
                      (final_attrs['id'], name, force_unicode(value)))
        return mark_safe(u'\n'.join(output))

    def render_option(self, selected_choice, widget, id):
        # One <li> per widget type; the selected one gets the 'active' class.
        # The inline onclick keeps the hidden input in sync with the selection.
        if widget.widget_type == selected_choice:
            selected_html = u' class="active"'
        else:
            selected_html = ''
        return (u'<li%s><a onclick="' +
                'javascript:$(this).parent().parent().find(\'>li\').removeClass(\'active\');$(this).parent().addClass(\'active\');' +
                '$(\'#%s_input\').attr(\'value\', \'%s\')' % (id, widget.widget_type) +
                '"><h4><i class="%s"></i> %s</h4><p>%s</p></a></li>') % (
                    selected_html,
                    widget.widget_icon,
                    widget.widget_title or widget.widget_type,
                    widget.description)

    def render_options(self, selected_choice, id):
        # Normalize to strings.
        output = []
        for widget in self._widgets:
            output.append(self.render_option(selected_choice, widget, id))
        return u'\n'.join(output)
class UserWidgetAdmin(object):
    """Admin options for UserWidget.

    Widgets are created through a two-step wizard: first pick the widget
    type, then fill in that type's parameter form. Non-superusers only see
    (and list) their own widgets.
    """
    model_icon = 'fa fa-dashboard'
    list_display = ('widget_type', 'page_id', 'user')
    list_filter = ['user', 'widget_type', 'page_id']
    list_display_links = ('widget_type',)
    user_fields = ['user']
    hidden_menu = True

    # Step 1 collects page/type; step 2 builds its form dynamically from the
    # chosen widget class (see get_widget_params_form / convert_widget_params).
    wizard_form_list = (
        (_(u"Widget Type"), ('page_id', 'widget_type')),
        (_(u"Widget Params"), {'callback':
                               "get_widget_params_form", 'convert': "convert_widget_params"})
    )

    def formfield_for_dbfield(self, db_field, **kwargs):
        # Replace widget_type with the custom nav-list selector and hide
        # page_id when it is pre-set via the query string.
        if db_field.name == 'widget_type':
            widgets = widget_manager.get_widgets(self.request.GET.get('page_id', ''))
            form_widget = WidgetTypeSelect(widgets)
            return forms.ChoiceField(choices=[(w.widget_type, w.description) for w in widgets],
                                     widget=form_widget, label=_('Widget Type'))
        if 'page_id' in self.request.GET and db_field.name == 'page_id':
            kwargs['widget'] = forms.HiddenInput
        field = super(
            UserWidgetAdmin, self).formfield_for_dbfield(db_field, **kwargs)
        return field

    def get_widget_params_form(self, wizard):
        # Build a throw-away Form subclass from the chosen widget's fields
        # (minus the auto-managed 'id' field).
        data = wizard.get_cleaned_data_for_step(wizard.steps.first)
        widget_type = data['widget_type']
        widget = widget_manager.get(widget_type)
        fields = copy.deepcopy(widget.base_fields)
        if 'id' in fields:
            del fields['id']
        return DeclarativeFieldsMetaclass("WidgetParamsForm", (forms.Form,), fields)

    def convert_widget_params(self, wizard, cleaned_data, form):
        # Serialize the step-2 form values into the UserWidget 'value' payload.
        widget = UserWidget()
        value = dict([(f.name, f.value()) for f in form])
        widget.set_value(value)
        cleaned_data['value'] = widget.value
        cleaned_data['user'] = self.user

    def get_list_display(self):
        # Only superusers see the owner column.
        list_display = super(UserWidgetAdmin, self).get_list_display()
        if not self.user.is_superuser:
            list_display.remove('user')
        return list_display

    def queryset(self):
        # Non-superusers are restricted to their own widgets.
        if self.user.is_superuser:
            return super(UserWidgetAdmin, self).queryset()
        return UserWidget.objects.filter(user=self.user)

    def update_dashboard(self, obj):
        # Scrub a deleted widget's id out of the stored column layout
        # ('|'-separated columns of ','-separated widget ids).
        try:
            portal_pos = UserSettings.objects.get(
                user=obj.user, key="dashboard:%s:pos" % obj.page_id)
        except UserSettings.DoesNotExist:
            return
        pos = [[w for w in col.split(',') if w != str(
            obj.id)] for col in portal_pos.value.split('|')]
        portal_pos.value = '|'.join([','.join(col) for col in pos])
        portal_pos.save()

    def delete_model(self):
        self.update_dashboard(self.obj)
        super(UserWidgetAdmin, self).delete_model()

    def delete_models(self, queryset):
        for obj in queryset:
            self.update_dashboard(obj)
        super(UserWidgetAdmin, self).delete_models(queryset)
# Make UserWidget manageable through the admin site itself.
site.register(UserWidget, UserWidgetAdmin)
class WidgetManager(object):
    """Registry mapping widget_type strings to widget classes."""
    _widgets = None

    def __init__(self):
        self._widgets = {}

    def register(self, widget_class):
        # Usable as a class decorator: registers and returns the class unchanged.
        self._widgets[widget_class.widget_type] = widget_class
        return widget_class

    def get(self, name):
        # Raises KeyError for unknown widget types.
        return self._widgets[name]

    def get_widgets(self, page_id):
        # page_id is accepted for API symmetry but currently unused.
        return self._widgets.values()
# Module-level singleton registry used by the @widget_manager.register decorators below.
widget_manager = WidgetManager()
class WidgetDataError(Exception):
    """Raised when a widget's settings form fails validation.

    Carries the offending widget instance and its validation errors; the
    exception message is the stringified errors.
    """

    def __init__(self, widget, errors):
        self.widget = widget
        self.errors = errors
        super(WidgetDataError, self).__init__(str(errors))
class BaseWidget(forms.Form):
    """Base class for dashboard widgets.

    A widget is simultaneously a form (holding its own settings) and a
    renderable page fragment. Subclasses set widget_type/template/icon and
    add extra form fields; invalid settings raise WidgetDataError.
    """
    template = 'xadmin/widgets/base.html'
    description = 'Base Widget, don\'t use it.'
    widget_title = None
    widget_icon = 'fa fa-plus-square'
    widget_type = 'base'
    base_title = None

    id = forms.IntegerField(label=_('Widget ID'), widget=forms.HiddenInput)
    title = forms.CharField(label=_('Widget Title'), required=False, widget=exwidgets.AdminTextInputWidget)

    def __init__(self, dashboard, data):
        # Bind to the owning dashboard view and validate the stored settings.
        self.dashboard = dashboard
        self.admin_site = dashboard.admin_site
        self.request = dashboard.request
        self.user = dashboard.request.user
        self.convert(data)
        super(BaseWidget, self).__init__(data)

        if not self.is_valid():
            raise WidgetDataError(self, self.errors.as_text())

        self.setup()

    def setup(self):
        helper = FormHelper()
        helper.form_tag = False
        helper.include_media = False
        self.helper = helper

        self.id = self.cleaned_data['id']
        self.title = self.cleaned_data['title'] or self.base_title

        # Superusers bypass the per-widget permission check.
        if not (self.user.is_superuser or self.has_perm()):
            raise PermissionDenied

    @property
    def widget(self):
        # Render this widget's template; csrf token included because widget
        # bodies may contain forms.
        context = {'widget_id': self.id, 'widget_title': self.title, 'widget_icon': self.widget_icon,
                   'widget_type': self.widget_type, 'form': self, 'widget': self}
        context.update(csrf(self.request))
        self.context(context)
        return loader.render_to_string(self.template, context)

    def context(self, context):
        # Hook: subclasses add template variables here.
        pass

    def convert(self, data):
        # Hook: subclasses pull extra, non-form values out of the raw data.
        pass

    def has_perm(self):
        # Subclasses must grant access explicitly (see setup()).
        return False

    def save(self):
        # Persist the widget's current form values onto its UserWidget row.
        value = dict([(f.name, f.value()) for f in self])
        user_widget = UserWidget.objects.get(id=self.id)
        user_widget.set_value(value)
        user_widget.save()

    def static(self, path):
        return self.dashboard.static(path)

    def vendor(self, *tags):
        return self.dashboard.vendor(*tags)

    def media(self):
        # Extra static media this widget needs; subclasses may extend.
        return forms.Media()
@widget_manager.register
class HtmlWidget(BaseWidget):
    """Widget that renders a user-supplied block of raw HTML content."""
    widget_type = 'html'
    widget_icon = 'fa fa-file-o'
    description = _(
        u'Html Content Widget, can write any html content in widget.')

    content = forms.CharField(label=_(
        'Html Content'), widget=exwidgets.AdminTextareaWidget, required=False)

    def has_perm(self):
        # Plain HTML widgets are available to every authenticated user.
        return True

    def context(self, context):
        context['content'] = self.cleaned_data['content']
class ModelChoiceIterator(object):
    """Yields ('app_label.model_name', verbose_name) choice pairs for every
    model registered with the global xadmin site."""

    def __init__(self, field):
        self.field = field

    def __iter__(self):
        from xadmin import site as g_admin_site  # local import avoids a circular import
        for m, ma in g_admin_site._registry.items():
            yield ('%s.%s' % (m._meta.app_label, m._meta.model_name),
                   m._meta.verbose_name)
class ModelChoiceField(forms.ChoiceField):
    """ChoiceField whose choices are the models registered with xadmin.

    Values round-trip between model classes and 'app_label.model_name'
    strings (to_python / prepare_value).
    """

    def __init__(self, required=True, widget=None, label=None, initial=None,
                 help_text=None, *args, **kwargs):
        # Call Field instead of ChoiceField __init__() because we don't need
        # ChoiceField.__init__().
        forms.Field.__init__(self, required, widget, label, initial, help_text,
                             *args, **kwargs)
        self.widget.choices = self.choices

    def __deepcopy__(self, memo):
        result = forms.Field.__deepcopy__(self, memo)
        return result

    def _get_choices(self):
        # Recomputed lazily so late model registrations are picked up.
        return ModelChoiceIterator(self)

    choices = property(_get_choices, forms.ChoiceField._set_choices)

    def to_python(self, value):
        # Accept either a model class or an 'app_label.model_name' string.
        if isinstance(value, ModelBase):
            return value
        app_label, model_name = value.lower().split('.')
        return apps.get_model(app_label, model_name)

    def prepare_value(self, value):
        # Normalize a model class back to its 'app_label.model_name' string.
        if isinstance(value, ModelBase):
            value = '%s.%s' % (value._meta.app_label, value._meta.model_name)
        return value

    def valid_value(self, value):
        value = self.prepare_value(value)
        for k, v in self.choices:
            if value == smart_unicode(k):
                return True
        return False
class ModelBaseWidget(BaseWidget):
    """Base for widgets bound to a single model; access requires the user to
    hold ``model_perm`` ('change' by default) on the chosen model."""
    app_label = None
    model_name = None
    model_perm = 'change'
    model = ModelChoiceField(label=_(u'Target Model'), widget=exwidgets.AdminSelectWidget)

    def __init__(self, dashboard, data):
        self.dashboard = dashboard
        super(ModelBaseWidget, self).__init__(dashboard, data)

    def setup(self):
        # Resolve the validated model choice before the base permission check runs.
        self.model = self.cleaned_data['model']
        self.app_label = self.model._meta.app_label
        self.model_name = self.model._meta.model_name

        super(ModelBaseWidget, self).setup()

    def has_perm(self):
        return self.dashboard.has_model_perm(self.model, self.model_perm)

    def filte_choices_model(self, model, modeladmin):
        # (sic: 'filte') hook that limits choices to models the user may access.
        return self.dashboard.has_model_perm(model, self.model_perm)

    def model_admin_url(self, name, *args, **kwargs):
        # Reverse an admin URL such as '<app>_<model>_changelist' for this model.
        return reverse(
            "%s:%s_%s_%s" % (self.admin_site.app_name, self.app_label,
                             self.model_name, name), args=args, kwargs=kwargs)
class PartialBaseWidget(BaseWidget):
    """Base for widgets that render by running another admin view internally
    against a synthetic request built with Django's RequestFactory."""

    def get_view_class(self, view_class, model=None, **opts):
        admin_class = self.admin_site._registry.get(model) if model else None
        return self.admin_site.get_view_class(view_class, admin_class, **opts)

    def get_factory(self):
        return RequestFactory()

    def setup_request(self, request):
        # Synthetic requests still need the real user/session so the inner
        # view's permission checks behave as for the current user.
        request.user = self.user
        request.session = self.request.session
        return request

    def make_get_request(self, path, data={}, **extra):
        req = self.get_factory().get(path, data, **extra)
        return self.setup_request(req)

    def make_post_request(self, path, data={}, **extra):
        req = self.get_factory().post(path, data, **extra)
        return self.setup_request(req)
@widget_manager.register
class QuickBtnWidget(BaseWidget):
    """Widget showing a list of shortcut buttons to admin pages or free URLs."""
    widget_type = 'qbutton'
    description = _(u'Quick button Widget, quickly open any page.')
    template = "xadmin/widgets/qbutton.html"
    base_title = _(u"Quick Buttons")
    widget_icon = 'fa fa-caret-square-o-right'

    def convert(self, data):
        # Button specs are stored outside the form fields.
        self.q_btns = data.pop('btns', [])

    def get_model(self, model_or_label):
        # Accept either a model class or an 'app_label.model_name' string.
        if isinstance(model_or_label, ModelBase):
            return model_or_label
        else:
            return apps.get_model(*model_or_label.lower().split('.'))

    def context(self, context):
        # Build the button list: model buttons are skipped when the user lacks
        # the view permission; URL buttons fall back to the literal string
        # when reverse() cannot resolve them.
        btns = []
        for b in self.q_btns:
            btn = {}
            if 'model' in b:
                model = self.get_model(b['model'])
                if not self.user.has_perm("%s.view_%s" % (model._meta.app_label, model._meta.model_name)):
                    continue
                btn['url'] = reverse("%s:%s_%s_%s" % (self.admin_site.app_name, model._meta.app_label,
                                                      model._meta.model_name, b.get('view', 'changelist')))
                btn['title'] = model._meta.verbose_name
                btn['icon'] = self.dashboard.get_model_icon(model)
            else:
                try:
                    btn['url'] = reverse(b['url'])
                except NoReverseMatch:
                    btn['url'] = b['url']

            # Explicit title/icon in the spec override the defaults.
            if 'title' in b:
                btn['title'] = b['title']
            if 'icon' in b:
                btn['icon'] = b['icon']
            btns.append(btn)

        context.update({'btns': btns})

    def has_perm(self):
        return True
@widget_manager.register
class ListWidget(ModelBaseWidget, PartialBaseWidget):
    """Widget embedding a truncated changelist of a model (at most five
    columns, ``count`` rows per page)."""
    widget_type = 'list'
    description = _(u'Any Objects list Widget.')
    template = "xadmin/widgets/list.html"
    model_perm = 'view'
    widget_icon = 'fa fa-align-justify'

    def convert(self, data):
        # Extra changelist query parameters and the per-page row cap.
        self.list_params = data.pop('params', {})
        self.list_count = data.pop('count', 10)

    def setup(self):
        super(ListWidget, self).setup()

        if not self.title:
            self.title = self.model._meta.verbose_name_plural

        # Run the real ListAdminView against a synthetic GET request.
        req = self.make_get_request("", self.list_params)
        self.list_view = self.get_view_class(ListAdminView, self.model)(req)
        if self.list_count:
            self.list_view.list_per_page = self.list_count

    def context(self, context):
        list_view = self.list_view
        list_view.make_result_list()

        # Cap the embedded table at five columns.
        base_fields = list_view.base_list_display
        if len(base_fields) > 5:
            base_fields = base_fields[0:5]

        context['result_headers'] = [c for c in list_view.result_headers(
        ).cells if c.field_name in base_fields]
        context['results'] = [[o for i, o in
                               enumerate(filter(lambda c: c.field_name in base_fields, r.cells))]
                              for r in list_view.results()]
        context['result_count'] = list_view.result_count
        context['page_url'] = self.model_admin_url('changelist') + "?" + urlencode(self.list_params)
@widget_manager.register
class AddFormWidget(ModelBaseWidget, PartialBaseWidget):
    """Widget embedding an 'add object' form for a model."""
    widget_type = 'addform'
    description = _(u'Add any model object Widget.')
    template = "xadmin/widgets/addform.html"
    model_perm = 'add'
    widget_icon = 'fa fa-plus'

    def setup(self):
        super(AddFormWidget, self).setup()

        if self.title is None:
            self.title = _('Add %s') % self.model._meta.verbose_name

        # Run the real CreateAdminView against a synthetic GET request to
        # obtain a ready-to-render add form.
        req = self.make_get_request("")
        self.add_view = self.get_view_class(
            CreateAdminView, self.model, list_per_page=10)(req)
        self.add_view.instance_forms()

    def context(self, context):
        helper = FormHelper()
        helper.form_tag = False
        helper.include_media = False

        context.update({
            'addform': self.add_view.form_obj,
            'addhelper': helper,
            'addurl': self.add_view.model_admin_url('add'),
            'model': self.model
        })

    def media(self):
        # Widget media = inner view media + its form media + the quick-form plugin.
        return self.add_view.media + self.add_view.form_obj.media + self.vendor('xadmin.plugin.quick-form.js')
class Dashboard(CommAdminView):
    """Customisable widget dashboard page.

    The per-user layout is stored in UserSettings under
    ``dashboard:<page_id>:pos`` as '|'-separated columns of ','-separated
    UserWidget ids. GET renders the page; POST saves or deletes one widget.
    """
    widget_customiz = True
    widgets = []
    title = _(u"Dashboard")
    icon = None

    def get_page_id(self):
        return self.request.path

    def get_portal_key(self):
        # UserSettings key under which this page's column layout is stored.
        return "dashboard:%s:pos" % self.get_page_id()

    @filter_hook
    def get_widget(self, widget_or_id, data=None):
        """Build the widget instance for a UserWidget row (or its id).

        Returns None when the row does not exist for the current user/page.
        """
        try:
            if isinstance(widget_or_id, UserWidget):
                widget = widget_or_id
            else:
                widget = UserWidget.objects.get(user=self.user, page_id=self.get_page_id(), id=widget_or_id)
            wid = widget_manager.get(widget.widget_type)

            # Subclass on the fly so every widget's template context knows
            # whether the user may edit widgets.
            class widget_with_perm(wid):

                def context(self, context):
                    super(widget_with_perm, self).context(context)
                    context.update({'has_change_permission': self.request.user.has_perm('xadmin.change_userwidget')})
            wid_instance = widget_with_perm(self, data or widget.get_value())
            return wid_instance
        except UserWidget.DoesNotExist:
            return None

    @filter_hook
    def get_init_widget(self):
        """Create UserWidget rows from the class-level ``widgets`` spec and
        persist the initial column layout."""
        portal = []
        widgets = self.widgets
        for col in widgets:
            portal_col = []
            for opts in col:
                try:
                    widget = UserWidget(user=self.user, page_id=self.get_page_id(), widget_type=opts['type'])
                    widget.set_value(opts)
                    widget.save()
                    portal_col.append(self.get_widget(widget))
                except (PermissionDenied, WidgetDataError):
                    # Drop widgets the user may not see or whose spec is invalid.
                    widget.delete()
                    continue
            portal.append(portal_col)

        UserSettings(
            user=self.user, key="dashboard:%s:pos" % self.get_page_id(),
            value='|'.join([','.join([str(w.id) for w in col]) for col in portal])).save()

        return portal

    @filter_hook
    def get_widgets(self):
        """Load the saved layout, falling back to get_init_widget() on first visit."""
        if self.widget_customiz:
            portal_pos = UserSettings.objects.filter(
                user=self.user, key=self.get_portal_key())
            if len(portal_pos):
                portal_pos = portal_pos[0].value
                widgets = []

                if portal_pos:
                    user_widgets = dict(
                        [(uw.id, uw) for uw in UserWidget.objects.filter(user=self.user, page_id=self.get_page_id())])
                    for col in portal_pos.split('|'):
                        ws = []
                        for wid in col.split(','):
                            try:
                                widget = user_widgets.get(int(wid))
                                if widget:
                                    ws.append(self.get_widget(widget))
                            except Exception as e:
                                # FIX: was ``except Exception, e`` (Python 2 only
                                # syntax, a SyntaxError on Python 3); the ``as``
                                # form works on Python 2.6+ and 3. A broken widget
                                # is logged and skipped rather than breaking the page.
                                import logging
                                logging.error(e, exc_info=True)
                        widgets.append(ws)

                return widgets

        return self.get_init_widget()

    @filter_hook
    def get_title(self):
        return self.title

    @filter_hook
    def get_context(self):
        new_context = {
            'title': self.get_title(),
            'icon': self.icon,
            'portal_key': self.get_portal_key(),
            # Bootstrap column width derived from the number of widget columns.
            'columns': [('col-sm-%d' % int(12 / len(self.widgets)), ws) for ws in self.widgets],
            'has_add_widget_permission': self.has_model_perm(UserWidget, 'add') and self.widget_customiz,
            'add_widget_url': self.get_admin_url(
                '%s_%s_add' % (UserWidget._meta.app_label, UserWidget._meta.model_name)) +
            "?user=%s&page_id=%s&_redirect=%s" % (
                self.user.id, self.get_page_id(), urlquote(self.request.get_full_path()))
        }
        context = super(Dashboard, self).get_context()
        context.update(new_context)
        return context

    @never_cache
    def get(self, request, *args, **kwargs):
        self.widgets = self.get_widgets()
        return self.template_response('xadmin/views/dashboard.html', self.get_context())

    @csrf_protect_m
    def post(self, request, *args, **kwargs):
        # POST either saves a widget's settings or (with _delete=on) removes
        # it and scrubs its id from the stored layout.
        if 'id' in request.POST:
            widget_id = request.POST['id']
            if request.POST.get('_delete', None) != 'on':
                widget = self.get_widget(widget_id, request.POST.copy())
                widget.save()
            else:
                try:
                    widget = UserWidget.objects.get(
                        user=self.user, page_id=self.get_page_id(), id=widget_id)
                    widget.delete()
                    try:
                        portal_pos = UserSettings.objects.get(user=self.user,
                                                              key="dashboard:%s:pos" % self.get_page_id())
                        pos = [[w for w in col.split(',') if w != str(
                            widget_id)] for col in portal_pos.value.split('|')]
                        portal_pos.value = '|'.join([','.join(col) for col in pos])
                        portal_pos.save()
                    except Exception:
                        # Best-effort layout cleanup; a missing/corrupt layout
                        # entry must not block the widget deletion itself.
                        pass
                except UserWidget.DoesNotExist:
                    pass

        return self.get(request)

    @filter_hook
    def get_media(self):
        media = super(Dashboard, self).get_media() + \
            self.vendor('xadmin.page.dashboard.js', 'xadmin.page.dashboard.css')
        if self.widget_customiz:
            media = media + self.vendor('xadmin.plugin.portal.js')
        for ws in self.widgets:
            for widget in ws:
                media = media + widget.media()
        return media
class ModelDashboard(Dashboard, ModelAdminView):
    """Per-object dashboard page (the '<instance> Dashboard' view for a
    single model instance)."""
    title = _(u"%s Dashboard")

    def get_page_id(self):
        # One layout per model, keyed as 'model:<app_label>/<model_name>'.
        return 'model:%s/%s' % self.model_info

    @filter_hook
    def get_title(self):
        return self.title % force_unicode(self.obj)

    def init_request(self, object_id, *args, **kwargs):
        # Resolve the target object and enforce the view permission up front.
        self.obj = self.get_object(unquote(object_id))

        if not self.has_view_permission(self.obj):
            raise PermissionDenied

        if self.obj is None:
            raise Http404(_('%(name)s object with primary key %(key)r does not exist.') %
                          {'name': force_unicode(self.opts.verbose_name), 'key': escape(object_id)})

    @filter_hook
    def get_context(self):
        # Merge dashboard context, model-admin context, then object specifics.
        new_context = {
            'has_change_permission': self.has_change_permission(self.obj),
            'object': self.obj,
        }
        context = Dashboard.get_context(self)
        context.update(ModelAdminView.get_context(self))
        context.update(new_context)
        return context

    @never_cache
    def get(self, request, *args, **kwargs):
        self.widgets = self.get_widgets()
        return self.template_response(self.get_template_list('views/model_dashboard.html'), self.get_context())
| [
"2252506855@qq.com"
] | 2252506855@qq.com |
abe34b21c859a40398f14095535c9e0df16c24ed | 9c4a55222e124768872d99f99b07e96796adae8f | /Exercicios/ex029.py | 163ce4024c044dc80390cc4f1593e21d938fb354 | [] | no_license | Diegodesouza7/Python-Conceitos-Exercicios | 953a5caa9a0949ac58b6255fadae396f5a8f545e | 3238733980dcf5feea0811e1527cd9f898759225 | refs/heads/master | 2023-02-03T03:55:53.464673 | 2020-12-18T14:20:39 | 2020-12-18T14:20:39 | 312,084,758 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | """Escreva um programa que leia a velocidade de um carro.
Se ele ultrapassar 80Km/h, mostre uma mensagem dizendo que ele foi multado.
A multa vai custar R$7,00 por cada Km acima do limite."""
velocidade = float(input('Qual é a velocidade atual do carro ? '))  # current speed in km/h
if velocidade > 80:
    print('MULTADO!! você excedeu o limite permitido que é de 80km/h')
    # Fine: R$7.00 for every km/h above the 80 km/h limit.
    multa = (velocidade - 80) * 7
    print('Sua multa é de R${:.2f}'.format(multa))
print('Tenha uma boa viagem !')
"56520069+Diegodesouza7@users.noreply.github.com"
] | 56520069+Diegodesouza7@users.noreply.github.com |
f8e9f1a1e2241c12b35f3ada69cb9d3d5024bf93 | 9d49a1d08b0be3288778b474a11a33c9219e75e8 | /Tutorials Point/Data Analysis/Loading_the_Dataset.py | 84bc4d91bf3b6d2d24e568fa85ce2d098b68d2e2 | [] | no_license | ArunaMahaGamage/Python-Machine-Learning | f4e2bb3ab383e85ffbf05e509053ebc8db33f182 | bcd5b5f5a55fe906f0f32e58e032faf16e0e43d9 | refs/heads/master | 2020-04-16T09:38:37.946963 | 2019-01-13T06:05:58 | 2019-01-13T06:05:58 | 165,471,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | import pandas
# Load the Pima Indians diabetes dataset; the CSV has no header row, so
# column names are supplied explicitly.
data = ("pima_indians.csv")
names = ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness', 'Insulin','weight in kg','Diabetes pedigree','Age','Outcome']
dataset = pandas.read_csv(data, names=names)
print(dataset.shape)  # (rows, columns)
print(dataset.head(20))  # preview the first 20 records
| [
"admgbit@gmail.com"
] | admgbit@gmail.com |
a35fc53dc372a7043d387c5d0f9de9ea4252766a | 788bb28fe8540272588e9fffaa8aebc1f16be7a6 | /utils.py | 3abca1f9ff96013d19ac414c048e3cdac1dd2618 | [
"MIT"
] | permissive | Asgardrobotics/HydraNet-WikiSQL | 6fccfb095854f97e678f2484bc60296998792267 | afdf1e5c32a5259bc926e7d3f095e06019eaef6d | refs/heads/master | 2023-04-14T02:03:54.110678 | 2021-04-27T08:04:33 | 2021-04-27T08:04:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,516 | py | import os
import json
import transformers
# Maps (base_class, base_name) -> Hugging Face pretrained checkpoint identifier.
pretrained_weights = {
    ("bert", "base"): "bert-base-uncased",
    ("bert", "large"): "bert-large-uncased-whole-word-masking",
    ("roberta", "base"): "roberta-base",
    ("roberta", "large"): "roberta-large",
    ("albert", "xlarge"): "albert-xlarge-v2"
}
def read_jsonl(jsonl):
    """Yield one parsed JSON object per line of the JSONL file *jsonl*."""
    with open(jsonl, encoding="utf8") as handle:
        for raw_line in handle:
            yield json.loads(raw_line.rstrip())
def read_conf(conf_path):
    """Parse a tab-separated key/value config file into a dict.

    Blank lines and lines starting with '#' are skipped.  The two data-path
    entries are normalised to absolute paths for downstream consumers.
    """
    config = {}
    with open(conf_path, encoding="utf8") as handle:
        for raw in handle:
            stripped = raw.strip()
            if stripped == "" or raw[0] == "#":
                continue
            fields = stripped.split("\t")
            config[fields[0]] = fields[1]
    for path_key in ("train_data_path", "dev_data_path"):
        config[path_key] = os.path.abspath(config[path_key])
    return config
def create_base_model(config):
    """Instantiate the pretrained backbone model named by config's
    base_class/base_name pair (unknown pairs raise KeyError here)."""
    weights_name = pretrained_weights[(config["base_class"], config["base_name"])]
    base_class = config["base_class"]
    if base_class == "bert":
        model_cls = transformers.BertModel
    elif base_class == "roberta":
        model_cls = transformers.RobertaModel
    elif base_class == "albert":
        model_cls = transformers.AlbertModel
    else:
        raise Exception("base_class {0} not supported".format(base_class))
    return model_cls.from_pretrained(weights_name)
def create_tokenizer(config):
    """Build the Hugging Face tokenizer matching config's base_class/base_name
    pair (unknown pairs raise KeyError here)."""
    weights_name = pretrained_weights[(config["base_class"], config["base_name"])]
    base_class = config["base_class"]
    if base_class == "bert":
        tokenizer_cls = transformers.BertTokenizer
    elif base_class == "roberta":
        tokenizer_cls = transformers.RobertaTokenizer
    elif base_class == "albert":
        tokenizer_cls = transformers.AlbertTokenizer
    else:
        raise Exception("base_class {0} not supported".format(base_class))
    return tokenizer_cls.from_pretrained(weights_name)
if __name__ == "__main__":
    # Manual smoke test: tokenize a question with the RoBERTa tokenizer and
    # inspect how a column header pairs with the question sub-tokens.
    qtokens = ['Tell', 'me', 'what', 'the', 'notes', 'are', 'for', 'South', 'Australia']
    column = "string School/Club Team"
    tokenizer = create_tokenizer({"base_class": "roberta", "base_name": "large"})
    qsubtokens = []
    for t in qtokens:
        qsubtokens += tokenizer.tokenize(t, add_prefix_space=True)
    print(qsubtokens)
    # encode_plus builds the paired (column, question) input sequence.
    result = tokenizer.encode_plus(column, qsubtokens, add_prefix_space=True)
    for k in result:
        print(k, result[k])
    print(tokenizer.convert_ids_to_tokens(result["input_ids"]))
| [
"unifius.l@gmail.com"
] | unifius.l@gmail.com |
7daca2c6729cc73ee0246c7880a578a9123d3b10 | 099f581e33fdb988ab903c8533e9e6c92a6c0f6b | /FileIO.py | 97bb062761cc8ce407484e1b0c07af2ceb37a979 | [] | no_license | chryshbk/PythonSpecialization | 688bb5fe6889a1207519c63fe6d30a69b3d17a86 | 9e63f3002d2ff298d4ea687f0481fdf1ef140631 | refs/heads/master | 2020-04-21T14:56:38.953192 | 2019-02-08T20:21:40 | 2019-02-08T20:21:40 | 169,651,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | fname = input("Enter file name: ")
if not fname:
    fname = "mbox-short.txt"
# Author: Chrystian Santos
# Date: February 2, 2018
# Details: Reads the file mbox-short.txt and extracts information from it. Counts how many lines start with "From".
fh = open(fname)
count = 0
for line in fh:
    # Only "From ..." lines with at least three whitespace-separated fields
    # carry a sender address in the second field.
    if not line.startswith("From"):
        continue
    words = line.split()
    if len(words) > 2:
        print(words[1])
        count += 1
print("There were", count, "lines in the file with From as the first word")
| [
"chrystian.hbk2@gmail.com"
] | chrystian.hbk2@gmail.com |
ac781726298cd7bcab9588034e48921926aa64b4 | ce2cf775f7c092a4ee9ef74a9cf40dc7478f0eae | /semantic/both_models_new_dataset/source_pytorch/data_exploration.py | 3885e7a1d3defd6f2f5512217d16b72d6719da05 | [] | no_license | vyaskartik20/Semantic_Plagiarism_Checker_for_Handwritten_Scripts | 8b2118745cf7bc29d2160d106106c75a5a72ed8e | 06312a4a134335b94e9cc616f976e9cf52c56965 | refs/heads/master | 2023-04-11T06:45:14.480843 | 2021-04-13T15:53:14 | 2021-04-13T15:53:14 | 319,244,321 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,512 | py | # import os
# import matplotlib.pyplot as plt
# import numpy as np
# import pandas as pd
# csv_file = 'data/file_information.csv'
# plagiarism_df = pd.read_csv(csv_file)
# # print out the first few rows of data info
# print(plagiarism_df.head(10))
# # print out some stats about the data
# print('Number of files: ', plagiarism_df.shape[0]) # .shape[0] gives the rows
# # .unique() gives unique items in a specified column
# print('Number of unique tasks/question types (A-E): ', (len(plagiarism_df['Task'].unique())))
# print('Unique plagiarism categories: ', (plagiarism_df['Category'].unique()))
# # Show counts by different tasks and amounts of plagiarism
# # group and count by task
# counts_per_task=plagiarism_df.groupby(['Task']).size().reset_index(name="Counts")
# print("\nTask:")
# print(counts_per_task)
# # group by plagiarism level
# counts_per_category=plagiarism_df.groupby(['Category']).size().reset_index(name="Counts")
# print("\nPlagiarism Levels:")
# print(counts_per_category)
# # group by task AND plagiarism level
# counts_task_and_plagiarism=plagiarism_df.groupby(['Task', 'Category']).size().reset_index(name="Counts")
# print("\nTask & Plagiarism Level Combos :")
# print(counts_task_and_plagiarism)
# # counts
# group = ['Task', 'Category']
# counts = plagiarism_df.groupby(group).size().reset_index(name="Counts")
# plt.figure(figsize=(8,5))
# plt.bar(range(len(counts)), counts['Counts'], color = 'blue')
# plt.show()
# Calculate the ngram containment for one answer file/source file pair in a df
def calculate_containment(df, n, answer_filename):
    """
    Calculates the containment between a given answer text and its associated source text.
    This function creates a count of ngrams (of a size, n) for each text file in our data.
    Then calculates the containment by finding the ngram count for a given answer text,
    and its associated source text, and calculating the normalized intersection of those counts.

    :param df: A dataframe with columns,
        'File', 'Task', 'Category', 'Class', 'Text', and 'Datatype'
    :param n: An integer that defines the ngram size
    :param answer_filename: A filename for an answer text in the df, ex. 'g0pB_taskd.txt'
    :return: A single containment value that represents the similarity
        between an answer text and its source text.
    :raises ValueError: if the answer text yields no n-grams of size *n*.
    """
    # FIX: the original body used sklearn's CountVectorizer, which is never
    # imported anywhere in this module (NameError at call time).  The counting
    # below reproduces CountVectorizer(analyzer='word')'s defaults with the
    # standard library: lowercase text, tokens are runs of 2+ word characters,
    # word n-grams of size n.
    import re
    from collections import Counter

    def ngram_counts(text):
        # One Counter entry per distinct n-gram (as a token tuple).
        tokens = re.findall(r"(?u)\b\w\w+\b", text.lower())
        return Counter(tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1))

    answer_df = df.query('File == @answer_filename')
    a_text = answer_df.iloc[0].at['Text']

    # The source file name is derived from character 6 of the answer filename
    # (this dataset's naming convention — e.g. 'answerX.txt' -> 'sourceX.txt').
    source_filename = 'source' + answer_filename[6] + '.txt'
    source_df = df.query('File == @source_filename')
    s_text = source_df.iloc[0].at['Text']

    # Containment = sum of per-ngram minimum counts shared with the source,
    # normalized by the total number of answer n-grams.
    a_counts = ngram_counts(a_text)
    s_counts = ngram_counts(s_text)
    intersection = sum(min(count, s_counts[gram]) for gram, count in a_counts.items())
    total = sum(a_counts.values())
    if total == 0:
        raise ValueError("answer text has no {0}-grams to compare".format(n))
    return intersection / total
import pandas as pd
import helpers
import plagiarism_feature_engineering
import numpy as np
from distinctFeatures import cosine_1
from distinctFeatures import cosine_2
from distinctFeatures import cosine_trigram
from distinctFeatures import docism_nltk
from distinctFeatures import jaccard_trigram
from distinctFeatures import lcs
from distinctFeatures import ngram
from distinctFeatures import phrase_nltk_1
from distinctFeatures import phrase_nltk_2
from distinctFeatures import rabin_karp_2
from distinctFeatures import sequence_matcher
from distinctFeatures import embed_spacy
from distinctFeatures import bert_sentence_encoder
# Corpus metadata: file name, task, plagiarism category, train/test split.
csv_file = 'data/file_information.csv'
complete_df = pd.read_csv(csv_file)
# Containment is computed for each of these ngram sizes (columns c_1..c_4).
ngram_range = [1,2,3,4]
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
features_list = []
# One row per feature, one column per document.
# BUG FIX: the original allocated 15 rows for 16 appended feature names
# because the row index was never advanced after the BERT features; the
# TensorFlow features then overwrote the BERT row and the DataFrame
# constructor below failed with a 15-column / 16-name mismatch.
all_features = np.zeros((16, len(complete_df)))
i = 0
features_list.append("bert_sentence_encoder")
all_features[i] = np.squeeze(bert_sentence_encoder.create_bert_sentence_encoder_features(complete_df))
i += 1  # BUG FIX: this advance was missing (see note above)
# Imported here (not at the top) to preserve the original lazy-load placement.
from distinctFeatures import tensorflow_sentence_embedding
features_list.append("tensorflow_sentence_embedding")
all_features[i] = np.squeeze(tensorflow_sentence_embedding.create_tensorflow_sentence_embedding_features(complete_df))
i += 1
features_list.append("embed_spacy")
all_features[i] = np.squeeze(embed_spacy.create_embed_spacy_features(complete_df))
i += 1
# Containment features, one row per ngram size.
for n in ngram_range:
    column_name = 'c_' + str(n)
    features_list.append(column_name)
    all_features[i] = np.squeeze(ngram.create_containment_features(complete_df, n))
    print(f"n gram ::: {n}")
    i += 1
features_list.append("docism_nltk")
all_features[i] = np.squeeze(docism_nltk.create_docism_nltk_features(complete_df))
i += 1
features_list.append("cosine_trigram")
all_features[i] = np.squeeze(cosine_trigram.create_cosine_trigram_features(complete_df))
i += 1
features_list.append("cosine_1")
all_features[i] = np.squeeze(cosine_1.create_cosine_1_features(complete_df))
i += 1
features_list.append("cosine_2")
all_features[i] = np.squeeze(cosine_2.create_cosine_2_features(complete_df))
i += 1
features_list.append("jaccard_trigram")
all_features[i] = np.squeeze(jaccard_trigram.create_jaccard_trigram_features(complete_df))
i += 1
# Longest-common-subsequence over normalized words.
features_list.append('lcs_word')
all_features[i] = np.squeeze(lcs.create_lcs_features(complete_df))
i += 1
features_list.append("rabin_karp_2")
all_features[i] = np.squeeze(rabin_karp_2.create_rabin_karp_2_features(complete_df))
i += 1
features_list.append("sequence_matcher")
all_features[i] = np.squeeze(sequence_matcher.create_sequence_matcher_features(complete_df))
# Rows are features; transpose so each document becomes a row.
features_df = pd.DataFrame(np.transpose(all_features), columns=features_list)
# NOTE(review): the original [:15] slice is kept; with 16 columns it now
# drops the last (sequence_matcher) feature -- widen to [:16] if all
# features should be used.
test_selection = list(features_df)[:15] # first couple columns as a test
(train_x, train_y), (test_x, test_y) = plagiarism_feature_engineering.train_test_data(complete_df, features_df, test_selection)
data_dir = 'plagiarism_data'
plagiarism_feature_engineering.make_csv(train_x, train_y, filename='train.csv', data_dir=data_dir)
plagiarism_feature_engineering.make_csv(test_x, test_y, filename='test.csv', data_dir=data_dir)
"vyas.2@iitj.ac.in"
] | vyas.2@iitj.ac.in |
aa7714a63d8bb03ffbd3500253ef8dda717a6714 | c2ae7c1ef14067574c100d66f9eb8ce8d9a67fcc | /assignment-2020-1/count_fixed_polyominoes.py.py | 8734ffa5420a7bf97b88eec268fb95987967b3f4 | [] | no_license | pxara/hello-world | a57cba4cf6103b506f69aa3db7128e4ccce66c0e | eb212558bd087fd60d12fef32dcaa6729d87808e | refs/heads/master | 2020-04-07T02:35:45.221868 | 2020-03-24T20:50:35 | 2020-03-24T20:50:35 | 157,982,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,005 | py | import sys
import pprint
import argparse
def Neighbors(p, u, v):
    """Return True if cell v is 4-adjacent to any cell of p other than u.

    p: list of (a, b) cells already in the partial polyomino
    u: the cell whose expansion produced v (excluded from the check)
    v: candidate cell
    """
    for cell in p:
        if cell == u:
            continue
        adjacent = (
            (cell[0], cell[1] + 1),
            (cell[0], cell[1] - 1),
            (cell[0] + 1, cell[1]),
            (cell[0] - 1, cell[1]),
        )
        if v in adjacent:
            return True
    # BUG FIX: the original returned False after inspecting only the first
    # non-u cell of p; per Redelmeier's algorithm every cell must be checked
    # before concluding that v touches none of them.
    return False
def CountFixedPolyominoes(G,untried,n,p):
	"""Redelmeier-style recursive enumeration of fixed polyominoes.

	G: adjacency dict of the candidate cell region (see PolyominoGraph)
	untried: iterable of cells that may still be added at this level
	n: target polyomino size
	p: current partial polyomino (list of cells), mutated in place
	Increments the module-global counter c once per size-n polyomino found.
	"""
	global c
	untried=list(untried)
	while len(untried)>0:
		u=untried[0]
		p.append(u)
		untried.remove(u)
		if len(p)==n:
			c=c+1
		else:
			# Extend the frontier with neighbours of u that are not already
			# queued, not in p, and not adjacent to older cells of p.
			new_neighbors=[]
			for v in G[u]:
				if (v not in untried) and (v not in p) and (not Neighbors(p,u,v)):
					new_neighbors.append(v)
			new_untried=untried+new_neighbors
			CountFixedPolyominoes(G,new_untried,n,p)
		p.remove(u)
def PolyominoGraph(n):  # cells are (a, b) with b >= 0
    """Build the adjacency dict of the cell region searched for fixed n-ominoes.

    Column 0 contains rows 0..n-1; each later column holds a symmetric band
    that shrinks by one on each side.  Neighbours of every cell are listed in
    the same order as the original: (a+1,b), (a,b+1), (a-1,b), (a,b-1).
    """
    cells = {}
    for row in range(0, n):
        cells[(row, 0)] = []
    span = n - 2
    for col in range(1, n):
        for row in range(-span, span + 1):
            cells[(row, col)] = []
        span -= 1
    for (row, col) in cells:
        for neighbour in ((row + 1, col), (row, col + 1), (row - 1, col), (row, col - 1)):
            if neighbour in cells:
                cells[(row, col)].append(neighbour)
    return cells
parser=argparse.ArgumentParser()
# -p / --printswitch: also pretty-print the generated cell graph.
parser.add_argument("-p","--printswitch",action="store_true")
parser.add_argument("n",type=int,help="polyomino size")
args=parser.parse_args()
G=PolyominoGraph(args.n)
# Start the search from the origin cell with an empty partial polyomino.
untried={(0,0)}
p=[]
# Module-global counter incremented by CountFixedPolyominoes.
c=0
if args.printswitch:
	pprint.pprint(G)
CountFixedPolyominoes(G,untried,args.n,p)
print(c)
| [
"noreply@github.com"
] | noreply@github.com |
cfcce11b27b774b37598f14b5d28dc3399471305 | 35732ac0df40726a3db8efc8365485482f9787ae | /WEB/FRAMEWORK/intra1/polls/urls.py | cac50871090d8f8d60137e38259b430b633dbfaf | [] | no_license | maksru/42 | 14460fe8e4a1963f30fca8460de364eaafe50617 | 6a6f7031a3f4bd52948e68c4a4afe11265034cfe | refs/heads/master | 2020-03-09T11:49:37.825174 | 2014-05-21T00:04:11 | 2014-05-21T00:04:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | from django.conf.urls import patterns, url
from polls import views
# URLconf for the polls app.
# NOTE(review): patterns() is a pre-Django-1.10 helper; a plain list of
# url()/path() entries is required on modern Django.
urlpatterns = patterns('',
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^(?P<pk>\d+)/$', views.DetailView.as_view(), name = "detail"),
    url(r'^(?P<pk>\d+)/results/$', views.ResultsView.as_view(), name = "results"),
    url(r'^(?P<poll_id>\d+)/vote/$', views.vote, name = "vote"),
)
| [
"mfassi-f@e1r5p5.42.fr"
] | mfassi-f@e1r5p5.42.fr |
fee775131caf37bc5ea889d3b4d1e4cce1797648 | b5d7c7956774e6829c445d4c05d8360152b173ef | /TiendaEnLineaa/TiendaEnLineaa/asgi.py | 5f98c0e495b01b280e9b5867dab0c781dde5abc2 | [] | no_license | GustavoR0dr1gu3z/Tienda_En_Linea | 63e3e270e9482a2b639a91409726ce3813d8a350 | ce26018bf22e83af24848439c21f6073a07cba7e | refs/heads/master | 2023-02-26T04:28:35.187384 | 2021-02-06T03:28:50 | 2021-02-06T03:28:50 | 257,765,623 | 1 | 0 | null | 2021-02-01T23:44:45 | 2020-04-22T02:04:15 | Python | UTF-8 | Python | false | false | 405 | py | """
ASGI config for TiendaEnLineaa project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before building the ASGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TiendaEnLineaa.settings')
# Module-level ASGI entry point consumed by the application server.
application = get_asgi_application()
| [
"gustavo.soader.cx@gmail.com"
] | gustavo.soader.cx@gmail.com |
a48e7fa7096b0f0e00f6fc0077ea4b25cb583e86 | a23e027ef46b7c34be12e9336ada303d0780a8d5 | /CODE/Get_key.py | 7f507e64f153feb6222877431d36df6783e08f4a | [] | no_license | PaulDalous/Vigenere_project | 266b431553dc35c86bf4933c0a3a917135334cdc | e0de1e9776479f10d8003a20a2faac6911f8c471 | refs/heads/master | 2020-04-27T03:47:47.226316 | 2019-03-05T23:17:47 | 2019-03-05T23:17:47 | 174,035,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | def get_key(sort_dict_frequencies, length_key, which_letter):
""" Get the key of a ciphered message given its dict_frequencies attribute
:param sort_dict_frequencies: list - list of sorted list where the first element is the most frequent letter for the correspondant letter of the key
:param length_key: integer - length of the key
:param which_letter : list - list of which frequencies we take for each letter of the key (the max or the second max)
:return key: string - value of the decoded key """
    key = ""
    # Uppercase alphabet "A".."Z".
    al = ("".join([ chr(97+i) for i in range(0,26) ])).upper()
    for index in range(length_key):
        x = which_letter[index]
        # Most (or 2nd-most) frequent ciphertext letter at this key position.
        letter = sort_dict_frequencies[index][x]
        p = al.find(letter)
        # Assuming the plaintext's most frequent letter is 'E', the key letter
        # is the shift p - index('E') (mod 26).
        key += al[(p + 26 - al.find("E")) % 26]
return(key) | [
"paul.dalous@gmail.com"
] | paul.dalous@gmail.com |
54b9422df048da72332828a957305b5624b42878 | 32e1c91466bc33a638aae52b0eb43b1bf0d54b12 | /label_image.py | 9f258c29ccee36c3c6a023fb550c7664ca612dd0 | [] | no_license | projectsforus/Animal-Classification | 328fdde22af050f9f53aa95b99aa08f27977f0c7 | a63bba2bef92ba7723428467dc123111b3446dd1 | refs/heads/master | 2020-05-17T19:03:10.172385 | 2019-04-28T12:14:53 | 2019-04-28T12:14:53 | 183,903,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,522 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def load_graph(model_file):
  """Deserialize a frozen TF1 GraphDef from `model_file` into a new Graph."""
  graph_def = tf.GraphDef()
  with open(model_file, "rb") as fh:
    graph_def.ParseFromString(fh.read())
  graph = tf.Graph()
  with graph.as_default():
    tf.import_graph_def(graph_def)
  return graph
def read_tensor_from_image_file(file_name,
                                input_height=299,
                                input_width=299,
                                input_mean=0,
                                input_std=255):
  """Decode one image file into a normalized float batch.

  Returns a numpy array of shape (1, input_height, input_width, 3), computed
  eagerly through a throwaway TF1 Session; values are (x - mean) / std.
  """
  input_name = "file_reader"
  output_name = "normalized"  # NOTE(review): unused
  file_reader = tf.read_file(file_name, input_name)
  # Choose a decoder from the file extension; default to JPEG.
  if file_name.endswith(".png"):
    image_reader = tf.image.decode_png(
        file_reader, channels=3, name="png_reader")
  elif file_name.endswith(".gif"):
    image_reader = tf.squeeze(
        tf.image.decode_gif(file_reader, name="gif_reader"))
  elif file_name.endswith(".bmp"):
    image_reader = tf.image.decode_bmp(file_reader, name="bmp_reader")
  else:
    image_reader = tf.image.decode_jpeg(
        file_reader, channels=3, name="jpeg_reader")
  float_caster = tf.cast(image_reader, tf.float32)
  # Add the batch dimension, resize, then normalize.
  dims_expander = tf.expand_dims(float_caster, 0)
  resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
  normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
  sess = tf.Session()
  result = sess.run(normalized)
  return result
def load_labels(label_file):
  """Return the label list, one entry per line of `label_file`."""
  return [line.rstrip() for line in tf.gfile.GFile(label_file).readlines()]
def main_func(file_name, model_file, label_file, input_height, input_width, input_mean, input_std, input_layer, output_layer):
  """Classify one image with a frozen graph.

  Returns (labels, scores, top_k) where top_k holds the indices of the five
  highest scores, best first.
  """
  graph = load_graph(model_file)
  t = read_tensor_from_image_file(
      file_name,
      input_height=input_height,
      input_width=input_width,
      input_mean=input_mean,
      input_std=input_std)
  # import_graph_def prefixes every op name with "import/".
  input_name = "import/" + input_layer
  output_name = "import/" + output_layer
  input_operation = graph.get_operation_by_name(input_name)
  output_operation = graph.get_operation_by_name(output_name)
  with tf.Session(graph=graph) as sess:
    results = sess.run(output_operation.outputs[0], {
        input_operation.outputs[0]: t
    })
  results = np.squeeze(results)
  # Indices of the five highest scores, best first.
  top_k = results.argsort()[-5:][::-1]
  labels = load_labels(label_file)
return labels, results, top_k | [
"mohamedelzallat3@gmail.com"
] | mohamedelzallat3@gmail.com |
d77ef1ccddd7a308942ebe5dac9e380deafe4e34 | ac3406b9e294679f77b2580d8783458a4fb303af | /lib/titlecase.py | 17fa2414e1c96f2bb48afd53b4c83fec43814198 | [
"MIT"
] | permissive | zackw/active-geolocator | ae8d0a08426f47dd38e20311ba433c2056bf5a86 | 4a1f3fd7c17e26c58024efb23e3831a1b23b2a1a | refs/heads/master | 2021-01-17T03:14:07.908676 | 2018-11-21T19:36:20 | 2018-11-21T19:36:20 | 54,581,951 | 29 | 4 | MIT | 2018-11-21T19:36:21 | 2016-03-23T18:01:05 | JavaScript | UTF-8 | Python | false | false | 4,277 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
https://github.com/ppannuto/python-titlecase/
Original Perl version by: John Gruber http://daringfireball.net/ 10 May 2008
Python version by Stuart Colville http://muffinresearch.co.uk
License: http://www.opensource.org/licenses/mit-license.php
"""
import re
__all__ = ['titlecase']
__version__ = '0.8.1'
# Words kept lowercase mid-title (NYT Manual of Style, plus v./vs.).
SMALL = 'a|an|and|as|at|but|by|en|for|if|in|of|on|or|the|to|v\.?|via|vs\.?'
# Punctuation character class reused inside several patterns below.
PUNCT = r"""!"#$%&'‘()*+,\-./:;?@[\\\]_`{|}~"""
SMALL_WORDS = re.compile(r'^(%s)$' % SMALL, re.I)
INLINE_PERIOD = re.compile(r'[a-z][.][a-z]', re.I)
UC_ELSEWHERE = re.compile(r'[%s]*?[a-zA-Z]+[A-Z]+?' % PUNCT)
CAPFIRST = re.compile(r"^[%s]*?([A-Za-z])" % PUNCT)
SMALL_FIRST = re.compile(r'^([%s]*)(%s)\b' % (PUNCT, SMALL), re.I)
SMALL_LAST = re.compile(r'\b(%s)[%s]?$' % (SMALL, PUNCT), re.I)
SUBPHRASE = re.compile(r'([:.;?!\-\—][ ])(%s)' % SMALL)
# Elisions/contractions like d'Artagnan, O'Brien, l'amour.
APOS_SECOND = re.compile(r"^[dol]{1}['‘]{1}[a-z]+(?:['s]{2})?$", re.I)
ALL_CAPS = re.compile(r'^[A-Z\s\d%s]+$' % PUNCT)
# Initialisms such as "A.B." or "U.S."
UC_INITIALS = re.compile(r"^(?:[A-Z]{1}\.{1}|[A-Z]{1}\.{1}[A-Z]{1})+$")
MAC_MC = re.compile(r"^([Mm]c)(\w.+)")
def set_small_word_list(small=SMALL):
    """Rebuild the module-level small-word regexes from a custom word list.

    `small` is an alternation string like the default SMALL constant.
    """
    global SMALL_WORDS
    global SMALL_FIRST
    global SMALL_LAST
    global SUBPHRASE
    SMALL_WORDS = re.compile(r'^(%s)$' % small, re.I)
    SMALL_FIRST = re.compile(r'^([%s]*)(%s)\b' % (PUNCT, small), re.I)
    SMALL_LAST = re.compile(r'\b(%s)[%s]?$' % (small, PUNCT), re.I)
    SUBPHRASE = re.compile(r'([:.;?!][ ])(%s)' % small)
def titlecase(text, callback=None):
    """
    Titlecases input text
    This filter changes all words to Title Caps, and attempts to be clever
    about *un*capitalizing SMALL words like a/an/the in the input.
    The list of "SMALL words" which are not capped comes from
    the New York Times Manual of Style, plus 'vs' and 'v'.
    """
    lines = re.split('[\r\n]+', text)
    processed = []
    for line in lines:
        all_caps = ALL_CAPS.match(line)
        words = re.split('[\t ]', line)
        tc_line = []
        for word in words:
            # A caller-supplied hook may fully handle a word; a falsy return
            # falls through to the default rules.
            if callback:
                new_word = callback(word, all_caps=all_caps)
                if new_word:
                    tc_line.append(new_word)
                    continue
            # In an all-caps line, keep initialisms like "U.S." untouched.
            if all_caps:
                if UC_INITIALS.match(word):
                    tc_line.append(word)
                    continue
            # Elisions such as d'Artagnan / O'Brien: capitalize around the
            # apostrophe as appropriate.
            if APOS_SECOND.match(word):
                if len(word[0]) == 1 and word[0] not in 'aeiouAEIOU':
                    word = word[0].lower() + word[1] + word[2].upper() + word[3:]
                else:
                    word = word[0].upper() + word[1] + word[2].upper() + word[3:]
                tc_line.append(word)
                continue
            # Words with internal periods (e.g. "example.com") or unusual
            # internal capitals (e.g. "iTunes") pass through unchanged.
            if INLINE_PERIOD.search(word) or (not all_caps and UC_ELSEWHERE.match(word)):
                tc_line.append(word)
                continue
            # Small words are lowercased here; first/last-word fixups below.
            if SMALL_WORDS.match(word):
                tc_line.append(word.lower())
                continue
            # Mc-prefixed surnames: capitalize both parts (McDonald).
            match = MAC_MC.match(word)
            if match:
                tc_line.append("%s%s" % (match.group(1).capitalize(),
                                         match.group(2).capitalize()))
                continue
            # Recurse into slash- and hyphen-separated compounds.
            if "/" in word and "//" not in word:
                slashed = map(lambda t: titlecase(t,callback), word.split('/'))
                tc_line.append("/".join(slashed))
                continue
            if '-' in word:
                hyphenated = map(lambda t: titlecase(t,callback), word.split('-'))
                tc_line.append("-".join(hyphenated))
                continue
            if all_caps:
                word = word.lower()
            # Just a normal word that needs to be capitalized
            tc_line.append(CAPFIRST.sub(lambda m: m.group(0).upper(), word))
        result = " ".join(tc_line)
        # Always capitalize a small word at the start or end of the line,
        # and right after sub-phrase punctuation (colon, dash, etc.).
        result = SMALL_FIRST.sub(lambda m: '%s%s' % (
            m.group(1),
            m.group(2).capitalize()
        ), result)
        result = SMALL_LAST.sub(lambda m: m.group(0).capitalize(), result)
        result = SUBPHRASE.sub(lambda m: '%s%s' % (
            m.group(1),
            m.group(2).capitalize()
        ), result)
        processed.append(result)
    return "\n".join(processed)
| [
"zackw@panix.com"
] | zackw@panix.com |
faadcc1ed3ca705c5b80a43be8f58f6bc6c55a5b | 7dd0c39502d39b8f88199b8614bd0f3cc17874b7 | /next-time.py | 08fd0aecdc43617031ab7e4438cd91da7bb3e7ad | [] | no_license | nealav/Data-Structures-and-Algorithms | 4c47d20b0b76c79a4dce2701bd372c260f1ebd1b | 0cc47f012107784d58dfdc869a80440ca7a3b1ea | refs/heads/master | 2020-03-22T12:38:49.731324 | 2018-10-04T09:36:13 | 2018-10-04T09:36:13 | 140,052,953 | 0 | 1 | null | 2019-10-02T17:03:29 | 2018-07-07T04:35:54 | Python | UTF-8 | Python | false | false | 669 | py | def next_time_permute(S):
hour = int(S[:2])
min = int(S[3:])
t1 = S[:2] + S[3:]
while True:
min -= 1
if (min == -1):
min = 59
hour -= 1
if (hour == -1):
hour = 23
t2 = ''
if (hour < 10):
t2 = t2 + '0' + str(hour)
else: t2 = t2 + str(hour)
if (min < 10):
t2 = t2 + '0' + str(min)
else: t2 = t2 + str(min)
if (subset(t1, t2)):
print(str(t2[:2]) + ':' + str(t2[2:]))
return t2[:2] + ':' + t2[3:]
def subset(S1, S2):
    """True iff every character of S2 also occurs in S1 (multiplicity ignored)."""
    l1 = list(S1)
    l2 = list(S2)
return set(l2).issubset(set(l1)) | [
"neal.viswanath@gmail.com"
] | neal.viswanath@gmail.com |
cd7a6929bc5b9db78d6cc4535e54a1e82689ae66 | 98e9ea1c05b3c696e30123f0647773d53a042dc8 | /src/ml_model.py | f6cc969ea242fc58b4f307e7fe9dfdb4970216f8 | [] | no_license | kevinbdsouza/lstm_glif | f1309fe16dbc308690432d24d34edb290b11f019 | 231152419bf702f1859ad95717db333fe4a8785a | refs/heads/master | 2020-07-27T05:12:11.427535 | 2019-09-21T20:43:49 | 2019-09-21T20:43:49 | 208,881,081 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | import torch.nn as nn
class SeqParamModel(nn.Module):
    """Bidirectional-LSTM encoder mapping a sequence to a parameter vector.

    cfg must provide input_dim, hidden_dim and param_dim.
    Input:  x of shape (batch, seq_len, input_dim)  -- batch_first LSTM.
    Output: params of shape (batch, 1, param_dim).
    """
    def __init__(self, cfg):
        super(SeqParamModel, self).__init__()
        self.input_dim = cfg.input_dim
        self.hidden_dim = cfg.hidden_dim
        self.param_dim = cfg.param_dim
        self.encode_lstm = nn.LSTM(self.input_dim, self.hidden_dim, 1,
                                   bidirectional=True, batch_first=True)
        # Maps concatenated forward/backward final hidden states to params.
        self.model_param = nn.Linear(self.hidden_dim * 2, self.param_dim)
    def train_model(self, x):
        encoded_inputs, (h_n, c_n) = self.encode_lstm(x)
        # h_n has shape (num_directions=2, batch, hidden_dim).
        # BUG FIX: the original `h_n.view(-1, 1, hidden_dim*2)` flattened in
        # (direction, batch) order, so for batch > 1 each output row mixed the
        # hidden states of two *different* samples.  Move the batch axis to
        # the front before merging the two directions.
        h_n = h_n.transpose(0, 1).reshape(-1, 1, self.hidden_dim * 2)
        params = self.model_param(h_n)
        return params
    def forward(self, x):
        params = self.train_model(x)
        return params
| [
"kdsouza1496@gmail.com"
] | kdsouza1496@gmail.com |
4c94a35aedb0a78a566827ad4cc80a358349cf40 | cd5d4b0907b694a2b1446f904567a2bfdd352db5 | /utilities.py | 95558d7167579061f441b9a7265a26d609ef654c | [
"MIT"
] | permissive | jaredvonhalle/CSC478WorldFoodFacts | 0ff5a6af9ad5050127f5f85c04c875e11e978ac7 | d86c65e5c4c6d2d88f2b59abc5a73c45736ed7e9 | refs/heads/master | 2021-01-19T05:59:23.348013 | 2016-06-04T20:16:55 | 2016-06-04T20:16:55 | 60,126,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | ############### Utility Functions For World-Food-Facts Dataset #############
import pandas as pd
def getByCountry(dataset, country_name):
    """Rows whose countries_en matches `country_name` (regex substring match).

    Rows with a missing countries_en value are excluded.
    """
    mask = dataset.countries_en.str.contains(r'%s' % country_name)
    return dataset.loc[mask.fillna(False)]
def getUniqueCountries(dataset):
    """Unique single-country values of countries_en, in first-seen order.

    Multi-country rows (comma-separated) and NaN entries are skipped.
    """
    uniques = dataset.countries_en.unique()
    return [str(value).split(',')[0]
            for value in uniques
            if ',' not in str(value) and str(value) != 'nan']
def getAvg(dataset, nutriment):
    """Mean of the given nutriment column, ignoring null entries."""
    column = dataset[nutriment]
    return column.dropna().mean()
def compareCountriesByNutrimentAverage(data, countries, nutriment):
    """Average `nutriment` per country, for countries with enough data.

    Only countries with more than 30 non-null values contribute; the result
    is a one-column ('average') frame sorted ascending with NaN rows dropped.
    """
    df = pd.DataFrame(index=['average'], columns=countries)
    for i in range(len(countries)):
        country = countries[i]
        country_data = getByCountry(data, country)
        # Require a minimal sample size before reporting an average.
        if (country_data[nutriment].notnull().sum() > 30):
            avg = getAvg(country_data, nutriment)
            df[country] = avg
    return df.T.sort_values(by='average', axis=0).dropna()
| [
"davidshchang@gmail.com"
] | davidshchang@gmail.com |
741ddcea17c8efd82e9c0094729f2b871d0a1a0d | 4a43a33db281d75bdc6b512cf9b8183667587605 | /benchmarks/unary.py | a9c4f82110ad890bcac1bf1d9c4b211bc5ca657d | [] | no_license | mruberry/nestedtensor | 41e02aa2f7cad18955ad63cdc9f82ad91940a9cf | 5026e29c6b388c73357e6d7c136073cf3b46ee9c | refs/heads/master | 2021-05-17T20:57:20.467867 | 2020-03-29T01:15:22 | 2020-03-29T01:15:22 | 250,949,151 | 0 | 0 | null | 2020-03-29T03:56:04 | 2020-03-29T03:56:04 | null | UTF-8 | Python | false | false | 1,117 | py | import torch
import nestedtensor
import utils
import random
# NOTE(review): this first assignment is dead code -- it is immediately
# overwritten by the second one below.
RAND_INTS = [random.randint(10, 30) for _ in range(2000)] # Performance tanks hard for lots of small Tensors as expected
RAND_INTS = [random.randint(100, 300) for _ in range(20)]
def gen_t_cos():
    """Closure benchmarking in-place cos over all rows flattened into one tensor."""
    flattened = [torch.rand(n, 2560).reshape(-1) for n in RAND_INTS]
    tensor = torch.cat(flattened)
    def t():
        tensor.cos_()
    return t
def gen_t_loop_cos():
    """Closure benchmarking in-place cos applied tensor-by-tensor in Python."""
    tensors = [torch.rand(n, 2560) for n in RAND_INTS]
    def t_loop():
        for tensor in tensors:
            tensor.cos_()
    return t_loop
def gen_nt_cos():
    """Closure benchmarking in-place cos on a (copying) nestedtensor."""
    nested_tensor = nestedtensor.nested_tensor(
        [torch.rand(i, 2560) for i in RAND_INTS])
    def nt():
        nested_tensor.cos_()
    return nt
def gen_ant_cos():
    """Closure benchmarking in-place cos on an aliasing (as_) nestedtensor."""
    nested_tensor = nestedtensor.as_nested_tensor(
        [torch.rand(i, 2560) for i in RAND_INTS])
    def ant():
        nested_tensor.cos_()
    return ant
if __name__ == "__main__":
    # Compare flat-tensor, python-loop, and nestedtensor variants.
    print(utils.benchmark_fn(gen_t_cos()))
    print(utils.benchmark_fn(gen_t_loop_cos()))
    print(utils.benchmark_fn(gen_nt_cos()))
    print(utils.benchmark_fn(gen_ant_cos()))
| [
"noreply@github.com"
] | noreply@github.com |
29069b7bf4a02cecc9619fa5e2ca992e6a3f59f0 | ca624da79382caeb2dc9f01f33fc31db2b3bb701 | /plms/plms_pages/company_list.py | 79ee14d186503bd8b0896a7b0794ae520f1896d3 | [] | no_license | buxiangjie/CloudLoanWeb | 890a0210cb46caaa255aa3e1e45252293e884eb3 | ff441d001c2ffcdec532b6aae4ff66c585ce8b99 | refs/heads/master | 2023-05-05T01:27:15.208364 | 2021-05-25T03:22:55 | 2021-05-25T03:22:55 | 271,734,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # -*- coding: UTF-8 -*-
"""
@auth:buxiangjie
@date:2020-05-12 11:26:00
@describe: 企业列表
"""
import time
from selenium.webdriver.common.by import By
from common.base import Base
class CompanyList(Base):
    """Page object for the collection-company list page."""
    # Locator: label of the first form item on the list page.
    company_name = (By.XPATH, "//form[@class='el-form reqForm el-form--inline']/div[1]/label")
    def check_company_list(self):
        # Brief fixed wait for the page to render before reading the label.
        time.sleep(1)
assert self.get_text(*self.company_name) == "催收公司名称:" | [
"bxj3416162@163.com"
] | bxj3416162@163.com |
abc92ff4af707ac89bad47fec0d739190b3d6d37 | 4b5d0c384b62004d302f22aabfc6c25d969e0ef8 | /gen.py | 03aa0b109fd4b8a5f4e17570b9084516db1cd3a6 | [] | no_license | Integralist/WWENetworkDownload | e7a10c7d0c088210a59953bf30922039e5ce74da | 2ab9d2c65b3fa9499423a678bb54f7ac7f92c4fb | refs/heads/master | 2021-07-12T09:49:31.339253 | 2020-11-28T12:56:52 | 2020-11-28T12:56:52 | 216,238,220 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,942 | py | import argparse
import subprocess
def extract_episodes():
    """Parse segments.txt into {video_title: [[start(, end)], ...]}.

    The result resembles:
    {
        "video_title_A": [
            ['00:02:16'],
            ['00:10:00', '00:20:00'],
            ...
        ],
        ...
    }
    A line with three or more space-separated words is treated as a video
    title; blank lines are visual separators; every other line is a
    start (and optional end) timestamp pair for the current title.
    """
    episodes = {}
    current_title = ""
    with open("segments.txt") as handle:
        for raw in handle:
            text = raw.splitlines()[0]
            fields = text.split(" ")
            if len(fields) > 2:
                current_title = text
                episodes[current_title] = []
                continue
            if fields == [] or fields == [""]:
                # Blank separator line.
                continue
            episodes[current_title].append(fields)
    return episodes
def parse_segments(episodes):
    """Flatten each video's time-segment lists into one ffmpeg flag string.

    Mutates and returns `episodes`: each list of [start(, end)] segments
    becomes e.g. "-to 00:02:16 0.mp4 -ss 00:10:00  -to 00:20:00 1.mp4".
    A timestamp that opens a two-value pair emits no filename, leaving a
    double space before the next flag (harmless when passed to a shell).
    """
    for title, segments in episodes.items():
        tmp = []
        for file_number, segment in enumerate(segments):
            for i, time in enumerate(segment):
                num_segments = len(segment)
                # segments [
                #   segment [i=0 '00:10:00'],
                #   segment [i=0 '00:15:02', i=1 '00:16:00'],
                #   segment [i=0 '00:23:37', i=1 '00:34:14'],
                #   segment [i=0 '00:35:40', i=1 '00:37:07'],
                #   segment [i=0 '00:44:40', i=1 '00:45:30'],
                #   segment [i=0 '00:50:46', i=1 '00:52:44'],
                #   segment [i=0 '01:02:34', i=1 '01:03:19'],
                #   segment [i=0 '01:14:02']
                # ]
                # if the first line only has a single value
                if file_number == 0 and num_segments == 1:
                    flag = "-to"
                # if the last segment line only has a single value
                elif file_number == len(segments) - 1 and num_segments == 1:
                    flag = "-ss"
                else:
                    flag = "-ss" if i == 0 else "-to"
                file = (
                    "" if num_segments > 1 and i == 0 else f"{file_number}.mp4"
                )
                tmp.append(f"{flag} {time} {file}")
            # we mutate the original episodes data structure so that the
            # nested lists of time segments becomes one long string of
            # ffmpeg flags.
            #
            # e.g. "-to 00:02:16 1.mp4 -ss 00:10:00 -to 00:20:00 2.mp4"
            episodes[title] = " ".join(tmp)
    return episodes
def normalize_video_filename(video):
    """Backslash-escape spaces and parentheses so the name is shell-safe."""
    return video.translate(str.maketrans({"(": r"\(", ")": r"\)", " ": r"\ "}))
def generate_tmp_video_files(num_files):
    """Build the ffmpeg concat list ("file 'N.mp4'" entries).

    Entries are joined by a literal backslash-n ("\\n"), which the shell
    printf command later turns into real newlines.
    """
    entries = []
    for idx in range(num_files):
        separator = "\\n" if idx < num_files - 1 else ""
        entries.append(f"file '{idx}.mp4'{separator}")
    return "".join(entries)
def generate_rm_of_tmp_video_files(num_files):
    """Build a shell brace-expansion list like "{0,1,2}" for `rm {..}.mp4`."""
    return "{" + ",".join(str(idx) for idx in range(num_files)) + "}"
def main(args: argparse.Namespace):
    """Cut and re-join every video listed in segments.txt via shelled-out ffmpeg.

    For each title: extract the configured time segments into numbered
    temporary mp4s, concat them into "<date>-<name>-edit.mp4", then clean up.
    """
    output_dir = normalize_video_filename(args.path)
    episodes = parse_segments(extract_episodes())
    for episode, flags in episodes.items():
        video = normalize_video_filename(episode)
        # Title format is assumed to be "YYYY.MM.DD - Name"; splitting on
        # "- " would raise IndexError otherwise -- TODO confirm inputs.
        video_name = normalize_video_filename(episode.split("- ")[1])
        video_date = episode.split(" ")[0]
        year = video_date.split(".")[0]
        # sometimes I put in a new title but then stop before I start entering more
        # time segments for that video (just so I know where to start back up.
        # to avoid that scenario from breaking this code and causing an exception,
        # I'll just skip over any video that's missing segments.
        if flags == []:
            continue
        # One temporary file per ".mp4" occurrence in the flag string.
        num_files = len(flags.split(".mp4")) - 1
        cmd_extract = f"ffmpeg -i {video}.mp4 {flags}"
        cmd_temp = f'printf "{generate_tmp_video_files(num_files)}" > concat.txt'
        cmd_concat = (
            f"ffmpeg -f concat -safe 0 -i concat.txt -c copy {video_date}-{video_name}-edit.mp4"
        )
        cmd_rm = (
            f"rm {generate_rm_of_tmp_video_files(num_files)}.mp4 && rm concat.txt"
        )
        cmd_done = "say all done"
        cmd_cd = f"cd {output_dir}{year}"
        command = f"{cmd_cd} && {cmd_extract} && {cmd_temp} && {cmd_concat} && {cmd_rm} && {cmd_done}" # noqa
        # synchronously execute ffmpeg (i.e. blocking operation)
        #
        # change to `p = subprocess.Popen` instead of just `.call` and then use the
        # `p.communicate()` to co-ordinate multiple subprocesses.
        subprocess.call(command, shell=True)
# CLI wiring.  NOTE(review): runs at import time -- consider wrapping in an
# `if __name__ == "__main__":` guard.
parser = argparse.ArgumentParser(
    description="WWE Video Editor",
    usage="python gen.py --path <path/to/drive/where/its/subdirectories/are/years/>")
path_help = """path to external drive.
note: the video files should be stored within subdirectories of the specified
path, while the subdirectories should be categoried by the year of the video.
e.g. set path to /Volumes/external_drive/videos/wwf/ and it'll look for videos
based on the filenames in the segments.txt. meaning a title of '1999.01.24 -
Royal Rumble 1999' would cause the tool to lookup the video in the /1999/
directory.
"""
parser.add_argument('-p', '--path', required=True, help=path_help)
args = parser.parse_args()
main(args)
| [
"mark.mcdx@gmail.com"
] | mark.mcdx@gmail.com |
c3457c29c2f2be6f4aa5f3b116bc5b45af2d46a4 | be6ce809784da5d4644f5be36819d90a0a4fc612 | /app.py | dc9c524202c10cd14e8f4c84338dc520cf5ce95f | [] | no_license | AliElzaa/Web-app | 23ff91a10602b06d5fb722bcd20c808e1c8134a7 | 9857afc2b14ee31b22d767a970af58286a025494 | refs/heads/master | 2020-03-19T02:02:37.224098 | 2018-05-31T14:24:48 | 2018-05-31T14:24:48 | 135,593,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,978 | py | from flask_sqlalchemy import SQLAlchemy
from flask import Flask, render_template, request, redirect, url_for, flash #import flask
from vs_url_for import vs_url_for
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField
from wtforms.validators import InputRequired,Email, Length
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
from werkzeug.security import generate_password_hash, check_password_hash
# Application and extension wiring.
app = Flask(__name__)
app.config['SECRET_KEY'] = 'jgnd98hag98aogad'  # NOTE(review): hard-coded secret; load from env instead
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://todo_username:todo_password@localhost/Todo'  # NOTE(review): credentials committed to source
bootstrap = Bootstrap(app)
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
# Endpoint unauthenticated users are redirected to by @login_required.
login_manager.login_view = 'login'
class User(UserMixin, db.Model):
    """Registered account; UserMixin supplies the flask-login API."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(15), unique=True)
    email = db.Column(db.String(50), unique=True)
    password = db.Column(db.String(80))  # werkzeug password hash, not plaintext
@login_manager.user_loader
def load_user(user_id):
    """flask-login callback: resolve the session's stored id to a User row."""
    return User.query.get(int(user_id))
class LoginForm(FlaskForm):
    """Credentials form used by /login."""
    username = StringField('username', validators=[InputRequired(), Length(min=4, max=15)])
    password = PasswordField('password', validators=[InputRequired(), Length(min=8, max=80)])
    remember = BooleanField('remember me')
class RegisterForm(FlaskForm):
    """Sign-up form used by /signup."""
    email = StringField('email', validators=[InputRequired(), Email(message='Invalid email'), Length(max=50)])
    username = StringField('username', validators=[InputRequired(), Length(min=4, max=15)])
    password = PasswordField('password', validators=[InputRequired(), Length(min=8, max=80)])
class Todo(db.Model):
    """A single to-do item."""
    id = db.Column(db.Integer, primary_key=True)
    text = db.Column(db.String(200)) #This is for the text
    complete = db.Column(db.Boolean) #boolean variable.
@app.route('/')
def index():
return render_template('index.html')
@app.route('/add', methods=['POST'])
def add():
todo = Todo(text=request.form['todoitem'], complete=False)
db.session.add(todo)
db.session.commit()
return redirect(vs_url_for('dashboard'))
@app.route('/update', methods=['POST'])
def update():
return redirect(vs_url_for('dashboard'))
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user:
if check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
return redirect(vs_url_for('dashboard'))
return '<h1> Invalid username or password</h1>'
return render_template('login.html', form=form)
@app.route('/signup', methods=['GET', 'POST'])
def signup():
form = RegisterForm()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data, method='sha256')
new_user = User(username=form.username.data, email=form.email.data, password=hashed_password)
db.session.add(new_user)
db.session.commit()
return '<h1> New user has been Created!!!'
return render_template('signup.html', form=form)
@app.route('/dashboard')
@login_required
def dashboard():
    """Show every Todo together with the logged-in user's name."""
    all_items = Todo.query.all()
    return render_template('dashboard.html', todos=all_items, name=current_user.username)
@app.route('/logout')
@login_required
def logout():
    """End the current session and return to the landing page."""
    logout_user()
    return redirect(vs_url_for('index'))
@app.route('/delete_todo/<id>', methods=['GET', 'POST'])
def delete_todo(id):
    """Delete the Todo with the given primary key, then return to the dashboard.

    SECURITY NOTE(review): unlike /dashboard and /logout this route is not
    guarded by @login_required, so unauthenticated visitors can delete
    items — confirm whether that is intentional.
    """
    todos = Todo.query.filter_by(id=id).first()
    if not todos:
        # Bug fix: the old message said "no order found" — a copy-paste
        # leftover; this app manages todos, not orders.
        return "<h1>no todo found</h1>"
    db.session.delete(todos)
    db.session.commit()
    flash('You successfully deleted')  # fixed typo: "succesfully"
    return redirect(vs_url_for('dashboard'))
if __name__ == '__main__':
    # Bind to all interfaces so the dev server is reachable from other hosts.
    app.run( host='0.0.0.0', port=8000)
| [
"aelza001@gold.ac.uk"
] | aelza001@gold.ac.uk |
ae4c1c1b0df6cf9a31d0f6d154fe645dd8e7fe8e | fd5c2d6e8a334977cda58d4513eb3385b431a13a | /extract_census_doc.py | a1445f608f735d677f398b8b2b123c44cf91d16e | [
"MIT"
] | permissive | censusreporter/census-api | 817c616b06f6b1c70c7b3737f82f45a80544c44d | c8d2c04c7be19cdee1000001772adda541710a80 | refs/heads/master | 2023-07-28T06:17:26.572796 | 2023-07-05T20:37:03 | 2023-07-05T20:37:03 | 9,879,953 | 146 | 52 | MIT | 2022-07-11T07:16:19 | 2013-05-06T05:24:57 | Python | UTF-8 | Python | false | false | 7,414 | py | #!/bin/python
import psycopg2
import psycopg2.extras
import json
from collections import OrderedDict
# Connect to the local ACS database. RealDictCursor returns each row as a
# dict keyed by column name, which the extraction code below depends on.
conn = psycopg2.connect(database='postgres')
cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
# Geography to extract: state + logical record number within the state.
state = 'IL'
logrecno = '89' # Evanston city, IL
def sum(data, *columns):
    """Add up the given columns of one database row, treating None as
    "missing" rather than as zero.

    Returns None only when *every* requested column is None; otherwise
    returns the total of the non-None values.

    NOTE: deliberately keeps the original name even though it shadows the
    built-in ``sum`` — the rest of this script calls it by that name.
    """
    total = None
    for column in columns:
        value = data[column]
        if value is None:
            continue
        # Bug fix: the old reduce()-based version tested truthiness, so a
        # legitimate 0 was treated like a missing value (0 + 0 -> None).
        total = value if total is None else total + value
    return total
def maybe_int(i):
    """Cast truthy values to int; pass falsy values (None, '', 0) through."""
    if i:
        return int(i)
    return i
# Assemble the output document section by section, one ACS table per section.
doc = dict(population=dict(), geography=dict(), education=dict())

# Geography header: place name, state abbreviation and summary level.
cur.execute("SELECT * FROM acs2010_1yr.geoheader WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['geography'] = dict(name=data['name'],
                        stusab=data['stusab'],
                        sumlevel=data['sumlevel'])

# B01002: median age, overall and by sex.
cur.execute("SELECT * FROM acs2010_1yr.B01002 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['population']['median_age'] = dict(total=maybe_int(data['b010020001']),
                                       male=maybe_int(data['b010020002']),
                                       female=maybe_int(data['b010020003']))

# B01003: total population.
cur.execute("SELECT * FROM acs2010_1yr.B01003 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['population']['total'] = maybe_int(data['b010030001'])

# B01001: sex by age — re-bucketed into decades by summing adjacent columns.
cur.execute("SELECT * FROM acs2010_1yr.B01001 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['population']['gender'] = OrderedDict([
    ('0-9', dict(male=maybe_int(sum(data, 'b010010003', 'b010010004')),
                 female=maybe_int(sum(data, 'b010010027', 'b010010028')))),
    ('10-19', dict(male=maybe_int(sum(data, 'b010010005', 'b010010006', 'b010010007')),
                   female=maybe_int(sum(data, 'b010010029', 'b010010030', 'b010010031')))),
    ('20-29', dict(male=maybe_int(sum(data, 'b010010008', 'b010010009', 'b010010010', 'b010010011')),
                   female=maybe_int(sum(data, 'b010010032', 'b010010033', 'b010010034', 'b010010035')))),
    ('30-39', dict(male=maybe_int(sum(data, 'b010010012', 'b010010013')),
                   female=maybe_int(sum(data, 'b010010036', 'b010010037')))),
    ('40-49', dict(male=maybe_int(sum(data, 'b010010014', 'b010010015')),
                   female=maybe_int(sum(data, 'b010010038', 'b010010039')))),
    ('50-59', dict(male=maybe_int(sum(data, 'b010010016', 'b010010017')),
                   female=maybe_int(sum(data, 'b010010040', 'b010010041')))),
    ('60-69', dict(male=maybe_int(sum(data, 'b010010018', 'b010010019', 'b010010020', 'b010010021')),
                   female=maybe_int(sum(data, 'b010010042', 'b010010043', 'b010010044', 'b010010045')))),
    ('70-79', dict(male=maybe_int(sum(data, 'b010010022', 'b010010023')),
                   female=maybe_int(sum(data, 'b010010046', 'b010010047')))),
    ('80+', dict(male=maybe_int(sum(data, 'b010010024', 'b010010025')),
                 female=maybe_int(sum(data, 'b010010048', 'b010010049'))))
])

# B15001: educational attainment, summed across both sexes and all age bands.
cur.execute("SELECT * FROM acs2010_1yr.B15001 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['education']['attainment'] = OrderedDict([
    ('<9th Grade', maybe_int(sum(data, 'b150010004', 'b150010012', 'b150010020', 'b150010028', 'b150010036', 'b150010045', 'b150010053', 'b150010061', 'b150010069', 'b150010077'))),
    ('9th-12th Grade (No Diploma)', maybe_int(sum(data, 'b150010005', 'b150010013', 'b150010021', 'b150010029', 'b150010037', 'b150010046', 'b150010054', 'b150010062', 'b150010070', 'b150010078'))),
    ('High School Grad/GED/Alt', maybe_int(sum(data, 'b150010006', 'b150010014', 'b150010022', 'b150010030', 'b150010038', 'b150010047', 'b150010055', 'b150010063', 'b150010071', 'b150010079'))),
    ('Some College (No Degree)', maybe_int(sum(data, 'b150010007', 'b150010015', 'b150010023', 'b150010031', 'b150010039', 'b150010048', 'b150010056', 'b150010064', 'b150010072', 'b150010080'))),
    ('Associate Degree', maybe_int(sum(data, 'b150010008', 'b150010016', 'b150010024', 'b150010032', 'b150010040', 'b150010049', 'b150010057', 'b150010065', 'b150010073', 'b150010081'))),
    ('Bachelor Degree', maybe_int(sum(data, 'b150010009', 'b150010017', 'b150010025', 'b150010033', 'b150010041', 'b150010050', 'b150010058', 'b150010066', 'b150010074', 'b150010082'))),
    ('Graduate or Professional Degree', maybe_int(sum(data, 'b150010010', 'b150010018', 'b150010026', 'b150010034', 'b150010042', 'b150010051', 'b150010059', 'b150010067', 'b150010075', 'b150010083')))
])

# C16001: language spoken at home.
cur.execute("SELECT * FROM acs2010_1yr.C16001 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['language'] = OrderedDict([
    ('English Only', maybe_int(data['c160010002'])),
    ('Spanish', maybe_int(data['c160010003'])),
    ('French', maybe_int(data['c160010004'])),
    ('German', maybe_int(data['c160010005'])),
    ('Slavic', maybe_int(data['c160010006'])),
    ('Other Indo-European', maybe_int(data['c160010007'])),
    ('Korean', maybe_int(data['c160010008'])),
    ('Chinese', maybe_int(data['c160010009'])),
    ('Vietnamese', maybe_int(data['c160010010'])),
    # NOTE(review): "Tagalong" is presumably a typo for "Tagalog"; kept
    # as-is because it is an output key that consumers may depend on.
    ('Tagalong', maybe_int(data['c160010011'])),
    ('Other Asian', maybe_int(data['c160010012'])),
    ('Other & Unspecified', maybe_int(data['c160010013']))
])

# B27010: health-insurance coverage combinations, summed across age groups.
cur.execute("SELECT * FROM acs2010_1yr.B27010 WHERE stusab=%s AND logrecno=%s;", [state, logrecno])
data = cur.fetchone()
doc['insurance'] = OrderedDict([
    ('No Insurance', maybe_int(sum(data, 'b270100017', 'b270100033', 'b270100050', 'b270100053'))),
    ('Employer Only', maybe_int(sum(data, 'b270100004', 'b270100020', 'b270100036', 'b270100054'))),
    ('Direct-Purchase Only', maybe_int(sum(data, 'b270100005', 'b270100021', 'b270100037', 'b270100055'))),
    ('Medicare Only', maybe_int(sum(data, 'b270100006', 'b270100022', 'b270100038'))),
    ('Medicaid/Means-Tested Only', maybe_int(sum(data, 'b270100007', 'b270100023', 'b270100039'))),
    ('Tricare/Military Only', maybe_int(sum(data, 'b270100008', 'b270100024', 'b270100040', 'b270100056'))),
    ('VA Health Care Only', maybe_int(sum(data, 'b270100009', 'b270100025', 'b270100041', 'b270100057'))),
    ('Employer+Direct Purchase', maybe_int(sum(data, 'b270100011', 'b270100027', 'b270100043', 'b270100058'))),
    ('Employer+Medicare', maybe_int(sum(data, 'b270100012', 'b270100028', 'b270100044', 'b270100059'))),
    ('Direct+Medicare', maybe_int(sum(data, 'b270100045', 'b270100060'))),
    ('Medicare+Medicaid', maybe_int(sum(data, 'b270100013', 'b270100029', 'b270100046', 'b270100061'))),
    ('Other Private-Only', maybe_int(sum(data, 'b270100014', 'b270100030', 'b270100047', 'b270100062'))),
    ('Other Public-Only', maybe_int(sum(data, 'b270100015', 'b270100031', 'b270100048', 'b270100064'))),
    ('Other', maybe_int(sum(data, 'b270100016', 'b270100032', 'b270100049', 'b270100065')))
])

# Python 2 print statement — this script predates Python 3.
print json.dumps(doc, indent=2)
| [
"ian.dees@gmail.com"
] | ian.dees@gmail.com |
f9c568a46854f97c14938d17f5845aa1f9cf72f9 | 915ea8bcabf4da0833d241050ef226100f7bd233 | /SDKs/Python/test/test_contract_item.py | d3f8d89ca8fd4f3b3678876eb22038d67bad2eb9 | [
"BSD-2-Clause"
] | permissive | parserrr/API-Examples | 03c3855e2aea8588330ba6a42d48a71eb4599616 | 0af039afc104316f1722ee2ec6d2881abd3fbc07 | refs/heads/master | 2020-07-10T22:17:24.906233 | 2019-08-26T03:06:21 | 2019-08-26T03:06:21 | 204,382,917 | 0 | 0 | null | 2019-08-26T02:48:16 | 2019-08-26T02:48:15 | null | UTF-8 | Python | false | false | 922 | py | # coding: utf-8
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.contract_item import ContractItem # noqa: E501
from swagger_client.rest import ApiException
class TestContractItem(unittest.TestCase):
    """Unit-test stubs for the ContractItem model."""

    def setUp(self):
        # No fixtures required yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testContractItem(self):
        """Test ContractItem"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.contract_item.ContractItem()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly: python test_contract_item.py
    unittest.main()
| [
"christopher.volpi@mindbodyonline.com"
] | christopher.volpi@mindbodyonline.com |
593d512b094c27ccc5ddd8c5e9a4aea17a001183 | 35673a5d855b68cc2674e7d7c728784b61ddd6cc | /fashion_mnist_keras_cnn.py | 433c87a4fa7d5b4ffd102b04145b75567dd8a431 | [] | no_license | MinaSeddik/MachineLearning | d5f4a29e7f72ee841a4ce0f9e0aa74f688f5c20b | 54561def4a39c804e3b52c88a84e211eecdc8a25 | refs/heads/master | 2020-08-07T01:05:10.916865 | 2020-02-09T15:07:03 | 2020-02-09T15:07:03 | 213,226,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,235 | py | import logging
import numpy as np
from keras.layers import Dense, Conv2D, Flatten, MaxPooling2D, Dropout
from keras.models import Sequential
from keras.optimizers import Adam
from keras.utils import to_categorical
from fashion_mnist_reader import load_fashion_mnist_dataset
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
if __name__ == '__main__':
    img_size = 28  # Fashion-MNIST images are 28x28 grayscale

    # load Fashion MNIST dataset
    logger.debug('loading Fashion MNIST dataset')
    x_train, y_train_label, x_test, y_test_label = load_fashion_mnist_dataset()

    # reshape the x_train and x_test to fit into the convolutional NN model
    # (adds the trailing single-channel axis that Conv2D expects)
    x_train = x_train.reshape(x_train.shape[0], img_size, img_size, 1)
    x_test = x_test.reshape(x_test.shape[0], img_size, img_size, 1)

    # we need to convert the label to hot encoded format
    y_train = to_categorical(y_train_label)
    y_test = to_categorical(y_test_label)

    logger.debug('x_train shape = %s', x_train.shape)
    logger.debug('y_train shape = %s', y_train.shape)
    logger.debug('Before Normalizing, First Image = \n%s', x_train[0])

    # prepare and normalize the training and test sets
    # (pixel values scaled from [0, 255] to [0, 1])
    x_train = x_train / 255.0
    x_test = x_test / 255.0

    logger.debug('After Normalizing, First Image = \n%s', x_train[0])
    logger.debug('First Label = %s', y_train_label[0])
    logger.debug('First Hot encoded Label = %s', y_train[0])

    # create model
    model = Sequential()
    # add model layers: two conv blocks, pooling, dropout, then a dense head
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=(img_size, img_size, 1)))
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))  # 10 Fashion-MNIST classes

    model.summary(print_fn=lambda line: logger.info(line))

    # compile the model
    learning_rate = 0.001
    logger.info('Compile the model with Learning rate = %f', learning_rate)
    model.compile(Adam(lr=learning_rate), loss='categorical_crossentropy', metrics=['accuracy'])

    # train the model (10% of the training set held out for validation)
    model.fit(x_train, y_train, validation_split=0.1, batch_size=32, epochs=100, verbose=2)

    test_loss, test_acc = model.evaluate(x_test, y_test)
    logger.info("Test accuracy: %.2f%%" % (test_acc * 100))

    # Save the model to disk.
    model_file_path = 'saved_models/fashion_mnist_keras_cnn.h5'
    model.save_weights(model_file_path)

    # Load the model from disk later using:
    # model.load_weights(model_file_path)

    # make class prediction
    # rounded_predictions = model.predict_classes(x_test)

    # Predict on the test images.
    predictions = model.predict(x_test)

    # HINT: the prediction is ratio of all classes per test row
    logger.debug('x_test shape = %s', x_test.shape)
    logger.debug('predictions shape = %s', predictions.shape)

    # get the max class for each
    y_predict = np.argmax(predictions, axis=1)

    logger.debug("Display the First 10 test Image's predictions:")
    logger.debug('\tLabel\t\tPredicted Label:')
    for i in range(0, 10):
        logger.debug('\t%d\t\t%d', y_test_label[i], y_predict[i])
| [
"menaibrahim2006@yahoo.com"
] | menaibrahim2006@yahoo.com |
bc4dde6205e2dc08c3f1b2c7b8d97523b58c76b8 | 8b00e2b136636841b38eb182196e56f4721a1e4c | /trio/_core/_exceptions.py | 45f21d389ae8d6f15662d6ff796adfea373bad80 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | xyicheng/trio | 77c8c1e08e3aa4effe8cf04e879720ccfcdb7d33 | fa091e2e91d196c2a57b122589a166949ea03103 | refs/heads/master | 2021-01-23T00:05:59.618483 | 2017-03-16T04:25:05 | 2017-03-16T04:25:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,364 | py | import attr
# Re-exported: the public exception/result types of this module.
__all__ = [
    "TrioInternalError", "RunFinishedError", "WouldBlock",
    "Cancelled", "PartialResult",
]
class TrioInternalError(Exception):
    """Raised by :func:`run` when trio itself hits a bug, or (possibly)
    when one of the low-level :mod:`trio.hazmat` APIs is misused.

    This should never happen! If you get this error, please file a bug.

    Unfortunately it also means that all bets are off: trio no longer
    knows what state it is in, and its usual invariants may not hold (it
    might, for example, have "lost track" of a task — or of every task).
    But again: this shouldn't happen.
    """
    pass

# Present the class as living in the public "trio" namespace.
TrioInternalError.__module__ = "trio"
class RunFinishedError(RuntimeError):
    """Raised by ``run_in_trio_thread`` (and similar helpers) when the
    matching :func:`trio.run` call has already completed.
    """
    pass

# Present the class as living in the public "trio" namespace.
RunFinishedError.__module__ = "trio"
class WouldBlock(Exception):
    """Raised by the ``X_nowait`` family of functions when the blocking
    ``X`` call would have had to wait.
    """
    pass

# Present the class as living in the public "trio" namespace.
WouldBlock.__module__ = "trio"
class Cancelled(BaseException):
    """Raised inside blocking calls when the surrounding scope has been
    cancelled.

    Let this exception propagate, so the relevant cancel scope can catch
    it. As a reminder, it derives from :exc:`BaseException` — just like
    :exc:`KeyboardInterrupt` and :exc:`SystemExit`.

    .. note::

       The one-"l" spelling "canceled" is also common in the US, but it
       is a recent, US-specific innovation, and even there both forms
       are widely used. For consistency with the rest of the world — and
       with "cancellation", which always has two "l"s — trio uses the
       two-"l" spelling everywhere.
    """
    # Set by the cancel scope that delivered this exception.
    _scope = None

# Present the class as living in the public "trio" namespace.
Cancelled.__module__ = "trio"
# Immutable record of how much data was transmitted before an operation
# stopped early (e.g. a partial send).
@attr.s(slots=True, frozen=True)
class PartialResult:
    # XX
    # Number of bytes actually sent before the operation stopped.
    bytes_sent = attr.ib()
| [
"njs@pobox.com"
] | njs@pobox.com |
537ecd9ff7dea52514e94a67ec8488f4a88abd28 | 10f1f4ce92c83d34de1531e8e891f2a074b3fefd | /graph/gcn_utils/feeder.py | 9b012bf3355a26228cac9c53bbd94c997bfe56d8 | [
"MIT"
] | permissive | sourabhyadav/test_track | d88c4d35753d2b21e3881fc10233bf7bbb1e2cec | d2b4813aaf45dd35db5de3036eda114ef14d5022 | refs/heads/master | 2021-01-06T12:38:56.883549 | 2020-02-05T07:08:46 | 2020-02-05T07:08:46 | 241,328,706 | 1 | 0 | MIT | 2020-02-18T10:06:14 | 2020-02-18T10:06:13 | null | UTF-8 | Python | false | false | 2,751 | py | '''
Author: Guanghan Ning
E-mail: guanghan.ning@jd.com
October 24th, 2018
Feeder of Siamese Graph Convolutional Networks for Pose Tracking
Code partially borrowed from:
https://github.com/yysijie/st-gcn/blob/master/feeder/feeder.py
'''
# sys
import os
import sys
import numpy as np
import random
import pickle
import json
# torch
import torch
import torch.nn as nn
from torchvision import datasets, transforms
# operation
from . import tools
import random
class Feeder(torch.utils.data.Dataset):
    """ Feeder of PoseTrack Dataset

    Produces Siamese graph pairs for pose tracking: each __getitem__ call
    returns (graph_a, graph_b, label), where with probability 0.5 the pair
    is a positive (matching, label 1) sample and otherwise a negative
    (label 0) sample. Each graph is a numpy array of shape (C, T, V, M) =
    (2, 1, 15, 1): x/y channels, one frame, 15 joints, one person.

    Arguments:
        data_path: path to a pickled list of positive graph pairs
        data_neg_path: path to a pickled list of negative graph pairs
        ignore_empty_sample: stored for interface compatibility
            (NOTE(review): currently unused in this class — confirm intent)
        debug: If true, only use the first 100 samples
            (NOTE(review): stored but never applied here — confirm intent)
    """

    def __init__(self,
                 data_path,
                 data_neg_path,
                 ignore_empty_sample=True,
                 debug=False):
        self.debug = debug
        self.data_path = data_path
        self.neg_data_path = data_neg_path
        self.ignore_empty_sample = ignore_empty_sample

        self.load_data()

    def load_data(self):
        """Load both pickled pair lists and fix the output dimensions."""
        with open(self.data_path, 'rb') as handle:
            self.graph_pos_pair_list_all = pickle.load(handle)
        with open(self.neg_data_path, 'rb') as handle:
            self.graph_neg_pair_list_all = pickle.load(handle)

        # output data shape (N, C, T, V, M); N is capped at the shorter of
        # the two lists so every index has both a positive and a negative.
        self.N = min(len(self.graph_pos_pair_list_all),
                     len(self.graph_neg_pair_list_all))  # sample
        self.C = 2   # channel (x, y coordinates)
        self.T = 1   # frame
        self.V = 15  # joint
        self.M = 1   # person

    def __len__(self):
        return self.N

    def __iter__(self):
        # Bug fix: the original returned `self` without defining __next__,
        # so iter(feeder) produced a broken iterator object. Iterate by
        # index instead, which matches the Dataset indexing contract.
        for index in range(len(self)):
            yield self[index]

    def __getitem__(self, index):
        # Coin flip: positive (matching) pair with label 1, else negative.
        if random.uniform(0, 1) > 0.5:
            # output shape (C, T, V, M)
            sample_graph_pair = self.graph_pos_pair_list_all[index]
            label = 1  # a pair should match
        else:
            sample_graph_pair = self.graph_neg_pair_list_all[index]
            label = 0  # a pair does not match

        data_numpy_pair = []
        for siamese_id in range(2):
            # fill data_numpy: x coords into channel 0, y coords into channel 1
            data_numpy = np.zeros((self.C, self.T, self.V, 1))
            pose = sample_graph_pair[:][siamese_id]
            data_numpy[0, 0, :, 0] = [x[0] for x in pose]
            data_numpy[1, 0, :, 0] = [x[1] for x in pose]
            data_numpy_pair.append(data_numpy)

        return data_numpy_pair[0], data_numpy_pair[1], label
| [
"chenhaomingbob@163.com"
] | chenhaomingbob@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.