blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c6d52a4e06270dddf04863bcdef9de3cc07487dd | 09ca09bb5145d47aaefe7d521e420cbfe4984eaf | /pb_server_reference/twistedpb_server_1.2/server.py | b6956904a0ef1d53270f23c8e95ffa4485a53861 | [] | no_license | bendemott/bend | 2dfad27a72c1c3392c9d433cf5321dfd89c41ae7 | 8f6e57049324a21a37ab54ad4f3a644a413e6f0e | refs/heads/master | 2020-04-06T07:00:57.135879 | 2013-07-13T19:45:16 | 2013-07-13T19:45:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,347 | py | #! /usr/bin/python
'''
This is a Twisted Prospective Broker Server Application Server
Prospective Broker is a set of api's to handle remote objects and communication
between client and server.
This server is an alpha version and will contain logic to test new concepts and
methodologies around Perspective Broker.
Goals:
-Detach main server functionality from applications. (done)
-Support multiple applications that can Attach/Detach/Re-attach to the PB
server at anytime. (done, additional functionality needs to be defined for reloading apps, etc)
-Support the reloading of individual applications (...)
-Properly destroy application references and scope information (??? haven't explored this yet)
-Provide consistent mechanism to store and represent STATE information
across all connections.
-Provide central and standardized authentication mechanisms and configs.
-Provide consistent configuration app configuration interface.
-Define built-in modules like 'server-status', 'server-admin', 'app-manager'
these probably should stem from the root objects methods if they are built
in functionality.
-Define cluster api / PB Cluster functionality.
'''
__author__="Ben"
__date__ ="Jan 12, 2011 6:19:54 PM"
_port = 16030
from twisted.spread import pb
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.internet import protocol, defer
from twisted.web.server import NOT_DONE_YET
from twisted.python import log
import sys
log.startLogging(sys.stdout)
sys.path.append("./site-packages")
sys.path.append("../../twisted_pbplugins")
import time
import os
import os.path
from pbplugins0_2 import PbServerRoot, PbServerFactory
#TODO figure out how to tack client-disconnects
#TODO figure out auth model/ldap integration (do we need to extend/modify?)
if __name__ == '__main__':
    # Python 2 script: announce the configured listening port first.
    print "PBServer Port: %s" % _port
    # Resolve application paths relative to this file so the server can be
    # launched from any working directory.
    cpath = os.path.abspath(os.path.dirname(__file__))
    root = PbServerRoot()
    # module path
    # Register each PB application: name -> on-disk module path.
    root.register_app("ordermanage", os.path.join(cpath, "../OrderManageApplication"))
    root.register_app("order", os.path.join( cpath, "../review/twisted"))
    #root.register_app("ordersearchapp", os.path.join(cpath, "../OrderSearchApplication"))
    print "All applications successfully registered"
    # Serve the PB root object over TCP and enter the reactor loop (blocks).
    reactor.listenTCP(_port, PbServerFactory(root))
    reactor.run()
| [
"ben.demott@gmail.com"
] | ben.demott@gmail.com |
8bdff3b2d87095e5942e3af9fd25eca4d179f41d | 9e9ec4ad8430933c9c63bfa9539ccbd8a04b4bf5 | /course/urls.py | 06d09e7be4155e38f7354877528a342b9737d4b4 | [] | no_license | dmnhwjin/Django_Test_Platform | 34bceb2cafbef575b61bac0eacd583b6580bfc85 | 567174a30bf5404f45cf8ac4e7011ba3ab262011 | refs/heads/master | 2022-02-10T06:47:22.068682 | 2019-05-08T12:51:10 | 2019-05-08T12:51:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | from django.conf.urls import url
from django.views.generic import TemplateView
from .views import CourseListView,ManageCourseListView
from . import views
urlpatterns = [
url(r'^about/$',TemplateView.as_view(template_name='course/about.html'),name='about'),
url(r'^course-list/$',CourseListView.as_view(),name='course_list'),
url(r'^manage-course/$',ManageCourseListView.as_view(),name='manage_course'),
# url(r'^create-course/$',CreateCourseView.as_view(),name='create_course'),
url(r'^create-course/$',views.CreateCourseView.as_view(),name='create_course'),
url(r'^delete-course/(?P<pk>\d+)/$',views.DeleteCourseView.as_view(),name='delete_course')
] | [
"q95linyu@163.com"
] | q95linyu@163.com |
166351139c35e29e21caa9e41e3d1bad68ff61a9 | a4d087f5a4fcc9998ae5f84c393b42cbc26d6c99 | /plotRN.py | c84b03a2c636c7de4fa3edf01bc0f8e3e803872e | [] | no_license | zhuww/noisemodel | 49780390e97f31ab515024ea883bf5835bcf232c | 4c545692518842ed68bcb324c906c0c0ccba5288 | refs/heads/master | 2018-12-29T18:57:59.435146 | 2015-02-05T10:22:59 | 2015-02-05T10:22:59 | 24,010,531 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,870 | py | import numpy as np
import scipy.linalg as sl
import os, sys
import time
from numpy import dot
from datatools.tempo import *
from fortran_utils import *
from pylab import *
from scipy.optimize import fmin, minimize, fmin_powell
from rankreduced import get_rr_rep, pl_psd
np.set_printoptions(precision=3, suppress=True)
secperday = 86400
dayperyear = 365.24218967
secperyear = secperday*dayperyear
EFAC_default = 1.0
ECORR_default = 1.e-6
EQUAD_default = 1.e-6
LARGE_NUMBER = np.exp(300)
ampfac = ((secperyear*1e6)/(2.0*np.pi*np.sqrt(3.0)))
tstart = time.time()
tf = TOAfile('1713.Sep.T2.tim')
Tstart = float(tf.start)
Tspan = float(tf.end - tf.start)/dayperyear #in unit of year
toas = np.array([(float(toa.TOA)-Tstart)/dayperyear for toa in tf.toalist]) #in unit of year
md = model('1713.Oct.omdot.par')
#md = model('1713_21yr_test.par')
#md = model('1713_21yr_JAE.par')
md.tempofit(tf, DesignMatrix=True)
T2EFAC = [par for par in md.__dict__ if par.startswith('T2EFAC')]
T2EQUAD = [par for par in md.__dict__ if par.startswith('T2EQUAD')]
T2ECORR = [par for par in md.__dict__ if par.startswith('ECORR')]
if 'RNAMP' in md.__dict__:
RNAMP = np.abs(float(md.RNAMP))
else:
RNAMP = np.abs(0.06)
md.manifest.append('RNAMP')
if 'RNIDX' in md.__dict__:
RNIDX = float(md.RNIDX)
else:
RNIDX = -2.17
md.manifest.append('RNIDX')
groups = {}
flaglist = []
for par in T2EQUAD:
flag = ' '.join(par.split()[1:3])
groups[flag] = tf.othergroups[flag]
flaglist.append(flag)
md.average(lapse=0.0001, groups=groups)
t,w,M = read_design() #read in the design matrix
#u, s, v = numpy.linalg.svd(A) # svd decomposition
#Np = len(s)
#F = u[..., :Np] #extract the F matrixd
#G = u[..., Np:] #extract the G matrrix
r = np.array(md.res)
n = r.shape[0]
#m = Np
#M = A
""" Setup matrix U and epoch-avearging matrix EA"""
ECORRtags = [' '.join(tag.split()[1:3]) for tag in T2ECORR]
S_idx = []
stack = []
aveeph = []
ECORRlist = []
for key in T2ECORR:
flag = ' '.join(key.split()[1:3])
if key in md.__dict__:
ECORRlist.append(float(md.__dict__[key]))
S_idx.append(0)
for epochs in sorted(md.toagrps[flag].keys()):
aveeph.append(epochs)
idx = md.toagrps[flag][epochs]
S_idx[-1] += 1
l = np.zeros(n)
l[idx] = 1.
stack.append(l)
S_idx[-1] = np.array(S_idx[-1])
S_ele = np.hstack([ ECORRlist[i]**2 *np.ones(k) for i,k in enumerate(S_idx)])
U = (np.vstack(stack)).T
UT = U.T
""" Setup EFAC, EQUAD"""
EFACtags = [' '.join(tag.split()[1:3]) for tag in T2EFAC]
EQUADtags = [' '.join(tag.split()[1:3]) for tag in T2EQUAD]
EFACidx = {}
EQUADidx = {}
SIGMA = []
EFAClist = []
EQUADlist = []
for tag in EFACtags:
EFACidx[tag] = []
EFAC = float(md.__dict__['T2EFAC ' + tag])
EFAClist.append(EFAC)
for tag in EQUADtags:
EQUADidx[tag] = []
EQUAD = float(md.__dict__['T2EQUAD ' + tag])
EQUADlist.append(EQUAD)
for i, toa in enumerate(tf.toalist):
SIGMA.append(float(toa.TOAsigma))
TOAflags = [('-%s %s' % (f,toa.flags[f])) for f in toa.flags]
try:
key = (set(EFACtags) & set(TOAflags)).pop()
EFACidx[key].append(i)
except:
EFAC = EFAC_default
key = (set(EQUADtags) & set(TOAflags)).pop()
EQUADidx[key].append(i)
"""Setup red noise Fourier matrix, Fr"""
freq , F = get_rr_rep(toas, Tspan, 1./Tspan/4.7, 50, 20)
p0 = [float(md.__dict__[p]) for p in T2EFAC]
p1 = [np.log(float(md.__dict__[p])) for p in T2EQUAD]
p2 = [np.log(float(md.__dict__[p])) for p in T2ECORR]
p3 = [np.log(RNAMP), RNIDX]
np0 = len(p0)
np1 = np0 + len(p1)
np2 = np1 + len(p2)
np3 = np2 + len(p3)
plist = np.array(p0 + p1 + p2 + p3)
def plotRN(plist, logspace=True):
    """Evaluate the timing-noise likelihood for the flat parameter vector
    *plist* and plot/save the recovered red-noise realization.

    Layout of *plist* (slice boundaries np0/np1/np2/np3 are module globals):
    [EFACs | EQUADs | ECORRs | RNAMP, RNIDX].  When *logspace* is True the
    EQUAD/ECORR/RNAMP entries are interpreted as natural logs and a Jacobian
    term is subtracted for the change of coordinates.

    Side effects: draws an errorbar plot of the realization versus `toas`,
    saves (y, yerr) to 'RNrealization.npy', and calls show().

    :return: LogLike -- NOTE(review): given the 0.5*(chi2 + logdet) form this
        looks like a *negative* log-likelihood; confirm against the
        minimizer that consumes it.
    """
    tstart = time.time()
    """setup parameters"""
    # Slice the flat vector into EFAC / EQUAD / ECORR / red-noise pieces.
    p0 = np.array(plist[:np0])
    p1 = np.array(plist[np0:np1])
    p2 = np.array(plist[np1:np2])
    p3 = np.array(plist[np2:np3])
    if logspace:
        CoordTransTerm = (np.sum(p1) + np.sum(p2) + p3[0]) #sume all the logterm for coordinate transformation dz = z d (ln(z))
        p1 = np.exp(p1)
        p2 = np.exp(p2)
        p3[0] = np.exp(p3[0])
    else:
        CoordTransTerm = 0.
    """Load parameters"""
    # White-noise diagonal per TOA: (EFAC * sigma)^2 + EQUAD^2.
    efac = np.ones(n)
    for i,tag in enumerate(EFACtags):
        efac[EFACidx[tag]] = p0[i]
    Nvec = np.array(SIGMA * efac)**2
    equad = np.zeros(n)
    for i,tag in enumerate(EQUADtags):
        equad[EQUADidx[tag]] = p1[i]
    Nvec += (equad**2)
    # Epoch-correlated (ECORR) variances, one value repeated per group.
    S_ele = np.hstack([ p2[i]**2. *np.ones(k) for i,k in enumerate(S_idx)])
    RNAMP = np.abs(p3[0]) #/ ampfac
    RNIDX = p3[1]
    #phi = (RNAMP**2)/12/np.pi/np.pi *fyr**(-3-RNIDX) * f**RNIDX
    # Power-law prior on the red-noise Fourier coefficients.
    phi = (RNAMP**2) * freq ** RNIDX #assuming f is is 1/yr units.
    """Putting the noise models together"""
    # T stacks the timing design matrix M, the Fourier basis F and the
    # epoch-averaging matrix U into one basis.
    T = np.hstack((M, F, U))
    """Putting the timing and noise parameters together """
    m = M.shape[1]
    n_f = F.shape[1]
    n_j = U.shape[1]
    n_p = T.shape[1]
    # Prior variances: effectively unconstrained for the timing parameters
    # (LARGE_NUMBER), power-law for the Fourier modes, ECORR for the jumps.
    Phi_I = np.zeros((n_p, n_p))
    Pars = [LARGE_NUMBER for i in range(m)]
    Pars += list(phi)
    Pars += list(S_ele)
    Pi = 1./np.array(Pars)
    np.fill_diagonal(Phi_I, Pi)
    """compute likelyhood"""
    d = dot(T.T, r/Nvec)
    Sigma = Phi_I + dot(T.T, (1./Nvec * T.T).T)
    #print d.shape, Sigma.shape
    try:
        cfSigma = sl.cho_factor(Sigma)
    #except LinAlgError:
    except :
        # Python-2 print; dumps the matrix when the Cholesky factorization
        # fails (NOTE(review): execution then continues with cfSigma unbound).
        print Sigma
    logdetSigma = 2.*np.sum(np.log(np.diag(cfSigma[0])))
    Sid = sl.cho_solve(cfSigma, d)
    dSid = np.dot(d.T, Sid)
    logdetCt = np.sum(np.log(np.array(Pars[m:])))
    logdetN = np.sum(np.log(Nvec))
    Nir = r / Nvec
    # chi^2 term minus the low-rank correction, plus the log-determinants.
    LogLike1 = 0.5 * ( dot(r.T, Nir) - dSid)
    LogLike2 = 0.5 * ( logdetN + logdetSigma + logdetCt)
    LogLike = LogLike1 + LogLike2 - CoordTransTerm
    # Posterior coefficients for all basis columns; keep only the Fourier
    # block (the red-noise realization).
    b = sl.cho_solve(cfSigma, d.T)
    bphi = b[m:m+n_f]
    phi_I = np.zeros((n_f, n_f))
    np.fill_diagonal(phi_I, 1./phi)
    FT = F.T
    sigma = phi_I #+ dot(FT, (1./Nvec * FT).T)
    cfsigma = sl.cho_factor(sigma)
    # Project the coefficients back to the TOA epochs with an error band.
    y = dot(F, bphi.T)
    yerr = np.sqrt(np.diag(dot(F, sl.cho_solve(cfsigma, FT))))
    errorbar(toas, y, yerr=yerr, fmt='.')
    #plot(toas,y,'.')
    np.save('RNrealization.npy', (y, yerr))
    show()
    return LogLike
from pylab import *
plotRN(plist)
y,yerr = np.load('RNrealization.npy')
#del md,tf
#tf = TOAfile('1713.Sep.T2.tim')
#md = model('1713_21yr_test.par')
ax = subplot(111)
md.tempofit(tf, GLS=True)
md.average()
md.plot('mjd', 'averes', ax=ax)
np.save('tempoGLS.npy', (md.toa, md.res, md.err, md.avetoa, md.averes, md.aveerr))
t, r, e, at, ar, ae = np.load('tempoGLS.npy')
for grp in at:
errorbar(at[grp], ar[grp], yerr=ae[grp], fmt='.')
ty = np.vstack((t, y, yerr)).T
ty = np.array(sorted(ty, key=lambda x:x[0]))
t = ty[:,0]
y = ty[:,1]
yerr = ty[:,2]
#print t.shape, y.shape, yerr.shape
fill_between(t, y+yerr, y-yerr, facecolor='grey')
show()
#print loglikelihood(plist)
| [
"zhuwwpku@gmail.com"
] | zhuwwpku@gmail.com |
6496d16a1187ee2afb1d4f13a192c17ebc29b49a | e71b6d14fbdbc57c7234ca45a47329d7d02fc6f7 | /flask_api/venv/lib/python3.7/site-packages/vsts/work/v4_1/models/board_reference.py | 6e58999b0b5322ab38b6dc0d498ad651c3982709 | [] | no_license | u-blavins/secret_sasquatch_society | c36993c738ab29a6a4879bfbeb78a5803f4f2a57 | 0214eadcdfa9b40254e331a6617c50b422212f4c | refs/heads/master | 2020-08-14T00:39:52.948272 | 2020-01-22T13:54:58 | 2020-01-22T13:54:58 | 215,058,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,204 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class BoardReference(Model):
    """BoardReference.
    A lightweight reference to a board resource (id, display name, REST URL).
    :param id: Id of the resource
    :type id: str
    :param name: Name of the resource
    :type name: str
    :param url: Full http link to the resource
    :type url: str
    """
    # msrest serialization map: attribute name -> wire key and wire type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'}
    }
    def __init__(self, id=None, name=None, url=None):
        # Model.__init__ sets up msrest (de)serialization plumbing.
        super(BoardReference, self).__init__()
        self.id = id
        self.name = name
        self.url = url
| [
"usama.blavins1@gmail.com"
] | usama.blavins1@gmail.com |
a5e9f553c4445d6dc6b90192c0415a3fbc9acd66 | f93dc6941a780e4e117f796d8f32676e5706eb27 | /jagerunner/Configuration.py | 62e82e5cd12213f5aaa7d7af36f73202c5ef9af3 | [] | no_license | ghik/sg-swift | 396caf3dfaa6ac12ceace64d3c19721967092ba3 | 315e6502697d7992e071880379bdb4617085eaae | refs/heads/master | 2020-11-26T15:23:27.107947 | 2013-01-17T13:10:36 | 2013-01-17T13:10:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py |
class Configuration:
driver = 'jageplatform'
execpath = '/home/ghik/sem/Gridy/sg-swift/jage/algorithms/applications/emas-app'
agexml = "classpath:age.xml"
dotreplacer = '_'
constantParameters = ['outfile', 'steps', 'problem_size', 'islands_number', 'individual_chanceToMigrate']
steps = 1000
problem_size = 10
islands_number = 5
individual_chanceToMigrate = 0.001
| [
"roman@student.agh.edu.pl"
] | roman@student.agh.edu.pl |
52f5c5b0eebfe21aefcedfcaa6b683808db9b6ae | 674f1a1d4ed16e8db51e8d663a85b955a244aece | /src/vscript/tests/statements/test_sub.py | eb66b71409125a49bc86ee63ed83a12c549e80af | [] | no_license | VDOMBoxGroup/vdomserver1.3 | a84fcec493cdcbae95fa5b51ba54912ac2ea8d98 | b9df6bcbe3b78dbc3b48ab074150e1a0d31edab4 | refs/heads/master | 2021-01-20T20:08:02.516528 | 2018-05-22T06:06:16 | 2018-05-22T06:06:16 | 62,622,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,009 | py |
from ...testing import raises, VScriptTestCase
from ... import errors
from ...subtypes import mismatch, empty, null, integer, string, double, \
boolean, error, binary, date, array, dictionary, generic, nothing, \
nan, infinity, true, false, v_mismatch, v_empty, v_null, v_nothing
class TestSubStatement(VScriptTestCase):
    """Positive tests: VScript `sub` definitions (both `end` and `end sub`
    terminators) at module scope and inside classes, with and without
    arguments, must execute and yield the expected integer result."""
    def test_sub_statement(self):
        assert self.execute("""
            sub mysub
                result=3
            end
            mysub""").is_integer(3)
        assert self.execute("""
            sub mysub
                result=3
            end sub
            mysub""").is_integer(3)
    def test_sub_with_arguments_statement(self):
        assert self.execute("""
            sub mysub(x, y)
                result=x+y
            end
            mysub(1, 2)""").is_integer(3)
        assert self.execute("""
            sub mysub(x, y)
                result=x+y
            end sub
            mysub(1, 2)""").is_integer(3)
    def test_class_sub_statement(self):
        assert self.execute("""
            class object
                sub myclasssub
                    result=3
                end
            end
            set instance=new object
            instance.myclasssub""").is_integer(3)
        assert self.execute("""
            class object
                sub myclasssub
                    result=3
                end sub
            end class
            set instance=new object
            instance.myclasssub""").is_integer(3)
    def test_class_sub_with_arguments_statement(self):
        assert self.execute("""
            class object
                sub myclasssub(x, y)
                    result=x+y
                end
            end
            set instance=new object
            instance.myclasssub(1, 2)""").is_integer(3)
        assert self.execute("""
            class object
                sub myclasssub(x, y)
                    result=x+y
                end sub
            end class
            set instance=new object
            instance.myclasssub(1, 2)""").is_integer(3)
class TestWrongSubStatement(VScriptTestCase):
    """Negative tests: a `default sub` inside a class is illegal VScript and
    must raise a syntax error at parse time."""
    def test_class_default_function_statement(self):
        with raises(errors.syntax_error):
            assert self.execute("""
                class object
                    default sub myclasssub
                    end
                end""")
        with raises(errors.syntax_error):
            assert self.execute("""
                class object
                    default sub myclasssub
                    end sub
                end class""")
| [
"Nikolay Grishkov@230e8929-42e3-3b44-a413-55659dd3a577"
] | Nikolay Grishkov@230e8929-42e3-3b44-a413-55659dd3a577 |
4d1e79f734bd19c0d0972d5276d3633d0a9326d3 | d1caebb87160a93239abad56188fc8e934b5ba04 | /recipe/bin/chardetect | b338c406b9fe3f3b912a0ab65f77826ddc75669a | [] | no_license | glennneiger/recipebook | 05ae0464227548e340e21557fde580b42fe7b7bc | 5320397d79fce98fd8306b722c594940038f0137 | refs/heads/master | 2020-12-03T22:43:40.944841 | 2020-01-03T01:27:38 | 2020-01-03T01:27:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | #!/Users/priyanka/Desktop/recipe/recipe/recipe/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
    # Strip the "-script.py"/".exe" suffix that setuptools console-script
    # wrappers append on Windows, so help/usage output shows the plain
    # command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"priyanka.renta@gmail.com"
] | priyanka.renta@gmail.com | |
6b5467a41c41da8b62519815c7c1e220f5e91bc1 | 91ad6a972fb75a9c0f47924784a1efbab49dc0dc | /find_all_poss_palindromes.py | bb2daf4ff4ed8613db8666e6700d3bc19621bf16 | [] | no_license | asharma567/practice_problems | ed7d0ce19d18db104151a5d681fe80f3a086d671 | d39b050d24beb5beaab0bbb4d2a3f25c7e82d792 | refs/heads/master | 2021-05-16T02:43:17.265135 | 2018-04-05T18:19:31 | 2018-04-05T18:19:31 | 24,543,026 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 822 | py | '''
finding all possible palindromes problem:
*still looking for a better write up of the problem
'''
# Fixed: the module previously failed to import at all -- the module name
# was misspelled 'itertoools'.
from itertools import permutations

def permute_string(string):
    """For each character of *string*, splice a copy of it into every split
    point of the string and collect the results.

    NOTE(review): this definition is shadowed by the second `permute_string`
    defined just below, so it is effectively dead code; it is kept (and made
    runnable) to preserve the file's structure.  Fixed bugs: `possibilities`
    was never initialised (NameError on first append) and the loop used the
    Python-2-only `xrange`.

    :param string: input string.
    :return: list of the spliced candidate strings (with duplicates).
    """
    possibilities = []
    # starting with the first character move it once to the right
    for char in string:
        for i in range(len(string)):
            before = string[:i + 1]
            after = string[i:]
            perm = before + char + after
            possibilities.append(perm)
    return possibilities
def permute_string(string):
    """Return the distinct permutations of *string* (order unspecified).

    Shadows the earlier definition of the same name.  Fixed: the original
    one-liner had unbalanced brackets (the list-comprehension's closing ']'
    was missing), which made the whole module a SyntaxError.

    :param string: input string.
    :return: list of unique permutation strings.
    """
    # Local import keeps this function usable even though the module's
    # top-level `itertools` import line is broken (typo'd module name).
    from itertools import permutations
    return list(set([''.join(possibility) for possibility in permutations(string, len(string))]))
def palindrome(string):
    """Return True iff *string* reads the same forwards and backwards.

    Fixed: the condition used assignment (`=`) instead of comparison (`==`),
    which is a SyntaxError in Python.

    :param string: input string (the empty string counts as a palindrome).
    :return: bool.
    """
    if string == string[::-1]:
        return True
    return False
def main(string=''):
    """Enumerate candidate strings derived from *string* and flag which of
    them are palindromes.

    Fixed: `string` was previously an undefined free variable (NameError at
    call time); it is now a parameter, defaulting to '' so the original
    zero-argument call `main()` still works.

    :param string: the string whose permutations are tested.
    :return: list of (candidate, is_palindrome) tuples.
    """
    list_of_possibilities = permute_string(string)
    return [(poss, palindrome(poss)) for poss in list_of_possibilities]
| [
"asharma567567@gmail.com"
] | asharma567567@gmail.com |
e7bab45d83b2b3418bbf9dfb6ebb11ed89573d0a | 13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab | /home--tommy--mypy/mypy/lib/python2.7/site-packages/joblib/test/common.py | 038ab537b9faa4cf059cd0c8b2edab674cdc1a7a | [
"Unlicense"
] | permissive | tommybutler/mlearnpy2 | 8ec52bcd03208c9771d8d02ede8eaa91a95bda30 | 9e5d377d0242ac5eb1e82a357e6701095a8ca1ff | refs/heads/master | 2022-10-24T23:30:18.705329 | 2022-10-17T15:41:37 | 2022-10-17T15:41:37 | 118,529,175 | 0 | 2 | Unlicense | 2022-10-15T23:32:18 | 2018-01-22T23:27:10 | Python | UTF-8 | Python | false | false | 3,061 | py | """
Small utilities for testing.
"""
import threading
import signal
import time
import os
import sys
import gc
from joblib._multiprocessing_helpers import mp
from joblib.testing import SkipTest, skipif
# A decorator to run tests only when numpy is available
try:
import numpy as np
def with_numpy(func):
"""A decorator to skip tests requiring numpy."""
return func
except ImportError:
def with_numpy(func):
"""A decorator to skip tests requiring numpy."""
def my_func():
raise SkipTest('Test requires numpy')
return my_func
np = None
# TODO: Turn this back on after refactoring yield based tests in test_hashing
# with_numpy = skipif(not np, reason='Test requires numpy.')
# we use memory_profiler library for memory consumption checks
try:
from memory_profiler import memory_usage
def with_memory_profiler(func):
"""A decorator to skip tests requiring memory_profiler."""
return func
def memory_used(func, *args, **kwargs):
"""Compute memory usage when executing func."""
gc.collect()
mem_use = memory_usage((func, args, kwargs), interval=.001)
return max(mem_use) - min(mem_use)
except ImportError:
def with_memory_profiler(func):
"""A decorator to skip tests requiring memory_profiler."""
def dummy_func():
raise SkipTest('Test requires memory_profiler.')
return dummy_func
memory_usage = memory_used = None
# A utility to kill the test runner in case a multiprocessing assumption
# triggers an infinite wait on a pipe by the master process for one of its
# failed workers
_KILLER_THREADS = dict()
def setup_autokill(module_name, timeout=30):
    """Timeout based suiciding thread to kill the test runner process
    If some subprocess dies in an unexpected way we don't want the
    parent process to block indefinitely.
    :param module_name: key under which the timer is registered in the
        module-level _KILLER_THREADS registry.
    :param timeout: seconds to wait before killing this process.
    """
    # Opt-outs: NO_AUTOKILL env var, or running under the pdb debugger
    # (where long pauses are expected).
    if "NO_AUTOKILL" in os.environ or "--pdb" in sys.argv:
        # Do not install the autokiller
        return
    # Renew any previous contract under that name by first cancelling the
    # previous version (that should normally not happen in practice)
    teardown_autokill(module_name)
    def autokill():
        # Ask politely first (SIGTERM)...
        pid = os.getpid()
        print("Timeout exceeded: terminating stalled process: %d" % pid)
        os.kill(pid, signal.SIGTERM)
        # If were are still there ask the OS to kill ourself for real
        time.sleep(0.5)
        print("Timeout exceeded: killing stalled process: %d" % pid)
        os.kill(pid, signal.SIGKILL)
    # Register and arm the one-shot timer thread.
    _KILLER_THREADS[module_name] = t = threading.Timer(timeout, autokill)
    t.start()
def teardown_autokill(module_name):
    """Cancel the killer timer previously registered for *module_name*.

    A no-op when no timer was registered under that name.
    """
    registered = _KILLER_THREADS.get(module_name)
    if registered is None:
        return
    registered.cancel()
with_multiprocessing = skipif(
mp is None, reason='Needs multiprocessing to run.')
with_dev_shm = skipif(
not os.path.exists('/dev/shm'),
reason='This test requires the /dev/shm shared memory fs.')
| [
"tbutler.github@internetalias.net"
] | tbutler.github@internetalias.net |
a16d77aee837954ac3a8a0501b4049e329e31f53 | 369961a60d92e42ff3273b3e0397590a9ca0b344 | /blog/views.py | b36f5692cd97fb70559e2ef44f8224ef066f0b48 | [] | no_license | monxiaolee/django-blog | 2c0d174a493f27dcb5e9d483a994b894357b0c90 | 6b38d25c5cd7f81dea15da25626ee4c9aece32b6 | refs/heads/master | 2020-03-19T13:52:38.576933 | 2018-06-08T09:40:00 | 2018-06-08T09:40:00 | 136,599,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,797 | py | # -*- coding: UTF-8 -*-
from django.shortcuts import render
# Create your views here.
from comments.forms import CommentForm
from django.http import HttpResponse
import markdown
from django.shortcuts import render, get_object_or_404
from . models import Post, Category
def index(request):
    """Render the blog home page listing every post, newest first."""
    posts = Post.objects.all().order_by('-created_time')
    return render(request, 'blog/index.html', context={'post_list': posts})
def detail(request, pk):
    """Render a single post page: the markdown-rendered body, an empty
    comment form, and the post's existing comments.

    Raises Http404 when no post with the given primary key exists.
    """
    post = get_object_or_404(Post, pk=pk)
    # Render the raw markdown body to HTML before handing it to the template.
    md_extensions = [
        'markdown.extensions.extra',
        'markdown.extensions.codehilite',
        'markdown.extensions.toc',
    ]
    post.body = markdown.markdown(post.body, extensions=md_extensions)
    # Template variables: the post itself, a blank CommentForm, and all
    # comments attached to this post.
    return render(request, 'blog/detail.html', {
        'post': post,
        'form': CommentForm(),
        'comment_list': post.comment_set.all(),
    })
def archives(request, year, month):
    """Render the index page restricted to posts from one year/month,
    newest first."""
    monthly_posts = Post.objects.filter(
        created_time__year=year,
        created_time__month=month,
    ).order_by('-created_time')
    return render(request, 'blog/index.html', context={'post_list': monthly_posts})
def category(request, pk):
    """Render the index page restricted to one category, newest first.

    Raises Http404 when the category does not exist.
    """
    chosen = get_object_or_404(Category, pk=pk)
    in_category = Post.objects.filter(category=chosen).order_by('-created_time')
    return render(request, 'blog/index.html', context={'post_list': in_category})
"15893623902Lee"
] | 15893623902Lee |
2306a63f3458c869472722d03d5f2cd7e91436b8 | dc3df9ba09049caf3c7ecb6879cdb274cdc99bc2 | /SVM/hw5.py | 62c72cc02a3b6404deff6ed7e6751e57ced1e3a4 | [] | no_license | OriZilka/machine_learning | 4b5b1a8e8bc2c71af7fc6173af49a07662ad39e2 | 5dd5305c660c595a7ff476fac8b2ef4bad7c9a9e | refs/heads/master | 2022-12-04T18:46:49.693845 | 2020-08-20T11:15:58 | 2020-08-20T11:15:58 | 288,985,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,650 | py | from numpy import count_nonzero, logical_and, logical_or, concatenate, mean, array_split, poly1d, polyfit, array
from numpy.random import permutation
import pandas as pd
from sklearn.svm import SVC
import matplotlib.pyplot as plt
SVM_DEFAULT_DEGREE = 3
SVM_DEFAULT_GAMMA = 'auto'
SVM_DEFAULT_C = 1.0
ALPHA = 1.5
def prepare_data(data, labels, max_count=None, train_ratio=0.8):
    """
    Shuffle the dataset and split it into train/test parts.
    :param data: a numpy array with the features dataset
    :param labels: a numpy array with the labels
    :param max_count: max amount of samples to work on. can be used for testing
    :param train_ratio: ratio of samples used for train
    :return: train_data: a numpy array with the features dataset - for train
             train_labels: a numpy array with the labels - for train
             test_data: a numpy array with the features dataset - for test
             test_labels: a numpy array with the labels - for test

    Fixed: the original implementation concatenated the labels onto the
    feature matrix before shuffling, which silently cast the labels to the
    features' dtype (e.g. integer labels came back as floats).  Shuffling a
    single permutation of row *indices* keeps every feature row paired with
    its label without any dtype coercion.
    """
    if max_count:
        data = data[:max_count]
        labels = labels[:max_count]
    # One random permutation of the row indices drives both splits.
    order = permutation(len(labels))
    n_train = int(len(order) * train_ratio)
    train_idx = order[:n_train]
    test_idx = order[n_train:]
    # array(...) copies, so callers get fresh arrays (as before).
    train_data = array(data[train_idx])
    train_labels = array(labels[train_idx])
    test_data = array(data[test_idx])
    test_labels = array(labels[test_idx])
    return train_data, train_labels, test_data, test_labels
def get_stats(prediction, labels):
    """
    :param prediction: a numpy array with the 0/1 predictions of the model
    :param labels: a numpy array with the 0/1 target values (labels)
    :return: tpr: true positive rate
             fpr: false positive rate
             accuracy: accuracy of the model given the predictions
    :raises ZeroDivisionError: when labels contain no positives (tpr) or no
        negatives (fpr) -- same behaviour as the original per-sample loop.

    Improvement: the confusion-matrix counts are computed with vectorised
    numpy operations instead of a Python loop over every sample.
    """
    positive_num = int(count_nonzero(labels))
    negative_num = int(labels.shape[0]) - positive_num
    # Vectorised counts; cast to plain ints so the divisions below keep
    # ordinary Python float semantics.
    tp = int(count_nonzero(logical_and(prediction == 1, labels == 1)))
    fp = int(count_nonzero(logical_and(prediction == 1, labels == 0)))
    tn = negative_num - fp
    fn = positive_num - tp
    tpr = tp / (tp + fn)
    fpr = fp / (tn + fp)
    accuracy = (tp + tn) / (tp + fp + tn + fn)
    return tpr, fpr, accuracy
def get_k_fold_stats(folds_array, labels_array, clf):
    """
    :param folds_array: a k-folds arrays based on a dataset with M features and N samples
    :param labels_array: a k-folds labels array based on the same dataset
    :param clf: the configured SVC learner (refit from scratch on every fold)
    :return: mean(tpr), mean(fpr), mean(accuracy) - means across all folds
    """
    tpr = []
    fpr = []
    accuracy = []
    ###########################################################################
    # NOTE(review): the loop below rotates the *input* lists in place
    # (pop(0) ... append at the tail), so folds_array/labels_array are
    # temporarily mutated; their original order is restored once the loop
    # completes.  Callers sharing these lists concurrently would race.
    ###########################################################################
    curr_train_data = folds_array
    curr_train_label = labels_array
    for i in range(len(folds_array)):
        # Hold out the front fold as this round's validation set.
        curr_test_data = curr_train_data.pop(0)
        curr_test_label = curr_train_label.pop(0)
        # Train on the concatenation of the remaining k-1 folds.
        conct_train_data = concatenate(curr_train_data)
        conct_train_label = concatenate(curr_train_label)
        clf.fit(conct_train_data, conct_train_label)
        curr_tpr, curr_fpr, curr_accuracy = get_stats(clf.predict(curr_test_data), curr_test_label)
        tpr.append(curr_tpr)
        fpr.append(curr_fpr)
        accuracy.append(curr_accuracy)
        # Put the held-out fold back at the tail so each fold is held out once.
        curr_train_data.append(curr_test_data)
        curr_train_label.append(curr_test_label)
    ###########################################################################
    #                             END OF YOUR CODE                            #
    ###########################################################################
    return mean(tpr), mean(fpr), mean(accuracy)
def compare_svms(data_array,
                 labels_array,
                 folds_count,
                 kernels_list=('poly', 'poly', 'poly', 'rbf', 'rbf', 'rbf',),
                 kernel_params=({'degree': 2}, {'degree': 3}, {'degree': 4}, {'gamma': 0.005}, {'gamma': 0.05}, {'gamma': 0.5},)):
    """
    Cross-validate one SVM per (kernel, params) configuration and collect the
    mean tpr/fpr/accuracy of each.
    :param data_array: a numpy array with the features dataset
    :param labels_array: a numpy array with the labels
    :param folds_count: number of cross-validation folds
    :param kernels_list: a list of strings defining the SVM kernels
    :param kernel_params: a dictionary with kernel parameters - degree, gamma, c
    :return: svm_df: a dataframe with one row per configuration
             (kernel, kernel_params, tpr, fpr, accuracy)
    """
    data_folds = array_split(data_array, folds_count)
    label_folds = array_split(labels_array, folds_count)
    tpr_col = []
    fpr_col = []
    acc_col = []
    for kernel_name, extra in zip(kernels_list, kernel_params):
        # Fresh learner with the module-level SVM defaults, then apply this
        # configuration's specific overrides.
        learner = SVC(kernel=kernel_name, C=SVM_DEFAULT_C,
                      gamma=SVM_DEFAULT_GAMMA, degree=SVM_DEFAULT_DEGREE)
        learner.set_params(**extra)
        fold_tpr, fold_fpr, fold_acc = get_k_fold_stats(data_folds, label_folds, learner)
        tpr_col.append(fold_tpr)
        fpr_col.append(fold_fpr)
        acc_col.append(fold_acc)
    svm_df = pd.DataFrame()
    svm_df['kernel'] = kernels_list
    svm_df['kernel_params'] = kernel_params
    svm_df['tpr'] = tpr_col
    svm_df['fpr'] = fpr_col
    svm_df['accuracy'] = acc_col
    return svm_df
def get_most_accurate_kernel(df):
    """
    :param df: the accuracy column (any iterable of accuracy values).
    :return: integer row number of the most accurate kernel (first one on ties).
    """
    accuracies = list(df)
    return accuracies.index(max(accuracies))
def get_kernel_with_highest_score(df):
    """
    :param df: the score column (any iterable of score values).
    :return: integer row number of the highest-scoring kernel (first on ties).
    """
    scores = list(df)
    top = max(scores)
    return scores.index(top)
def plot_roc_curve_with_score(df, alpha_slope=1.5):
    """
    :param df: a dataframe containing the results of compare_svms, plus a
        'score' column used to pick the reference point for the score line.
    :param alpha_slope: slope of the linear score line drawn over the points.
    :return: None -- draws the plot and calls plt.show() (blocking).
    """
    x = df.fpr.tolist()
    y = df.tpr.tolist()
    ###########################################################################
    # Intercept chosen so the score line passes through the (fpr, tpr) point
    # of the kernel with the highest score.
    ###########################################################################
    b = y[get_kernel_with_highest_score(df['score'])] - (alpha_slope * x[get_kernel_with_highest_score(df['score'])])
    straight_line = poly1d([alpha_slope, b])
    plt.title("ROC")
    plt.ylabel('Tpr')
    plt.xlabel('Fpr')
    # y-limit above 1 presumably so the score line stays visible -- confirm.
    plt.xlim([0,1.01])
    plt.ylim([0,3])
    # Each (fpr, tpr) pair is one kernel configuration.
    plt.plot(x, y, 'ro', ms=5, mec='green')
    plt.plot([0,1], straight_line([0,1]), "-r")
    plt.show()
    # print(x)
    # print(y)
    ###########################################################################
    #                             END OF YOUR CODE                            #
    ###########################################################################
def evaluate_c_param(data_array, labels_array, folds_count, kernels_list, best_kernel_params):
    """
    Re-run the SVM comparison for the supplied kernels/parameter sets so
    the effect of different C values can be inspected.

    :param data_array: a numpy array with the features dataset
    :param labels_array: a numpy array with the labels
    :param folds_count: number of cross-validation folds
    :param kernels_list: kernel names to evaluate
    :param best_kernel_params: per-kernel parameter dicts (carrying the C
        values under test)
    :return: res: a dataframe with the same columns as `compare_svms`
    """
    # All the heavy lifting (k-fold training and tpr/fpr/accuracy
    # bookkeeping) already lives in compare_svms; delegate to it.
    return compare_svms(data_array, labels_array, folds_count,
                        kernels_list, best_kernel_params)
def get_test_set_performance(train_data, train_labels, test_data, test_labels, best_kernel, best_kernel_params):
    """
    Train an SVM with the chosen kernel on the training set and measure
    its performance on the held-out test set.

    :param train_data: a numpy array with the features dataset - train
    :param train_labels: a numpy array with the labels - train
    :param test_data: a numpy array with the features dataset - test
    :param test_labels: a numpy array with the labels - test
    :param best_kernel: the chosen kernel type (either 'poly' or 'rbf')
    :param best_kernel_params: a dictionary with the chosen kernel's
        parameters - c value, gamma or degree
    :return: kernel_type: the chosen kernel type (either 'poly' or 'rbf')
             kernel_params: a dictionary with the chosen kernel's parameters - c value, gamma or degree
             clf: the SVM leaner that was built from the parameters
             tpr: tpr on the test dataset
             fpr: fpr on the test dataset
             accuracy: accuracy of the model on the test dataset
    """
    kernel_type = best_kernel
    kernel_params = best_kernel_params
    # Start from the module defaults; set_params below overrides only the
    # keys present in kernel_params, so the remaining defaults still apply.
    clf = SVC(class_weight='balanced', kernel=kernel_type, C=SVM_DEFAULT_C,
              gamma=SVM_DEFAULT_GAMMA, degree=SVM_DEFAULT_DEGREE)
    clf.set_params(**kernel_params)
    clf.fit(train_data, train_labels)
    # NOTE(review): get_stats is assumed to take (predictions, labels) in
    # this order, matching its use in get_k_fold_stats -- confirm if the
    # helper's signature ever changes.
    tpr, fpr, accuracy = get_stats(clf.predict(test_data), test_labels)
    return kernel_type, kernel_params, clf, tpr, fpr, accuracy
| [
"zilka@gmail.com"
] | zilka@gmail.com |
3c8918c20a8826ccc1a9ea38ea050047dfc5c55a | a97bb37bab680a4b96bb8adddc6aa6f07cf25f55 | /main.py | bd9843c5cfc631ddc0a59fd8068d3ff0f00c56aa | [] | no_license | Svtter/fastapi_tutorial | 0cf72e57e56e5552e3c6a7ade39603e546dfcea3 | a0fa793654d88e5e78e852c7b02d0ef857951723 | refs/heads/master | 2020-04-30T20:50:06.375820 | 2019-03-22T05:51:22 | 2019-03-22T05:51:22 | 177,079,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | from fastapi import FastAPI
app = FastAPI()
@app.get("/")
def read_root():
    """Root endpoint: return a static hello-world payload."""
    greeting = {"Hello": "World"}
    return greeting
@app.get("/items/{item_id}")
def read_item(item_id: int, q: str = None):
    """Return the requested item id together with the optional query string.

    NOTE(review): `q: str = None` would be clearer as an Optional[str]
    annotation; left as-is to keep the public signature byte-identical.
    """
    response = {"item_id": item_id, "q": q}
    return response
| [
"svtter@qq.com"
] | svtter@qq.com |
6e1d48d8959e9b2f56624bdd99cabc6a16df0d47 | 74c6b25b7ad41ba26d7995cf4bfcd4679987f27c | /accounts/models.py | f3107bf24ecffc3316207ebd0098c095e5c02a05 | [] | no_license | ifteheralom/mytutor-web | 7a791dab7dc397131132753145c8db17c43d3597 | 94c922f57f25d34d5bcaf3288c999ee0fbc3b08f | refs/heads/master | 2020-04-17T11:53:06.854092 | 2019-01-19T14:58:52 | 2019-01-19T14:58:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,964 | py | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
class ClassRequest(models.Model):
    # A tutoring-class request: `user` asks `req_to` with message `msg`.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    req_to = models.CharField(max_length=100, default='')
    msg = models.CharField(max_length=1500, default='')
    # Timestamp is set automatically when the request row is created.
    time = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        # Consistent with the other models in this module, which display
        # the related user's username.
        return self.user.username
class CoursesOffered(models.Model):
    # A subject a tutor offers, with the city and study level it targets.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    city = models.CharField(max_length=100, default='')
    study_level = models.CharField(max_length=100, default='')
    # NOTE(review): unlike the sibling fields, `subject` has no default --
    # presumably intentional (the subject is required); confirm.
    subject = models.CharField(max_length=100,)
    def __str__(self):
        # Displayed representation: the offering user's username.
        return self.user.username
class TutorProfile(models.Model):
    # Extra profile data for a tutor account (one-to-one with auth User).
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    description = models.CharField(max_length=1500, default='')
    expertise = models.CharField(max_length=100, default='')
    edu_qualification = models.CharField(max_length=100, default='')
    tutor_image = models.ImageField(upload_to='profile_image', blank=True)
    # Hourly charge; default mirrors StudentProfile.grade's default of 100.
    charge_hr = models.IntegerField(default=100)
    def __str__(self):
        # Added for consistency with CoursesOffered / UserProfile, which
        # display the related user's username.
        return self.user.username
class StudentProfile(models.Model):
    # Extra profile data for a student account (one-to-one with auth User).
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    description = models.CharField(max_length=1500, default='')
    school = models.CharField(max_length=100, default='')
    fav_subjects = models.CharField(max_length=100, default='')
    student_image = models.ImageField(upload_to='profile_image', blank=True)
    grade = models.IntegerField(default=100)
    def __str__(self):
        # Added for consistency with CoursesOffered / UserProfile, which
        # display the related user's username.
        return self.user.username
class UserProfile(models.Model):
    # Common profile data shared by every account (one-to-one with auth
    # User); `user_type` distinguishes the kind of account.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    full_name = models.CharField(max_length=100, default='')
    city = models.CharField(max_length=100, default='')
    profession = models.CharField(max_length=100, default='')
    # NOTE(review): no default here unlike the fields above -- presumably
    # the user type is required; confirm.
    user_type = models.CharField(max_length=50,)
    def __str__(self):
        # Displayed representation: the owning user's username.
        return self.user.username
| [
"ifteher.alom@gmail.com"
] | ifteher.alom@gmail.com |
1c9154c8db4a80397eb95294a8a71b01270574b7 | 44651d363ab806b4557d605ea513fe72808db852 | /getting_started/__init__.py | 4783d23fcae24fa9d4031e6701b46c11dfbffc54 | [] | no_license | copev313/Flask-App-Getting-Started | 6f368c8ed216f1c39659bb6a917d02986e27a44d | c9123bf255e4675c6b1c661f2655d23bffeda04f | refs/heads/main | 2023-05-24T08:06:00.899938 | 2021-06-18T13:23:31 | 2021-06-18T13:23:31 | 378,145,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | from flask import Flask
app = Flask(__name__)
# Add any other Flask app configurations here . . .
# Imported at the bottom deliberately: the views module imports `app`
# back from this package, so importing it before `app` exists would
# create a circular-import failure at startup.
from getting_started import views
"copev313@gmail.com"
] | copev313@gmail.com |
c6d8326c3b641681ecf4f3bdbcfbf09033f4aeec | 386b863933b4b5a1729012ba1247b90c237ba319 | /module/controller/con.py | b9fc66177ee381a1ac1dd3f196a4c0b9130a8237 | [] | no_license | derechoanthony/k-Api | 5d153058adced5214dd077e4378948b905938859 | 7cc5fcf130050df102e7532002e1aa518ce263c5 | refs/heads/master | 2021-09-14T09:11:24.678957 | 2018-05-11T01:22:33 | 2018-05-11T01:22:33 | 125,138,230 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,129 | py | from sqlalchemy import Table, Column, Integer, String, Float, MetaData, ForeignKey,func, select, outerjoin, case, literal_column
from sqlalchemy.sql import select
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from config import SESSION_OPTS
import sqlalchemy
import json
import datetime
# engine = sqlalchemy.create_engine('mysql://root:1..@127.0.0.1:3306/eagle1') # connect to server
engine = create_engine(SESSION_OPTS['session.url'], pool_size=40, max_overflow=-1, pool_recycle=3600, echo=True)
metadata = MetaData()
usrrole = Table('usr_role',
metadata,
Column('id', Integer, primary_key=True),
Column('role_name', String),
Column('role_code', Integer),
Column('role_status',Integer))
usrTable = Table('usr',
metadata,
Column('usrid', Integer,primary_key=True),
Column('username', String),
Column('pwd', String),
Column('reg_date', String),
Column('usr_status', String))
usrProfile = Table('userprofile',
metadata,
Column('usrpid', Integer,primary_key=True),
Column('usrid', Integer),
Column('firstname', String),
Column('lastname', String),
Column('email', String),
Column('contact', String),
Column('usrp_status', Integer),
Column('created', Integer))
userpermit = Table('userpermit',
metadata,
Column('usrper_id', Integer,primary_key=True),
Column('permit_val', Integer),
Column('permit_status', Integer),
Column('userid', Integer))
#quotation
qtheader = Table('qtheader',
metadata,
Column('qt_id', Integer,primary_key=True),
Column('reg_date', String),
Column('date_req', String),
Column('clientid', String),
Column('amt', Float),
Column('vendorid',Integer),
Column('booking_id', Integer),
Column('particulars',String),
Column('pointA_lat',String),
Column('pointA_lon',String),
Column('pointB_lat',String),
Column('pointB_lon',String),
Column('stat',Integer))
qtdetail = Table('qtdetail',
metadata,
Column('qtd_id', Integer, primary_key=True),
Column('qth_id',Integer),
Column('descrp',String),
Column('qty',Integer),
Column('price',Float),
Column('type', Integer),
Column('vendorid',Integer),
Column('total',Float),
Column('stat',Integer))
ven = Table('vendor',
metadata,
Column('ven_id', Integer, primary_key=True),
Column('fname', String),
Column('lanme', String),
Column('address',String),
Column('mobile', String),
Column('tel', String),
Column('email', String),
Column('payment',String),
Column('status', Integer),
Column('CompanyName', String))
vehcle = Table('vehicle',
metadata,
Column('vehicle_id', Integer, primary_key=True),
Column('vendor_id', Integer),
Column('vechicle_type', String),
Column('qty', Integer),
Column('price',Float),
Column('description', String),
Column('platenumber', String),
Column('stockonhand', Integer),
Column('imgcount', Integer),
Column('reg_date', String),
Column('capacity', Float),
Column('tankcapacity', Float),
Column('unit', String),
Column('reason_deactivation', String),
Column('req_deactivate', Integer),
Column('status', Integer)
)
#client
client = Table('client',
metadata,
Column('client_id', Integer, primary_key=True),
Column('client_type', Integer),
Column('username', String),
Column('password', String),
Column('fname', String),
Column('lname', String),
Column('contact', Integer),
Column('email', String),
Column('address', String),
Column('status', Integer)
)
#Booking
booking = Table('booking',
metadata,
Column('bookingID', Integer, primary_key=True ),
Column('vehicleID',Integer ),
Column('client_id',Integer ),
Column('start_date', String),
Column('end_date', String),
Column('picking_time', String),
Column('pointA', String),
Column('pointB', String),
Column('price', Integer),
Column('wDriver', Integer),
Column('status', Integer),
Column('booking_code', String)
# Column('date_today', String)
)
#Booking_date_blocking
block_head = Table('block_head',
metadata,
Column('blockID', Integer, primary_key=True ),
Column('bookingID',Integer ),
Column('start_date',String ),
Column('end_date', String),
Column('status', Integer),
Column('time_consume', Integer),
Column('vehicleID', Integer)
)
sub_booking = Table('sub_booking',
metadata,
Column('id', Integer, primary_key=True),
Column('bookingID', Integer),
Column('date_today', String)
)
rating = Table('rating',
metadata,
Column('rating_id', Integer, primary_key=True),
Column('vendor_id', Integer),
Column('vehicle_id', Integer),
Column('rate', Integer))
review = Table('review',
metadata,
Column('review_id', Integer, primary_key=True),
Column('vendor_id', Integer),
Column('vehicle_id', Integer),
Column('reviews', Integer))
Session = sessionmaker(bind=engine)
conn = engine.connect()
session = Session(bind=conn)
# s = select([func.to_char(usr.reg_date,'%Y-%m-%d %H:%M')])
| [
"derechoanthony@gmail.com"
] | derechoanthony@gmail.com |
319225d08a9c015e35a2eeec8e3d282d841b3718 | e2ee6be0e3e1262541224b64e9f037c3d0dbfa2d | /blog/api/serializers.py | 9c6aa7c84e2c5d629c1e85cd377a6977baafd56f | [] | no_license | kevinchrist20/blog-api | 9df13d267770f43aba08cbb5267d06215b07a994 | ea24fa9fd371437a6cff432002b80a347eda0a82 | refs/heads/main | 2023-01-11T01:56:09.585363 | 2020-11-08T14:36:53 | 2020-11-08T14:36:53 | 308,846,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | from rest_framework import serializers
from .models import Category, Blog
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = ["id", "name", "description"]
class PostSerializer(serializers.ModelSerializer):
class Meta:
model = Blog
fields = ["id", "uuid", "title", "category", "author", "content", "images", "created_at"]
| [
"kevinamevor@outlook.com"
] | kevinamevor@outlook.com |
ed0aea728182b38005db1db7ce0e119926bcc869 | 7bbf41a5cc3b39a7b5abdd6c4a58e3e42fd2d59b | /MainFrame/Categories/AddCategory.py | 80d9878071f97e1a7e53e18d2d23a59a6247cf8f | [] | no_license | retimarcell/multilanguage_dictionary | eb41146233d1a930fafe5c30be3e8ef665363f88 | a00dd00fb5818c8006057bc857cfe5f78d549e9c | refs/heads/master | 2022-02-10T19:09:28.503283 | 2019-05-11T13:19:10 | 2019-05-11T13:19:10 | 174,150,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,317 | py | from tkinter import *
from tkinter.messagebox import *
from User import Category
class AddCategory:
def __init__(self, logObj, user):
self.logObj = logObj
self.logObj.simpleLog("Creating category addition window")
self.user = user
self.root = Tk()
self.root.title("Új kategória")
ws = self.root.winfo_screenwidth() / 2 - 100
hs = self.root.winfo_screenheight() / 2 - 300
self.root.configure(bg='white')
self.root.geometry('+%d+%d' % (ws, hs))
self.root.resizable(width=FALSE, height=FALSE)
self.label = Label(self.root, text='Kategória neve:', font=("Helvetica", 13), bg='white')
self.entry = Entry(self.root, width=25, borderwidth=2, fg='#000000', relief=GROOVE, font=("Helvetica", 13), bg='white')
self.botFrame = Frame(self.root, bg='white')
self.forwardButton = Button(self.botFrame, text="Hozzáadás", font=("Helvetica", 11), command=self.play, bg='white', activebackground='white')
self.cancelButton = Button(self.botFrame, text="Mégse", font=("Helvetica", 11), command=self.cancelAddition, bg='white', activebackground='white')
self.label.grid(row=0, sticky=E+W, pady=(5,0))
self.entry.grid(row=1, sticky=E+W, pady=(2,2), padx=(2,2))
self.botFrame.grid(row=2, sticky=E)
self.cancelButton.grid(row=0, column=1, sticky=E, padx=10)
self.forwardButton.grid(row=0, column=0, sticky=E)
self.entry.focus_force()
self.root.bind('<Return>', self.play)
self.root.mainloop()
def play(self, event=None):
entry = self.entry.get()
if entry != "":
self.user.categories.append(Category.Category(entry))
self.user.database.insertIntoTable("Categories", [entry, self.user.username, -1])
self.root.quit()
self.root.destroy()
else:
showerror("Hiba", "Bemeneti mező üresen hagyva!")
def cancelAddition(self):
result = askyesno("Megszakítás", "Biztosan megszakítja a hozzáadást?")
if result:
self.logObj.simpleLog("Category addition cancelled.")
self.root.destroy()
else:
self.root.focus_force()
self.logObj.simpleLog("Category addition cancel cancelled.")
| [
"reti.marcell@gmail.com"
] | reti.marcell@gmail.com |
de36e37006d7df48083f195927a423456d030e09 | 89e1e77f382cb48cb75a51d568d149eab1730afe | /app/core/migrations/0013_auto_20201112_1230.py | bd737a21d763536fa83ccef1659b6e9c1867b216 | [] | no_license | payamy/MusicAppAPI | 61f230c6aa2709b4536b0001be6b2d8755ae2e73 | a0393ace41faa895fb53887058b4e7e836352f48 | refs/heads/main | 2023-02-27T16:39:39.163929 | 2021-02-04T20:37:25 | 2021-02-04T20:37:25 | 301,766,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | # Generated by Django 3.1.2 on 2020-11-12 12:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0012_classroom_comment_tutorial'),
]
operations = [
migrations.AddField(
model_name='comment',
name='text',
field=models.CharField(default='-', max_length=255),
),
migrations.AddField(
model_name='tutorial',
name='description',
field=models.CharField(default='-', max_length=255),
),
migrations.AddField(
model_name='tutorial',
name='title',
field=models.CharField(default='-', max_length=255),
),
migrations.AddField(
model_name='tutorial',
name='video',
field=models.FileField(null=True, upload_to='videos/', verbose_name=''),
),
migrations.AlterField(
model_name='classroom',
name='description',
field=models.CharField(default='-', max_length=255),
),
migrations.AlterField(
model_name='classroom',
name='name',
field=models.CharField(default='-', max_length=255),
),
]
| [
"payamyarandi98@gmail.com"
] | payamyarandi98@gmail.com |
e2b7f063d35eaea6d7cbe958862f195bbb54a1e5 | b77dff442c658b4e99c69eb75bc3924adbbad231 | /api/views.py | a8af86d69ab6ef17ff4ebb7393d2ad2d5e9b92ac | [] | no_license | JorgeQuetgo/scrap_api | 5a4378d8b7000e463d2bf285162bbdad7a9870c3 | acae2ea6f6674849778b1336d3db4906c63b2987 | refs/heads/master | 2023-02-14T03:06:11.318449 | 2021-01-08T05:33:24 | 2021-01-08T05:33:24 | 327,804,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | from rest_framework.response import Response
from rest_framework import viewsets, status
from api.models import Product
from api.serializers import ProductSerializer, PageScrapSerializer
class ProductViewSet(viewsets.ModelViewSet):
serializer_class = ProductSerializer
queryset = Product.objects.all()
class PageScrappingViewSet(viewsets.ViewSet):
serializer_class = PageScrapSerializer
@staticmethod
def create(request):
serializer = PageScrapSerializer(data=request.data)
if serializer.is_valid():
result =serializer.save()
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
return Response(result, status=201)
| [
"jorge.giron@atentousa.com"
] | jorge.giron@atentousa.com |
d5178f38d43ebcfc3521fb234262ac8566c86c85 | a4346b13110d82274ba37040d125c8e82a4d2b07 | /trends.py | 8427337e917df3fa98d3e40266aea4c0fa56e795 | [
"MIT"
] | permissive | SushruthaSVPKH22/DADV1 | a550df9a5155a0d6869416dd58554f1da30c8462 | 11339b814e88477c1583aaed38548bfced885b6d | refs/heads/master | 2020-05-23T22:22:42.489074 | 2019-05-16T07:07:29 | 2019-05-16T07:07:29 | 186,972,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,747 | py | import requests
from bs4 import BeautifulSoup
import csv
def crawl(rawdata):
page=BeautifulSoup(rawdata.content,'html.parser');
data_tbody=page.find('tbody',id="ElectionResult")
for div in data_tbody.find_all("div", {'class':'tooltip'}):
div.decompose()
for span in data_tbody.find_all("span"):
span.decompose()
def get_first_child(soup_page, child):
first_child = soup_page.find(child)
all_child = [first_child] + first_child.find_next_siblings(child)
return all_child
all_trs = get_first_child(data_tbody, 'tr')
imp_trs = all_trs[4:]
for tr in imp_trs:
all_tds = get_first_child(tr, 'td')
data_writer = csv.writer(f, delimiter=',');
data_writer.writerow(['%s' % (all_tds[0].getText()), '%s' % (all_tds[1].getText()), '%s' % (all_tds[2].getText()), '%s' % (all_tds[3].getText()), '%s' % (all_tds[4].getText()), '%s' % (all_tds[5].getText()), '%s' % (all_tds[6].getText()), '%s' % (all_tds[7].getText()), '%s' % (all_tds[8].getText()), '%s' % (all_tds[9].getText()), '%s' % (all_tds[10].getText())])
page=[9,23,4,20,12]
states=["S26","S12","S16","S20","S29"]
k=len(page);
j=0;
for s in states:
with open(""+s+".csv", 'w') as f:
print(j)
print(page[j])
for i in range(page[j]):
print(i)
if i == 0:
url ="http://eciresults.nic.in/statewise"+s+".htm?st="+s;
else:
url= "http://eciresults.nic.in/statewise"+s+str(i)+".htm?st="+s+str(i);
print(url);
rawdata=requests.get(url);
crawl(rawdata);
j+=1;
| [
"50095574+SushruthaSVPKH22@users.noreply.github.com"
] | 50095574+SushruthaSVPKH22@users.noreply.github.com |
80391c27b0f9cf82d7dc523406b0c9677f376808 | 5aac30da97184a9864590839a5f8be3bc5df1ced | /https_to_amqps.py | d862fb3b2c7f4cce24ca2dd72068d1b0ad7e2f59 | [] | no_license | mikhaelsantos/aws-lambda-https-to-amqps | 384d2f034e24e543fe89f206e8a4156684cb7118 | ce8f4abd301f0f0e1aab83a9b58b849863b2e291 | refs/heads/master | 2021-08-24T15:36:18.596274 | 2017-12-10T08:12:12 | 2017-12-10T08:12:12 | 113,733,485 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,719 | py | """
Lambda function to connect https to amqps
"""
import json
import os
import boto3
import pika
print('Loading function')
CREDENTIALS = None
class MissingEnvVarsException(Exception):
pass
def get_credentials(bucket: str, path: str, encryption_context: str) -> dict:
"""
Gets RabbitMq credentials from s3
:param bucket:Configuration Bucket
:param path: Path to encrypted configuration file
:param encryption_context: Encryption context used during encryption
:return: Credentials to access Rabbitmq
"""
global CREDENTIALS
if CREDENTIALS is None:
client = boto3.client("s3")
file = client.get_object(Bucket=bucket, Key=path)
client = boto3.client("kms")
response = client.decrypt(
CiphertextBlob=file['Body'].read(),
EncryptionContext=encryption_context)
CREDENTIALS = json.loads(response["Plaintext"])
return CREDENTIALS
def respond(err: dict, res: dict = None) -> dict:
"""
Generates an API Gateway response with elements to for the api caller
:param err: (object) With error information
:param res: (dict) Containing the response to return to the user
:return: (dict) Response in API Gateway format
"""
return {
'statusCode': '400' if err else '200',
'body': json.dumps(err) if err else json.dumps(res),
'headers': {
'Content-Type': 'application/json',
},
}
def direct_message_to_rabbitmq(credentials: dict, host: str, vhost: str, body: dict) -> dict:
"""
Receives a message from API Gateway and forwards it to RabbitMQ
:param credentials: (dict) containing the username and password to accesss rabbit
:param host: host address to access RabbitMq
:param vhost: rabbitmq vhost
:param body: Message to forward to RabbitMq
:return: Returns the message forwarded to RabbitMq
"""
print("Forward to RabbitMq")
uri = "amqps://{}:{}@{}/{}".format(
credentials["username"],
credentials["password"],
host,
vhost)
connection = pika.connection.URLParameters(uri)
connection = pika.BlockingConnection(connection)
channel = connection.channel()
properties = pika.spec.BasicProperties(priority=int(body["priority"]))
channel.basic_publish(exchange=body["exchange"],
body=json.dumps(body),
properties=properties,
routing_key="")
return body
def lambda_handler(event: dict, context: object) -> dict:
"""
Receives a message from API Gateway and forwards it to RabbitMQ
:param event: (dict) with the users request to the API Gateway
:param context: (object)
:return: (dict) Response in API Gateway format
"""
print("Set env variables")
host = os.getenv("ADDRESS")
vhost = os.getenv("VHOST")
app_name = os.getenv("APPNAME")
encryption_context = os.getenv("ENCRYPTION_CONTEXT")
config_bucket = os.getenv("CONFIG_BUCKET")
config_path = os.getenv("CONFIG_PATH")
if not host and not vhost and not app_name and not config_path and not config_bucket:
raise MissingEnvVarsException
print("Set up operation behaviour")
operation = event["context"]["http-method"]
operations = {
'POST': direct_message_to_rabbitmq
}
credentials = get_credentials(config_bucket, config_path, encryption_context)
print("Get payload")
body = event['body-json']
if operation in operations:
return respond(
None, operations[operation](
credentials, host, vhost, body))
return respond({"message": 'Unsupported method "{}"'.format(operation)})
| [
"mikhael.santos@f-secure.com"
] | mikhael.santos@f-secure.com |
dae634b72dac458cfa57d1bcb809f4d6d4bedf11 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2790/60730/256936.py | ed7cd9f25137e77fa2fe3809ef5f880ec7402267 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | num_m, num_n = map(int, input().split())
m = list(map(int, input().split()))
n = list(map(int, input().split()))
m.sort(reverse=False)
tmp = 0
for i in range(num_n):
for j in range(num_m):
if (m[j] <= n[i]):
tmp = tmp + 1
if (j == 4):
print(str(tmp))
tmp = 0;
else:
continue
else:
print(str(tmp))
tmp = 0
break | [
"1069583789@qq.com"
] | 1069583789@qq.com |
5e352046fdbd446e5a251b83c8baad682d09962b | b6c501374f95c21ed82a58519c34008c4f30f9d8 | /Python/flask_fundamentals/advanced_routing/server.py | 7a451938289b23818adfd2ce814eea412840906e | [] | no_license | satofromjapan/DojoAssignments | 839c67b02391d69637d5137f9d97526f496a5c15 | ba5589b00c64c04e84b2d1d662968e20be6fd92a | refs/heads/master | 2021-01-18T04:09:24.205148 | 2017-04-10T15:37:25 | 2017-04-10T15:37:25 | 85,758,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | from flask import Flask, render_template, request, redirect
app = Flask(__name__)
@app.route('/users/<username>')
def show_user_profile(username):
print username
return render_template("user.html")
app.run(debug=True)
| [
"masato.takaha@gmail.com"
] | masato.takaha@gmail.com |
61786e2faf6ca3617cc2547869f76cce86441d76 | b76c6813f2ce2fd24a33175a0249cd9544583fe7 | /acerca_de/url_acerca_de.py | 50417f63558bf4aa93ed35902ad121d612af8286 | [] | no_license | adrianglez2203/nuevo_as | 0074e6d8155a471bb7d81bc3456914acdc7fba98 | df375410e9d6922ebb931645ff8f1c7b3f5cb93b | refs/heads/master | 2022-08-01T23:43:51.328124 | 2020-06-06T15:35:23 | 2020-06-06T15:35:23 | 270,111,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | from django.urls import path
from acerca_de import views
urlpatterns = [
path('acerca',views.vista , name='acerca_de'),
] | [
"adrianglez2203@gmail.com"
] | adrianglez2203@gmail.com |
cdf70cd90909ae58c57b599d2204957e323a444b | 6208a116cf79629838538b622e27c85f2305363a | /krakenapp/migrations/0004_player_capital.py | 280187e94b466808f42b6a5708cf7a39656fa4d1 | [] | no_license | debnet/riskraken | cdb77b979dac8c30565ed1f508f6cbbce57a4f01 | 57a057c912b1d1411e41cb70f041636f2d35c03d | refs/heads/master | 2023-05-11T22:45:25.613783 | 2021-06-04T00:09:02 | 2021-06-04T00:09:02 | 358,425,108 | 0 | 1 | null | 2021-05-20T18:49:23 | 2021-04-16T00:05:22 | CSS | UTF-8 | Python | false | false | 752 | py | # Generated by Django 3.2 on 2021-04-25 23:31
from django.db import migrations, models
import django.db.models.deletion
import krakenapp.models
class Migration(migrations.Migration):
dependencies = [
('krakenapp', '0003_claim_instead_of_owner'),
]
operations = [
migrations.AddField(
model_name='player',
name='capital',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='krakenapp.territory', verbose_name='capitale'),
),
migrations.AlterModelManagers(
name='player',
managers=[
('objects', krakenapp.models.PlayerManager()),
],
),
]
| [
"marc@debnet.fr"
] | marc@debnet.fr |
ba8c22cf3cea258a5f767182a5ff8dbf68b2b507 | 16b26e6a9e6d6a7db2a20a6327b3d042e2245747 | /bigramas/bigramas.py | dbef177920a9911443ced6ff74a1eed9c8710e79 | [
"Unlicense"
] | permissive | jabaier/iic1103.20152.s4 | 3826b8de35470acc0387c8199b6ecce50d4222bd | 63ddd5f9b73caff218b6744e7392e7a66afba570 | refs/heads/master | 2020-05-27T16:59:32.232746 | 2015-11-20T14:34:21 | 2015-11-20T14:34:21 | 41,114,018 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,540 | py | import sys
import random
class Bigramas:
def __init__(self):
self.bigramas=[] # lista de bigramas
self.nbigramas=0 # numero de bigramas
def buscar(self,palabra):
for par in self.bigramas:
if par[0]==palabra:
return par[1]
# la palabra no está
self.bigramas.append([palabra,[]])
return self.bigramas[len(self.bigramas)-1][1]
def incrementar(self,lista,palabra):
i=0
while i < len(lista):
if lista[i][0]==palabra:
lista[i][1] = lista[i][1] + 1
return
i+=1
lista.append([palabra,1])
self.nbigramas=self.nbigramas+1
if self.nbigramas%1000==0:
print(".",end="")
sys.stdout.flush()
def agregar(self,pal1,pal2):
lista=self.buscar(pal1)
self.incrementar(lista,pal2)
def caminata(self,palabra,limite):
def sumalista(lista):
suma=0
for elem in lista:
suma+=elem[1]
return suma
contador=0
lista_palabras=[]
while contador < limite:
lista_palabras.append(palabra)
lista=self.buscar(palabra)
if len(lista)>0:
total=sumalista(lista)
rand=random.randint(0,total)
acc=0
i=0
while acc + lista[i][1] < rand and i < len(lista):
acc = acc + lista[i][1]
i = i + 1
palabra=lista[i][0]
else:
palabra=palabras[random.randint(0,len(self.palabras)-1)]
contador = contador + 1
return lista_palabras
def limpiar(palabra):
puntuacion=".,!:;?»«-¿¡"
respuesta=""
for c in palabra:
if not c in puntuacion:
respuesta += c
return respuesta
filename = input("Archivo con datos: ")
f = open(filename,'r')
print("Leyendo los datos")
entrada=''
for linea in f:
linea.rstrip()
entrada += linea
import time
tic1=time.time()
print("Datos leidos. Procesando.",end="")
f.close()
palabras = [limpiar(x.lower()) for x in entrada.split() if x!=""]
b=Bigramas()
i=0
while i < len(palabras)-1:
b.agregar(palabras[i],palabras[i+1])
i+=1
tic2=time.time()
print("\nDatos procesados en ",round(tic2-tic1,2),"seg.")
print("Base de datos tiene",len(b.bigramas),"palabras y",b.nbigramas,"bigramas.")
print(b.bigramas)
while True:
p = input("palabra: ")
print(b.caminata(p,20))
| [
"jabaier@gmail.com"
] | jabaier@gmail.com |
414447816b0198c4653782e0597ca4187c4cf8f3 | ab6fc61c008dcc139eb7d790267322dbb6b63d1c | /acme/settings.py | fadc918c4ac7fe135c4366dfa413fd4f988c2f5e | [] | no_license | philiplewis71239/website_v3 | 8e718ea8dfff7d73624162a8cc9cf1468946deaf | 80e8c55c83860514206de5cf45986ab3aef27be4 | refs/heads/master | 2020-05-20T04:09:33.064671 | 2015-01-06T18:31:32 | 2015-01-06T18:31:32 | 28,876,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,185 | py |
import os
class ConfigMeta(type):
def __getitem__(self, key):
# Implementing __getitem__() allows us to use dict key indexing
# syntax on the Config objects
return self.__dict__[key]
class DevConfig(object):
__metaclass__ = ConfigMeta
SECRET_KEY = '####SOME_SECRET_KEY_HERE####'
DEBUG = True
DB_URI = "mongodb://localhost/db"
@classmethod
def read_env(self):
# For every uppercase key in the Config object,
# check for an equivalent envvar. If the envvar is set, take
# it's value instead
for k in dir(self):
if k.isupper():
if os.getenv(k):
setattr(self, k, os.getenv(k))
# Write all keys except 'PRODUCTION' back into the environment
# so that they are accessible to modules who insist on
# using os.getenv instead of our global config
# We don't write PRODUCTION back to the environment so that we
# can use that as the single authoritative source of which mode
# we're in
if k != 'PRODUCTION':
os.environ[k] = str(getattr(self, k))
return self
@classmethod
def get(self, key, default=None):
# Implementing get() allows us to use the Config objects in the
# same way we use dicts, eg.
# somevar = obj.get('key', 'default_value')
try:
return getattr(self, key)
except AttributeError as e:
return default
class ProductionConfig(DevConfig):
DEBUG = False
DB_URI = "mongodb://username:password@remotehost:9999/db"
class TestConfig(DevConfig):
pass
BLUEPRINTS = (
{"name": "acme.widget.widgets", "url_prefix": "/widgets"},
)
# Here we bootstrap the Config object, choosing one depending if we should be
# in PRODUCTION mode or not.
# This is followed by overriding set values with those set as envvars via
# read_env(), allowing for dynamic configuration in environments like Heroku
CONFIG = ProductionConfig if bool(os.getenv("PRODUCTION", False)) else DevConfig
CONFIG.read_env()
| [
"philip.lewis71239@gmail.com"
] | philip.lewis71239@gmail.com |
41d2f58a5837570bccd54bad521cf8a559e21344 | 489e39c054428d74fa8372c923d64f4175736919 | /satgui/login/urls.py | 7e4cc7a093f87b3bf1933e34da9fd56ea9c204d2 | [] | no_license | juanhernandez17/SafetyAnalysisToolGUI | a94d961a2da23fa5666e73875180cb24a5400282 | 88986da9d2fbc784f66084e57145d7c3135488ed | refs/heads/master | 2023-07-27T05:15:59.489911 | 2021-09-15T01:20:53 | 2021-09-15T01:20:53 | 406,569,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | from django.urls import path
from . import views
urlpatterns = [
path('user/', views.User.as_view(), name='user'),
path('register/', views.Register.as_view(), name='register'),
path('login/', views.Login.as_view(), name='login'),
path('logout/', views.Logout.as_view(), name='logout'),
path('account/', views.Account.as_view(), name='account'),
]
| [
"juanhernandez17@my.unt.edu"
] | juanhernandez17@my.unt.edu |
4344b639645fd9baceaf24a629543fa2d88b3eca | 1410b8d0eb2338738a1539f716f18f99fd92fc9e | /1.py | b0550b3d6c836bfb50a0acf51d068f2acf071f4e | [] | no_license | AlenaGB/hw3 | 7e1d1d9adecf03f634bb541188b6d61ab5f15ad4 | cfafac70e1de0d908dbed2ff28ecb8d0ecdfdbbe | refs/heads/master | 2023-02-08T01:06:29.355567 | 2021-01-03T09:47:10 | 2021-01-03T09:47:10 | 324,968,743 | 0 | 0 | null | 2021-01-03T10:02:55 | 2020-12-28T09:31:16 | Python | UTF-8 | Python | false | false | 364 | py |
def convert(rub, rate):
    """Convert an amount of roubles into the target currency.

    Divides ``rub`` by the exchange ``rate``.  When the rate is zero the
    program prints a message and terminates, matching the script's
    interactive error handling.
    """
    if rate == 0:
        print('Incorrect input data')
        exit()
    return rub / rate
print('Convert RUB / USD '.upper())
print('You have {:.3} euros in your account '.format(convert(float(input('How many rubles do you want to convert to euros ')),float(input(('What is the currenst exchange rate ')))))) | [
"olyonapopova@gmail.com"
] | olyonapopova@gmail.com |
4db41f4d1a1037fd1b2f45dd65bc9db80863548b | 12d4932f5ae96115e23b7b1599cab922bf4be111 | /schedule/models.py | 83eda16b353d5d6aa6fd5eb35548e654dd6426c0 | [] | no_license | kabanfaly/planner | e90ce3b0c46346e12169941574ecc1e0cc32e1af | d276dce80a77c0cfdf32936493cede6a3ef073bf | refs/heads/master | 2020-12-24T11:16:41.995829 | 2016-11-26T07:02:19 | 2016-11-26T07:02:22 | 73,046,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | from django.db import models
from employee.models import Employee
from workplace.models import Workplace
class Schedule(models.Model):
    """A single work shift for an employee at a workplace.

    Hours worked are broken down by day/night rate and by regular day,
    Sunday and public holiday, mirroring common payroll rules.
    """

    # Shift boundaries and optional break.
    start_time = models.DateTimeField(verbose_name="Work start time")
    end_time = models.DateTimeField(verbose_name="Work end time")
    break_time = models.TimeField(null=True)
    # Hour breakdowns; nullable because they may be computed after creation.
    day_hours = models.TimeField(verbose_name="Number of hours worked as day hours", null=True)
    night_hours = models.TimeField(verbose_name="Number of hours worked as night hours", null=True)
    sunday_day_hours = models.TimeField(verbose_name="Sunday: Number of hours worked as day hours", null=True)
    sunday_night_hours = models.TimeField(verbose_name="Sunday: Number of hours worked as night hours", null=True)
    public_holiday_day_hours = models.TimeField(verbose_name="Public holiday: Number of hours worked as day hours", null=True)
    public_holiday_night_hours = models.TimeField(verbose_name="Public holiday: Number of hours worked as night hours", null=True)
    employee = models.ForeignKey(Employee)
    workplace = models.ForeignKey(Workplace)

    def __str__(self):
        # Bug fix: __str__ must *return* a string.  The previous version
        # printed the text and implicitly returned None, which makes
        # str(schedule) raise TypeError (e.g. in the Django admin).
        return (
            "{}\nWorkplace: {}\nStart time: {}\nEnd time: {}\nBreak: {}\n"
            "Day hours: {}\nNight hours: {}".format(
                self.employee, self.workplace, self.start_time, self.end_time,
                self.break_time, self.day_hours, self.night_hours
            )
        )
"nfalykaba@gmail.com"
] | nfalykaba@gmail.com |
11f4ba38d434e1643f04aaf34ec8646b27782520 | 7e0fdfb76cae8145a4bbac0cac0e4cac6b8e3788 | /dingding_shouqi/dingding_shouqi.py | daaee3a0bc4aec488160ca6adf2ad171e17d3204 | [] | no_license | w193241125/dingdingdaka | 603b06323b5c5e3341e0077614e044b2c80c038b | 3d2088d2e876fc4e80fc06bea3eaa5b8833392ed | refs/heads/master | 2020-05-17T21:45:44.276659 | 2019-05-13T10:16:29 | 2019-05-13T10:16:29 | 183,981,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,007 | py | #!/usr/local/bin python
# -*- coding: utf-8 -*-
# @Time : 2019/3/5 10:38
# @Author : Larwas
# @Link : 164722305@qq.com
# @Site : www.larwas.com
# @File : dingding.py
# @Software: PyCharm
import os
import re
import time
import urllib
import json
import requests
# nameplt = re.compile("package: name='(.*?)' versionCode")
# activityplt = re.compile("launchable-activity: name='(.*?)'")
adbshell = "adb shell" # 启用shell命令可以直接操作Android系统
# adbstr = "adb push D:/1.txt /mnt/txt/1.txt" # 把电脑的文件推送到安卓
# adbpng1 = "adb pull /sdcard/screencap.png d://"
# adbpng2 = "adb pull /sdcard/screencap.png d://1.png"
# adb_use_screencap = "adb shell /system/bin/screencap -p /sdcard/667.png" # 截取安卓的屏幕
# adbpng3 = "adb pull /sdcard/667.png d://3.png" # 把安卓的截图导入到电脑
# get_app_info = "adb shell pm list packages" # 获取模拟器所有包名
tap_place_index = "adb shell input tap 78 1219"
tap_place = "adb shell input tap 363 1219"
tap_place_kaoqin = "adb shell input tap 272 724"
tap_place_kaoqin2 = "adb shell input tap 268 1051"
tap_place_daka = "adb shell input tap 185 1234"
tap_place_shangban = "adb shell input tap 353 499"
tap_place_xiaban = "adb shell input tap 353 756"
wake_up = "adb shell input keyevent 26"
unlock = "adb shell input swipe 370 952 370 318"
wrap_a = "adb shell input swipe 370 318 370 952"
shut_up = "adb shell input tap 654 594" # 收起
return_a = "adb shell input tap 52 101"
return_b = "adb shell input tap 156 101"
return_ding_index = "adb shell input tap 75 1224"
return_index = "adb shell input keyevent 3"
turnback = "adb shell input keyevent 4"
donyin_package_name = "com.alibaba.android.rimet"
douyin_activity_name = "com.alibaba.android.rimet.biz.SplashActivity"
power_stat = "adb shell dumpsys window policy"
kill_dingding = "adb shell am force-stop com.alibaba.android.rimet"
# 获取抖音app的com信息
# get_com_info = r"aapt dump badging G:\python\dingding\rimet.apk > dingding.txt"
# os.system(get_com_info)
# with open("dingding.txt", "r", encoding="utf-8") as fs:
# donyin = fs.read()
#
# donyin_package_name = nameplt.findall(donyin)[0]
# douyin_activity_name = activityplt.findall(donyin)[0]
#
# print("钉钉activity", douyin_activity_name)
# print("钉钉的包名", donyin_package_name)
# os.system(adb_use_screencap)
# #print(os.system(adbpng3))
start_app = f"adb shell am start -n {donyin_package_name}/{douyin_activity_name}"
start_airdroid = f"adb shell am start -n com.sand.airdroid/com.sand.airdroid.ui.splash.SplashActivity_"
# 获取当前周数
current_week = time.strftime("%W")
# 取余 当前1为双休0为单休
mod = int(current_week) % 2
# 获取今天周几
current_weekday = time.strftime("%w", time.localtime())
# 获取当前时间
current_time = time.strftime('%H:%M', time.localtime(time.time()))
def isAwaked(deviceid=''):
    """Return True when the device screen is on.

    Runs ``adb shell dumpsys window policy`` (optionally targeting a
    specific device via ``-s``) and looks for the exact status line the
    window policy dump prints when the screen is fully on.
    """
    if deviceid == '':
        cmd = 'adb shell dumpsys window policy'
    else:
        cmd = 'adb -s ' + deviceid + ' shell dumpsys window policy'
    # Exact line (including trailing newline) emitted while the screen is on.
    awake_line = ' mScreenOnEarly=true mScreenOnFully=true mOrientationSensorEnabled=false\n'
    return awake_line in os.popen(cmd).readlines()
def isLock(deviceid=''):
    """Return True when the lockscreen is currently showing.

    Like :func:`isAwaked`, parses the output of
    ``adb shell dumpsys window policy`` for the exact lockscreen status line.
    """
    if deviceid == '':
        cmd = 'adb shell dumpsys window policy'
    else:
        cmd = 'adb -s ' + deviceid + ' shell dumpsys window policy'
    # Exact line (including trailing newline) printed while locked.
    locked_line = ' mShowingLockscreen=true mShowingDream=false mDreamingLockscreen=true\n'
    dump_lines = os.popen(cmd).readlines()
    return locked_line in dump_lines
def sign():
    """Drive the DingTalk app over adb to perform a clock-in / clock-out.

    The whole flow is a scripted sequence of taps at hard-coded screen
    coordinates, so it is highly timing- and layout-dependent; the sleeps
    between steps wait for the UI to settle.  Before 10:30 it clocks in,
    otherwise it clocks out (after 20:00 it additionally confirms an
    "update" dialog to make sure the punch registered).
    """
    start_app = f"adb shell am start -n {donyin_package_name}/{douyin_activity_name}"
    # Make sure the screen is on and unlocked before launching the app.
    if isAwaked():
        if isLock():
            print('unlock')
            os.system(unlock)
        else:
            pass
    else:
        # Wake the screen, then unlock it.
        print('wake up and unlock')
        os.system(wake_up)
        time.sleep(1)
        os.system(unlock)
    print("启动dd")
    os.system(start_app)
    time.sleep(20)
    # App start can take a while; re-check the screen state afterwards.
    if isAwaked():
        if isLock():
            os.system(unlock)
        else:
            pass
    else:
        # Wake the screen and unlock it again.
        os.system(wake_up)
        time.sleep(1)
        os.system(unlock)
    # Navigate inside DingTalk by tapping fixed coordinates.
    os.system(tap_place_index)
    time.sleep(2)
    os.system(tap_place)
    # Swipe down once so the list is in a known position.
    os.system(wrap_a)
    # Collapse the banner.
    time.sleep(4)
    print('收起')
    os.system(shut_up)
    time.sleep(3)
    print('点击考勤')
    os.system(tap_place_kaoqin2)
    # os.system(tap_place_kaoqin)
    # time.sleep(6)
    # os.system(tap_place_daka)
    time.sleep(10)
    if current_time <= "10:30":
        # Morning: tap the clock-in button.
        os.system(tap_place_shangban)
        print(1)
        time.sleep(5)
        # Done clocking in: go back.
        os.system(turnback)
    else:
        if current_time < "20:00":
            os.system(tap_place_xiaban)
            print("下班")
        else:
            # Late evening: tap clock-out once to guarantee a punch, then
            # go through the "update" confirmation dialog.
            os.system(tap_place_xiaban)
            print("点击一次下班保证打了卡")
            time.sleep(3)
            tap_place_gengxin = "adb shell input tap 115 713"
            tap_place_gengxin_queren = "adb shell input tap 569 713"
            os.system(tap_place_gengxin)
            print("更新")
            time.sleep(3)
            os.system(tap_place_gengxin_queren)
        time.sleep(5)
        # Done clocking out: go back.
        os.system(turnback)
    # Back out of DingTalk to the home screen.
    os.system(return_a)
    os.system(return_b)
    os.system(return_ding_index)
    os.system(return_index)
# 获取当前时间 格式20180213
nowTime = time.strftime('%Y%m%d', time.localtime())
date = nowTime
# print(date)
# 节假日接口
server_url = "http://api.goseek.cn/Tools/holiday?date="
vop_url_request = requests.get(server_url + date)
vop_response = vop_url_request.text
vop_data = json.loads(vop_response)
print(vop_data)
# 获取节假日结束
if vop_data['data'] == 1:
pass # 法定节假日跳过
else:
print('不是法定节假日')
if isAwaked():
if isLock():
print('unlock')
os.system(unlock)
else:
print('屏幕没锁')
pass
else:
# 唤醒屏幕并解锁
print('wake up and unlock2')
# 唤醒屏幕并解锁
os.system(wake_up)
time.sleep(1)
os.system(unlock)
time.sleep(1)
os.system(return_index)
# 双休打卡
if mod == 1 and int(current_weekday) in [1, 2, 3, 4, 5]:
if int(current_weekday) == 5 and current_time < "20:30" and current_time > "10:30":
sign() # 打卡
elif int(current_weekday) in [1, 2, 3, 4] and current_time > "20:30":
sign() # 打卡
elif int(current_weekday) in [1, 2, 3, 4, 5] and current_time < "10:30":
sign()
else:
if current_time > "18:00":
sign()
else:
print('不是周末,打卡太早1') # 跳过
# 单休打卡
elif mod == 0 and int(current_weekday) in [1, 2, 3, 4, 5, 6]:
if int(current_weekday) == 6 and current_time < "20:30" and current_time > "10:30":
sign() # 打下班卡
elif int(current_weekday) in [1, 2, 3, 4, 5] and current_time > "20:30":
sign() # 打下班卡
elif int(current_weekday) in [1, 2, 3, 4, 5,6] and current_time < "10:30":
sign() # 打上班卡
else:
if current_time > "18:00":
sign()
else:
print('不是周末,打卡太早_单休') # 跳过
else:
print('未知原因取消打卡') # 跳过
os.system(kill_dingding)
os.system(start_airdroid)
time.sleep(3)
os.system(wake_up)
time.sleep(3)
exit()
| [
"test@test.com"
] | test@test.com |
68f57b90cb2d6b9dd5cb519f88279a89150b0f99 | 070eabad59ba058bbdf4a8d76be03c692143cd32 | /exp/exp12.py | 554b19772087d50833d69b718d2632c0dc1dfe03 | [] | no_license | osuossu8/Kaggle_Bengali2019 | 1ebaabd36a0225a42e77d07ea2da04a7860a01d4 | a32397e73b2861cb677988c7d39abb719e229d8c | refs/heads/master | 2022-04-20T16:19:36.961665 | 2020-03-14T13:02:38 | 2020-03-14T13:02:38 | 239,647,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,950 | py | import numpy as np
import pandas as pd
import albumentations
import argparse
import collections
import cv2
import datetime
import gc
import glob
import logging
import math
import operator
import os
import pickle
import pkg_resources
import random
import re
import scipy.stats as stats
import seaborn as sns
import shutil
import sys
import time
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torchvision import models, transforms
from contextlib import contextmanager
from collections import OrderedDict
from sklearn import metrics
from sklearn import model_selection
from sklearn.model_selection import KFold, GroupKFold
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import mean_squared_log_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from torch.nn import CrossEntropyLoss, MSELoss
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import (Dataset,DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
import tensorflow as tf
import PIL
from PIL import Image
from tqdm import tqdm, tqdm_notebook, trange
import warnings
warnings.filterwarnings('ignore')
# from apex import amp
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
sys.path.append("/usr/src/app/kaggle/bengaliai-cv19")
from src.machine_learning_util import seed_everything, prepare_labels, DownSampler, timer, \
to_pickle, unpickle
from src.image_util import resize_to_square_PIL, pad_PIL, threshold_image, \
bbox, crop_resize, Resize, \
image_to_tensor, train_one_epoch, validate, macro_recall
from src.scheduler import GradualWarmupScheduler
from src.layers import ResidualBlock
from src.image_bengali import rand_bbox, cutmix, mixup, cutmix_criterion, mixup_criterion
from src.trainer_bengali import train_one_epoch_mixup_cutmix
SEED = 1129
seed_everything(SEED)
LOGGER = logging.getLogger()
FORMATTER = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
def setup_logger(out_file=None, stderr=True, stderr_level=logging.INFO, file_level=logging.DEBUG):
LOGGER.handlers = []
LOGGER.setLevel(min(stderr_level, file_level))
if stderr:
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(FORMATTER)
handler.setLevel(stderr_level)
LOGGER.addHandler(handler)
if out_file is not None:
handler = logging.FileHandler(out_file)
handler.setFormatter(FORMATTER)
handler.setLevel(file_level)
LOGGER.addHandler(handler)
LOGGER.info("logger set up")
return LOGGER
EXP_ID = "exp12_mixup_cutmix_15epoch_4e-4"
LOGGER_PATH = f"logs/log_{EXP_ID}.txt"
setup_logger(out_file=LOGGER_PATH)
LOGGER.info("seed={}".format(SEED))
SIZE = 128
HEIGHT=137
WIDTH=236
OUT_DIR = 'models'
# https://albumentations.readthedocs.io/en/latest/api/augmentations.html
data_transforms = albumentations.Compose([
albumentations.Flip(p=0.2),
albumentations.Rotate(limit=15, p=0.2),
albumentations.ShiftScaleRotate(rotate_limit=15, p=0.2),
albumentations.Cutout(p=0.2),
# albumentations.RandomGridShuffle(grid=(3, 3), p=0.2),
])
data_transforms_test = albumentations.Compose([
albumentations.Flip(p=0),
])
class BengaliAIDataset(torch.utils.data.Dataset):
    """Torch dataset over flattened 128x128 Bengali grapheme images.

    Each row of ``df`` holds an id column followed by 128*128 pixel values.
    When ``y`` is supplied, the three classification targets
    (vowel diacritic, grapheme root, consonant diacritic) are returned
    alongside the image tensor.
    """

    def __init__(self, df, y=None, transform=None):
        self.df = df
        self.y = y
        self.transform = transform
        self.size = 128  # images are 128x128

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        input_dic = {}
        row = self.df.iloc[idx]
        # Drop the id column and restore the 2-D image.
        # Bug fix: `np.float` was a deprecated alias of the builtin `float`
        # and is removed in numpy >= 1.20; use `float` directly (identical
        # result: float64).
        image = self.df.iloc[idx][1:].values.reshape(128, 128).astype(float)
        if self.transform is not None:
            # Augmentation path: binarise, augment, then normalise with the
            # dataset mean/std (0.0692 / 0.2051).
            image = np.array(image)
            image = threshold_image(image)
            image = self.transform(image=image)['image']
            image = (image.astype(np.float32) - 0.0692) / 0.2051
            image = image_to_tensor(image, normalize=None)
        else:
            image = np.array(image)
            image = (image.astype(np.float32) - 0.0692) / 0.2051
            image = image_to_tensor(image, normalize=None)
        input_dic["image"] = image
        if self.y is not None:
            label1 = self.y.vowel_diacritic.values[idx]
            label2 = self.y.grapheme_root.values[idx]
            label3 = self.y.consonant_diacritic.values[idx]
            return input_dic, label1, label2, label3
        else:
            return input_dic
hidden_size = 64
channel_size = 1
class ResNet18(nn.Module):
    """ResNet-18-style CNN with three classification heads.

    A shared convolutional trunk produces a 512-d embedding, from which
    three linear heads predict the vowel diacritic (11 classes), grapheme
    root (168 classes) and consonant diacritic (7 classes).
    """

    def __init__(self):
        super(ResNet18,self).__init__()
        # Stem: single strided conv that downsamples the 1-channel input.
        self.block1 = nn.Sequential(
            nn.Conv2d(channel_size,hidden_size,kernel_size=2,stride=2,padding=3,bias=False),
            nn.BatchNorm2d(hidden_size),
            nn.ReLU(True)
        )
        # Four residual stages; each stage doubles the channel count and
        # ends with a stride-2 block that halves the spatial resolution.
        self.block2 = nn.Sequential(
            nn.MaxPool2d(1,1),
            ResidualBlock(hidden_size,hidden_size),
            ResidualBlock(hidden_size,hidden_size,2)
        )
        self.block3 = nn.Sequential(
            ResidualBlock(hidden_size,hidden_size*2),
            ResidualBlock(hidden_size*2,hidden_size*2,2)
        )
        self.block4 = nn.Sequential(
            ResidualBlock(hidden_size*2,hidden_size*4),
            ResidualBlock(hidden_size*4,hidden_size*4,2)
        )
        self.block5 = nn.Sequential(
            ResidualBlock(hidden_size*4,hidden_size*8),
            ResidualBlock(hidden_size*8,hidden_size*8,2)
        )
        self.avgpool = nn.AvgPool2d(2)
        # Project the flattened features to a shared 512-d embedding.
        # NOTE(review): 512*4 assumes a 128x128 input leaves a 2x2 spatial
        # map after avgpool — confirm if the input size changes.
        self.fc = nn.Linear(512*4,512)
        # vowel_diacritic
        self.fc1 = nn.Linear(512,11)
        # grapheme_root
        self.fc2 = nn.Linear(512,168)
        # consonant_diacritic
        self.fc3 = nn.Linear(512,7)

    def forward(self,x):
        # Shared trunk.
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.avgpool(x)
        x = x.view(x.size(0),-1)
        x = self.fc(x)
        # Three task-specific heads share the same embedding.
        x1 = self.fc1(x)
        x2 = self.fc2(x)
        x3 = self.fc3(x)
        return x1,x2,x3
with timer('load csv data'):
fold_id = 0
epochs = 15
batch_size = 64
train = pd.read_csv('input/train.csv')
y = train[["grapheme_root", "vowel_diacritic", "consonant_diacritic"]]
num_folds = 5
train_idx, val_idx = train_test_split(train.index.tolist(), test_size=0.2, random_state=SEED, stratify=train["grapheme_root"])
gc.collect()
with timer('load feather data'):
train_path = [
'input/resize_cropped_128_train_image_data_0.feather',
'input/resize_cropped_128_train_image_data_1.feather',
'input/resize_cropped_128_train_image_data_2.feather',
'input/resize_cropped_128_train_image_data_3.feather'
]
data0 = pd.read_feather(train_path[0])
data1 = pd.read_feather(train_path[1])
data2 = pd.read_feather(train_path[2])
data3 = pd.read_feather(train_path[3])
data = pd.concat([data0, data1, data2, data3])
print(data.shape)
del data0, data1, data2, data3
gc.collect()
with timer('prepare validation data'):
y_train = y.iloc[train_idx]
train_dataset = BengaliAIDataset(data.iloc[train_idx], y=y_train, transform=data_transforms)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size*4, shuffle=True, num_workers=0, pin_memory=True)
y_val = y.iloc[val_idx]
val_dataset = BengaliAIDataset(data.iloc[val_idx], y=y_val, transform=data_transforms_test)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size*2, shuffle=False, num_workers=0, pin_memory=True)
del train_dataset, val_dataset
gc.collect()
with timer('create model'):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# model = torchvision.models.resnet50(pretrained=True)
# model.load_state_dict(torch.load("../input/pytorch-pretrained-models/resnet101-5d3b4d8f.pth"))
model = ResNet18()
model = model.to(device)
criterion = nn.CrossEntropyLoss(reduction='mean').to(device)
optimizer = optim.Adam(model.parameters(), lr=4e-4)
scheduler = GradualWarmupScheduler(optimizer, multiplier=1.1, total_epoch=5,
after_scheduler=None)
with timer('training loop'):
best_score = -999
best_epoch = 0
for epoch in range(1, epochs + 1):
LOGGER.info("Starting {} epoch...".format(epoch))
# tr_loss = train_one_epoch(model, train_loader, criterion, optimizer, device)
tr_loss = train_one_epoch_mixup_cutmix(model, train_loader, criterion, optimizer, device)
LOGGER.info('Mean train loss: {}'.format(round(tr_loss, 5)))
val_pred, y_true, val_loss = validate(model, val_loader, criterion, device)
score = macro_recall(y_true, val_pred)
LOGGER.info('Mean valid loss: {} score: {}'.format(round(val_loss, 5), round(score, 5)))
if score > best_score:
best_score = score
best_epoch = epoch
torch.save(model.state_dict(), os.path.join(OUT_DIR, '{}_fold{}.pth'.format(EXP_ID, fold_id)))
np.save(os.path.join(OUT_DIR, "{}_fold{}.npy".format(EXP_ID, fold_id)), val_pred)
scheduler.step()
LOGGER.info("best score={} on epoch={}".format(best_score, best_epoch))
| [
"osuosuossu18@gmail.com"
] | osuosuossu18@gmail.com |
16d7d093326b863bf363be47886a033059a9f1f4 | 96f181736c9975adfabd45cc776cab7a37d2e7a1 | /transformer/SubLayers.py | 636b67878e40d8e50b27f1d0f49b7d9bf2797668 | [
"MIT"
] | permissive | fangxiaoquan/transformer-pytorch | 6b43fb75635bb512c38c6f2ac8ec306b6e6ba5d9 | c9c5c81151c37ad7a088ea96aa5248fd4f4ad2d1 | refs/heads/master | 2020-05-17T00:36:59.073875 | 2019-03-17T14:42:02 | 2019-03-17T14:42:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,844 | py | ''' Define the sublayers in encoder/decoder layer '''
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from transformer.Modules import ScaledDotProductAttention
class MultiHeadAttention(nn.Module):
    ''' Multi-Head Attention module.

    Projects queries/keys/values for all heads jointly, runs scaled
    dot-product attention per head, then concatenates the heads and
    projects back to ``d_model`` with dropout, a residual connection and
    layer normalisation.
    '''

    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
        super().__init__()

        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v

        # One Linear per projection covers all heads at once.
        self.w_qs = nn.Linear(d_model, n_head * d_k)
        self.w_ks = nn.Linear(d_model, n_head * d_k)
        self.w_vs = nn.Linear(d_model, n_head * d_v)
        nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
        nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))

        # Scores are scaled by sqrt(d_k) inside the attention module.
        self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
        self.layer_norm = nn.LayerNorm(d_model)

        self.fc = nn.Linear(n_head * d_v, d_model)
        nn.init.xavier_normal_(self.fc.weight)

        self.dropout = nn.Dropout(dropout)

    def forward(self, q, k, v, mask=None):
        """Apply multi-head attention; returns (output, attention weights)."""
        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head

        sz_b, len_q, _ = q.size()
        sz_b, len_k, _ = k.size()
        sz_b, len_v, _ = v.size()

        residual = q

        q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
        k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
        v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)

        # Fold the head dimension into the batch dimension.
        q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
        k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
        v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv

        if mask is not None:
            # Bug fix: ``mask`` defaults to None, but the previous code
            # called .repeat() on it unconditionally and crashed whenever no
            # mask was supplied.  A None mask is passed straight through;
            # presumably ScaledDotProductAttention skips masking for
            # None — verify against its implementation.
            mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..
        output, attn = self.attention(q, k, v, mask=mask)

        # Un-fold the heads and concatenate them along the feature axis.
        output = output.view(n_head, sz_b, len_q, d_v)
        output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)

        output = self.dropout(self.fc(output))
        output = self.layer_norm(output + residual)

        return output, attn
class PositionwiseFeedForward(nn.Module):
    ''' Position-wise two-layer feed-forward block.

    Applies ``w_2(relu(w_1(x)))`` independently at every sequence
    position (implemented as 1x1 convolutions over the transposed
    sequence), followed by dropout, a residual connection and layer
    normalisation.
    '''

    def __init__(self, d_in, d_hid, dropout=0.1):
        super().__init__()
        # 1x1 convolutions act as per-position linear layers.
        self.w_1 = nn.Conv1d(d_in, d_hid, 1) # position-wise
        self.w_2 = nn.Conv1d(d_hid, d_in, 1) # position-wise
        self.layer_norm = nn.LayerNorm(d_in)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        residual = x
        # Conv1d expects (batch, channels, length), so transpose in and out.
        hidden = F.relu(self.w_1(x.transpose(1, 2)))
        projected = self.w_2(hidden).transpose(1, 2)
        return self.layer_norm(self.dropout(projected) + residual)
| [
"1049136551@qq.com"
] | 1049136551@qq.com |
214e9acf4f63476ff161a03f36b9d65e5158d29c | e8c5d8a473a71f88616b692dcdfd2d604485f580 | /test_corrector.py | 1db70ec4aff76eef61b92ad17796f78834360551 | [
"Apache-2.0"
] | permissive | modernYan/corrector | a6907bf1dc0e5e91048704365b4ade7327939592 | 75c86075ea51a53ebe6d649d729e690e3b414f7a | refs/heads/master | 2021-04-03T01:51:33.407724 | 2018-03-07T12:49:45 | 2018-03-07T12:49:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | # -*- coding: utf-8 -*-
# Author: XuMing <xuming624@qq.com>
# Brief:
import unittest
from pycorrector.cn_spell import correct
class BasicTestSuite(unittest.TestCase):
    """Smoke tests for pycorrector's spell correction.

    Each case feeds a sentence containing deliberate character errors to
    ``correct`` and prints the before/after pair; there are no assertions,
    so these only check that correction runs without raising.
    """

    @staticmethod
    def test_text1():
        # Sentence with phonetically-confused characters.
        error_sentence_1 = '机七学习是人工智能领遇最能体现智能的一个分知'
        correct_sent = correct(error_sentence_1)
        print("original sentence:{} => correct sentence:{}".format(error_sentence_1, correct_sent))

    @staticmethod
    def test_text2():
        # Longer sentence mixing correct and erroneous characters.
        error_sentence_2 = '杭洲是中国的八大古都之一,因风景锈丽,享有"人间天棠"的美誉!'
        correct_sent = correct(error_sentence_2)
        print("original sentence:{} => correct sentence:{}".format(error_sentence_2, correct_sent))

    @staticmethod
    def test_text3():
        # Sentence containing quotes and punctuation around the errors.
        error_sentence_3 = '我们现今所"使用"的大部分舒学符号,你们用的什么婊点符号'
        correct_sent = correct(error_sentence_3)
        print("original sentence:{} => correct sentence:{}".format(error_sentence_3, correct_sent))
unittest.main()
| [
"507153809@qq.com"
] | 507153809@qq.com |
249e8bc1a5af99696827d3c63022ba2318489a67 | cff7cf6a76d9c3009ba4092c65656cefa4261d91 | /pruning.py | c59123519f0ecae5a8e792264f02cdb5308c718e | [] | no_license | anyk8/ailab | d86c5947d9a2f71a4fdab44e38c41a28013b6594 | cb436cb3021317a2303b3784c1856098730a3d26 | refs/heads/main | 2023-02-07T00:58:02.064506 | 2020-12-23T20:10:37 | 2020-12-23T20:10:37 | 323,974,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,430 | py | MAX, MIN = 1000, -1000
# Returns optimal value for current player
#(Initially called for root and maximizer)
def minimax(depth, nodeIndex, maximizingPlayer,
values, alpha, beta):
# Terminating condition. i.e
# leaf node is reached
if depth == 3:
return values[nodeIndex]
if maximizingPlayer:
best = MIN
# Recur for left and right children
for i in range(0, 2):
val = minimax(depth + 1, nodeIndex * 2 + i,
False, values, alpha, beta)
best = max(best, val)
alpha = max(alpha, best)
# Alpha Beta Pruning
if beta <= alpha:
break
return best
else:
best = MAX
# Recur for left and
# right children
for i in range(0, 2):
val = minimax(depth + 1, nodeIndex * 2 + i,
True, values, alpha, beta)
best = min(best, val)
beta = min(beta, best)
# Alpha Beta Pruning
if beta <= alpha:
break
return best
# Driver Code
if __name__ == "__main__":
values = [3, 5, 6, 9, 1, 2, 0, -1]
print("The optimal value is :", minimax(0, 0, True, values, MIN, MAX))
| [
"noreply@github.com"
] | anyk8.noreply@github.com |
e0409dc7eb85b5766250889ef408b577012505a7 | 8338bde799fab50fa28b3c9e85035fce12f1e152 | /src/crystal_analysis/fluctuations.py | a31246b51e26e6bab8b8c6c145aa242d5bfe1576 | [
"MIT"
] | permissive | malramsay64/Crystal_Melting | c5941ad261ef71f1357d6064302344b093b22b53 | e8305928b06b536d7293cb751963d058d55627aa | refs/heads/master | 2021-03-24T10:24:23.291821 | 2020-08-07T07:19:09 | 2020-08-07T07:19:09 | 119,946,491 | 0 | 0 | MIT | 2020-02-12T07:35:47 | 2018-02-02T07:13:03 | Python | UTF-8 | Python | false | false | 8,047 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""A module to measure the structural fluctuations of each state.
The concept of this module is to understand how much particles within each state are
moving, to get an idea of the likelihood of transitioning from one state to anther.
States which are highly constrained will allow small amounts of motion, while states
which are more flexible will be able to rapidly change configuration.
"""
import logging
from pathlib import Path
from typing import Tuple
import click
import numpy as np
import pandas as pd
import scipy.optimize
from pandas.api.types import CategoricalDtype
from sdanalysis import order
from sdanalysis.read import open_trajectory
from sdanalysis.util import get_filename_vars
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@click.group()
def main():
    """Entry point grouping the fluctuation-analysis sub-commands."""
    pass
# Fixed histogram grid: 5000 equal-width bins spanning [-1, 1]
# (bin width 4e-4), with pre-computed bin centres.
BINS = np.linspace(-1, 1, 5001)
BIN_VALUES = (BINS[1:] + BINS[:-1]) / 2


def aggregate(values: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Histogram *values* on the fixed [-1, 1] grid, keeping occupied bins.

    Values are binned into 5000 equal-width bins (width 4e-4) as a
    normalised density; only bins with a non-zero count are returned.

    Args:
        values: the collection of order-parameter values to bin.

    Returns:
        centers: the centres of the occupied bins.
        counts: the density value of each occupied bin.
    """
    density, _ = np.histogram(values, bins=BINS, density=True)
    occupied = density.nonzero()
    return BIN_VALUES[occupied], density[occupied]
def gaussian(x, A, mu, sigma):
    """Evaluate an unnormalised Gaussian: amplitude A, mean mu, width sigma."""
    z = (x - mu) / sigma
    return A * np.exp(-0.5 * z * z)
def fit_gaussian(bins: np.ndarray, count: np.ndarray):
    """Least-squares fit of a Gaussian to a binned distribution.

    Returns the fitted (A, mu, sigma) coefficients from
    ``scipy.optimize.curve_fit``; raises RuntimeError when the fit fails
    to converge within ``maxfev`` evaluations.
    """
    # Initial guess at parameter values: unit amplitude, zero mean, unit width.
    p0 = (1.0, 0.0, 1.0)
    coeffs, _ = scipy.optimize.curve_fit(gaussian, bins, count, p0=p0, maxfev=2000)
    return coeffs
@main.command()
@click.argument("output", type=click.Path(file_okay=True, dir_okay=False))
@click.argument(
    "infiles", nargs=-1, type=click.Path(exists=True, file_okay=True, dir_okay=False)
)
def collate_disc(output, infiles):
    """Collate per-run hexatic-order CSV files into one HDF5 store.

    Each input CSV is reduced to a histogram of its ``hexatic_order``
    column, labelled with the thermodynamic state parsed from the
    filename, and appended to the ``ordering`` table of *output*.
    """
    with pd.HDFStore(output) as dst:
        for file in infiles:
            file = Path(file)
            print(file)
            df = pd.read_csv(file)
            # The thermodynamic state is encoded in the filename.
            fvars = get_filename_vars(file)
            df["temperature"] = float(fvars.temperature)
            df["pressure"] = float(fvars.pressure)
            # Runs without a crystal label are liquid configurations.
            if fvars.crystal is None:
                crystal = "liquid"
            else:
                crystal = fvars.crystal
            df["crystal"] = crystal
            # Reduce per-particle values to the occupied histogram bins.
            bin_values, count = aggregate(df["hexatic_order"])
            df = pd.DataFrame(
                {
                    "temperature": float(df["temperature"].values[0]),
                    "pressure": float(df["pressure"].values[0]),
                    "crystal": df["crystal"].values[0],
                    "bins": bin_values,
                    "count": count,
                    # Density times bin width gives a per-bin probability.
                    "probability": count * (BINS[1] - BINS[0]),
                }
            )
            # A fixed category set keeps the HDF5 table schema consistent
            # across appends.
            df["crystal"] = df["crystal"].astype(
                CategoricalDtype(
                    categories=["SquareCircle", "HexagonalCircle", "liquid"]
                )
            )
            dst.append("ordering", df)
@main.command()
@click.argument("output", type=click.Path(file_okay=True, dir_okay=False))
@click.argument(
    "infiles", nargs=-1, type=click.Path(exists=True, file_okay=True, dir_okay=False)
)
def collate(output, infiles):
    """Collate orientational-order results into one HDF5 store.

    Accepts either ``.h5`` files (already containing an ``ordering``
    table) or per-run ``.csv`` files; each is reduced to a histogram of
    the ``orientational_order`` column and appended to *output*.
    """
    with pd.HDFStore(output) as dst:
        for file in infiles:
            file = Path(file)
            print(file)
            if file.suffix == ".h5":
                # Pre-computed results: the state columns already exist.
                with pd.HDFStore(file) as src:
                    df = src.get("ordering")
            elif file.suffix == ".csv":
                df = pd.read_csv(file)
                # Normalise the legacy column name.
                df = df.rename(columns={"orient_order": "orientational_order"})
                # The thermodynamic state is encoded in the filename.
                fvars = get_filename_vars(file)
                df["temperature"] = float(fvars.temperature)
                df["pressure"] = float(fvars.pressure)
                if fvars.crystal is None:
                    crystal = "liquid"
                else:
                    crystal = fvars.crystal
                df["crystal"] = crystal
            else:
                raise ValueError("Filetype is not supported")
            # Reduce per-particle values to the occupied histogram bins.
            bin_values, count = aggregate(df["orientational_order"])
            df = pd.DataFrame(
                {
                    "temperature": float(df["temperature"].values[0]),
                    "pressure": float(df["pressure"].values[0]),
                    "crystal": df["crystal"].values[0],
                    "bins": bin_values,
                    "count": count,
                    # Density times bin width gives a per-bin probability.
                    "probability": count * (BINS[1] - BINS[0]),
                }
            )
            # Fixed category set keeps the HDF5 schema consistent.
            df["crystal"] = df["crystal"].astype(
                CategoricalDtype(categories=["p2", "p2gg", "pg", "liquid"])
            )
            dst.append("ordering", df)
@main.command()
@click.argument("infile", type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.argument("outfile", type=click.Path(file_okay=True, dir_okay=False))
def analyse(infile, outfile):
    """Compute per-molecule orientational order for every frame of a trajectory.

    Each frame of *infile* yields one dataframe row per molecule, labelled
    with the thermodynamic state from the filename; all frames are
    concatenated and written to the ``ordering`` table of *outfile*.
    """
    dataframes = []
    file_vars = get_filename_vars(infile)
    crystal = file_vars.crystal
    # Runs without a crystal label are liquid configurations.
    if crystal is None:
        crystal = "liquid"
    for snap in open_trajectory(infile, progressbar=True):
        orientational_order = order.orientational_order(
            snap.box, snap.position, snap.orientation
        )
        df = pd.DataFrame(
            {
                "molecule": np.arange(snap.num_mols),
                "orientational_order": orientational_order,
                "temperature": float(file_vars.temperature),
                "pressure": float(file_vars.pressure),
                "crystal": crystal,
            }
        )
        df["crystal"] = df["crystal"].astype("category")
        dataframes.append(df)
    with pd.HDFStore(outfile) as dst:
        dst.append("ordering", pd.concat(dataframes))
@main.command()
@click.argument("outfile", type=click.Path(file_okay=True, dir_okay=False))
@click.argument(
    "infiles", nargs=-1, type=click.Path(exists=True, file_okay=True, dir_okay=False)
)
def thermodynamics(outfile, infiles):
    """Summarise thermodynamic log files into per-state mean/std tables.

    Each input is a tab-separated simulation log.  Quantities are
    converted to per-particle values, the first (equilibration) half is
    discarded, total energy and enthalpy are derived, and the results are
    aggregated by (pressure, temperature, crystal) into the ``thermo``
    table of *outfile*.
    """
    dfs = []
    for filename in infiles:
        fvars = get_filename_vars(filename)
        df = pd.read_csv(filename, sep="\t")
        # All the values are written to the same output file, so make sure
        # there is only a single trajectory worth of values.
        df = df.drop_duplicates("timestep", keep="last")
        # Convert extensive quantities to per-particle values.
        df = df.div(df.N, axis=0)
        # Take the second half of the values to ensure there is no issue with
        # equilibration.
        df = df.iloc[len(df) // 2 :, :]
        # Calculate Total Energy
        df["total_energy"] = df["kinetic_energy"] + df["potential_energy"]
        # Calculate enthalpy.
        # This is the total energy (potential + kinetic) + the configuration
        # energy (pV).  The multiplication by N is because the pressure was
        # also divided by N above.
        df["enthalpy"] = (
            df["potential_energy"]
            + df["kinetic_energy"]
            + df["pressure"] * df["volume"] * df.N
        )
        # Runs without a crystal label are liquid configurations.
        if fvars.crystal is not None:
            df["crystal"] = fvars.crystal
        else:
            df["crystal"] = "liquid"
        df["pressure"] = float(fvars.pressure)
        df["temperature"] = float(fvars.temperature)
        df = df.set_index(["pressure", "temperature", "crystal"])
        # Perform aggregations on the dataframe, making it much easier to work with.
        df = df.groupby(["pressure", "temperature", "crystal"]).agg(["mean", "std"])
        dfs.append(df)
    pd.concat(dfs).to_hdf(outfile, "thermo")
if __name__ == "__main__":
main()
| [
"malramsay64@gmail.com"
] | malramsay64@gmail.com |
4c2de55d37b6463dc9cb09e1b3fab791b94fb59f | 1842d2e7989b9fb1bdd6edff2b2ce187ca9f27ad | /BIOMD0000000484/model.py | 9d53fe8ba1862ad4b82c03c37bafa36698e3a221 | [
"CC0-1.0"
] | permissive | biomodels/BIOMD0000000484 | cc08199b3d324bf10425829755d70e67d52b155d | 293ac221c1615e7446f55960cff130f784243220 | refs/heads/master | 2016-09-06T17:32:40.282597 | 2014-10-16T05:17:52 | 2014-10-16T05:17:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'BIOMD0000000484.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
    """Return True when *module_name* can be imported, False otherwise."""
    try:
        __import__(module_name)
    except ImportError:
        return False
    return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString) | [
"stanleygu@gmail.com"
] | stanleygu@gmail.com |
743b71909c584becf61641187deafdd92d6211f5 | 9a791a7d91fd6706b1c1d99a35e88d0d6ec50305 | /inventory/wsgi.py | 89d49fb96df012997a592d7cc95ab057e4b8c643 | [] | no_license | farhapartex/inventory-system-api | f534ad4676f2f7d674e46ead9a2e272a028d7b9e | 1ba1c17ee6e58a59a5937d0de14a352fd76b89fe | refs/heads/master | 2023-07-17T12:04:40.953901 | 2021-09-01T04:38:02 | 2021-09-01T04:38:02 | 395,754,219 | 2 | 1 | null | 2021-08-31T21:07:01 | 2021-08-13T18:26:12 | Python | UTF-8 | Python | false | false | 397 | py | """
WSGI config for inventory project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# NOTE(review): this WSGI entry point defaults to the *dev* settings module
# ('inventory.config.dev'); a production deployment would normally point at a
# prod/base settings path -- confirm this is intentional.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'inventory.config.dev')
# Module-level WSGI callable picked up by the application server (gunicorn/uwsgi).
application = get_wsgi_application()
| [
"nazmul@sixads.net"
] | nazmul@sixads.net |
0dbdf5bc26ef8d897eb91a748996cad40f21c8e3 | 869d300c764911d468f5b1e5a98e5d07a27535ab | /python-selenium/baiscs/file_operation.py | 25acf23e4f3b95c1ee7ef7962a1c90ddd7e3ef56 | [] | no_license | thananauto/python-test-frameworks | df5962996cd9c4cded9355fef6cb2a099c69e3b1 | abaf9d11d8c65f2cd9f916b241898ad11e26bf43 | refs/heads/master | 2022-12-15T18:14:57.745930 | 2019-12-16T13:51:04 | 2019-12-16T13:51:04 | 228,375,228 | 0 | 0 | null | 2022-09-16T18:15:48 | 2019-12-16T11:47:11 | Python | UTF-8 | Python | false | false | 1,198 | py | import os.path as path
class file_operation(object):
    """Small demo of reading, writing and appending text files.

    Method and class names are kept for backward compatibility.  Each method
    now accepts an optional ``file_name`` whose default is the value that was
    previously hard-coded, so existing callers behave exactly as before.
    All file handles are managed with ``with`` so they are closed even when
    an I/O call raises (the originals leaked the handle on error).
    """

    def read_file(self, file_name='HelloWorld.py'):
        """Print *file_name* line by line, or a notice when it is missing."""
        if not path.isfile(file_name):
            print("file does not exist")
        else:
            # read the lines in the file
            with open(file_name) as f:
                content = f.read().splitlines()
            # print the content line by line
            for line in content:
                print(line)

    def write_file(self, file_name='New_file.txt'):
        """Create (or overwrite) *file_name* with a fixed line of text."""
        with open(file_name, 'w') as my_file:
            my_file.write("This is new content in the file")

    def append_file(self, file_name='New_file.txt'):
        """Append a second fixed line of text to *file_name*."""
        with open(file_name, 'a') as my_file:
            my_file.write("\nThis is second line of content in the file")
if __name__ == '__main__':
reading = file_operation()
#reading.read_file()
reading.write_file()
reading.append_file() | [
"r.thananjayan@superp.nl"
] | r.thananjayan@superp.nl |
1dc318f1cbd37469290cdc9346b413cfb2444c2a | e3d758e32b5c3398826e77a07e00645b31c1cbb5 | /app/tests/test_algorithm.py | 67bc3937e3bd78648b62ed0c27ce1683384ce160 | [] | no_license | vittorfp/production-ml-model | a869d1856249f5954519aa34271015127787d377 | 1e71ee8e55f026a7626409a49b264bee3db535d0 | refs/heads/master | 2022-04-14T23:09:28.170591 | 2020-04-12T03:06:52 | 2020-04-12T03:06:52 | 253,011,072 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,565 | py | import numpy as np
import pandas as pd
from .common import *
test_cases = pd.read_csv('./app/tests/test_resources/test_samples_1.0.csv')
def test_municipal_boundary_1(client):
    """ Point out of municipal boundary """
    resp = client.get('/predict', query_string=BaseInputs.valid_outside)
    # An out-of-boundary point must be a client error (400), never a crash.
    assert resp.status_code != 500
    assert resp.status_code == 400
def test_municipal_boundary_2(client):
    """ Point inside municipal boundary """
    resp = client.get('/predict', query_string=BaseInputs.campinas)
    # An in-boundary point must succeed outright.
    assert resp.status_code != 400
    assert resp.status_code != 500
    assert resp.status_code == 200
@pytest.mark.parametrize("index,case", test_cases.iterrows())
def test_model_outputs_1(client, mocker, index, case):
    """For each labelled sample: echoed inputs, counts and score must match."""
    # Keep only the POI-count columns, sorted to the repository's column order.
    poi_counts = pd.DataFrame(case).T.drop(
        columns=['Unnamed: 0', 'latitude', 'longitude', 'tipo_POI', 'response']
    )
    poi_counts = poi_counts[sorted(poi_counts.columns)]
    # Bypass the spatial query: feed the sample's own counts to the model.
    mocker.patch(
        'app.repository.data_repository.DataRepository.get_points_count',
        return_value=poi_counts,
    )
    resp = client.get('/predict', query_string={'lat': case.latitude, 'lng': case.longitude})
    assert resp.status_code != 500
    assert resp.status_code == 200
    assert resp.json['latitude'] == case.latitude
    assert resp.json['longitude'] == case.longitude
    assert resp.json['n_grandes_concorrentes'] == case.concorrentes__grandes_redes
    assert resp.json['n_pequeno_varejista'] == case.concorrentes__pequeno_varejista
    assert np.isclose(resp.json['predicao'], case.response, atol=1e-8)
| [
"vittorfpereira@gmail.com"
] | vittorfpereira@gmail.com |
563a9e4d56769ebc277637da90f87d84e0eb46b2 | 16dcbf88ae9514109151fe5ff447b2b653ddf48b | /2016/035-TheasGame/thea.py | 60cf62e1e123e8f303f839ac3e8286c58ec426a6 | [] | no_license | ChristerNilsson/Lab | efa55ef5e79dff84b232dfcf94473eacdb263175 | b1f730f45ec6e901bd14c1e4196aa5e0f591ecd2 | refs/heads/master | 2023-07-06T04:35:09.458936 | 2023-06-24T21:40:54 | 2023-06-24T21:40:54 | 48,474,249 | 8 | 8 | null | 2022-12-10T07:03:31 | 2015-12-23T06:51:11 | JavaScript | UTF-8 | Python | false | false | 690 | py | import pygame
pygame.init()
windowSurface = pygame.display.set_mode((1000, 750), pygame.DOUBLEBUF)

done = False
while not done:
    # --- Main event loop
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True

    s = pygame.Surface((1000, 750), pygame.SRCALPHA)  # per-pixel alpha
    s.fill((255, 255, 255, 128))  # notice the alpha value in the color
    pygame.draw.circle(s, pygame.Color(255, 0, 0, 128), (100, 100), 100)
    pygame.draw.circle(windowSurface, pygame.Color(0, 255, 0, 128), (150, 100), 100)
    # BUG FIX: Surface.blit()'s third positional argument is *area* (a source
    # rect), not blend flags -- passing BLEND_RGBA_ADD there is an error.
    # Blend flags belong to the special_flags parameter.
    windowSurface.blit(s, (0, 0), special_flags=pygame.BLEND_RGBA_ADD)
    s.fill((255, 255, 255))
    pygame.display.flip()
pygame.display.flip() | [
"janchrister.nilsson@gmail.com"
] | janchrister.nilsson@gmail.com |
9f65aa514c86af51f392a4fcb1202ccb289946a0 | 5c04b334c27dbef10cc7aef7a774d639bfcf50da | /Test_abb_demo.py | 8b9e0ae0ee331da6913b9a6eda02f13973a7bf94 | [
"MIT"
] | permissive | qingqingcaoyuanlin/Python-UIAutomation-for-Windows | 0043df5defcd8f9797c65e58bce0762cb6e434b7 | 1f5191cdf554646c659699758fe14c904381eb66 | refs/heads/master | 2021-01-13T03:36:23.416991 | 2017-01-07T04:54:27 | 2017-01-07T04:54:27 | 77,318,593 | 0 | 0 | null | 2016-12-25T09:07:29 | 2016-12-25T09:07:29 | null | UTF-8 | Python | false | false | 2,087 | py | # coding=utf-8
import os
import sys
import subprocess
import uiautomation
import time
#import wmi
from win32com.client import GetObject
TestAppName = "ABB HB Management Center.exe"
def Is64Winows():
    # (Name typo kept -- callers use it.)  64-bit Windows always defines this
    # environment variable; 32-bit Windows does not.
    return os.environ.get('PROGRAMFILES(X86)') is not None
def AppPath():
    """Return the install path of the app for this Windows bitness."""
    if Is64Winows():
        # print() with a single argument behaves identically on Python 2 and 3,
        # replacing the Python-2-only `print 64` statement form.
        print(64)
        return "C:\Program Files (x86)\ABB HB Management Center\ABB HB Management Center.exe"
    else:
        print(32)
        return "C:\Program Files\ABB HB Management Center\ABB HB Management Center.exe"
def CheckAppRunning(imagename):
    """Return True when a process named *imagename* is currently running.

    Queries WMI via win32com.  Per the original author's note (translated):
    GetObject("winmgmts:") from `win32com.client` works directly here, while
    `import win32com` + `win32com.client.GetObject('winmgmts:')` had problems
    for unknown reasons.
    """
    objWMIService = GetObject("winmgmts:")
    colProcesses = objWMIService.ExecQuery("Select * from Win32_Process")
    for objProcess in colProcesses:
        if objProcess.Name == imagename:
            # Dump a few details about the matching process.
            # (print() with one argument is valid Python 2 and 3.)
            print("Process:" + objProcess.Name)
            print("Process ID: " + str(objProcess.ProcessID))
            print("Working Set Size: " + str(objProcess.WorkingSetSize))
            print("Page File Size: " + str(objProcess.PageFileUsage))
            print("Page Faults: " + str(objProcess.PageFaults))
            return True
    return False
x = raw_input("x:")
print x
if CheckAppRunning(TestAppName) == False:
print u"准备运行程序"
subprocess.Popen(AppPath())
time.sleep(.5)
window_login = uiautomation.WindowControl(searchDepth = 1, ClassName = 'WindowsForms10.Window.8.app.0.3e799b_r15_ad1')
button_exit = window_login.ButtonControl(AutomationId = 'exitbtn')
button_login = window_login.ButtonControl(AutomationId = 'loginbtn')
textBox_passwd = window_login.EditControl(AutomationId = 'passwdtextBox')
textBox_passwd.Click()
window_login.SendKeys('123456')
button_login.Click()
else:
print u"程序已运行" | [
"1451368913@qq.com"
] | 1451368913@qq.com |
96e362619f5e1ca63b616907a81a83d7ad5268b9 | 7a17f9e6706b6e3f6d55c8e30f0dcec97f495541 | /src/hyperka/hyperbolic/manifold.py | e5bfa0abf060d8a9093cb836b10fa666c81e5cfe | [
"MIT"
] | permissive | HELL-TO-HEAVEN/HyperKA | 8d097c58e0188961de6e4ea74f214e40d9408a04 | cadaf824a739b55211997e73d9948ddbfbe7ce83 | refs/heads/main | 2023-03-30T04:20:31.477323 | 2021-03-25T07:56:55 | 2021-03-25T07:56:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | from abc import abstractmethod
from abc import ABC
class Manifold(ABC):
    """Abstract interface that concrete manifold implementations fill in."""

    def __init__(self, *args, **kwargs):
        pass

    @property
    def name(self):
        # Concrete manifolds must expose a human-readable name.
        raise NotImplementedError

    @staticmethod
    def dim(dim):
        # By default the embedding dimension is used unchanged.
        return dim

    def normalize(self, u):
        # Identity by default; subclasses may project points back onto the manifold.
        return u

    @abstractmethod
    def distance(self, u, v):
        """Distance function between *u* and *v*."""
        raise NotImplementedError

    @abstractmethod
    def expm(self, p, d_p, lr=None, out=None):
        """Exponential map of *d_p* at base point *p*."""
        raise NotImplementedError

    @abstractmethod
    def logm(self, x, y):
        """Logarithmic map of *y* at base point *x*."""
        raise NotImplementedError
| [
"sunzequn@live.cn"
] | sunzequn@live.cn |
c7cf64d046d44cec3e870758d1ac703c96d85b08 | 6a9acefd938ea7648d3f7662a54632f3e35f03ea | /train_sagemaker.py | a14177c5662a1d116c37c1e2940f3fddbb866a8a | [
"MIT"
] | permissive | ankushpanwar19/Multi-Task-Learning | 2a7c8c6c71add695e3ad1f7da91c2de154809375 | 1808ba1821ae3c4d414e31d583d31b15782daa38 | refs/heads/master | 2023-08-25T10:35:46.945803 | 2021-11-04T11:53:35 | 2021-11-04T11:53:35 | 424,565,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,414 | py | #!/usr/bin/env python
class Unbuffered(object):
    """Stream wrapper that flushes after every write/writelines call."""

    def __init__(self, stream):
        self.stream = stream

    def write(self, data):
        # Forward, then force the data out immediately.
        self.stream.write(data)
        self.stream.flush()

    def writelines(self, datas):
        self.stream.writelines(datas)
        self.stream.flush()

    def __getattr__(self, attr):
        # Delegate every other attribute (encoding, isatty, ...) to the
        # wrapped stream so this object is a drop-in replacement.
        return getattr(self.stream, attr)
import sys
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
import os
import traceback
log_dir = os.path.join(os.environ['SM_MODEL_DIR'], 'log')
dataset_root = os.environ['SM_CHANNEL_TRAINING']
exceptions_log_path = os.path.join(log_dir, 'exceptions.log')
def custom_excepthook(exc_type, exc_value, exc_traceback):
    """Append every uncaught exception to the log file, then echo it to stderr."""
    frames = traceback.format_tb(exc_traceback)
    msg = f'type={exc_type} value={exc_value} traceback={frames}\n'
    with open(exceptions_log_path, 'a') as log_file:
        log_file.write(msg)
    print(msg, file=sys.stderr)
sys.excepthook = custom_excepthook
from torchvision import datasets
from mtl.scripts.train import main
if __name__ == '__main__':
    # These flags are injected below, so the caller must not pass them too.
    assert '--log_dir' not in sys.argv
    assert '--dataset_root' not in sys.argv
    sys.argv.extend(['--log_dir', log_dir])
    sys.argv.extend(['--dataset_root', os.path.join(dataset_root, 'miniscapes')])
    # Unpack the dataset archive in place before handing control to training.
    datasets.utils.extract_archive(os.path.join(dataset_root, 'miniscapes.zip'), remove_finished=True)
    main()
| [
"ankushpanwar19@gmail.com"
] | ankushpanwar19@gmail.com |
97143f84e721ba0460659c1d77f50b0db0636348 | 877b524e74239cf9b12479d41bb813f4658f48f3 | /PyBoss/main.py | 130082f552663c5f32668864f0fa40a6bb99431d | [] | no_license | mkung8889/python-challenge | 37b3a12c2ceae1c27e140eca6c3976d7b3477fe2 | 257d23c33e3c6bbc1b1a1fa77b0b5ec5657daf31 | refs/heads/master | 2020-04-04T20:21:15.373905 | 2019-03-20T04:51:49 | 2019-03-20T04:51:49 | 156,243,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,787 | py | import os
import csv
csvpath = os.path.join("employee_data.csv")

# Read every data row; row 0 is the header.
rows = []
with open(csvpath, newline='') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=",")
    for i, row in enumerate(csvreader):
        if i == 0:
            header = i  # kept from the original: only the header *index* is stored
        else:
            rows.append(row)

# Split the raw rows into per-field columns.
empid = []
name = []
dob = []
ssn = []
state = []
for row in rows:
    empid.append(row[0])
    name.append(row[1])
    dob.append(row[2])
    ssn.append(row[3])
    state.append(row[4])

# "First Last" -> separate first/last name columns (second token only, as before).
firstname = []
lastname = []
for full_name in name:
    parts = full_name.split()
    firstname.append(parts[0])
    lastname.append(parts[1])

# Reformat YYYY-MM-DD as MM/DD/YYYY.
newdob = []
for date_str in dob:
    year, month, day = date_str.split("-")
    newdob.append(month + "/" + day + "/" + year)

# Mask all but the last four SSN digits.
ssncensored = []
for ssn_str in ssn:
    last4 = ssn_str.split('-')[-1]
    ssncensored.append("***-**-" + last4)

d = {
    'Alabama': 'AL',
    'Alaska': 'AK',
    'Arizona': 'AZ',
    'Arkansas': 'AR',
    'California': 'CA',
    'Colorado': 'CO',
    'Connecticut': 'CT',
    'Delaware': 'DE',
    'Florida': 'FL',
    'Georgia': 'GA',
    'Hawaii': 'HI',
    'Idaho': 'ID',
    'Illinois': 'IL',
    'Indiana': 'IN',
    'Iowa': 'IA',
    'Kansas': 'KS',
    'Kentucky': 'KY',
    'Louisiana': 'LA',
    'Maine': 'ME',
    'Maryland': 'MD',
    'Massachusetts': 'MA',
    'Michigan': 'MI',
    'Minnesota': 'MN',
    'Mississippi': 'MS',
    'Missouri': 'MO',
    'Montana': 'MT',
    'Nebraska': 'NE',
    'Nevada': 'NV',
    'New Hampshire': 'NH',
    'New Jersey': 'NJ',
    'New Mexico': 'NM',
    'New York': 'NY',
    'North Carolina': 'NC',
    'North Dakota': 'ND',
    'Ohio': 'OH',
    'Oklahoma': 'OK',
    'Oregon': 'OR',
    'Pennsylvania': 'PA',
    'Rhode Island': 'RI',
    'South Carolina': 'SC',
    'South Dakota': 'SD',
    'Tennessee': 'TN',
    'Texas': 'TX',
    'Utah': 'UT',
    'Vermont': 'VT',
    'Virginia': 'VA',
    'Washington': 'WA',
    'West Virginia': 'WV',
    'Wisconsin': 'WI',
    'Wyoming': 'WY',
}

# Full state name -> two-letter abbreviation.  A direct dict lookup replaces
# the original nested scan over all 50 entries per row (O(rows*50) -> O(rows)).
# NOTE(review): rows whose state is absent from `d` are silently dropped here,
# which shifts every later record out of alignment in the zip below; this
# matches the original behaviour but is worth confirming.
state_abbrev = []
for state_name in state:
    if state_name in d:
        state_abbrev.append(d[state_name])

newheader = ['Emp ID', 'First Name', 'Last Name', 'DOB', 'SSN', 'State']
new_employee_data = list(zip(empid, firstname, lastname, newdob, ssncensored, state_abbrev))
new_employee_data.insert(0, newheader)

with open('new_employee_data.csv', 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=',')
    for record in new_employee_data:
        csvwriter.writerow(record)
csvwriter.writerow(i) | [
"m8464k@gmail.com"
] | m8464k@gmail.com |
4f909656863aee7b5f4d20c715e2587664bed01e | ae5acf841aa3cd54e7f39d460d586b6a3ba24523 | /laptop/3Revesion/mojarProgram/prime_number-4.py | 7768c6a78970412b0af68cef5739cf936ff15f8d | [] | no_license | JayedHoshen/python-practic | dee9503811899790d323d8aa4ca8b89621a8eec8 | 2f930346cadfb4389f95d1bc2a421fec7ee48fcf | refs/heads/master | 2021-04-20T10:24:30.975522 | 2020-03-24T10:21:00 | 2020-03-24T10:21:00 | 249,674,734 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | import math
def is_prime4(n):
if n < 2:
return False
if n==2:
return True
if n % 2 ==0:
return False
m = math.sqrt(n)
m = int(m)+1
for x in range(3,m,2):
if n % x ==0:
return False
return True
| [
"jayedkn64@gmail.com"
] | jayedkn64@gmail.com |
a004eb4e54de656cea5ce095940f98897f035dc3 | 1c583fea1f145df1fd1ad918cc5837636ac31fac | /longest_word/solution.py | c8d7f3537248dac3d5ce8e6923af81560d5658df | [] | no_license | rafaelcastanheira/python_challenges | be09bd380c5eef2d7592cbf22e96c555ce293efb | 1421a9690620044bb376e253f4603a338e9b9a70 | refs/heads/master | 2023-02-07T03:37:29.248987 | 2020-12-27T01:08:17 | 2020-12-27T01:08:17 | 324,663,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | import re
def LongestWord(sen):
    """Return the longest space-separated word of *sen* after stripping
    every character that is not a letter or a space.  Ties keep the
    earliest word.
    """
    clean_sen = re.sub('[^a-zA-Z ]+', '', sen)
    words = clean_sen.split(" ")
    biggest_word = ""
    # Single-argument print() behaves identically on Python 2 and 3,
    # replacing the Python-2-only `print clean_sen` statement form.
    print(clean_sen)
    for word in words:
        if len(word) > len(biggest_word):
            biggest_word = word
    return biggest_word
# Parenthesised single-argument print is valid in both Python 2 and 3.
print(LongestWord("abc123 fjHB!D dj7483t4i4"))
# Another (better?) option
# def LongestWord(sen):
# ns = ""
# for i in sen:
# if i.isalpha() == True or i==" ":
# ns+=i
# else:
# ns+=""
# word = sorted(ns.split(" "),key=lambda x:len(x),reverse=True)[0]
# return word | [
"rafaelfrancisco@Rafaels-MBP.lan"
] | rafaelfrancisco@Rafaels-MBP.lan |
3f5d83c63578aa071d10dd553bc6853ce9f51a82 | 4a9e050b7728f61aacc10e5564359a5ab3616124 | /脑筋急转弯/292_Nim_Game.py | 32382e4167f8db29333bd4e69ab346ff9cd01882 | [
"MIT"
] | permissive | pdg0709/LeetCode_Python3_Solution | a3d779b12925f2f37396dee07dd9d2276222dab5 | 1ec4b73fcdec79382906d6ae20f82a273a76d984 | refs/heads/master | 2020-06-19T02:55:33.304005 | 2019-07-12T02:19:26 | 2019-07-12T02:19:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | # -*- coding:utf-8 -*-
# &Author AnFany
# 292_Nim_Game Nim游戏
class Solution:
    def canWinNim(self, n: int) -> bool:
        # (Translated from the original Chinese comments.)
        # Whoever faces exactly 4 stones loses, so the first player wins
        # iff the pile is NOT a multiple of 4: after the first move they
        # can always take (4 - opponent's last take) stones and keep
        # handing the opponent a multiple of 4.
        return n % 4 != 0
"noreply@github.com"
] | pdg0709.noreply@github.com |
2ad7dd43cec66e4027d9bdb249e97819d849cc61 | 1a80df8069c1f2bf67b00818529d39c34c19bee4 | /blog/tests.py | f08962a092a8dd4e03949a023e3dc89105e91104 | [] | no_license | subiee/do_it_django_a_to_z | ea6569f34bff79290d807d60eb4a2e57b0d7cec0 | d0cae35387234ac44bce827a73a856a8f4409413 | refs/heads/main | 2023-04-14T19:12:55.017482 | 2021-05-06T07:46:51 | 2021-05-06T07:46:51 | 330,678,964 | 0 | 0 | null | 2021-02-17T13:25:21 | 2021-01-18T13:43:16 | JavaScript | UTF-8 | Python | false | false | 19,521 | py | from django.test import TestCase, Client
from bs4 import BeautifulSoup
from django.contrib.auth.models import User
from .models import Post, Category, Tag, Comment
class TestView(TestCase):
def setUp(self):
self.client = Client()
self.user_trump = User.objects.create_user(username='trump', password='somepassword')
self.user_obama = User.objects.create_user(username='obama', password='somepassword')
self.user_obama.is_staff = True
self.user_obama.save()
self.category_programming = Category.objects.create(name='programming', slug='programming')
self.category_music = Category.objects.create(name='music', slug='music')
self.tag_python_kor = Tag.objects.create(name='파이썬 공부', slug='파이썬-공부')
self.tag_python = Tag.objects.create(name='python', slug='python')
self.tag_hello = Tag.objects.create(name='hello', slug='hello')
self.post_001 = Post.objects.create(
title='첫번째 포스트입니다.',
content='Hello World. We are the world.',
category=self.category_programming,
author=self.user_trump
)
self.post_001.tags.add(self.tag_hello)
self.post_002 = Post.objects.create(
title='두번째 포스트입니다.',
content='1등이 전부는 아니잖아요?',
category=self.category_music,
author=self.user_obama
)
self.post_003 = Post.objects.create(
title='세번째 포스트입니다.',
content='category가 없을 수도 있죠',
author=self.user_obama
)
self.post_003.tags.add(self.tag_python_kor)
self.post_003.tags.add(self.tag_python)
self.comment_001 = Comment.objects.create(
post=self.post_001,
author=self.user_obama,
content='첫 번째 댓글입니다. '
)
def navbar_test(self, soup):
navbar = soup.nav
self.assertIn('Blog', navbar.text)
self.assertIn('About Me', navbar.text)
logo_btn = navbar.find('a', text='Do It Django')
self.assertEqual(logo_btn.attrs['href'], '/')
home_btn = navbar.find('a', text='Home')
self.assertEqual(home_btn.attrs['href'], '/')
blog_btn = navbar.find('a', text='Blog')
self.assertEqual(blog_btn.attrs['href'], '/blog/')
about_me_btn = navbar.find('a', text='About Me')
self.assertEqual(about_me_btn.attrs['href'], '/about_me/')
def category_card_test(self, soup):
categories_card = soup.find('div', id='categories-card')
self.assertIn('Categories', categories_card.text)
self.assertIn(
f'{self.category_programming.name} ({self.category_programming.post_set.count()})',
categories_card.text
)
self.assertIn(
f'{self.category_music.name} ({self.category_music.post_set.count()})',
categories_card.text
)
self.assertIn(f'미분류 (1)', categories_card.text)
def test_post_list(self):
# Post가 있는 경우
self.assertEqual(Post.objects.count(), 3)
response = self.client.get('/blog/')
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html.parser')
self.assertEqual(soup.title.text, 'Blog')
self.navbar_test(soup)
self.category_card_test(soup)
main_area = soup.find('div', id='main-area')
self.assertNotIn('아직 게시물이 없습니다', main_area.text)
post_001_card = main_area.find('div', id='post-1') # id가 post-1인 div를 찾아서, 그 안에
self.assertIn(self.post_001.title, post_001_card.text) # title이 있는지
self.assertIn(self.post_001.category.name, post_001_card.text) # category가 있는지
self.assertIn(self.post_001.author.username.upper(), post_001_card.text) # 작성자명이 있는지
self.assertIn(self.tag_hello.name, post_001_card.text)
self.assertNotIn(self.tag_python.name, post_001_card.text)
self.assertNotIn(self.tag_python_kor.name, post_001_card.text)
post_002_card = main_area.find('div', id='post-2')
self.assertIn(self.post_002.title, post_002_card.text)
self.assertIn(self.post_002.category.name, post_002_card.text)
self.assertIn(self.post_002.author.username.upper(), post_002_card.text)
self.assertNotIn(self.tag_hello.name, post_002_card.text)
self.assertNotIn(self.tag_python.name, post_002_card.text)
self.assertNotIn(self.tag_python_kor.name, post_002_card.text)
post_003_card = main_area.find('div', id='post-3')
self.assertIn('미분류', post_003_card.text)
self.assertIn(self.post_003.title, post_003_card.text)
self.assertIn(self.post_003.author.username.upper(), post_003_card.text)
self.assertNotIn(self.tag_hello.name, post_003_card.text)
self.assertIn(self.tag_python.name, post_003_card.text)
self.assertIn(self.tag_python_kor.name, post_003_card.text)
# Post가 없는 경우
Post.objects.all().delete()
self.assertEqual(Post.objects.count(), 0)
response = self.client.get('/blog/')
soup = BeautifulSoup(response.content, 'html.parser')
main_area = soup.find('div', id='main-area') # id가 main-area인 div태그를 찾습니다.
self.assertIn('아직 게시물이 없습니다', main_area.text)
def test_post_detail(self):
self.assertEqual(self.post_001.get_absolute_url(), '/blog/1/')
response = self.client.get(self.post_001.get_absolute_url())
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html.parser')
self.navbar_test(soup)
self.category_card_test(soup)
self.assertIn(self.post_001.title, soup.title.text)
main_area = soup.find('div', id='main-area')
post_area = main_area.find('div', id='post-area')
self.assertIn(self.post_001.title, post_area.text)
self.assertIn(self.category_programming.name, post_area.text)
self.assertIn(self.user_trump.username.upper(), post_area.text)
self.assertIn(self.post_001.content, post_area.text)
self.assertIn(self.tag_hello.name, post_area.text)
self.assertNotIn(self.tag_python.name, post_area.text)
self.assertNotIn(self.tag_python_kor.name, post_area.text)
# comment area
comments_area = soup.find('div', id='comment-area')
comment_001_area = comments_area.find('div', id='comment-1')
self.assertIn(self.comment_001.author.username, comment_001_area.text)
self.assertIn(self.comment_001.content, comment_001_area.text)
def test_category_page(self):
response = self.client.get(self.category_programming.get_absolute_url())
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html.parser')
self.navbar_test(soup)
self.category_card_test(soup)
self.assertIn(self.category_programming.name, soup.h1.text)
main_area = soup.find('div', id='main-area')
self.assertIn(self.category_programming.name, main_area.text)
self.assertIn(self.post_001.title, main_area.text)
self.assertNotIn(self.post_002.title, main_area.text)
self.assertNotIn(self.post_003.title, main_area.text)
def test_tag_page(self):
response = self.client.get(self.tag_hello.get_absolute_url())
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html.parser')
self.navbar_test(soup)
self.category_card_test(soup)
self.assertIn(self.tag_hello.name, soup.h1.text)
main_area = soup.find('div', id='main-area')
self.assertIn(self.tag_hello.name, main_area.text)
self.assertIn(self.post_001.title, main_area.text)
self.assertNotIn(self.post_002.title, main_area.text)
self.assertNotIn(self.post_003.title, main_area.text)
def test_create_post(self):
# 로그인 하지 않으면 status code가 200이면 안된다!
response = self.client.get('/blog/create_post/')
self.assertNotEqual(response.status_code, 200)
# staff가 아닌 trum가 로그인을 한다.
self.client.login(username='trump', password='somepassword')
response = self.client.get('/blog/create_post/')
self.assertNotEqual(response.status_code, 200)
# staff인 obama로 로그인 한다.
self.client.login(username='obama', password='somepassword')
response = self.client.get('/blog/create_post/')
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html.parser')
self.assertEqual('Create Post - Blog', soup.title.text)
main_area = soup.find('div', id='main-area')
self.assertIn('Create New Post', main_area.text)
tag_str_input = main_area.find('input', id='id_tags_str')
self.assertTrue(tag_str_input)
self.client.post(
'/blog/create_post/',
{
'title': 'Post Form 만들기',
'content': "Post Form 페이지를 만듭시다.",
'tags_str': 'new tag; 한글 태그, python'
}
)
last_post = Post.objects.last()
self.assertEqual(last_post.title, "Post Form 만들기")
self.assertEqual(last_post.author.username, 'obama')
self.assertEqual(last_post.tags.count(), 3)
self.assertTrue(Tag.objects.get(name='new tag'))
self.assertTrue(Tag.objects.get(name='한글 태그'))
self.assertEqual(Tag.objects.count(), 5)
def test_update_post(self):
update_post_url = f'/blog/update_post/{self.post_003.pk}/'
# 로그인 하지 않은 경우
response = self.client.get(update_post_url)
self.assertNotEqual(response.status_code, 200)
# 로그인은 했지만, 작성자가 아닌 경우
self.assertNotEqual(self.post_003.author, self.user_trump)
self.client.login(
username=self.user_trump.username,
password='somepassword'
)
response = self.client.get(update_post_url)
self.assertEqual(response.status_code, 403)
# 작성자(obama)가 접근하는 경우
self.client.login(
username=self.post_003.author.username,
password='somepassword'
)
response = self.client.get(update_post_url)
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html.parser')
self.assertEqual('Edit Post - Blog', soup.title.text)
main_area = soup.find('div', id='main-area')
self.assertIn('Edit Post', main_area.text)
tag_str_input = main_area.find('input', id='id_tags_str')
self.assertTrue(tag_str_input)
self.assertIn('파이썬 공부; python', tag_str_input.attrs['value'])
response = self.client.post(
update_post_url,
{
'title': '세번째 포스트를 수정했습니다. ',
'content': '안녕 세계? 우리는 하나!',
'category': self.category_music.pk,
'tags_str': '파이썬 공부; 한글 태그, some tag'
},
follow=True
)
soup = BeautifulSoup(response.content, 'html.parser')
main_area = soup.find('div', id='main-area')
self.assertIn('세번째 포스트를 수정했습니다.', main_area.text)
self.assertIn('안녕 세계? 우리는 하나!', main_area.text)
self.assertIn(self.category_music.name, main_area.text)
self.assertIn('파이썬 공부', main_area.text)
self.assertIn('한글 태그', main_area.text)
self.assertIn('some tag', main_area.text)
self.assertNotIn('python', main_area.text)
def test_comment_form(self):
self.assertEqual(Comment.objects.count(), 1)
self.assertEqual(self.post_001.comment_set.count(), 1)
# 로그인 하지 않은 상태
response = self.client.get(self.post_001.get_absolute_url())
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html.parser')
comment_area = soup.find('div', id='comment-area')
self.assertIn('Log in and leave a comment', comment_area.text)
self.assertFalse(comment_area.find('form', id='comment-form'))
# 로그인 한 상태
self.client.login(username='obama', password='somepassword')
response = self.client.get(self.post_001.get_absolute_url())
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html.parser')
comment_area = soup.find('div', id='comment-area')
self.assertNotIn('Log in and leave a comment', comment_area.text)
comment_form = comment_area.find('form', id='comment-form')
self.assertTrue(comment_form.find('textarea', id='id_content'))
response = self.client.post(
self.post_001.get_absolute_url() + 'new_comment/',
{
'content': "오바마의 댓글입니다.",
},
follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Comment.objects.count(), 2)
self.assertEqual(self.post_001.comment_set.count(), 2)
new_comment = Comment.objects.last()
soup = BeautifulSoup(response.content, 'html.parser')
self.assertIn(new_comment.post.title, soup.title.text)
comment_area = soup.find('div', id='comment-area')
new_comment_div = comment_area.find('div', id=f'comment-{new_comment.pk}')
self.assertIn('obama', new_comment_div.text)
self.assertIn('오바마의 댓글입니다.', new_comment_div.text)
def test_comment_update(self):
comment_by_trump = Comment.objects.create(
post=self.post_001,
author=self.user_trump,
content='트럼프의 댓글입니다.'
)
response = self.client.get(self.post_001.get_absolute_url())
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html.parser')
comment_area = soup.find('div', id='comment-area')
self.assertFalse(comment_area.find('a', id='comment-1-update-btn'))
self.assertFalse(comment_area.find('a', id='comment-2-update-btn'))
# 로그인 한 상태
self.client.login(username='obama', password='somepassword')
response = self.client.get(self.post_001.get_absolute_url())
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html.parser')
comment_area = soup.find('div', id='comment-area')
self.assertFalse(comment_area.find('a', id='comment-2-update-btn'))
comment_001_update_btn = comment_area.find('a', id='comment-1-update-btn')
self.assertIn('edit', comment_001_update_btn.text)
self.assertEqual(comment_001_update_btn.attrs['href'], '/blog/update_comment/1/')
response = self.client.get('/blog/update_comment/1/')
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html.parser')
self.assertEqual('Edit Comment - Blog', soup.title.text)
update_comment_form = soup.find('form', id='comment-form')
content_textarea = update_comment_form.find('textarea', id='id_content')
self.assertIn(self.comment_001.content, content_textarea.text)
response = self.client.post(
f'/blog/update_comment/{self.comment_001.pk}/',
{
'content': "오바마의 댓글을 수정합니다.",
},
follow=True
)
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html.parser')
comment_001_div = soup.find('div', id='comment-1')
self.assertIn('오바마의 댓글을 수정합니다.', comment_001_div.text)
self.assertIn('Updated: ', comment_001_div.text)
def test_delete_comment(self):
comment_by_trump = Comment.objects.create(
post=self.post_001,
author=self.user_trump,
content='트럼프의 댓글입니다.'
)
self.assertEqual(Comment.objects.count(), 2)
self.assertEqual(self.post_001.comment_set.count(), 2)
# 로그인 하지 않은 상태
response = self.client.get(self.post_001.get_absolute_url())
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html.parser')
comment_area = soup.find('div', id='comment-area')
self.assertFalse(comment_area.find('a', id='comment-1-delete-btn'))
self.assertFalse(comment_area.find('a', id='comment-2-delete-btn'))
# trump로 로그인 한 상태
self.client.login(username='trump', password='somepassword')
response = self.client.get(self.post_001.get_absolute_url())
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html.parser')
comment_area = soup.find('div', id='comment-area')
self.assertFalse(comment_area.find('a', id='comment-1-delete-btn'))
comment_002_delete_modal_btn = comment_area.find(
'a', id='comment-2-delete-modal-btn'
)
self.assertIn('delete', comment_002_delete_modal_btn.text)
self.assertEqual(
comment_002_delete_modal_btn.attrs['data-target'],
'#deleteCommentModal-2'
)
delete_comment_modal_002 = soup.find('div', id='deleteCommentModal-2')
self.assertIn('Are You Sure?', delete_comment_modal_002.text)
really_delete_btn_002 = delete_comment_modal_002.find('a')
self.assertIn('Delete', really_delete_btn_002.text)
self.assertEqual(
really_delete_btn_002.attrs['href'],
'/blog/delete_comment/2/'
)
response = self.client.get('/blog/delete_comment/2/', follow=True)
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html.parser')
self.assertIn(self.post_001.title, soup.title.text)
comment_area = soup.find('div', id='comment-area')
self.assertNotIn('트럼프의 댓글입니다.', comment_area.text)
self.assertEqual(Comment.objects.count(), 1)
self.assertEqual(self.post_001.comment_set.count(), 1)
    def test_search(self):
        """The search page lists only posts whose title matches the query and
        shows the match count in the heading."""
        post_about_python = Post.objects.create(
            title='파이썬에 대한 포스트입니다.',
            content='Hello World. We are the world.',
            author=self.user_trump
        )
        response = self.client.get('/blog/search/파이썬/')
        self.assertEqual(response.status_code, 200)
        soup = BeautifulSoup(response.content, 'html.parser')
        main_area = soup.find('div', id='main-area')
        # Two hits expected: the post created above plus post_003, whose title
        # presumably contains '파이썬' (set up elsewhere in this test case —
        # confirm against setUp).
        self.assertIn('Search: 파이썬 (2)', main_area.text)
        self.assertNotIn(self.post_001.title, main_area.text)
        self.assertNotIn(self.post_002.title, main_area.text)
        self.assertIn(self.post_003.title, main_area.text)
        self.assertIn(post_about_python.title, main_area.text)
"asb2008@naver.com"
] | asb2008@naver.com |
4233d111f76ceabf87cbb1c1701d766aeaf5393b | 17e813f4f20a6ce2d82619be9fac517b5176a74b | /Trees/btToLinkedList.py | 84dacb4426c7a87aa171291005e233ea32317e16 | [] | no_license | AryanGanotra07/DSALGO | 0ee86bbca4b345d21f5d6eb60d96c7aff6f1fc93 | 8cbac991ceec43522a57c65d68f00b54ccb6ea3f | refs/heads/master | 2022-12-08T22:33:14.031636 | 2020-09-13T14:05:41 | 2020-09-13T14:05:41 | 283,800,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | # Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import sys
# Raise the recursion limit so the recursive traversals below survive deep,
# degenerate (linked-list-like) trees. NOTE(review): 15M frames would blow the
# C stack long before this Python-level limit is reached.
sys.setrecursionlimit(15000000)
def solve(a, b):
    """Append the values of the tree rooted at ``a`` to list ``b`` in preorder.

    ``a`` may be None (empty subtree); ``b`` is mutated in place.
    """
    if a is not None:
        b.append(a.val)
        for child in (a.left, a.right):
            solve(child, b)
def solve(a):
    """Flatten the tree rooted at ``a`` in place into a right-only chain
    (preorder order). Left pointers are cleared. Always returns None.
    """
    if a is None:
        return None
    if a.left is None and a.right is None:
        return None
    if a.left is not None:
        # Flatten the left subtree, then splice it in front of the right one.
        solve(a.left)
        saved_right = a.right
        a.right, a.left = a.left, None
        tail = a.right
        while tail.right is not None:
            tail = tail.right
        tail.right = saved_right
    solve(a.right)
class Solution:
    # @param A : root node of tree
    # @return the root node in the tree
    def flatten(self, A):
        """Flatten the tree in place (preorder, right-only chain) via the
        module-level ``solve`` helper, then return the same root node."""
        solve(A)
        return A
| [
"aryanganotra7@gmail.com"
] | aryanganotra7@gmail.com |
d2d035ab631e4aacef58458847bb44fb566439df | 7b9c326abb4ad6656e684ce0bd94df3f1433aeaf | /blog/urls.py | d6097cc9b810cc366aabb1845d64a143f2d3c06b | [] | no_license | DavidSunley91031341/Django-Project | 3ab815b36c0aec9a345fb60873c877beb2568e63 | 506580331b0cf113485bf0e53db37a1d36216efd | refs/heads/master | 2020-06-13T16:03:04.166582 | 2019-07-02T08:34:05 | 2019-07-02T08:34:05 | 194,703,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | from django.urls import path
from .views import (
PostListView,
PostDetailView,
PostCreateView,
PostUpdateView,
PostDeleteView,
UserPostListView
)
from . import views
# URL routes for the blog app.
urlpatterns = [
    path('', views.home, name='blog-home'),  # function-based index view
    path('post/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
    # NOTE(review): route named 'post-create' is mapped at path 'order' —
    # confirm this is intentional.
    path('order', PostCreateView.as_view(), name='post-create'),
]
| [
"noreply@github.com"
] | DavidSunley91031341.noreply@github.com |
9bba1997d99d77cb0ada3b6c36fe8ba255ca1ebf | c74b24d93f3c13bb91ead1416b408adedd3fb34b | /evaluate.py | 8585902ce1d1e675a3a4babdc75f0165e9e3f732 | [] | no_license | Muhanned/COVID19 | 61249694e49603cce990c5f3980800eff5000658 | b73952eb081c93d2975394a429d39675b42a7735 | refs/heads/master | 2022-11-06T05:50:31.953031 | 2020-07-04T06:00:36 | 2020-07-04T06:00:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,516 | py | import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import json
import math
from vis.visualization import visualize_cam, visualize_saliency, overlay
from vis.utils.utils import load_img, normalize, find_layer_idx
from keras.models import load_model, Model
from sklearn.metrics import auc, precision_recall_curve, roc_curve, confusion_matrix, roc_auc_score
from sklearn import manifold
import pandas
from config import config
sns.set()
def load(filepath):
    """Load and return a serialized Keras model from ``filepath``."""
    return load_model(filepath)
def get_results(model, data):
    """Run ``model`` over the whole generator ``data`` and return raw outputs.

    The generator is reset afterwards so the caller can iterate it again.
    """
    n_steps = math.ceil(len(data) / config.BATCH_SIZE)
    predictions = model.predict_generator(data, steps=n_steps)
    data.reset()
    return predictions
def transform_binary_probabilities(results):
    """Return the raw model outputs as a flat 1-D array of probabilities."""
    return results.flatten()
def transform_binary_predictions(results):
    """Threshold flattened probabilities at 0.5 into hard 0/1 labels."""
    return (results.flatten() > 0.5).astype(int)
def get_labels(data):
    """Return the ground-truth class indices of a Keras data generator."""
    labels = data.classes
    return labels
def calculate_accuracy_loss(model, data):
    """Evaluate ``model`` on generator ``data``; return ``(loss, accuracy)``."""
    n_steps = math.ceil(len(data) / config.BATCH_SIZE)
    loss, accuracy = model.evaluate_generator(data, steps=n_steps)
    return loss, accuracy
def calculate_precision_recall_curve(labels, results):
    """Restricted to binary classification.

    Returns ``(precision, recall)`` arrays of the precision-recall curve
    computed from the flattened raw model outputs.
    """
    probabilities = transform_binary_probabilities(results)
    precision, recall, _thresholds = precision_recall_curve(labels, probabilities)
    return precision, recall
def calculate_average_precision(labels, results):
    """Restricted to binary classification.

    Returns the average precision (summary of the precision-recall curve)
    of the flattened raw model outputs against ``labels``.
    """
    # BUG FIX: average_precision_score was never imported at module level
    # (only auc/precision_recall_curve/roc_curve/confusion_matrix/
    # roc_auc_score are), so every call raised NameError. Import it locally
    # to keep this fix self-contained.
    from sklearn.metrics import average_precision_score
    probabilities = transform_binary_probabilities(results)
    average_precision = average_precision_score(labels, probabilities)
    return average_precision
def calculate_roc_curve(labels, probabilities):
    """Restricted to binary classification.

    Returns ``(fpr, tpr)`` — the false/true positive rates along the ROC
    curve for the given probabilities.
    """
    fpr, tpr, _thresholds = roc_curve(labels, probabilities)
    return fpr, tpr
def calculate_confusion_matrix(labels, results):
    """Threshold raw outputs at 0.5 and return the confusion matrix."""
    return confusion_matrix(labels, transform_binary_predictions(results))
def calculate_confusion_matrix_stats(labels, results):
    """Per-class confusion-matrix statistics from raw model outputs.

    ``results`` are raw probabilities; they are thresholded at 0.5 and the
    statistics are delegated to ``calculate_confusion_matrix_stats_predictions``
    (whose body this function previously duplicated line-for-line), so the two
    entry points cannot drift apart.

    Returns a dict of per-class arrays: Acc, TP, TN, FP, FN, TPR, TNR, PPV,
    NPV, FPR, FNR, FDR, AM (arithmetic mean of TPR/TNR), GM (geometric mean).
    """
    predictions = transform_binary_predictions(results)
    return calculate_confusion_matrix_stats_predictions(labels, predictions)
def calculate_confusion_matrix_predictions(labels, predictions):
    """Return the confusion matrix for already-thresholded ``predictions``."""
    return confusion_matrix(labels, predictions)
def calculate_confusion_matrix_stats_predictions(labels, predictions):
    """Per-class confusion-matrix statistics for hard ``predictions``.

    Returns a dict of per-class numpy arrays (one entry per class, via the
    one-vs-rest decomposition of the confusion matrix).
    """
    # NOTE: this local name shadows the sklearn ``confusion_matrix`` import
    # inside this function body.
    confusion_matrix = calculate_confusion_matrix_predictions(labels, predictions)
    FP = confusion_matrix.sum(axis=0) - np.diag(confusion_matrix)
    FN = confusion_matrix.sum(axis=1) - np.diag(confusion_matrix)
    TP = np.diag(confusion_matrix)
    TN = confusion_matrix.sum() - (FP + FN + TP)
    # Sensitivity, hit rate, recall, or true positive rate
    TPR = TP/(TP+FN)
    # Specificity or true negative rate
    TNR = TN/(TN+FP)
    # Precision or positive predictive value
    PPV = TP/(TP+FP)
    # Negative predictive value
    NPV = TN/(TN+FN)
    # Fall out or false positive rate
    FPR = FP/(FP+TN)
    # False negative rate
    FNR = FN/(TP+FN)
    # False discovery rate
    FDR = FP/(TP+FP)
    Acc = (TN + TP)/(TN + TP + FN + FP)
    return {
        "Acc": Acc,
        "TP": TP,
        "TN": TN,
        "FP": FP,
        "FN": FN,
        "TPR": TPR,
        "TNR": TNR,
        "PPV": PPV,
        "NPV": NPV,
        "FPR": FPR,
        "FNR": FNR,
        "FDR": FDR,
        # AM/GM: arithmetic and geometric means of sensitivity and specificity.
        "AM": (TPR+TNR)/2,
        "GM": np.sqrt(TPR*TNR),
    }
def calculate_pr_auc(labels, results):
    """Area under the precision-recall curve for raw model outputs."""
    precision, recall = calculate_precision_recall_curve(labels, results)
    pr_auc = auc(recall, precision)
    return pr_auc
# NOTE(review): ``experts=[]`` is a mutable default; it is only read here,
# never mutated, so it is safe — but fragile against future edits.
def plot_precision_recall(labels, results, experts=[]):
    """Plot the PR curve with the model's default operating point and optional
    expert operating points; returns the matplotlib figure.

    Each expert dict needs "name", "PPV" and "TPR" (per-class arrays; index 1
    is used, i.e. the positive class).
    """
    precision, recall = calculate_precision_recall_curve(labels, results)
    # NOTE: this local shadows the sklearn ``auc`` import inside the function.
    auc = calculate_pr_auc(labels, results)
    stats = calculate_confusion_matrix_stats(labels, results)
    points = [{
        "name": "model default",
        "precision": stats["PPV"][1],
        "recall": stats["TPR"][1],
    }]
    if len(experts) > 0:
        points = [
            *points, *[{
                "name": e["name"],
                "precision": e["PPV"][1],
                "recall": e["TPR"][1],
            } for e in experts]
        ]
    fig, ax = plt.subplots()
    sns.scatterplot(
        data=pandas.DataFrame(points),
        x="recall",
        y="precision",
        hue="name",
        ax=ax)
    ax.step(recall, precision)
    ax.set_ylim(-0.04, 1.04)
    ax.set_xlim(-0.04, 1.04)
    # Annotate the AUC in the bottom-right corner.
    ax.text(
        1,
        0,
        s="auc={:.2f}".format(auc),
        horizontalalignment='right',
        verticalalignment='bottom')
    ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    return fig
def plot_roc_curve(labels, probabilities, experts=[], name="model"):
    """Plot the ROC curve (with chance diagonal) plus optional expert points.

    NOTE(review): the ``name`` parameter is accepted but never used; the
    ``experts=[]`` mutable default is read-only here, so it is harmless.
    Returns the matplotlib figure.
    """
    auc = roc_auc_score(labels, probabilities)
    fig, ax = plt.subplots()
    if len(experts) > 0:
        experts_data = pandas.DataFrame([{
            "name": e["name"],
            "FPR": e["FPR"][1],
            "TPR": e["TPR"][1],
        } for e in experts ])
        sns.scatterplot(data=experts_data, x="FPR", y="TPR", hue="name", ax=ax)
    fpr, tpr = calculate_roc_curve(labels, probabilities)
    # Chance line, then the actual ROC curve.
    ax.plot([0, 1], [0, 1], linestyle='--')
    ax.plot(fpr, tpr)
    ax.text(1, 0, s="auc={:.3f}".format(auc), horizontalalignment='right', verticalalignment='bottom')
    ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    return fig
def plot_confusion_matrix(data, results):
    """Heatmap of the confusion matrix for a generator's labels vs. the
    thresholded ``results``; returns the matplotlib figure."""
    fig, ax = plt.subplots()
    confusion_matrix = calculate_confusion_matrix(get_labels(data), results)
    # Class names for the tick labels, sorted alphabetically.
    # NOTE(review): this assumes alphabetical order matches the generator's
    # class_indices order (true for Keras' default directory iterators, where
    # indices are assigned to sorted class names) — confirm for custom data.
    labels = list(data.class_indices.keys())
    labels.sort()
    sns.heatmap(
        confusion_matrix,
        annot=True,
        cmap="YlGnBu",
        yticklabels=labels,
        xticklabels=labels,
        ax=ax,
        fmt='g',
    )
    plt.xlabel('prediction', axes=ax)
    plt.ylabel('label', axes=ax)
    return fig
def plot_confusion_matrix_ensemble(labels, predictions, class_labels):
    """Heatmap of the confusion matrix for already-computed hard
    ``predictions`` (e.g. from an ensemble); returns the figure."""
    fig, ax = plt.subplots()
    cm_data = confusion_matrix(labels, predictions)
    sns.heatmap(
        cm_data,
        annot=True,
        cmap="YlGnBu",
        yticklabels=class_labels,
        xticklabels=class_labels,
        ax=ax,
        fmt='g',
    )
    plt.xlabel('prediction', axes=ax)
    plt.ylabel('label', axes=ax)
    return fig
def plot_tsne(model, layer_name, data, labels, fieldnames=None, perplexity=5):
    """t-SNE scatter plots of the activations of ``layer_name``.

    One figure is produced per label vector in ``labels`` (each colored by
    that vector); ``fieldnames`` optionally names each legend. Calls
    ``plt.show()`` as a side effect and returns the list of figures.
    """
    figures = list()
    # Truncate the model at the requested layer to get its activations.
    intermediate_layer_model = Model(
        inputs=model.input, outputs=model.get_layer(layer_name).output)
    intermediate_output = intermediate_layer_model.predict_generator(data, steps=math.ceil(len(data)/config.BATCH_SIZE))
    embedding = manifold.TSNE(
        perplexity=perplexity).fit_transform(intermediate_output)
    for i, label in enumerate(labels):
        labelname = "label"
        if fieldnames is not None:
            labelname = fieldnames[i]
        pd = pandas.DataFrame.from_dict({
            "x": [d[0] for d in embedding],
            "y": [d[1] for d in embedding],
            labelname: label,
        })
        fig, ax = plt.subplots()
        sns.scatterplot(
            x="x",
            y="y",
            data=pd,
            hue=labelname,
            hue_order=np.unique(label),
            ax=ax)
        ax.axis('off')
        ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
        figures.append(fig)
    plt.show()
    return figures
from vis.utils import utils
import keras
def plot_grad_cam(image_file, model, layer, filter_idx=None, backprop_modifier="relu"):
    """Grad-CAM overlay (left) next to the raw image (right) for one .npy image.

    NOTE(review): the output layer name 'dense_6' is hard-coded; ``class_idx``
    is assigned but never used; the ``backprop_modifier`` parameter is ignored
    (None is passed to visualize_cam). Confirm these are intended.
    """
    img = np.load(image_file)
    # Replicate the single-channel image into 3 channels for the network.
    image = np.stack((img,img,img),axis=2)
    layer_idx = utils.find_layer_idx(model, 'dense_6')
    # Swap softmax for linear activation, as keras-vis recommends for CAM.
    model.layers[layer_idx].activation = keras.activations.linear
    model = utils.apply_modifications(model)
    penultimate_layer_idx = utils.find_layer_idx(model, layer)
    #image = image[:,:,:]
    print(image.shape)
    class_idx = 1
    seed_input = image/255
    grad_top1 = visualize_cam(model, layer_idx, filter_indices = filter_idx,seed_input = seed_input,
                        penultimate_layer_idx=penultimate_layer_idx, # None,
                        backprop_modifier=None,
                        grad_modifier=None)
    fig, ax = plt.subplots(1, 2)
    ax[0].imshow(overlay(grad_top1,image),cmap="jet", alpha=0.8)
    ax[0].axis('off')
    ax[1].imshow(image)
    ax[1].axis('off')
    return fig
def plot_multiple_grad_cam(
        images,
        model,
        layer,
        penultimate_layer=None,
        filter_idx=None,
        backprop_modifier=None,
        grad_modifier=None,
        experts=None,
        expert_spacing=0.1,
):
    """Grid plot: row 1 raw images, row 2 Grad-CAM overlays, optional row 3
    with per-image expert annotations.

    ``experts`` is an iterable of per-expert sequences, one entry per image;
    ``expert_spacing`` is the vertical gap between expert lines.
    Returns ``(fig, ax)``.
    """
    rows = 2
    if experts is not None:
        rows = 3
    fig, ax = plt.subplots(
        rows, len(images), figsize=(4 * len(images), 4 * rows))
    ax = ax.flatten()
    penultimate_layer_idx = None
    if penultimate_layer:
        penultimate_layer_idx = find_layer_idx(model, penultimate_layer)
    # Row 1: the raw images.
    for i, filename in enumerate(images):
        image = load_img(
            filename, target_size=(config.IMAGE_SIZE, config.IMAGE_SIZE))
        ax[i].imshow(image)
        ax[i].axis('off')
    # Row 2: Grad-CAM heatmaps overlaid on each image.
    for i, filename in enumerate(images):
        image = load_img(
            filename, target_size=(config.IMAGE_SIZE, config.IMAGE_SIZE))
        grad = visualize_cam(
            model,
            find_layer_idx(model, layer),
            filter_idx,
            normalize(image),
            penultimate_layer_idx=penultimate_layer_idx,
            backprop_modifier=backprop_modifier,
            grad_modifier=grad_modifier)
        ax[i + len(images)].imshow(overlay(grad, image))
        ax[i + len(images)].axis('off')
    # Row 3 (optional): one text line per expert under each image; the first
    # column also carries the "expert N:" prefix.
    if experts:
        for i, filename in enumerate(images):
            for j, expert in enumerate(experts):
                if i == 0:
                    message = "expert {}: {}".format(j + 1, expert[i])
                    ax[i + 2 * len(images)].text(
                        0.3,
                        1 - (expert_spacing * j),
                        message,
                        horizontalalignment='left',
                        verticalalignment='center')
                else:
                    message = "{}".format(expert[i])
                    ax[i + 2 * len(images)].text(
                        0.5,
                        1 - (expert_spacing * j),
                        message,
                        horizontalalignment='center',
                        verticalalignment='center')
                ax[i + 2 * len(images)].axis('off')
    return fig, ax
def plot_multiple_saliency(images, model, layer, filter_idx=None, backprop_modifier=None, grad_modifier=None):
    """Grid plot: row 1 raw images, row 2 saliency-map overlays.

    ``images`` is an iterable of image file paths. Returns the figure.
    """
    fig, ax = plt.subplots(2, len(images), figsize=(4 * len(images), 4))
    ax = ax.flatten()
    # Resolve the target layer once instead of once per image.
    layer_idx = find_layer_idx(model, layer)
    for i, filename in enumerate(images):
        image = load_img(filename, target_size=(config.IMAGE_SIZE, config.IMAGE_SIZE))
        ax[i].imshow(image)
        ax[i].axis('off')
    for i, filename in enumerate(images):
        # BUG FIX: the original computed the saliency map *before* reloading
        # the image, so each map was generated from the previous iteration's
        # image (on the first pass, from the last image of the first loop).
        image = load_img(filename, target_size=(config.IMAGE_SIZE, config.IMAGE_SIZE))
        grad = visualize_saliency(model, layer_idx, filter_idx, normalize(image),
                                  backprop_modifier=backprop_modifier,
                                  grad_modifier=grad_modifier)
        ax[i + len(images)].imshow(overlay(grad, image))
        ax[i + len(images)].axis('off')
    return fig
| [
"robinwang08@gmail.com"
] | robinwang08@gmail.com |
cba39aeeb90b4394b5e1603acec6f8d9f3631202 | 653e6ef1b55cd83dabd7576e8ecfe5cd4b53c223 | /drawing.py | 2f76af7acf02a149c535218c35b1195bcd7047d2 | [] | no_license | shkodaaniram/DR | a07626eb1e755849bb4eb2b5357343606d02624f | 1edd125ec02886800fa2d117befed8d5e9e1f348 | refs/heads/master | 2021-01-19T09:18:17.551481 | 2017-08-02T19:31:22 | 2017-08-02T19:31:22 | 82,100,467 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | import cv2
import numpy as np
def minimized_od_field(img, x1, y1, x2, y2, img_name):
    """Draw two horizontal (y1, y2) and two vertical (x1, x2) green guide
    lines across ``img`` (mutated in place) and save it to ``img_name``."""
    height, width = img.shape[0], img.shape[1]
    green = (0, 255, 0)
    for y in (y1, y2):
        cv2.line(img, (0, y), (width, y), color=green, thickness=5)
    for x in (x1, x2):
        cv2.line(img, (x, 0), (x, height), color=green, thickness=5)
    cv2.imwrite(img_name, img)
def join_pictures_horizontally(img1, img2, num):
    """Pad a grayscale ``img1`` and a BGR ``img2`` onto equal-sized black
    canvases, place them side by side (img1 left, img2 right, converted to
    gray), save as 'concatenation_<num>.png' and return the combined image.
    """
    # shape[0] is the row count (height) and shape[1] the column count
    # (width); the original local names had them swapped, which was merely
    # misleading — the computation itself is unchanged.
    height = max(img1.shape[0], img2.shape[0])
    width = max(img1.shape[1], img2.shape[1])
    canvas_left = np.zeros((height, width), np.uint8)
    canvas_right = np.zeros((height, width), np.uint8)
    canvas_right[:img2.shape[0], :img2.shape[1]] = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    canvas_left[:img1.shape[0], :img1.shape[1]] = img1
    vis = np.concatenate((canvas_left, canvas_right), axis=1)
    cv2.imwrite('concatenation_' + num + '.png', vis)
    return vis
"shkodaaniram@rambler.ru"
] | shkodaaniram@rambler.ru |
1cf57d3750f0ccb24547e63287695969cfd1b8b1 | 844f02cd5f52eb40c8bceaeb0fe6b4100d26b326 | /cdt.py | bdd9b2e28a30f2034cc1b0adc63bbfff6db08739 | [] | no_license | cream/tools | e1a382d46b01886fd794e979e4b91284ccf52c6e | 62d8e9c848821f86fb7d1239b7960c69861e30e8 | refs/heads/master | 2021-01-15T13:18:28.212623 | 2013-07-13T08:26:54 | 2013-07-13T08:26:54 | 457,080 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,082 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import optparse
import tempfile
import tarfile
import os
import sys
import shutil
import dulwich.repo
import dulwich.client
IGNORED_FILES = [
'.git',
'.gitignore'
]
def git_clone(remote, local):
    """Fetch ``remote`` into a freshly initialised dulwich repository at
    ``local``, creating the directory if needed, and point HEAD at the
    remote HEAD. Fetch progress is written to stdout."""
    transport, host_path = dulwich.client.get_transport_and_path(remote)
    if not os.path.exists(local):
        os.mkdir(local)
    repo = dulwich.repo.Repo.init(local)
    remote_refs = transport.fetch(
        host_path,
        repo,
        determine_wants=repo.object_store.determine_wants_all,
        progress=sys.stdout.write)
    repo["HEAD"] = remote_refs["HEAD"]
class CreamDevelopmentTool(object):
    """Command dispatcher: the first CLI argument names a method on this
    class, the remaining arguments are passed to it (Python 2 script)."""
    def __init__(self):
        # Dispatch immediately on construction; unknown commands raise
        # AttributeError, a missing command raises IndexError.
        parser = optparse.OptionParser()
        options, args = parser.parse_args()
        command = getattr(self, args[0])
        command(*args[1:])
    def release(self, target, repo, version):
        """Clone ``repo``, strip VCS files, and pack a release tarball
        ``/tmp/<target>-<version>.tar.bz2``."""
        print " » Cloning repository…"
        tmp = tempfile.mkdtemp(prefix='cdt-')
        git_clone(repo, tmp)
        print " » Checking out working directory…"
        cwd = os.getcwd()
        os.chdir(tmp)
        # NOTE(review): shells out to the git binary for checkout even though
        # dulwich did the clone.
        os.system('git checkout master')
        os.chdir(cwd)
        print " » Removing files related to the repository…"
        for f in IGNORED_FILES:
            print "   » Removing '{0}'…".format(f)
            if os.path.isdir(os.path.join(tmp, f)):
                shutil.rmtree(os.path.join(tmp, f))
            elif os.path.isfile(os.path.join(tmp, f)):
                os.remove(os.path.join(tmp, f))
        print " » Creating the tarball…"
        tarball_name = '{0}-{1}.tar.bz2'.format(target, version)
        tarball_path = os.path.join('/tmp', tarball_name)
        tarball = tarfile.TarFile.open(tarball_path, 'w:bz2')
        # Everything is archived under a single <target>-<version>/ prefix.
        tarball.add(tmp, arcname='{0}-{1}'.format(target, version))
        tarball.close()
        print " » Removing temporary files…"
        shutil.rmtree(tmp)
        print " → You may find the tarball in '{0}'…".format(tarball_path)
# Script entry point: argument parsing and dispatch happen in the constructor.
if __name__ == '__main__':
    CreamDevelopmentTool()
| [
"stein@cream-project.org"
] | stein@cream-project.org |
4531c63aed8b999a043f6ed11748ea0b9b240dab | d51d85dba14c26e75e07ee91da966167ad6782bd | /firm/apps.py | a3a2277c7b4e0f8b7591e6a883f4bf9d8b3a4649 | [] | no_license | TimBerk/studhunter | 569b6bf6d4a9ab0acbd2145f827543b6a0341800 | 8c37aad24b448d64ffeb7e7d8fbed63d97d4e659 | refs/heads/master | 2023-01-19T16:35:31.076450 | 2020-11-24T06:27:40 | 2020-11-24T06:27:40 | 315,214,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | from django.apps import AppConfig
class FirmConfig(AppConfig):
    # Django app configuration for the "firm" section.
    name = 'firm'
    verbose_name = 'Раздел фирмы'  # admin display name ("Firm section")
| [
"lysak.tatiana2501@ya.ru"
] | lysak.tatiana2501@ya.ru |
8aaa1f1cdbe2605fc4372c493b89d4c209ed2a00 | cec176ec2ff4b710a11ac72055bb7618c34d0709 | /chainerLayers/cukernels/hotFunctions.py | 09ab65676cc0f081e79da30b9bacc62a78e83ed8 | [] | no_license | bordingj/chainer_layers | 4dd8a3cf864f39559bbaa60225db26fd1c20fa60 | 719a985e611e1599fba944ecacf4242e055a9cfc | refs/heads/master | 2021-01-22T01:00:00.136618 | 2015-08-10T13:48:04 | 2015-08-10T13:48:04 | 40,472,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,188 | py | import numpy as np
from chainerLayers.cukernels.utils import Get_bdim_and_gdim1D, Get_bdim_and_gdim2D
from chainerLayers.cukernels.utils import Get_bdim_and_gdimRowVec, Get_bdim_and_gdimSmallNBigM
# Compile and prepare the four CUDA kernels used by hotdot()/dothot() below.
# Layout convention in the kernels: a 2-D array X of shape (R, C) is stored
# row-major and element X[r, c] is addressed as X[r + r*(C-1) + c] == X[r*C + c].
# NOTE(review): the bare `except: pass` silently disables GPU support when
# pycuda/chainer (or a CUDA device) is unavailable — any call into the
# kernels then fails with NameError instead of a clear import error.
try:
    from pycuda.compiler import SourceModule
    from chainer import cuda
    # HotDot1: out[i, j] = sum over the K hot indices of row i of a[j, idx];
    # B != 0 means "overwrite out" instead of accumulating into it.
    HotDot1_code = SourceModule("""
    __global__
    void HotDot1(float* a, float* out, int* indices,
                    int K, int N, int H, int D, int B)
    {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if (i < N && j < H){
        int n = i+i*(H-1)+j;
        if(B){
            out[n] = 0;
        }
        for (int k=0;k<K;k++){
            int idx = indices[i+i*(K-1)+k];
            out[n] += a[j+j*(D-1)+idx];
            }
    }
    }
    """)
    HotDot1_kernel = HotDot1_code.get_function("HotDot1")
    HotDot1_kernel.prepare("PPPiiiii")
    # HotDot2: specialization for K == 1 (a single hot index per row).
    HotDot2_code = SourceModule("""
    __global__
    void HotDot2(float* a, float* out, int* indices,
                    int N, int H, int D, int B)
    {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if (i < N && j < H){
        int n = i+i*(H-1)+j;
        int idx = indices[i];
        if (B){
            out[n] = a[j+j*(D-1)+idx];
        }else{
            out[n] += a[j+j*(D-1)+idx];
        }
    }
    }
    """)
    HotDot2_kernel = HotDot2_code.get_function("HotDot2")
    HotDot2_kernel.prepare("PPPiiii")
    # DotHot1: scatter-accumulate a's columns into out at the K hot indices;
    # one thread per output column j, looping over the N rows.
    DotHot1_code = SourceModule("""
    __global__
    void DotHot1(float* a, float* out, int* indices,
                    int K, int N, int H, int D, int B)
    {
    int j = threadIdx.x + blockIdx.x * blockDim.x;
    if (j < H){
        for (int i=0;i<N;i++){
            for (int k=0;k<K;k++){
                int idx = indices[i+i*(K-1)+k];
                out[j+j*(D-1)+idx] += a[i+i*(H-1)+j];
            }
        }
    }
    }
    """)
    DotHot1_kernel = DotHot1_code.get_function("DotHot1")
    DotHot1_kernel.prepare("PPPiiiii")
    # DotHot2: specialization of DotHot1 for K == 1.
    DotHot2_code = SourceModule("""
    __global__
    void DotHot2(float* a, float* out, int* indices,
                    int N, int H, int D, int B)
    {
    int j = threadIdx.x + blockIdx.x * blockDim.x;
    if (j < H){
        for (int i=0;i<N;i++){
            int idx = indices[i];
            out[j+j*(D-1)+idx] += a[i+i*(H-1)+j];
        }
    }
    }
    """)
    DotHot2_kernel = DotHot2_code.get_function("DotHot2")
    DotHot2_kernel.prepare("PPPiiii")
except:
    pass
def hotdot(a, indices, out=None, dont_add=False):
    """Compute x.dot(a.T) on the GPU, where x is the K-hot matrix encoded by
    ``indices``.

    In:
        a: pycuda gpuarray of shape (H, D), float32
        indices: gpuarray of shape (N, K) of int32 hot column indices
        out: optional (N, H) float32 gpuarray to accumulate into; when omitted
             a fresh buffer is allocated and always overwritten
        dont_add: when True, overwrite ``out`` instead of accumulating
    Returns:
        out: the (N, H) result
    """
    H, D = a.shape
    N, K = indices.shape
    # Pick block/grid dimensions to suit the (N, H) output shape.
    if N == 1:
        bdim, gdim = Get_bdim_and_gdimRowVec(H)
    elif H >= (N*4):
        bdim, gdim = Get_bdim_and_gdimSmallNBigM(N,H)
    else:
        bdim, gdim = Get_bdim_and_gdim2D(N,H)
    # B is the kernel's overwrite flag (1 = overwrite, 0 = accumulate).
    if dont_add:
        B = np.int32(1)
    else:
        B = np.int32(0)
    if out is None:
        # cuda.empty is uninitialised memory, so force overwrite mode.
        out = cuda.empty((N,H), dtype=np.float32)
        B = np.int32(1)
    if K > 1:
        HotDot1_kernel.prepared_call(gdim, bdim,
                a.gpudata, out.gpudata, indices.gpudata,
                np.int32(K), np.int32(N), np.int32(H), np.int32(D), np.int32(B))
    else:
        # Single-hot fast path.
        HotDot2_kernel.prepared_call(gdim, bdim,
                a.gpudata, out.gpudata, indices.gpudata,
                np.int32(N), np.int32(H), np.int32(D), np.int32(B))
    return out
def dothot(a, indices, in_size, out=None, dont_add=False):
    """Compute a.T.dot(x) on the GPU, where x is the K-hot matrix encoded by
    ``indices`` with ``in_size`` columns.

    In:
        a: gpuarray of shape (N, H), float32
        indices: gpuarray of shape (N, K) of int32 hot column indices
        in_size: number of columns D of the hot-encoded matrix / of ``out``
        out: optional (H, in_size) float32 gpuarray to accumulate into; when
             omitted a zero-initialised buffer is allocated
        dont_add: overwrite flag passed to the kernel
             NOTE(review): unlike hotdot, B is NOT forced to 1 when ``out`` is
             freshly allocated — harmless only because the buffer starts at
             zeros and the kernels accumulate when B == 0.
    Returns:
        out: the (H, in_size) result
    """
    N, H = a.shape
    _N, K = indices.shape
    if _N != N:
        raise ValueError( 'a.shape[0] != idx.shape[0]' )
    # One thread per output column j of a.T (i.e. per hidden unit).
    bdim, gdim = Get_bdim_and_gdim1D(H)
    if dont_add:
        B = np.int32(1)
    else:
        B = np.int32(0)
    if out is None:
        out = cuda.zeros((H,in_size), dtype=np.float32)
    if K > 1:
        DotHot1_kernel.prepared_call(gdim, bdim,
                a.gpudata, out.gpudata, indices.gpudata,
                np.int32(K), np.int32(N), np.int32(H), np.int32(in_size), np.int32(B))
    else:
        # Single-hot fast path.
        DotHot2_kernel.prepared_call(gdim, bdim,
                a.gpudata, out.gpudata, indices.gpudata,
                np.int32(N), np.int32(H), np.int32(in_size), np.int32(B))
    return out
| [
"bordingj@gmail.com"
] | bordingj@gmail.com |
0493e172f6cf409c1e77e364d48edf7b2e0f0c07 | 1ffa9ad2f07bc09c6eaf9fdb7ff8be21926078aa | /player.py | 7b50ce7c77260ba692673f62146927c944ee9871 | [] | no_license | Aganthor/FlappyClone | 9513c19e72c5053b53d21ad86ea2695417951a8b | a2eaa5791f7caa74bf81ca2c77bff051a6f261f3 | refs/heads/master | 2021-01-06T05:39:39.067597 | 2020-08-24T16:59:22 | 2020-08-24T16:59:22 | 241,225,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,404 | py | import pygame as pg
import os
import constants
class Player(pg.sprite.Sprite):
    """
    Simple class to represent our little user controlled plane in the
    game.
    """
    # Pixels moved per handled key press; starting life count.
    MOVE_SPEED = 5
    MAX_LIVES = 3
    def __init__(self):
        super(Player, self).__init__()
        self.number_of_lives = self.MAX_LIVES
        # Three animation frames (planeRed1..3) with black as transparent.
        self.images = []
        for i in range(3):
            img = pg.image.load(os.path.join('assets/images/', 'planeRed' + str(i + 1) + '.png')).convert()
            img.convert_alpha()
            img.set_colorkey((0, 0, 0))
            self.images.append(img)
        self.current_index = 0
        self.image = self.images[self.current_index]
        self.rect = self.image.get_rect()
        # Start at the left edge, vertically centered below the score bar.
        self.rect.topleft = (0, (constants.SCREEN_HEIGHT + constants.SCORE_SURFACE_HEIGHT) // 2)
        self.score = 0
    def update(self):
        """Advance the animation frame and clamp the plane vertically."""
        self.current_index += 1
        if self.current_index >= len(self.images):
            self.current_index = 0
        self.image = self.images[self.current_index]
        if self.rect.top < constants.SCORE_SURFACE_HEIGHT: # Can't go in the score display surface.
            self.rect.top = constants.SCORE_SURFACE_HEIGHT
        if self.rect.bottom >= constants.SCREEN_HEIGHT: # Clip so that the plane can't go below the max...
            self.rect.bottom = constants.SCREEN_HEIGHT
    def handle_input(self, pressed_keys):
        """Move per arrow keys (elif chain: no diagonal movement); 's' adds
        10 points (presumably a debug cheat — confirm)."""
        if pressed_keys[pg.K_LEFT]:
            self.rect.move_ip(-self.MOVE_SPEED, 0)
        elif pressed_keys[pg.K_RIGHT]:
            self.rect.move_ip(self.MOVE_SPEED, 0)
        elif pressed_keys[pg.K_UP]:
            self.rect.move_ip(0, -self.MOVE_SPEED)
        elif pressed_keys[pg.K_DOWN]:
            self.rect.move_ip(0, self.MOVE_SPEED)
        if pressed_keys[pg.K_s]:
            self.score += 10
    def get_score(self):
        return self.score
    def get_lives(self):
        return self.number_of_lives
    def reset(self):
        """Restore lives, score and position for a brand-new game."""
        self.number_of_lives = self.MAX_LIVES
        self.score = 0
        self.rect.topleft = (0, (constants.SCREEN_HEIGHT + constants.SCORE_SURFACE_HEIGHT) // 2)
    def player_death(self):
        """Consume one life; return True when no lives remain (game over)."""
        self.number_of_lives -= 1
        if self.number_of_lives < 1:
            return True
        else:
            return False
    def reset_position(self):
        """Move back to the start position without touching lives or score."""
        self.rect.topleft = (0, (constants.SCREEN_HEIGHT + constants.SCORE_SURFACE_HEIGHT) // 2)
| [
"psyluche@gmail.com"
] | psyluche@gmail.com |
ee8f01d9ff709d7f69ae6cebd6b938844bdd5ee8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03290/s821428254.py | d06c09c404c991b8d3ef901f9f4a5f494259ea0e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | from math import ceil
from itertools import product
D,G=map(int,input().split())
pc=[list(map(int,input().split())) for _ in range(D)]
def calc(P):
count = 0
g = G
for i in range(D):
p,c = pc[-1-i]
if P[-1-i]==1:
count += p
g -= (D-i)*100*p+c
if g<=0:
return count
for i in range(D):
p,c = pc[-1-i]
if P[-1-i]==0:
tmp = min(p-1,ceil(g/((D-i)*100)))
count += tmp
g -= (D-i)*100*tmp
if g<=0:
return count
return -1
MIN=10**9
for P in product(range(2),repeat=D):
tmp = calc(P)
if tmp != -1 and tmp < MIN: MIN=tmp
print(MIN) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
336adee3484b679923d2a78aa7da124b2b8993fb | 8a346fd556c74454c5c65181d213f851de57269a | /NeuralNetworkConti/Test.py | 80f9c660cf2bd7dee49b37493931838bb6b4a9c0 | [] | no_license | Suraj25P/fyp | 5d0755e9aeae909ed074bdff60591201162ca2ad | ce7befe622ec2d88adca0682037bd7cf7a2d711b | refs/heads/master | 2022-11-11T10:13:16.083807 | 2020-07-04T14:45:29 | 2020-07-04T14:45:29 | 277,127,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | import tensorflow as tf
# One hard-coded 5-feature sample to classify.
data =[689,22,35334575,2692,12967]
# Load the pre-trained model.
# NOTE(review): absolute Windows path — breaks on any other machine.
reg1 = tf.keras.models.load_model('G:\\distributedscheduling-master\\NeuralNetworkConti\\NNCONTIDICloud.model')
# The model expects a batch; [[data]] wraps the features as a single sample.
pred = reg1.predict([[data]])
# Print the index of the highest-scoring output class.
print(pred.argmax())
| [
"noreply@github.com"
] | Suraj25P.noreply@github.com |
230cf400a6556c6d1b655d5c6cd8481c5ad98c61 | 2ade4dbe9db122c5e7ac98e614e71a268303c122 | /test.py | b9404713abe13138c29854d8abf0342e43145aea | [] | no_license | rossspencer715/Red-Car-Finder-KNN- | 128edba6f6696cd51f91a5de7667628fbf84f25a | 5523e9194c7fc61e455e4434d4a60d6fac7b3415 | refs/heads/master | 2020-09-26T11:39:19.003687 | 2019-12-06T04:43:07 | 2019-12-06T04:43:07 | 226,247,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 9 11:04:16 2018
@author: rossspencer
"""
'=============== Imports =============='
import numpy as np
from matplotlib import pyplot as plt
import train as trainingFunctions
'a *function* that will run your testing code on an input data set X. Your test.py code should already be trained'
def findAllRedCarsTest(Y, X, x1, x2, y1, y2, stepsize):
    """Train a classifier on ``X``, scan image ``Y`` over the window
    [x1,x2) x [y1,y2) with the given ``stepsize``, plot predicted red-car
    pixels over the image, and return their coordinates.

    Class label 2 denotes "red car". Despite the name, this function also
    trains (via trainingFunctions.train) on every call.
    NOTE(review): pixels are sampled as Y[i, j] with i from the x range —
    numpy indexes [row, col], so confirm the intended x/y orientation.
    """
    clf_trained = trainingFunctions.train(X)
    coords = []
    testingArr = []
    # Collect every sampled pixel and remember where it came from.
    for i in range(x1, x2, stepsize):
        for j in range(y1, y2, stepsize):
            coords.append([i,j])
            testingArr.append(Y[i, j])
    predictions = clf_trained.predict(testingArr)
    ##### this code is used to generate plots of the predicted red cars against the actual test image:
    plt.imshow(Y)
    for i in range(len(predictions)):
        if predictions[i] == 2:
            plt.plot([coords[i][0]], [coords[i][1]],'o')
    plt.xlim(x1, x2)
    plt.ylim(y1, y2)
    plt.show()
    # Collect the coordinates classified as red cars.
    red_cars = []
    for i in range(len(predictions)):
        if predictions[i] == 2:
            red_cars.append(coords[i])
    return red_cars
| [
"rossspencer715@gmail.com"
] | rossspencer715@gmail.com |
22aaab1d01e9b4364424bcd7d47493c3d07f2aee | 4be3afb98e5469aa397aa03047baca5d78ee5a11 | /sellers/sellers_api/serializers.py | 58f39a331d846dc804027fb16e72aae13d52abf4 | [] | no_license | matheusreis0/sellers-api | 3fbe7a18df116e217cc66b24d44e6c9d1ed23fe4 | a11b7a8340c5aff1709aa7f739ab58b83ace37f4 | refs/heads/master | 2022-12-08T08:07:12.996587 | 2020-08-26T19:47:49 | 2020-08-26T19:47:49 | 290,553,538 | 0 | 0 | null | 2020-08-28T19:11:56 | 2020-08-26T16:50:37 | Python | UTF-8 | Python | false | false | 187 | py | from rest_framework import serializers
from .models import Seller
class SellerSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the Seller model."""
    class Meta:
        model = Seller
        fields = '__all__'
| [
"matheus.reis@olist.com"
] | matheus.reis@olist.com |
9eb55066e90e41113b25a63cdb33625081d846da | 8157a8f899769458cc07d774ab59a837f21d4f25 | /playground/timer/read_local.py | ecfb183cd514e870f7f1aeb29efca4a68a446dff | [] | no_license | fu-ilab-swp18/bgp-group | 17be7d8f56fa7d7af922137abd673bf17fd0d116 | 17362761f5faed0bf230f4b67c6b75cf901480cc | refs/heads/master | 2020-03-14T12:04:14.639440 | 2018-07-25T22:19:12 | 2018-07-25T22:19:12 | 131,603,647 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | import time
# Timing probe: how long does streaming the whole BGP dump line by line take?
# NOTE(review): the input path is hard-coded to a developer machine.
f = open('/Users/mx/Projects/Uni/bgp-group/bgp_dump.txt')
t = time.time()
# Iterating the file object reads it line by line through the buffered layer;
# the with-block guarantees the handle is closed even if reading raises
# (the original readline loop leaked the handle on error).
with f:
    for _line in f:
        pass
print('Elapsed Time:', time.time() - t)
| [
"max.stauss@gmail.com"
] | max.stauss@gmail.com |
df353c1470c57f958c87b2ae52a0e9ab0e2f6a95 | 959a20c87bdcf3a5fbd1d83546dad0d6af96a77f | /week_one/user_input.py | cb1e434e88d9e46fe88aa85f4f429f5fd52a7538 | [] | no_license | tylerLoh/onemonthpython | 2131a9befc8c1d8c08a69aa9f812cfcfa9687fa9 | 02e916c25fb3583bc4563d7ae19700e3d5009f8b | refs/heads/master | 2020-11-24T07:37:20.396219 | 2019-12-22T18:30:32 | 2019-12-22T18:30:32 | 228,032,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | """To capture user input from propmt terminal
built-int input() mtehod
"""
name = input("What's your name? ")
age = float(input("How old are you? "))
age_in_dog_years = age * 7
# Drop the trailing ".0" for whole-number results, keep the float otherwise.
age_in_dog_years = int(age_in_dog_years) \
    if age_in_dog_years % 1 == 0 \
    else age_in_dog_years
print(f"{name} you are {age_in_dog_years} in dog years. Woof!")
| [
"tyleryin811@gmail.com"
] | tyleryin811@gmail.com |
bcf4f86043750b69eed83dad9603a1d113b66fb7 | a176f3705c92ec1974ada17af2a891e0bf763b97 | /core/get_input.py | 21fcd1c9c836417783cbba4c9450586ea9653811 | [
"Apache-2.0"
] | permissive | indrajithbandara/OWASP-Nettacker | 593bdf5426606e67a94e447b4a9534bf79b0396b | 087ce32f06758db03039a34e6e32fbc57fb4ffef | refs/heads/master | 2021-05-08T14:22:54.520184 | 2018-01-27T20:34:59 | 2018-01-27T20:34:59 | 120,082,592 | 1 | 0 | null | 2018-02-03T10:49:34 | 2018-02-03T10:49:33 | null | UTF-8 | Python | false | false | 514 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from core.compatible import version
from core.alert import __input_msg
def __input(msg, default):
    """Prompt the user with ``msg`` and return what they typed.

    Falls back to ``default`` when the user submits an empty line or when
    reading fails (e.g. EOF on a non-interactive stdin). Uses ``raw_input``
    on Python 2 and ``input`` on Python 3, as reported by ``version()``.
    """
    # '==' instead of the original 'is': identity comparison against an int
    # literal relies on CPython's small-int cache and raises a SyntaxWarning
    # on Python 3.8+.
    if version() == 2:
        try:
            data = raw_input(__input_msg(msg))
            if data == '':
                data = default
        except Exception:
            # Best-effort fallback; Exception (not a bare except) so that
            # KeyboardInterrupt/SystemExit still abort the program.
            data = default
    else:
        try:
            data = input(__input_msg(msg))
            if data == '':
                data = default
        except Exception:
            data = default
    return data
| [
"ali.razmjoo@owasp.org"
] | ali.razmjoo@owasp.org |
ec0fe22ab52822601adcf965f531dec7895c63aa | 66fb5bbf3cd0f2c7b00db7081271c376812b68dd | /control_planner/scripts/purepursuit.py | c4385ac0777d26b0a19d879eb7b8d5ef81f9ef78 | [] | no_license | freesloth/wecar_2 | d5e95ae67d65bcd78a60ceae95a48161656e4fab | c05888cc70ddd775a3151b722db06aa41705f6b9 | refs/heads/master | 2023-01-22T15:47:39.505150 | 2020-12-10T03:29:36 | 2020-12-10T03:29:36 | 278,523,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,179 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import rospkg
from sensor_msgs.msg import LaserScan,PointCloud,Imu
from std_msgs.msg import Float64
from vesc_msgs.msg import VescStateStamped
from laser_geometry import LaserProjection
from math import cos,sin,pi,sqrt,pow,atan2
from geometry_msgs.msg import Point32,PoseStamped, Point32,PoseStamped,Point, PoseWithCovarianceStamped
from nav_msgs.msg import Odometry,Path
import tf
from tf.transformations import euler_from_quaternion,quaternion_from_euler
class pure_pursuit:
def __init__(self):
rospy.init_node("make_path", anonymous=True)
rospy.Subscriber("path", Path, self.path_callback)
rospy.Subscriber("odom",Odometry, self.odom_callback)
# rospy.Subscriber("/amcl_pose",PoseWithCovarianceStamped, self.amcl_callback)
self.motor_pub=rospy.Publisher('commands/motor/speed',Float64,queue_size=1)
self.servo_pub=rospy.Publisher('commands/servo/position',Float64,queue_size=1)
self.motor_msg=Float64()
self.servo_msg=Float64()
self.is_path=False
self.is_odom=False
self.is_amcl=False
self.forward_point=Point()
self.current_position=Point()
self.is_look_forward_point=False
self.vehicle_length=0.5
self.lfd=0.5
self.steering=0
self.steering_angle_to_servo_gain=-1.2135
self.steering_angle_to_servo_offset=0.5304
rate=rospy.Rate(30)
while not rospy.is_shutdown():
if self.is_path==True and(self.is_odom==True or self.is_amcl==True):
vehicle_position=self.current_position
rotated_point=Point()
self.is_look_forward_point=False
for num,i in enumerate(self.path.poses):
path_point=i.pose.position
dx=path_point.x-vehicle_position.x
dy=path_point.y-vehicle_position.y
rotated_point.x=cos(self.vehicle_yaw)*dx+sin(self.vehicle_yaw)*dy
rotated_point.y=sin(self.vehicle_yaw)*dx-cos(self.vehicle_yaw)*dy
if rotated_point.x>0:
dis=sqrt(pow(rotated_point.x,2)+pow(rotated_point.y,2))
if dis>=self.lfd:
self.forward_point=path_point
self.is_look_forward_point=True
break
theta=-atan2(rotated_point.y,rotated_point.x)
if self.is_look_forward_point:
self.steering=atan2((2*self.vehicle_length*sin(theta)),self.lfd) #rad
print(self.steering*180/pi)
self.motor_msg.data=2000
else:
self.steering=0
print("no found forward point")
self.motor_msg.data=0
self.steering_command=(self.steering_angle_to_servo_gain*self.steering)+self.steering_angle_to_servo_offset
self.servo_msg.data=self.steering_command
self.servo_pub.publish(self.servo_msg)
self.motor_pub.publish(self.motor_msg)
rate.sleep()
def path_callback(self,msg):
self.is_path=True
self.path=msg
def odom_callback(self,msg):
self.is_odom=True
odom_quaternion=(msg.pose.pose.orientation.x,msg.pose.pose.orientation.y,msg.pose.pose.orientation.z,msg.pose.pose.orientation.w)
_,_,self.vehicle_yaw=euler_from_quaternion(odom_quaternion)
self.current_position.x=msg.pose.pose.position.x
self.current_position.y=msg.pose.pose.position.y
def amcl_callback(self,msg):
self.is_amcl=True
amcl_quaternion=(msg.pose.pose.orientation.x,msg.pose.pose.orientation.y,msg.pose.pose.orientation.z,msg.pose.pose.orientation.w)
_,_,self.vehicle_yaw=euler_from_quaternion(amcl_quaternion)
self.current_position.x=msg.pose.pose.position.x
self.current_position.y=msg.pose.pose.position.y
if __name__ == '__main__':
try:
test_track=pure_pursuit()
except rospy.ROSInterruptException:
pass
| [
"you@example.com"
] | you@example.com |
6fb892e8583fa7eb8e4ec468bbb83d3a7077a633 | fec9d5e77fe0248853c11084cb27ed3b1dcb500c | /users/arxiv/users/legacy/tests/test_endorsements.py | f38ff79e7bc0d63bd241498e861451266964fc1c | [
"MIT"
] | permissive | SamanthaFeidFischer/arxiv-auth | bb1c41f018a3573fa1cbc7b5def9532deb6bdd9c | 08df4e0196a04a06eac1d26477b3ad56ebf56f08 | refs/heads/develop | 2021-06-13T09:58:48.592901 | 2018-09-12T21:31:04 | 2018-09-12T21:31:04 | 148,851,802 | 1 | 0 | MIT | 2021-04-29T19:40:12 | 2018-09-14T23:37:49 | Python | UTF-8 | Python | false | false | 11,057 | py | """Tests for :mod:`arxiv.users.legacy.endorsements` using a live test DB."""
import os
from unittest import TestCase, mock
from datetime import datetime
from pytz import timezone
from flask import Flask
from mimesis import Person, Internet, Datetime
from .. import endorsements, util, models
from ... import domain
EASTERN = timezone('US/Eastern')
class TestAutoEndorsement(TestCase):
"""Tests for :func:`get_autoendorsements`."""
def setUp(self):
"""Generate some fake data."""
self.app = Flask('test')
util.init_app(self.app)
self.app.config['CLASSIC_DATABASE_URI'] = 'sqlite:///test.db'
self.app.config['CLASSIC_SESSION_HASH'] = 'foohash'
with self.app.app_context():
util.create_all()
with util.transaction() as session:
person = Person('en')
net = Internet('en')
ip_addr = net.ip_v4()
email = person.email()
approved = 1
deleted = 0
banned = 0
first_name = person.name()
last_name = person.surname()
suffix_name = person.title()
joined_date = util.epoch(
Datetime('en').datetime().replace(tzinfo=EASTERN)
)
db_user = models.DBUser(
first_name=first_name,
last_name=last_name,
suffix_name=suffix_name,
share_first_name=1,
share_last_name=1,
email=email,
flag_approved=approved,
flag_deleted=deleted,
flag_banned=banned,
flag_edit_users=0,
flag_edit_system=0,
flag_email_verified=1,
share_email=8,
email_bouncing=0,
policy_class=2, # Public user. TODO: consider admin.
joined_date=joined_date,
joined_ip_num=ip_addr,
joined_remote_host=ip_addr
)
session.add(db_user)
self.user = domain.User(
user_id=str(db_user.user_id),
username='foouser',
email=db_user.email,
name=domain.UserFullName(
forename=db_user.first_name,
surname=db_user.last_name,
suffix=db_user.suffix_name
)
)
def tearDown(self):
"""Remove the test DB."""
os.remove('./test.db')
def test_invalidated_autoendorsements(self):
"""The user has two autoendorsements that have been invalidated."""
with self.app.app_context():
with util.transaction() as session:
issued_when = util.epoch(
Datetime('en').datetime().replace(tzinfo=EASTERN)
)
session.add(models.DBEndorsement(
endorsee_id=self.user.user_id,
archive='astro-ph',
subject_class='CO',
flag_valid=0,
endorsement_type='auto',
point_value=10,
issued_when=issued_when
))
session.add(models.DBEndorsement(
endorsee_id=self.user.user_id,
archive='astro-ph',
subject_class='CO',
flag_valid=0,
endorsement_type='auto',
point_value=10,
issued_when=issued_when
))
session.add(models.DBEndorsement(
endorsee_id=self.user.user_id,
archive='astro-ph',
subject_class='CO',
flag_valid=1,
endorsement_type='auto',
point_value=10,
issued_when=issued_when
))
session.add(models.DBEndorsement(
endorsee_id=self.user.user_id,
archive='astro-ph',
subject_class='CO',
flag_valid=1,
endorsement_type='user',
point_value=10,
issued_when=issued_when
))
result = endorsements.invalidated_autoendorsements(self.user)
self.assertEqual(len(result), 2, "Two revoked endorsements are loaded")
def test_category_policies(self):
"""Load category endorsement policies from the database."""
with self.app.app_context():
with util.transaction() as session:
session.add(models.DBCategory(
archive='astro-ph',
subject_class='CO',
definitive=1,
active=1,
endorsement_domain='astro-ph'
))
session.add(models.DBEndorsementDomain(
endorsement_domain='astro-ph',
endorse_all='n',
mods_endorse_all='n',
endorse_email='y',
papers_to_endorse=3
))
policies = endorsements.category_policies()
category = domain.Category('astro-ph', 'CO')
self.assertIn(category, policies, "Data are loaded for categories")
self.assertEqual(policies[category]['domain'], 'astro-ph')
self.assertFalse(policies[category]['endorse_all'])
self.assertTrue(policies[category]['endorse_email'])
self.assertEqual(policies[category]['min_papers'], 3)
def test_domain_papers(self):
"""Get the number of papers published in each domain."""
with self.app.app_context():
with util.transaction() as session:
# User owns three papers.
session.add(models.DBPaperOwners(
document_id=1,
user_id=self.user.user_id,
flag_author=0, # <- User is _not_ an author.
valid=1
))
session.add(models.DBDocuments(
document_id=1,
paper_id='2101.00123',
dated=util.epoch(datetime.now(tz=EASTERN))
))
session.add(models.DBDocumentInCategory(
document_id=1,
archive='cs',
subject_class='DL',
is_primary=1
))
session.add(models.DBCategory(
archive='cs',
subject_class='DL',
definitive=1,
active=1,
endorsement_domain='firstdomain'
))
# Here's another paper.
session.add(models.DBPaperOwners(
document_id=2,
user_id=self.user.user_id,
flag_author=1, # <- User is an author.
valid=1
))
session.add(models.DBDocuments(
document_id=2,
paper_id='2101.00124',
dated=util.epoch(datetime.now(tz=EASTERN))
))
session.add(models.DBDocumentInCategory(
document_id=2,
archive='cs',
subject_class='IR',
is_primary=1
))
session.add(models.DBCategory(
archive='cs',
subject_class='IR',
definitive=1,
active=1,
endorsement_domain='firstdomain'
))
# Here's a paper for which the user is an author.
session.add(models.DBPaperOwners(
document_id=3,
user_id=self.user.user_id,
flag_author=1,
valid=1
))
session.add(models.DBDocuments(
document_id=3,
paper_id='2101.00125',
dated=util.epoch(datetime.now(tz=EASTERN))
))
# It has both a primary and a secondary classification.
session.add(models.DBDocumentInCategory(
document_id=3,
archive='astro-ph',
subject_class='EP',
is_primary=1
))
session.add(models.DBDocumentInCategory(
document_id=3,
archive='astro-ph',
subject_class='CO',
is_primary=0 # <- secondary!
))
session.add(models.DBCategory(
archive='astro-ph',
subject_class='EP',
definitive=1,
active=1,
endorsement_domain='seconddomain'
))
session.add(models.DBCategory(
archive='astro-ph',
subject_class='CO',
definitive=1,
active=1,
endorsement_domain='seconddomain'
))
papers = endorsements.domain_papers(self.user)
self.assertEqual(papers['firstdomain'], 2)
self.assertEqual(papers['seconddomain'], 2)
def test_is_academic(self):
"""Determine whether a user is academic based on email."""
ok_patterns = ['%w3.org', '%aaas.org', '%agu.org', '%ams.org']
bad_patterns = ['%.com', '%.net', '%.biz.%']
with self.app.app_context():
with util.transaction() as session:
for pattern in ok_patterns:
session.add(models.DBEmailWhitelist(
pattern=str(pattern)
))
for pattern in bad_patterns:
session.add(models.DBEmailBlacklist(
pattern=str(pattern)
))
self.assertTrue(endorsements.is_academic(domain.User(
user_id='2',
email='someone@fsu.edu',
username='someone'
)))
self.assertFalse(endorsements.is_academic(domain.User(
user_id='2',
email='someone@fsu.biz.edu',
username='someone'
)))
self.assertTrue(endorsements.is_academic(domain.User(
user_id='2',
email='someone@aaas.org',
username='someone'
)))
self.assertFalse(endorsements.is_academic(domain.User(
user_id='2',
email='someone@foo.com',
username='someone'
)))
| [
"brp53@cornell.edu"
] | brp53@cornell.edu |
8aaf8f3f3727e9f197dde7885aeb985345df5674 | 8073d1dfce9df3bdc1155c9a6052a0a2a8075201 | /pframe_dataset_torch.py | 446ce56f2efcd2e739d5461f10e1eb252aa56e3b | [] | no_license | relational/clic2020-devkit | a7c19dd28b9b7ddf3d5a7e609f0d396b8250bc54 | 9a02862c4ebfdc8eba7edb5bd168aeaf589057ef | refs/heads/master | 2020-09-15T15:14:27.761441 | 2019-11-22T16:09:25 | 2019-11-22T16:09:25 | 223,486,717 | 0 | 0 | null | 2019-11-22T21:08:49 | 2019-11-22T21:08:48 | null | UTF-8 | Python | false | false | 2,069 | py | import torch
from PIL import Image
import glob
from torch.utils.data.dataset import Dataset
from torch.nn import functional as F
from torchvision.transforms.functional import to_tensor
import pframe_dataset_shared
class YUVFramesDataset(Dataset):
"""
Yields frames either as tuples (Y, U, V) or, if merge_channels=True, as a single tensor (YUV).
Dataformat is always torch default, CHW, and dtype is float32, output is in [0, 1]
"""
def __init__(self, data_root, merge_channels=False):
self.frame_ps = YUVFramesDataset.get_frames_paths(data_root)
self.merge_channels = merge_channels
@staticmethod
def get_frames_paths(data_root):
""" :return a list of tuples, [(Y, U, V)]"""
globs = pframe_dataset_shared.get_yuv_globs(data_root)
ys, us, vs = (sorted(glob.glob(g)) for g in globs)
return list(zip(ys, us, vs))
def __len__(self):
return len(self.frame_ps)
def __getitem__(self, idx):
y, u, v = (to_tensor(Image.open(p)) for p in self.frame_ps[idx])
if not self.merge_channels:
return y, u, v
yuv = yuv_422_to_444(y, u, v)
return yuv
class FramePairsDataset(Dataset):
def __init__(self, data_root, merge_channels=False):
self.yuv_frames_dataset = YUVFramesDataset(data_root, merge_channels)
def __getitem__(self, idx):
frame_1 = self.yuv_frames_dataset[idx]
frame_2 = self.yuv_frames_dataset[idx + 1]
return frame_1, frame_2
def __len__(self):
# substract one because we always look at tuples, final one is (N-1, N)
return len(self.yuv_frames_dataset) - 1
def yuv_422_to_444(y, u, v):
""" Convert Y, U, V, given in 422, to RGB 444. Expects CHW dataformat """
u, v = map(_upsample_nearest_neighbor, (u, v)) # upsample U, V
return torch.cat((y, u, v), dim=0) # merge
def _upsample_nearest_neighbor(t, factor=2):
""" Upsample tensor `t` by `factor`. """
return F.interpolate(t.unsqueeze(0), scale_factor=factor, mode='nearest').squeeze(0)
| [
"fmentzer@me.com"
] | fmentzer@me.com |
a8d0358b14e0899a93fda27fcd872490e907be31 | 5c0a253bf2fb83db01abc99097871c965f4cf565 | /study/machinelearning/clustering/flat/KMeans/kMeansWithScratch.py | 6e509df62cf33b8d8436cb1ad679ecef276bd6d5 | [] | no_license | airuibel/python-1 | 3b16553ede9d069ec56efbb12a89a4de6917a447 | 94f387e2d406fab2128bcfffce6146da720b2ccc | refs/heads/master | 2020-07-05T15:43:00.957221 | 2017-09-17T14:05:48 | 2017-09-17T14:05:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,870 | py | # -*- utf-8 -*-
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
import numpy as np
X = np.array([[1, 2],
[1.5, 1.8],
[5, 8],
[8, 8],
[1, 0.6],
[9, 11],
[1, 3],
[8, 9],
[0, 3],
[5, 4],
[6, 4], ])
plt.scatter(X[:, 0], X[:, 1], s = 50)
plt.show()
colors = 10 * ["g", "r", "c", "b", "k"]
class K_Means:
def __init__(self, k = 2, tol = 0.001, max_iter = 300):
self.k = k
self.tol = tol
self.max_iter = max_iter
def fit(self, data):
self.centroids = {}
for i in range(self.k):
self.centroids[i] = data[i]
for i in range(self.max_iter):
self.classifications = {}
for i in range(self.k):
self.classifications[i] = []
for featureset in data:
distances = [np.linalg.norm(featureset - self.centroids[centroid]) for centroid in self.centroids]
classification = distances.index(min(distances))
self.classifications[classification].append(featureset)
prev_centroids = dict(self.centroids)
for classification in self.classifications:
self.centroids[classification] = np.average(self.classifications[classification], axis = 0)
optimized = True
for c in self.centroids:
original_centroid = prev_centroids[c]
current_centroid = self.centroids[c]
if np.sum((current_centroid - original_centroid) / original_centroid * 100) > self.tol:
print(np.sum((current_centroid - original_centroid) / original_centroid * 100.0))
optimized = False
if optimized:
break
def predict(self, data):
distances = [np.linalg.norm(data - self.centroids[centroid]) for centroid in self.centroids]
classification = distances.index(min(distances))
return classification
clf = K_Means()
clf.fit(X)
for centroid in clf.centroids:
plt.scatter(clf.centroids[centroid][0], clf.centroids[centroid][1],
marker = "o", color = "k", s = 150, linewidths = 5)
for classification in clf.classifications:
color = colors[classification]
for featureset in clf.classifications[classification]:
plt.scatter(featureset[0], featureset[1], marker = "x", color = color, s = 150, linewidths = 5)
unknowns = np.array([[1, 3],
[8, 9],
[0, 3],
[5, 4],
[6, 4], ])
for unknown in unknowns:
classification = clf.predict(unknown)
plt.scatter(unknown[0], unknown[1], marker = "x", color = colors[classification], s = 50, linewidths = 5)
plt.show()
| [
"cysuncn@126.com"
] | cysuncn@126.com |
1fc8c3edd2c2ef3d16c220f36cb7d72c3bcad84f | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-datalabeling/samples/generated_samples/datalabeling_v1beta1_generated_data_labeling_service_list_annotated_datasets_async.py | 5f3fe6b186baf941dd84aef169d534faaebfa3cf | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 2,012 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListAnnotatedDatasets
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-datalabeling
# [START datalabeling_v1beta1_generated_DataLabelingService_ListAnnotatedDatasets_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import datalabeling_v1beta1
async def sample_list_annotated_datasets():
# Create a client
client = datalabeling_v1beta1.DataLabelingServiceAsyncClient()
# Initialize request argument(s)
request = datalabeling_v1beta1.ListAnnotatedDatasetsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_annotated_datasets(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END datalabeling_v1beta1_generated_DataLabelingService_ListAnnotatedDatasets_async]
| [
"noreply@github.com"
] | googleapis.noreply@github.com |
d49d4df253e01b51cbef0fd3337a5d662b8bb43c | 6a2c2af113bb8b4d55db6ceabc6e78a0bbcd1f91 | /genus processing/Shorts Back Pocket Flap.py | c5157eb6d81a0e4a0684d901d89b3339771afb61 | [] | no_license | JinghongM/Everlasting_Data_Cleansing | 4a966aca5cba102961f64338411d76e51f60f51e | 237073980b2bd1697db578013c7463dcbc1492fb | refs/heads/master | 2021-04-26T23:48:38.083155 | 2018-06-21T20:00:11 | 2018-06-21T20:00:11 | 123,861,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | import pandas as pd
import copy
import os.path
Pattern=6
Material=7
Species=4
CGP = pd.read_excel("../Realdata.xlsx")
for row in range(1,CGP.shape[0]):
genus = str(CGP.iat[row,3])
if "Shorts Back Pocket Flap" == genus:
print(row)
CGP.iat[row,Species] = "Back Pocket"
CGP.iat[row,3] = "Shorts"
i=0 #process headers
while i<len(CGP.columns.values):
if "Unnamed" in CGP.columns.values[i]:
CGP.columns.values[i] = ''
i+=1
CGP.to_excel('../Realdata.xlsx',index=False) | [
"noreply@github.com"
] | JinghongM.noreply@github.com |
db163f23c75034ea1dd7b2d3897238e64962d9d5 | 9b85af3301f88c6d3c319d1cda641fd690d0d367 | /handwrite/DigitRecognizer.py | c2646217a3527f814a6b4b00a575c053941880f6 | [] | no_license | mmm311/kaggle | 44d424252a56bfb854c6086b8aa425aee5a957cb | 79ba34399d577beb7e15d7986fb9ddf4869d7da4 | refs/heads/master | 2020-06-20T06:43:58.222753 | 2016-12-11T06:57:21 | 2016-12-11T06:57:21 | 74,873,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,159 | py | '''
Created on 2016年10月24日
@author: liu
'''
from sklearn.ensemble import RandomForestClassifier
from numpy import genfromtxt, savetxt, dtype
import numpy as np
def main():
#创建训练集
dataset = np.loadtxt(r'E:\graduate program\code\python\data\train.csv',
dtype ='f8',
delimiter = ',')
target = [x[0] for x in dataset] # 第一列为label
train = [x[1:] for x in dataset]
#创建测试集
test = np.loadtxt(r'E:\graduate program\code\python\data\test.csv',
dtype = 'f8',
delimiter = ',')
#创建并且训练一个随机森林模型
rf = RandomForestClassifier(n_estimators=100)
rf.fit(train, target)
predicted_result = [[index + 1, x] for index , x in enumerate(rf.predict(test))]
#利用随机森林对森林对测试集进行预测,并将结果保存到输出文件中
savetxt('result.csv',predicted_result,delimiter = ',', fmt = '%d, %d',
header = 'ImagedId,Label',comments = '')
if __name__ == '__main__':
main()
| [
"1034930026@qq.com"
] | 1034930026@qq.com |
e3aa31f0202dec4519341cd7b14b24a1eed94c12 | 1ff980bd6c225d09b9bf10942336a74caf7d0ccd | /myweb/urls.py | bf71b11f8580ee8fe909066a9699c43313a00b94 | [] | no_license | dofospider/myweb | feb6907544edb76156364fc32667ba82c02c56d8 | d12bb1f1604260369805180c6c13afa2899f2832 | refs/heads/master | 2021-08-30T16:03:13.648106 | 2017-12-18T14:56:57 | 2017-12-18T14:56:57 | 110,114,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | """myweb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from mainsite.views import homepage, showpost, home, index
from threeD.views import maintd
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', index),
url(r'^main/$', homepage),
url(r'^post/(\w+)$', showpost),
url(r'^3d/$', maintd),
url(r'^home/$', home),
]
| [
"dofospider@gmail.com"
] | dofospider@gmail.com |
843d17cca1e8dbc0f76f485585fb22132ff28259 | 044ec056bc99ba090d3ea3368974d52cc5ac32f3 | /Modul6/coding.py | ba5e22b8be7ad99c0ac253832e160b786fc1e345 | [] | no_license | L200184044/prak_algostruk | d527115b7dce7b4bac5908a276798a15bf796e10 | 5e330ff77a7024d11dbccb68a3af66b82553a276 | refs/heads/master | 2021-02-12T21:15:39.757178 | 2020-07-04T06:43:34 | 2020-07-04T06:43:34 | 244,631,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,813 | py | Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 22:45:29) [MSC v.1916 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> #NOMER 1
print ('-----Nomer 1-----')
class MhsTIF(object):
    """Plain record of a TIF student: name, NIM, hometown and pocket money."""

    def __init__(self, nama, nim, asal, uangsaku):
        # Store every constructor argument verbatim on the instance.
        self.nama, self.nim = nama, nim
        self.asal, self.uangsaku = asal, uangsaku
# Eleven sample MhsTIF records (name, NIM, hometown, pocket money).
m0 = MhsTIF('Baity', 9, 'Klaten', 300000)
m1 = MhsTIF('Lutfi', 10, 'Semarang', 320000)
m2 = MhsTIF('Mifta', 23, 'Kartasura', 350000)
m3 = MhsTIF('Falah', 45, 'Solo', 290000)
m4 = MhsTIF('Dewi', 27, 'Karanganyar', 310000)
m5 = MhsTIF('Lia', 56, 'Wonogiri', 380000)
m6 = MhsTIF('Bagus', 2, 'Boyolali', 280000)
m7 = MhsTIF('Wahyu', 8, 'Sragen', 330000)
m8 = MhsTIF('Laila', 34, 'Purwodadi', 340000)
m9 = MhsTIF('Alfina', 60, 'Sleman', 390000)
m10 = MhsTIF('Wafiq', 51, 'Magelang', 370000)
# The NIM values gathered into a plain list: the input for the sort demos.
urut =[m0.nim, m1.nim, m2.nim, m3.nim, m4.nim, m5.nim,
       m6.nim, m7.nim,m8.nim, m9.nim, m10.nim]
def mergeSort(nlist):
    """Merge-sort nlist in place, printing every split and merge step."""
    print("Membelah ", nlist)
    if len(nlist) > 1:
        tengah = len(nlist) // 2
        kiri = nlist[:tengah]
        kanan = nlist[tengah:]
        mergeSort(kiri)
        mergeSort(kanan)
        # Merge the two sorted halves into a scratch list, then copy back.
        hasil = []
        a = b = 0
        while a < len(kiri) and b < len(kanan):
            if kiri[a] < kanan[b]:
                hasil.append(kiri[a])
                a += 1
            else:
                hasil.append(kanan[b])
                b += 1
        hasil.extend(kiri[a:])
        hasil.extend(kanan[b:])
        nlist[:] = hasil
    print("Menggabungkan ", nlist)
# Demo: sort the NIM list with the tracing merge sort.
# NOTE(review): nlist is an alias of urut (no copy is made), so urut itself
# is left sorted after this call.
nlist = urut
print("Hasil MergeSort")
mergeSort(nlist)
print(nlist)
def quickSort(data_list):
    """Sort data_list in place using recursive quicksort."""
    quickSortHlp(data_list, 0, len(data_list) - 1)
def quickSortHlp(data_list, first, last):
    """Quicksort the inclusive sub-range data_list[first..last]."""
    if first >= last:
        return
    split = partition(data_list, first, last)
    quickSortHlp(data_list, first, split - 1)
    quickSortHlp(data_list, split + 1, last)
def partition(data_list, first, last):
    """Partition around pivot data_list[first]; return its final index."""
    pivot = data_list[first]
    lo = first + 1
    hi = last
    while True:
        # Advance lo past elements <= pivot, retreat hi past elements >= pivot.
        while lo <= hi and data_list[lo] <= pivot:
            lo += 1
        while data_list[hi] >= pivot and hi >= lo:
            hi -= 1
        if hi < lo:
            break
        data_list[lo], data_list[hi] = data_list[hi], data_list[lo]
    # Drop the pivot into its sorted slot.
    data_list[first], data_list[hi] = data_list[hi], data_list[first]
    return hi
# Demo: quicksort the same list object.
# NOTE(review): data_list aliases urut too, and urut was already sorted in
# place by the merge-sort demo above, so quicksort here gets sorted input.
data_list = urut
quickSort(data_list)
print("\n"+"Hasil QuickSort")
print(data_list)
#NOMER 3
print('-----Nomer 3-----')
from time import time as detak
from random import shuffle as kocok
import time
# Timing workload: the integers 1..6000 in shuffled order.
k = [i for i in range(1,6001)]
kocok(k)
def bubbleSort(X):
    """In-place ascending bubble sort."""
    n = len(X)
    # After each outer pass the largest remaining value settles at `end`.
    for end in range(n - 1, 0, -1):
        for j in range(end):
            if X[j] > X[j + 1]:
                X[j], X[j + 1] = X[j + 1], X[j]
def selectionSort(X):
    """In-place ascending selection sort."""
    n = len(X)
    for pos in range(n):
        # Index of the smallest remaining element (first one on ties).
        smallest = min(range(pos, n), key=X.__getitem__)
        X[pos], X[smallest] = X[smallest], X[pos]
def insertSort(X):
    """In-place ascending insertion sort.

    BUG FIX: the original compared against X[abc-1] and copied X[abc+1]
    leftward, which corrupts the list instead of sorting it (e.g. [2, 1]
    was left unchanged).  Standard insertion sort shifts larger elements
    one slot to the right and drops the saved value into the gap.
    """
    n = len(X)
    for i in range(1, n):
        nilai = X[i]          # element being inserted into the sorted prefix
        abc = i - 1
        # Shift every larger prefix element one slot right.
        while abc >= 0 and nilai < X[abc]:
            X[abc + 1] = X[abc]
            abc -= 1
        X[abc + 1] = nilai
def mergeSort(X):
    """Sort list X in place via top-down merge sort."""
    if len(X) <= 1:
        return
    half = len(X) // 2
    left = X[:half]
    right = X[half:]
    mergeSort(left)
    mergeSort(right)
    # Merge the sorted halves back into X.
    pos = li = ri = 0
    while li < len(left) and ri < len(right):
        if left[li] < right[ri]:
            X[pos] = left[li]
            li += 1
        else:
            X[pos] = right[ri]
            ri += 1
        pos += 1
    for val in left[li:]:
        X[pos] = val
        pos += 1
    for val in right[ri:]:
        X[pos] = val
        pos += 1
def partition(X, low, high):
    """Lomuto partition of X[low..high] around pivot X[high].

    Returns the pivot's final (sorted) index.
    """
    pivot = X[high]
    boundary = low          # next slot for an element <= pivot
    for scan in range(low, high):
        if X[scan] <= pivot:
            X[boundary], X[scan] = X[scan], X[boundary]
            boundary += 1
    X[boundary], X[high] = X[high], X[boundary]
    return boundary
def quickSort(X, low, high):
    """Quicksort the inclusive sub-range X[low..high] in place."""
    if low >= high:
        return
    pivot_index = partition(X, low, high)
    quickSort(X, low, pivot_index - 1)
    quickSort(X, pivot_index + 1, high)
# One shuffled copy of k per algorithm so every sort gets identical input.
u_bub = k[:]
u_sel = k[:]
u_ins = k[:]
u_mer = k[:]
u_qck = k[:]
# Wall-clock each sort: aw = start stamp, ak = end stamp, printed in seconds.
aw = detak () ; bubbleSort (u_bub) ; ak = detak() ; print('bubble : % g detik' % (ak - aw)) ;
aw = detak () ; selectionSort (u_sel) ; ak = detak() ; print('selection : % g detik' % (ak - aw)) ;
aw = detak () ; insertSort (u_ins) ; ak = detak() ; print('insert : % g detik' % (ak - aw)) ;
aw = detak () ; mergeSort (u_mer) ; ak = detak() ; print('merge : % g detik' % (ak - aw)) ;
aw = detak () ; quickSort (u_qck, 0, len(u_qck)-1) ; ak = detak() ; print('quick : % g detik' % (ak - aw)) ;
# NOMER 5
print ('-----Nomer 5-----')
import random
def _merge_sort(indices, the_list):
    """Recursively merge-sort the_list over the inclusive index range
    (indices[0], indices[1]); mutates in place and returns the_list."""
    start = indices[0]
    end = indices[1]
    # Midpoint of the inclusive range; the left half is [start..half_way].
    half_way = (end - start)//2 + start
    if start < half_way:
        _merge_sort((start, half_way), the_list)
    # Recurse on the right half, except for a two-element range, whose
    # single-element right half needs no recursion (the merge handles it).
    if half_way + 1 <= end and end - start != 1:
        _merge_sort((half_way + 1, end), the_list)
    sort_sub_list(the_list, indices[0], indices[1])
    return the_list
def sort_sub_list(the_list, start, end):
    """Merge the two already-sorted halves of the_list[start..end]
    (inclusive) back into place; returns the_list."""
    orig_start = start
    # First index of the second (right) half.
    initial_start_second_list = (end - start)//2 + start + 1
    list2_first_index = initial_start_second_list
    new_list = []
    # Standard two-pointer merge into a scratch list.
    while start < initial_start_second_list and list2_first_index <= end:
        first1 = the_list[start]
        first2 = the_list[list2_first_index]
        if first1 > first2:
            new_list.append(first2)
            list2_first_index += 1
        else:
            new_list.append(first1)
            start += 1
    # Drain whichever half still has elements left.
    while start < initial_start_second_list:
        new_list.append(the_list[start])
        start += 1
    while list2_first_index <= end:
        new_list.append(the_list[list2_first_index])
        list2_first_index += 1
    # Copy the merged run back over the original slots.
    for i in new_list:
        the_list[orig_start] = i
        orig_start += 1
    return the_list
def merge_sort(the_list):
    """Sort the_list ascending; mutates it in place and returns it."""
    return _merge_sort((0, len(the_list) - 1), the_list)
print(merge_sort([13,45,12,3,10,2]))
# NOMER 6
print ('-----Nomer 6-----')
def quickSort(L, ascending = True):
    """Quicksort L in place; ascending=False sorts descending.

    NOTE(review): shadows the earlier quickSort definitions in this file.
    """
    quicksorthelp(L, 0, len(L), ascending)
def quicksorthelp(L, low, high, ascending = True):
    """Sort L[low:high] (high exclusive); returns the number of element
    comparisons performed by the partitioning steps."""
    result = 0
    if low < high:
        pivot_location, result = Partition(L, low, high, ascending)
        result += quicksorthelp(L, low, pivot_location, ascending)
        result += quicksorthelp(L, pivot_location + 1, high, ascending)
    return result
def Partition(L, low, high, ascending = True):
    """Partition L[low:high] around a median-of-three pivot.

    Returns a tuple (final pivot index, comparisons made)."""
    result = 0
    pivot, pidx = median_of_three(L, low, high)
    # Move the chosen pivot to the front before scanning.
    L[low], L[pidx] = L[pidx], L[low]
    i = low + 1
    for j in range(low+1, high, 1):
        result += 1
        if (ascending and L[j] < pivot) or (not ascending and L[j] > pivot):
            L[i], L[j] = L[j], L[i]
            i += 1
    # Drop the pivot between the two partitions.
    L[low], L[i-1] = L[i-1], L[low]
    return i - 1, result
def median_of_three(L, low, high):
    """Return (value, index) of the median of L[low], L[mid], L[high-1].

    If none of the chained comparisons matches, L[low] itself is the
    median, hence the final fallback."""
    mid = (low+high-1)//2
    a = L[low]
    b = L[mid]
    c = L[high-1]
    if a <= b <= c:
        return b, mid
    if c <= b <= a:
        return b, mid
    if a <= c <= b:
        return c, high-1
    if b <= c <= a:
        return c, high-1
    return a, low
liste1 = list([14,4,2,104,23,50])
quickSort(liste1, False) # descending order
print('sorted:')
print(liste1)
# N0MER 7
print ('-----Nomer 7-----')
from time import time as detak
from random import shuffle as kocok
import time
k = [i for i in range(1,6001)]
kocok(k)
def mergeSort(arr):
    """Top-down merge sort; rearranges arr in place (ascending)."""
    if len(arr) < 2:
        return
    mid = len(arr) // 2
    left, right = arr[:mid], arr[mid:]
    mergeSort(left)
    mergeSort(right)
    # Write the merged halves straight back into arr.
    i = j = 0
    for slot in range(len(arr)):
        take_left = j >= len(right) or (i < len(left) and left[i] < right[j])
        if take_left:
            arr[slot] = left[i]
            i += 1
        else:
            arr[slot] = right[j]
            j += 1
def partition(arr, low, high):
    """Lomuto partition of arr[low..high] around pivot arr[high];
    returns the pivot's final index."""
    pivot = arr[high]
    store = low - 1          # last filled slot of the <= pivot region
    j = low
    while j < high:
        if arr[j] <= pivot:
            store += 1
            arr[store], arr[j] = arr[j], arr[store]
        j += 1
    arr[store + 1], arr[high] = arr[high], arr[store + 1]
    return store + 1
def quickSort(arr, low, high):
    """Recursive quicksort over the inclusive range arr[low..high]."""
    if low < high:
        p = partition(arr, low, high)
        quickSort(arr, low, p - 1)
        quickSort(arr, p + 1, high)
import random
def _merge_sort(indices, the_list):
    """Recursive step: merge-sort the_list over the inclusive range
    (indices[0], indices[1]) in place.

    NOTE(review): unlike the Nomer 5 copy of this function, this version
    has no return statement and therefore returns None."""
    start = indices[0]
    end = indices[1]
    # Midpoint of the inclusive range; left half is [start..half_way].
    half_way = (end - start)//2 + start
    if start < half_way:
        _merge_sort((start, half_way), the_list)
    # Skip the right recursion for a two-element range (single right item).
    if half_way + 1 <= end and end - start != 1:
        _merge_sort((half_way + 1, end), the_list)
    sort_sub_list(the_list, indices[0], indices[1])
def sort_sub_list(the_list, start, end):
    """Merge the two sorted halves of the_list[start..end] (inclusive)
    back into place."""
    orig_start = start
    # First index of the right half.
    initial_start_second_list = (end - start)//2 + start + 1
    list2_first_index = initial_start_second_list
    new_list = []
    # Two-pointer merge into a scratch list.
    while start < initial_start_second_list and list2_first_index <= end:
        first1 = the_list[start]
        first2 = the_list[list2_first_index]
        if first1 > first2:
            new_list.append(first2)
            list2_first_index += 1
        else:
            new_list.append(first1)
            start += 1
    while start < initial_start_second_list:
        new_list.append(the_list[start])
        start += 1
    while list2_first_index <= end:
        new_list.append(the_list[list2_first_index])
        list2_first_index += 1
    # Write the merged run back over the original slots.
    for i in new_list:
        the_list[orig_start] = i
        orig_start += 1
def merge_sort(the_list):
    """Sort the_list ascending in place.

    NOTE(review): returns None here (_merge_sort has no return value in
    this copy), so callers must use the mutated argument, not the result."""
    return _merge_sort((0, len(the_list) - 1), the_list)
def quickSortMOD(L, ascending = True):
    """Median-of-three quicksort; sorts L in place, descending when
    ascending=False."""
    quicksorthelp(L, 0, len(L), ascending)
def quicksorthelp(L, low, high, ascending = True):
    """Sort L[low:high] (high exclusive); returns the comparison count.

    NOTE(review): redefines the Nomer 6 helper of the same name."""
    result = 0
    if low < high:
        pivot_location, result = Partition(L, low, high, ascending)
        result += quicksorthelp(L, low, pivot_location, ascending)
        result += quicksorthelp(L, pivot_location + 1, high, ascending)
    return result
def Partition(L, low, high, ascending = True):
    """Partition L[low:high] around a median-of-three pivot; returns
    (final pivot index, comparisons made)."""
    result = 0
    pivot, pidx = median_of_three(L, low, high)
    # Move the chosen pivot to the front before scanning.
    L[low], L[pidx] = L[pidx], L[low]
    i = low + 1
    for j in range(low+1, high, 1):
        result += 1
        if (ascending and L[j] < pivot) or (not ascending and L[j] > pivot):
            L[i], L[j] = L[j], L[i]
            i += 1
    # Drop the pivot between the two partitions.
    L[low], L[i-1] = L[i-1], L[low]
    return i - 1, result
def median_of_three(L, low, high):
    """Return (value, index) of the median of L[low], L[mid], L[high-1];
    when no chained comparison matches, L[low] itself is the median."""
    mid = (low+high-1)//2
    a = L[low]
    b = L[mid]
    c = L[high-1]
    if a <= b <= c:
        return b, mid
    if c <= b <= a:
        return b, mid
    if a <= c <= b:
        return c, high-1
    if b <= c <= a:
        return c, high-1
    return a, low
# Fresh copies of the shuffled workload so every algorithm sorts the
# same input.
mer = k[:]
qui = k[:]
mer2 = k[:]
qui2 = k[:]
# Time each sort: aw = start stamp, ak = end stamp (seconds).
# BUG FIX: the original never refreshed `ak` for the last two lines, so
# 'merge mod' and 'quick mod' printed a stale end stamp minus a fresh
# start stamp — meaningless (often negative) durations.
aw=detak();mergeSort(mer);ak=detak();print('merge : %g detik' %(ak-aw));
aw=detak();quickSort(qui,0,len(qui)-1);ak=detak();print('quick : %g detik' %(ak-aw));
aw=detak();merge_sort(mer2);ak=detak();print('merge mod : %g detik' %(ak-aw));
aw=detak();quickSortMOD(qui2, False);ak=detak();print('quick mod : %g detik' %(ak-aw));
# NOMER 8
print ('-----Nomer 8-----')
class Node:
    """A single cell of a singly linked list."""

    def __init__(self, data):
        self.data = data   # payload value
        self.next = None   # successor cell, or None at the tail
class LinkedList:
    """Minimal singly linked list with sorted insertion and merging."""

    def __init__(self):
        self.head = None

    def appendList(self, data):
        """Append data at the tail of the list."""
        fresh = Node(data)
        if self.head is None:
            self.head = fresh
            return
        tail = self.head
        while tail.next is not None:
            tail = tail.next
        tail.next = fresh

    def appendSorted(self, data):
        """Insert data so ascending order is kept (list assumed sorted)."""
        fresh = Node(data)
        before, after = None, self.head
        # Walk until `after` is the first node not smaller than data.
        while after is not None and after.data < data:
            before, after = after, after.next
        fresh.next = after
        if before is None:
            self.head = fresh
        else:
            before.next = fresh

    def printList(self):
        """Print each element on its own line, head to tail."""
        walker = self.head
        while walker is not None:
            print("%d" % walker.data)
            walker = walker.next

    def mergeSorted(self, list1, list2):
        """Destructively merge two sorted node chains; return merged head."""
        if list1 is None:
            return list2
        if list2 is None:
            return list1
        if list1.data < list2.data:
            list1.next = self.mergeSorted(list1.next, list2)
            return list1
        list2.next = self.mergeSorted(list1, list2.next)
        return list2
list1 = LinkedList()
list1.appendSorted(13)
list1.appendSorted(12)
list1.appendSorted(3)
list1.appendSorted(16)
list1.appendSorted(7)
print("List 1 :"),
list1.printList()
list2 = LinkedList()
list2.appendSorted(9)
list2.appendSorted(10)
list2.appendSorted(1)
print("List 2 :"),
list2.printList()
list3 = LinkedList()
list3.head = list3.mergeSorted(list1.head, list2.head)
print("Merged List :"),
list3.printList() | [
"noreply@github.com"
] | L200184044.noreply@github.com |
32f7a189d793b0b0da1823a492e1a0590968f29a | cad4f7e40dc575a3224c4a554319245d6d09162f | /ksh/ch3/condition06.py | 417bad7bd071ac56383b4ed294b1fdff988bc553 | [] | no_license | ksh60706/python_study | 56c3dddbdb492db332dd0931771c92b1a0ef81eb | 02c9457e56d39d7649c566ab01110fe3ff65ea0c | refs/heads/master | 2022-11-17T05:33:52.708897 | 2020-07-07T06:39:49 | 2020-07-07T06:39:49 | 274,009,716 | 0 | 1 | null | 2020-07-07T06:39:50 | 2020-06-22T01:05:25 | Python | UTF-8 | Python | false | false | 658 | py | # 변수를 선언합니다.
score = float(input("학점 입력 > "))
# 조건문을 적용합니다.
if score == 4.5:
print("신")
elif 4.2 <= score < 4.5:
print("교수님의 사랑")
elif 3.5 <= score < 4.2:
print("현 체제의 수호자")
elif 2.8 <= score < 3.5:
print("일반인")
elif 2.3 <= score < 2.8:
print('일탈을 꿈꾸는 소시민')
elif 1.75 <= score < 2.3:
print("오락문화의 선구자")
elif 1.0 <= score < 1.75:
print("불가촉천민")
elif 0.5 <= score < 1.0:
print("자벌레")
elif 0 < score < 0.5:
print("플랑크톤")
elif score == 0:
print("시대를 앞서가는 혁명의 씨앗") | [
"ksh60706@hansol.com"
] | ksh60706@hansol.com |
c25c460122f690887e2795f8a510fd73595b7ae8 | c56d36c10e52f569f3864790611fe9cf11bcb050 | /lesson9/mitm_xueqiu.py | 8d2d4019d65f5d1f4b7158c744e5c9c3bbbf9d92 | [] | no_license | yr-rui/LG7 | c9ba3e062eba4e5444a2e82a5bba8648da3258c9 | 471318a2af4e1d9e8b964d73f12f8f7ab713495f | refs/heads/main | 2023-07-30T15:47:47.702967 | 2021-09-15T12:48:09 | 2021-09-15T12:48:09 | 360,015,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,499 | py | """HTTP-specific events."""
import json
import mitmproxy.http
from mitmproxy import http
class Events:
def http_connect(self, flow: mitmproxy.http.HTTPFlow):
"""
An HTTP CONNECT request was received. Setting a non 2xx response on
the flow will return the response to the client abort the
connection. CONNECT requests and responses do not generate the usual
HTTP handler events. CONNECT requests are only valid in regular and
upstream proxy modes.
"""
pass
def requestheaders(self, flow: mitmproxy.http.HTTPFlow):
"""
HTTP request headers were successfully read. At this point, the body
is empty.
"""
pass
def request(self, flow: mitmproxy.http.HTTPFlow):
"""
The full HTTP request has been read.
"""
pass
def responseheaders(self, flow: mitmproxy.http.HTTPFlow):
"""
HTTP response headers were successfully read. At this point, the body
is empty.
"""
pass
def response(self, flow: mitmproxy.http.HTTPFlow):
"""
The full HTTP response has been read.
"""
"""对第一个股票保持原样,对第二个股票名字加长一倍,对第三个股票名字变成空"""
if 'https://stock.xueqiu.com/v5/stock/batch/quote.json?' in flow.request.url:
data = json.load(open("./xueqiu.json"), encoding="utf-8")
name2=data["data"]["items"][1]["quote"]["name"]
data["data"]["items"][1]["quote"]["name"]=name2+name2
data["data"]["items"][2]["quote"]["name"] = ""
with open('xueqiu2.json', 'w', encoding='utf-8') as f:
json.dump(data, f)
with open('xueqiu2.json', encoding='utf-8') as f:
flow.response=http.HTTPResponse.make(200,f.read())
def error(self, flow: mitmproxy.http.HTTPFlow):
"""
An HTTP error has occurred, e.g. invalid server responses, or
interrupted connections. This is distinct from a valid server HTTP
error response, which is simply a response with an HTTP error code.
"""
pass
addons=[Events()]
if __name__ == '__main__':
#__file__指的当前文件,会打印出当前文件的绝对路径 print(__file__)
from mitmproxy.tools.main import mitmdump
#使用debug模式启动mitmdump
mitmdump(['-p','8080','-s',__file__])
| [
"rui.yue@changhong.com"
] | rui.yue@changhong.com |
4a14a4a9d4659be2920bea4854f201229b0f3159 | 38183a86c6a383715302c9d6f94850e4e5d748a3 | /api/check_car.py | 71322b03b6b6821f56512189afdbcee06354fd92 | [] | no_license | MRAproject/Backend-V2 | b1cb0879eb802e07f7408b21a17b0ef41f6cbf7b | 9613aafaaec66d78918ddc084e5e75951a5b8619 | refs/heads/master | 2020-04-27T18:07:45.409649 | 2019-04-09T07:45:10 | 2019-04-09T07:45:10 | 174,555,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,512 | py | from flask import request, jsonify
from datetime import datetime
import sqlite3
import base64
from plate_recognition.Main import main
def check_car():
image = request.args.get('carNumber')
img_data = base64.b64decode(str(image))
filename = 'some_image.jpg' # I assume you have a way of picking unique filenames
with open(filename, 'wb') as f:
f.write(img_data)
try:
car_number = main(img_data)
print(f'car number {car_number}')
if car_number is None:
return jsonify({'message': 'car number is alreday exist',
'status': 'failed'})
db = sqlite3.connect('db.db')
cur = db.cursor()
cur.execute('SELECT * FROM Cars WHERE carNumber = ?', (car_number,))
car = cur.fetchone()
except Exception as e:
print(e)
return jsonify({'message': 'car number is alreday exist',
'status': 'failed'})
if car is None:
db.close()
return jsonify({"state": 'authorized'})
date = datetime.now().strftime("%d/%m/%y, %H:%M")
username = car[0]
# find last action
cur.execute('SELECT * FROM Times WHERE carNumber = ? AND username = ? ', (car_number, username))
car_time = cur.fetchall()
last_action = car_time[len(car_time) - 1]
time_enter = last_action[2] if last_action[3] is None else last_action[3]
ans = datetime.now() - datetime.strptime(time_enter, "%d/%m/%y, %H:%M")
if ans.seconds <= 60:
print('car need to wait')
return jsonify({'message': 'car number is alreday exist',
'status': 'failed'})
if car[2] == 0:
# enter to parking
cur.execute('INSERT INTO Times (username,carNumber,enter,exit) VALUES (?,?,?,?)',
(username, car_number, date, None))
cur.execute('UPDATE Cars SET isInside = ? WHERE carNumber = ? AND username = ?', (1, car_number, username))
else:
# exist from parking
cur.execute('SELECT * FROM Times WHERE carNumber = ? AND username = ? AND exit IS NULL', (car_number, username))
car_time = cur.fetchone()
cur.execute('UPDATE Times SET exit = ? WHERE username = ? AND carNumber = ? AND enter = ?',
(date, username, car_number, car_time[2]))
cur.execute('UPDATE Cars SET isInside = ? WHERE carNumber = ? AND username = ?',
(0, car_number, username))
db.commit()
db.close()
return jsonify({"status": 'done'})
| [
"amitmarkovich92@gmail.com"
] | amitmarkovich92@gmail.com |
42ae99636bef2b466a152cd47dbb195677753fcc | 4960e3e005ba04ec0a8b0defc6642dff5e71f5ae | /knowledge/cron/flow4/scan_domain2sentiment.py | 82aed86bc47562f200906949fc65791e17a4ba4e | [] | no_license | jianjian0dandan/knowledge_revised | aa7d772ba9efcaa579907b0418e145d6b440a9c9 | ffc80dcca932c977755128c80c17dca603ee8a8b | refs/heads/master | 2021-01-24T18:27:41.117166 | 2017-04-25T14:55:42 | 2017-04-25T14:55:42 | 84,448,466 | 1 | 0 | null | 2017-06-15T07:32:31 | 2017-03-09T14:02:46 | HTML | UTF-8 | Python | false | false | 1,974 | py | # -*- coding:utf-8 -*-
'''
use to scan user domain to redis hash for compute sentiment
update: one month
'''
import sys
import time
from elasticsearch.helpers import scan
reload(sys)
sys.path.append('../../')
from global_utils import es_user_portrait, portrait_index_name, portrait_index_type
from global_utils import R_DOMAIN, r_domain_name
from parameter import domain_ch2en_dict
from time_utils import ts2datetime, datetime2ts
def del_domain_redis():
R_DOMAIN.delete(r_domain_name)
#use to scan user domain to redis which save as english
def scan_domain2redis():
count = 0
s_re = scan(es_user_portrait, query={'query':{'match_all':{}}, 'size':1000}, index=portrait_index_name, doc_type=portrait_index_type)
start_ts = time.time()
hmset_dict = {}
while True:
try:
scan_re = s_re.next()['_source']
count += 1
uid = scan_re['uid']
domain_en = domain_ch2en_dict[scan_re['domain']]
hmset_dict[uid] = domain_en
if count % 1000 == 0 and count != 0:
R_DOMAIN.hmset(r_domain_name, hmset_dict)
end_ts = time.time()
print '%s sec count 1000' % (end_ts -start_ts)
start_ts = end_ts
hmset_dict = {}
except StopIteration:
if hmset_dict:
R_DOMAIN.hmset(r_domain_name, hmset_dict)
hmset_dict = {}
break
except Exception as e:
raise e
break
if hmset_dict:
R_DOMAIN.hmset(r_domain_name, hmset_dict)
print 'all count:', count
if __name__=='__main__':
log_time_ts = time.time()
log_time_date = ts2datetime(log_time_ts)
print 'cron/flow4/scan_domain2sentiment.py&start&' + log_time_date
del_domain_redis()
scan_domain2redis()
log_time_ts = time.time()
log_time_date = ts2datetime(log_time_ts)
print 'cron/flow4/scan_domain2sentiment&end&' + log_time_date
| [
"1257819385@qq.com"
] | 1257819385@qq.com |
bd4313e16c7f1ea0d9500ef337a3402bb36987d0 | a6d96bc4cedee2f77d1b5399ff480b86f17ea310 | /Mo6 indi5/migrate_01/migrations/0001_initial.py | 8c966d1965bea66ded333fae9ce73e87c03f61c2 | [] | no_license | sezniv/modulo6_indivigrupal_5 | 11966d56cb82cd4720a6a9595786c6b510190ab3 | f5e431941148038b9833463a9a813971a976a3d2 | refs/heads/main | 2023-02-18T08:08:01.836672 | 2021-01-18T00:46:26 | 2021-01-18T00:46:26 | 330,516,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | # Generated by Django 3.1.5 on 2021-01-13 21:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Departamento',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=15)),
],
),
migrations.CreateModel(
name='Profesor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=25)),
('apellido', models.CharField(max_length=25)),
('escuela', models.CharField(max_length=30)),
('fecha_de_contratacion', models.DateField()),
('sueldo', models.IntegerField()),
],
),
migrations.CreateModel(
name='Asignatura',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=25)),
('descripcion', models.CharField(max_length=250)),
('departamento_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='migrate_01.departamento')),
],
),
]
| [
"sezniv@gmail.com"
] | sezniv@gmail.com |
cfdaccfe846dadf7a82012fb5f6919842cddec02 | 1308e7ce4ad8eb2303303ef7ae8fd0d9a07d5e98 | /src/app.py | d6b97dd460309ca10b825f4204b4857afcc26eb4 | [] | no_license | biraj094/flask-github-actions | c9f24f4c4ec0da866b377ab9748f829691336bd9 | 9bf535480877b480806abe1bbd4218372433a909 | refs/heads/master | 2023-02-27T15:26:04.286635 | 2021-01-29T06:58:12 | 2021-01-29T06:58:12 | 333,686,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py |
from flask import Flask
app = Flask(__name__)
@app.route("/")
def index():
return "Hello world"
def add(x,y):
return x+y
if __name__=="__main__":
app.run()
| [
"koiralabiraj@gmail.com"
] | koiralabiraj@gmail.com |
2bc4c9e6069212b6686c4d96d16cb7c202bd50ba | cc48a48e10b819825b47053566b34c4f0676dbd4 | /14nov.py | 8492ccdf13b0f460e376911c042d2f82cb594f23 | [] | no_license | Klankers/411_M2M | fc912aced83c8bc05348fd79c3814ac8f8e24cf6 | e44a43abe031cd9af98e3ec67efc92608141d4ff | refs/heads/master | 2021-04-30T00:52:01.946364 | 2018-02-14T03:49:12 | 2018-02-14T03:49:12 | 120,809,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | USERNAME = 'OOIAPI-TFWIQ9W27S5XJI'
TOKEN = 'TSB1PZMOBD1H4T'
import requests, json
from pprint import pprint #pprint used to format json objects. pprint(data[0]) is formatted to be legible.
#response = requests.get('https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/RS03CCAL/MJ03F/05-BOTPTA301/streamed/botpt_nano_sample?beginDT=2017-07-04T17:54:58.050Z&endDT=2017-07-04T17:55:58.050Z&limit=1000¶meters=7,848', auth=(USERNAME, TOKEN)) #the bt sensor
#response = requests.get('https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/RS03INT1/MJ03C/10-TRHPHA301/streamed/trhph_sample?beginDT=2017-11-07T00:00:00.000Z&limit=1000', auth=(USERNAME, TOKEN)) #the trhph
try:
while True:
response = requests.get('https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/RS03INT1/MJ03C/10-TRHPHA301/streamed/trhph_sample?beginDT=2017-11-07T00:00:00.000Z&limit=1000', auth=(USERNAME, TOKEN))
data = response.json()
#for the TRHPH
time = data[0]['time'] #sec since 1-1-1900
temperature = data[0]['vent_fluid_temperaure'] #deg C
chloride = data[0]['vent_fluid_chloride_conc'] #mmol/kg
print(time, temperature, chloride)
except KeyboardInterrupt:
print('Halting data acquisition') | [
"noreply@github.com"
] | Klankers.noreply@github.com |
462dfd6ce89d105b27804c48b359032cd031ba41 | 161edb3d2fdc0ca247e0b08d76c5d2a49093bce0 | /manage.py | 1ec76941f11c81d6459ccabe0d4f365bae9dcb71 | [] | no_license | mkdabc/CRM | a5b6eb6cfef435f5d63e40a145dc41265cb98523 | e00ff8e613a83d22cd6bce69cef97020c7f840e5 | refs/heads/master | 2020-05-24T00:45:19.966904 | 2019-05-16T12:15:31 | 2019-05-16T12:15:31 | 187,022,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crm_mkd.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"458351424@qq.com"
] | 458351424@qq.com |
1d2e2eb2c10108687a1dc49559484804e918c456 | 93d700b0275bca7bbe10da7b05afb63129180327 | /cmsplugin_rst/forms.py | 52c718fb178b45fdcd2bb193c48470d6edaec1f1 | [
"BSD-3-Clause"
] | permissive | nwojcik/cmsplugin-rst | d251a4bc029b4f804ee81b8cb5a4efbe719d3270 | afc564dc32fff5fa5e0ad7a9449088cb49737db6 | refs/heads/master | 2021-01-16T20:26:48.392784 | 2011-10-09T13:29:06 | 2011-10-09T13:29:06 | 2,537,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | from cmsplugin_rst.models import RstPluginModel
from django import forms
help_text = '<a href="http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html">Reference</a>'
class RstPluginForm(forms.ModelForm):
body = forms.CharField(
widget=forms.Textarea(attrs={
'rows':30,
'cols':80,
'style':'font-family:monospace'
}),
help_text=help_text
)
class Meta:
model = RstPluginModel | [
"jonas.obrist@divio.ch"
] | jonas.obrist@divio.ch |
bb17f170a96d6910eed0134d74a5d07a2cffd5bb | 97626032008febd273330996ee7a8d27d9d0788e | /datastructure/practice/c7/c_7_26.py | 1fa7b4dc7d5b3436a0ea2ab39ab4f466baa70de3 | [
"MIT"
] | permissive | stoneyangxu/python-kata | ee03597a00ce80c7d1d34dc63b2302a3d2bfdc3e | 979af91c74718a525dcd2a83fe53ec6342af9741 | refs/heads/master | 2020-08-28T15:02:48.253340 | 2019-11-05T15:04:52 | 2019-11-05T15:04:52 | 217,733,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | import unittest
from datastructure.links.LinkedQueue import LinkedQueue
class MyTestCase(unittest.TestCase):
def test_something(self):
queue1 = LinkedQueue()
queue1.enqueue(1)
queue1.enqueue(2)
queue1.enqueue(3)
queue2 = LinkedQueue()
queue2.enqueue(4)
queue2.enqueue(5)
self.assertEqual([1, 2, 3], queue1.to_list())
self.assertEqual([4, 5], queue2.to_list())
queue1.concatenate(queue2)
self.assertEqual([1, 2, 3, 4, 5], queue1.to_list())
self.assertEqual([], queue2.to_list())
if __name__ == '__main__':
unittest.main()
| [
"stoneyangxu@icloud.com"
] | stoneyangxu@icloud.com |
b1b7fec1ef05d9659b007e38d824978221d4191c | 0aa2db201678205e9eccd3f4f2dcb6f95a97b5f6 | /tut_23.py | ca22460439ce1e75e3520ad687c01f55a1515c70 | [] | no_license | udoy382/PyTutorial_telusko | ffa76b4b6772d289c787e4b682df2d0965a2bf62 | 5dc5f3fc331605310f7c3923d7865f55a4592e28 | refs/heads/main | 2023-06-09T11:00:41.915456 | 2021-06-30T14:29:56 | 2021-06-30T14:29:56 | 381,730,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # run this type of shape
# # # #
# # # #
# # # #
'''
for i in range(4):
for j in range(4):
print("# ", end="")
print()
'''
# run this type of shape
#
# #
# # #
# # # #
'''
for i in range(4):
for j in range(i+1):
print("# ", end="")
print()
'''
# run this type of shape
# # # #
# # #
# #
#
for i in range(4):
for j in range(4-i):
print("# ", end="")
print()
| [
"srudoy436@gmail.com"
] | srudoy436@gmail.com |
68bb0815f024aa9943dbd62a2323742323b09af2 | 33d5e763d3904bbfc2c5ad7269a81bd5dcd5422f | /FileServer/fileServer.py | c0b234b256d6109ff45cfc9c47ead0dd5de05479 | [] | no_license | shadowamy/python | 5bc053dc64658c65d8d0e6e232dd91e1896b8c20 | e195f40fd6f77c110eb39dcc42dec2fdfec5f8fb | refs/heads/master | 2020-05-04T21:36:43.097596 | 2019-04-04T11:15:16 | 2019-04-04T11:15:16 | 179,439,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,851 | py | # -*- coding: UTF-8 -*-
import socket, time, socketserver, struct
import os, threading
host = 'localhost'
port = 12307
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # 定义socket类型
s.bind((host, port)) # 绑定需要监听的Ip和端口号,tuple格式
s.listen(1)
fileHouse = 'fileHouse'
def file_send(connection, filepath):
#filepath = fileHouse+'\\\\'+ filepath_temp
#filepath = os.path.join(fileHouse, filepath_temp)
if os.path.isfile(filepath):
#fileinfo_size = struct.calcsize('128sl') # 定义打包规则
# 定义文件头信息,包含文件名和文件大小
fhead = struct.pack('128sl', os.path.basename(filepath.encode()), os.stat(filepath.encode()).st_size)
connection.send(fhead)
print('client filepath: ', filepath)
# with open(filepath,'rb') as fo: 这样发送文件有问题,发送完成后还会发一些东西过去
fo = open(filepath, 'rb')
while True:
filedata = fo.read(1024)
if not filedata:
break
connection.send(filedata)
fo.close()
print('send over...')
def add_thread(connection, address):
connection.settimeout(600)
fileinfo_size = struct.calcsize('128sl')
buf = connection.recv(fileinfo_size)
if buf: # 如果不加这个if,第一个文件传输完成后会自动走到下一句
filename, filesize = struct.unpack('128sl', buf)
filename_f = (filename.decode()).strip('\00')
#filenewname = os.path.join('./', filename_f)
filenewname = os.path.join(fileHouse, filename_f)
print ('file new name is %s, filesize is %s' % (filenewname, filesize))
recvd_size = 0 # 定义接收了的文件大小
file = open(filenewname, 'wb')
print ('start receiving...')
while not recvd_size == filesize:
if filesize - recvd_size > 1024:
rdata = connection.recv(1024)
recvd_size += len(rdata)
else:
rdata = connection.recv(filesize - recvd_size)
recvd_size = filesize
file.write(rdata)
file.close()
print ('receive done')
# connection.close()
def show_thread(connection):
filelist = os.listdir(fileHouse)
filestr = ' '.join(filelist)
connection.send(filestr.encode())
print('show file list successfully')
def delete_thread(connection, filename_temp):
filename = fileHouse + '\\\\' + filename_temp
if not os.path.exists(filename):
message = 'Your entering file is not exist, please confirm your file name'
else:
os.remove(filename)
message = 'delete'+' '+filename+' '+'successfully'
print(message)
connection.send(message.encode())
def change_thread(connection, address, listOrder):
oldfilename_or = listOrder[1]
oldfilename = fileHouse + '\\\\' + oldfilename_or
newfilename = listOrder[2]
if not os.path.exists(oldfilename):
connection.send('unable'.encode())
else:
connection.send('enable'.encode())
os.remove(oldfilename)
add_thread(connection, address)
print('change'+' '+oldfilename_or+' '+'into'+' '+newfilename+' '+'successfully')
def get_thread(connection, address, filename_temp):
filename = fileHouse+'\\\\'+filename_temp
#filename = os.path.join(fileHouse, filename_temp)
if not os.path.exists(filename):
connection.send('unable'.encode())
else:
connection.send('enable'.encode())
file_send(connection, filename)
def quit_thread(connection):
connection.close()
print("The fileHouse system exit...")
os._exit(0)
def order_thread(connection, address):
while True:
try:
connection.settimeout(600)
order = connection.recv(512)
#print (order.decode())
listOrder = (order.decode()).split()
if listOrder[0] == 'show':
show_thread(connection)
elif listOrder[0] == 'add':
add_thread(connection, address)
elif listOrder[0] == 'change':
change_thread(connection, address, listOrder)
elif listOrder[0] == 'delete':
delete_thread(connection, listOrder[1])
elif listOrder[0] == 'get':
get_thread(connection, address, listOrder[1])
elif listOrder[0] == 'quit':
quit_thread(connection)
except socket.timeout:
connection.close()
while True:
connection, address = s.accept()
print('Connected by ', address)
#thread = threading.Thread(target=conn_thread,args=(connection,address)) #使用threading也可以
thread = threading.Thread(target=order_thread, args=(connection, address))
thread.start()
s.close() | [
"547822367@qq.com"
] | 547822367@qq.com |
45afd3a8a7c4e27c8a14cae91ba000ca278b0c88 | e0f13152e4575f09f0b1e4b1811726bbe5066f90 | /tests/spend.py | 6f12c1c3b03880c9039e75a9eab6c88f5ec189dc | [] | no_license | yagamidev/amoveo | 88bc0dea994fab72c9f430f838ffc54418e30abf | 4dfd6cc8a5cb740500a873c83ff979fa521ec4e7 | refs/heads/master | 2021-04-09T11:45:23.697801 | 2018-03-16T09:13:35 | 2018-03-16T09:13:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | from get_request import request
def spend_test():
print("spend test")
pub = "BGRv3asifl1g/nACvsJoJiB1UiKU7Ll8O1jN/VD2l/rV95aRPrMm1cfV1917dxXVERzaaBGYtsGB5ET+4aYz7ws="
priv = "nJgWyLTX1La8eCbPv85r3xs7DfmJ9AG4tLrJ5fiW6qY="
brainwallet = ''
request(2, "load_key", [pub, priv, brainwallet], 1)
request(1, "create_account", [pub, 1], 0.1)
request(1, "sync", [[127,0,0,1], 3020], 0.1)
request(1, "spend", [pub, 2])
request(1, "spend", [pub, 3])
request(1, "spend", [pub, 1])
request(1, "spend", [pub, 1])
request(1, "sync", [[127,0,0,1], 3020], 0.1)
request(1, "mine_block", [1,100000], 0.3)
request(1, "sync", [[127,0,0,1], 3020])
if __name__ == "__main__":
spend_test()
| [
"zack.bitcoin@gmail.com"
] | zack.bitcoin@gmail.com |
29954117c6931a528ca8feec50c7348da1488d7b | 001f168a412cd1c80c5e0fece0775a41f41ad365 | /project4/server/amqp_receive.py | b7fe88fb7f8e97773afcfb0b41310167dc04afd4 | [] | no_license | aakashpk/ecen5053-002_eid | ea9d5b78c5af9c0887097a4b72d9c0be00008262 | c13ea5512eb35176ed558b23649f22bcc5ee2440 | refs/heads/master | 2020-04-29T09:48:12.508826 | 2019-03-17T00:55:51 | 2019-03-17T00:55:51 | 176,038,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | #!/usr/bin/env python
import pika
credentials = pika.PlainCredentials('project4', 'abcd')
connection = pika.BlockingConnection(pika.ConnectionParameters('192.168.43.189',credentials=credentials,blocked_connection_timeout=600))
#192.168.43.189
channel = connection.channel()
channel.queue_declare(queue='transmit')
channel.queue_declare(queue='receive')
def callback(ch, method, properties, body):
#print(" [x] Received %r" % body)
print("Recieved :",len(body))
channel.basic_publish(exchange='',
routing_key='receive',
body=body)
print("Sent back :",len(body))
channel.basic_consume(callback,
queue='transmit',
no_ack=True)
print(' [*] Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
| [
"Aakash.Kumar@colorado.edu"
] | Aakash.Kumar@colorado.edu |
c5c02b528246d16171faa687ec9e7fb3d4df0a74 | 039ba9dba0f131496a959338c32e811904f00708 | /mycrm/shopcrm/shopcrm/settings.py | bd18f1c8d64d8efad0647ac52c6bf4b7e4fd5f5d | [] | no_license | mageshrocky/python_django_projects | 49e12aff8fe08704c2d17b60d5373f0990120336 | 444c30780632ceea5c6b7377356ed2c3c5ce6253 | refs/heads/master | 2023-05-18T11:57:01.691868 | 2021-06-15T11:18:26 | 2021-06-15T11:18:26 | 377,125,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,265 | py | """
Django settings for shopcrm project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'kqgrn!^+k=hkp)_mxpm+9_)0w=k)b@lsjyibe$qsog*$^3%hs7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp.apps.MyappConfig',
'django_filters',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'shopcrm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'shopcrm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/images/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'static/images') | [
"magesh1699@gmail.com"
] | magesh1699@gmail.com |
d3d599ac0a078b0b7694041e0631328a31cf3277 | a6632d728726f8c3e07f9e8c7dedb7da2d772165 | /Youtube_cloneAPI/youtubecloneAPI/wsgi.py | ea7af36541305aeea598d3d2d181055458d91ce2 | [] | no_license | asnchz/Youtube_Clone_API | a9372c937f3e528ef742cacb44f6098ebe7e0530 | 2abe0b32902fe00176dd0e7beae26fff3356d4c3 | refs/heads/main | 2023-08-22T20:25:49.458191 | 2021-10-14T21:34:29 | 2021-10-14T21:34:29 | 416,890,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for youtubecloneAPI project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'youtubecloneAPI.settings')
application = get_wsgi_application()
| [
"asnchz27@yahoo.com"
] | asnchz27@yahoo.com |
b30b589f5979dd5defb995c813fba4cbb053a9f6 | d314f5f27dea761b5e25a4a34cd84dccfc8d3ff5 | /Elevator.py | 879dce25295bef71f10003f32d9f43de0baa77d9 | [] | no_license | daniiloleshchuk/Python-Lab10 | 7fd87418862480582f23eb684679b1f5354ae152 | 4db34442dacd72e3c1fc4959eab259a43ebb2121 | refs/heads/master | 2022-07-20T06:22:12.288445 | 2020-05-21T19:12:14 | 2020-05-21T19:12:14 | 254,920,356 | 0 | 0 | null | 2020-04-22T10:06:20 | 2020-04-11T17:33:03 | null | UTF-8 | Python | false | false | 1,588 | py | class Elevator:
# static field
staticVar = "This is static variable"
# constructor
def __init__(self, producer_name=None, carrying_capacity_in_kg=None,
engine_power_consumption_in_watts=None, height_in_sm=None, width_in_sm=None, length_in_sm=None):
self.producer_name = producer_name
self.carrying_capacity_in_kg = carrying_capacity_in_kg
self.engine_power_consumption_in_watts = engine_power_consumption_in_watts
self.height_in_sm = height_in_sm
self.width_in_sm = width_in_sm
self.length_in_sm = length_in_sm
# destructor
def __del__(self):
return
def __str__(self):
return "Producer name is: " + str(self.producer_name) + "\n" + \
"Carrying capacity in kg is: " + str(self.carrying_capacity_in_kg) + "\n" + \
"Engine power consumption in watts is: " + str(self.engine_power_consumption_in_watts) + "\n" + \
"Height in sm is: " + str(self.height_in_sm) + "\n" + \
"Width in sm is: " + str(self.width_in_sm) + "\n" + \
"Length in sm is: " + str(self.length_in_sm) + "\n"
@staticmethod
def static_method():
return Elevator.staticVar
@staticmethod
def main():
print()
elevator_1 = Elevator()
elevator_2 = Elevator("prod name 2", 2, 3)
elevator_3 = Elevator("prod name 3", 3, 4, 5, 6, 7)
print(elevator_1.__str__())
print(elevator_2.__str__())
print(elevator_3.__str__())
if __name__ == '__main__':
Elevator.main()
| [
"danieloleshuk@gmail.com"
] | danieloleshuk@gmail.com |
93fb80555ba83304ee0774e8a8d306de3231038c | d8edd97f8f8dea3f9f02da6c40d331682bb43113 | /networks439.py | 52a3ed76800ad9bf46c5c6733e530a3fa5cc21d3 | [] | no_license | mdubouch/noise-gan | bdd5b2fff3aff70d5f464150443d51c2192eeafd | 639859ec4a2aa809d17eb6998a5a7d217559888a | refs/heads/master | 2023-07-15T09:37:57.631656 | 2021-08-27T11:02:45 | 2021-08-27T11:02:45 | 284,072,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,889 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
__version__ = 205
# Number of wires in the CDC
n_wires = 3606
# Number of continuous features (E, t, dca)
n_features = 3
class Gen(nn.Module):
    """Generator network of the CDC-hit GAN.

    forward() maps a latent vector to a tuple ``(features, wg)``:
    ``features`` is the tanh-squashed continuous hit features concatenated
    with the (x, y) coordinates of a discretely selected sense wire at each
    sequence step, and ``wg`` is the hard one-hot wire-selection tensor
    produced by a straight-through Gumbel-softmax.
    """

    def __init__(self, ngf, latent_dims, seq_len, encoded_dim):
        # ngf: base feature-map multiplier (stored only; the layer widths
        #      below are hard-coded and do not use it).
        # latent_dims: dimensionality of the latent input z.
        # seq_len: output sequence length. Must be a multiple of 64: the
        #      linear stem emits seq_len//64 steps and the three stride-4
        #      transposed convolutions upsample by 4*4*4 = 64.
        # encoded_dim: unused here; kept for signature compatibility.
        super().__init__()
        self.ngf = ngf
        self.seq_len = seq_len
        self.version = __version__

        self.act = nn.ReLU()
        # Latent vector -> flat features, reshaped in forward() to
        # (B, 8192, seq_len//64).
        self.lin0 = nn.Linear(latent_dims, seq_len//64*8192, bias=True)

        # NOTE(review): an unused nested helper class (GBlock) was defined
        # here but never instantiated; removed as dead code.

        # Upsampling trunk: three stride-4 transposed convolutions give a
        # 64x temporal upsampling, each followed by a norm + ReLU.
        self.conv1 = nn.ConvTranspose1d(8192, 6144, 4, 4, 0)
        self.conv2 = nn.ConvTranspose1d(6144, 4096, 4, 4, 0)
        self.conv3 = nn.ConvTranspose1d(4096, n_wires, 4, 4, 0)
        self.bn1 = nn.BatchNorm1d(6144)
        self.bn2 = nn.BatchNorm1d(4096)
        # Despite the "bn" name, this one is an InstanceNorm, not a BatchNorm.
        self.bn3 = nn.InstanceNorm1d(n_wires)

        # 1x1 convolution producing per-step wire-selection logits.
        self.convw1 = nn.ConvTranspose1d(n_wires, n_wires, 1, 1, 0)
        #self.bnp0 = nn.BatchNorm1d(n_wires)

        # 1x1-convolution head reducing to the n_features continuous outputs.
        self.convxp = nn.ConvTranspose1d(n_wires, 256, 1, 1, 0)
        self.bnp1 = nn.InstanceNorm1d(256)
        self.convp2 = nn.ConvTranspose1d(256, 64, 1, 1, 0)
        self.bnp2 = nn.InstanceNorm1d(64)
        self.convp3 = nn.ConvTranspose1d(64, n_features, 1, 1, 0)
        self.out = nn.Tanh()

    def forward(self, z, wire_to_xy):
        # z: latent vector; lin0 requires its last dimension to be
        #    latent_dims (assumes z is (B, latent_dims) -- TODO confirm).
        # wire_to_xy: wire-id -> position lookup, contracted over its dim 1
        #    below, so presumably shaped (2, n_wires) -- verify against caller.
        x = self.act(self.lin0(z).view(-1, 8192, self.seq_len // 64))
        x = self.act(self.bn1(self.conv1(x)))
        x = self.act(self.bn2(self.conv2(x)))
        x = self.act(self.bn3(self.conv3(x)))

        # Hard (straight-through) Gumbel-softmax over the wire dimension:
        # wg is one-hot in the forward pass but has soft-sample gradients.
        w = self.convw1(x)
        wg = F.gumbel_softmax(w, dim=1, hard=True, tau=2/3)
        # Look up each step's selected wire coordinates, then move the
        # coordinate axis to the channel position: (B, L, 2) -> (B, 2, L).
        xy = torch.tensordot(wg, wire_to_xy, dims=[[1],[1]]).permute(0,2,1)

        # Continuous-feature head on the same trunk activations.
        p = self.act(self.bnp1(self.convxp(x)))
        p = self.act(self.bnp2(self.convp2(p)))
        p = self.convp3(p)

        # Concatenate tanh-bounded features with wire coordinates along the
        # channel axis; also return the raw wire selection.
        return torch.cat([self.out(p), xy], dim=1), wg
class Disc(nn.Module):
    """Discriminator/critic: scores a batch of hit sequences.

    Expected input layout along dim 1: n_features continuous values,
    2 wire (x, y) coordinates, then the one-hot wire selection (sliced
    out below but not used).  Output is one unbounded score per sequence.
    """
    def __init__(self, ndf, seq_len, encoded_dim):
        super().__init__()

        self.version = __version__

        # (B, n_features, 256)
        self.act = nn.LeakyReLU(0.2)

        class DBlock(nn.Module):
            # Strided conv block: halves the sequence length.
            def __init__(self, in_channels, out_channels):
                super().__init__()
                self.convd = nn.Conv1d(in_channels, out_channels, 3, 2, 1)
                self.act = nn.LeakyReLU(0.2)
            def forward(self, x):
                y = self.act(self.convd(x))
                return y

        # 1x1 conv mixing the continuous features with the wire (x, y).
        self.convpxy = nn.Conv1d(n_features+2, 64, 1, 1, 0)

        self.db1 = DBlock(64, 128)
        self.db2 = DBlock(128, 256)
        #self.conv2 = nn.Conv1d(256, 512, 3, 2, 1)
        #self.conv3 = nn.Conv1d(512, 1024, 3, 2, 1)
        #self.conv4 = nn.Conv1d(1024, 2048, 3, 2, 1)

        #self.lin0 = nn.Linear(256 * seq_len // 1, 1, bias=True)
        # Two stride-2 blocks shrink the sequence to seq_len/4 before the
        # final linear scoring layer.
        self.lin0 = nn.Linear(seq_len//4*256, 1)

        self.out = nn.Identity()

    def forward(self, x_):
        # x_ is concatenated tensor of p_ and w_, shape (batch, features+n_wires, seq_len)
        # p_ shape is (batch, features, seq_len),
        # w_ is AE-encoded wire (batch, encoded_dim, seq_len)
        seq_len = x_.shape[2]

        x = x_
        #dist = ((xy - nn.ConstantPad1d((1, 0), 0.0)(xy[:,:,:-1]))**2).sum(dim=1).unsqueeze(1)

        # Slice the channel groups; only the features+(x, y) part is used.
        p = x[:,:n_features]
        w = x[:,n_features:n_features+2]
        wg = x[:,n_features+2:]
        pxy = x[:,:n_features+2]

        #x = torch.cat([p, w], dim=1)
        #x = self.act(self.conv0(pxy))
        p = self.convpxy(x[:,:n_features+2])

        #x = torch.cat([xy, xwg], dim=1)
        x = p

        x = self.db1(x)
        x = self.db2(x)

        x = self.lin0(x.flatten(1,2))

        return self.out(x)#.squeeze(1)
class VAE(nn.Module):
    """Wire autoencoder: one-hot wire vector <-> low-dimensional code.

    Despite the name, no sampling appears in this block — it behaves as a
    plain encoder/decoder pair.
    """
    def __init__(self, encoded_dim):
        super().__init__()

        class Enc(nn.Module):
            # Encoder: n_wires -> encoded_dim, tanh-bounded to (-1, 1).
            def __init__(self, hidden_size):
                super().__init__()
                self.act = nn.LeakyReLU(0.2)
                self.lin1 = nn.Linear(n_wires, hidden_size)
                self.lin2 = nn.Linear(hidden_size, encoded_dim)
                self.out = nn.Tanh()
            def forward(self, x):
                x = self.act(self.lin1(x))
                return self.out(self.lin2(x))

        class Dec(nn.Module):
            # Decoder: encoded_dim -> n_wires logits.
            def __init__(self, hidden_size):
                super().__init__()
                self.act = nn.ReLU()
                self.lin1 = nn.Linear(encoded_dim, hidden_size)
                self.lin2 = nn.Linear(hidden_size, n_wires)
            def forward(self, x):
                x = self.act(self.lin1(x))
                return self.lin2(x)

        self.enc_net = Enc(512)
        self.dec_net = Dec(512)

    def enc(self, x):
        # Permute so the wire dimension is last for the Linear layers,
        # then restore the original layout.
        return self.enc_net(x.permute(0, 2, 1)).permute(0,2,1)

    def dec(self, x):
        return self.dec_net(x.permute(0, 2, 1)).permute(0,2,1)

    def forward(self, x):
        # Round trip with no permutation: expects the wire dim last.
        y = self.dec_net(self.enc_net(x))
        return y
def get_n_params(model):
    """Return the total number of scalar parameters in *model*.

    Uses ``Tensor.numel()`` directly instead of the original
    ``p.reshape(-1).shape[0]``, which built a flattened view of every
    parameter just to read its length.
    """
    return sum(p.numel() for p in model.parameters())
| [
"m.dubouchet18@imperial.ac.uk"
] | m.dubouchet18@imperial.ac.uk |
0d4ab8da4f25648c8d0d15a9733749624e19c72d | e4c38f01c1d564156185b87235751fe5d59ca7c6 | /4. Trees and Graphs/routeBetweenNodes.py | 4afbd6245a05cbb2636317784d2a1d0e271f779a | [] | no_license | tommydo89/CTCI | 07c6efc98bde3a421c07607824351ea7d67a1723 | d03b5d2ccfd3fc24dc46d1f799881114079f50b3 | refs/heads/master | 2020-04-27T07:00:46.224438 | 2019-12-18T10:42:06 | 2019-12-18T10:42:06 | 174,124,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,690 | py | # Given a directed graph, design an algorithm to find out whether there is a
# route between two nodes.
class Node:
    """A directed-graph vertex: payload value, out-edges, and a BFS flag."""

    def __init__(self, val):
        # payload, outgoing neighbours, traversal bookkeeping
        self.val, self.adjacent, self.visited = val, [], False
class Stack:
    """LIFO stack backed by a list.

    pop() and peek() return False (rather than raising) when the stack
    is empty, matching the rest of this file's error convention.
    """

    def __init__(self):
        self.items = []

    def push(self, val):
        self.items.append(val)

    def pop(self):
        return self.items.pop() if self.items else False

    def isEmpty(self):
        return not self.items

    def peek(self):
        return self.items[-1] if self.items else False
class MyQueue:
    """FIFO queue built from two Stacks.

    Elements are pushed onto an inbox stack and lazily moved to an outbox
    stack only when the outbox runs dry, giving amortized O(1) enqueue and
    dequeue.  (The original implementation moved every element twice on
    each enqueue, making enqueue O(n).)

    dequeue() and peek() return False when the queue is empty, matching
    Stack's convention.
    """

    def __init__(self):
        # Outbox: oldest element on top, ready to pop.
        self.queue = Stack()
        # Inbox: newly enqueued elements, newest on top.
        self.helper_stack = Stack()

    def enqueue(self, val):
        # O(1): stage the value; it is moved to the outbox on demand.
        self.helper_stack.push(val)

    def dequeue(self):
        if self.isEmpty():
            return False
        self._refill()
        return self.queue.pop()

    def peek(self):
        if self.isEmpty():
            return False
        self._refill()
        return self.queue.peek()

    def isEmpty(self):
        return self.queue.isEmpty() and self.helper_stack.isEmpty()

    def _refill(self):
        # Move staged elements into the outbox only when it is empty;
        # the transfer reverses their order, restoring FIFO semantics.
        if self.queue.isEmpty():
            while not self.helper_stack.isEmpty():
                self.queue.push(self.helper_stack.pop())
# Directed Graph
# A -> B -> C
NodeA = Node('A')
NodeB = Node('B')
NodeC = Node('C')
# Wire up the directed edges A->B and B->C.
NodeA.adjacent.append(NodeB)
NodeB.adjacent.append(NodeC)
def routeBetweenNodes(Node1, Node2):
    """Return True if a directed path exists in either direction
    between the two nodes."""
    # Run both searches up front (no short-circuiting), mirroring the
    # original behaviour of always performing both traversals.
    forward = routeExists(Node1, Node2)
    backward = routeExists(Node2, Node1)
    return forward or backward
def routeExists(start, end):
    """Breadth-first search for a directed path from *start* to *end*.

    Bug fix: the original tracked reachability through the shared
    ``node.visited`` attribute, which was never reset between calls, so a
    second search (e.g. the reverse search in routeBetweenNodes) could
    wrongly return False because nodes were still flagged from the
    previous search.  A per-call ``seen`` set is used instead.
    ``node.visited`` is still set on dequeued nodes to preserve the
    original side effect.
    """
    from collections import deque

    pending = deque([start])
    seen = {id(start)}
    while pending:
        node = pending.popleft()
        node.visited = True  # side effect kept from the original
        if node is end:
            return True
        for neighbour in node.adjacent:
            if id(neighbour) not in seen:
                seen.add(id(neighbour))
                pending.append(neighbour)
    return False
| [
"tommy.td89@gmail.com"
] | tommy.td89@gmail.com |
4bbd4e1900beb6ff27f7d3b88e7c7d6ca27bfe17 | a865208496172a8e6eb9f7ddcc52f3a3052718ea | /website/music/views.py | 8f5b531a9af0b7b0ece52b3984edcd3e8bac3005 | [] | no_license | Vishal026/python-django-project | 74f8d6e84a81c172a9644fe4c42005389d63694c | 11f0c4e9c93e63b45eb1058e7d47f8a6cb8f5096 | refs/heads/master | 2021-01-22T13:23:48.403429 | 2017-09-04T13:37:29 | 2017-09-04T13:37:29 | 102,365,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,768 | py | from django.views.generic.edit import CreateView,UpdateView,DeleteView
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import render,redirect
from django.contrib.auth import authenticate,login
from django.views import generic
from django.views.generic import View
from .models import Album
from .forms import UserForm
class IndexView(generic.ListView):
    """Index page listing every Album."""
    template_name='music/index.html'

    def get_queryset(self):
        # All albums, unfiltered.
        return Album.objects.all()
class DetailView(generic.DetailView):
    """Detail page for a single Album."""
    model=Album
    template_name = 'music/detail.html'
class AlbumCreate(CreateView):
    """Form view for creating an Album."""
    model=Album
    fields=['artist','album_title','genre','album_logo']
class AlbumUpdate(UpdateView):
    """Form view for editing an existing Album (same fields as create)."""
    model=Album
    fields=['artist','album_title','genre','album_logo']
class AlbumDelete(DeleteView):
    """Confirm-and-delete view for an Album; redirects to the index."""
    model=Album
    success_url = reverse_lazy('music:index')
class UserFormView(View):
    """User registration: GET renders a blank form; POST creates the
    user and logs them in on success."""
    form_class=UserForm
    template_name='music/registration_form.html'

    def get(self,request):
        # Display a blank registration form.
        form=self.form_class(None)
        return render(request,self.template_name,{'form':form})

    def post(self, request):
        form = self.form_class(request.POST)

        if form.is_valid():
            user=form.save(commit=False)
            username = form.cleaned_data['username']
            password = form.cleaned_data['password']
            # Hash the password before persisting the user.
            user.set_password(password)
            user.save()

            #return user object credentials
            user= authenticate(username=username,password=password)

            if user is not None:
                if user.is_active:
                    login(request,user)
                    return redirect('music:index')
        # Invalid form, failed authentication, or inactive account:
        # re-render the registration form.
        return render(request,self.template_name,{'form':form})
| [
"31509721+Vishal026@users.noreply.github.com"
] | 31509721+Vishal026@users.noreply.github.com |
05369d52316f17c3049325e4676bd2ef58c11da5 | dcf59838030d329733ecb166be831cf3c73991c9 | /fatorial2.py | 5c6a2a16db9a780d23935e1475002ce41dc32762 | [] | no_license | ender8086/FIAP | 14416514ebe54f37dc3af6135125548f3c36b60e | a0691b5efc68dc0633b5d79a3d51a17439e0e25d | refs/heads/master | 2022-10-16T21:40:06.342986 | 2020-06-14T19:25:59 | 2020-06-14T19:25:59 | 272,268,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | print("Pesquisa sequencial")
n = int(input("Digite n: "))
l = list(range(2, n+1, 2)) #função list + tipo range | [
"noreply@github.com"
] | ender8086.noreply@github.com |
4aa58e954b0b4010715814b0abc520a20475a2a0 | abcbcf29319774e80ca2d844c204f1f91da8b443 | /tests/unit/sagemaker/feature_store/test_inputs.py | 836ed5187e0b8de1b3081c2271995ea54e98024e | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | giuseppeporcelli/sagemaker-python-sdk | e56f5c60d88d4fe2e3589f1994bbc0c402d35a58 | 86143245227139b6883ee72a5fa396b48049cb3c | refs/heads/master | 2023-02-04T08:36:05.234799 | 2020-12-22T17:39:37 | 2020-12-22T17:39:37 | 290,551,159 | 0 | 0 | Apache-2.0 | 2020-08-26T16:40:19 | 2020-08-26T16:40:18 | null | UTF-8 | Python | false | false | 2,627 | py | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
from sagemaker.feature_store.inputs import (
OnlineStoreSecurityConfig,
OnlineStoreConfig,
S3StorageConfig,
DataCatalogConfig,
OfflineStoreConfig,
)
def ordered(obj):
    """Recursively canonicalize *obj* for order-insensitive comparison.

    Dicts become sorted lists of (key, value) pairs, lists are sorted,
    and anything else is returned unchanged.
    """
    if isinstance(obj, dict):
        return sorted((key, ordered(value)) for key, value in obj.items())
    if isinstance(obj, list):
        return sorted(ordered(item) for item in obj)
    return obj
def test_online_store_security_config():
    """The KMS key id round-trips through to_dict()."""
    config = OnlineStoreSecurityConfig(kms_key_id="kms")
    assert ordered(config.to_dict()) == ordered({"KmsKeyId": "kms"})
def test_online_store_config():
    """to_dict() includes the security config only when one is provided."""
    config = OnlineStoreConfig(enable_online_store=True)
    assert ordered(config.to_dict()) == ordered({"EnableOnlineStore": True})

    config_with_kms = OnlineStoreConfig(
        enable_online_store=True,
        online_store_security_config=OnlineStoreSecurityConfig(kms_key_id="kms"),
    )

    assert ordered(config_with_kms.to_dict()) == ordered(
        {
            "EnableOnlineStore": True,
            "OnlineStoreSecurityConfig": {
                "KmsKeyId": "kms",
            },
        }
    )
def test_s3_store_config():
    """S3 URI and KMS key round-trip through to_dict()."""
    config = S3StorageConfig(s3_uri="uri", kms_key_id="kms")
    assert ordered(config.to_dict()) == ordered({"S3Uri": "uri", "KmsKeyId": "kms"})
def test_data_catalog_config():
    """Table, catalog, and database names round-trip through to_dict()."""
    config = DataCatalogConfig(
        table_name="table",
        catalog="catalog",
        database="database",
    )
    assert ordered(config.to_dict()) == ordered(
        {
            "TableName": "table",
            "Catalog": "catalog",
            "Database": "database",
        }
    )
def test_offline_data_store_config():
    """Default offline store config keeps Glue table creation enabled
    (DisableGlueTableCreation defaults to False)."""
    config = OfflineStoreConfig(s3_storage_config=S3StorageConfig(s3_uri="uri"))
    assert ordered(config.to_dict()) == ordered(
        {
            "S3StorageConfig": {"S3Uri": "uri"},
            "DisableGlueTableCreation": False,
        }
    )
| [
"choidan@amazon.com"
] | choidan@amazon.com |
5df3c7499dbe7f360e3bcf3df2fe00dbd9c8f6ce | 4277bbbe447b4b60d9ce8e3725cad2218058bdd0 | /home/forms.py | c746797eedfb6cbfa7747fd1d35f24c57c2065fd | [] | no_license | pritpalxyz/carrer_lama | 71ae11c80f3f6457f7f6e6758dcdd8aa962b3103 | 5b95f59a91f9cac9f1d6142f799da7287a220626 | refs/heads/master | 2023-05-26T00:38:51.395811 | 2017-10-22T20:36:35 | 2017-10-22T20:36:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | from django import forms
from .models import *
class addContactedForm(forms.ModelForm):
    """Contact form backed by the `contacted` model; every widget is
    marked required and styled with Bootstrap's form-control class."""
    name = forms.CharField(label='Name', max_length=30, widget=forms.TextInput(attrs={'required':'required','name':'name','class':'form-control'}))
    subject = forms.CharField(label='Subject', max_length=30, widget=forms.TextInput(attrs={'required':'required','name':'subject','class':'form-control'}))
    email = forms.EmailField(label='Email', max_length=30, widget=forms.TextInput(attrs={'required':'required','name':'email','class':'form-control'}))
    # NOTE(review): the widget attrs below use name='email' — looks like a
    # copy/paste from the email field above; confirm intent before changing.
    comments = forms.CharField(label='Description', widget=forms.Textarea(attrs={'required':'required','name':'email','class':'form-control'}))

    class Meta:
        model = contacted
        fields = ['name','email','subject','comments']
| [
"pritsingh0117@gmail.com"
] | pritsingh0117@gmail.com |
da64cffd7313594a39968f872c075638d777d22e | 9811940577f6ffdda540c9735904e7aa30be3f62 | /cython_tutorials/test.py | a623d3d9ed8b6efe2991df006d03091b1b4e96f0 | [] | no_license | YellowPig-zp/python-projects | 300cb0b7862cf2e67a86540daf21030ed05a776b | 11715367bcd937b502d19a11b83b64bab09902e4 | refs/heads/master | 2018-09-17T17:35:18.605860 | 2018-06-05T21:02:16 | 2018-06-05T21:02:16 | 125,166,870 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | import timeit
# Time 10000 calls of test(5000) in the Cython build vs the pure-Python one.
cy = timeit.timeit('example_cy.test(5000)', setup='import example_cy', number=10000)
py = timeit.timeit('example_py.test(5000)', setup='import example_py', number=10000)

print(cy, py)
# Ratio > 1 means the Cython build is faster.
print('Cython is {}x faster'.format(py/cy))
| [
"zipeng.qin.1997@outlook.com"
] | zipeng.qin.1997@outlook.com |
0fbcb4dace7a1a4154e9787babcc6e51e2fb5d94 | 46769b03aa33875bf4c7b1d4a2c51635a397cdfc | /new_test_22_dev_10089/wsgi.py | 71a95a093114e20c8b2cd6e41198ec4cfed9c106 | [] | no_license | crowdbotics-apps/new-test-22-dev-10089 | 89b7e69c0dc0144f507ad6a0b84f06386d9a4e1c | ff18ba29e44581c139829607c663d731730b7bd9 | refs/heads/master | 2022-12-14T20:30:56.738780 | 2020-09-07T13:15:20 | 2020-09-07T13:15:20 | 293,532,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | """
WSGI config for new_test_22_dev_10089 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings before building the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'new_test_22_dev_10089.settings')

application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
1cd44967f3be59b5da4887abb18f70c55b13da1d | 79c1f1e1f9a123a146a314b9e4cd7f57c2301ed3 | /visualize/CSVVsersion/SceneCSV.py | 1fa2468f95df5ef35c99938e975a8dbef066891a | [
"MIT"
] | permissive | ys1998/motion-forecast | 41a1ebc553edddcf8c463d67237f53c23aa53729 | ef8fa9d597906a756f28952a731f6bc8d178f2bf | refs/heads/master | 2020-04-17T16:41:20.276427 | 2019-07-07T06:19:30 | 2019-07-07T06:19:30 | 166,751,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | import vtk
class SceneCSV(object):
    """Container for VTK scene objects, pre-populated with three lights."""

    def __init__(self):
        self.sceneSources = list()
        self.sceneMappers = list()
        self.sceneActors = list()
        self.sceneLights = list()

        # One strong key light and two dimmer fill lights.
        self.addLight(1.0, 1.0, 1.0, 1000, 1000, -1000, 0.75, 180, 0.75)
        self.addLight(1.0, 1.0, 1.0, -1000, 500, 1000, 0.5, 180, 0.0)
        self.addLight(1.0, 1.0, 1.0, -1000, 500, -1000, 0.5, 180, 0.0)

    def addLight(self, cR, cG, cB, pX, pY, pZ, Intensity, ConeAngle, Attenuation):
        """Create a scene light with the given colour, position, and
        shape parameters, and register it in sceneLights."""
        light = vtk.vtkLight()
        light.SetColor(cR, cG, cB)
        light.SetPosition(pX, pY, pZ)
        light.SetIntensity(Intensity)
        light.SetConeAngle(ConeAngle)
        light.SetShadowAttenuation(Attenuation)
        light.SetLightTypeToSceneLight()
        self.sceneLights.append(light)
"yashshah2398@gmail.com"
] | yashshah2398@gmail.com |
50bb72f6d2260ed83164526fc0355bfe4a41dc23 | 6cb9a8da836352f522d47946dc12afb5f9ac758a | /benchmark/parsers/batch-parser.py | 6240720aecbc026bb9c61e131fde087662e004ac | [] | no_license | animeshbaranawal/giraphuc | a70d1f1839e96302ec7ce13247299ba780c6b297 | 32adbc8a9bc0d73ccc037996b79a45c1f9a42e69 | refs/heads/master | 2021-12-08T05:48:45.499526 | 2016-02-22T00:22:51 | 2016-02-22T00:22:51 | 395,278,312 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,319 | py | #!/usr/bin/env python
"""Batch parser that extracts and prints out results for given log files."""
import os, sys, glob
import argparse, itertools
# do some parallel computing
#from joblib import Parallel, delayed
###############
# Constants
###############
BYTE_PER_GB = 1024*1024*1024.0   # bytes per gigabyte
KB_PER_GB = 1024*1024.0          # kilobytes per gigabyte
MS_PER_SEC = 1000.0              # milliseconds per second
# Algorithm names as they appear in log-file prefixes.
ALG_PR = 'pagerank'
ALG_DELTAPR = 'deltapr'
ALG_PREMIZAN = 'premizan'
# Supported systems; the SYS_* aliases unpack to the name strings.
SYSTEMS = ('giraph', 'gps', 'mizan', 'graphlab')
SYS_GIRAPH, SYS_GPS, SYS_MIZAN, SYS_GRAPHLAB = SYSTEMS
###############
# Parse args
###############
def check_system(system):
    """argparse type-checker: parse *system* as an index into SYSTEMS.

    Raises argparse.ArgumentTypeError for non-integer input or an
    out-of-range index.  (The original used a bare ``except:`` that
    caught — and re-raised — its own ArgumentTypeError.)
    """
    try:
        s = int(system)
    except (TypeError, ValueError):
        raise argparse.ArgumentTypeError('Invalid system')
    if not 0 <= s < len(SYSTEMS):
        raise argparse.ArgumentTypeError('Invalid system')
    return s
def check_cores(cores):
    """argparse type-checker: parse *cores* as a positive integer.

    Raises argparse.ArgumentTypeError for non-integer input or values
    below 1.  (The original used a bare ``except:`` that caught — and
    re-raised — its own ArgumentTypeError.)
    """
    try:
        c = int(cores)
    except (TypeError, ValueError):
        raise argparse.ArgumentTypeError('Invalid core count')
    if c < 1:
        raise argparse.ArgumentTypeError('Invalid core count')
    return c
# Command-line interface: which system's logs to parse, the log file
# patterns (shell globs expanded below), and whether to report statistics
# for the master machine instead of the workers.
parser = argparse.ArgumentParser(description='Outputs experimental data for specified log files.')
parser.add_argument('system', type=check_system,
                    help='system: 0 for Giraph, 1 for GPS, 2 for Mizan, 3 for GraphLab (invalid system will result in invalid time values)')
parser.add_argument('log', type=str, nargs='+',
                    help='an experiment\'s time log file, can be a regular expression (e.g. pagerank_orkut-adj.txt_16_0_20140101-123050_time.txt or page*or*_0_*time.txt)')
parser.add_argument('--master', action='store_true', default=False,
                    help='get mem/net statistics for the master rather than the worker machines')
#parser.add_argument('--cores', type=check_cores, dest='n_cores', default=4,
#                    help='number of cores to use (> 0), default=4')

# NOTE: parse_args() is invoked once per option below; harmless but
# redundant — all three calls parse the same argv.
system = SYSTEMS[parser.parse_args().system]
logs_re = parser.parse_args().log
do_master = parser.parse_args().master
#n_cores = parser.parse_args().n_cores

# Expand each pattern into concrete log file paths.
logs = [f for re in logs_re for f in glob.glob(re)]
###############
# Main parsers
###############
def time_parser(log_prefix, system, alg):
    """Parses running and IO times for a single run.

    Arguments:
    log_prefix -- the prefix of one experiment run's log files (str)
    system -- the system tested (str)
    alg -- the algorithm tested (str)

    Returns:
    A tuple (running time, IO time) in seconds, or (0,0) if log files
    are missing.  Each system branch below parses that system's own log
    format; Giraph and GPS log milliseconds (converted via MS_PER_SEC),
    GraphLab and Mizan log seconds directly.
    """

    log_files = glob.glob(log_prefix + '_time.txt')
    if len(log_files) != 1:
        return (0,0)

    log_file = log_files[0]

    io = run = total = 0

    if system == SYS_GIRAPH:
        # IO = setup + input + shutdown (+ initialize); compute = total - IO.
        io = 0
        for line in open(log_file):
            if "Setup " in line:
                io = io + float(line.split()[5].split('=')[1])
            elif "Input superstep " in line:
                io = io + float(line.split()[6].split('=')[1])
            elif "Vertex input superstep " in line:
                # for compatability w/ Giraph 0.1
                io = io + float(line.split()[7].split('=')[1])
            elif "Shutdown " in line:
                io = io + float(line.split()[5].split('=')[1])
            # initialize is not included in total time, so add it too
            elif "Initialize " in line:
                io = io + float(line.split()[5].split('=')[1])
                total = total + float(line.split()[5].split('=')[1])
            elif "Total (m" in line:
                total = total + float(line.split()[5].split('=')[1])

        return ((total - io)/(MS_PER_SEC), io/(MS_PER_SEC))

    elif system == SYS_GPS:
        # Three timestamps: system start, compute start, last status update.
        start = computestart = end = 0
        for line in open(log_file):
            if "SYSTEM_START_TIME " in line:
                start = float(line.split()[1])
            elif "START_TIME " in line:
                computestart = float(line.split()[1])
            elif "-1-LATEST_STATUS_TIMESTAMP " in line:
                end = float(line.split()[1])

        return ((end - computestart)/(MS_PER_SEC),
                (computestart - start)/(MS_PER_SEC))

    elif system == SYS_GRAPHLAB:
        for line in open(log_file):
            if "TOTAL TIME (sec)" in line:
                total = float(line.split()[3])
            elif "Finished Running engine" in line:
                run = float(line.split()[4])

        return (run, (total - run))

    elif system == SYS_MIZAN:
        if alg == ALG_PREMIZAN:
            # Premizan is pure graph partitioning: all time counts as IO.
            for line in open(log_file):
                if "TOTAL TIME (sec)" in line:
                    io = float(line.split()[3])
            return (0.0, io)
        else:
            for line in open(log_file):
                if "TIME: Total Running Time without IO =" in line:
                    run = float(line.split()[7])
                elif "TIME: Total Running Time =" in line:
                    total = float(line.split()[5])
            return (run, (total - run))
def mem_parser(log_prefix, machines):
    """Parses memory usage of a single run.

    Arguments:
    log_prefix -- the prefix of one experiment run's log files (str)
    machines -- number of machines tested (int)

    Returns:
    A tuple (minimum mem, avg mem, maximum mem), where "mem" corresponds to
    the max memory used at each machine (GB), or (0,0,0) if logs are missing.

    The module-level ``do_master`` flag selects whether only the master's
    log (suffix _0_mem.txt) or only the workers' logs are read.
    """

    if do_master:
        log_files = glob.glob(log_prefix + '_0_mem.txt')
        if len(log_files) != 1:
            return (0,0,0)
    else:
        # Workers only: exclude the master's _0_mem.txt log.
        log_files = [f for f in glob.glob(log_prefix + '_*_mem.txt') if "_0_mem.txt" not in f]
        if len(log_files) < machines:
            return (0,0,0)

    def parse(log):
        """Parses a single log file for mem stats.
        Returns: the max memory usage in GB.
        """
        # note that this "mems" is the memory usage (per second) of a SINGLE machine
        # Usage is taken as the spread (max - min) of the sampled values.
        mems = [float(line.split()[2]) for line in open(log).readlines()]
        return (max(mems) - min(mems))/KB_PER_GB

    # list of each machine's maximum memory usage
    mems = [parse(log) for log in log_files]
    return (min(mems), sum(mems)/len(mems), max(mems))
def net_parser(log_prefix, machines):
    """Parses network usage of a single run.

    Arguments:
    log_prefix -- the prefix of one experiment run's log files (str)
    machines -- number of machines tested (int)

    Returns:
    A tuple (eth recv, eth sent), where eth recv/sent is the total network data
    received/sent across all worker machines (GB), or (0,0) if logs are missing.

    Fix: the original ended with ``eth = zip(*eth)`` followed by
    ``eth[0]``/``eth[1]``, which only works on Python 2 where zip returns
    a list; the result is now tuple-unpacked, which behaves identically
    on both Python 2 and 3.
    """

    if do_master:
        log_files = glob.glob(log_prefix + '_0_nbt.txt')
        if len(log_files) != 1:
            return (0,0)
    else:
        # Workers only: exclude the master's _0_nbt.txt log.
        log_files = [f for f in glob.glob(log_prefix + '_*_nbt.txt') if "_0_nbt.txt" not in f]
        if len(log_files) < machines:
            return (0,0)

    def parse(log):
        """Parses a single log file for net stats.
        Returns: (recv, sent) tuple in GB.
        """
        # bash equivalent:
        # recv=$((-$(cat "$log" | grep "eth0" | awk '{print $2}' | tr '\n' '+')0))
        # sent=$((-$(cat "$log" | grep "eth0" | awk '{print $10}' | tr '\n' '+')0))
        recv = 0
        sent = 0
        for line in open(log).readlines():
            # lines appear as initial followed by final, so this does the correct computation
            if "eth0" in line:
                recv = float(line.split()[1]) - recv
                sent = float(line.split()[9]) - sent
        return (recv/BYTE_PER_GB, sent/BYTE_PER_GB)

    eth = [parse(log) for log in log_files]
    recv_all, sent_all = zip(*eth)
    return (sum(recv_all), sum(sent_all))
def l1norm_parser(log_prefix, alg):
    """Parses l1-norm for a single PageRank run, if applicable.

    Arguments:
    log_prefix -- the prefix of one experiment run's log files (str)
    alg -- the algorithm tested (str)

    Returns:
    The l1-norm as a string, or ' N/A' (space-padded for table output)
    on error or if not applicable.
    """
    # Only PageRank-family algorithms report an l1-norm.
    if alg not in (ALG_PR, ALG_DELTAPR):
        return " N/A"

    matches = glob.glob(log_prefix + '_time.txt')
    if len(matches) != 1:
        return " N/A"

    for line in open(matches[0]):
        if "L1-NORM: " in line:
            return line.split()[1]
    return " N/A"
def check_files(log_prefix, machines):
    """Ensures all log files are present.

    Arguments:
    log_prefix -- the prefix of one experiment run's log files (str)
    machines -- number of machines tested (int)

    Returns:
    A tuple of a boolean and a string. The booleand is False if there
    is a critical missing log, and True otherwise. The string gives the
    source of the error, or a warning for missing CPU/net logs.
    """

    logname = os.path.basename(log_prefix)

    # The time log is mandatory for any further parsing.
    if len(glob.glob(log_prefix + '_time.txt')) == 0:
        return (False, "\n ERROR: " + logname + "_time.txt missing!")

    # Per-machine statistic logs expected for every run.
    stats = ['nbt', 'mem', 'cpu', 'net']

    if do_master:
        # Only the master's (_0_) stat logs are required.
        for stat in stats:
            if len(glob.glob(log_prefix + '_0_' + stat + '.txt')) == 0:
                return (False, "\n ERROR: " + logname + "_0_" + stat + ".txt missing!")
    else:
        for stat in stats:
            # machines+1, as the master has those log files too
            if len(glob.glob(log_prefix + '_*_' + stat + '.txt')) < machines+1:
                return (False, "\n ERROR: " + logname + "_*_" + stat + ".txt missing!")

    return (True, "")
###############
# Output data
###############
def single_iteration(log):
    """Outputs results for one run of an experiment.

    Arguments: time log file name.

    Returns: results for the run as an output friendly string (a header
    line followed by a formatted table row when all logs are present, or
    the header plus an error message otherwise).
    """

    # cut via range, in case somebody decides to put _time.txt in the path
    logname = os.path.basename(log)[:-len('_time.txt')]
    # Log names are formatted as <alg>_<graph>_<machines>_... .
    alg, _, machines = logname.split('_')[:3]

    # header string
    if (system == SYS_MIZAN) and (alg != ALG_PREMIZAN):
        header = logname + " (excludes premizan time)"
    elif (system == SYS_GIRAPH) and (len(glob.glob(log)) != 0):
        # Default to "cancelled" unless the log reports job completion.
        header = logname + " (cancelled job)"
        for line in open(log):
            if "Job complete: " in line:
                header = logname + " (" + line.split()[6] + ")"
                break
    else:
        header = logname

    log_prefix = log[:-len('_time.txt')]
    is_ok, err_str = check_files(log_prefix, int(machines))

    # get l1-norms for PR, if applicable
    l1norm = l1norm_parser(log_prefix, alg)

    if is_ok:
        time_run, time_io = time_parser(log_prefix, system, alg)
        mem_min, mem_avg, mem_max = mem_parser(log_prefix, int(machines))
        eth_recv, eth_sent = net_parser(log_prefix, int(machines))

        # Column order matches the table header printed at module level.
        stats = (time_run+time_io, time_io, time_run, mem_min, mem_avg, mem_max, eth_recv, eth_sent, l1norm)
        separator = "------------+------------+------------+--------------------------------+-----------------------------+------------------------"
        return header + err_str + "\n" + separator + "\n %8.2fs | %8.2fs | %8.2fs | %7.3f / %7.3f / %7.3f GB | %9.3f / %9.3f GB | %s \n" % stats + separator
    else:
        return header + err_str
# no point in doing parallel computation b/c # of logs parsed is usually not very large
#out = Parallel(n_jobs=n_cores)(delayed(single_iteration)(log) for log in logs)
# output results serially
print("")
print("==============================================================================================================================")
print(" Total time | Setup time | Comp. time | Memory usage (min/avg/max) | Total net I/O (recv/sent) | l1-norm (PR) ")
print("============+============+============+================================+=============================+========================")
print("")
for log in logs:
print(single_iteration(log))
print("")
# another friendly reminder of what each thing is...
print("============+============+============+================================+=============================+========================")
print(" Total time | Setup time | Comp. time | Memory usage (min/avg/max) | Total net I/O (recv/sent) | l1-norm (PR) ")
print("==============================================================================================================================")
print("")
| [
"young.han@uwaterloo.ca"
] | young.han@uwaterloo.ca |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.