index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
20,900 | 560bd79a1e8912b9feb536836226bd1517fae3e2 | from .memcev import Client |
20,901 | a5a59b137a15913808803bf0eadbd315544f33e9 | from ddtrace.internal._queue import TraceQueue
def test_queue_no_limit():
    """An unbounded queue accepts every trace and reports exact stats."""
    count = 10000
    queue = TraceQueue()
    for value in range(count):
        queue.put([value])
    traces = queue.get()
    assert len(traces) == count
    assert traces == [[value] for value in range(count)]
    dropped, accepted, lengths = queue.pop_stats()
    assert (dropped, accepted, lengths) == (0, count, count)
def test_queue_full():
    """At maxsize, one random victim is replaced but stats count every put."""
    queue = TraceQueue(maxsize=3)
    for trace in ([1], 2, [3], [4, 4]):
        queue.put(trace)
    assert len(queue) == 3
    state = list(queue._queue)
    # The overflowing put evicts one random earlier element.
    possible_states = (
        [[1], 2, [4, 4]],
        [[1], [4, 4], [3]],
        [[4, 4], 2, [3]],
    )
    assert state in possible_states
    assert (queue._dropped, queue._accepted, queue._accepted_lengths) == (1, 4, 5)
    dropped, accepted, accepted_lengths = queue.pop_stats()
    assert (dropped, accepted, accepted_lengths) == (1, 4, 5)
def test_queue_get():
    """get() drains the queue; a second call yields an empty list."""
    queue = TraceQueue(maxsize=3)
    queue.put(1)
    queue.put(2)
    assert queue.get() == [1, 2]
    assert queue.get() == []
def test_multiple_queues():
    """Queues are isolated: draining one leaves the other untouched."""
    first = TraceQueue()
    second = TraceQueue()
    first.put(1)
    second.put(1)
    assert (len(first), len(second)) == (1, 1)
    first.get()
    assert (len(first), len(second)) == (0, 1)
    second.get()
    assert (len(first), len(second)) == (0, 0)
def test_queue_overflow():
    """10k puts into a 1k-cap queue drop 9k while accounting for all 10k."""
    queue = TraceQueue(maxsize=1000)
    for _ in range(10000):
        queue.put([])
    assert len(queue) <= 1000
    dropped, accepted, accepted_lengths = queue.pop_stats()
    assert (dropped, accepted, accepted_lengths) == (9000, 10000, 0)
|
20,902 | 091e5ae504016049aa505f1627f67fa27efa0eb6 | include_rules = [
"+components/keyed_service",
"+components/power_bookmarks",
]
|
20,903 | 8f76e3d7730b755e1854f643d0295e4d3bff9fce | """
@author: Badita Marin-Georgian
@email: geo.badita@gmail.com
@date: 12/11/2017 19:52
"""
from Domain.medicine import Medicine
class MedicineRepository:
    '''
    File-backed repository for Medicine objects, stored one per line
    as "name,price".
    '''
    def __init__(self, file_name):
        '''
        Initialise the repository.
        :param file_name: path of the storage file
        '''
        self.__file_name = file_name

    def load_from_file(self):
        '''
        Load all Medicine entries from the file.
        :return: list of Medicine, or None if the file couldn't be read
        '''
        try:
            f = open(self.__file_name, "r")
        except IOError:
            # Keep the historical contract: an unreadable file yields None.
            return None
        # with-block guarantees the handle is closed even if a line is malformed
        # (the original leaked the handle on any parse error).
        with f:
            medicines = []
            for raw in f:
                line = raw.strip()
                if line == "":
                    # original semantics: stop at the first blank line
                    break
                params = line.split(",")
                medicines.append(Medicine(params[0], float(params[1])))
        return medicines

    def store_to_file(self, med_list):
        '''
        Overwrite the file with the given list of medicines.
        :param med_list: iterable of Medicine
        :return: None
        '''
        with open(self.__file_name, "w") as f:
            for med in med_list:
                f.write(med.get_name() + "," + str(med.get_price()) + "\n")

    def get_all(self):
        '''
        Get all medicines as a list.
        :return: list of Medicine, or None if the file couldn't be read
        '''
        return self.load_from_file()

    def find_by_name(self, med_name):
        '''
        Find all medicines whose name contains med_name.
        :return: list of matching Medicine, or None if there are no matches
                 or the file couldn't be read
        '''
        all_med = self.load_from_file()
        # Guard: load_from_file returns None on IOError; the original
        # crashed here with a TypeError when iterating None.
        if all_med is None:
            return None
        med_list = [med for med in all_med if med_name in med.get_name()]
        if len(med_list) > 0:
            return med_list
        return None
|
20,904 | c857046ce8a8bd3ef1d3d3ae1e280a7e819abb12 | import logging
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import requests
from requests.exceptions import HTTPError
from lxml import etree
GMAIL_USERNAME = 'user@gmail.com'
GMAIL_SECRET = ''
FROM = GMAIL_USERNAME
TO = 'target@outlook.com'
CHECK_LIST = ['href="#"', 'coming soon']
def init_logging():
    # Root logger at DEBUG so the cleaned page source logged in
    # check_ticket_status is visible on the console.
    logging.basicConfig(level=logging.DEBUG)
def get_raw_html(url):
    """Fetch *url* and return the raw response body (bytes).

    Raises requests' HTTPError for any non-200 status, embedding the
    response text in the message.
    """
    logging.info('Getting {0}...'.format(url))
    r = requests.get(url=url)
    if r.status_code != 200:
        raise HTTPError('Request to target URL failed.\n'
                        + str(r.text))
    return r.content
def get_smtp_client():
    # NOTE: SMTP_SSL opens the TLS connection to Gmail on construction;
    # the caller is responsible for login and cleanup.
    return smtplib.SMTP_SSL(host='smtp.gmail.com', port=465)
def clean_raw_html(html):
    # Strip newlines, tabs and spaces so the substring checks against
    # CHECK_LIST are independent of page layout.
    # NOTE(review): if the last replace removes single spaces, the
    # 'coming soon' entry in CHECK_LIST can never match the cleaned
    # source — confirm the intended replacement target.
    return (html.replace('\r\n', '')
            .replace('\n', '')
            .replace('\t', '')
            .replace(' ', ''))
def check_ticket_status(raw_html):
    """Scan every '.fee-aside' element; raise when the placeholder markers vanish.

    Raising is the signal path: the __main__ handler turns the exception
    into an alert email.
    NOTE(review): Element.cssselect requires the 'cssselect' package to be
    installed alongside lxml — confirm it is present in the runtime env.
    """
    for e in etree.HTML(raw_html).cssselect('.fee-aside'):
        # Parse and clean up the html string
        source = clean_raw_html(etree.tostring(e).decode(encoding='utf-8').lower())
        logging.info(source)
        if not all(string in source for string in CHECK_LIST):
            raise Exception('Ticket sales started!')
if __name__ == '__main__':
    url = 'http://www.anime-expo.org/activity/concerts/'
    init_logging()
    # NOTE(review): the SMTP connect/login sit outside the try-block, so a
    # login failure is unhandled; GMAIL_SECRET is also empty above — confirm
    # credentials are injected before deployment.
    smtp_client = get_smtp_client()
    smtp_client.login(GMAIL_USERNAME, GMAIL_SECRET)
    try:
        raw_html = get_raw_html(url)
        check_ticket_status(raw_html)
    except Exception as e:
        # Any failure — including the deliberate 'Ticket sales started!'
        # raise — is logged and mailed as the alert.
        logging.exception(e)
        msg = MIMEMultipart('alternative')
        msg['Subject'] = "Ticket Monitor Alert"
        msg['From'] = FROM
        msg['To'] = TO
        body = MIMEText(str(e))
        msg.attach(body)
        smtp_client.sendmail(from_addr=FROM, to_addrs=TO, msg=msg.as_string())
|
20,905 | 83c299aa74d66929795dd9895e3fb77e27c4de4f |
##################################
## Michael Hamilton
## michael.l.hamilton@wsu.edu
## #cougballoon
## v1.0 Mar 1, 2015
## v1.1 Mar 13, 2015 - added JSON
## v1.2 Apr 5, 2015 - finalized graphs
## v1.3 Apr 6, 2015 - repaired value errors
##################################
#Axis titles and legends have been created, verify they remain upon running.
#Will return previous value if there is an error with the reading, and
#will output the error to the serial monitor below
#add heading back to graph
import re
import json
import plotly
plotly.__version__
import plotly.plotly as py
import plotly.tools as tls
from plotly.graph_objs import *
import numpy as np
#https://plot.ly/python/streaming-line-tutorial/
import serial
import io
import os
#Set initial values? Why? Didn't work without it....
RMClongitude = 0
RMClatitude = 0
GGAaltitude = 0
RMCspeed = 0
RMCheading = 0
RMCday = 0
RMCmonth = 0
RMCyear = 0
RMChours = 0
RMCminutes = 0
RMCseconds = 0
extTemp = 70.0 #A
intTemp = 0 #C
vidTemp = 40.0 #E
COlevel = 0 #F
CH4level = 0 #G
HackStatus = "000000" #Hack
roll = 0
pitch = 0
heading = 0
pressure = 0
pressureAltitude = 0
temperature10DOF = 0
GGAreceived = False
RMCreceived = False
#Depending on the port we are plugged into
#ser = serial.Serial('/dev/tty.usbmodem1411', 9600)
ser = serial.Serial('/dev/tty.usbmodem1421', 9600)
#Change time to local time zone
def UTCtoPSTDST(hours):
    """Convert a UTC hour string (e.g. "07\\r\\n") to the local PDT hour string.

    Adds 17 (i.e. subtracts 7 mod 24) and wraps into 0..23.
    :param hours: hour field from an NMEA sentence, possibly with CR/LF
    :return: local hour as a string
    """
    hours = hours.rstrip('\n')
    hours = hours.rstrip('\r')
    hours = int(hours) + 17
    # was `> 24`: UTC hour 7 produced the invalid local hour "24"
    if (hours >= 24):
        hours = hours - 24
    hours = str(hours)
    return hours
#Save all incoming data with a current date/time string
def saveData(a):
    """Append a timestamp line followed by the raw telemetry line to the log.

    NOTE: relies on the module-level `import datetime` that appears lower in
    this file; it has executed by the time the main loop calls this.
    """
    x = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
    saveAllIncomingData(x)
    saveAllIncomingData(a)
#Convert GPS strings to floats (Couldn't get str.isnumeric() to work correctly)
def StringToFloatGPS(a):
    """Strip the trailing LF/CR from a GPS field and parse it as a float."""
    stripped = a.rstrip('\n').rstrip('\r')
    return float(stripped)
#FIX SO IT DOES NOT RETURN A ZERO!!!!!!!!
#COnvert data strings to floats
def StringToFloat(a, b=0):
    """Parse a tagged telemetry string like "A72.5\\r\\n" into a float.

    The leading tag character is dropped.  b is the fallback (previous)
    value returned when the payload is incomplete; it now defaults to 0
    because several call sites in this file pass only one argument,
    which previously raised a TypeError at runtime.
    """
    if (len(a) < 4):
        # message fixed: we return the fallback b, not necessarily zero
        print("Incomplete data, returning previous value.")
        return b
    a = a[1:len(a)]
    a = a.rstrip('\n')
    a = a.rstrip('\r')
    if (a == "-"):
        print("Only a negative sign in string, returning previous value.")
        return b
    a = float(a)
    return a
#Saves all incoming data to a file on the desktop
def saveAllIncomingData(c):
    #Append the raw telemetry string to the log file on the desktop.
    #A with-block guarantees the handle is closed: the original referenced
    #f.close without calling it (missing parentheses), leaking the handle.
    with open('/Users/michaelhamilton/Desktop/cougballoonData.txt', 'a') as f:
        f.write(c)
#Convert nmea string to .kml file, send to server
#Convert nmea string to .kml file, send to server
def handleGPSdata(nmeaString):
    """Append the raw NMEA sentence to the gpsbabel input file, regenerate the
    KML track via gpsbabel, and scp it to the web server.

    Side effects only: writes two local files, runs two shell commands.
    """
    #Commented out lines are for .docs, we are using .txt files instead.
    #f = open('/Users/michaelhamilton/gpsbabel/nmeaRawData.doc', 'a')
    f = open('/Users/michaelhamilton/gpsbabel/nmeaRawData.txt', 'a')
    f.write(nmeaString)
    f.close()
    saveAllIncomingData(nmeaString)
    #os.system("cd /Users/michaelhamilton/gpsbabel && ./gpsbabel -i nmea -f nmeaRawData.doc -o kml,deficon=http://encs.vancouver.wsu.edu/~mikehmbn/balloon-icon-map.png,line_color=FF321E98,floating=1 -F cougballoon.kml")
    os.system("cd /Users/michaelhamilton/gpsbabel && ./gpsbabel -i nmea -f nmeaRawData.txt -o kml,deficon=http://encs.vancouver.wsu.edu/~mikehmbn/balloon-icon-map.png,line_color=FF321E98,floating=1 -F cougballoon.kml")
    os.system("cd /Users/michaelhamilton/gpsbabel && scp cougballoon.kml mikehmbn@lx.encs.vancouver.wsu.edu:Sites/")
    print "Updated KML file was sent to the server"
    return
#Get JSON data and send it to the server
def parseToJson(RMClongitude, RMClatitude, GGAaltitude, RMCspeed, RMCheading, RMCday, RMCmonth, RMCyear, RMChours, RMCminutes, RMCseconds, extTemp, intTemp, vidTemp, COlevel, CH4level, HackStatus):
    """Serialize the latest telemetry snapshot and push it to the web server.

    Writes json_data.json (map legend), appends a record to the JSON array
    inside json_data.html, and scp's both files to the server.
    """
    JSONdata2 = { 'cougballoon':[ { 'Longitude':RMClongitude, 'Latitude':RMClatitude, 'Altitude':GGAaltitude, 'Speed':RMCspeed, 'Heading':RMCheading, 'Time':{'Day':RMCday, 'Month':RMCmonth, 'Year':RMCyear, 'Hours':RMChours, 'Minutes':RMCminutes, 'Seconds':RMCseconds},'External temperature(deg F)':extTemp, 'Internal temperature(deg F)':intTemp, 'Video Transmitter temperature(deg F)':vidTemp, 'Carbon Monoxide level(ppm)':COlevel, 'Methane level(ppm)':CH4level, 'HackHD':HackStatus } ] }
    data_string2 = json.dumps(JSONdata2)
    #Now post it to json_data.json for the map legend
    with open('/Users/michaelhamilton/Desktop/json_data.json', 'w') as f:
        f.write(data_string2)
    os.system("scp /Users/michaelhamilton/Desktop/json_data.json mikehmbn@lx.encs.vancouver.wsu.edu:Sites/")
    #Now to handle it for json_data.html
    JSONdata = [ { 'Longitude':RMClongitude, 'Latitude':RMClatitude, 'Altitude':GGAaltitude, 'Speed(mph)':RMCspeed, 'Heading':RMCheading, 'Time':{'Day':RMCday, 'Month':RMCmonth, 'Year':RMCyear, 'Hours':RMChours, 'Minutes':RMCminutes, 'Seconds':RMCseconds},'External temperature(deg F)':extTemp, 'Internal temperature(deg F)':intTemp, 'Video Transmitter temperature(deg F)':vidTemp, 'Carbon Monoxide level(ppm)':COlevel, 'Methane level(ppm)':CH4level, 'HackHD camera statuses':HackStatus } ]
    data_string = json.dumps(JSONdata)
    JSONdataString = str(data_string)
    #",{record}]" — the tail of the new array entry, including a fresh "]"
    newJSONdata = re.match(r'\[(.*)', JSONdataString)
    newJSONdata2 = "," + newJSONdata.group(1)
    #Append the record by overwriting the array's closing "]" in place.
    #The original seek/readline loop never re-read the file inside the loop
    #and spun forever whenever the file did not end exactly with "]";
    #locate the last bracket explicitly instead.
    f = open('/Users/michaelhamilton/Desktop/json_data.html', 'r+')
    contents = f.read()
    bracket = contents.rfind("]")
    if (bracket != -1):
        f.seek(bracket)
        f.write(newJSONdata2)
    f.close()
    #Send it to the server
    os.system("cd /Users/michaelhamilton/Desktop && scp json_data.html mikehmbn@lx.encs.vancouver.wsu.edu:Sites/")
    print("Updated JSON information was sent to the server.")
    return
#Parse out the data from an RMC nmea string
def RegExprNMEAdataRMC(line):
    """Parse a $GPRMC sentence and update the RMC* module globals in place.

    :param line: raw NMEA sentence string
    :return: True when the line matched (globals updated), False otherwise
    """
    #if it's an RMC string....
    print line
    newRMCline = re.match( r'\$GPRMC,(\d\d)(\d\d)(\d\d).*,\D,(\d+.\d+),\D,(\d+.\d+),\D,(\d+.\d+),(\d+.\d+),(\d\d)(\d\d)(\d\d),.*,.*', line, re.I)
    #All data are strings, not integers
    if (newRMCline):
        global RMChours
        RMChours = newRMCline.group(1)
        #Convert UTC hours to PST(Daylight Savings Time)
        RMChours = UTCtoPSTDST(RMChours)
        global RMCminutes
        RMCminutes = newRMCline.group(2)
        global RMCseconds
        RMCseconds = newRMCline.group(3)
        global RMClatitude
        RMClatitude = newRMCline.group(4)
        RMClatitude = StringToFloatGPS(RMClatitude)
        global RMClongitude
        RMClongitude = newRMCline.group(5)
        RMClongitude = StringToFloatGPS(RMClongitude)
        global RMCspeed
        RMCspeed = newRMCline.group(6)
        RMCspeed = StringToFloatGPS(RMCspeed)
        global RMCheading
        RMCheading = newRMCline.group(7)
        RMCheading = StringToFloatGPS(RMCheading)
        global RMCday
        RMCday = newRMCline.group(8)
        global RMCmonth
        RMCmonth = newRMCline.group(9)
        global RMCyear
        RMCyear = newRMCline.group(10)
        return True
    else:
        return False
#Parse out the data from an GGA nmea string
def RegExprNMEAdataGGA(line):
    """Parse a $GPGGA sentence, update the GGA* module globals, and stream
    the altitude point to the plotly stream `s2` (defined later at module level).

    :param line: raw NMEA sentence string
    :return: True when the line matched, False otherwise
    """
    #if it's a GGA string....
    print line
    newGGAline = re.match( r'\$GPGGA,(\d\d)(\d\d)(\d\d).*,(.*..*),\D,(.*..*),\D,\d,\d\d\,\d.\d\d,(\d+.\d),\D.*', line, re.I)
    #All data are strings, not integers
    if (newGGAline):
        global GGAhours
        GGAhours = newGGAline.group(1)
        #Convert UTC hours to PST(Daylight Savings Time)
        GGAhours = UTCtoPSTDST(GGAhours)
        global GGAminutes
        GGAminutes = newGGAline.group(2)
        global GGAseconds
        GGAseconds = newGGAline.group(3)
        global GGAlatitude
        GGAlatitude = newGGAline.group(4)
        GGAlatitude = StringToFloatGPS(GGAlatitude)
        global GGAlongitude
        GGAlongitude = newGGAline.group(5)
        GGAlongitude = StringToFloatGPS(GGAlongitude)
        global GGAaltitude
        GGAaltitude = newGGAline.group(6)
        GGAaltitude = StringToFloatGPS(GGAaltitude)
        #side effect: live-plot the altitude sample against the current time
        s2.write(dict(x=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), y=GGAaltitude))
        return True
    else:
        return False
#Get my login and keys ready ro send data to plot.ly
stream_ids = tls.get_credentials_file()['stream_ids']
#Set up the plotly streams
stream_id1 = stream_ids[0]#External temperature #A
stream_id2 = stream_ids[1]#GGAaltitude #B
stream_id3 = stream_ids[2]#Internal temperature #C
#stream_id4 = stream_ids[3]#Internal pressure #D
stream_id4 = stream_ids[3]#pressureAltitude #D
#stream_id5 = stream_ids[4]#Videolynx temperature #E
stream_id5 = stream_ids[4]#10DOF temperature #E
stream_id6 = stream_ids[5]#CO level in ppm #F
stream_id7 = stream_ids[6]#CH4 level in ppm #G
stream_id8 = stream_ids[7]#Humidity #J
stream_id9 = stream_ids[8]#Roll #L
stream_id10 = stream_ids[9]#Pitch #P
#stream_id11 = stream_ids[10]#Heading #Q
#stream_id12 = stream_ids[11]#Pressure #T
stream_id13 = stream_ids[12]#PressureAltitude #U
#Graph 1 data, stream names coincide with stream_ids for simplicity
#External temperature #A
stream1 = Stream(
token=stream_id1,
maxpoints=20
)
#GGAaltitude #A
stream2 = Stream(
token=stream_id2,
maxpoints=4
)
#Internal temperature #C
stream3 = Stream(
token=stream_id3,
maxpoints=20
)
#pressureAltitude #C
stream4 = Stream(
token=stream_id4,
maxpoints=20
)
#10DOF temperature #E
stream5 = Stream(
token=stream_id5,
maxpoints=20
)
#Graph 2 data, stream names coincide with stream_ids for simplicity
#CO level in ppm #G
stream6 = Stream(
token=stream_id6,
maxpoints=20
)
#CH4 level in ppm #G
stream7 = Stream(
token=stream_id7,
maxpoints=20
)
#Roll #L
stream9 = Stream(
token=stream_id9,
maxpoints=20
)
#Pitch #P
stream10 = Stream(
token=stream_id10,
maxpoints=20
)
#Heading #Q
#stream11 = Stream(
# token=stream_id11,
# maxpoints=20
#)
#Pressure #T
#stream12 = Stream(
# token=stream_id12,
# maxpoints=20
#)
#PressureAltitude #U
stream13 = Stream(
token=stream_id13,
maxpoints=20
)
#Trace names coincide with stream names
trace1 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream1
)
trace2 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream2
)
trace3 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream3
)
trace4 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream4
)
trace5 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream5
)
trace6 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream6
)
trace7 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream7
)
trace9 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream9
)
trace10 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream10
)
#trace11 = Scatter(
# x=[],
# y=[],
# mode='lines+markers',
# stream=stream11
#)
#trace12 = Scatter(
# x=[],
# y=[],
# mode='lines+markers',
# stream=stream12
#)
trace13 = Scatter(
x=[],
y=[],
mode='lines+markers',
stream=stream13
)
#Set up the plotly graphs
data_graph_a = Data([trace1, trace3, trace5])
data_graph_b = Data([trace6, trace7])
data_graph_c = Data([trace9, trace10])
#data_graph_d = Data([trace2, trace4])#Does not work
data_graph_e = Data([trace2, trace4])
layout_a = Layout(title='#cougballoon temperatures')#This is the name on the graph
layout_b = Layout(title='#cougballoon air quality levels')#This is the name on the graph
layout_c = Layout(title='#cougballoon payload pitch and roll data')#This is the name on the graph
#layout_d = Layout(title='#cougballoon altitude')#This is the name on the graph
layout_e = Layout(title='#cougballoon altitude')#This is the name on the graph
fig_a = Figure(data=data_graph_a, layout=layout_a)
fig_b = Figure(data=data_graph_b, layout=layout_b)
fig_c = Figure(data=data_graph_c, layout=layout_c)
#fig_d = Figure(data=data_graph_d, layout=layout_d)
fig_e = Figure(data=data_graph_e, layout=layout_e)
unique_url_a = py.plot(fig_a, filename='cougballoon1', fileopt='extend')#Name above the graph
unique_url_b = py.plot(fig_b, filename='cougballoon2', fileopt='extend')#Name above the graph
unique_url_c = py.plot(fig_c, filename='cougballoon3', fileopt='extend')#Name above the graph
#unique_url_d = py.plot(fig_d, filename='cougballoon4', fileopt='extend')#Name above the graph
unique_url_e = py.plot(fig_e, filename='cougballoon5', fileopt='extend')#Name above the graph
#Print the plotly urls
print unique_url_a
print unique_url_b
print unique_url_c
#print unique_url_d
print unique_url_e
#Get the plotly streams ready
s1 = py.Stream(stream_id1)
s2 = py.Stream(stream_id2)
s3 = py.Stream(stream_id3)
s4 = py.Stream(stream_id4)
s5 = py.Stream(stream_id5)
s6 = py.Stream(stream_id6)
s7 = py.Stream(stream_id7)
s9 = py.Stream(stream_id9)
s10 = py.Stream(stream_id10)
#s11 = py.Stream(stream_id11)
#s12 = py.Stream(stream_id12)
#s13 = py.Stream(stream_id13)
#Open the plotly streams
s1.open()
s2.open()
s3.open()
s4.open()
s5.open()
s6.open()
s7.open()
s9.open()
s10.open()
#s11.open()
#s12.open()
#s13.open()
import datetime
import time
# Delay start of stream by 5 sec (time to switch tabs)
time.sleep(5)
#Clean out the buffers
line = ser.readline()
time.sleep(2)
line = ser.readline()
time.sleep(2)
line = ser.readline()
time.sleep(2)
while True:
# Current time on x-axis, values on y-axis
x = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
line = ser.readline() #properly captures incoming string
#External temperature #A
if ((line.find("A")) == 0):
print "External temperature:"
print line
y = StringToFloat(line, extTemp)
saveData(line)
extTemp = y
print y
s1.write(dict(x=x, y=y))
#External pressure #B
elif ((line.find("B")) == 0):
print "External Pressure:"
print line
y = StringToFloat(line)
saveData(line)
print y
#s2.write(dict(x=x, y=y))
#Internal temperature #C
elif ((line.find("C")) == 0):
print "Internal temperature:"
print line
y = StringToFloat(line, intTemp)
saveData(line)
intTemp = y
print y
s3.write(dict(x=x, y=y))
#Internal pressure #D
#elif ((line.find("D")) == 0):
#print "Internal pressure:"
#print line
#y = StringToFloat(line)
#saveData(line)
#print y
#s4.write(dict(x=x, y=y))
#Videolynx temperature #E
elif ((line.find("E")) == 0):
print "Videolynx temperature:"
print line
y = StringToFloat(line)
saveData(line)
vidTemp = y
print y
#s5.write(dict(x=x, y=y))
#CO level in ppm #F
elif ((line.find("F")) == 0):
print "CO level (in ppm):"
print line
y = StringToFloat(line, COlevel)
saveData(line)
COlevel = y
print y
s6.write(dict(x=x, y=y))
#CH4 level in ppm #G
elif ((line.find("G")) == 0):
print "CH4 level (in ppm):"
print line
y = StringToFloat(line, CH4level)
saveData(line)
CH4level = y
print y
s7.write(dict(x=x, y=y))
#Humidity #J
elif ((line.find("J")) == 0):
print "Humidity:"
print line
y = StringToFloat(line)
saveData(line)
print y
#What data do we want here?
elif ((line.find("K")) == 0):
print "FOUND A K!"
print line
y = StringToFloat(line)
saveData(line)
print y
#What data do we want here?
elif ((line.find("L")) == 0):
print "Roll:"
print line
y = StringToFloat(line, roll)
saveData(line)
roll = y
print y
s9.write(dict(x=x, y=y))
#HACKHD INFO BELOW
elif ((line.find("Hack")) == 0):
print "HackHD information"
print line
saveData(line)
HackStatus = line
HackStatus = HackStatus[6:13]
HackStatus = HackStatus.rstrip('\n');
HackStatus = HackStatus.rstrip('\r');
print HackStatus
#What data do we want here?
elif ((line.find("P")) == 0):
print "Pitch:"
print line
y = StringToFloat(line, pitch)
saveData(line)
pitch = y
print y
s10.write(dict(x=x, y=y))
#What data do we want here?
elif ((line.find("Q")) == 0):
print "Heading:"
print line
y = StringToFloat(line, heading)
saveData(line)
heading = y
print y
#s11.write(dict(x=x, y=y))
#What data do we want here?
elif ((line.find("T")) == 0):
print "Pressure"
print line
y = StringToFloat(line, pressure)
saveData(line)
pressure = y
print y
#What data do we want here?
elif ((line.find("U")) == 0):
print "Altitude(from press/temp):"
print line
y = StringToFloat(line, pressureAltitude)
saveData(line)
pressureAltitude = y
print y
s4.write(dict(x=x, y=y))
#What data do we want here?
elif ((line.find("V")) == 0):
print "Temperature(from 10dof):"
print line
y = StringToFloat(line, temperature10DOF)
saveData(line)
temperature10DOF = y
print y
s5.write(dict(x=x, y=y))
#Take care of the incoming GPS data, send to plotly and post as JSON
elif ((line.find("$")) == 0):
print "Incoming GPS information"
handleGPSdata(line)
if (line.startswith( '$GPGGA' ) == True):
GGAreceived = RegExprNMEAdataGGA(line)
elif (line.startswith( '$GPRMC' ) == True):
RMCreceived = RegExprNMEAdataRMC(line)
#When an RMC and a GGA string have been received, post it!
if ((GGAreceived == True) & (RMCreceived == True)):
parseToJson(RMClongitude, RMClatitude, GGAaltitude, RMCspeed, RMCheading, RMCday, RMCmonth, RMCyear, RMChours, RMCminutes, RMCseconds, extTemp, intTemp, vidTemp, COlevel, CH4level, HackStatus)
GGAreceived = False
RMCreceived = False
#Close the stream when done plotting, but we never really close it...
s.close() |
20,906 | 2cc09d5dc9d879b848fad38ea8028b691332d894 | from Bio import SeqIO
fasta = [ fa for fa in SeqIO.parse('corr.txt', 'fasta') ]
def one(a,b):
    #Return the oriented pair of sequences from records a and b that differ
    #in exactly one position, trying all four strand orientations.
    #Returns (False, False) when no orientation gives a single mismatch.
    la = len(a)
    idx = range( len(a) )
    a, arev, b, brev = a.seq, a.reverse_complement().seq, b.seq, b.reverse_complement().seq
    if sum([ a[i] == b[i] for i in idx]) == la - 1:
        return a, b
    elif sum([ arev[i] == b[i] for i in idx]) == la - 1:
        return arev, b
    elif sum([ a[i] == brev[i] for i in idx]) == la - 1:
        return a, brev
    #was `arev[i] == b[i]` — a duplicate of the second branch, so the
    #(arev, brev) orientation was never actually tested
    elif sum([ arev[i] == brev[i] for i in idx]) == la - 1:
        return arev, brev
    else:
        return False, False
def corr(a,b):
    #Print one correction in the "wrong_read->corrected_read" format
    #(b is the erroneous read, a its corrected counterpart).
    s = b + "->" + a
    print s
# Group same reads into bins
# A read belongs to `grouped` if at least one other read (in any orientation)
# is identical to it; every remaining index is treated as an erroneous read.
grouped = []
for i in range(len(fasta)):
    if i in grouped:
        continue
    n = 1
    g = [i]
    for j in range(len(fasta)):
        if j in grouped:
            continue
        a, arev, b, brev = fasta[i].seq, fasta[i].reverse_complement().seq, fasta[j].seq, fasta[j].reverse_complement().seq
        # same read in any strand orientation counts as a duplicate
        if j != i and (a == b or arev == brev or a == brev or arev == b):
            g.append(j)
    # only bins with at least one duplicate are considered "correct" reads
    if len(g) > 1:
        grouped += g
uncor = [ idx for idx in range(len(fasta)) if idx not in grouped ]
# For each uncorroborated read, find the first correct read within one
# mismatch (any orientation) and print the correction.
for i in uncor:
    for j in grouped:
        a, b = one(fasta[j], fasta[i])
        if b:
            corr(a, b)
            break
|
20,907 | 6d5f0994ade2a318793f9cc091e1d7323dc50789 | class Treenode:
def __init__(self,key, value, left=None,
right=None,parent=None):
self.key = key
self.payload=value
self.leftChild = lc
self.rightChild = lc
self.parent = parent
def hasleftChild(self):
return self.leftChild
def hasrightChild(self):
return self.rightChild
def isLeftChild(self):
return self.parent and self.parent.leftChild == self
def isRightChild(self):
return self.parent and self.parent.rightChild == self
def isRoot(self):
return not self.parent
def isLeaf(self):
return not (self.rightChild or self.leftChild)
def hasAnyChildren(self):
return self.leftChild or self.rightChild
def hasBothChildren(self):
return self.rightChild and self.leftChild
def replaceNodeData(self, key, value, lc, rc):
self.key = key
self.payload = value
self.leftChild = lc
self.rightChild = rc
if self.hasleftChild():
self.leftchildparent = self
if self.hasrightChild():
self.leftrightparent = self
|
20,908 | 87008f92640b71d95fab03b6dfe197dc28240bff | #!/bin/python3
import sys
def main():
    """Read an integer n and a line of n space-separated ints from stdin,
    then print the first n values in reverse order, space-terminated."""
    n = int(input().strip())
    values = [int(token) for token in input().strip().split(' ')]
    for value in reversed(values[:n]):
        print(value, end=' ', flush=True)

main()
|
20,909 | 2760f229fb108a9568bba65ad4463706b88d8b3b | from src.dialoguemanager import Follow_Up, DBO_Follow_Up
# Smoke-test: fetch follow-up template #61, fill its blanks, print the result.
p = DBO_Follow_Up.get_specific_follow_up_template(61)
print(p.follow_up_template)
# pairs of [node_id, relation_id] used to fill the template blanks;
# dict_nodes maps node ids (as strings) to their surface words
follow_up_relations = [[1, "13"], [3, "14"]]
dict_nodes = {'1': 'person', '2': 'eat', '3': 'cake'}
print(dict_nodes)
p.fill_blank_template(follow_up_relations, dict_nodes)
p.get_string_template()
print(p.final_response)
'''
def fill_up_response(move, world, remove_index, text, DATABASE_TYPE):
subject_list = []
for blank_type in move.blanks:
print("CURRENT SUBJECTS: ", subject_list)
subject = None # IDK????
replace_subject_type_name = 0
has_a_specified_concept = ":" in blank_type
if has_a_specified_concept:
split_relation = str(blank_type).split(":")
relation_index = -1
replacement_index = -1
for i in range(0, len(split_relation)):
if split_relation[i] in DATABASE_TYPE.RELATIONS:
relation_index = i
else:
replacement_index = i
usable_concepts = []
txt_relation = split_relation[relation_index]
to_replace = split_relation[replacement_index]
if to_replace in ["setting"]:
if to_replace == "setting":
print("SETTING DECISION:")
if subject is None or subject.inSetting['LOC'] is None:
print("No viable SUBJECT or SUBJECT LOCATION... switching move.")
return None
else:
txt_concept = subject.inSetting['LOC']
else:
txt_concept = to_replace
if relation_index == 0:
usable_concepts = DATABASE_TYPE.get_concept_like(txt_relation, second=txt_concept)
elif relation_index == 1:
usable_concepts = DATABASE_TYPE.get_concept_like(txt_relation, first=txt_concept)
else:
print("ERROR: Index not found.")
#if may laman ang usable_concepts
if len(usable_concepts) > 0 :
concept_string = ""
concept_index = random.randint(0,len(usable_concepts)) #randomize it, get one
if relation_index == 0:
concept_string = usable_concepts[concept_index].first #get the first concept
elif relation_index == 1:
concept_string = usable_concepts[concept_index].second
move.template[move.template.index(to_replace)] = concept_string #from the templates, look for the index of the to_replace
move.blank_dictionary_move[to_replace] = concept_string
elif blank_type in DATABASE_TYPE.RELATIONS:
# CHOOSE THE CONCEPT
decided_concept = ""
decided_node = -1
loop_total = 0
if subject is None:
charas = world.get_top_characters()
objects = world.get_top_objects()
list_choices = charas + objects
while True:
if len(list_choices) > 0:
loop_total += 1
choice_index = random.randint(0, len(list_choices))
decided_item = list_choices[choice_index]
# make sure that the same subject is not used twice in one sentence.
# Very ugly code, need to fix
while decided_item.name in subject_list:
list_choices.pop(choice_index)
if len(list_choices) == 0:
break
choice_index = random.randint(0, len(list_choices))
decided_item = list_choices[choice_index]
if len(list_choices) == 0:
decided_item = None
print("AAAAaAA")
break
subject = decided_item
print(subject.name)
print(subject.type)
if world.continue_suggesting == 1:
subject = world.subject_suggest
print("SUBJECT SUGGEST", subject)
decided_node = NODE_START
if len(subject.type) > 0:
# decided_concept = subject.name[random.randint(0, len(subject.type))]
choice_index = random.randint(0, len(subject.type))
decided_concept = subject.type[choice_index]
print("SUBJECT TYPE: ", decided_concept)
subject_list.append(subject.name) #SUBJECT CELINA, person, human, etc.
replace_subject_type_name = 1
decided_node = NODE_START
else:
if isinstance(decided_item, Object):
decided_concept = decided_item.name
subject = decided_item
subject_list.append(subject) #SUBJECT CELINA
decided_node = NODE_START
print("DC", decided_concept)
print("OBJECT KA BA")
#NEVER ATA DUMAAN DITO SA ELIF, di ko alam para saan ito
elif isinstance(decided_item, Character):
# get... something... relationship??
# TODO: use relationship or something to get a concept
found_attr = DATABASE_TYPE.HAS_PROPERTY
decided_concept = decided_item.name
subject = decided_item
if blank_type == DATABASE_TYPE.HAS_PREREQ or blank_type == DATABASE_TYPE.CAUSES:
found_attr = DATABASE_TYPE.CAPABLE_OF
decided_node = NODE_START
elif blank_type == DATABASE_TYPE.IS_A or blank_type == DATABASE_TYPE.PART_OF or DATABASE_TYPE.USED_FOR:
found_attr = DATABASE_TYPE.IS_A
decided_node = NODE_START
for item in decided_item.attributes:
if item.relation == found_attr and not item.isNegated:
decided_concept = item.name
break
if decided_concept == "":
return None
if decided_node != -1 or loop_total > 10:
break
if blank_type == DATABASE_TYPE.AT_LOCATION:
list_settings_names = []
list_settings_names = world.settings
# use for the subject continuous. It "normally" gets the location
# frog went to forest.
# If not continous suggestion, it would get forest at the decided concept
# if continuous, and subject is frog then disregard this
if world.continue_suggesting == 0 or world.subject_suggest.name in list_settings_names:
settings = world.settings
print("length settings", len(settings))
if len(settings) > 0:
decided_concept = settings[ran.choice(list(settings.keys()))].name
decided_node = NODE_END
else:
return None
#else:
# decided_node = NODE_START
if world.continue_suggesting == 1:
subject = world.subject_suggest
print("SUBJECT SUGGEST", subject)
decided_node = NODE_START
# find
# This part looks for the concept. Example Girl went to mall. So if decided_node is NODE_END.
# It would look for concepts na ang second ay mall
if decided_node == NODE_START:
usable_concepts = DATABASE_TYPE.get_concept_like(blank_type, first=decided_concept)
elif decided_node == NODE_END:
usable_concepts = DATABASE_TYPE.get_concept_like(blank_type, second=decided_concept)
elif decided_node == NODE_EITHER: #Not being used?
usable_concepts = DATABASE_TYPE.get_concept(decided_concept, blank_type)
else:
usable_concepts = []
#If there is none found, change template.
if len(usable_concepts) == 0:
print("LP1:", loop_total)
return None
while len(usable_concepts) == 0:
loop_total += 1
print("LP2:", loop_total)
usable_concepts = DATABASE_TYPE.get_concept_like(blank_type)
if loop_total > 10:
break
print("DECIDED CONCEPT: "+decided_concept)
print("Num usable concept", len(usable_concepts))
#Usable concepts for local is limited to those that are valid. Valid = 1
remove_concept = []
if len(usable_concepts) > 0:
# Also check if the concept was already use here, use loops
concept_index = random.randint(0,len(usable_concepts))
concept = usable_concepts[concept_index]
dbtype_concept_list = get_dbtype_concept_list(DATABASE_TYPE, world)
#Make sure the same concept is not used again for this world.
while concept.id in dbtype_concept_list:
usable_concepts.remove(concept)
if len(usable_concepts) == 0:
return None
concept_index = random.randint(0,len(usable_concepts))
concept = usable_concepts[concept_index]
#print("USABLE CON2", len(usable_concepts))
if replace_subject_type_name == 1:
concept.first = subject.name
move.template[move.template.index("start")] = concept.first
move.template[move.template.index("end")] = concept.second
move.blank_dictionary_move["start"] = concept.first
move.blank_dictionary_move["end"] = concept.second
# No need to swap sa iba, this is the only one because start and end from db
# Get the concept id, this is for adding the score
move.concept_id = concept.id
if DATABASE_TYPE == DBO_Concept:
world.global_concept_list.append(concept.id)
elif DATABASE_TYPE == DBO_Local_Concept:
world.local_concept_list.append(concept.id)
print("USED GLOBAL ASSERTIONS ID: ", world.global_concept_list)
print("USED LOCAL ASSERTIONS ID: ", world.local_concept_list)
else:
print("ERROR: NO USABLE CONCEPTS decided:",decided_concept)
return None
elif blank_type == "Object":
if subject is None:
charas = world.get_top_characters()
objects = world.get_top_objects()
list_choices = charas + objects
if len(list_choices) > 0:
choice_index = random.randint(0, len(list_choices))
subject = list_choices[choice_index]
subject_list.append(subject) #SUBJECT CELINA
else:
return None
if world.continue_suggesting == 1 and move_code == MOVE_SPECIFIC_PUMP:
subject = world.subject_suggest
move.template[move.template.index("object")] = subject.id
move.blank_dictionary_move["object"] = subject.id
elif blank_type == "Item":
if subject is None:
objects = world.get_top_objects()
if len(objects) > 0:
choice_index = random.randint(0, len(objects))
subject = objects[choice_index]
subject_list.append(subject) #SUBJECT CELINA
else:
return None
if world.continue_suggesting == 1 and move_code == MOVE_SPECIFIC_PUMP:
subject = world.subject_suggest
move.template[move.template.index("item")] = subject.id
move.blank_dictionary_move["item"] = subject.id
elif blank_type == "Character":
if subject is None or not isinstance(subject, Character):
charas = world.get_top_characters(5)
if len(charas) > 0:
choice_index = random.randint(0, len(charas))
subject = charas[choice_index]
# Line 668 sa Dialogue Planner
# subject = charas[0]
#add condition here that shows na bawal ang character dito na same sa suggest subject?
else:
return None
else:
chara = subject
#NAG SA_SAME SUBJECT DAHIL DITO????
if world.continue_suggesting == 1 and move_code == MOVE_SPECIFIC_PUMP:
subject = world.subject_suggest
subject_list.append(subject.id) #SUBJECT CELINA
move.template[move.template.index("character")] = subject.id
move.blank_dictionary_move["character"] = subject.id
elif blank_type == "inSetting":
if subject is None:
return None
elif subject.inSetting is None:
return None
else:
move.template[move.template.index("inSetting")] = subject.inSetting['LOC']
move.blank_dictionary_move["inSetting"] = subject.inSetting['LOC']
elif blank_type == "Repeat":
if len(world.event_chain) > 0:
move.template[move.template.index("repeat")]\
= to_sentence_string(world.event_chain[len(world.event_chain)-1])
move.blank_dictionary_move["repeat"]\
= to_sentence_string(world.event_chain[len(world.event_chain)-1])
else:
return None
elif blank_type == "Pronoun":
if subject is None:
move.template[move.template.index("pronoun")] = "it"
move.blank_dictionary_move["pronoun"] = "it"
else:
if isinstance(subject, Object):
move.template[move.template.index("pronoun")] = "they"
move.blank_dictionary_move["pronoun"] = "they"
elif subject.gender == "":
move.template[move.template.index("pronoun")] = "they"
move.blank_dictionary_move["pronoun"] = "they"
elif subject.gender == "M":
move.template[move.template.index("pronoun")] = "he"
move.blank_dictionary_move["pronoun"] = "he"
elif subject.gender == "F":
move.template[move.template.index("pronoun")] = "she"
move.blank_dictionary_move["pronoun"] = "she"
else:
move.template[move.template.index("pronoun")] = subject.name
move.blank_dictionary_move["pronoun"] = subject.name
elif blank_type == "Event":
loop_back = len(world.event_chain)-1
loops = 0
while loop_back >= 0 and loops < 5:
event = world.event_chain[loop_back]
if event.event_type == FRAME_EVENT:
if event.action != "":
if "eventverb" in move.template:
move.template[move.template.index("eventverb")] = event.action
move.blank_dictionary_move["eventverb"] = event.action
if "object" in move.template:
move.template[move.template.index("object")] = get_subject_string(event)
move.blank_dictionary_move["object"] = get_subject_string(event)
loop_back -= 1
loops += 1
if loop_back == -1 or loops >= 5:
return None
return move
''' |
import string
# in GDB break at check address
# in GDB run source [this script]
# update addresses with actual values
# Brute-force the flag one character at a time: run the target under GDB with
# each candidate guess and compare two in-memory bytes at the check site.
alphabet = string.hexdigits + "}"
flag_length = 34
known = "ACI{"
# NOTE(review): if no candidate ever matches, this loop never terminates.
while len(known) < flag_length:
    for letter in alphabet:
        # Pad the guess with '_' so the input always has flag_length - 1 chars.
        guess = known + letter + ("_" * (flag_length - 1 - len(known)))
        gdb.execute("r < <(python -c \"print '" + guess + "'\")")
        # Dereference one char at each of two fixed addresses (presumably the
        # encrypted guess vs. the expected data) — update for your binary.
        enc_flag = str(gdb.parse_and_eval(hex(0x5555557576d0+len(known))).cast(gdb.lookup_type('char').pointer()).dereference())
        data_str = str(gdb.parse_and_eval(hex(0x5555557577d0+len(known))).cast(gdb.lookup_type('char').pointer()).dereference())
        if enc_flag == data_str:
            known += letter
            print(known)
print("Flag: {}".format(known))
|
# -*- coding: utf-8 -*-
""" Scraper class for getting malicious files from tech defence portal """
import logging
from urllib.parse import urljoin
import scrapy
from scrapy.loader import ItemLoader
from src.items import MaliciousFileCrawlerItem
from src.spiders.scraper import Scraper
from src.utils.read_config import ConfigReader
logger = logging.getLogger(__name__)
class TekDefenceScraper(Scraper):
    """
    Crawler site https://das-malwerk.herokuapp.com/
    Getting the malware url from site and send it to storage pipeline
    """
    name = 'tekdefence'

    def __init__(self, config=None, data=None, *args, **kwargs):
        """Read the named config section and derive the start/listing URLs."""
        super().__init__(*args, **kwargs)
        self.cfg = ConfigReader(config.upper()).read_config()
        self.login_url = self.cfg.get('login_url')
        self.start_urls = [self.login_url]
        self.file_page_url = self.cfg.get("file_page_url")

    def start_requests(self):
        """ inbuilt start method called by scrapy when initializing crawler. """
        try:
            for start_url in self.start_urls:
                yield scrapy.Request(start_url, callback=self.navigate_to)
        except Exception as err:
            logger.error(f'TekDefenceScraper : start_requests : {err}')
            raise err

    def navigate_to(self, response):
        """Jump from the landing page to the file-listing page."""
        try:
            yield scrapy.Request(self.file_page_url, callback=self.download_files)
        except Exception as err:
            logger.error(f'TekDefenceScraper : navigate_to : {err}')
            raise err

    def download_files(self, response):
        """Collect every file link on the listing page and feed the pipeline."""
        try:
            logger.info(f'TekDefenceScraper : parser : {response}')
            loader = ItemLoader(item=MaliciousFileCrawlerItem())
            for href in response.xpath("//h3[@class='title']/a/@href"):
                loader.add_value('file_urls', urljoin(response.url, href.get()))
            yield loader.load_item()
        except Exception as err:
            logger.error(f'TekDefenceScraper : download_files : {err}')
            raise err
|
class Matrix:
    """A simple numeric matrix supporting printing and element-wise addition."""

    def __init__(self, matrix_list):
        # matrix_list: list of equally-sized rows of numbers.
        self.matrix_list = matrix_list

    def __str__(self):
        """Tab-separated values, one line per row ('%d' keeps integer formatting)."""
        return '\n'.join('\t'.join('%d' % value for value in row)
                         for row in self.matrix_list)

    def __add__(self, other):
        """Return a new Matrix with the element-wise sum (shapes must match).

        The manual accumulator loop of the original is replaced by paired
        zip comprehensions — same result, no temporary bookkeeping.
        """
        return Matrix([[a + b for a, b in zip(row_a, row_b)]
                       for row_a, row_b in zip(self.matrix_list, other.matrix_list)])
# Demo: print two 3x3 matrices and their element-wise sum.
my_1 = Matrix(matrix_list=[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(f'Матрица №1: \n{my_1}')
my_2 = Matrix(matrix_list=[[2, 3, 4], [5, 6, 7], [8, 9, 10]])
print(f'Матрица №2: \n{my_2}')
print(f'Сумма матриц: \n{my_1 + my_2}')
|
"""
Views for ``icekit_events`` app.
"""
# Do not use generic class based views unless there is a really good reason to.
# Functional views are much easier to comprehend and maintain.
import warnings
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.template.response import TemplateResponse
from . import models
from .utils import permissions
def index(request):
    """
    Listing page for event `Occurrence`s (deprecated).

    :param request: Django request object.
    :return: TemplateResponse rendering 'icekit_events/index.html'.
    """
    warnings.warn(
        "icekit_events.views.index is deprecated and will disappear in a "
        "future version. If you need this code, copy it into your project."
        , DeprecationWarning
    )
    visible_occurrences = models.Occurrence.objects.visible()
    return TemplateResponse(
        request,
        'icekit_events/index.html',
        {'occurrences': visible_occurrences},
    )
def event(request, slug):
    """
    Detail page for a single visible event.

    :param request: Django request object.
    :param slug: slug identifying the event.
    :return: TemplateResponse
    """
    instance = get_object_or_404(models.EventBase.objects.visible(), slug=slug)
    context = RequestContext(request, {
        'event': instance,
        'page': instance,
    })
    # Events may carry their own `template` attribute (legacy notion kept
    # for backwards compatibility); otherwise use the default template.
    template_name = getattr(instance, 'template', 'icekit_events/event.html')
    return TemplateResponse(request, template_name, context)
def event_type(request, slug):
    """Upcoming visible occurrences whose event has the given type as its
    primary or one of its secondary types."""
    requested_type = get_object_or_404(models.EventType.objects.all(), slug=slug)
    matches_type = (Q(event__primary_type=requested_type) |
                    Q(event__secondary_types=requested_type))
    upcoming = models.Occurrence.objects.filter(matches_type).upcoming().visible()
    context = RequestContext(request, {
        'type': requested_type,
        'occurrences': upcoming,
        'page': requested_type,
    })
    return TemplateResponse(request, "icekit_events/type.html", context)
def occurrence(request, event_id, occurrence_id):
    """
    Detail page for one visible occurrence of an event (deprecated).

    :param request: Django request object.
    :param event_id: The `id` associated with the occurrence's event.
    :param occurrence_id: The `id` associated with the occurrence.
    :return: TemplateResponse; raises Http404 when there is no visible match.
    """
    warnings.warn(
        "icekit_events.views.occurrence is deprecated and will disappear in a "
        "future version. If you need this code, copy it into your project."
        , DeprecationWarning
    )
    visible_matches = models.Occurrence.objects.filter(
        event_id=event_id, id=occurrence_id).visible()
    try:
        match = visible_matches[0]
    except IndexError:
        raise Http404
    return TemplateResponse(
        request, 'icekit_events/occurrence.html', {'occurrence': match})
|
import sys
from test import test_support, list_tests
class ListTest(list_tests.CommonTest):
    """Concrete list tests layered on the shared CommonTest suite (Python 2:
    uses xrange, sys.maxint and `print` statements in the driver below)."""
    type2test = list
    def test_basic(self):
        """Construction from lists, tuples and strings; copies are distinct."""
        self.assertEqual(list([]), [])
        l0_3 = [0, 1, 2, 3]
        l0_3_bis = list(l0_3)
        self.assertEqual(l0_3, l0_3_bis)
        self.assertTrue(l0_3 is not l0_3_bis)
        self.assertEqual(list(()), [])
        self.assertEqual(list((0, 1, 2, 3)), [0, 1, 2, 3])
        self.assertEqual(list(''), [])
        self.assertEqual(list('spam'), ['s', 'p', 'a', 'm'])
        if sys.maxsize == 0x7fffffff:
            # This test can currently only work on 32-bit machines.
            # XXX If/when PySequence_Length() returns a ssize_t, it should be
            # XXX re-enabled.
            # Verify clearing of bug #556025.
            # This assumes that the max data size (sys.maxint) == max
            # address size this also assumes that the address size is at
            # least 4 bytes with 8 byte addresses, the bug is not well
            # tested
            #
            # Note: This test is expected to SEGV under Cygwin 1.3.12 or
            # earlier due to a newlib bug. See the following mailing list
            # thread for the details:
            # http://sources.redhat.com/ml/newlib/2002/msg00369.html
            self.assertRaises(MemoryError, list, xrange(sys.maxint // 2))
        # This code used to segfault in Py2.4a3
        x = []
        x.extend(-y for y in x)
        self.assertEqual(x, [])
    def test_truth(self):
        """Empty lists are falsy; non-empty lists are truthy."""
        super(ListTest, self).test_truth()
        self.assertTrue(not [])
        self.assertTrue([42])
    def test_identity(self):
        """Two empty list literals are distinct objects."""
        self.assertTrue([] is not [])
    def test_len(self):
        """len() of empty and small lists."""
        super(ListTest, self).test_len()
        self.assertEqual(len([]), 0)
        self.assertEqual(len([0]), 1)
        self.assertEqual(len([0, 1, 2]), 3)
    def test_overflow(self):
        """Repeating a list beyond the address space must raise, not wrap."""
        lst = [4, 5, 6, 7]
        n = int((sys.maxint*2+2) // len(lst))
        def mul(a, b): return a * b
        def imul(a, b): a *= b
        self.assertRaises((MemoryError, OverflowError), mul, lst, n)
        self.assertRaises((MemoryError, OverflowError), imul, lst, n)
def test_main(verbose=None):
    """Run the ListTest suite; when verbose and on a debug build, repeat it
    while sampling total refcounts to spot reference leaks (Python 2)."""
    test_support.run_unittest(ListTest)
    # verify reference counting
    import sys
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = [None] * 5
        for i in xrange(len(counts)):
            test_support.run_unittest(ListTest)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        print counts
if __name__ == "__main__":
    test_main(verbose=True)
|
import json
#pull return vals where first = location dict, second = street address
#END RESULT OF THIS FILE
#generates a map with:
#key: street address of shelter (so use the other map
# "shelter_name_to_street" in order to get street address)
#Value: dictionary of all distances and durations of trip
# from current (key) shelter to all other shelters
# can get to diff shelters using map above to go from
# name to street address
def populate_shelter_locations():
    """Load the Google distance-matrix JSON dump saved for each shelter.

    Returns a pair:
      * shelter_location_data: {shelter name: {destination street address:
        the distance/duration element for the trip from that shelter}}
      * shelter_name_to_street: {shelter name: origin street address list}

    Fixes over the original: the invalid open mode 'rw' (a ValueError on
    Python 3) is replaced by 'r', files are closed via `with`, and the six
    copy-pasted per-shelter sections are collapsed into one loop.
    """
    # (display name, JSON dump file) pairs.  The last display name is kept
    # exactly as the original wrote it, even though it differs from its
    # file name ("...Control" vs "...Emergency").
    shelters = [
        ("Palo Alto Humane Society", 'PaloAltoHumaneSociety.json'),
        ("Peninsula Humane Society", 'PeninsulaHumaneSociety.json'),
        ("Pets in Need Shelter", 'PetsinNeedShelter.json'),
        ("Silicon Valley Humane Society", 'SiliconValleyHumaneSociety.json'),
        ("Berkeley Humane", 'BerkeleyHumane.json'),
        ("San Francisco Animal Care and Emergency", 'SanFranciscoAnimalCareandControl.json'),
    ]
    shelter_name_to_street = {}
    shelter_location_data = {}
    for name, filename in shelters:
        with open(filename, 'r') as infile:
            location_data = json.load(infile)
        shelter_name_to_street[name] = location_data["origin_addresses"]
        destinations = location_data["destination_addresses"]
        elements = location_data["rows"][0]["elements"]
        # The original always read exactly 5 destinations; keep that bound.
        shelter_location_data[name] = {
            destinations[i]: elements[i] for i in range(5)
        }
    return shelter_location_data, shelter_name_to_street
import pandas as pd
import numpy as np
import string
import nltk
nltk.download('words')
from nltk.corpus import words
DATASET_PATH = "../data/Amharic_Dataset.csv"
JSON_PATH = "../data/data_json.json"
data = pd.read_csv(DATASET_PATH)
data.dropna(inplace = True)
df = data[['category','headline']]
'''remove rows with english words by using nltk corpus'''
Word = list(set(words.words()))
df_final = df[~df['headline'].str.contains('|'.join(Word))]
def remove_punctuations(text):
    """Strip all ASCII punctuation from *text*.

    Uses str.translate with a deletion table — one C-level pass instead of
    the original chain of per-character .replace() calls.
    """
    return text.translate(str.maketrans('', '', string.punctuation))
def remove_unwanted(text):
    """Drop control/layout characters, Ethiopic punctuation and ASCII digits,
    then strip surrounding whitespace.

    Bug fix: the original list contained u'r' — deleting every literal
    letter "r" — where the carriage return u'\r' was clearly intended
    (it sits between u'\t' and u'\xa0' in the escape sequence group).
    """
    unwanted_chars = [u'\n', u'\t', u'\r', u'\xa0', u'â\x80\x93',
                      u"።", u"፤", u"፣", u"‹", u"›", "፡"]
    for char in unwanted_chars:
        text = text.replace(char, '')
    for digit in range(10):
        text = text.replace(str(digit), '')
    return text.strip()
def clean(text):
    """Full cleanup pipeline: punctuation removal, then unwanted-character
    and digit removal (same order as before)."""
    return remove_unwanted(remove_punctuations(text))
df_final['headline'] = df_final['headline'].apply(clean)
'''save as json, remove lines = True if you want the output to be seperated by commas'''
df_final.to_json(JSON_PATH,orient = "records",lines = True)
|
import sys
from itertools import izip, takewhile
# Memo table shared by cost(); keyed on (d, N).
costs = {}


def cost(d, N):
    """Memoized fare for distance d on an N-stop line:
    d*N - d*(d-1)/2, i.e. N + (N-1) + ... + (N-d+1)."""
    try:
        return costs[(d, N)]
    except KeyError:
        value = d * N - (d * (d - 1)) // 2
        costs[(d, N)] = value
        return value
def build_map(Ms):
    """Collapse rides into an ordered list of (segment_length, passengers).

    Ms holds (origin, end, passengers) triples.  Python 2 only: relies on
    sorting the list returned by dict.items(), which py3 views don't allow.
    """
    events = {}
    # Per stop, tally passengers boarding there vs. passengers leaving there.
    for o, e, p in Ms:
        origin = events.get(o) or (0, 0)
        events[o] = (origin[0] + p, origin[1])
        # NOTE(review): `exit` shadows the builtin of the same name.
        exit = events.get(e) or (0, 0)
        events[e] = (exit[0], exit[1] + p)
    m = []
    start = 0
    total = 0
    events = events.items()
    events.sort()
    # Sweep the stops in order: each gap becomes one segment annotated with
    # the number of passengers currently on board across it.
    for s, (enter, leave) in events:
        if s != start:
            m.append((s - start, total))
            start = s
        total += (enter - leave)
    return m
def solve(N, Ms):
    """Return the fare saved by regrouping the rides Ms on an N-stop line.

    Python 2 only: uses a tuple-parameter lambda and xrange.
    """
    # Fare when every ride group pays for its own through-ticket.
    normal_cost = sum(p * cost(e - o, N) for (o, e, p) in Ms)
    m = build_map(Ms)
    routes = []
    # Greedily peel off the maximal leading run of segments that still carry
    # passengers; each run becomes one combined route for `people` riders.
    while m:
        r = list(takewhile((lambda (d, p): p > 0), m))
        if r:
            distance = sum(d for (d, p) in r)
            people = min(p for (d, p) in r)
            for idx in xrange(len(r)):
                (d, p) = m[idx]
                m[idx] = (d, p-people)
            routes.append((distance, people))
        else:
            # Leading segment exhausted: drop it and continue with the rest.
            m = m[1:]
    new_cost = sum(p * cost(d, N) for (d, p) in routes)
    return normal_cost - new_cost
if __name__ == "__main__":
    # Code Jam style driver (Python 2: xrange, list-returning map).
    # Input: T cases; each case gives N stops, M ride groups, then M triples.
    T = int(sys.stdin.readline())
    for test in xrange(1, T+1):
        N, M = map(int, sys.stdin.readline().strip().split())
        Ms = [map(int, sys.stdin.readline().strip().split())
              for m in xrange(M)]
        loss = solve(N, Ms)
        # Answers are reported modulo 1000002013.
        sys.stdout.write("Case #{}: {}\n".format(test, loss % 1000002013))
|
import pytest
from implementations.postfix_calculator import postfix_calculator
# Each case pairs a space-separated postfix expression with its expected value.
@pytest.mark.parametrize(
    "expression, expected_result",
    [
        ("1 6 7 * + 1 -", 42),
        ("1 1 + 6 7 * 2 + -", -42),
        ("1", 1),
        ("1 1 +", 2),
        ("10 2 **", 100),
        ("1 2 -", -1),
    ],
)
def test_postfix_calculator(expression, expected_result):
    """The calculator must evaluate each postfix expression to its value."""
    assert postfix_calculator(expression) == expected_result
|
#!/usr/bin/env python
import numpy as np
import math
types = {0:"技术",1:"天气",2:"娱乐",3:"体育",4:"军事"}
# create dict of each type
def createBOW(chaList, types):
    """Return {class index: set of distinct words seen for that class}."""
    return {index: set(chaList[index]) for index in range(len(types))}
# calculate total number of each word in each type, storing the value in matrix
def traingN0(trainList, types):
    """Build per-class word counts and (pseudo) log-scores.

    Returns (bagList, proMat): bagList maps class index -> word set, and
    proMat[c, w] is the base-3 log of the smoothed count of word w in class c.
    NOTE(review): the hard-coded 5x10 matrices assume at most 5 classes and
    at most 10 distinct words per class — confirm before reusing elsewhere.
    """
    bagList = createBOW(trainList, types)
    mat = np.zeros([5,10])
    sumArr = np.zeros([1,5])
    proMat = np.zeros([5,10])
    # Count occurrences: row = class index, column = position of the word in
    # that class's bag (set iteration order is stable within one run).
    for i in range(len(types)):
        trainCha = trainList[i]
        bagCha = bagList[i]
        for cha in trainCha:
            if cha in bagCha:
                bagChaList = list(bagCha)
                mat[i,bagChaList.index(cha)] += 1
            else:
                print("the word %s is not in the bag"%cha)
    for i in range(len(types)):
        print(mat[i])
        # NOTE(review): normalization is disabled (denominator forced to 1),
        # so proMat holds logs of raw smoothed counts, not probabilities.
        sumArr[0,i] = 1#sum(mat[i])
    mat += 1.0
    # mat += 1.0 above is add-one (Laplace) smoothing; logs are base 3.
    for t in range(len(mat)):
        for r in range(len(mat[t])):
            proMat[t,r] = math.log((mat[t,r]/sumArr[0,t]),3)
    return bagList,proMat
# 计算testEntry中每个词在每类中出现的概率,取最大者最为最终结果
def classifyEntry(dataSet, testEntry, types):
    """Classify testEntry as the type whose accumulated log-scores are highest.

    :param dataSet: per-class training word lists, as fed to traingN0.
    :param testEntry: iterable of words to classify.
    :param types: {class index: class label}.
    :return: label of the best-scoring class.

    Fixes over the original: a float zero-initialized accumulator (np.arange
    produced an int array that biased every class by its own index and cannot
    accept in-place float additions on modern numpy); argmax selection (the
    manual scan compared against 0.0 and so could never pick a negative log
    score); and returning types[index] instead of types[i], which used the
    stale loop variable and always named the last class.
    """
    bagList, proMat = traingN0(dataSet, types)
    pD = np.zeros((1, len(types)))
    for key in bagList:
        for word in testEntry:
            if word in bagList[key]:
                pD[0, key] += proMat[key, list(bagList[key]).index(word)]
    index = int(np.argmax(pD[0]))
    return types[index]
trainList = [["计算机","视觉","科技","计算机"],
["天气","交通","出行","天气"],
["拍戏","电视剧","电影","音乐"],
["游泳","滑冰","比赛","游泳","比赛"],
["波斯湾","石油","伊拉克","美国","伊拉克"]]
testEntry = ["石油","美国"]
# print(traingN0(trainList,types))
print(classifyEntry(trainList,testEntry,types))
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Demo (exported notebook): build the same two-channel pulse program twice,
# once with an explicit barrier...
from qiskit import pulse
from qiskit.test.mock import FakeOpenPulse2Q
backend = FakeOpenPulse2Q()
d0 = pulse.DriveChannel(0)
d1 = pulse.DriveChannel(1)
with pulse.build(backend) as barrier_pulse_prog:
    pulse.play(pulse.Constant(10, 1.0), d0)
    # The barrier makes the d1 pulse start only after the d0 pulse finishes.
    pulse.barrier(d0, d1)
    pulse.play(pulse.Constant(10, 1.0), d1)
# In[2]:
from qiskit.pulse import transforms
# ...and once with sequential alignment; the assert below shows that both
# constructions lower to the same target program.
with pulse.build(backend) as aligned_pulse_prog:
    with pulse.align_sequential():
        pulse.play(pulse.Constant(10, 1.0), d0)
        pulse.play(pulse.Constant(10, 1.0), d1)
barrier_pulse_prog = transforms.target_qobj_transform(barrier_pulse_prog)
aligned_pulse_prog = transforms.target_qobj_transform(aligned_pulse_prog)
assert barrier_pulse_prog == aligned_pulse_prog
# In[3]:
import math
d0 = pulse.DriveChannel(0)
# Mixing gates and pulses: a barrier can also synchronize a qubit's gate
# schedule with a raw drive channel.
with pulse.build(backend) as pulse_prog:
    with pulse.align_right():
        pulse.x(1)
        # Barrier qubit 1 and d0.
        pulse.barrier(1, d0)
        # Due to barrier this will play before the gate on qubit 1.
        pulse.play(pulse.Constant(10, 1.0), d0)
        # This will end at the same time as the pulse above due to
        # the barrier.
        pulse.x(1)
|
class Solution(object):
    def sortColors(self, nums):
        """Sort a list of 0/1/2 values in place and return it.

        Replaces the library sort with the one-pass, O(1)-space Dutch
        national flag partition the "Sort Colors" problem asks for; the
        in-place mutation plus returned list matches the original contract.
        """
        low, mid, high = 0, 0, len(nums) - 1
        while mid <= high:
            if nums[mid] == 0:
                # 0s swap down to the low boundary.
                nums[low], nums[mid] = nums[mid], nums[low]
                low += 1
                mid += 1
            elif nums[mid] == 1:
                mid += 1
            else:
                # 2s swap up to the high boundary; re-examine the swapped-in value.
                nums[mid], nums[high] = nums[high], nums[mid]
                high -= 1
        return nums
|
# -*- coding: utf-8 -*-
"""
Get historical information of a series from /mints. Eg: https://cryptoslam.io/cryptopunks/mints
@author: HP
"""
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import pandas
import time
import requests
from bs4 import BeautifulSoup
from selenium.common.exceptions import ElementClickInterceptedException,StaleElementReferenceException,ElementNotInteractableException,NoSuchElementException
from datetime import datetime, timedelta
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
output_directory = dir_path+"\\cryptoslam_mints" # Data will be outputed here
if not os.path.exists(output_directory): # create the folder if not exists already
os.mkdir(output_directory)
def get_transaction_time_from_etherscan(etherscan_links):
    """Scrape the transaction-time element from each Etherscan page.

    NOTE(review): every iteration launches a fresh Firefox that is never
    quit (leaks browser processes), and the returned list holds selenium
    WebElement objects rather than their text — confirm callers expect that.
    """
    transaction_time_list = list()
    for link in etherscan_links:
        start = time.time()
        browser = webdriver.Firefox()
        browser.get(link)
        time.sleep(1)
        # Absolute XPath is brittle: breaks whenever Etherscan changes layout.
        transaction_time = browser.find_element_by_xpath("/html/body/div[1]/main/div[3]/div[1]/div[2]/div[1]/div/div[4]/div/div[2]/i")
        transaction_time_list.append(transaction_time)
        end = time.time()
        print("one request took "+ str(end - start) + " seconds")
    return transaction_time_list
def find_transaction_time(table_time_column):  # NOT ACCURATE
    """Convert relative-age cells like '3 days ago' to approximate datetimes.

    Months and years are approximated as 30/360 days (hence "NOT ACCURATE").
    Fixes over the original: a cell with an unrecognized unit now maps to the
    current time instead of silently reusing the previous row's value (or
    raising NameError on the first row); dead locals are gone.
    """
    unit_deltas = (
        ("second", timedelta(seconds=1)),
        ("minute", timedelta(minutes=1)),
        ("hour", timedelta(hours=1)),
        ("day", timedelta(days=1)),
        ("month", timedelta(days=30)),
        ("year", timedelta(days=360)),
    )
    timestamps = []
    for cell in table_time_column:
        amount = int(cell.split(" ")[0])
        for unit, delta in unit_deltas:
            if unit in cell:
                timestamps.append(datetime.today() - amount * delta)
                break
        else:
            # Unknown unit: fall back to "now" rather than stale/undefined data.
            timestamps.append(datetime.today())
    return timestamps
def obtain_series_links(series_names):
    """
    obtain links of mint pages from series names.
    returns a list of (slug, url) tuples
    """
    pairs = []
    for name in series_names[0]:
        # "Crypto Punks" -> "crypto-punks"
        slug = "-".join(name.lower().split())
        pairs.append((slug, "https://cryptoslam.io/" + slug + "/mints"))
    return pairs
series_names = pandas.read_pickle("series_names.pkl") # Get series names (cryptopunks, art blocks etc.)
series_main_pages = obtain_series_links(series_names) # contains tuples [("art-blocks","https://cryptoslam.io/art-blocks/mints"),(,)...]
test = [('cryptopunks', 'https://cryptoslam.io/cryptopunks/mints')]
for page in test:
series_names = page[0]
urlpage = page[1]
# If we have it, skip
if os.path.exists(str(output_directory+"\\cryptoslam_"+series_names+"_mints.xlsx")):
continue
options = webdriver.FirefoxOptions()
# options.headless = True
browser = webdriver.Firefox(options=options)
browser.get(urlpage)
# browser.find_element_by_xpath("//select[@name='element_name']/option[text()='option_text']").click()
time.sleep(6)
table_list = []
start = time.time()
# Get 1000 rows (only do it once per series)
try:
ddelement= Select(browser.find_element_by_xpath('/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[1]/div[1]/div/label/select'))
ddelement.select_by_visible_text("1000")
except ElementNotInteractableException as e:
print(e)
time.sleep(2)
ddelement= Select(browser.find_element_by_xpath('/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[1]/div[1]/div/label/select'))
ddelement.select_by_visible_text("1000")
except NoSuchElementException as e:
print(e)
time.sleep(2)
ddelement= Select(browser.find_element_by_xpath('/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[1]/div[1]/div/label/select'))
ddelement.select_by_visible_text("1000")
time.sleep(10) # wait for the page to load 1000 rows
while True : # Keep until all the pages are scraped
soup = BeautifulSoup(browser.page_source)
soup_table = soup.find_all("table")[-1]
soup_table = soup.find("table")
tables = pandas.read_html(str(soup_table))
table = tables[0]
columns_len = len(table.columns)
results_original_owner = browser.find_elements_by_xpath("/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[3]/div/table/tbody/tr/td["+str(columns_len+1)+"]/a")
results_nft = browser.find_elements_by_xpath("/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[3]/div/table/tbody/tr/td[3]/a")
results_etherscan_link = browser.find_elements_by_xpath("/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[3]/div/table/tbody/tr/td[2]/a")
original_owner_data = list()
nft_data = list()
etherscan_links = list()
try:
for result in results_etherscan_link:
link = result.get_attribute("href")
etherscan_links.append(link)
for result in results_original_owner:
product_link = result.get_attribute("data-original-title")
original_owner_data.append(product_link)
for result in results_nft:
product_link = result.get_attribute("href")
nft_data.append(product_link)
except StaleElementReferenceException as e:
print(e)
time.sleep(10)
results_original_owner = browser.find_elements_by_xpath("/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[3]/div/table/tbody/tr/td["+str(columns_len+1)+"]/a")
results_nft = browser.find_elements_by_xpath("/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[3]/div/table/tbody/tr/td[3]/a")
results_etherscan_link = browser.find_elements_by_xpath("/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[3]/div/table/tbody/tr/td[2]/a")
original_owner_data = list()
nft_data = list()
etherscan_links = list()
for result in results_etherscan_link:
link = result.get_attribute("href")
etherscan_links.append(link)
for result in results_original_owner:
product_link = result.get_attribute("data-original-title")
original_owner_data.append(product_link)
for result in results_nft:
product_link = result.get_attribute("href")
nft_data.append(product_link)
table = pandas.read_html(browser.page_source)[0]
table = table[1:]
table["Original Owner"] = original_owner_data[1:]
table["NFT_links"] = nft_data
table["Minted_link"] = etherscan_links
table["Minted"] = find_transaction_time(table["Minted"])
if "Unnamed: 0" in table.columns:
table.drop(labels = ["Unnamed: 0"],axis=1,inplace=True)
table_list.append(table)
browser.execute_script("window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;")
try:
browser.find_element_by_xpath("/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[4]/div[2]/div/ul/li[3]/a").click()
except ElementClickInterceptedException as e:
#print(e)
x_path = "/html/body/div[2]/div/div[5]/div/div/div/div[3]/div[1]/div[4]/div[2]/div/ul/li[3]/a"
# browser.find_element_by_xpath(x_path).click()
element = browser.find_element_by_xpath(x_path)
browser.execute_script("arguments[0].click();", element)
try:
t = table_list[-1].loc[0:1]["Minted_link"]
y = table_list[-2].loc[0:1]["Minted_link"]
if len(table) <= 1 or t.equals(y):
break
except IndexError:
pass
time.sleep(10)
final_table = pandas.concat(table_list)
cols = list(final_table)
cols.remove('Minted')
t = final_table.drop_duplicates(subset=cols,inplace=False)
browser.quit()
t.to_excel(output_directory+"\\cryptoslam_"+series_names+"_mints.xlsx")
end = time.time()
print(end - start) |
#strip()
x=" Hello, My Friend "
print(x.strip())
#len()
x="Hello, My Friend"
print(len(x))
#lower()
x="Hello, My Friend"
print(x.lower())
#upper
x="Hello, My Friend"
print(x.upper())
#replace
x="Hello, My Friend"
print(x.replace("H","M"))
#slpit
x="Hello, My Friend"
print(x.split(","))
|
from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class NewVisitorTest(LiveServerTestCase):
    """Functional test: two users build independent to-do lists via Selenium."""
    def setUp(self):
        """Start a fresh Firefox with a short implicit wait before each test."""
        self.browser = webdriver.Firefox()
        self.browser.implicitly_wait(3)
    def tearDown(self):
        """Close the browser after each test."""
        self.browser.quit()
    def check_for_row_in_list_table(self, row_text):
        """Assert that the list table contains a row with exactly row_text."""
        table = self.browser.find_element_by_id('id_list_table')
        rows = table.find_elements_by_tag_name('tr')
        self.assertIn(row_text, [row.text for row in rows])
    def test_can_start_a_list_and_retrieve_it_later(self):
        """Full user story: create a list, get a unique URL, second user
        gets a separate list that never shows the first user's items."""
        #Saria is developing some better time management practices
        #she checks out our awesome to-do list homepage
        self.browser.get(self.live_server_url)
        #Saria happily notes that the page title mentions "to-do lists"
        self.assertIn('To-Do', self.browser.title)
        header_text = self.browser.find_element_by_tag_name('h1').text
        self.assertIn('To-Do', header_text)
        #The page prompts her to enter a to-do item
        inputbox = self.browser.find_element_by_id('id_new_item')
        self.assertEqual(
            inputbox.get_attribute('placeholder'),
            'Enter a to-do item'
        )
        #She types "Teach Link Awesome New Orcarina Song" into the text box
        inputbox.send_keys('Teach Link Awesome New Orcarina Song')
        #When she types enter, she is taken to a new URL
        #the page updates, and now the page lists
        #"1: Teach Link awesome new orcarina song" as an item in a to-do list
        inputbox.send_keys(Keys.ENTER)
        saria_list_url = self.browser.current_url
        self.assertRegex(saria_list_url, '/lists/.+')
        self.check_for_row_in_list_table('1: Teach Link Awesome New Orcarina Song')
        #There is still a text box for adding an additional item to the list
        #She enters "Check out decrepit Forest Temple"
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('Check out decrepit Forest Temple')
        inputbox.send_keys(Keys.ENTER)
        #The page updates again showing both items
        self.check_for_row_in_list_table('1: Teach Link Awesome New Orcarina Song')
        self.check_for_row_in_list_table('2: Check out decrepit Forest Temple')
        #Saria sets off to figure out what is going on in the forest temple
        #Happy with her list making experience, she tells her friend Nabooru
        #Nabooru, a new user, decides to visit the site
        ## We open a new browser session, to ensure none of Saria's
        ## information is coming through from cookies etc.
        self.browser.quit()
        self.browser = webdriver.Firefox()
        #Nabooru visits the home page. There is no sign of Saria's list
        self.browser.get(self.live_server_url)
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('Teach Link Awesome New Orcarina Song', page_text)
        self.assertNotIn('Check out decrepit Forest Temple', page_text)
        #Nabooru starts a new list by entering a new item.
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('Steal Silver Gauntlets from Spirit Temple')
        inputbox.send_keys(Keys.ENTER)
        #Nabooru gets her own unique URL
        nabooru_list_url = self.browser.current_url
        self.assertRegex(nabooru_list_url, '/lists/.+')
        self.assertNotEqual(nabooru_list_url, saria_list_url)
        #Nabooru's new url still doesn't see anything of Saria's
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('Teach Link Awesome New Orcarina Song', page_text)
        self.assertIn('Steal Silver Gauntlets from Spirit Temple', page_text)
        #Saria is curious if she can come back later to add to the list after
        #she figures out what is going on in the forest temple -- she notices
        #that the site created a unique url for her list and informs her of this
        #self.fail('Finish the test!')
        #She visits the url in a private window -- it's still there!
        #Satisfied she goes back to sleep
|
import numpy as np
import cv2
import os
class sample:
    """One training example: image, ground-truth landmarks, the current
    shape estimate, and the face bounding box."""

    def __init__(self, img, truth, box):
        self.image = img
        # ground-truth landmarks: (npts, 2) array of x/y coordinates
        self.truth = truth
        # running shape estimate, initialised to all zeros
        self.guess = np.zeros(truth.shape)
        # rectangle given by its start and end corner points
        self.box = box
class regressor:
    """One cascade stage: sampled local coordinates plus the trained
    ferns and the features they operate on."""

    def __init__(self, localCoords, ferns, features):
        self.localCoords = localCoords
        self.ferns = ferns
        self.features = features
class feature:
    """Pixel-difference feature: pixel indices m and n, their sampled
    intensity columns, and the correlation score that selected them."""

    def __init__(self, m, n, rho_m, rho_n, coor_rhoDiff):
        self.m = m
        self.n = n
        self.rho_m = rho_m
        self.rho_n = rho_n
        self.coor_rhoDiff = coor_rhoDiff
class fern:
    """A trained fern: one threshold per feature plus the per-bin outputs."""

    def __init__(self, thresholds, outputs):
        self.thresholds = thresholds
        self.outputs = outputs
def loadTrainData(data_path):
    """Load images, landmark annotations and face boxes from *data_path*.

    Every sample consists of 4 related files, hence the division by 4.
    Returns a list of `sample` objects.
    """
    import json  # local import: the module header does not import json

    # BUG FIX: '/' gives a float in Python 3 and range() rejects it
    number_files = len(os.listdir(data_path)) // 4  # 4 files per image
    train_set = []
    for i in range(number_files):
        if i % 50 == 0:
            print('Samples loaded {} / {} ...'.format(i, number_files))
        img = cv2.imread(data_path + 'image_%04d.png' % (i + 1), cv2.IMREAD_GRAYSCALE)
        with open('lfpw-test/image_%04d_original.ljson' % (i + 1), 'r') as fh:
            content = json.load(fh)
        pts = content['groups'][0]['landmarks']
        # BUG FIX: the original indexed with the outer loop variable i
        # here; the inner index j is clearly what was intended.
        for j in range(len(pts)):
            pts[j] = pts[j]['point']
            pts[j][0] = int(float(pts[j][0]))
            pts[j][1] = int(float(pts[j][1]))
            pts[j] = pts[j][::-1]  # swap to (row, col) order
        with open('lfpw-test/image_%04d_original.ljson' % (i + 1), 'r') as fh:
            content = json.load(fh)
        rect = content['landmarks']["points"]
        rect[0][0] = int(float(rect[0][0]))
        rect[0][1] = int(float(rect[0][1]))
        rect[2][0] = int(float(rect[2][0]))
        rect[2][1] = int(float(rect[2][1]))
        rect = [rect[0][::-1], rect[2][::-1]]
        train_set.append(sample(img, np.array(pts), np.array(rect)))
    return train_set
def getDistPupils(shape):
    """Inter-pupil distance used to normalise landmark errors.

    shape: (npts, 2) landmark array; supports the 29-point and 68-point
    annotation schemes. Raises ValueError for any other count (the
    original fell through and raised an obscure UnboundLocalError).
    """
    npts = shape.shape[0]
    if npts == 29:
        # points 7 and 16 (1-based) are the pupils in the 29-point scheme
        dist_pupils = np.linalg.norm(shape[7 - 1, :] - shape[16 - 1, :])
    elif npts == 68:
        # no explicit pupil points: average the four inner eye landmarks
        # BUG FIX: `shape(idx, :)` was MATLAB call syntax, not indexing
        left_eye_4 = [38 - 1, 39 - 1, 41 - 1, 42 - 1]
        right_eye_4 = [44 - 1, 45 - 1, 47 - 1, 48 - 1]
        left_center = np.mean(shape[left_eye_4, :], axis=0)
        right_center = np.mean(shape[right_eye_4, :], axis=0)
        dist_pupils = np.linalg.norm(left_center - right_center)
    else:
        raise ValueError('unsupported number of landmarks: %d' % npts)
    return dist_pupils
def initialization(init_train_set, N_aug, stage='train'):
    """Augment the training set: pair each sample with N_aug initial
    guesses borrowed from other samples' ground-truth shapes.

    Returns the augmented list (the original built it but never
    returned it).
    """
    number_samples = len(init_train_set)
    train_set = []
    # when training we use permuted truths as initial states
    if stage == 'train':
        # BUG FIX: `for x in len(...)` raises TypeError; range() intended
        for sample_index in range(number_samples):
            random_index = np.random.permutation(number_samples)[:N_aug]
            for index in range(N_aug):
                # NOTE(review): this appends the *same* sample object
                # N_aug times, so all copies share one `guess` -- kept
                # as in the original; confirm whether a copy was meant.
                train_set.append(init_train_set[sample_index])
                train_set[-1].guess = init_train_set[random_index[index]].truth
                # align the borrowed shape to this sample's box
                train_set[-1].guess = alignShapeToBox(
                    train_set[-1].guess,
                    init_train_set[random_index[index]].box,
                    train_set[-1].box)
        print('Initialization done. Number of augumented samples: {} x {} = {}'.format(
            number_samples, N_aug, number_samples * N_aug))
    else:
        # when testing, take representative shapes from the train set
        # (not implemented)
        pass
    return train_set
def alignShapeToBox(shape0, box0, box):
    """Translate and scale shape0 from box0 into box.

    shape0: (npts, 2) landmarks; box0/box: 2x2 arrays of corner points.
    """
    npts = shape0.shape[0]  # number of landmarks
    # uniform scale from the x coordinate of the second corner
    # NOTE(review): assumes boxes share an origin so box[1,0] acts as a
    # linear size -- confirm against the box format used by loadTrainData
    scale = box[1, 0] / box0[1, 0]
    # center of the destination box
    box_c_x, box_c_y = np.mean(box, 0)
    # center the shape, scale it, then move it onto the box center
    # BUG FIX: `shape .* scale` was MATLAB syntax and `xc`/`yc` were
    # undefined names in the original.
    shape = shape0 - np.tile(np.mean(shape0, 0), (npts, 1))
    shape = shape * scale
    shape = shape + np.tile([box_c_x, box_c_y], (npts, 1))
    return shape
def computeMeanShape(train_set):
    """Iteratively estimate the mean shape of the training set.

    1) take the first shape as the reference
    2) align all shapes to the reference (Procrustes)
    3) average the aligned shapes into a new reference
    4) repeat until convergence or maxIters

    Returns the mean shape as an (npts, 2) array.
    """
    refshape = train_set[0].guess.reshape(1, -1)
    npts = refshape.size // 2
    nshapes = len(train_set)
    # BUG FIX: bare `zeros`, missing colon and MATLAB `(i,:)` indexing
    alignedShapes = np.zeros((nshapes, npts * 2))
    for i in range(nshapes):
        alignedShapes[i, :] = train_set[i].guess.reshape(1, -1)
    # BUG FIX: the original used MATLAB's 1-based first row here
    refshape = alignedShapes[0, :]
    iters = 0
    diff = float("inf")
    maxIters = 4
    while diff > 1e-2 and iters < maxIters:
        iters += 1
        for i in range(nshapes):
            alignedShapes[i, :] = alignShape(alignedShapes[i, :], refshape)
        refshape_new = np.mean(alignedShapes, 0)
        diff = np.abs(np.max(refshape - refshape_new))
        refshape = refshape_new
    print('MeanShape finished in {} iterations.\n'.format(iters))
    return refshape.reshape(-1, 2)
def alignShape(s1, s0):
    """Procrustes-align the flattened shape s1 onto s0.

    Both inputs are 1 x 2*npts rows; returns the aligned row.
    """
    npts = len(s1) // 2  # BUG FIX: '/' gives a float in Python 3
    s1 = s1.reshape(npts, 2)
    s0 = s0.reshape(npts, 2)
    s, R, t = estimateTransform(s1, s0)
    # apply the similarity transform: x' = s * R @ x + t
    # BUG FIX: the original used elementwise '*' and a bare `tile`
    s1 = s * R.dot(s1.T) + np.tile(t.reshape(2, 1), (1, npts))
    s1 = s1.T
    return s1.reshape(1, npts * 2)
def estimateTransform(source_shape, target_shape):
    """Umeyama least-squares similarity transform.

    Returns (s, R, t) such that s * R @ x + t maps source points onto
    target points. Both inputs are (n, m) point arrays.
    """
    n, m = source_shape.shape
    mu_source = np.mean(source_shape, 0)
    mu_target = np.mean(target_shape, 0)
    d_source = source_shape - np.tile(mu_source, (n, 1))
    sig_source2 = np.sum(d_source * d_source) / n  # source variance
    d_target = target_shape - np.tile(mu_target, (n, 1))
    # cross-covariance of the centred point sets
    sig_source_target = d_target.T.dot(d_source) / n
    det_sig_source_target = np.linalg.det(sig_source_target)
    S = np.eye(m)
    if det_sig_source_target < 0:
        # reflection case: flip the last axis to keep a proper rotation
        S[m - 1, m - 1] = -1
    u, d, vh = np.linalg.svd(sig_source_target, full_matrices=True)
    # BUG FIX: the original mixed elementwise '*' into the rotation and
    # scale formulas and referenced undefined names (sig_p_target, mu_p)
    R = u.dot(S).dot(vh)
    s = np.trace(np.diag(d).dot(S)) / sig_source2
    t = mu_target.T - s * R.dot(mu_source.T)
    return s, R, t
def normalizedShapeTargets(train_set, mean_shape):
    """Regression targets expressed in the mean-shape frame.

    Returns (Y, M_norm): Y[i] is the flattened, normalised guess-to-truth
    offset of sample i; M_norm[i] is the 2x2 similarity matrix (s*R)
    mapping sample i's guess into the mean-shape frame.
    """
    nsamples = len(train_set)
    npts = mean_shape.shape[0]
    M_norm = []  # similarity matrix for each sample
    # BUG FIX: np.zeros takes a shape tuple; target width is npts*2
    Y = np.zeros((nsamples, npts * 2))
    for i in range(nsamples):
        s, R, _ = estimateTransform(train_set[i].guess, mean_shape)
        M_norm.append(s * R)
        diff = train_set[i].truth - train_set[i].guess
        tdiff = M_norm[i].dot(diff.T)
        Y[i, :] = tdiff.T.reshape(1, -1)
    return Y, M_norm
def learnStageRegressor(train_set, Y, M_norm, params):
    """Train one cascade stage: sample P shape-indexed pixels, then fit
    K ferns of F features each via correlation-based feature selection.

    Returns a `regressor` holding the local coordinates, ferns and
    features of this stage. (params['T'] was read but unused.)
    """
    npts = train_set[0].truth.shape[0]
    P = params['P']
    F = params['F']
    K = params['K']
    beta = params['beta']
    kappa = params['kappa']
    # generate local coordinates: landmark index + random offset near it
    print('Generating local coordinates...')
    localCoords = np.zeros((P, 3))  # fpidx, x, y
    for i in range(P):
        # BUG FIX: np.randint does not exist; np.random.randint intended
        localCoords[i, 0] = np.random.randint(0, npts)
        localCoords[i, 1:] = (np.random.uniform(size=(1, 2)) - 0.5) * kappa
    # extract shape indexed pixels
    print('Extracting shape indexed pixels...')
    nsamples = len(train_set)
    M_rho = np.zeros((nsamples, P))
    fp_idx = localCoords[:, 0].astype(int)
    for i in range(nsamples):
        # offsets are defined in the mean-shape frame; map them back
        M_norm_inv = np.linalg.inv(M_norm[i])
        dp = M_norm_inv.dot(localCoords[:, 1:].T).T
        pixPos = train_set[i].guess[fp_idx, :] + dp
        rows, cols = train_set[i].image.shape
        pixPos = np.round(pixPos).astype(int)
        # clamp positions that fall outside the image
        pixPos[:, 0] = np.minimum(np.maximum(pixPos[:, 0], 0), cols - 1)
        pixPos[:, 1] = np.minimum(np.maximum(pixPos[:, 1], 0), rows - 1)
        # image is indexed [row, col]; column 1 holds y, column 0 holds x
        M_rho[i, :] = train_set[i].image[pixPos[:, 1], pixPos[:, 0]]
    # pixel-pixel covariance, reused by the feature selection below
    cov_Rho = np.cov(M_rho, rowvar=False)
    M_rho_centered = M_rho - np.tile(np.mean(M_rho, 0), (M_rho.shape[0], 1))
    diagCovRho = np.diag(cov_Rho)
    # var(rho_m - rho_n) for every pixel pair
    varRhoDRho = (-2.0 * cov_Rho
                  + np.tile(diagCovRho.reshape(-1, 1), (1, P))
                  + np.tile(diagCovRho, (P, 1)))
    inv_varRhoDRho = 1.0 / varRhoDRho  # element-wise inverse
    # construct the ferns
    print('Constructing ferns...')
    ferns = []
    features = []
    for k in range(K):
        features.append(correlationBasedFeatureSelection(
            Y, M_rho, M_rho_centered, inv_varRhoDRho, F))
        ferns.append(trainFern(features[-1], Y, M_rho, beta))
        # subtract this fern's prediction from the remaining targets
        M_diff_rho = np.zeros((nsamples, F))
        for f in range(F):
            # BUG FIX: features is a list of lists, not a 2-D array
            M_diff_rho[:, f] = features[k][f].rho_m - features[k][f].rho_n
        updateMat = evaluateFern_batch(M_diff_rho, ferns[k])
        print('fern %d/%d\tmax(Y) = %.6g, min(Y) = %.6g' % (k, K, np.max(Y), np.min(Y)))
        Y = Y - updateMat
    # BUG FIX: the original assigned to a local named `regressor`,
    # shadowing the class it was constructing
    return regressor(localCoords, ferns, features)
def correlationBasedFeatureSelection(Y, M_rho, M_rho_centered, inv_varRhoDRho, F):
    """Select F pixel-difference features maximally correlated with a
    random projection of the regression targets Y.

    Returns a list of F `feature` objects.
    """
    Lfp = Y.shape[1]
    n, P = M_rho.shape
    features = []
    for i in range(F):
        nu = np.random.randn(Lfp, 1)  # random projection direction
        Yprob = Y.dot(nu)
        # covariance between the projected targets and each pixel column
        # BUG FIX: the original's parentheses summed the wrong expression
        covYprob_rho = np.sum((Yprob - np.mean(Yprob)) * M_rho_centered, 0) / (n - 1)
        covRhoMcovRho = (np.tile(covYprob_rho.reshape(-1, 1), (1, P))
                         - np.tile(covYprob_rho, (P, 1)))
        corrYprob_rhoDrho = covRhoMcovRho * np.sqrt(inv_varRhoDRho)
        # exclude the trivial m == n pairs from the argmax
        for j in range(P):
            corrYprob_rhoDrho[j, j] = -10000.0
        maxCorr = np.max(corrYprob_rhoDrho)
        maxLoc_row, maxLoc_col = np.unravel_index(
            np.argmax(corrYprob_rhoDrho, axis=None), corrYprob_rhoDrho.shape)
        # BUG FIX: the original read `f.m`/`f.n` before any `f` existed
        features.append(feature(maxLoc_row, maxLoc_col,
                                M_rho[:, maxLoc_row], M_rho[:, maxLoc_col],
                                maxCorr))
    return features
# def covVM(v, M_centered):
# [n, ~] = size(M_centered)
#
# mu_v = mean(v)
# res = sum( bsxfun(@times, v-mu_v, M_centered) ) / (n-1)
# res = res'
# return res
# fern training
def trainFern(features, Y, Mrho, beta):
    """Fit a single fern: random thresholds near each feature's mean
    difference, then regularised per-bin mean outputs.
    """
    F = len(features)
    # random thresholds, later squeezed into the observed feature range
    thresholds = np.random.uniform(size=(F, 1))
    for f in range(F):
        fdiff = features[f].rho_m - features[f].rho_n
        maxval = np.max(fdiff)
        minval = np.min(fdiff)
        meanval = np.mean(fdiff)
        # BUG FIX: the original bound this to `range`, shadowing the builtin
        span = min(maxval - meanval, meanval - minval)
        thresholds[f] = (thresholds[f] - 0.5) * 0.2 * span + meanval
    # partition the samples into 2^F bins
    bins = partitionSamples(Mrho, features, thresholds)
    # compute the regression output of each bin
    outputs = computeBinOutputs(bins, Y, beta)
    # BUG FIX: `fern = fern(...)` shadowed the class; construct directly
    return fern(thresholds, outputs)
def partitionSamples(Mrho, features, thresholds):
    """Assign every sample to one of the fern's 2^F bins.

    Returns a list of 2^F index arrays, one per bin.
    """
    F = len(features)
    nsamples = Mrho.shape[0]
    # BUG FIX: np.zeros takes a shape tuple
    diffvecs = np.zeros((nsamples, F))
    for i in range(F):
        diffvecs[:, i] = Mrho[:, features[i].m] - Mrho[:, features[i].n]
    # binarise each feature column against its threshold
    for i in range(F):
        di = diffvecs[:, i]
        lset = np.where(di < thresholds[i])[0]
        # BUG FIX: bare `array` was undefined in the original
        rset = np.setdiff1d(np.arange(nsamples), lset)
        diffvecs[lset, i] = 0
        diffvecs[rset, i] = 1
    # interpret each row of bits as the bin index
    wvec = 2 ** np.arange(F)[:, np.newaxis]
    idxvec = diffvecs.dot(wvec)
    # BUG FIX: the original initialised `binss` but appended to `bins`
    bins = []
    for i in range(2 ** F):
        bins.append(np.where(idxvec == i)[0])
    return bins
def computeBinOutputs(bins, Y, beta):
    """Per-bin output: shrunk mean of the targets that fell in the bin.

    The factor 1 / ((1 + beta/ni) * ni) damps bins with few samples
    (beta is the shrinkage parameter); empty bins keep a zero output.
    """
    Lfp = Y.shape[1]
    nbins = len(bins)
    # BUG FIX: np.zeros takes a shape tuple
    outputs = np.zeros((nbins, Lfp))
    for i in range(nbins):
        ni = len(bins[i])
        if ni == 0:  # empty bin
            continue
        outputs[i, :] = np.sum(Y[bins[i], :], axis=0)
        factor = 1.0 / ((1 + beta / ni) * ni)
        outputs[i, :] = outputs[i, :] * factor
    return outputs
def evaluateFern_batch(diffvecs, fern):
    """Look up the fern output for every row of feature differences.

    NOTE: binarises `diffvecs` in place, as the original did.
    """
    F = len(fern.thresholds)
    nsamples = diffvecs.shape[0]
    for i in range(F):
        di = diffvecs[:, i]
        # BUG FIX: `thresholds` and `array` were undefined names here
        lset = np.where(di < fern.thresholds[i])[0]
        rset = np.setdiff1d(np.arange(nsamples), lset)
        diffvecs[lset, i] = 0
        diffvecs[rset, i] = 1
    wvec = 2 ** np.arange(F)[:, np.newaxis]
    # BUG FIX: fancy indexing requires an integer 1-D index vector
    idxvec = diffvecs.dot(wvec).astype(int).ravel()
    return fern.outputs[idxvec, :]
def updateGuessShapes(trainset, Mnorm, regressor):
    """Apply a trained stage regressor to every sample's guess shape and
    report the largest remaining guess-to-truth error.

    Returns the (mutated) training set.
    """
    nsamples = len(trainset)
    Nfp = trainset[0].truth.shape[0]
    maxError = 0
    F = len(regressor.ferns[0].thresholds)
    K = len(regressor.ferns)
    # BUG FIX: np.zeros takes a shape tuple
    rho_diff = np.zeros((nsamples, F))
    Mds = np.zeros((nsamples, Nfp * 2))
    # accumulate the normalised-frame updates of all K ferns
    for k in range(K):
        for f in range(F):
            # BUG FIX: features is a list of lists, not a 2-D array
            rho_diff[:, f] = (regressor.features[k][f].rho_m
                              - regressor.features[k][f].rho_n)
        Mds = Mds + evaluateFern_batch(rho_diff, regressor.ferns[k])
    for i in range(nsamples):
        ds = Mds[i, :]
        ds = ds.reshape(Nfp, 2).T
        # map the update back from the mean-shape frame to image space
        ds = np.linalg.inv(Mnorm[i]).dot(ds)
        ds = ds.T
        trainset[i].guess = trainset[i].guess + ds
        error = (trainset[i].truth - trainset[i].guess).reshape(-1, 1)
        maxError = max(maxError, np.linalg.norm(error))
    print('Maxerror : {}'.format(maxError))
    return trainset
|
class Solution:
    def insert(self, intervals, newInterval):
        """Insert newInterval into intervals and merge all overlaps.

        intervals: list of [start, end] pairs (mutated: appended to and
        sorted in place). Returns a new list of merged intervals.
        """
        # BUG FIX: removed the stray debug print of the input list
        intervals += [newInterval]
        intervals.sort()
        merged = []
        for interval in intervals:
            if not merged or merged[-1][1] < interval[0]:
                merged.append(interval)
            else:
                # overlap: extend the last merged interval's right edge
                merged[-1][1] = max(merged[-1][1], interval[1])
        return merged
# quick manual check against the classic LeetCode example
s = Solution()
print(s.insert([[1,3],[6,9]],[2,5]))
|
20,927 | eab74ef411b58bd94b6cb5420e8187d7560b43e4 | # -*- coding: utf-8 -*-
# Time : 2019/12/30 0030 17:21
# Author : dengfan
import shutil, os
xml_path = r'E:\study_code\TC_CQ\data\train\xml_new'
train_files = r'train_big.txt'
valid_files = r'valid_big.txt'
train_save = r'./big_train_xml'
valid_save = r'./big_valid_xml'
def get_name(name):
    """Read a file of bare basenames and return them with '.xml' appended."""
    with open(name, 'r') as fh:
        return [line.strip() + '.xml' for line in fh]
# Build the two file lists, then copy each referenced annotation into
# its train / validation output directory.
train_xml = get_name(train_files)
valid_xml = get_name(valid_files)
for i in train_xml:
    shutil.copyfile(os.path.join(xml_path, i), os.path.join(train_save, i))
for i in valid_xml:
    shutil.copyfile(os.path.join(xml_path, i), os.path.join(valid_save, i))
20,928 | d9d256588d932447ac48a927522f3fe40181834b | import argparse
import numpy as np
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable
cuda = True if torch.cuda.is_available() else False
class Generator(nn.Module):
    """DCGAN-style generator: 100-d latent vector -> 3x64x64 image in [-1, 1]."""

    def __init__(self):
        super(Generator, self).__init__()
        self.latent_dim = 100
        # spatial size before the two 2x upsampling steps (64 / 4)
        self.init_size = 64 // 4
        self.l1 = nn.Sequential(
            nn.Linear(self.latent_dim, 128 * self.init_size ** 2))
        self.conv_blocks = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 128, 3, stride=1, padding=1),
            nn.BatchNorm2d(128, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 3, 3, stride=1, padding=1),
            nn.Tanh(),
        )

    def forward(self, z):
        """Map a (batch, 100) latent batch to (batch, 3, 64, 64) images."""
        hidden = self.l1(z)
        hidden = hidden.view(hidden.shape[0], 128, self.init_size, self.init_size)
        return self.conv_blocks(hidden)
# Load the trained generator weights, sample 25 fixed latent vectors and
# save the generated images as a 5x5 grid.
generator = Generator()
if cuda:
    generator.cuda()
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
generator.load_state_dict(torch.load('./model_500.pth'))
generator.eval()
latent_dim=100
np.random.seed(1)  # fixed seed -> reproducible sample grid
z = Variable(Tensor(np.random.normal(0, 1, (25, latent_dim))))
gen_imgs = generator(z)
gen_imgs = (gen_imgs + 1) / 2 # rescale to (0,1)
#print(type(gen_imgs.data[0].cpu().numpy()))
import matplotlib.pyplot as plt
r, c = 5, 5
fig, axs = plt.subplots(r, c)
cnt = 0
for i in range(r):
    for j in range(c):
        # CHW tensor -> HWC image for imshow
        axs[i,j].imshow(gen_imgs.data[cnt].cpu().numpy().transpose(1,2,0))
        axs[i,j].axis('off')
        cnt += 1
fig.savefig("./samples/gan.png")
plt.close()
|
20,929 | 65c59f752f1f8620b4d646483b588eec6026f031 | from unittest import TestCase, main
import cv2
class TestLaneFinder(TestCase):
    """Smoke tests for the LaneUtils pipeline on a fixed sample frame."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        from LaneUtils.LaneUtils import getSceneCanny
        from LaneUtils.LaneUtils import getRegOfInterest
        from LaneUtils.LaneUtils import getPointsFromLine
        from LaneUtils.LaneUtils import drawLanesLines
        # run the full pipeline once; the tests only inspect the outputs
        self.frame = cv2.imread("../data/jpgs/0.jpg")
        self.canny = getSceneCanny("../data/jpgs/0.jpg")
        self.mask = getRegOfInterest(self.canny)
        self.linePoints = getPointsFromLine(self.frame.shape, self.mask)
        self.sceneLanes = drawLanesLines(self.frame, self.mask)

    def test_getSceneCanny(self):
        # BUG FIX: the original referenced an undefined name `edgesFrame`;
        # the canny image computed in __init__ is what was meant.
        msg1 = "edgesFrame doesn't have consistent shape with the source frame"
        self.assertEqual(self.frame.shape[:2], self.canny.shape[:2], msg=msg1)
        msg2 = "Colored Frame has the same channels of the grayFrame"
        self.assertNotEqual(self.frame.shape, self.canny.shape, msg=msg2)
        msg3 = "edgesFrame must have a single color channel"
        self.assertEqual(2, len(self.canny.shape), msg=msg3)

    def test_getRegOfInterest(self):
        msg1 = "mask doesn't have consistent shape with the source frame"
        self.assertEqual(self.frame.shape[:2], self.mask.shape[:2], msg=msg1)
        msg2 = "mask must have a single color channel"
        self.assertEqual(2, len(self.mask.shape), msg=msg2)

    def test_getPointsFromLine(self):
        msg1 = "linePoints must be numpy array of 4 elements (x1, y1, x2, y2)"
        self.assertEqual(4, len(self.linePoints.shape), msg=msg1)

    def test_drawLanesLines(self):
        msg1 = "detected lanes scene must be consistent with the source scene"
        self.assertEqual(self.frame.shape, self.sceneLanes.shape, msg=msg1)
if __name__ == "__main__":
main()
|
20,930 | 4c8376ac7034588c99cd8b3c689c561ab257785b | import torch
# Demo of torch.gather along dim=1: for each row i of `a`, pick the
# element at column b[i][0], producing a (3, 1) result.
a = torch.randn(3,2)
b = torch.tensor([[1],[0],[1]])
y = torch.gather(a,1,b)
print(a)
print(y)
20,931 | 9a4808a999d93c85c59bed409ebf26bb99e71897 | import quandl
import pandas as pd
import pickle
from matplotlib import pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
# Not necessary, I just do this so I do not show my API key.
api_key = open('quandlapikey.txt', 'r').read()
sub_df = pd.DataFrame()
usa_df = pd.DataFrame()
def state_list():
    """Scrape the US state abbreviations from Wikipedia's state-list page.

    Returns column 1 of the first table with the header row dropped.
    """
    tables = pd.read_html('https://simple.wikipedia.org/wiki/List_of_U.S._states')
    return tables[0][1][1:]
def grab_initial_state_data():
    """Download the Freddie Mac HPI for every state, convert each series
    to percent change from its first observation, and pickle the joined
    DataFrame to 'pickle.pickle'.
    """
    states = state_list()
    main_df = pd.DataFrame()
    for abbv in states:
        query = "FMAC/HPI_" + str(abbv)
        df = quandl.get(query, authtoken=api_key)
        # percent change relative to the first observation
        col = (df['SA Value'] - df['SA Value'][0]) / df['SA Value'][0] * 100.0
        col = col.rename(abbv)
        # BUG FIX: the original wrote every state's series into a single
        # shared module-level frame (`sub_df`), renaming its only column
        # each iteration, so only the last state survived; join every
        # state's column into the accumulated frame instead.
        if main_df.empty:
            main_df = col.to_frame()
        else:
            main_df = main_df.join(col)
    with open('pickle.pickle', 'wb') as pickle_out:
        pickle.dump(main_df, pickle_out)
pickle_out.close()
pickle_in = open('pickle.pickle', 'rb')
HPI_data = pickle.load(pickle_in)
HPI_data.to_pickle('pickle2.pickle')
HPI_data2 = pd.read_pickle('pickle2.pickle')
# print(HPI_data2.head())
# print('************ HPI_data2 ************')
def HPI_Benchmark():
    """Nationwide HPI as percent change from its first observation.

    NOTE(review): writes into the module-level `usa_df` frame rather
    than a local, mirroring grab_initial_state_data's use of globals --
    repeated calls mutate the same frame; confirm this is intended.
    """
    df = quandl.get("FMAC/HPI_USA", authtoken=api_key)
    # print(df.columns.values)
    # print(df.head())
    usa_df['United States'] = df['SA Value']
    usa_df['United States'] = (usa_df['United States'] - usa_df['United States'][0]) / usa_df['United States'][0] * 100.0
    # df['Value'] = (df['Value'] - df['Value'][0]) / df['Value'][0] * 100.0
    # print(usa_df.columns.values)
    # print(usa_df.head())
    return usa_df
def mortgage_30y():
    """30-year mortgage rate since 1975 as percent change from the first
    observation, resampled to monthly means in a column named 'M30'."""
    rates = quandl.get("FMAC/MORTG", trim_start="1975-01-01", authtoken=api_key)
    base = rates['Value'][0]
    rates['Value'] = (rates['Value'] - base) / base * 100.0
    # daily upsample then monthly mean, matching the other series' index
    rates = rates.resample('D').mean()
    rates = rates.resample('M').mean()
    rates.columns = ['M30']
    return rates
def gdp_data():
    """GDP series as percent change from its first value, monthly mean,
    returned as a Series named 'GDP'."""
    frame = quandl.get("BCB/4385", trim_start="1975-01-01", authtoken=api_key)
    first = frame["Value"][0]
    frame["Value"] = (frame["Value"] - first) / first * 100.0
    frame = frame.resample('M').mean()
    frame.rename(columns={'Value': 'GDP'}, inplace=True)
    return frame['GDP']
def us_unemployment():
    """US unemployment series as percent change from its first value,
    resampled daily then monthly, returned as a Series named 'UNE'."""
    frame = quandl.get("FRED/LNU04023705", trim_start="1975-01-01", authtoken=api_key)
    first = frame["Value"][0]
    frame["Value"] = (frame["Value"] - first) / first * 100.0
    frame = frame.resample('1D').mean()
    frame = frame.resample('M').mean()
    frame.rename(columns={'Value': 'UNE'}, inplace=True)
    return frame['UNE']
def sp500_data():
    """S&P 500 close since 1975 as percent change from its first value,
    monthly mean, returned as a Series named 'sp500'."""
    frame = quandl.get("BCIW/_INX", trim_start="1975-01-01", authtoken=api_key)
    first = frame["Close"][0]
    frame["Close"] = (frame["Close"] - first) / first * 100.0
    frame = frame.resample('M').mean()
    frame.rename(columns={'Close': 'sp500'}, inplace=True)
    return frame['sp500']
# Assemble the full macro dataset: state HPIs (pickled earlier), the US
# benchmark, mortgage rates, unemployment and GDP plus the S&P 500, then
# pickle the joined frame for later modelling.
US_GDP = gdp_data()
US_unemployment = us_unemployment()
sp500 = sp500_data()
m30 = mortgage_30y()
HPI_data = pd.read_pickle('pickle.pickle')
HPI_benchmar = HPI_Benchmark()
HPI = HPI_data.join([HPI_benchmar, m30, US_unemployment, US_GDP, sp500])
#HPI = HPI_data.join([m30, US_unemployment, US_GDP, sp500])
#HPI.dropna(inplace = True)
print(HPI)
print(HPI.corr())
HPI.to_pickle('HPI.pickle')
|
20,932 | 7882bbeaf2a2ab19fd0b957adfe00ed5dec46857 | import numpy as np
from flask import Flask, request, render_template
import pickle
app = Flask(__name__)
model = pickle.load(open('model1.pkl', 'rb'))
@app.route('/')
def home():
    """Serve the prediction input form."""
    return render_template('index.html')
@app.route('/predict',methods=['POST'])
def predict():
    """Render the BMI-category prediction for the submitted form values.

    Reads all form fields as floats, feeds them to the pickled model and
    maps the predicted class index (0-5) onto its category label.
    """
    int_features = [float(x) for x in request.form.values()]
    final_features = [np.array(int_features)]
    prediction = model.predict(final_features)
    output = round(prediction[0], 2)
    # IDIOM: replaced the six-branch if/elif chain with a lookup table;
    # an out-of-range class now raises KeyError instead of NameError
    categories = {
        0: 'Extremely Weak',
        1: 'Weak',
        2: 'Healthy',
        3: 'Over Weight',
        4: 'Obese',
        5: 'Extremely Obese',
    }
    output1 = categories[int(output)]
    return render_template('index.html', prediction_text='Person is in {}'.format(output1)+' condition')
if __name__ == "__main__":
app.run(debug=True)
|
20,933 | 4f023575bb5c8ef0538fea658ec8cf5c6efbed1d | import gtk
class BaseBuffer(gtk.TextBuffer):
    """Plain TextBuffer exposing the sourceview undo API as no-ops, so
    callers can treat plain and source buffers uniformly."""

    def can_undo(self):
        # a plain text buffer keeps no undo history
        return False

    def can_redo(self):
        return False

    def begin_not_undoable_action(self):
        # nothing to suspend: there is no undo stack
        pass

    def end_not_undoable_action(self):
        pass
BaseView = gtk.TextView |
20,934 | 5f4cadacb1793422e19fa090af9d0dec9ca56873 | #!flaskenv/bin/python
from app import app
app.run(debug=False) |
20,935 | 8391471129ac6f52ce45264ffdcbe6931445215b | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import json
import orjson
import yaml
class Equals(yaml.YAMLObject):
    """Custom `!Equals` YAML tag: true when all listed values are equal."""
    yaml_loader = yaml.SafeLoader
    yaml_dumper = yaml.SafeDumper
    yaml_tag = u"!Equals"

    def __init__(self, values):
        self.values = values

    @classmethod
    def from_yaml(cls, constructor, node):
        # the node is a YAML sequence of values to compare
        return cls(values=constructor.construct_sequence(node))

    @classmethod
    def to_yaml(cls, dumper, data):
        return dumper.represent_sequence(cls.yaml_tag, data.values)

    def get_result(self):
        # vacuously true for an empty sequence
        return all(v == self.values[0] for v in self.values)
class Not(yaml.YAMLObject):
    """Custom `!Not` YAML tag: logical negation of the first value."""
    yaml_loader = yaml.SafeLoader
    yaml_dumper = yaml.SafeDumper
    yaml_tag = u"!Not"

    def __init__(self, values):
        self.values = values

    @classmethod
    def from_yaml(cls, constructor, node):
        # the node is a YAML sequence; only the first entry is negated
        return cls(values=constructor.construct_sequence(node))

    @classmethod
    def to_yaml(cls, dumper, data):
        return dumper.represent_sequence(cls.yaml_tag, data.values)

    def get_result(self):
        return not self.values[0]
def load(what):
    """Parse YAML text with the SafeLoader (honours the custom tags)."""
    return yaml.safe_load(what)
def dump(what):
    """Serialise to block-style YAML; the huge width avoids line wrapping."""
    return yaml.safe_dump(what, default_flow_style=False, width=1000000)
def dump_as_json(input):
    """Serialise to a JSON string; non-JSON types fall back to str()."""
    return json.dumps(input, default=str)
def load_as_json(input):
    """Deserialise a JSON string via orjson (thin alias of json_loads)."""
    return json_loads(input)
def json_dumps(obj):
    """Serialise with orjson, 2-space indented; note orjson returns bytes."""
    return orjson.dumps(obj, option=orjson.OPT_INDENT_2)
def json_loads(s):
    """Parse a JSON bytes/str payload with orjson."""
    return orjson.loads(s)
|
20,936 | 31b7e0207a0b48c1d3dd6f4d4bb6f903855c66d3 | # -*- coding: utf-8 -*-
import json
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from .models import Campaign
from .models import PlantedObject
from .models import Planting
def index(request):
    """Landing page: list every campaign."""
    campaigns = Campaign.objects.all()
    return render(request, 'index.html', context={
        'campaigns': campaigns,
    })
@csrf_exempt
def new_campaign(request):
    """Create a campaign from a JSON POST body; return its URL as JSON.

    GET (or non-AJAX POST) renders the creation form instead.
    """
    # BUG FIX: `request.is_ajax` is a method; without parentheses the
    # reference is always truthy, so the check never filtered anything.
    if request.method == 'POST' and request.is_ajax():
        campaign_data = json.loads(request.body.decode('utf-8'))
        campaign = Campaign(**campaign_data)
        campaign.save()
        return HttpResponse('{"url": "%s"}' % reverse(
            'show_campaign',
            kwargs={'id_': str(campaign.id)}))
    return render(request, 'new_campaign.html')
def show_campaign(request, id_):
    """Detail page for one campaign and all of its plantings."""
    campaign = Campaign.objects.get(id=id_)
    plantings = Planting.objects.filter(campaign=campaign)
    return render(request, 'show_campaign.html', context={
        'campaign': campaign,
        'plantings': plantings,
    })
@csrf_exempt
def new_planting(request, id_):
    """Create a planting (and its planted objects) for campaign *id_*.

    Expects a JSON body with an 'objects' mapping; returns the new
    planting's URL as JSON. GET renders the creation form.
    """
    campaign = Campaign.objects.get(id=id_)
    # BUG FIX: `request.is_ajax` is a method; without parentheses the
    # reference is always truthy, so the check never filtered anything.
    if request.method == 'POST' and request.is_ajax():
        planting = Planting(campaign=campaign)
        planting.save()
        planting_data = json.loads(request.body.decode('utf-8'))
        objects = planting_data['objects']
        for o in objects.values():
            PlantedObject(
                planting=planting, object_id=o['objectId'], x=o['x'], y=o['y'],
                width=o['scale']).save()
        return HttpResponse('{"url": "%s"}' % reverse(
            'show_planting',
            kwargs={'campaign_id': id_, 'planting_id': planting.id}))
    return render(request, 'new_planting.html', context={
        'campaign': campaign,
    })
def show_planting(request, campaign_id, planting_id):
    """Detail page for one planting and its planted objects."""
    campaign = Campaign.objects.get(id=campaign_id)
    planting = Planting.objects.get(id=planting_id)
    planted_objects = PlantedObject.objects.filter(planting=planting)
    return render(request, 'show_planting.html', context={
        'campaign': campaign,
        'planting': planting,
        'planted_objects': planted_objects,
    })
20,937 | e172a1a042ea5da69ea817724e43f2df3cfa721a | from django.urls import path
from . import views
app_name = 'accounts'
urlpatterns = [
path('signup/',views.CreateUserView.as_view(), name = 'signup'),
path('login/done',views.RegisteredView.as_view(), name = 'create_user_done'),
]
|
20,938 | e6168b55ad5ba0209d0538d3053344549443efc9 | import numpy as np
# Split the labelled prime dataset into positive (label 1) and negative
# (label 0) rows and save each subset to its own file.
data = np.loadtxt("file/PrimeData_10_million.txt", delimiter=',', dtype=int)
positiveData = data[data[:, 1] == 1, :]
print(positiveData)
print("positive data number:", positiveData.shape[0])
negativeData = data[data[:, 1] == 0, :]
# BUG FIX: the original printed "positive data number" for the negatives
print("negative data number:", negativeData.shape[0])
np.savetxt("file/PositivePrimeData_10_million.txt", positiveData, delimiter=',', fmt='%d')
np.savetxt("file/NegativePrimeData_10_million.txt", negativeData, delimiter=',', fmt='%d')
20,939 | ca864c257df78e26a65ae6b91dbec02957abf8d6 | from projects.models.workers import WorkerAssignment
def create_worker_transfer(worker, new_assigned_supervisor_user):
    """Record a supervisor hand-over for *worker* and point the worker at
    the new supervisor.

    Returns the created WorkerAssignment audit row.
    NOTE(review): `worker.assigned_to` is changed but `worker.save()` is
    not called here -- presumably the caller persists the worker; confirm.
    """
    worker_assignment = WorkerAssignment.objects.create(
        worker=worker,
        from_supervisor=worker.assigned_to,
        to_supervisor=new_assigned_supervisor_user
    )
    worker.assigned_to = new_assigned_supervisor_user
    return worker_assignment
20,940 | 65fe9949a737ccc0ce65a5f11ee3623c23dd631b | # -*- coding: utf-8 -*-
import webapp2
import logging
import json
from models import *
from utils import *
class BaseHandler(webapp2.RequestHandler):
pass
class SEU_HANDLER(BaseHandler):
pass
class CommentsHandler(BaseHandler):
    """Read and write the comments attached to a post."""

    def get(self, id_institution, id_post):
        """Return the post's comments serialised as JSON."""
        def date_handler(obj):
            # make dates and user objects JSON-serialisable
            if hasattr(obj, 'isoformat'):
                return obj.isoformat()
            elif hasattr(obj, 'email'):
                return obj.email()
            return obj

        def data2json(data):
            return json.dumps(
                data,
                default=date_handler,
                indent=2,
                separators=(',', ': '),
                ensure_ascii=False
            )

        post = Post.get_by_id(int(id_post))
        all_comments = post.comments
        self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
        self.response.write(data2json(all_comments))

    def post(self, id_institution, id_post):
        """Append the raw request body as a new comment on the post."""
        data = self.request.body
        post = Post.get_by_id(int(id_post))
        if not post.comments:
            post.comments = []
        post.comments.append(data)
        post.put()
        self.response.write(data)

    def patch(self, id_institution, id_post):
        """Look up one comment by index (the update itself is still TODO)."""
        data = json.loads(self.request.body)
        # BUG FIX: `data` is a dict, so use item access; the original
        # read `data.indice` and then an undefined name `indice`.
        index = data['indice']
        post = Post.get_by_id(int(id_post))
        comments = post.comments
        comment = comments[index]
        pass
class TimelineInstitutionHandler(BaseHandler):
    """Read-only access to an institution's timeline, serialised as JSON."""
    def get(self, id_institution):
        # serialisation helpers: make dates, user objects and ndb keys
        # JSON-serialisable (duplicated across handlers in this module)
        def date_handler(obj):
            if hasattr(obj, 'isoformat'):
                return obj.isoformat()
            elif hasattr(obj, 'email'):
                return obj.email()
            if isinstance(obj, ndb.Key):
                return obj.integer_id()
            return obj
        def data2json(data):
            return json.dumps(
                data,
                default=date_handler,
                indent=2,
                separators=(',', ': '),
                ensure_ascii=False
            )
        institution = Institution.get_by_id(int(id_institution))
        timeline = institution.timeline
        self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
        self.response.write(data2json(timeline))
class InstitutionHandler(BaseHandler):
    """CRUD endpoints for Institution entities."""
    #Method to get the institution by id
    def get(self, institutionId):
        id = int(institutionId)
        data = Institution.get_by_id(id)
        self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
        # NOTE(review): data2json is not defined in this class; presumably
        # it comes from the `from utils import *` at the top -- confirm.
        self.response.write(data2json(data.to_dict()))
    #Method to post a new institution
    def post(self):
        data = json.loads(self.request.body)
        newInstitution = Institution()
        #Create User Admin
        admin = User()
        admin.email = data['email_admin']
        admin.put()
        #Create Institution
        newInstitution.admin = admin.key
        newInstitution.name = data['name']
        newInstitution.email_admin = data['email_admin']
        newInstitution.parent_institution = data.get('parent_institution')
        newInstitution.state = data.get('state')
        newInstitution.put()
        #Att User Admin
        admin.institutions_admin.append(newInstitution.key)
        admin.put()
        #Create Timeline
        timeline = Timeline()
        timeline.put()
        newInstitution.timeline = timeline.key
        newInstitution.put()
        self.response.write(data2json(newInstitution.to_dict()))
        self.response.set_status(201)  # 201 Created
    #Method to update an institution
    def patch(self):
        # update is not implemented yet
        pass
    #Method to delete an institution by id
    def delete(self, institutionId):
        # soft delete: mark the entity inactive instead of removing it
        id = int(institutionId)
        institution = Institution.get_by_id(id)
        institution.state = 'inactive'
        institution.put()
        self.response.write(data2json(institution.to_dict()))
class ErroHandler(webapp2.RequestHandler):
    """Fallback handler for unmatched routes."""
    def get(self):
        # NOTE(review): message is Portuguese and apparently misspelled
        # ("Inexistente"); left unchanged since it is runtime output.
        self.response.write("Rota Inesistente")
class InstitutionMembersHandler(BaseHandler):
    """List and add the members of an institution."""

    def get(self, id):
        """Write the member ids of institution *id*, or 'Wrong id'."""
        institution = Institution.get_by_id(int(id))
        if institution:
            members = institution.members
            # member keys -> integer ids (renamed: `list` shadowed builtin)
            member_ids = [member.integer_id() for member in members]
            self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
            self.response.write(member_ids)
        else:
            self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
            # BUG FIX: the original wrote to `self.responde` (typo) and
            # would have raised AttributeError on this error path.
            self.response.write("Wrong id")

    def post(self, id):
        """Add the user referenced by the JSON body as a member."""
        institution = Institution.get_by_id(int(id))
        if institution:
            data = json.loads(self.request.body)
            user_id = data['id']
            user = User.get_by_id(int(user_id))
            # link both sides of the membership relation
            institution.members.append(user.key)
            user.institutions.append(institution.key)
            # persist both entities
            user.put()
            institution.put()
            self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
            self.response.write(data)
        else:
            self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
            self.response.write("Wrong id")
class InstitutionFollowersHandler(BaseHandler):
    """List and add the followers of an institution (mirrors the
    members handler above)."""
    def get(self, id):
        #gets the institution by id
        institution = Institution.get_by_id(int(id))
        if institution:
            #gets the institution's followers
            followers = institution.followers
            #builds a list of followers' integer ids
            list = [follower.integer_id() for follower in followers]
            self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
            #sends the response
            self.response.write(list)
        else:
            self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
            self.response.write("Wrong id")
    def post(self, id):
        #gets the institution by id
        institution = Institution.get_by_id(int(id))
        if institution:
            #gets the data body
            data = json.loads(self.request.body)
            #gets the user's id
            user_id = data['id']
            #gets the user by id
            user = User.get_by_id(int(user_id))
            #links both sides of the follow relation
            institution.followers.append(user.key)
            user.follows.append(institution.key)
            #saves the institution and the user in datastore
            user.put()
            institution.put()
            self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
            #sends the response
            self.response.write(data)
        else:
            self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
            self.response.write("Wrong id")
class InstitutionPostHandler(BaseHandler):
    """Read and soft-delete a single post of an institution."""
    def get(self, institution_id, post_id):
        # serialisation helpers duplicated from the handlers above
        def date_handler(obj):
            if hasattr(obj, 'isoformat'):
                return obj.isoformat()
            elif hasattr(obj, 'email'):
                return obj.email()
            if isinstance(obj, ndb.Key):
                return obj.integer_id()
            return obj
        def data2json(data):
            return json.dumps(
                data,
                default=date_handler,
                indent=2,
                separators=(',', ': '),
                ensure_ascii=False)
        #Get the datastore post
        post = Post.get_by_id(int(post_id))
        #Verify of the post is deleted
        if post.state != 'deleted':
            self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
            #Converts the post to json and writes to the output
            self.response.write(data2json(post.to_dict()))
        else:
            self.response.write("Post not found")
    def patch(self):
        # NOTE(review): signature lacks the (institution_id, post_id)
        # route args the other verbs take -- confirm routing; unimplemented
        pass
    def delete(self, institution_id, post_id):
        #Get the datastore post
        post = Post.get_by_id(int(post_id))
        #Soft delete: modify state to 'deleted' instead of removing
        post.state = 'deleted'
        post.put()
class UserNotificationsHandler(BaseHandler):
    """REST handler returning a user's notifications."""

    def get(self, user_id):
        user = User.get_by_id(int(user_id))
        if user:
            self.response.write(user.notifications)
        else:
            # Consistent with the other handlers: report an unknown id
            # instead of crashing with AttributeError on None.
            self.response.write("Wrong id")
class UserHandler(BaseHandler):
    """REST handler for creating, fetching and (soft) deleting users."""

    def get(self, userId):
        user = User.get_by_id(int(userId))
        self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
        if user:
            self.response.write(user)
        else:
            # Guard against unknown ids instead of writing None.
            self.response.write("Wrong id")

    def post(self):
        data = json.loads(self.request.body)
        # snake_case instead of the non-conventional `Ids`.
        institution_ids = data.get('institutions')
        if institution_ids:
            newuser = User()
            newuser.email = data.get('email')
            for institution_id in institution_ids:
                newuser.institutions.append(Institution.get_by_id(int(institution_id)).key)
            newuser.state = data.get('state')
            newuser.put()
            self.response.set_status(201)
        else:
            self.response.write("Wrong id")

    def delete(self, userId):
        # Soft delete: flag the user as inactive rather than removing it.
        # (Also avoids shadowing the builtin `id` and crashes on None.)
        user = User.get_by_id(int(userId))
        if user:
            user.state = 'inactive'
            user.put()

    def patch(self):
        pass
class UserTimelineHandler(BaseHandler):
    """REST handler returning the ids of the posts in a user's timeline."""

    def get(self, userId):
        user = User.get_by_id(int(userId))
        posts = user.timeline
        if posts is not None:
            # Distinct names for the iterable and the loop variable; the
            # original reused `posts` for both and shadowed builtin `list`.
            post_ids = [post.integer_id() for post in posts]
            self.response.write(post_ids)
        else:
            self.response.write("No posts yet")
class PostHandler(BaseHandler):
    """REST handler that creates a new post inside an institution."""

    def post(self, id_institution):
        # Resolve the target institution and its admin (the post author).
        institution = Institution.get_by_id(int(id_institution))
        user = institution.admin.get()
        payload = json.loads(self.request.body)

        # Create and store the post itself.
        new_post = Post()
        new_post.author = institution.admin
        new_post.institution = institution.key
        new_post.title = payload['title']
        new_post.body = payload['body']
        new_post.put()

        # Register the new post on the institution, then on its admin
        # (same persistence order as before).
        for entity in (institution, user):
            entity.posts.append(new_post.key)
            entity.put()

        self.response.set_status(201)
# Route table. The previous version contained duplicate routes and four
# placeholder entries ("/api/institution/:id", SEU_HANDLER, ...) that
# referenced an undefined name and made the module fail to import with a
# NameError; they have been removed. Regex routes use raw strings so the
# "\d" escapes are not interpreted by Python.
app = webapp2.WSGIApplication([
    (r"/api/institution", InstitutionHandler),
    (r"/api/institution/(\d+)", InstitutionHandler),
    (r"/api/institution/(\d+)/members", InstitutionMembersHandler),
    (r"/api/institution/(\d+)/followers", InstitutionFollowersHandler),
    (r"/api/institution/(\d+)/timeline", TimelineInstitutionHandler),
    (r"/api/institution/(\d+)/post", PostHandler),
    (r"/api/institution/(\d+)/post/(\d+)", InstitutionPostHandler),
    (r"/api/institution/(\d+)/post/(\d+)/comments", CommentsHandler),
    (r"/api/user", UserHandler),
    (r"/api/user/(\d+)", UserHandler),
    (r"/api/user/(\d+)/timeline", UserTimelineHandler),
    (r"/api/user/(\d+)/notifications", UserNotificationsHandler),
    (r"/api/.*", ErroHandler)
], debug=True)
|
20,941 | 6f8f554a98ca2a58ad44d79655b28b7a19d7a9fb | import functools
from normal.FluentPython_code_master.ch07_closure_deco.clockdeco import clock
@functools.lru_cache()  # <1> memoize: each n is computed only once
@clock  # <2> applied first, so cached hits never reach the timing wrapper
def fibonacci_lru(n):
    """Return the n-th Fibonacci number (naive recursion + memoization)."""
    if n < 2:
        return n
    return fibonacci_lru(n-2) + fibonacci_lru(n-1)
if __name__=='__main__':
print(fibonacci_lru(6))
|
20,942 | d5223e3195b301934810fcf28cdcc21ddeaf784b | liczba = int(input("Podaj liczbę całkowitą: "))
if liczba > 0 and liczba < 100:
print('Liczba jest z zakresu (0, 100)')
# wersja druga
if 0 < liczba < 100:
print('Liczba jest z zakresu (0, 100)')
|
20,943 | 65fc38fd5d1edf17c71460d867995e1652e8cc9a | from django.shortcuts import render
from django.views import generic
from .models import Subred, Thread, Post
# Create your views here.
class SubredsView(generic.ListView):
    """List view showing every Subred; objects exposed as `subreds`."""
    model = Subred
    template_name = 'subs/subred.html'
    context_object_name = 'subreds'
class SubredView(generic.DetailView):
    """Detail view for a single Subred."""
    template_name = 'subs/subreds.html'
    model = Subred

    # NOTE(review): dead code from an earlier subscription feature;
    # `Subscription` is not imported here, so it cannot be re-enabled as-is.
    # def get_context_data(self, **kwargs):
    #     context = super().get_context_data(**kwargs)
    #     subscription, created = Subscription.objects.get_or_create(user=self.request.user, subrediti=self.object)
    #     context['subscribed'] = True if subscription.subscribed else False
    #     return context
20,944 | de9f300432d00dd7e6c0ac0a7ac84b2b5ad71144 | #!/bin/python3
"""
Making Candies
Haker Rank
https://www.hackerrank.com/challenges/making-candies/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=search
"""
import sys
from math import floor, ceil, inf
def minimumPasses(m, w, p, n):
    """Minimum number of passes needed to accumulate at least n candies.

    m machines and w workers produce m*w candies per pass; a new machine
    or worker can be bought for p candies.  Greedy strategy: keep buying
    as soon as a unit is affordable, while tracking the best
    "stop buying now and just grind" finish time seen so far.
    """
    # If one unit costs more than the whole target, never buy anything.
    if p > n:
        return ceil(n / (m * w))

    # Keep m <= w; purchases below always top up the smaller side first.
    m, w = sorted([m, w])

    candies = 0          # candies currently in hand
    passes = 0           # passes spent so far
    best_finish = inf    # best known total if we stop buying

    while True:
        # Run just enough passes to afford one purchase (or a single pass
        # once we can already afford one).
        step = ceil((p - candies) / (m * w)) if candies < p else 1
        passes += step
        candies += step * m * w

        if candies > n:
            break

        # Alternative: stop buying here and produce the remainder as-is.
        best_finish = min(best_finish, passes + ceil((n - candies) / (m * w)))

        # Spend everything we can on new units.
        units = candies // p
        candies %= p

        # First equalise m up to w, then split the rest evenly
        # (an odd leftover unit goes to w).
        gap = w - m
        m += min(gap, units)
        units -= min(gap, units)
        m += units // 2
        w += units // 2 + units % 2

    return min(passes, best_finish)
|
20,945 | dc122e10c7d86e650a3f65dad75ac6c26a72441e | from collections import defaultdict
def main():
    """Read n words of length l from stdin and print, for each resolved
    bucket, the shortest prefix that uniquely identifies its word."""
    n, l = map(int, input().split())  # n = number of words, l = length of each word
    words = []
    for _ in range(n):
        words.append(input())
    # Bucket the words by their first character.
    d = defaultdict(list)
    for word in words:
        d[word[0]].append(word)
    answer = dict()
    # Grow the prefix one character at a time; a bucket of size 1 means
    # its prefix already uniquely identifies the word.
    for i in range(2, l + 1):
        new_d = defaultdict(list)
        for key in d.keys():
            words = d[key]
            if len(words) == 1:
                answer[key] = words[0]
            else:
                # Still ambiguous: re-bucket by the longer prefix.
                for word in words:
                    new_d[word[:i]].append(word)
        d = new_d
    print(answer)
if __name__ == '__main__':
main()
|
20,946 | fba9d7a881dd7c379eb1f7d527f1580a80475035 | import torch.nn as nn
def noBiasDecay(model, lr, weight_decay):
    '''
    No bias decay: apply weight decay only to the weights of convolution
    and fully-connected layers. Biases and all other parameters (e.g.
    norm-layer affine parameters) get no decay, and biases additionally
    use a 2x learning rate.

    In paper [Bag of Tricks for Image Classification with Convolutional Neural Networks](https://arxiv.org/abs/1812.01187)
    Ref: https://github.com/weiaicunzai/Bag_of_Tricks_for_Image_Classification_with_Convolutional_Neural_Networks/blob/master/utils.py

    :param model: torch nn.Module whose parameters are partitioned
    :param lr: base learning rate
    :param weight_decay: decay applied to conv/linear weights only
    :return: list of three param-group dicts for a torch optimizer
    '''
    decay, bias_no_decay, weight_no_decay = [], [], []
    for m in model.modules():
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            decay.append(m.weight)
            if m.bias is not None:
                bias_no_decay.append(m.bias)
        else:
            # Bug fix: guard against None parameters. Modules such as
            # BatchNorm with affine=False define .weight/.bias but set them
            # to None; the original appended None here, breaking the count
            # check below and any optimizer consuming the groups.
            if getattr(m, 'weight', None) is not None:
                weight_no_decay.append(m.weight)
            if getattr(m, 'bias', None) is not None:
                bias_no_decay.append(m.bias)
    # Every parameter must land in exactly one group.
    assert len(list(model.parameters())) == len(decay) + len(bias_no_decay) + len(weight_no_decay)
    # bias using 2*lr
    return [{'params': bias_no_decay, 'lr': 2*lr, 'weight_decay': 0.0},
            {'params': weight_no_decay, 'lr': lr, 'weight_decay': 0.0},
            {'params': decay, 'lr': lr, 'weight_decay': weight_decay}]
20,947 | c92ef2b36770951c996a299c4451d462049e0f1c | # Copyright (c) 2021, Tycho Andersen. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
import filecmp
import os
import os.path
import shutil
import sys
from functools import partial
from glob import glob
BACKUP_SUFFIX = ".migrate.bak"
try:
import bowler
except ImportError:
pass
def rename_hook(query, fro, to):
    """Rename uses of hook name `fro` to `to` in the bowler query."""
    # could match on dotted_name< 'hook' '.' 'subscribe' '.' '{name}' >
    # but the replacement gets more complicated...
    selector = "'{name}'".format(name=fro)
    q = query.select_pattern(selector)
    q.current.kwargs["name"] = fro
    return q.rename(to)


def client_name_updated(query):
    """Rename window_name_change -> client_name_updated"""
    return rename_hook(query, "window_name_change", "client_name_updated")


def tile_master_windows_rename(query):
    # Tile layout: the masterWindows argument was renamed to master_length.
    return query.select_function("Tile").modify_argument("masterWindows", "master_length")


def threaded_poll_text_rename(query):
    # Widget base class rename.
    return query.select_class("ThreadedPollText").rename("ThreadPoolText")


def pacman_to_checkupdates(query):
    # The Pacman widget was generalised into CheckUpdates.
    return query.select_class("Pacman").rename("CheckUpdates")
def reset_format(node, capture, filename):
    """Remove the `format=` keyword argument from a captured class call.

    Operates in place on the lib2to3 syntax tree captured by bowler.
    """
    args = capture.get("class_arguments")
    if args:
        # NOTE(review): 260 appears to be the lib2to3 grammar symbol for an
        # argument list -- verify against pygram before relying on it.
        if args[0].type == 260:  # argument list
            n_children = len(args[0].children)
            for i in range(n_children):
                # we only want to remove the format argument
                if "format" in str(args[0].children[i]):
                    # remove the argument and the trailing or preceeding comma
                    if i == n_children - 1:  # last argument
                        args[0].children[i - 1].remove()
                        args[0].children[i - 1].remove()
                    else:
                        args[0].children[i].remove()
                        args[0].children[i].remove()
                    break
        else:  # there's only one argument
            args[0].remove()
def bitcoin_to_crypto(query):
    # BitcoinTicker became CryptoTicker; its `format` argument is gone,
    # so strip it (reset_format) before renaming the class.
    return query.select_class("BitcoinTicker").modify(reset_format).rename("CryptoTicker")


def hook_main_function(query):
    """Convert a config-file `main(qtile)` function into a startup hook.

    Rewrites `def main(qtile)` into a zero-argument function decorated
    with @hook.subscribe.startup, unless it is already hook-decorated.
    """
    def modify_main(node, capture, filename):
        main = capture.get("function_def")
        # Skip functions already decorated with some startup hook.
        if main.prev_sibling:
            for leaf in main.prev_sibling.leaves():
                if "startup" == leaf.value:
                    return
        # Drop the old `qtile` parameter.
        args = capture.get("function_arguments")
        if args:
            args[0].remove()
        # Prepend the import and the decorator as prefix text.
        main.prefix += "from libqtile import hook, qtile\n"
        main.prefix += "@hook.subscribe.startup\n"

    return query.select_function("main").is_def().modify(modify_main)
# Deprecated new_at_current key replaced by new_client_position.
# In the node, we want to change the key name
# and adapts its value depending of the previous value :
# new_at_current=True => new_client_position=before_current
# new_at_current<>True => new_client_position=after_current
def update_node_nac(node, capture, filename):
    """Rewrite one `new_at_current=<v>` keyword into `new_client_position`."""
    key = capture.get("k")
    key.value = "new_client_position"
    val = capture.get("v")
    # True previously meant "insert before the current client".
    if val.value == "True":
        val.value = "'before_current'"
    else:
        val.value = "'after_current'"


def new_at_current_to_new_client_position(query):
    # Match any keyword argument `new_at_current=<value>`.
    old_pattern = """
    argument< k="new_at_current" "=" v=any >
    """
    return query.select(old_pattern).modify(update_node_nac)
def windowtogroup_groupName_argument(funcname, query):  # noqa: N802
    # The groupName parameter of togroup/cmd_togroup became group_name.
    return query.select_method(funcname).modify_argument("groupName", "group_name")


def command_decorators_changes(query):
    """
    Some commands were renamed when moving from `cmd_` to decorator syntax for
    exposed commands.
    While most code should continue to work, with required changes indicated in log files,
    some changes may cause breakages.
    This migration function attempts to address the key changes.
    """
    return (
        query.select_method("cmd_groups")  # noqa: BLK100
        .rename("get_groups")
        .select_method("cmd_screens")
        .rename("get_screens")
        .select_method("opacity")
        .rename("set_opacity")
        .select_method("cmd_opacity")
        .rename("set_opacity")
        .select_method("hints")
        .rename("get_hints")
        .select_method("cmd_hints")
        .rename("get_hints")
    )
def rename_cmd_methods(query):
    """
    Renames any method call that starts with "cmd_" to remove
    the prefix.
    """
    # Any call expression; `name` captures everything before the call parens.
    select = """power< name=any* trailer< "(" any* ")" > any* >"""

    def modify(node, capture, filename):
        def search_method(item):
            """
            Result will be a nested list of Node and Leaf objects so
            we need to be able to recursively check each object.
            """
            for obj in item:
                if hasattr(obj, "value"):
                    # Leaf: strip the "cmd_" prefix in place.
                    if obj.value.startswith("cmd_"):
                        obj.value = obj.value[4:]
                else:
                    # Node: recurse into its leaves.
                    search_method(obj.leaves())

        cmd_name = capture.get("name")
        search_method(cmd_name)

    return query.select(select).modify(modify)
# All migration steps, applied in order to every config file.
MIGRATIONS = [
    client_name_updated,
    tile_master_windows_rename,
    threaded_poll_text_rename,
    pacman_to_checkupdates,
    bitcoin_to_crypto,
    hook_main_function,
    new_at_current_to_new_client_position,
    partial(windowtogroup_groupName_argument, "togroup"),
    partial(windowtogroup_groupName_argument, "cmd_togroup"),
    command_decorators_changes,
    rename_cmd_methods,
]

# Old module path -> new module path.
MODULE_RENAMES = [
    ("libqtile.command_graph", "libqtile.command.graph"),
    ("libqtile.command_client", "libqtile.command.client"),
    ("libqtile.command_interface", "libqtile.command.interface"),
    ("libqtile.command_object", "libqtile.command.base"),
    ("libqtile.window", "libqtile.backend.x11.window"),
]

for fro, to in MODULE_RENAMES:
    # Default arguments bind fro/to at definition time; without them every
    # generated function would close over the last loop values.
    def f(query, fro=fro, to=to):
        return query.select_module(fro).rename(to)

    MIGRATIONS.append(f)
def file_and_backup(config_dir):
    """Yield (config_file, backup_file) pairs for every .py in config_dir."""
    pattern = os.path.join(config_dir, "*.py")
    for source_path in glob(pattern):
        yield source_path, source_path + BACKUP_SUFFIX
def do_migrate(args):
    """Run every migration over all .py files next to args.config.

    Each file is backed up first; if nothing changed, the (identical)
    backups are removed again.
    """
    # The bowler import at the top of this module is wrapped in
    # try/except, so check sys.modules to see whether it is available.
    if "bowler" not in sys.modules:
        print("bowler can't be found, not migrating config file")
        print("install it and try again")
        sys.exit(1)

    config_dir = os.path.dirname(args.config)
    # Back up every config file before touching anything.
    for py, backup in file_and_backup(config_dir):
        shutil.copyfile(py, backup)

    for m in MIGRATIONS:
        q = bowler.Query(config_dir)
        m(q).execute(interactive=not args.yes, write=True)

    # Compare each file with its backup to see whether anything changed.
    changed = False
    for py, backup in file_and_backup(config_dir):
        backup = py + BACKUP_SUFFIX
        if not filecmp.cmp(py, backup, shallow=False):
            changed = True
            break

    if not changed:
        print("Config unchanged.")
        for _, backup in file_and_backup(config_dir):
            os.remove(backup)
def add_subcommand(subparsers, parents):
    """Register the `migrate` subcommand on the qtile argument parser."""
    parser = subparsers.add_parser(
        "migrate", parents=parents, help="Migrate a configuration file to the current API."
    )
    parser.add_argument(
        "-c",
        "--config",
        action="store",
        default=os.path.expanduser(
            os.path.join(os.getenv("XDG_CONFIG_HOME", "~/.config"), "qtile", "config.py")
        ),
        help="Use the specified configuration file (migrates every .py file in this directory).",
    )
    parser.add_argument(
        "--yes",
        action="store_true",
        help="Automatically apply diffs with no confirmation.",
    )
    parser.set_defaults(func=do_migrate)
|
20,948 | 2ff2419a412d8d75ecd81230e6ad819287c6a109 | def uname(name):
print "Your name is: %s" % name
def gender(sex):
print "You gender is %s ?" % sex
promt = "> "
print "Hey there what is your name?"
name = raw_input(promt)
print "What is you gender %s ?" % name
sex = raw_input(promt)
uname(name)
gender(sex)
print "\nWelcome %s and you said you gender is %s" % (name, sex)
print "Thanks for visited %s, see you next time..!!!" % name
|
20,949 | 4dfb96f5c2312d763e6c5ae7cce15687d7223876 | import turtle
from random import randint, shuffle
from time import sleep
# initialize empty 9x9 grid
# 9x9 grid of cells, 0 = empty.
grid = []
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
grid.append([0, 0, 0, 0, 0, 0, 0, 0, 0])
# Turtle used for all drawing: fastest speed, black pen, hidden cursor.
lTrace = turtle.Turtle()
#lTrace.tracer(0)
lTrace.speed(0)
lTrace.color("#000000")
lTrace.hideturtle()
# Top-left corner of the board in turtle coordinates.
limTopLeft_x = -150
limTopLeft_y = 150
def text(message, x, y, size):
    '''
    Writes message at the given position using the shared lTrace turtle.
    :param message: text to draw
    :param x: x coordinate (turtle space)
    :param y: y coordinate (turtle space)
    :param size: font size in points
    :return: None
    '''
    lFont = ("Garamond", size, "normal")
    lTrace.penup()
    lTrace.goto(x, y)
    lTrace.write(message, align="left", font=lFont)
def drawGrid(grid):
    '''
    Draws the board: horizontal grid lines (thick at every 3x3 boundary)
    and all non-zero cell values.
    :param grid: 9x9 list of lists, 0 meaning an empty cell
    :return: None
    '''
    intDim = 35  # cell size in turtle units
    for row in range(0, 10):
        # Thick pen at box boundaries (rows 0, 3, 6, 9).
        if (row % 3) == 0:
            lTrace.pensize(3)
        else:
            lTrace.pensize(1)
        lTrace.penup()
        lTrace.goto(limTopLeft_x, limTopLeft_y - row * intDim)
        lTrace.pendown()
        lTrace.goto(limTopLeft_x + 9 * intDim, limTopLeft_y - row * intDim)
    # NOTE(review): no vertical lines are drawn -- presumably an omission,
    # confirm whether columns should be rendered too.
    for row in range(0, 9):
        for col in range(0, 9):
            if grid[row][col] != 0:
                # Bug fix: the x coordinate was computed from limTopLeft_y
                # (+150) instead of limTopLeft_x (-150), shifting every
                # digit 300 units to the right of the grid.
                text(grid[row][col], limTopLeft_x + col * intDim + 9, limTopLeft_y - row * intDim - intDim + 8, 18)
def checkGrid(grid):
    '''
    Returns True when every cell of the 9x9 grid is filled (non-zero).
    :param grid: 9x9 list of lists
    :return: bool
    '''
    return all(grid[r][c] != 0 for r in range(9) for c in range(9))
def solveGrid(grid):
    '''
    Backtracking/recursive function that tries all value combinations;
    increments the global `counter` for each completed solution reached,
    so it doubles as a solution counter when digging holes in the puzzle.
    https://www.hackerrank.com/challenges/making-candies/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=search
    :param grid: 9x9 list of lists, 0 = empty cell (modified in place)
    :return: True once a solution is found (propagated up the recursion)
    '''
    global counter
    # Scan cells left-to-right, top-to-bottom for empty ones.
    for i in range(0, 81):
        row = i // 9
        col = i % 9
        if grid[row][col] == 0:
            for value in range(1, 10):
                if not (value in grid[row]):  # check value not already used in this row
                    # Column constraint.
                    if not value in (
                            grid[0][col], grid[1][col], grid[2][col], grid[3][col], grid[4][col], grid[5][col],
                            grid[6][col],
                            grid[7][col], grid[8][col]):
                        # Identify which of the 9 squares we are working on
                        square = []
                        if row < 3:
                            if col < 3:
                                square = [grid[i][0:3] for i in range(0, 3)]
                            elif col < 6:
                                square = [grid[i][3:6] for i in range(0, 3)]
                            else:
                                square = [grid[i][6:9] for i in range(0, 3)]
                        elif row < 6:
                            if col < 3:
                                square = [grid[i][0:3] for i in range(3, 6)]
                            elif col < 6:
                                square = [grid[i][3:6] for i in range(3, 6)]
                            else:
                                square = [grid[i][6:9] for i in range(3, 6)]
                        else:
                            if col < 3:
                                square = [grid[i][0:3] for i in range(6, 9)]
                            elif col < 6:
                                square = [grid[i][3:6] for i in range(6, 9)]
                            else:
                                square = [grid[i][6:9] for i in range(6, 9)]
                        # Check that this value has not already be used on this 3x3 square
                        if not value in (square[0] + square[1] + square[2]):
                            grid[row][col] = value
                            if checkGrid(grid):
                                counter += 1
                                break
                            else:
                                if solveGrid(grid):
                                    return True
    # NOTE(review): no backtracking reset of grid[row][col] and no explicit
    # `return False` here -- callers rely on the global `counter`, not the
    # return value; confirm before reusing this as a general solver.
numberList = [1, 2, 3, 4, 5, 6, 7, 8, 9]
shuffle(numberList)
def fillGrid(grid):
    '''
    Backtracking/recursive function that fills the whole grid with a valid
    random solution (uses the shuffled global `numberList` for randomness).
    :param grid: 9x9 list of lists, 0 = empty cell (modified in place)
    :return: True once the grid is completely filled
    '''
    global counter
    # Find the next empty cell, left-to-right, top-to-bottom.
    for i in range(0, 81):
        row = i // 9
        col = i % 9
        if grid[row][col] == 0:
            # Randomise candidate order so each run produces a new puzzle.
            shuffle(numberList)
            for value in numberList:
                # Check that this value has not already be used on this row
                if not (value in grid[row]):
                    # Check that this value has not already be used on this column
                    if not value in (
                            grid[0][col], grid[1][col], grid[2][col], grid[3][col], grid[4][col], grid[5][col], grid[6][col],
                            grid[7][col], grid[8][col]):
                        # Identify which of the 9 squares we are working on
                        square = []
                        if row < 3:
                            if col < 3:
                                square = [grid[i][0:3] for i in range(0, 3)]
                            elif col < 6:
                                square = [grid[i][3:6] for i in range(0, 3)]
                            else:
                                square = [grid[i][6:9] for i in range(0, 3)]
                        elif row < 6:
                            if col < 3:
                                square = [grid[i][0:3] for i in range(3, 6)]
                            elif col < 6:
                                square = [grid[i][3:6] for i in range(3, 6)]
                            else:
                                square = [grid[i][6:9] for i in range(3, 6)]
                        else:
                            if col < 3:
                                square = [grid[i][0:3] for i in range(6, 9)]
                            elif col < 6:
                                square = [grid[i][3:6] for i in range(6, 9)]
                            else:
                                square = [grid[i][6:9] for i in range(6, 9)]
                        # Check that this value has not already be used on this 3x3 square
                        if not value in (square[0] + square[1] + square[2]):
                            grid[row][col] = value
                            if checkGrid(grid):
                                return True
                            else:
                                if fillGrid(grid):
                                    return True
            # No candidate fit: stop scanning and backtrack.
            break
    # Reset the cell that could not be filled before returning to caller.
    grid[row][col] = 0
#time for number deletion!
# Dig holes: clear random cells, keeping only removals that leave the
# puzzle with exactly one solution.
attempts = 5  #change this number for more difficult puzzles.
counter = 1
while attempts > 0:
    # Pick a random cell and remember its value before clearing it.
    row = randint(0, 8)
    col = randint(0, 8)
    reset = grid[row][col]
    grid[row][col] = 0
    # Count solutions on a copy so the working grid is not destroyed.
    lGridCopy = []
    for r in range(0, 9):
        lGridCopy.append([])
        for c in range(0, 9):
            lGridCopy[r].append(grid[r][c])
    counter = 0
    solveGrid(lGridCopy)
    # Not exactly one solution: undo the removal and burn an attempt.
    if counter != 1:
        grid[row][col] = reset
        attempts -= 1
lTrace.clear()
drawGrid(grid)
lTrace.getscreen().update()
print("grid ready")
20,950 | d29a9440ecc1cf3ea5ca0e49df78f0828a72a657 | import pytest
from alg import graph as gr
from functools import total_ordering
@total_ordering
class Vertex:
    """Auxiliary comparable vertex for testing.

    Must be orderable because tuple comparisons fall back to the second
    element when the first priorities are equal.
    """

    def __init__(self, name):
        self.name = name

    def __eq__(self, other):
        return self.name == other.name

    def __ne__(self, other):
        # Explicit negation of equality.
        return not self.__eq__(other)

    def __lt__(self, other):
        # total_ordering derives <=, >, >= from __eq__ and __lt__.
        return self.name < other.name

    def __repr__(self):
        return "Vertex:{}".format(self.name)
def test_1():
    """Smoke test: adjacency lists keep insertion order; edge weights are
    symmetric (same value queried in either direction)."""
    g = gr.Graph()
    v1 = Vertex("v1")
    v2 = Vertex("v2")
    v3 = Vertex("v3")
    v4 = Vertex("v4")
    v5 = Vertex("v5")
    v6 = Vertex("v6")
    v7 = Vertex("v7")
    v8 = Vertex("v8")
    g.add_edge(v1, v2)
    g.add_edge(v3, v2)
    g.add_edge(v5, v6)
    g.add_edge(v7, v2)
    g.add_edge(v4, v5)
    g.add_edge(v5, v8)
    g.add_edge(v7, v5, 3)
    # Neighbours are reported in insertion order.
    assert g.get_adj(v2) == [v1, v3, v7]
    assert g.get_adj(v5) == [v6, v4, v8, v7]
    # Weight is the same in both directions.
    assert g.get_weight(v5, v7) == 3
    assert g.get_weight(v7, v5) == 3


# NOTE(review): also executed at import time, not just under pytest.
test_1()
|
20,951 | 373333e28766a38b59f6ccdfe45f94e8144ff6bc | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from std_msgs.msg import String, Float64
from sensor_msgs.msg import Joy
import time
from geometry_msgs.msg import Twist
from std_srvs.srv import Empty, EmptyRequest
from std_srvs.srv import Empty, EmptyRequest
from tracked_irl.msg import BaseMotorCmd, FlipMotorCmd
class TeleopControl:
def __init__(self):
print('Base control instance created')
self.sim = bool(rospy.get_param("sim"))
self.pub_track_cmd = rospy.Publisher('/drrobot_jaguar_v6_basemotor_cmd', BaseMotorCmd, queue_size=1)
self.pub_flip_cmd = rospy.Publisher('/drrobot_jaguar_v6_flipmotor_cmd', FlipMotorCmd, queue_size=1)
self.pwm = 0.1
self.msg_tracks = BaseMotorCmd()
self.msg_flips = FlipMotorCmd()
self.state_flips = {
'leftFront': 0,
'rightFront': 0,
'leftRear': 0,
'rightRear': 0
}
self.ready = False
self.MAX_ANGLE = 1.1775
self.ANGLE_STEP = 0.2
self.MAX_EFFORT = 2.5
self.STEP_EFFORT = 0.1
self.effort_left = 0.
self.regime = False
self.allowed = True
self.effort_right = 0.
self.action_pub = rospy.Publisher('/action_published', Float64, queue_size=1)
self.action_pub_msg = Float64()
self.reset_gazebo = rospy.ServiceProxy('/gazebo/reset_world', Empty)
self.bars = {
'/tracked_robot/left_front_bar_hinge_position_controller/command': Float64(),
'/tracked_robot/left_rear_bar_hinge_position_controller/command': Float64(),
'/tracked_robot/right_front_bar_hinge_position_controller/command': Float64(),
'/tracked_robot/right_rear_bar_hinge_position_controller/command': Float64()
}
self.wheels = {
'/tracked_robot/left_front_bar_wheel_1_hinge_effort_controller/command': Float64(),
'/tracked_robot/left_front_bar_wheel_2_hinge_effort_controller/command': Float64(),
'/tracked_robot/left_front_bar_wheel_3_hinge_effort_controller/command':Float64(),
'/tracked_robot/left_front_bar_wheel_4_hinge_effort_controller/command':Float64(),
'/tracked_robot/left_front_bar_wheel_5_hinge_effort_controller/command':Float64(),
'/tracked_robot/left_rear_bar_wheel_1_hinge_effort_controller/command':Float64(),
'/tracked_robot/left_rear_bar_wheel_2_hinge_effort_controller/command':Float64(),
'/tracked_robot/left_rear_bar_wheel_3_hinge_effort_controller/command':Float64(),
'/tracked_robot/left_rear_bar_wheel_4_hinge_effort_controller/command':Float64(),
'/tracked_robot/left_rear_bar_wheel_5_hinge_effort_controller/command':Float64(),
'/tracked_robot/left_wheel_1_hinge_effort_controller/command':Float64(),
'/tracked_robot/left_wheel_2_hinge_effort_controller/command':Float64(),
'/tracked_robot/left_wheel_3_hinge_effort_controller/command':Float64(),
'/tracked_robot/left_wheel_4_hinge_effort_controller/command':Float64(),
'/tracked_robot/left_wheel_5_hinge_effort_controller/command':Float64(),
'/tracked_robot/left_wheel_6_hinge_effort_controller/command':Float64(),
'/tracked_robot/left_wheel_7_hinge_effort_controller/command':Float64(),
'/tracked_robot/left_wheel_8_hinge_effort_controller/command':Float64(),
'/tracked_robot/left_wheel_9_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_front_bar_wheel_1_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_front_bar_wheel_2_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_front_bar_wheel_3_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_front_bar_wheel_4_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_front_bar_wheel_5_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_rear_bar_wheel_1_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_rear_bar_wheel_2_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_rear_bar_wheel_3_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_rear_bar_wheel_4_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_rear_bar_wheel_5_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_wheel_1_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_wheel_2_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_wheel_3_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_wheel_4_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_wheel_5_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_wheel_6_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_wheel_7_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_wheel_8_hinge_effort_controller/command':Float64(),
'/tracked_robot/right_wheel_9_hinge_effort_controller/command':Float64()
}
# Dict of publishers
self.publishers = {}
# Common dict for flippers and tracks
d = dict(self.wheels)
d.update(self.bars)
for k, v in d.items():
self.publishers[k] = rospy.Publisher(k, Float64, queue_size=1)
def wrap(self,actions):
if len(actions) == 0:
raise ValueError('Actions are not defined')
res = {
'/tracked_robot/left_front_bar_hinge_position_controller/command': -1*actions[0],
'/tracked_robot/left_rear_bar_hinge_position_controller/command': actions[1],
'/tracked_robot/right_front_bar_hinge_position_controller/command': actions[0],
'/tracked_robot/right_rear_bar_hinge_position_controller/command': -1*actions[1],
'left': actions[2],
'right': actions[3]
}
return res
def apply_actions(self, actions):
    '''
    Apply a simulation action vector: step the four flipper bars and set
    all wheel efforts via the controller publishers.
    :param actions: 'left_front_bar_hinge', 'left_rear_bar_hinge', 'right_front_bar_hinge',
    'right_rear_bar_hinge', 'left_wheel_1_hinge', 'right_wheel_1_hinge'
    :return: None
    '''
    # Transform list of actions to dict of actions
    actions = self.wrap(actions)
    step_angle = 0.2
    step_effort = 2.0
    # Publish bar positions; a step is only kept while the resulting
    # angle stays within +/- MAX_ANGLE.
    for bar_k, bar_v in self.bars.items():
        self.bars[bar_k].data += step_angle * actions[bar_k]
        if abs(self.bars[bar_k].data) < self.MAX_ANGLE:
            self.publishers[bar_k].publish(self.bars[bar_k])
        else:
            # Revert the step that would exceed the limit.
            self.bars[bar_k].data -= step_angle * actions[bar_k]
    # All left wheels share the left effort, all right wheels the right.
    for wheel_k, wheel_v in self.wheels.items():
        if 'left' in wheel_k:
            action = actions['left']
        elif 'right' in wheel_k:
            action = actions['right']
        self.wheels[wheel_k].data = action
        self.publishers[wheel_k].publish(self.wheels[wheel_k])
    # To delete
    self.action_pub.publish(self.action_pub_msg)
def calculate_commands(self, cmd):
    '''Convert a joystick command dict into an action vector and apply it.'''
    # Track efforts: differential mix of forward (y) and turn (x) axes.
    if self.regime == False:
        self.effort_left = cmd['y']*self.MAX_EFFORT - self.MAX_EFFORT*cmd['x']
        self.effort_right = cmd['y']*self.MAX_EFFORT + self.MAX_EFFORT*cmd['x']
    else:
        # Step regime: constant small effort while movement is allowed.
        if self.allowed:
            self.effort_left = self.STEP_EFFORT  # cmd['y_'] * self.STEP_EFFORT
            self.effort_right = self.STEP_EFFORT  # cmd['y_'] * self.STEP_EFFORT
        else:
            self.effort_left = 0.
            self.effort_right = 0.
    # Flipper direction from the four buttons (-1 down, +1 up, 0 idle);
    # elif chain means only one button is honoured per callback.
    front = 0
    rear = 0
    if cmd['front_down'] == 1:
        front = -1
    elif cmd['front_up'] == 1:
        front = 1
    elif cmd['rear_down'] == 1:
        rear = -1
    elif cmd['rear_up'] == 1:
        rear = 1
    self.apply_actions([front, rear, self.effort_left, self.effort_right])
def joy_callback(self, msg):
    '''Joystick handler: button 4 resets the world, otherwise drive.'''
    if abs(msg.buttons[4]) != 0:
        self.reset()
    else:
        # Map joystick axes/buttons to the command dict expected by
        # calculate_commands.
        d = {}
        d['x'] = msg.axes[2]
        d['y'] = msg.axes[3]
        d['front_down'] = msg.buttons[2]
        d['front_up'] = msg.buttons[3]
        d['rear_down'] = msg.buttons[1]
        d['rear_up'] = msg.buttons[0]
        self.calculate_commands(d)
def reset(self):
    '''Zero all commanded bar positions and efforts, then reset Gazebo.'''
    for k in self.bars.keys():
        self.bars[k].data = 0.
    for k in self.wheels.keys():
        self.wheels[k].data = 0.
    self.effort_left = 0.
    self.effort_right = 0.
    self.reset_gazebo(EmptyRequest())
def autonomous_callback(self, vel):
    '''cmd_vel handler: mix the Twist into track efforts and dispatch to
    the simulator or the physical robot depending on self.sim.'''
    # Differential mix of linear and angular velocity.
    left = vel.linear.x - vel.angular.z
    right = vel.linear.x + vel.angular.z
    print('left {} right {}'.format(left, right))
    # Bars stay where they are (0 step); only track efforts change.
    actions = [0, 0, left, right]
    if self.sim:
        self.apply_actions(actions)
    else:
        self.apply_actions_jaguar(actions)
def apply_actions_jaguar(self, actions):
'''
remember that the left main track velocity command has to be inverted.
Thus, to move forward it is necessary to set -300, 300 units for left and right tracks
:param actions: list of actions [front_bars, rear_bars, left, right]
:return:
'''
PWM = 300
left = actions[2]
right = actions[3]
max_ = max(abs(left), abs(right))
if max_ == 0:
self.msg_tracks.leftCmd = 0.0
self.msg_tracks.rightCmd = 0.0
else:
self.msg_tracks.leftCmd = -PWM * left / max_
self.msg_tracks.rightCmd = PWM * right / max_
print('Applying actions to the jaguar left: {}, right: {} \n'.format(
self.msg_tracks.leftCmd,
self.msg_tracks.rightCmd
))
#decision = raw_input("Should I apply these actions (y/n)?")
# if decision == 'y':
self.pub_track_cmd.publish(self.msg_tracks)
if __name__ == '__main__':
tele = TeleopControl()
rospy.init_node('Joystick_control', anonymous=True)
rospy.Subscriber('/joy', Joy, tele.joy_callback, queue_size=1)
rospy.Subscriber('/cmd_vel', Twist, tele.autonomous_callback, queue_size=1)
rospy.spin()
'''
rosrun tf static_transform_publisher 2 0.5 0 0 0 0 1 map go 100
''' |
20,952 | 62e1a20d33432035a051013e31cbd0b1edb23d22 | class DoubleDictGraph:
"""A directed graph, represented as two maps,
one from each vertex to the set of outbound neighbours,
the other from each vertex to the set of inbound neighbours"""
def __init__(self, n):
"""Creates a graph with n vertices (numbered from 0 to n-1)
and no edges"""
self._dictOut = {}
self._dictIn = {}
for i in range(n):
self._dictOut[i] = []
self._dictIn[i] = []
def parseX(self):
"""Returns an iterable containing all the vertices"""
return self._dictOut.keys()
def parseNout(self, x):
"""Returns an iterable containing the outbound neighbours of x"""
return self._dictOut[x]
def parseNin(self, x):
"""Returns an iterable containing the inbound neighbours of x"""
return self._dictIn[x]
def isEdge(self, x, y):
"""Returns True if there is an edge from x to y, False otherwise"""
return y in self._dictOut[x]
def addEdge(self, x, y):
"""Adds an edge from x to y.
Precondition: there is no edge from x to y"""
self._dictOut[x].append(y)
self._dictIn[y].append(x)
|
20,953 | bca7d5ec155fd317bfe3764c56db21b054c8b7dd | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import sys
import traceback
from oslo_log import log
from networking_vsphere._i18n import _LI
from networking_vsphere.common import error
LOG = log.getLogger(__name__)
def import_class(import_str):
    """Returns a class from a string including module and class."""
    module_path, _sep, symbol = import_str.rpartition('.')
    try:
        __import__(module_path)
        return getattr(sys.modules[module_path], symbol)
    except (ValueError, AttributeError):
        # Re-raise as ImportError carrying the original traceback text.
        raise ImportError('Class %s cannot be found (%s).' %
                          (symbol,
                           traceback.format_exception(*sys.exc_info())))
def load_object(driver, base_class, *args, **kwargs):
    """Load a class, instantiate, check if its of base_class type.

    :param driver: dotted path of the class to load
    :param base_class: required base class of the instantiated object
    :raises TypeError: if the instance does not extend base_class
    """
    driver_obj = import_class(driver)(*args, **kwargs)
    if not isinstance(driver_obj, base_class):
        raise TypeError("Invalid type - %s does not extend %s." %
                        (fullname(driver_obj), base_class))
    return driver_obj
def fullname(cls):
"""Get full name of a class."""
module = cls.__module__
if module is None or module == str.__class__.__module__:
return cls.__class__.__name__
return module + '.' + cls.__class__.__name__
def require_state(state=None, excp=True):
    """Decorator to check state of an object.

    First argument of the decorated function should be
    the object whose state needs to be checked.

    :param state: valid set of states
    :param excp: If True then raise an exception if in invalid state
    """
    # Accept any iterable of states; normalise to a set for O(1) lookups.
    if state is not None and not isinstance(state, set):
        state = set(state)

    def outer(f):
        @functools.wraps(f)
        def inner(obj, *args, **kw):
            if state is not None and obj.state not in state:
                l_states = list(state)
                if excp:
                    raise error.OVSvAppNeutronAgentError(
                        "%s not allowed. "
                        "%s is in %s state. "
                        "To be in %s state" %
                        (f.__name__,
                         obj.__class__.__name__,
                         obj.state,
                         l_states))
                else:
                    # Log and silently skip the call instead of raising.
                    LOG.info(_LI("%(name)s not allowed. "
                                 "%(obj)s is %(state)s state. "
                                 "Need to be in %(states)s state."),
                             {'name': f.__name__,
                              'obj': obj.__class__.__name__,
                              'state': obj.state,
                              'states': l_states})
                    return
            return f(obj, *args, **kw)
        return inner
    return outer
def get_cluster_based_topic(cluster, device):
    """Build a messaging topic name, namespaced by *cluster* when given.

    Slashes in the cluster path are flattened to underscores so the result
    is a single valid topic token.
    """
    if not cluster:
        return device
    return '%s_%s' % (cluster.replace('/', '_'), device)
|
20,954 | 06819c33ccee6e7c401c2e4a2ec5985276377e03 | from .trinket import Trinket
class Trinket1(Trinket):
    """Concrete trinket; relies entirely on the Trinket base behaviour."""

    def __init__(self):
        # Bug fix: the original read ``super()__init__()`` (missing dot),
        # which is a SyntaxError and made the module unimportable.
        super().__init__()
20,955 | 0ab74f0d4cc68f14010efe2ff592ca829b323ae7 | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2021 Clyde McQueen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Launch topside for ROV operations."""
# Test w/ no barometer:
# ros2 topic pub -r 20 -p 20 /barometer orca_msgs/msg/Barometer {}
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import ExecuteProcess, SetEnvironmentVariable
from launch_ros.actions import Node
# TODO call bringup.py (figure out how to combine IfCondition with EqualsCondition)
def generate_launch_description():
    """Assemble the ROV topside launch: bagging, static TF, joystick input,
    teleop and the base controller, all configured from
    ``rov_topside_params.yaml``."""
    orca_bringup_dir = get_package_share_directory('orca_bringup')
    params_file = os.path.join(orca_bringup_dir, 'params', 'rov_topside_params.yaml')

    orca_description_dir = get_package_share_directory('orca_description')
    urdf_file = os.path.join(orca_description_dir, 'urdf', 'hw7.urdf')

    return LaunchDescription([
        # Line-buffer ROS log output so messages appear promptly.
        SetEnvironmentVariable('RCUTILS_LOGGING_BUFFERED_STREAM', '1'),

        # Bag everything
        ExecuteProcess(
            cmd=['ros2', 'bag', 'record', '-a'],
            output='screen'
        ),

        # Publish static /tf
        Node(
            package='robot_state_publisher',
            executable='robot_state_publisher',
            output='screen',
            arguments=[urdf_file],
        ),

        # Publish /joy
        Node(
            package='joy',
            executable='joy_node',
            output='screen',
            name='joy_node',
            parameters=[params_file],
        ),

        # Subscribe to /joy and publish /armed, /camera_tilt, /cmd_vel and /lights
        Node(
            package='orca_base',
            executable='teleop_node',
            output='screen',
            name='teleop_node',
            parameters=[params_file],
        ),

        # Subscribe to /cmd_vel and publish /thrust, /odom and /tf odom->base_link
        Node(
            package='orca_base',
            executable='base_controller',
            output='screen',
            name='base_controller',
            parameters=[params_file],
            remappings=[
                # Feed the controller the filtered barometer stream.
                ('barometer', 'filtered_barometer'),
            ],
        ),

        # Barometer filter
        Node(
            package='orca_base',
            executable='baro_filter_node',
            output='screen',
            parameters=[params_file],
        ),
    ])
|
20,956 | 6cd2fcd221221ab1c2af25bef3469b74cbe734b5 | ############ Author:################################
# Marcin Cuber
#####################################################
# GA17 Privacy Enhancing Technologies -- Lab 01
#
# Basics of Petlib, encryption, signatures and
# an end-to-end encryption system.
#
# Run the tests through:
# $ py.test-2.7 -v Lab01Tests.py
#####################################################
# TASK 1 -- Ensure petlib is installed on the System
# and also pytest. Ensure the Lab Code can
# be imported.
import petlib
#####################################################
# TASK 2 -- Symmetric encryption using AES-GCM
# (Galois Counter Mode)
#
# Implement a encryption and decryption function
# that simply performs AES_GCM symmetric encryption
# and decryption using the functions in petlib.cipher.
from os import urandom
from petlib.cipher import Cipher
def encrypt_message(K, message):
    """AES-128-GCM encrypt *message* under key *K*.

    Returns the triple (iv, ciphertext, tag) needed for decryption.
    """
    cipher = Cipher("aes-128-gcm")
    nonce = urandom(16)
    encoded = message.encode("utf8")
    # GCM yields both the ciphertext and an authentication tag.
    body, auth_tag = cipher.quick_gcm_enc(K, nonce, encoded)
    return (nonce, body, auth_tag)
def decrypt_message(K, iv, ciphertext, tag):
    """ Decrypt a cipher text under a key K

    In case the decryption fails, throw an exception.
    """
    aes = Cipher("aes-128-gcm")
    plain = aes.quick_gcm_dec(K, iv, ciphertext, tag)
    # NOTE(review): encoding the recovered plaintext looks suspect -- under
    # Python 3 bytes have no .encode() and .decode("utf8") is likely the
    # intent. Left unchanged; this file uses Python 2 print syntax below,
    # so confirm the target interpreter before changing it.
    return plain.encode("utf8")
#####################################################
# TASK 3 -- Understand Elliptic Curve Arithmetic
# - Test if a point is on a curve.
# - Implement Point addition.
# - Implement Point doubling.
# - Implement Scalar multiplication (double & add).
# - Implement Scalar multiplication (Montgomery ladder).
#
# MUST NOT USE ANY OF THE petlib.ec FUNCTIONS. Only petlib.bn!
from petlib.bn import Bn
def is_point_on_curve(a, b, p, x, y):
    """
    Check that a point (x, y) is on the curve defined by a, b and prime p.

    Reminder: an Elliptic Curve on a prime field p is defined as:
              y^2 = x^3 + ax + b (mod p)
                  (Weierstrass form)

    Return True if point (x, y) is on curve, otherwise False.
    By convention a (None, None) point represents "infinity".
    """
    assert isinstance(a, Bn)
    assert isinstance(b, Bn)
    assert isinstance(p, Bn) and p > 0
    assert (isinstance(x, Bn) and isinstance(y, Bn)) \
        or (x == None and y == None)

    # The point at infinity lies on every curve by convention.
    if x == None and y == None:
        return True

    # Evaluate both sides of the Weierstrass equation modulo p.
    left_side = (y * y) % p
    right_side = (x * x * x + a * x + b) % p
    return left_side == right_side
def point_add(a, b, p, x0, y0, x1, y1):
    """Define the "addition" operation for 2 EC Points.

    Reminder: (xr, yr) = (xq, yq) + (xp, yp)
    is defined as:
        lam = (yq - yp) * (xq - xp)^-1 (mod p)
        xr  = lam^2 - xp - xq (mod p)
        yr  = lam * (xp - xr) - yp (mod p)

    Return the point resulting from the addition; (None, None) represents
    the point at infinity. Raises an Exception if the points are equal
    (doubling is handled by point_double).
    """
    # initialise new coordinates
    xr, yr = None, None
    # create tuples for the input points
    p1 = (x0, y0)
    p2 = (x1, y1)
    # check validity of the points
    try:
        assert is_point_on_curve(a, b, p, x0, y0)
        assert is_point_on_curve(a, b, p, x1, y1)
    except:
        raise Exception('not valid points')
    # check the curve is non-singular: 4a^3 + 27b^2 != 0 mod p
    c0 = a.mod_pow(Bn(3), p)
    c1 = c0.mod_mul(Bn(4), p)
    c2 = b.mod_pow(Bn(2), p)
    c3 = c2.mod_mul(Bn(27), p)
    c = c1.mod_add(c3, p)
    try:
        assert c != 0
    except:
        raise Exception('invalid curve')
    # addition deliberately rejects equal points (that case is doubling)
    try:
        assert p1 != p2
    except:
        raise Exception('EC Points must not be equal')
    # special cases: infinities and inverse points
    if p1 == (None, None) and p2 == (None, None):
        return (None, None)
    elif (x0 == x1) and (y0.mod_add(y1, p) == 0):
        # P + (-P) = infinity
        return (None, None)
    elif (x0 == None or y0 == None) and (x1 != None and y1 != None):
        return p2
    elif (x1 == None or y1 == None) and (x0 != None and y0 != None):
        return p1
    elif y0 != None and x0 != None and y1 != None and x1 != None:
        # check if the points are valid with an additional check
        # through an exception
        try:
            assert p1 != p2
            assert p1 != (x1, (-y1))
        except:
            raise Exception('EC Points must not be equal')
        if y1 == 0:
            # NOTE(review): plain (non-modular) negation here and below
            # looks suspect for field arithmetic -- confirm.
            lam0 = -y0
        else:
            lam0 = y1.mod_sub(y0, p)
        if x1 == 0:
            lam1 = -x0
        else:
            lam1 = x1.mod_sub(x0, p)
        # condition check if the gradient is 0
        if lam0 == 0 or lam1 == 0:
            xr = -x0.mod_sub(x1, p)
            yr = -y1
            # check if the point is on the curve
            if xr == None or yr == None:
                return (None, None)
            try:
                assert is_point_on_curve(a, b, p, xr, yr)
            except:
                raise Exception('The new point is not valid')
        # do calculations on the numbers that can give valid xr, yr point
        else:
            lam2 = lam1.mod_inverse(p)
            lam = lam0.mod_mul(lam2, p)
            xr0 = lam.mod_pow(Bn(2), p)
            xr1 = xr0.mod_sub(x0, p)
            xr = xr1.mod_sub(x1, p)
            yr0 = x0.mod_sub(xr, p)
            yr1 = lam.mod_mul(yr0, p)
            yr = yr1.mod_sub(y0, p)
            # check if the new point is valid and if it is then return it
            try:
                assert is_point_on_curve(a, b, p, xr, yr)
            except:
                raise Exception('The new point is not valid')
    # check if any part is None, it may never be!
    if xr == None or yr == None:
        return (None, None)
    return (xr, yr)
def point_double(a, b, p, x, y):
    """Define "doubling" an EC point.

    A special case, when a point needs to be added to itself.

    Reminder:
        lam = (3 * x^2 + a) * (2 * y)^-1 (mod p)
        xr  = lam^2 - 2 * x
        yr  = lam * (x - xr) - y (mod p)

    Returns the point representing the double of the input (x, y);
    (None, None) represents the point at infinity.
    """
    xr, yr = None, None
    p1 = (x, y)
    # check the input point for validity
    try:
        assert is_point_on_curve(a, b, p, x, y)
    except:
        raise Exception('not a valid point')
    # check the curve is non-singular: 4a^3 + 27b^2 != 0 mod p
    c0 = a.mod_pow(Bn(3), p)
    c1 = c0.mod_mul(Bn(4), p)
    c2 = b.mod_pow(Bn(2), p)
    c3 = c2.mod_mul(Bn(27), p)
    c = c1.mod_add(c3, p)
    try:
        assert c != 0
    except:
        raise Exception('invalid curve')
    # verify the input point
    if p1 == (None, None):
        return (None, None)
    elif p1 == (0, 0):
        return (0, 0)
    elif y == None or y == 0:
        # a point with y == 0 doubles to infinity
        return (None, None)
    # calculate the new point == doubled point
    else:
        if x == 0:
            xp2 = a
        else:
            xp0 = x.mod_pow(Bn(2), p)
            xp1 = xp0.mod_mul(Bn(3), p)
            xp2 = xp1.mod_add(a, p)
        yp0 = y.mod_mul(Bn(2), p)
        if yp0 != 0:
            yp = yp0.mod_inverse(p)
        else:
            yp = 0;
        if (xp2 != 0 and yp != 0):
            # calculate gradient if the points are not zero
            lam = xp2.mod_mul(yp, p)
            # calculate new x coordinate
            xr0 = lam.mod_pow(Bn(2), p)
            xr1 = x.mod_mul(Bn(2), p)
            xr = xr0.mod_sub(xr1, p)
            # calculate new y coordinate
            yr0 = x.mod_sub(xr, p)
            yr1 = lam.mod_mul(yr0, p)
            yr = yr1.mod_sub(y, p)
            if (xr == None or yr == None):
                return (None, None)
        else:
            # NOTE(review): plain (non-modular) negation looks suspect here.
            xr = -x.mod_mul(Bn(2), p)
            yr = -y
            if (xr == None or yr == None):
                return (None, None)
    # NOTE(review): this re-validates the INPUT (x, y), not the result
    # (xr, yr) -- possibly a bug; left unchanged.
    try:
        assert is_point_on_curve(a, b, p, x, y)
    except:
        raise Exception('The new point is not valid')
    return xr, yr
def point_scalar_multiplication_double_and_add(a, b, p, x, y, scalar):
    """
    Implement Point multiplication with a scalar:
        r * (x, y) = (x, y) + ... + (x, y)    (r times)

    Double-and-add: walk the scalar's bits from least to most significant,
    accumulating the addend into the result whenever a bit is set.
    """
    acc = (None, None)          # running result, starts at infinity
    addend = (x, y)             # P, doubled each round
    bits = bin(scalar)          # '0b...' string; prefix offsets indexing by 2
    nbits = scalar.num_bits()
    for pos in range(nbits):
        # bits[nbits - pos + 1] is bit `pos` counted from the LSB.
        if bits[nbits - pos + 1] == '1':
            acc = point_add(a, b, p, acc[0], acc[1], addend[0], addend[1])
        addend = point_double(a, b, p, addend[0], addend[1])
    return acc
def point_scalar_multiplication_montgomerry_ladder(a, b, p, x, y, scalar):
    """
    Implement Point multiplication with a scalar using the Montgomery
    ladder:

        R0 = infinity; R1 = P
        for each bit d of the scalar, most significant first:
            if d == 0:  R1 = R0 + R1;  R0 = 2*R0
            else:       R0 = R0 + R1;  R1 = 2*R1
        return R0

    The ladder performs one add and one double per bit regardless of the
    bit value, which limits timing leakage compared to double-and-add.
    """
    r0 = (None, None)
    r1 = (x, y)
    bits = bin(scalar)          # '0b...' string; prefix offsets indexing by 2
    nbits = scalar.num_bits()
    # Scan from the most-significant bit down to the least-significant one.
    for pos in reversed(range(0, nbits)):
        if bits[nbits - pos + 1] == '0':
            r1 = point_add(a, b, p, r0[0], r0[1], r1[0], r1[1])
            r0 = point_double(a, b, p, r0[0], r0[1])
        else:
            r0 = point_add(a, b, p, r0[0], r0[1], r1[0], r1[1])
            r1 = point_double(a, b, p, r1[0], r1[1])
    return r0
#####################################################
# TASK 4 -- Standard ECDSA signatures
#
# - Implement a key / param generation
# - Implement ECDSA signature using petlib.ecdsa
# - Implement ECDSA signature verification
# using petlib.ecdsa
from hashlib import sha256
from petlib.ec import EcGroup
from petlib.ecdsa import do_ecdsa_sign, do_ecdsa_verify
def ecdsa_key_gen():
    """ Returns an EC group, a random private key for signing
    and the corresponding public key for verification"""
    group = EcGroup()
    signing_key = group.order().random()
    verification_key = signing_key * group.generator()
    return (group, signing_key, verification_key)
def ecdsa_sign(G, priv_sign, message):
    """ Sign the SHA256 digest of the message using ECDSA and return a signature """
    digest = sha256(message.encode("utf8")).digest()
    return do_ecdsa_sign(G, priv_sign, digest)
def ecdsa_verify(G, pub_verify, message, sig):
    """ Verify the ECDSA signature on the message """
    digest = sha256(message.encode("utf8")).digest()
    return do_ecdsa_verify(G, pub_verify, sig, digest)
#####################################################
# TASK 5 -- Diffie-Hellman Key Exchange and Derivation
# - use Bob's public key to derive a shared key.
# - Use Bob's public key to encrypt a message.
# - Use Bob's private key to decrypt the message.
#
# NOTE:
def dh_get_key():
    """ Generate a DH key pair: (group, private scalar, public point). """
    group = EcGroup()
    secret = group.order().random()
    public = secret * group.generator()
    return (group, secret, public)
def dh_encrypt(pub, message):
    """ Assume you know the public key of someone else (Bob),
    and wish to Encrypt a message for them.
        - Generate a fresh DH key for this message.
        - Derive a fresh shared key.
        - Use the shared key to AES_GCM encrypt the message.
        - Optionally: sign the message.
    """
    Group, private, public = dh_get_key()  # generate new DH pair for Alice
    # private key is an integer/scalar and public key is a point on the curve
    # check whether public key of Bob is valid and on curve
    assert Group.check_point(pub)
    # Alice obtains the shared secret by multiplying her private key with
    # Bob's forwarded public key: dA * qB
    key = pub.pt_mul(private)
    # NOTE(review): Python 2 print statement; also leaks key material to stdout.
    print "key from enc is", key
    # Hash the point to derive a symmetric key; only 128 bits are used below.
    hashedKey = sha256(key.export()).digest()
    plaintext = message.encode("utf8")  # encode message
    aes = Cipher("aes-128-gcm")  # select cipher
    iv = urandom(16)  # generate initialization vector
    cipher, tag = aes.quick_gcm_enc(hashedKey[:16], iv, plaintext)  # encrypt using shared key
    # Bundle IV, ciphertext, tag and Alice's ephemeral public key for the receiver.
    ciphertext = [iv, cipher, tag, public]
    return ciphertext
def dh_decrypt(priv, ciphertext):
""" Decrypt a received message encrypted using your public key,
of which the private key is provided"""
Group1,private, public = dh_get_key()#generate new DH pair for Bob
iv=ciphertext[0]
cipher=ciphertext[1]
tag=ciphertext[2]
pubA=ciphertext[3]
#Bob derives shared secret key by multiplying his public key with Alice's private key
shared2 = pubA.pt_mul(priv)#qA * dB
print "key from dec is", shared2
hashedKey=sha256(shared2.export()).digest()
aes = Cipher("aes-128-gcm")
plain = aes.quick_gcm_dec(hashedKey[:16], iv, cipher, tag)#where to get IV and tag from ???
return plain.encode("utf8")
## NOTE: populate those (or more) tests
# ensure they run using the "py.test filename" command.
# What is your test coverage? Where is it missing cases?
# $ py.test --cov-report html --cov Lab01Code Lab01Code.py -s
def test_encrypt():
    """Smoke test: dh_encrypt must run without raising for a fresh key pair.

    NOTE(review): asserts nothing about the ciphertext itself.
    """
    G1, private1, public1 = dh_get_key()
    msg = u"Test" * 1000
    print "msg is"
    ciphertext = dh_encrypt(public1, msg)
    assert True
def test_decrypt():
    """Round trip: decrypting an encryption must return the original message."""
    G1, private1, public1 = dh_get_key()
    msg = u"Test" * 1000
    print "decnow"
    ciphertext = dh_encrypt(public1, msg)
    assert dh_decrypt(private1, ciphertext) == msg
def test_fails():
    """Independently re-derive the shared key and re-encrypt, then
    cross-check the GCM tag before decrypting."""
    G1, private1, public1 = dh_get_key()
    msg = u"Test" * 1000
    ciphertext = dh_encrypt(public1, msg)
    iv = ciphertext[0]  # get IV from dh_encrypt()
    tag = ciphertext[2]  # tag
    pubA = ciphertext[3]  # Alice's public key
    # derive shared secret by doing qA * dB
    shared = pubA.pt_mul(private1)
    hashedKey = sha256(shared.export()).digest()
    print "shared in fail is", shared
    mess = msg.encode("utf8")
    aes = Cipher.aes_128_gcm()  # Initialize AES cipher
    enc = aes.enc(hashedKey[:16], iv)  # Get an encryption CipherOperation
    ciphertext2 = enc.update(mess)  # Include some plaintext
    nothing = enc.finalize()  # Finalize
    tag2 = enc.get_tag(16)  # Get the AES-GCM tag
    if tag == tag2:  # only attempt to decrypt if the tag matches
        assert dh_decrypt(private1, ciphertext) == mess
    else:
        assert False
#####################################################
# TASK 6 -- Time EC scalar multiplication
# Open Task.
#
# - Time your implementations of scalar multiplication
# (use time.clock() for measurements)for different
# scalar sizes)
# - Print reports on timing dependencies on secrets.
# - Fix one implementation to not leak information.
def time_scalar_mul():
    """Task 6 placeholder: benchmark the two scalar-multiplication
    implementations across scalar sizes (intentionally unimplemented)."""
    pass
|
class Solution(object):
    """LeetCode 169 -- Majority Element."""

    def majorityElement(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Boyer-Moore voting: the element that occurs more than n/2 times
        (guaranteed by the problem statement) survives pairwise
        cancellation against all other values.

        Bug fix: the original sorted-run-counting version mis-tracked run
        lengths (e.g. it returned 1 for [1, 2, 2]); it also mutated the
        caller's list via sort().
        """
        candidate = None
        count = 0
        for value in nums:
            if count == 0:
                candidate = value
            count += 1 if value == candidate else -1
        return candidate
class simplemap:
    """Implement map-like behaviour via the iterator protocol.

    Applies *func* element-wise across *sequences* and stops at the
    shortest one.

    Improvement: each sequence is materialised ONCE up front; the original
    rebuilt ``list(subseq)`` for every produced element, costing O(n) per
    item. It also now terminates immediately when called with no
    sequences instead of looping forever.
    """

    def __init__(self, func, *sequences):
        self.func = func
        self.sequences = sequences            # kept for API compatibility
        self._seqs = [list(s) for s in sequences]
        self.i = -1

    def __iter__(self):
        return self

    def __next__(self):
        self.i += 1
        # Stop when any sequence is exhausted (or there are none at all).
        if not self._seqs or any(self.i >= len(s) for s in self._seqs):
            raise StopIteration
        return self.func(*[s[self.i] for s in self._seqs])
def mapper(func, *sequences):
    """Implement map-like behaviour as a generator function.

    Yields func(*row) for each aligned row of *sequences*, stopping at the
    shortest input.

    Improvements: each sequence is materialised once (the original rebuilt
    ``list(subseq)`` per output element), and arbitrary iterables are now
    accepted (the original required len()-able sequences).
    """
    if not sequences:
        return
    seqs = [list(subseq) for subseq in sequences]
    # zip truncates to the shortest sequence, matching the old minlen logic.
    for row in zip(*seqs):
        yield func(*row)
def add_one(x, y, z):
    """Sum the three arguments.

    (Name kept for compatibility; despite it, the function adds three
    values rather than incrementing.)
    """
    return sum((x, y, z))
lst = [1, 2, 3]
lst2 = [4, 5]
lst3 = [7, 8, 9, 10]
# NOTE(review): the simplemap result is immediately overwritten by the
# mapper result below, so the class-based version is never consumed here.
result = simplemap(add_one, lst, lst2, lst3)
result = mapper(add_one, lst, lst2, lst3)
# Expression statement; its value is discarded when run as a script.
type(result)
20,959 | ff53db30adb2af20bb367f06fa908b42b4b2550d | #!/usr/bin/env python3
# Source: https://huggingface.co/bigscience/bloomz-560m#cpu
# To run this demo, type in a terminal: gradio bloomz-560m_cpu_gradio.py
# Model "bigscience/bloomz-560m" (560M parameters): multitask finetuned on xP3.
# Recommended for prompting in English.
# See: https://huggingface.co/bigscience/bloomz-560m
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr
checkpoint = "bigscience/bloomz-560m"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)
def chat(message, history):
    """Generate a model reply to *message* (the *history* argument is
    currently ignored)."""
    # TODO: use `history`
    input_ids = tokenizer.encode(message, return_tensors="pt")
    generated = model.generate(input_ids)
    return tokenizer.decode(generated[0])
examples = [
"Translate to English: Je t’aime.",
"Write a fairy tale about a troll saving a princess from a dangerous dragon. The fairy tale is a masterpiece that has achieved praise worldwide and its moral is \"Heroes Come in All Shapes and Sizes\". Story (in French):",
"Explain in a sentence in French what is backpropagation in neural networks."
]
demo = gr.ChatInterface(chat,
examples=examples,
title="BloomZ 560M",
description="See: https://huggingface.co/bigscience/bloomz-560m")
demo.launch() |
20,960 | 4bac6d0a5970f6b408d2bbe12fea6af1ccdf775e | import tkinter as tk
import collections
import collections.abc
import sys
import types

from copy import deepcopy
def _isiterable(x):
    """Return True for iterables that are not strings.

    Bug fix: the original tested ``collections.Iterable`` (alias removed
    in Python 3.10) and ``types.StringTypes`` (Python 2 only), so every
    call raised AttributeError under Python 3; this module is otherwise
    Python 3 code (lowercase tkinter, zero-argument super()).
    """
    return isinstance(x, collections.abc.Iterable) and not isinstance(x, str)
class FSM:
    """A finite state machine base class to ease tkinter bindings.

    The transition table is a dict keyed by (state, canvas_tag, event)
    mapping to (new_state, callback_or_callbacks). A None tag and/or None
    event acts as a wildcard during lookup in transition().
    """

    def __init__(self, initialState, transitions, widget):
        self._state = initialState      # current state
        self._tt = transitions          # transition table (see class docstring)
        self._widget = widget           # tkinter widget to bind events on
        self._events = self.unique_events()
        self._stopped = True
        self.start()

    def unique_events(self):
        """Collect the distinct (tag, event) pairs used by the table."""
        evs = set()
        # find unique events
        for state, tag, event in self._tt.keys():
            if event is not None:  # None is any event
                evs.add((tag, event))
        return evs

    def start(self):
        """Bind every (tag, event) pair of the table to self.transition."""
        for tag, event in self._events:
            # event thunk that saves the event info in extra arguments
            # (default-argument binding avoids the late-binding closure trap)
            def _trans(ev, self=self, te=(tag, event)):
                return self.transition(ev, te[0], te[1])
            if (tag):
                self._widget.tag_bind(tag, event, _trans)
            else:
                self._widget.bind(event, _trans)
        self._stopped = False

    def stop(self):
        """Unbind all events installed by start()."""
        for tag, event in self._events:
            if (tag):
                self._widget.tag_unbind(tag, event)
            else:
                self._widget.unbind(event)
        self._stopped = True

    def isStopped(self):
        """Return True while the machine has no active bindings."""
        return self._stopped

    def transition(self, ev, tag, event):
        """Dispatch one tkinter event through the transition table.

        Lookup order: (state, tag, event), then the tag wildcard
        (state, None, event), then the full wildcard (state, None, None).
        Fires the transition's callback(s) and moves to the new state.
        """
        # print ("transition from state",self._state,tag,event)
        tr = None  # NOTE(review): unused leftover variable
        key = (self._state, tag, event)
        if tk.CURRENT:
            tags = ev.widget.gettags(tk.CURRENT)
            if not tag in tags or not key in self._tt:
                # print ("no tags transition found",key)
                key = (self._state, None, event)
        if not key in self._tt:
            # check for any event transition
            key = (self._state, None, None)
        if key in self._tt:
            new_state, cbs = self._tt[key]
            # print ("transition found:",key,tr[0])
            if cbs:  # callback
                # accept either a single callable or an iterable of them
                if _isiterable(cbs):
                    for cb in cbs:
                        cb(ev)
                else:
                    cbs(ev)
            self._state = new_state  # set new state
        else:
            # print ("no transition found:", self._state, tag, event)
            pass
        sys.stdout.flush()
class SavedFSM(FSM):
    """FSM with an undo/redo history bound to keyboard events.

    Subclasses must override save()/restore() to snapshot and reinstate
    their own state.
    """

    def __init__(self, initialState, transitions, widget,
                 undobinding="<Control-Key-z>",
                 redobinding="<Control-Key-y>"):
        # Extend the table with undo/redo self-transitions in the initial state.
        undo_redo = {
            (initialState, None, redobinding): (initialState, self.onRedo),
            (initialState, None, undobinding): (initialState, self.onUndo),
        }
        transitions.update(undo_redo)
        super().__init__(initialState, transitions, widget)
        self.history = []  # undo stack of saved snapshots
        self.future = []   # redo stack

    def historySave(self):
        """Push a snapshot onto the undo stack; any redo path is invalidated."""
        self.history.append(deepcopy(self.save()))  # save copy of control curve
        self.future = []  # clear redos
        print("Save", len(self.future), len(self.history))

    def historyClear(self):
        """Drop both the undo and redo stacks."""
        self.history = []  # clear undos
        self.future = []  # clear redos

    def historyTakeover(self, other):
        """Adopt another SavedFSM's history and current state."""
        self.history.extend(other.history)
        self.future = []  # clear redos
        self.restore(other.save())
        print("takeover", len(self.future), len(self.history))

    def save(self):
        # abstract hook: subclasses return a snapshot of their state
        print("please implement save in your derived class!!!")

    def restore(self, data):
        # abstract hook: subclasses reinstate state from a snapshot
        print("please implement restore in your derived class!!!")

    def onUndo(self, ev):
        """Undo: move the current state onto the redo stack, restore the
        most recent snapshot."""
        if self.history:  # not empty
            current = self.history.pop()
            self.future.append(deepcopy(self.save()))
            self.restore(current)
            print("Undo", len(self.future), len(self.history))

    def onRedo(self, ev):
        """Redo: inverse of onUndo."""
        if self.future:  # not empty
            current = self.future.pop()
            self.history.append(deepcopy(self.save()))
            self.restore(current)
            print("Redo", len(self.future), len(self.history))
|
20,961 | 880e08cd9731b0c4cca4b400aa43544c19f6a6b7 | # Compress airfoil data into a single .npz folder for easier extraction later
import os
import math
import numpy as np
import re
import warnings
import shutil
import random
from cnn_2D.airfoil import Airfoil
# Flat script: read XFOIL output per airfoil, build (input, output) pairs
# for the CNN, shuffle, split train/test and save a single .npz archive.
airfoil_names = []
in_dir = "aero_shape_opt\\datasets\\airfoils"
out_dir = "aero_shape_opt\\datasets\\xfoil_data"
# Get list of airfoil names
for filename in os.listdir(in_dir):
    if filename.endswith(".dat"):
        name = filename[0:filename.find('.')]
        airfoil_names.append(name)
airfoil_inputs = []
airfoil_outputs = []
# Get airfoil information
for a_name in airfoil_names:
    coord_name = a_name + "_airfoil.txt"
    polar_name = a_name + "_polar.txt"
    try:
        # Coordinates. Turn off empty file warnings, since is caught
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            coords = np.loadtxt(out_dir + "\\" + coord_name)
        # Extract all polar data
        # NOTE(review): bare open/close; a `with` block would be safer.
        f = open(out_dir + "\\" + polar_name, 'r')
        xdata = f.read()
        f.close()
        xdata = xdata.splitlines()
        # Extract [Mach no., Reynold n., Ncrit] from the XFOIL header line
        header_nums = re.findall(r'[-+]?\d+\.*\d*[ e ]*\d*', xdata[8])
        header_nums = [float(a.replace(" ","")) for a in header_nums]
        Ma = header_nums[0]
        Re = header_nums[1]
    # NOTE(review): bare except hides the failure reason; consider
    # catching (OSError, IndexError, ValueError) explicitly.
    except:
        print('Could not load data for airfoil '+ a_name)
        continue
    # Ensure bad airfoils are dealt with - send them to bad folder
    try:
        af = Airfoil(a_name,Ma,Re)
        px = af.pixel_grid()
    except:
        shutil.move('{}\\{}.dat'.format(in_dir,a_name),'aero_shape_opt\\datasets\\bad_airfoils')
        shutil.move('{}\\{}_airfoil.txt'.format(out_dir,a_name),'aero_shape_opt\\datasets\\bad_airfoils')
        shutil.move('{}\\{}_polar.txt'.format(out_dir,a_name),'aero_shape_opt\\datasets\\bad_airfoils')
        continue
    #coords = coords.flatten() # flatten to a vector of [x1,y1,x2,y2,...]
    # Extract each [AoA, CL, CD, CDp, CM, Top_xtr, Bot_xtr]
    if len(xdata) > 12:
        polar_nums = np.array([re.findall(r'[-+]?\d+[\.\,\d]*\d*',line) for line in xdata[12:]], dtype='float32')
    else:
        print('Could not load data for airfoil '+ a_name)
        continue
    # The input vector to the ANN
    # input =
    if len(polar_nums) < 1:
        continue
    # Create an input for each angle of attack
    for polar_line in polar_nums:
        aoa = polar_line[0]
        CL = polar_line[1]
        CD = polar_line[2]
        CM = polar_line[4]
        # [x1,y1,x2,y2,...,Re,Ma,AoA]
        input = [px,Re,Ma,aoa]
        #input = np.append(px,(Re,Ma,aoa))
        output = [CL,CD,CM]
        # airfoil_inputs.append(input.tolist())
        airfoil_inputs.append(input)
        airfoil_outputs.append(output)
# Ratio of train data
train = 0.8
# Each element of the input data consists of: [[pixel image],Re,Ma,AoA]
# Each element of the output data consists of: [CL,CD,CM]]
print(len(airfoil_outputs))
print('Almost done')
# Shuffle sample order, then split into train/test by the same permutation.
order = [i for i in range(len(airfoil_inputs))]
random.shuffle(order)
inp_train = [airfoil_inputs[i] for i in order[:math.ceil(train*len(airfoil_inputs))]]
inp_test = [airfoil_inputs[i] for i in order[math.ceil(train*len(airfoil_inputs)):]]
out_train = [airfoil_outputs[i] for i in order[:math.ceil(train*len(airfoil_outputs))]]
out_test = [airfoil_outputs[i] for i in order[math.ceil(train*len(airfoil_outputs)):]]
np.savez('aero_shape_opt\\datasets\\data_file_cnn',x_train=inp_train,x_test=inp_test,y_train=out_train,y_test=out_test)
# TO IMPORT THE NEWLY CREATED FILE:
data = np.load('aero_shape_opt\\datasets\\data_file_cnn.npz',allow_pickle=True)
x_train = data['x_train']
x_test = data['x_test']
y_train = data['y_train']
y_test = data['y_test']
print('Done')
|
def area(width, height):
    """Return the rectangle area; inputs may be numbers or numeric strings."""
    return int(width) * int(height)
# Interactive entry points; input() returns strings, converted inside area().
w = input("Enter width: ")
h = input("Enter height: ")
print(area(w,h))
|
20,963 | 8f065d826ed3b2656fe6138f8220190650e258c1 | """
The callback function of the demo slider app.
The core part is a Dash slider.
Refer to https://dash.plotly.com/dash-core-components/slider for details.
"""
from dash.dependencies import Input, Output
from .constants import TRACE_DAYS
# some online free pictures
RANDOM_IMAGE_URLS = [ # urls of 7 mock images
"http://images-assets.nasa.gov/image/PIA17680/PIA17680~orig.jpg",
"http://images-assets.nasa.gov/image/PIA18156/PIA18156~orig.jpg",
"http://images-assets.nasa.gov/image/PIA18795/PIA18795~orig.jpg",
"http://images-assets.nasa.gov/image/PIA02816/PIA02816~orig.jpg",
"http://images-assets.nasa.gov/image/PIA00948/PIA00948~orig.jpg",
"http://images-assets.nasa.gov/image/PIA01252/PIA01252~orig.jpg",
"http://images-assets.nasa.gov/image/PIA04596/PIA04596~orig.jpg"
]
# the callback function of the slider in
def register_demo_slider_callbacks(dashapp):
    """Attach the demo-slider callback to *dashapp*.

    The slider value (a day offset in [-(TRACE_DAYS-1), 0]) selects one of
    the mock images and is echoed back to the output container.
    """
    @dashapp.callback(  # multiple outputs, single input
        Output('demo-slider-center-img', 'src'),  # the image src to show
        Output('demo-slider-output-container', 'children'),
        [Input('demo-slider-slider', 'value')]
    )
    def update_output(selected_delta_value: int):
        """Map the slider's day offset to a mock image URL plus a label."""
        # TODO: replace this with true figures or Plotly figure objects
        image_url = RANDOM_IMAGE_URLS[selected_delta_value + TRACE_DAYS - 1]
        label = 'You have selected "{}", the data type is {}'.format(
            selected_delta_value, type(selected_delta_value))
        return image_url, label
|
20,964 | d3cfb20b0fa5e903ad161ca5c4e01e00bb11b394 | #!/usr/bin/env python3
# 12/01/2020
# lesson01
# Dev: Cody Yarger
# This script includes four functions that demonstrate four common Exceptions
# NameError, TypeError, SyntaxError and AttributeError.
# Name error function
def name_error():
    """Demonstrate NameError: ``y`` is undefined when the function runs."""
    x = y
# Type error function
def type_error():
    """Demonstrate TypeError: the local name shadows the builtin ``list``,
    so ``list(3)`` tries to call a list object, which is not callable."""
    list = [0, 1, 2]
    return list(3)
# Syntax error function
def syntax_error():
    """Raise SyntaxError when called.

    Bug fix: the original body contained the literal statement ``5 === 5``,
    which is itself a SyntaxError at module compile time and therefore made
    the WHOLE module unimportable (so none of the other demo functions
    could run). Compiling the bad source at call time demonstrates
    SyntaxError without breaking the import.
    """
    return compile("5 === 5", "<demo>", "eval")
# Attribute error function
def attribute_error():
    """Demonstrate AttributeError: ``str`` objects have no ``append``."""
    string = "text"
    string.append("attribute error")
|
20,965 | 7e0d095937357f855b283927aba4d1c2f7be9103 | from html.parser import HTMLParser
from urllib.request import urlopen
from urllib.parse import urlencode
from timeMask import timeMask
from os import walk
def insert(xs, g):
    """Return *xs* with a space inserted at every boundary where a
    character satisfying *g* is immediately followed by one that does not.

    Used below to re-space room strings at digit->non-digit boundaries.

    Bug fix: the original accumulated only ``xs[1:]`` and so silently
    dropped the FIRST character of the input (e.g. the leading letter of
    every room code).
    """
    if not xs:
        return ''
    ys = [xs[0]]
    for i, e in enumerate(xs[1:]):
        # xs[i] is the previous character (enumerate runs over xs[1:]).
        if not g(e) and g(xs[i]):
            ys.append(' ')
        ys.append(e)
    return ''.join(ys)
def handle(data):
    """Normalise one scraped course record in place.

    Keys are assumed to match the xlat mapping in Buffer.flush -- TODO
    confirm against the scraped page structure.
    """
    # 'instructor' holds "chinese / english"; split into the two name lists.
    a, b = data['instructor'].split(' / ')
    data['instructor'] = a.split('、')
    data['instructor_en'] = b.split()
    # A session string not starting with a slot/weekday marker means no schedule.
    if data['session'][0] not in '1234567890ABCDEF一二三四五六日':
        data['session'] = 'None'
    else:
        data['session'] = timeMask(data['session'].split(' / ')[0])
    data['crse_type'] = data['crse_type'].split('/')[0]
    # Re-space the room string at digit->non-digit boundaries, then split.
    data['place'] = insert(data['place'], lambda e: e in '0123456789').split()
class Handler():
    """Tracks one (tag, attribute) pattern through an HTML stream.

    ``guard`` is armed when a matching opening tag is seen; ``counter``
    counts same-name nested tags so the guard drops only on the matching
    close tag. While armed, text nodes are forwarded to the sink.
    """

    def __init__(self, tag, attr, pred, prcs):
        self.tag = tag        # tag name to watch (e.g. 'span')
        self.attr = attr      # attribute whose value selects the element
        self.attrv = ''       # attribute value of the currently open match
        self.guard = False    # True while inside a matching element
        self._pred = pred     # predicate over the attribute value
        self._prcs = prcs     # sink called with (attr value, text)
        self.counter = 0      # depth of same-name nested tags

    def pred(self, tag, attrs):
        """Handle an opening tag: track nesting and arm the guard on a match."""
        if self.guard and tag == self.tag:
            self.counter += 1
        if self.tag == tag and \
                self._pred(dict(attrs).get(self.attr, 'None')):
            self.guard = True
            self.attrv = dict(attrs)[self.attr]

    def prcs(self, data):
        """Forward stripped text content to the sink while the guard is armed."""
        if self.guard:
            self._prcs(self.attrv, data.strip())

    def clos(self, tag):
        """Handle a closing tag, dropping the guard at the original depth."""
        if self.guard and tag == self.tag:
            if self.counter == 0:
                self.guard = False
            else:
                self.counter -= 1
class Buffer():
    """Accumulates scraped (element-id, text) pairs for one course row and
    flushes them as a normalised record."""

    def __init__(self):
        self.dict = {}  # element id -> concatenated text

    def flush(self):
        """Translate collected element ids to record keys, normalise via
        handle(), print the record and reset the buffer."""
        xlat = {
            'qryresult0_yy_smtL': 'semester',  # academic year/term
            'qryresult0_coursenumL': 'code',  # course code
            'qryresult0_instructorHL': 'instructor',  # instructor(s)
            'qryresult0_pointL': 'point',  # credits
            'qryresult0_sessionL': 'session',  # time slot
            'qryresult0_placeL': 'place',  # location
            'qryresult0_wayL': 'sel_meth',  # selection method
            'qryresult0_distance_courseL': 'dist_crse',  # distance learning
            'qryresult0_MOIL': 'language',  # language of instruction
            'qryresult0_eligibleL': 'ofst_genx',  # counts toward general education
            'qryresult0_GECL': 'genx_type',  # general-education category
            'qryresult0_chargesL': 'charge',  # extra fee
            'qryresult0_auxiliaryL': 'aux',  # expanded minor
            'qryresult0_department_instituteL': 'depart',  # offering department
            'qryresult0_volumeL': 'volume',  # 1: semester course, 2: year course
            'qryresult0_CEL': 'crse_type',  # required/elective
            'qryresult0_kerlL': 'kernal',  # core general education
            'qryresult0_course_nameHL': 'crse',  # course name
            'qryresult0_chg_remL': 'chg_info',  # change information
            'qryresult0_noteL': 'note'  # remarks
        }
        data = {}
        # Missing fields default to the string 'None' (not the None object).
        for title in xlat:
            if title in self.dict:
                data[xlat[title]] = self.dict[title]
            else:
                data[xlat[title]] = 'None'
        handle(data)
        print(data)
        self.dict = {}

    def record(self, title, data):
        """Append *data* under *title*, concatenating split text nodes."""
        if title in self.dict:
            self.dict[title] += data
        else:
            self.dict[title] = data
def prefixOf(xs):
    """Return a predicate testing whether its argument starts with *xs*.

    Converted from a ``name = lambda ...`` assignment (PEP 8 E731) to a
    named closure; behaviour and call signature are unchanged.
    """
    def _starts_with(ys):
        return ys[:len(xs)] == xs
    return _starts_with
class CourseParser(HTMLParser):
    """HTMLParser that routes 'span'/'a' elements whose id starts with
    'qryresult0_' into the module-level ``logger`` buffer.

    NOTE(review): depends on the global ``logger`` created in the
    __main__ block below; using this class without it raises NameError --
    confirm intended usage.
    """

    def __init__(self):
        HTMLParser.__init__(self)
        # One Handler per interesting tag, all feeding logger.record.
        self.entries = \
            [Handler(tag, 'id', prefixOf('qryresult0_'), logger.record) \
             for tag in ['span', 'a']]

    def handle_starttag(self, tag, attrs):
        for prcser in self.entries:
            prcser.pred(tag, attrs)

    def handle_data(self, data):
        for prcser in self.entries:
            prcser.prcs(data)

    def handle_endtag(self, tag):
        for prcser in self.entries:
            prcser.clos(tag)
if __name__ == "__main__":
    logger = Buffer()
    parser = CourseParser()
    # Feed every previously fetched query-result page under qryTor/.
    for t in walk('qryTor'):
        for fn in t[2]:
            print(fn)
            parser.feed(open('qryTor/' + fn, 'r').read())
    # Emit any record still buffered after the last page.
    logger.flush()
|
20,966 | 5e95e8d1dbeff615acf49b05e821a221235b508e | import sys
import os
import time
import random
import pygame as pg
from settings import *
from sprites import *
from ui import *
#from moviepy.editor import VideoFileClip
class Robot:
    # Robot initialisation
    def __init__(self):
        """Initialise pygame, the display and the top-level run flags."""
        pg.init()
        pg.mixer.init()
        # Default display setup
        self.screen = pg.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
        # Borderless display setup (alternative)
        #self.screen = pg.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), pg.NOFRAME)
        # Raspberry Pi display setup (alternative)
        #self.screen = pg.display.set_mode((480, 320), pg.FULLSCREEN | pg.HWSURFACE | pg.DOUBLEBUF)
        pg.display.set_caption(TITLE)
        self.clock = pg.time.Clock()
        self.opening = True  # whether the opening screen should run
        self.runing = True   # whether the program keeps running
        # NOTE(review): 'load_date' may be a typo for 'load_data'; its
        # definition is outside this chunk -- confirm.
        self.load_date()
    # ------------------- Main section -------------------
    def main_new(self):
        """Build the main-menu scene (sub-sections plus icon sprites) and
        enter the main loop."""
        # NOTE(review): these pass the module-level ``robot`` instance,
        # not ``self`` -- works only with a single global Robot; confirm.
        self.setting_section = Setting_section(robot)
        self.cook_section = Cook_section(robot)
        self.camera_section = Camera_section(robot)
        self.cctv_section = Cctv_section(robot)
        # Menu icons laid out on a 3x3 grid of the screen.
        self.main_menu = pg.sprite.Group()
        self.main_menu_setting = Menu(
            self.setting_icon, (SCREEN_WIDTH / 3), (SCREEN_HEIGHT / 3))
        self.main_menu_cook = Menu(
            self.cook_icon, (SCREEN_WIDTH / 3), (2 * (SCREEN_HEIGHT / 3)))
        self.main_menu_camera = Menu(
            self.camera_icon, (2 * (SCREEN_WIDTH / 3)), (SCREEN_HEIGHT / 3))
        self.main_menu_cctv = Menu(
            self.cctv_icon, (2 * (SCREEN_WIDTH / 3)), (2 * (SCREEN_HEIGHT / 3)))
        self.main_menu_marc = Menu(self.marc_icon, 60, 65)
        self.main_menu.add(self.main_menu_marc)
        self.main_menu.add(self.main_menu_setting)
        self.main_menu.add(self.main_menu_cook)
        self.main_menu.add(self.main_menu_camera)
        self.main_menu.add(self.main_menu_cctv)
        self.main_loop = True
        self.main_run()
    def main_run(self):
        """Drive the main loop (events, update, draw) at the configured FPS."""
        print("메인 섹션 실행")
        while self.main_loop:  # main loop
            self.clock.tick(FPS)
            self.main_events()
            self.main_update()
            self.main_draw()
        # Fade the background music out instead of cutting it off abruptly.
        pg.mixer.music.fadeout(500)
    def main_events(self):
        """Dispatch quit and left-click menu events for the main screen."""
        #main loop - events
        for event in pg.event.get():
            if event.type == pg.QUIT:
                if self.main_loop:
                    # Stop every loop flag so the program unwinds cleanly.
                    self.main_loop = False
                    self.runing = False
                    self.opening = False
                    print("프로그램 종료")
            elif event.type == pg.MOUSEBUTTONDOWN:
                # 1 is the left mouse button, 2 is middle, 3 is right.
                if event.button == 1:
                    # Each menu icon owns its hit test; open the matching section.
                    if self.main_menu_setting.mouse_ckeck(event.pos):
                        print("셋팅 섹션")
                        self.setting_section.new()
                    elif self.main_menu_cook.mouse_ckeck(event.pos):
                        print("요리 섹션")
                        self.cook_section.new()
                    elif self.main_menu_camera.mouse_ckeck(event.pos):
                        print("카메라 섹션")
                        self.camera_section.new()
                    elif self.main_menu_cctv.mouse_ckeck(event.pos):
                        print("CCTV 섹션")
                        self.cctv_section.new()
def main_update(self):
self.time = time.strftime("%Y %m %d %I:%M").split(" ")
self.time = self.time[0] + "년 " + self.time[1] + "월 " + self.time[2] + "일 " + self.time[3]
self.main_menu.update()
    def main_draw(self):
        """Paint the main screen: background, menu bar, icons, caption and clock."""
        pg.draw.rect(
            self.screen, MAIN_SECTION_BACKGROUND, [0, 0, SCREEN_WIDTH, SCREEN_HEIGHT])
        # Top menu bar strip, 60 px tall.
        pg.draw.rect(
            self.screen, MAIN_SECTION_MENUBAR, [0, 0, SCREEN_WIDTH, 60])
        self.main_menu.draw(self.screen)
        self.draw_text("메인 페이지 입니다.", 20,
            MAIN_SECTION_FONT_COLOR, (SCREEN_WIDTH / 2), 30)
        # Clock caption in the top-right area (self.time set by main_update).
        self.draw_text(
            self.time, 15, MAIN_SECTION_FONT_COLOR, 7 * (SCREEN_WIDTH / 9) + 60, 30)
        pg.display.update()
    #-------------------- main ----------------------
    #-------------------- opening ---------------------
    # Opening splash screen
    def show_opening_screen(self):
        """Start looping background music and run the opening splash sequence."""
        print("오프닝 시작")
        pg.mixer.music.load(os.path.join(SOUND_DIR, 'Background_sound1.mp3'))
        pg.mixer.music.play(loops=-1)
        self.opening_new()
    def opening_new(self):
        """Create the opening sprite group and enter the opening loop."""
        self.opening_sprites_group = pg.sprite.Group()
        self.opening_tick = pg.time.get_ticks()
        self.opening_sprites = Opening(self)
        self.opening_sprites_group.add(self.opening_sprites)
        self.opening_run()
    def opening_run(self):
        """Run the opening loop until the splash finishes or the app quits."""
        #opening loop
        self.opening_playing = True
        while self.opening_playing:
            self.clock.tick(FPS)
            self.opening_events()
            self.opening_update()
            self.opening_draw()
    def opening_events(self):
        """Allow the user to quit the program during the opening splash."""
        for event in pg.event.get():
            if event.type == pg.QUIT:
                if self.opening_playing:
                    self.opening_playing = False
                    self.runing = False
                    print("프로그램 종료")
    def opening_update(self):
        """Advance the opening sprite animation one tick."""
        self.opening_sprites_group.update()
    def opening_draw(self):
        """Draw the splash frame that matches the current opening step."""
        pg.draw.rect(self.screen, OPENING_SECTION_BACKGROUND, [0, 0, SCREEN_WIDTH, SCREEN_HEIGHT])
        # Opening caption handling per animation step.
        if self.opening_sprites.step == 0:
            pg.draw.rect(self.screen, OPENING_SECTION_BACKGROUND, [0, 0, SCREEN_WIDTH, SCREEN_HEIGHT])
        elif self.opening_sprites.step == 1:
            self.draw_text("안녕하세요.", 70, OPENINH_SECTION_FONT_COLOR, (SCREEN_WIDTH / 2), (SCREEN_HEIGHT / 2))
        elif self.opening_sprites.step == 2:
            self.draw_text("로봇을 부팅중입니다.", 70, OPENINH_SECTION_FONT_COLOR, (SCREEN_WIDTH / 2), (SCREEN_HEIGHT / 2))
        elif self.opening_sprites.step == 3:
            pg.draw.rect(self.screen, OPENING_SECTION_BACKGROUND, [0, 0, SCREEN_WIDTH, SCREEN_HEIGHT])
        else:
            # Any later step ends the splash sequence.
            print("오프닝 종료")
            self.opening_playing = False
            self.opening = False
        pg.display.update()
#-------------------------------------------------------
#필요한 외부 데이터를 불러오는 함수
def load_date(self):
#image
self.setting_icon = pg.image.load("../source/image/" + SETTING_ICON).convert_alpha()
self.cook_icon = pg.image.load("../source/image/" + COOK_ICON).convert_alpha()
self.camera_icon = pg.image.load("../source/image/" + CCTV_ICON).convert_alpha()
self.cctv_icon = pg.image.load("../source/image/" + CAMERA_ICON).convert_alpha()
self.marc_icon = pg.image.load("../source/image/" + MARC_ICON).convert_alpha()
self.back_icon = pg.image.load("../source/image/" + BACK_ICON).convert_alpha()
#txt
self.font_hmkmrhd = "../source/font/" + HMKMRHD
#sound(효과음)
#self.Background_sound = pg.mixer.Sound(os.path.join(SOUND_DIR, 'Background_sound1.mp3'))
    # Helper for rendering centred text onto the screen surface.
    def draw_text(self, text, size, color, x, y):
        """Render *text* centred at (x, y) using the HMKMRHD font at *size* pt."""
        font = pg.font.Font(self.font_hmkmrhd, size)
        text_surface = font.render(text, True, color)
        text_rect = text_surface.get_rect()
        text_rect.center = (x, y)
        self.screen.blit(text_surface, text_rect)
        #render(text, antialias, color, background=None) -> Surface
if __name__ == "__main__":
    robot = Robot()
    """
    while robot.opening:
        robot.show_opening_screen()
    while robot.runing:
        robot.main_new()
    """
    # The opening/main loop above is disabled; jump straight to the menu.
    robot.main_new()
    pg.quit()
|
20,967 | 519d5484b9bbec55c215cfa3000614c7714295de | import os
from qgis.core import QgsVectorLayer, QgsProject, Qgis
from qgis.utils import iface
from qgis.PyQt import QtWidgets
from qgis.PyQt.QtCore import QSettings
def load_from_wfs(layername):
    """Load the WFS layer named *layername* from the project GeoServer.

    The name is normalised (lower-case, spaces to underscores) before being
    used as the WFS typename. On failure a critical message is pushed to the
    QGIS message bar rather than raising.

    Bug fix: corrected the 'invaild' typo in the user-visible error message.
    """
    layername = layername.lower().replace(" ","_")
    uri = "http://18.223.98.214:8080/geoserver/wfs?srsname=EPSG:32662&typename=fantasygis_wow:v_" + layername + "&version=1.1.0&request=GetFeature&service=WFS"
    vlayer = QgsVectorLayer(uri, layername, "WFS")
    if vlayer.isValid():
        QgsProject.instance().addMapLayer(vlayer)
    else:
        iface.messageBar().pushMessage("Error", "Layer is invalid or connection with geoserver is lost! Please report this issue.",
            level=Qgis.Critical)
def download_vector(dlgVector):
    """Load every WFS layer whose checkbox is ticked in either group box."""
    ticked_names = [
        checkbox.text()
        for group in (dlgVector.groupBox, dlgVector.groupBox_2)
        for checkbox in group.findChildren(QtWidgets.QCheckBox)
        if checkbox.isChecked()
    ]
    for layer_name in ticked_names:
        load_from_wfs(layer_name)
20,968 | c656a1929130eb1844b2ad67105834a11d6dc546 | # Copyright 2018 Michael DeHaan LLC, <michael@michaeldehaan.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opsmop.providers.provider import Provider
class Service(Provider):
    """
    Shared fuzzy status matching that all service providers can reuse.
    """

    def _is_started(self, status):
        """True when *status* describes a running service."""
        return bool(status) and status in ('running', 'started')

    def _is_enabled(self, status):
        """True when *status* describes a service known to the boot system."""
        return bool(status) and status in ('running', 'started', 'stopped', 'enabled')

    def plan(self, on_boot=True):
        """Record the start/stop and enable/disable actions needed to reach the desired state."""
        status = self._get_status()
        running = self._is_started(status)
        if running and not self.started:
            self.needs('stop')
        elif not running:
            if self.started:
                self.needs('start')
            elif self.restarted:
                self.needs('restart')
        if on_boot:
            # This part of the planner can be switched off for services that
            # don't support boot-time management, allowing them to not fail
            # when they are only able to execute part of the plan.
            enabled = self._is_enabled(status)
            if enabled and not self.enabled:
                self.needs('disable')
            elif not enabled and self.enabled:
                self.needs('enable')
|
20,969 | 15ba934de4008f75eeb469d72e1788d32617bcf5 | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from lpce import LateralPiece
from lrg import LateralRollGap
from ufd import UniForcDist
from crlc import CompositeRollStackCrown
from ..utils import mathuty
from ..config import setting
import logging
logging.basicConfig(level=logging.INFO, filename="env_print.log")
class Envelope():
    """Per-stand profile/crown envelope calculator for a rolling line."""
    def __init__(self, input_df, stk_crn_df):
        """Store schedule inputs and load the per-stand limit table.

        input_df   -- per-pass rolling schedule data (pandas DataFrame)
        stk_crn_df -- roll stack crown data (pandas DataFrame)
        """
        # Pass 0 plus the seven numbered stands; std_vec covers stands only.
        self.pass_vec = np.array([0, 1, 2, 3, 4, 5, 6, 7])
        self.std_vec = np.array([1, 2, 3, 4, 5, 6, 7])
        self.input_df = input_df
        self.stk_crn_df = stk_crn_df
        # lim_nom dataframe: nominal limits for the configured roll line.
        self.lim_df = pd.read_excel(
            "{}cfg_env/std_{}.xlsx".format(setting.CFG_DIR, setting.ROLL_LINE))
def Calculate(self):
input_df = self.input_df
lim_df = self.lim_df
stk_crn_df = self.stk_crn_df
ufd = UniForcDist(input_df)
lpce = LateralPiece(input_df)
lrg = LateralRollGap(input_df, lpce)
crlc = CompositeRollStackCrown(input_df, stk_crn_df)
# logging.info(lim_df)
# lim_df["pos_shft_lim_min"] = lim_df["pos_shft_lim_max"] = 0
# 计算辊系凸度
lim_df["pce_wr_crn_lim_min"], lim_df["wr_br_crn_lim_min"] = (
crlc.Crns_vector(lim_df["pos_shft_lim_min"]))
lim_df["pce_wr_crn_lim_max"], lim_df["wr_br_crn_lim_max"] = (
crlc.Crns_vector(lim_df["pos_shft_lim_max"]))
# 计算单位轧制力
input_df["force_pu_wid"] = (input_df["rolling_force"] /
input_df["en_width"])
lim_df["force_pu_wid_lim_min"] = input_df["force_pu_wid"]
lim_df["force_pu_wid_lim_max"] = input_df["force_pu_wid"]
# nom窜辊位辊系凸度
lim_df["pce_wr_crn_nom"], lim_df["wr_br_crn_nom"] = (
crlc.Crns_vector(lim_df["pos_shft_nom"])
)
# lim的max/min与env中的min/max对应上
pass_vec = np.array([0, 1, 2, 3, 4, 5, 6, 7])
env_df = pd.DataFrame(index=pass_vec)
env_df["force_bnd_env_min"] = lim_df["force_bnd_lim_max"]
env_df["force_bnd_env_max"] = lim_df["force_bnd_lim_min"]
env_df["pos_shft_env_min"] = lim_df["pos_shft_lim_max"]
env_df["pos_shft_env_max"] = lim_df["pos_shft_lim_min"]
env_df["force_pu_wid_env_min"] = lim_df["force_pu_wid_lim_min"]
env_df["force_pu_wid_env_max"] = lim_df["force_pu_wid_lim_max"]
env_df["pce_wr_crn_env_min"] = lim_df["pce_wr_crn_lim_max"]
env_df["pce_wr_crn_env_max"] = lim_df["pce_wr_crn_lim_min"]
env_df["wr_br_crn_env_min"] = lim_df["wr_br_crn_lim_max"]
env_df["wr_br_crn_env_max"] = lim_df["wr_br_crn_lim_min"]
# logging.info(env_df)
for m__ in ["min", "max"]:
for std in self.std_vec:
env_df.loc[std, "ufd_pu_prf_env_{}".format(m__)] = (
ufd.Prf(
std,
env_df["force_pu_wid_env_{}".format(m__)][std],
env_df["force_bnd_env_{}".format(m__)][std],
env_df["pce_wr_crn_env_{}".format(m__)][std],
env_df["wr_br_crn_env_{}".format(m__)][std]) /
input_df["ex_thick"][std])
bckl_list = ["we", "cb"]
for bckl in bckl_list:
lim_df["std_ex_strn_lim_{}".format(bckl)] = (
lpce.df["bckl_lim_{}".format(bckl)])
# 计算各机架入口有效单位凸度极限范围
# 后期用cLRGD::Ef_En_PU_Prf1(..)替换这个计算过程
for std in self.std_vec:
lim_df.loc[std - 1, "ef_pu_prf_lim_min"] = (
env_df.loc[std, "ufd_pu_prf_env_min"] -
lim_df.loc[std, "std_ex_strn_lim_we"] *
lrg.df.loc[std, "prf_chg_attn_fac"] /
lrg.df.loc[std, "pce_infl_cof"])
lim_df.loc[std - 1, "ef_pu_prf_lim_max"] = (
env_df.loc[std, "ufd_pu_prf_env_max"] -
lim_df.loc[std, "std_ex_strn_lim_cb"] *
lrg.df.loc[std, "prf_chg_attn_fac"] /
lrg.df.loc[std, "pce_infl_cof"])
if 2250 == setting.ROLL_LINE:
lim_df.loc[std - 1, "ef_pu_prf_lim_min"] = -2
lim_df.loc[std - 1, "ef_pu_prf_lim_max"] = 2
if std == 7:
lim_df.loc[std, "ef_pu_prf_lim_min"] = -1
lim_df.loc[std, "ef_pu_prf_lim_max"] = 1
# mean指的意思是都一样的, 初始化中间坯的ef_pu_prf_env为
env_df.loc[0, "ef_pu_prf_env_min"] = input_df["pu_prf_pass0"].mean()
env_df.loc[0, "ef_pu_prf_env_max"] = input_df["pu_prf_pass0"].mean()
# 包络线对应的极限机架号
# pas_env_lim_min = 0
# pas_env_lim_max = 0
# ========================= 协调单位凸度包络线 ================================
loop_count = 0
std = 1
while std > 0:
print(std)
pce_wr_crn = 0
wr_br_crn = 0
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# --------------- 计算各机架出口有效单位凸度包络线下限 -------------------
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
move_prv_min = False
# 注意入口有效单位凸度包络线为上道次有效单位凸度包络线
env_df.loc[std, "ef_pu_prf_env_min"] = (
lrg.calc(std, "Ef_Ex_PU_Prf3")(
env_df["ef_pu_prf_env_min"][std - 1],
env_df["ufd_pu_prf_env_min"][std]))
print(env_df.loc[std, "ef_pu_prf_env_min"])
print(lim_df.loc[std, "ef_pu_prf_lim_min"])
# 若出口有效单位凸度包络线下限小于极限值下限,修正出口有效单位凸度包络线下限
if (env_df["ef_pu_prf_env_min"][std] <
lim_df["ef_pu_prf_lim_min"][std]):
print("进行了修正")
# 将有效比例凸度极限的最小值作为新的目标,之后进行重新计算ufd_pu_prf
ef_ex_pu_prf = lim_df["ef_pu_prf_lim_min"][std]
# 重新计算ufd_pu_prf
# 注意入口有效单位凸度包络线为上道次有效单位凸度包络线
ufd_pu_prf = lrg.calc(std, "UFD_PU_Prf3")(
env_df["ef_pu_prf_env_min"][std - 1], ef_ex_pu_prf)
# ufd状态异常,对>force_pu_wid_lim做偏移量为10的修正,在这里忽略
# 从force_chg_clmp判定的条件分支开始
istd_ex_pu_prf = lrg.calc(std, "Istd_Ex_PU_Prf0")(
lim_df["std_ex_strn_lim_we"][std], ef_ex_pu_prf)
ef_en_pu_prf = lrg.calc(std, "Ef_En_PU_Prf5")(
lim_df["std_ex_strn_lim_we"][std], istd_ex_pu_prf)
# 利用上一道次的ef_pu_prf_env来clamp获得ef_en_pu_prf_buf
# (注意是否要提前定义这个buf)
ef_en_pu_prf_buf = mathuty.clamp(
ef_en_pu_prf,
env_df["ef_pu_prf_env_min"][std - 1],
env_df["ef_pu_prf_env_max"][std - 1])
# 更新move_prv标记
move_prv_min = ((
ef_en_pu_prf_buf !=
env_df["ef_pu_prf_env_min"][std - 1]
) and (
env_df["ef_pu_prf_env_min"][std - 1] !=
env_df["ef_pu_prf_env_max"][std - 1]
))
# 更新上一道次或入口有效单位凸度极限的最小值,注意是极限
lim_df.loc[std - 1, "ef_pu_prf_lim_min"] = ef_en_pu_prf_buf
# 如果不能前移,则将入口有效包络线的下限赋值给ef_en_pu_prf_buf
if not move_prv_min:
ef_en_pu_prf_buf = env_df["ef_pu_prf_env_min"][std - 1]
# --- force_chg_clmp判定的条件分支结束 ---
# output (first) per unit prof
pp_df = pd.DataFrame()
pp_df.loc[std, "ef_en_pu_prf"] = ef_en_pu_prf
pp_df.loc[std, "move_prv_min"] = move_prv_min
# 输出后计算ufd单位凸度
ufd_pu_prf = lrg.calc(std, "UFD_PU_Prf3")(
ef_en_pu_prf_buf, ef_ex_pu_prf)
# 之后是窜辊和弯辊力介入调整计算辊系凸度
pce_wr_crn, wr_br_crn = ufd.Crns(
std,
ufd_pu_prf * input_df["ex_thick"][std],
env_df["force_pu_wid_env_min"][std],
env_df["force_bnd_env_min"][std],
pce_wr_crn,
wr_br_crn)
# 窜辊位置包络线下限更新
env_df.loc[std, "pos_shft_env_min"] = crlc.Shft_Pos(
std,
pce_wr_crn,
lim_df["pce_wr_crn_nom"][std],
lim_df,
env_df["pos_shft_env_min"][std])
# 窜辊位置包络线下限限幅
env_df.loc[std, "pos_shft_env_min"] = mathuty.clamp(
env_df["pos_shft_env_min"][std],
lim_df["pos_shft_lim_min"][std],
lim_df["pos_shft_lim_max"][std])
# 根据上面的窜辊位置重计算更新综合辊系凸度
env_df["pce_wr_crn_env_min"], env_df["wr_br_crn_env_min"] = (
crlc.Crns(std, env_df["pos_shft_env_min"][std])
)
pwc_org = env_df["pce_wr_crn_env_min"]
# 用ufd.Pce_WR_Crn(..)计算pce_wr_crn
pce_wr_crn = ufd.Pce_WR_Crn(
std,
ufd_pu_prf * input_df["ex_thick"][std],
env_df["force_pu_wid_env_min"][std],
env_df["force_bnd_env_min"][std],
env_df["wr_br_crn_env_min"][std])
# 更新弯辊力包络线的下限
force_bnd_des = ufd.Bnd_Frc(
std,
ufd_pu_prf * input_df["ex_thick"][std],
env_df["force_pu_wid_env_min"][std],
env_df["pce_wr_crn_env_min"][std],
env_df["wr_br_crn_env_min"][std])
# 弯辊力计算值和原值是否相等的指示器
force_bnd_clmp = (
force_bnd_des != env_df["force_bnd_env_min"][std])
# 计算均载辊缝单位凸度包络线下限
env_df["ufd_pu_prf_env_min"][std] = (
ufd.Prf(
std,
env_df["force_pu_wid_env_min"][std],
env_df["force_bnd_env_min"][std],
env_df["pce_wr_crn_env_min"][std],
env_df["wr_br_crn_env_min"][std]) /
input_df["ex_thick"][std])
# force_bnd_clmp判断以及处理有效单位凸度
if force_bnd_clmp:
force_bnd_clmp = False
ef_en_pu_prf = lrg.calc(std, "Ef_En_PU_Prf3")(
env_df["ufd_pu_prf_env_min"][std], ef_ex_pu_prf)
# 对入口有效单位凸度进行限幅
ef_en_pu_prf_buf = mathuty.clamp(
ef_en_pu_prf,
env_df["ef_pu_prf_env_min"][std - 1],
env_df["ef_pu_prf_env_max"][std - 1])
# move_prv指示器更新
move_prv_min = move_prv_min | ((
ef_en_pu_prf_buf !=
lim_df["ef_pu_prf_lim_min"][std - 1]) & (
env_df["ef_pu_prf_env_min"][std - 1] !=
env_df["ef_pu_prf_env_max"][std - 1]))
lim_df["ef_pu_prf_lim_min"][std - 1] = ef_en_pu_prf_buf
# ----------------------------------------------------
# ----------------- 浪形失稳条件判断 -----------------
# ----------------------------------------------------
if not move_prv_min:
std_ex_strn = lrg.calc(std, "Std_Ex_Strn1")(
env_df["ef_pu_prf_env_min"][std - 1],
env_df["ufd_pu_prf_env_min"][std])
# std_ex_strn低于出口应变差中浪极限
# std_ex_strn高于出口应变差边浪极限
# ++++++++++++++++ 最终计算有效单位凸度下限(算是更新) +++++++++++++++++++
env_df["ef_pu_prf_env_min"][std] = lrg.calc(std, "Ef_Ex_PU_Prf3")(
env_df["ef_pu_prf_env_min"][std - 1],
env_df["ufd_pu_prf_env_min"][std])
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# --------------- 计算各机架出口有效单位凸度包络线上限 -------------------
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# 注意入口有效单位凸度包络线为上道次有效单位凸度包络线
move_prv_max = False
# 注意入口有效单位凸度包络线为上道次有效单位凸度包络线
env_df.loc[std, "ef_pu_prf_env_max"] = (
lrg.calc(std, "Ef_Ex_PU_Prf3")(
env_df["ef_pu_prf_env_max"][std - 1],
env_df["ufd_pu_prf_env_max"][std]))
print(env_df.loc[std, "ef_pu_prf_env_max"])
print(lim_df.loc[std, "ef_pu_prf_lim_max"])
print("--------")
# 若出口有效单位凸度包络线上限小于极限值上限,修正出口有效单位凸度包络线上限
if (env_df["ef_pu_prf_env_max"][std] >
lim_df["ef_pu_prf_lim_max"][std]):
# 将有效比例凸度极限的最小值作为新的目标,之后进行重新计算ufd_pu_prf
ef_ex_pu_prf = lim_df["ef_pu_prf_lim_max"][std]
# 重新计算ufd_pu_prf
# 注意入口有效单位凸度包络线为上道次有效单位凸度包络线
ufd_pu_prf = lrg.calc(std, "UFD_PU_Prf3")(
env_df["ef_pu_prf_env_max"][std - 1], ef_ex_pu_prf)
# ufd状态异常,对>force_pu_wid_lim做偏移量为10的修正,在这里忽略
# --- 从force_chg_clmp判定的条件分支开始 ---
istd_ex_pu_prf = lrg.calc(std, "Istd_Ex_PU_Prf0")(
lim_df["std_ex_strn_lim_cb"][std], ef_ex_pu_prf)
ef_en_pu_prf = lrg.calc(std, "Ef_En_PU_Prf5")(
lim_df["std_ex_strn_lim_cb"][std], istd_ex_pu_prf)
# 利用上一道次的ef_pu_prf_env来clamp获得ef_en_pu_prf_buf
# (注意是否要提前定义这个buf)
ef_en_pu_prf_buf = mathuty.clamp(
ef_en_pu_prf,
env_df["ef_pu_prf_env_min"][std - 1],
env_df["ef_pu_prf_env_max"][std - 1])
# 更新move_prv标记
move_prv_max = ((
ef_en_pu_prf_buf !=
env_df["ef_pu_prf_env_max"][std - 1]
) and (
env_df["ef_pu_prf_env_min"][std - 1] !=
env_df["ef_pu_prf_env_max"][std - 1]
))
# 更新上一道次或入口有效单位凸度极限的最小值,注意是极限
lim_df.loc[std - 1, "ef_pu_prf_lim_max"] = ef_en_pu_prf_buf
# 如果不能前移,则将入口有效包络线的上限赋值给ef_en_pu_prf_buf
if not move_prv_max:
ef_en_pu_prf_buf = env_df["ef_pu_prf_env_max"][std - 1]
# --- force_chg_clmp判定的条件分支结束 ---
# output (first) per unit prof
pp_df = pd.DataFrame()
pp_df.loc[std, "ef_en_pu_prf"] = ef_en_pu_prf
pp_df.loc[std, "move_prv_max"] = move_prv_max
# 输出后计算ufd单位凸度
ufd_pu_prf = lrg.calc(std, "UFD_PU_Prf3")(
ef_en_pu_prf_buf, ef_ex_pu_prf)
# 之后是窜辊和弯辊力介入调整计算辊系凸度
pce_wr_crn, wr_br_crn = ufd.Crns(
std,
ufd_pu_prf * input_df["ex_thick"][std],
env_df["force_pu_wid_env_max"][std],
env_df["force_bnd_env_max"][std],
pce_wr_crn,
wr_br_crn)
# 窜辊位置包络线上限更新
env_df.loc[std, "pos_shft_env_max"] = crlc.Shft_Pos(
std,
pce_wr_crn,
pwc_org,
lim_df,
env_df["pos_shft_env_max"][std])
# 窜辊位置包络线上限限幅
env_df.loc[std, "pos_shft_env_max"] = mathuty.clamp(
env_df["pos_shft_env_max"][std],
lim_df["pos_shft_lim_min"][std],
lim_df["pos_shft_lim_max"][std])
# 根据上面的窜辊位置重计算更新综合辊系凸度
env_df["pce_wr_crn_env_max"], env_df["wr_br_crn_env_max"] = (
crlc.Crns(std, env_df["pos_shft_env_max"][std])
)
# 用ufd.Pce_WR_Crn(..)计算pce_wr_crn
pce_wr_crn = ufd.Pce_WR_Crn(
std,
ufd_pu_prf * input_df["ex_thick"][std],
env_df["force_pu_wid_env_max"][std],
env_df["force_bnd_env_max"][std],
env_df["wr_br_crn_env_max"][std])
# 更新弯辊力包络线的上限
force_bnd_des = ufd.Bnd_Frc(
std,
ufd_pu_prf * input_df["ex_thick"][std],
env_df["force_pu_wid_env_max"][std],
env_df["pce_wr_crn_env_max"][std],
env_df["wr_br_crn_env_max"][std])
# 弯辊力计算值和原值是否相等的指示器
force_bnd_clmp = (
force_bnd_des != env_df["force_bnd_env_max"][std])
# 计算均载辊缝单位凸度包络线上限
env_df["ufd_pu_prf_env_max"][std] = (
ufd.Prf(
std,
env_df["force_pu_wid_env_max"][std],
env_df["force_bnd_env_max"][std],
env_df["pce_wr_crn_env_max"][std],
env_df["wr_br_crn_env_max"][std]) /
input_df["ex_thick"][std])
# force_bnd_clmp判断以及处理有效单位凸度
if force_bnd_clmp:
force_bnd_clmp = False
ef_en_pu_prf = lrg.calc(std, "Ef_En_PU_Prf3")(
env_df["ufd_pu_prf_env_max"][std], ef_ex_pu_prf)
# 对入口有效单位凸度进行限幅
ef_en_pu_prf_buf = mathuty.clamp(
ef_en_pu_prf,
env_df["ef_pu_prf_env_min"][std - 1],
env_df["ef_pu_prf_env_max"][std - 1])
# move_prv指示器更新
move_prv_max = move_prv_max | ((
ef_en_pu_prf_buf !=
lim_df["ef_pu_prf_lim_max"][std - 1]
) & (
env_df["ef_pu_prf_env_min"][std - 1] !=
env_df["ef_pu_prf_env_max"][std - 1]
))
lim_df["ef_pu_prf_lim_max"][std - 1] = ef_en_pu_prf_buf
# ----------------------------------------------------
# ----------------- 浪形失稳条件判断 -----------------
# ----------------------------------------------------
if not move_prv_max:
std_ex_strn = lrg.calc(std, "Std_Ex_Strn1")(
env_df["ef_pu_prf_env_max"][std - 1],
env_df["ufd_pu_prf_env_max"][std])
# std_ex_strn低于出口应变差中浪极限
# std_ex_strn高于出口应变差边浪极限
# ++++++++++++++++ 最终计算有效单位凸度上限 +++++++++++++++++++
env_df["ef_pu_prf_env_max"][std] = lrg.calc(std, "Ef_Ex_PU_Prf3")(
env_df["ef_pu_prf_env_max"][std - 1],
env_df["ufd_pu_prf_env_max"][std])
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ------------------------ 每个循环周期末的迭代处理 ----------------------
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if move_prv_min | move_prv_max:
loop_count = loop_count + 1
if loop_count > pow(pass_vec[-1] - 1, 2):
if 7 == std:
break
else:
std = std + 1
else:
std = std - 1
else:
if 7 == std:
break
else:
std = std + 1
# 结束--计算各机架出口有效单位凸度包络线下限和上限,在循环中进行
# loop_count计数器 超出极限的监控
if loop_count > pow(pass_vec[-1] - 2, 2):
logging.info("loop counter exceeded limit")
# =============== 最后一波计算以及检查确认工作 ===================
std = 1
while std > 0:
mxx_list = ["max", "min"]
for m__ in mxx_list:
env_df.loc[std, "ufd_pu_prf_env_{}".format(m__)] = (
ufd.Prf(
std,
env_df["force_pu_wid_env_{}".format(m__)][std],
env_df["force_bnd_env_{}".format(m__)][std],
env_df["pce_wr_crn_env_{}".format(m__)][std],
env_df["wr_br_crn_env_{}".format(m__)][std]) /
input_df["ex_thick"][std])
env_df.loc[std, "ef_pu_prf_env_{}".format(m__)] = (
lrg.calc(std, "Ef_Ex_PU_Prf3")(
env_df["ef_pu_prf_env_{}".format(m__)][std - 1],
env_df["ufd_pu_prf_env_{}".format(m__)][std]))
std_ex_strn = lrg.calc(std, "Std_Ex_Strn1")(
env_df["ef_pu_prf_env_{}".format(m__)][std],
env_df["ufd_pu_prf_env_{}".format(m__)][std])
# 计算比例凸度包络线上下限
env_df.loc[std, "pu_prf_env_{}".format(m__)] = (
lrg.calc(std, "Istd_Ex_PU_Prf0")(
std_ex_strn,
env_df["ef_pu_prf_env_{}".format(m__)][std]))
# 检查确认
parameter_list = [
"pu_prf_env",
"ef_pu_prf_env",
"ufd_pu_prf_env",
"force_pu_wid_env"]
for para in parameter_list:
scratch = max(
env_df["{}_min".format(para)][std],
env_df["{}_max".format(para)][std])
env_df["{}_min".format(para)][std] = min(
env_df["{}_min".format(para)][std],
env_df["{}_max".format(para)][std])
env_df["{}_max".format(para)][std] = scratch
# 迭代计数器处理
if 7 == std:
break
else:
std = std + 1
print(env_df)
print(lim_df)
|
20,970 | c1b29078b805b26badd8c0c93f6a912c4c53b8e3 | # -*- coding: utf-8 -*-
__author__ = 'ZombieGroup'
def get_hash_id(soup):
    """Return the data-id attribute of the zhihu follow button in *soup*."""
    follow_button = soup.find(
        "button", class_="zg-btn zg-btn-follow zm-rich-follow-btn")
    return follow_button['data-id']
def get_xsrf(soup):
    """Return the anti-CSRF token value from the page's hidden _xsrf input."""
    token_input = soup.find("input", {"name": "_xsrf"})
    return token_input['value']
debug_info_flag = True  # module-wide switch; presumably toggles debug output elsewhere — TODO confirm (unused in this file)
|
20,971 | 369391afcd7e4f92df642c76d78a8a23c452e327 | def print_list(list):
new_list = []
i = 0
while i < len(list):
for element in list[i]:
new_list.append(element)
i += 1
return new_list
# Demo: flatten the sample nested list and show the result.
given_List = [[1, 3], [3, 6]]
print(print_list(given_List))
|
20,972 | 46b52b7acf417871acf919a2816cb8c4a88c4c6c | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Folder watch: application to monitor a folder.
Application that displays an icon in the notification area. The icon is
updated when a new file appears in a specific folder. A click on the icon
opens the new file.
"""
import gobject
import gtk
import os
import stat
import subprocess
import time
class FolderWatch(object):
    """An icon that shows a folder status (useful for CupsPDF for example)."""

    def __init__(self):
        """Create the icon and init status."""
        self._next_update = None
        self._last_file_seen = None
        self._folder = "/home/gl/PDF/"
        self._command = "/home/gl/bin/show_last_pdf"
        self.statusicon = gtk.StatusIcon()
        self.statusicon.set_from_stock(gtk.STOCK_PRINT)
        self.statusicon.connect("popup-menu", self.right_click_event)
        self.statusicon.connect("activate", self.activated)
        self.statusicon.set_tooltip("FolderWatch")

    def activated(self, icon):
        """Handle the activate event: reset the icon and open the last file."""
        self.statusicon.set_from_stock(gtk.STOCK_PRINT)
        self.statusicon.set_tooltip("FolderWatch")
        subprocess.call([self._command], shell=True)

    def refresh(self):
        """Refresh the icon status, throttled to roughly one scan per 0.8 s."""
        if self._next_update is None or time.time() > self._next_update:
            self._refresh()
            self._next_update = time.time() + 0.8
        # Returning True keeps the gobject timeout alive.
        return True

    def right_click_event(self, icon, button, time):
        """Handle right click event on icon: show the About/Quit menu."""
        menu = gtk.Menu()
        about = gtk.MenuItem("About")
        quit_item = gtk.MenuItem("Quit")  # renamed so the builtin quit is not shadowed
        about.connect("activate", self.show_about_dialog)
        quit_item.connect("activate", gtk.main_quit)
        menu.append(about)
        menu.append(quit_item)
        menu.show_all()
        menu.popup(None, None, gtk.status_icon_position_menu, button, time, self.statusicon)

    def show_about_dialog(self, widget):
        """Show about dialog."""
        about_dialog = gtk.AboutDialog()
        about_dialog.set_destroy_with_parent(True)
        about_dialog.set_name("Folder Watch")
        about_dialog.set_version("1.0")
        about_dialog.set_authors([u"Ghislain Lévêque"])
        about_dialog.run()
        about_dialog.destroy()

    def _refresh(self):
        """Update the icon/tooltip when a regular file newer than the last seen appears."""
        entries = ((os.path.join(self._folder, fn)
                    for fn in os.listdir(self._folder)))
        entries = ((os.stat(path), path) for path in entries)
        entries = ((stat_data[stat.ST_CTIME], path)
                   for stat_data, path in entries
                   if stat.S_ISREG(stat_data[stat.ST_MODE]))
        entries = sorted(entries)
        if not entries:
            # Bug fix: an empty watched folder used to raise IndexError here.
            return
        last_date, last_name = entries[-1]
        if self._last_file_seen is None or self._last_file_seen < last_date:
            self.statusicon.set_from_stock(gtk.STOCK_PRINT_REPORT)
            self.statusicon.set_tooltip("Last file: {} ({})".format(
                last_name, time.ctime(last_date)
            ))
            self._last_file_seen = last_date
def main():
    """Launch the app: schedule the periodic refresh and enter the GTK loop."""
    app = FolderWatch()
    # 1 ms timeout; FolderWatch.refresh throttles itself internally.
    gobject.timeout_add(1, app.refresh)
    gtk.main()
# Script entry point.
if __name__ == "__main__":
    main()
|
20,973 | 4794eeb99218ce53e4645f2814463e8198e96cb9 | import time
import json
from pprint import pprint
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin
app = Flask(__name__)
# Signing Access-Control-Allow-Origin. This is CRITICAL.
CORS(app)
# Data Stores -- all application state lives in these in-memory module dicts.
nursePQ = {}  # patient_id -> patient record still awaiting triage (see assign)
idToPatient = {}  # patient_id -> patient record
idToDoctor = {}  # doctor_id -> doctor record
specialisationToPatients = {}  # specialisation -> list (queue) of patient records
#doctorToPatient = {}
patientToProcedures = {}  # patient_id -> list of procedures (currently unused -- see add_procedure)
patientToProgress = {}  # patient_id -> last saved procedure step (see save_progress)
count = 0  # global procedure counter incremented by add_procedure
# API
@app.route('/get_doctor_name',methods=['GET'])
def get_doctor_name():
    """Return the name of the doctor identified by the doctor_id query parameter."""
    doctor = idToDoctor[request.args.get('doctor_id')]
    return jsonify({"doctor_name": doctor['name']})
@app.route('/get_progress', methods=['GET'])
def get_progress():
    """Return the saved procedure progress for a patient.

    Bug fix: ``patient_id`` was referenced without being defined (NameError
    on every request); it is now read from the query string, matching the
    other endpoints.
    """
    patient_id = request.args.get('patient_id')
    return jsonify(patientToProgress[patient_id])
@app.route('/get_specialisations', methods=['GET'])
def get_specialisations():
    """Return the list of specialisations known to the system."""
    return jsonify(list(specialisationToPatients.keys()))
@app.route('/view_procedures', methods=['GET'])
def view_procedures():
    """Return the running global procedure counter.

    NOTE(review): despite the name this only exposes ``count``; the
    per-patient procedure list is commented out in add_procedure -- confirm
    whether that is intentional.
    """
    #may need to reverse order???
    return jsonify({"counter":count})
@app.route('/get_patient_list', methods=['GET'])
def get_patient_list():
    """Return the requesting doctor's queue ordered by priority (1 high .. 3 low)."""
    doctor_id = request.args.get('doctor_id')
    specialisation = idToDoctor[doctor_id]['specialisation']
    patientQueue = specialisationToPatients[specialisation]
    patientList = []
    counter = 1
    # Stable pass over the three integer priority buckets.
    # NOTE(review): patients created via /add_patient keep priority as the raw
    # string ('Low'/'Mid'/'High') and will never match these int buckets; only
    # patients loaded through add() are normalised -- confirm intent.
    while counter < 4:
        for patient in patientQueue:
            if patient['priority'] == counter:
                patientList.append(patient)
        counter = counter + 1
    return jsonify(patientList)
@app.route('/get_all_patients', methods=['GET'])
def get_all_patients():
    """Return every patient currently waiting in the nurse queue."""
    return jsonify(list(nursePQ.values()))
@app.route('/get_patient', methods=['GET'])
def get_patient():
    """Return the full record for the patient given by the patient_id parameter."""
    patient_id = request.args.get('patient_id')
    # Debug output left in place.
    print(patient_id)
    print(idToPatient)
    return jsonify(idToPatient[patient_id])
@app.route('/accept_patient', methods=['POST'])
def accept_patient():
    """Mark a patient as accepted by a doctor.

    Bug fix: the original indexed the record with the undefined bare name
    ``accepted`` (a NameError on every request) instead of the string key
    'accepted' used everywhere else.
    """
    patient_id = request.args.get('patient_id')
    doctor_id = request.args.get('doctor_id')
    idToPatient[patient_id]['accepted'] = True
    #doctorToPatient[doctor_id] = patient_id
    # Return status. This is arbitrary.
    return jsonify({ "status" : "success" })
@app.route('/add_procedure', methods=['POST'])
def add_procedure():
    """Increment the global procedure counter on behalf of a doctor.

    NOTE(review): the per-patient bookkeeping is commented out, so only the
    global ``count`` changes -- confirm whether that is intentional.
    """
    global count
    doctor_id = request.args.get('doctor_id')
    #patient_id = doctorToPatient[doctor_id]
    doctor_name = idToDoctor[doctor_id]['name']
    count = count+1
    #get the list of sessions for this patient
    #get the most recent session
    #add to it to given procedure
    #patientToProcedures[patient_id].append({
    # 'doctor_id':doctor_id,
    # 'doctor_name':doctor_name,
    # 'count':count,
    #})
    # Return status. This is arbitrary.
    return jsonify({ "status" : "success" })
@app.route('/pass_on', methods=['POST'])
def pass_on():
    """Move a patient from one doctor's specialisation queue to another."""
    old_doctor_id = request.args.get('doctor_id')
    patient_id = request.args.get('patient_id')
    specialty = request.args.get('specialisation')
    old_specialty = idToDoctor[old_doctor_id]['specialisation']
    specialisationToPatients[old_specialty].remove(idToPatient[patient_id])
    # Reset to un-accepted so the receiving doctor can claim the patient.
    idToPatient[patient_id]['accepted'] = False
    specialisationToPatients[specialty].append(idToPatient[patient_id])
    # Return status. This is arbitrary.
    return jsonify({ "status" : "success" })
@app.route('/assign', methods=['POST'])
def assign():
    """Move a triaged patient from the nurse queue into a specialisation queue."""
    patient_id = request.args.get('patient_id')
    specialty = request.args.get('specialisation')
    idToPatient[patient_id]['accepted'] = False
    specialisationToPatients[specialty].append(idToPatient[patient_id])
    # The patient leaves the nurse's triage queue.
    del nursePQ[patient_id]
    # Return status. This is arbitrary.
    return jsonify({ "status" : "success" })
def add(patient_id, name, age, gender, height, weight, emergency_contact, health_insurance, condition, address, priority, bloodType):
    """Insert a patient loaded from the JSON fixture into the in-memory stores.

    Unlike the /add_patient route, this normalises the textual priority
    ('High'/'Mid'/'Low') to the integer buckets 1/2/3 that get_patient_list
    sorts on.
    """
    if priority == "Low":
        priority = 3
    elif priority == "Mid":
        priority = 2
    elif priority == "High":
        priority = 1
    new_patient = {
        'patient_id' : patient_id,
        'name' : name,
        'age' : age,
        'gender' : gender,
        'height' : height,
        'weight' : weight,
        'emergency_contact' : emergency_contact,
        'health_insurance' : health_insurance,
        'condition' : condition,
        'accepted' : False,
        'address' : address,
        'priority' : priority,
        'bloodType' : bloodType
    }
    # Adding the patient to patient list.
    idToPatient[patient_id] = new_patient
    nursePQ[patient_id] = new_patient
    patientToProcedures[patient_id] = []
@app.route('/add_patient', methods=['POST'])
def add_patient():
    """Create a patient record from query parameters and queue them for triage."""
    # Retrieve the new patient. Id is the next sequential index.
    patient_id = str(len(idToPatient))
    firstName = request.args.get('firstName')
    lastName = request.args.get('lastName')
    age = request.args.get('age')
    gender = request.args.get('gender')
    height = request.args.get('height')
    weight = request.args.get('weight')
    emergency_contact = request.args.get('emergency_contact')
    health_insurance = request.args.get('health_insurance')
    condition = request.args.get('condition')
    accepted = False
    address = request.args.get('address')
    # NOTE(review): unlike add(), priority stays the raw string
    # ('Low'/'Mid'/'High'), so get_patient_list's integer buckets never match
    # these patients -- confirm intended behaviour.
    priority = request.args.get('priority')
    bloodType = request.args.get('bloodType')
    name = firstName + " " +lastName
    new_patient = {
        'patient_id' : patient_id,
        'name' : name,
        'age' : age,
        'gender' : gender,
        'height' : height,
        'weight' : weight,
        'emergency_contact' : emergency_contact,
        'health_insurance' : health_insurance,
        'condition' : condition,
        'accepted' : accepted,
        'address' : address,
        'priority' : priority,
        'bloodType' : bloodType
    }
    # Adding the patient to patient list.
    idToPatient[patient_id] = new_patient
    nursePQ[patient_id] = new_patient
    patientToProcedures[patient_id] = []
    # Return status. This is arbitrary.
    return jsonify({ "status" : "success" })
def add_doctor(doctor_id, name, age, gender, specialisation):
    """Register a doctor and ensure a patient queue exists for their specialisation."""
    idToDoctor[doctor_id] = {
        'doctor_id' : doctor_id,
        'name' : name,
        'age' : age,
        'gender' : gender,
        'specialisation' : specialisation
    }
    # First doctor of a specialisation creates that specialisation's queue.
    specialisationToPatients.setdefault(specialisation, [])
@app.route('/save_progress', methods=['POST'])
def save_progress():
    """Persist a patient's current procedure step.

    Bug fixes: ``patient_id`` is now read from the request (it was
    undefined), the ``task_decription`` misspelling no longer references a
    missing name, and the view returns a JSON status (Flask rejects views
    that return None).
    """
    patient_id = request.args.get('patient_id')
    step_number = request.args.get('step_number')
    task_description = request.args.get('task_description')
    timer = request.args.get('timer')
    patientToProgress[patient_id] = {
        "step_number": step_number,
        "task_description": task_description,
        "timer": timer
    }
    return jsonify({ "status" : "success" })
@app.route('/remove_patient', methods=['POST'])
def remove_patient():
    """Remove a patient from their doctor's specialisation queue.

    Expects ``doctor_id`` and ``patient_id`` request arguments.
    """
    doctor_id = request.args.get('doctor_id')
    patient_id = request.args.get('patient_id')
    specialisation = idToDoctor[doctor_id]['specialisation']
    specialisationToPatients[specialisation].remove(idToPatient[patient_id])
    # Bug fix: a Flask view returning None yields a 500; mirror the other
    # routes' success payload.
    return jsonify({ "status" : "success" })
@app.route('/dummy')
def dummy():
    # Health-check style endpoint: always reports success.
    payload = { "status" : "success" }
    return jsonify(payload)
# Seed the in-memory stores from the bundled JSON fixtures, then start Flask.
with open('doctors.json') as f:
    data = json.load(f)
    for doctor in data:
        add_doctor(doctor["doctor_id"], doctor["name"], str(doctor["age"]), doctor["gender"], doctor["specialisation"])
with open('patients.json') as f:
    data = json.load(f)
    for patient in data:
        #if(patient["patient_id"] != "D*4NS6N"):
        # NOTE(review): ``add`` is defined earlier in this file (not visible
        # here); confirm its signature matches this call.
        add(patient["patient_id"], patient["name"], patient["age"], patient["gender"], patient["height"], patient["weight"], patient["emergency_contact"], patient["health_insurance"], patient["condition"], patient["address"], patient["priority"], patient["blood_type"])
app.run()
|
20,974 | 9ce5cf7701780a3ebd3f81d9f639763b90907094 | import pygal
from pygal.style import LightColorizedStyle as LCS, LightenStyle as LS
my_style = LS('#333366', base_style=LCS)
chart = pygal.Bar(style=my_style, x_label_rotation=45, show_legend=False)
chart.title = 'Python Projects'
chart.x_labels = ['system-design-primer', 'public-apis', 'Python']
plot_dicts = [
{'value': 132419, 'label': 'Description of system-design-primer.'},
{'value': 125056, 'label': 'Description of public-apis.'},
{'value': 108081, 'label': 'Description of Python.'},
]
# add函数只会接受字符串和列表作为参数
chart.add('', plot_dicts)
chart.render_to_file('bar_descriptions.svg') |
20,975 | b55a840ead5e8d93aef7342409abdc86fdb75dfb | import csv
import numpy as np
import pandas as pd
from clustering.LoadDataBaha import TransactionsWithoutDuplications
def TransitiveClosure(d, m, y, Offset):
    """
    Implement the Transitive Closure heuristic that links mutual input
    addresses between transactions: if (a, b) are inputs of T1 and (b, c)
    are inputs of T2, then a, b and c probably belong to the same user.
    Writes 'TwoAddressesInput.csv' (transactions with exactly two input
    addresses) and 'TransitiveClosure.csv' (merged address groups).
    """
    arr, TransactionsTo, TransactionsFrom = TransactionsWithoutDuplications(d, m, y, Offset)
    NumberOfTxsTo = len(TransactionsTo)
    TxsWithTwoOrMoreInputs = []  # NOTE(review): never used below
    # Write the CSV headers (files are reopened in append mode later).
    with open('TwoAddressesInput.csv', 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['Transaction'] + ['First Address'] + ['Second Address'])
    with open('TransitiveClosure.csv', 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(
            ['Transactions1'] + ['Transactions2'] + ['First Address'] + ['Second Address'] + ['Third Address'])
    # Collect, per transaction, its distinct input addresses; keep only the
    # transactions with exactly two inputs.  NOTE(review): O(T*rows) scan.
    for x in range(NumberOfTxsTo):
        TxInputAddresses = []
        for row in arr:
            if row[1] == TransactionsTo[x] and row[0] not in TxInputAddresses:
                TxInputAddresses.append(row[0])
        if len(TxInputAddresses) == 2:
            with open('TwoAddressesInput.csv', 'a', newline='') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow([TransactionsTo[x]] + [TxInputAddresses[0]] + [TxInputAddresses[1]])
    TwoAddressesInput = pd.read_csv('TwoAddressesInput.csv')
    arrTwoAddressesInput = np.array(TwoAddressesInput)
    count = len(arrTwoAddressesInput)  # NOTE(review): unused
    # Pairwise-merge rows that share any value; each merged group is written
    # as one sorted 5-column row.
    for g in arrTwoAddressesInput:
        x = g
        # Shrink the comparison set so each pair is visited once.
        arrTwoAddressesInput = np.delete(arrTwoAddressesInput, 0, axis=0)
        for f in arrTwoAddressesInput:
            pp = bool(set(g) & set(f))
            if pp == True:
                Transitive = sorted(list(set(x).union(set(f))))
                with open('TransitiveClosure.csv', 'a', newline='') as csvfile:
                    writer = csv.writer(csvfile)
                    writer.writerow([Transitive[0]] + [Transitive[1]] + [Transitive[2]] + [Transitive[3]] + [Transitive[4]])
20,976 | b23b8d5a471d213327d7cb88f13bb3039a292ba4 | alpha_dict = dict()
alpha_dict['a'] = 'y'
alpha_dict['o'] = 'e'
alpha_dict['z'] = 'q'
alpha_dict['q'] = 'z'
learn_text1 = [
'ejp mysljylc kd kxveddknmc re jsicpdrysi', \
'rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd', \
'de kr kd eoya kw aej tysr re ujdr lkgc jv' \
]
learn_text2 = [
'our language is impossible to understand', \
'there are twenty six factorial possibilities', \
'so it is okay if you want to just give up' \
]
for i in range(0,3):
line = learn_text1[i]
for j in range(0,len(line)):
if line[j].isalpha():
alpha_dict[line[j]] = learn_text2[i][j]
inputs_text = []
lines = int(raw_input())
for i in range(0,lines):
line = raw_input()
newline = []
for j in range(0,len(line)):
if line[j].isalpha():
newline.append(alpha_dict[line[j]])
else:
newline.append(line[j])
print 'Case #' + str(i+1) + ': ' + ''.join(newline)
|
20,977 | c2876e99608b7a60d23cf09b45530e48c47b1c81 | import os
import json
import pprint
from flask import Flask, request, url_for
from signalwire.voice_response import VoiceResponse, Gather
app = Flask(__name__)
with open('configure.json') as f:
ccConfig = json.load(f)
# Dump config to console, for debugging
pprint.pprint(ccConfig)
HOSTNAME = ccConfig['settings']['hostname']
SIGNALWIRE_SPACE = ccConfig['signalwire']['space']
SIGNALWIRE_PROJECT = ccConfig['signalwire']['project']
SIGNALWIRE_TOKEN = ccConfig['signalwire']['token']
@app.route('/get_menu', methods=['GET', 'POST'])
def get_menu():
    """IVR menu handler: plays a menu, or routes a DTMF choice.

    NOTE(review): an unrecognised digit raises KeyError on
    ``menus[menu][digits]`` — confirm upstream guarantees valid input.
    """
    response = VoiceResponse()
    # read menus from config
    menus = ccConfig['settings']['menus']
    # check to see if a default menu was specified, else default to "main"
    menu = request.values.get("menu")
    if menu not in menus:
        menu = "main"
    # read input_type variable
    input_type = request.values.get("input_type")
    # check if user input was provided via dtmf entry
    if input_type == "dtmf":
        # get digits pressed at menu
        digits = request.values.get("Digits")
        input_action = menus[menu][digits]["action"]
        response.redirect(url=input_action)
        response.hangup()
    else:
        # no user input was detected, so lets present a menu
        gather = Gather(action='/get_menu' + "?menu=" + menu, input='dtmf', timeout="5", method='POST', numDigits="1")
        # loop through menus and generate menu options
        for key in menus[menu]:
            print(key, '->', menus[menu][key]["verbiage"])
            gather.say(menus[menu][key]["verbiage"])
        # add menu to response
        response.append(gather)
        response.hangup()
    # return response
    return str(response)
@app.route("/transfer_call", methods=['POST', 'GET'])
def transfer_call():
response = VoiceResponse()
farward_to = ccConfig['settings']['callTransferTo_']
# print(farward_to)
response.say("please wait your call is connecting....")
response.dial(farward_to, action=url_for('quit_call'))
print(farward_to)
return str(response)
@app.route("/connect_dgf", methods=['POST', 'GET'])
def connect_dgf():
response = VoiceResponse()
farward_to = ccConfig['settings']['GDF_connection']
# print(farward_to)
response.dial(farward_to, action=url_for('quit_call'))
# print(farward_to)
return str(response)
@app.route('/quit_call', methods=['GET', 'POST'])
def quit_call():
    """Thank the caller and terminate the call."""
    vr = VoiceResponse()
    vr.say('Thank you for calling Eagle.net')
    vr.hangup()
    return str(vr)
if __name__ == '__main__':
app.run(host="0.0.0.0")
|
20,978 | 2d9c6168557bb24d81e67ad8b1f322b1f8ce083e | # -*- coding: utf-8 -*-
"""
@file: e.spiralOrder.py
@date: 2020-09-07 4:19 PM
@desc: 剑指 Offer 29. 顺时针打印矩阵
@url : https://leetcode-cn.com/problems/shun-shi-zhen-da-yin-ju-zhen-lcof/
"""
# 输入:matrix = [
# [1,2,3,4],
# [5,6,7,8],
# [9,10,11,12]
# ]
# 输出:[
# 1,2,3,4,
# 8,12,11,10,
# 9,5,6,7
# ]
from typing import List
class Solution:
    def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
        """Return all elements of *matrix* in clockwise spiral order."""
        if not matrix or not matrix[0]:
            return []
        rows, cols = len(matrix), len(matrix[0])
        total = rows * cols
        result: List[int] = []
        # Walk cell by cell; turn clockwise whenever the next step would
        # leave the grid or revisit a cell.
        deltas = ((0, 1), (1, 0), (0, -1), (-1, 0))  # right, down, left, up
        seen = [[False] * cols for _ in range(rows)]
        r = c = di = 0
        while len(result) < total:
            result.append(matrix[r][c])
            seen[r][c] = True
            nr, nc = r + deltas[di][0], c + deltas[di][1]
            if not (0 <= nr < rows and 0 <= nc < cols) or seen[nr][nc]:
                di = (di + 1) % 4
                nr, nc = r + deltas[di][0], c + deltas[di][1]
            r, c = nr, nc
        return result
# 00 01 02 03
# 13 23
# 22 21 20
|
20,979 | 391e217a5a3653ee7367dd3f1f501dc99bf11ecd | # -*- coding: utf-8 -*-
'''Common configuration
'''
import os
from django.utils.translation import gettext_lazy as _u
DEBUG = False

# The deployment environment name selects which settings overlay to load.
_ENV = os.environ.get('SERVICE_CONFIGURATION')
if _ENV is None:
    raise RuntimeError('No SERVICE_CONFIGURATION in env')

BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SETTINGS_BASE = BASE_DIR + '/settings'

# Execute the per-environment settings module in this module's namespace.
# Bug fix: the previous ``exec(...) in globals()`` was a Python 2 leftover
# that in Python 3 parses as ``(exec(...)) in globals()`` — a no-op
# membership test.  Also close the file deterministically.
with open(SETTINGS_BASE + '/' + _ENV + '.py') as _settings_file:
    exec(_settings_file.read(), globals())
INSTALLED_APPS = (
# 'django.contrib.admin',
# 'django.contrib.auth',
# 'django.contrib.contenttypes',
# 'django.contrib.sessions',
# 'django.contrib.messages',
# 'django.contrib.staticfiles',
'pocoto.apps.base',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
# 'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
# 'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'pocoto', 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
#'django.template.context_processors.csrf',
],
},
},
]
ROOT_URLCONF = 'pocoto.urls'
APPEND_SLASH = False
PREPEND_WWW = False
WSGI_APPLICATION = 'pocoto.wsgi.APP'
LANGUAGE_CODE = 'pl'
TIME_ZONE = 'Europe/Warsaw'
USE_I18N = True
USE_L10N = True
USE_TZ = False
FIRST_DAY_OF_WEEK = 1
LANGUAGES = (
('pl', _u('Polish')),
('en', _u('English')),
)
STATIC_URL = '/static/'
CSRF_COOKIE_DOMAIN = None
CSRF_FAILURE_VIEW = 'pocoto.apps.base.views.csrf_failure'
SESSION_COOKIE_DOMAIN = None
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
X_FRAME_OPTIONS = 'SAMEORIGIN'
IGNORABLE_404_URLS = ()
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(asctime)s %(name)s %(levelname)s %(module)s:' \
'%(lineno)s %(message)s',
'datefmt': '%Y-%m-%d %H:%M:%S',
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'stderr': {
'level': 'NOTSET' if DEBUG else 'ERROR',
'formatter': 'verbose',
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stderr',
},
},
'root': {
'handlers': ['stderr'],
'level': 'NOTSET' if DEBUG else 'INFO',
'propagate': False,
},
'loggers': {
},
}
# django "catch all" and core loggers
for logger in ['', 'root', 'django', 'django.request', 'django.db.backends',
'django.security']:
LOGGING['loggers'][logger] = LOGGING['root']
# vim: ts=4:sw=4:et:fdm=indent:ff=unix
|
20,980 | 9e2bb04ae6f455f2e5b14aabd89c52f72a5c9a50 | #! /usr/bin/evn python
import rospy
import yaml
from tkFileDialog import askopenfilename, asksaveasfilename
# This function gives a dialog to obtain the yaml file and load the supported
# value types only
def readDumpFileDialog(window):
    """Ask for a .yaml file and return its int/str/float entries as a dict.

    Keys and string values are stripped of surrounding whitespace; entries
    with any other value type are dropped.  Returns None when the dialog is
    cancelled.  (Python 2 module: uses dict.iteritems.)
    """
    filename = askopenfilename(parent=window, title="Gives a file name",\
        defaultextension=".yaml", filetypes=[("YAML file", "*.yaml")])
    if filename:
        # ``with`` guarantees the file is closed even if parsing raises
        # (the previous open/close pair leaked the handle on error).
        with open(filename, "r") as fd:
            content = yaml.safe_load(fd.read())
        # Filter out the unsupported value types.
        res = {}
        for key, value in content.iteritems():
            if (type(value) is not int and type(value) is not str\
                and type(value) is not float):
                continue
            # Remove surrounding whitespace from string values.
            tmp_val = value
            if type(tmp_val) is str:
                tmp_val = tmp_val.strip()
            res[key.strip()] = tmp_val
        return res
# This function dump the given dictionary into a yaml file with the file name
# specified through a dialog
def writeDumpFileDialog(window, content):
    """Ask for a .yaml file name and dump *content* into it.

    Does nothing when the dialog is cancelled.
    """
    filename = asksaveasfilename(parent=window, title="Gives a file name",\
        defaultextension=".yaml", filetypes=[("YAML file", "*.yaml")])
    if filename:
        # ``with`` guarantees the file is flushed and closed even on error
        # (the previous open/close pair leaked the handle if dump raised).
        with open(filename, "w+") as fd:
            yaml.dump(content, fd, default_flow_style=False)
|
20,981 | a5084890e9f2d2a5160b6f310341fa9a4d0abd0c | from __future__ import division
import cv, numpy
import time, math
def CapCam(CamNum,capture):
    # Grab one frame from *capture*, resize it to frameW x frameH, show it in
    # the "camera<CamNum>" window and keep it in the global imagesL list.
    # NOTE(review): relies on module globals frameW/frameH/imagesL defined
    # below the function — fine at call time, but order-sensitive.
    frame = cv.QueryFrame(capture)
    small = cv.CreateImage((frameW,frameH),frame.depth,frame.nChannels)
    cv.Resize(frame,small)
    cv.ShowImage("camera"+str(CamNum), small)
    imagesL.append(small)
    #cv.WriteFrame(writer,small)
#cv.WriteFrame(writer,small)
cv.NamedWindow("camera1", 1)
capture = cv.CaptureFromCAM(0)
time0 = time.time()
frameW = 640 # images width
frameH = 480 # images height
filename = "test1"
imagesL = list()
while True:
CapCam(1,capture)
time1 = time.time()
print time1-time0
if cv.WaitKey(10) == 27:
cv.DestroyWindow("camera1")
break
fps = numpy.round(len(imagesL)/(time1-time0))
print fps
isColor = 1
#fps = 20# or 30, frames per second
writer = cv.CreateVideoWriter(filename+"_Cam1.avi",cv.CV_FOURCC('D', 'I', 'V', 'X') , fps,(frameW,frameH),isColor)
for k in imagesL:
cv.WriteFrame(writer,k)
|
20,982 | 53f7c65977f69bedfd3a55cefee9c65dc6b1cfa4 |
import sys
import heapq
from math import pi
def pancakes2(input_file, output_file):
    """Solve the Code Jam pancake-stack surface problem (Python 2: xrange).

    For each case, picks K of N pancakes maximising the visible surface:
    the largest pancake contributes top circle + side, each other chosen
    pancake contributes only its side area.
    """
    T = int(input_file.readline())
    for case in xrange(1, T + 1):
        args = input_file.readline().split(' ')
        N = int(args[0])
        K = int(args[1])
        maxR = 0  # NOTE(review): unused
        surfaces = [0]*N
        circle = [0]*N
        sides = [0]*N
        totals = [0]*N
        for index in range(N):
            args = input_file.readline().split(' ')
            R = int(args[0])
            H = int(args[1])
            circle[index] = pi * R * R
            sides[index] = 2 * pi * R * H
            surfaces[index] = circle[index] + sides[index]
        # Sort pancakes by descending circle area so candidate "bottom"
        # pancakes come first.
        circle, sides, surfaces = zip(*sorted(zip(circle, sides, surfaces), reverse=True))
        # Try each pancake as the base; add the K-1 largest remaining sides.
        for index in range(N - K + 1):
            totals[index] = surfaces[index] + sum(heapq.nlargest(K-1, sides[index + 1:]))
        total_surface = max(totals)
        output_file.write("Case #" + str(case) + ": " + str(total_surface) + "\n")
#pancakes2(sys.stdin, sys.stdout)
input_file = open("C:\\Users\\doritm\\Desktop\\A-small.in", "r")
output_file = open("C:\\Users\\doritm\\Desktop\\output.out", "w")
pancakes2(input_file, output_file)
input_file.close()
output_file.close()
|
20,983 | 99f744ca1dc1d7a190d273f78427c2a6bb7cc352 | from bempy import block, b
from bempy.django import uses
from bempy.django.blocks import cssreset, menu, title, href, menu_item, selected_menu_item
from islets import y_header_with_search, y_header
@block()
@uses(cssreset, menu, title, href, y_header_with_search, y_header)
def page_with_menu(request, menu_items, **content):
    """Build the template context for a page with header, title and menu.

    *menu_items* is an iterable of (path, label) pairs; the item matching
    the current request path is rendered as selected.
    """
    context = content.copy()
    context['header'] = y_header_with_search('Bempy')
    context['title'] = title("Bempy's Blog")
    context['cssreset'] = cssreset()
    # NOTE(review): ``or True`` forces this branch for everyone — looks like
    # a leftover debug override; confirm before shipping.
    if request.user.is_authenticated() or True:
        pass
        # context['login'] = b.menu(
        #     b.href('Preferences', '/settings/'),
        #     b.href('Logout', '/logout/'),
        #     label=request.user.username or 'svetlyak40wt',
        #     type='dropdown')
    else:
        context['login'] = href('Login', '/login/')
    context['menu'] = menu(items=[
        selected_menu_item(label=label,
                           path=path)
        if (request.path == path)
        else menu_item(label=label,
                       path=path)
        for path, label in menu_items])
    return context
|
20,984 | bce4f6f47fbc0d54ca5aed778a1088472e02883d | from multiprocessing import Pool
import json
import requests
from urllib.parse import urlencode
import pymongo
from config import *
client = pymongo.MongoClient(MONGO_URL,connect=False)
db = client[MONGO_DB]
proxy_pool_url = 'http://127.0.0.1:5000/get'
headers = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.26 Safari/537.36 Core/1.63.6756.400 QQBrowser/10.3.2473.400'
# 获取代理ip
def get_proxy():
    """Fetch a proxy address from the local proxy pool; None on failure.

    NOTE(review): this catches the *builtin* ConnectionError, while
    ``requests`` raises ``requests.exceptions.ConnectionError`` (a different
    class) — confirm which exception is intended here.
    """
    try:
        response = requests.get(proxy_pool_url)
        if response.status_code == 200:
            return response.text
        return None
    except ConnectionError:
        return None
def get_page(page):
    """Fetch one page (50 rows) of margin-trading data from eastmoney.

    Returns the raw response text on HTTP 200, or None on failure.
    """
    data = {
        'token': '70f12f2f4f091e459a279469fe49eca5',
        'st': 'tdate',
        'sr': -1,
        'p': page,
        'ps': 50,
        'js': 'var XhfdqNBv={pages:(tp),data: (x)}',
        'type': 'RZRQ_LSTOTAL_NJ',
        'mk_time': 1,
    }
    url = 'http://dcfm.eastmoney.com//EM_MutiSvcExpandInterface/api/js/get?' + urlencode(data)
    try:
        # Bug fix: the module-level ``headers`` UA string was passed as the
        # second positional argument of requests.get (= ``params``); it must
        # go in a ``headers`` mapping.  ``allow_redirects=False`` lets the
        # anti-scraping 302 below actually be observed (requests follows
        # redirects by default).  The request is now inside the ``try`` so
        # connection errors are reported instead of propagating.
        response = requests.get(url, headers={'User-Agent': headers},
                                allow_redirects=False)
        if response.status_code == 200:
            return response.text
        if response.status_code == 302:
            print('302')
            proxy = get_proxy()
            if proxy:
                print('Using Proxy', proxy)
                # TODO(review): the retry does not actually route through the
                # fetched proxy; pass ``proxies=`` if that is the intent.
                return get_page(page)
            else:
                print('Get Proxy Failed')
                return None
    except Exception:
        print("请求失败,未获取数据")
def parse_page(html):
    """Parse the JSON array after the ``data: `` marker of the JSONP-style
    response text; prints a notice and returns None on any failure."""
    try:
        payload = html.split('data: ')[1]
        # Drop the trailing '}' that closes the wrapping JS object literal.
        return json.loads(payload[:-1])
    except Exception:
        print("未获取results")
def save_to_mongo(results, page):
    """Insert parsed rows into Mongo; True on success, False if empty.

    Each row stores the trade date and the margin balance converted from
    yuan to 1e8-yuan units (rounded to 2 decimals).
    """
    try:
        if results:
            for result in results:
                rzrqye = {"日期": result.get("tdate"),"融资融券余额": round(result.get("rzrqye")/10**8, 2)}
                db[MONGO_TABLE].insert(rzrqye)
            # NOTE(review): this session is created but never used for any
            # request — looks like dead code; confirm before removing.
            s = requests.session()
            s.keep_alive = False
            print('存储第{}页'.format(page))
            print()
            return True
        return False
    except Exception:
        print("存储失败")
def main(page):
    """Fetch, parse and persist a single results page."""
    raw = get_page(page)
    parsed = parse_page(raw)
    save_to_mongo(parsed, page)
if __name__ == '__main__':
try:
pool = Pool()
# 第一个参数是函数,第二个参数是一个迭代器,将迭代器中的数字作为参数依次传入函数中
pool.map(main,[page for page in range(1, 43)])
except requests.exceptions.ConnectionError:
print("退出连接")
|
20,985 | e7ee9405e1a3171378c291394d9f1a65051ac540 | import torch
from torch import nn
import numpy as np
from torch.distributions import Normal
from training.loss.torch_loss_fns import crps_torch
class CRPSLoss(nn.Module):
    """CRPS loss for a network that emits [means | raw sigmas] per sample."""

    def __init__(self):
        super().__init__()
        # 1/sqrt(pi); kept as an attribute for parity with the analytic
        # CRPS formula (not read in forward()).
        self.const = (1 / torch.Tensor([np.pi]).sqrt())

    def forward(self, preds, target):
        assert not target.requires_grad
        assert preds.size(0) == target.size(0)
        assert preds.shape[-1] % 2 == 0
        assert preds.shape[-1] / 2 == target.shape[-1]
        # First half of the last dim are the means, second half raw sigmas.
        half = int(preds.shape[-1] / 2)
        mu = preds[:, :half]
        raw_sigma = preds[:, half:]
        # Softplus keeps sigma positive regardless of the net's activations.
        sigma = torch.nn.Softplus()(raw_sigma)
        return crps_torch(mu, sigma, target)
|
20,986 | 68d10194d15d3520129f81a8e3a5ec036c8a25d1 | numero = int(input('Digite algo: \n'))
if (numero % 2) == 0:
print("Número par")
else:
print("Número impar")
|
20,987 | c3f996ee58386f26c07dff0a10fadf16f7a6064c | # Generated by Django 3.0.7 on 2021-05-02 11:28
from django.db import migrations, models
import website.helperFunctions
class Migration(migrations.Migration):
    """Auto-generated Django migration (do not edit by hand): updates the
    event poster field defaults/upload paths, the sigs m2m related_name and
    the venue default."""
    dependencies = [
        ('sig', '0001_initial'),
        ('event', '0003_auto_20210211_1809'),
    ]
    operations = [
        migrations.AlterField(
            model_name='event',
            name='poster',
            field=models.ImageField(blank=True, default=website.helperFunctions.default_event_poster_path, upload_to=website.helperFunctions.event_poster_upload_path),
        ),
        migrations.AlterField(
            model_name='event',
            name='sigs',
            field=models.ManyToManyField(related_name='sigs', to='sig.SIG'),
        ),
        migrations.AlterField(
            model_name='event',
            name='venue',
            field=models.CharField(blank=True, default='MS Teams', max_length=100),
        ),
    ]
|
20,988 | 7bf0e64e201e86104a9d3834581d7b823c84a82d | import numpy as np
from scipy.linalg import pinv
def distance_vec_rep_of_fibers(fi):
    '''Calculate the distance of each point on the fiber from the first point.

    Input:
        fi - a (n,3) np.ndarray of a single fiber. n is the number of points that represent the fiber
    Output:
        dist_vec - a (n,) vector of distances of each fiber point from the first point'''
    # Vectorised: one norm over the whole (n,3) difference replaces the
    # per-point Python loop (identical values, NumPy-speed).
    return np.linalg.norm(fi - fi[0, :], axis=1)
def distance_powered_matrix(dist_vec, degree):
    '''Build the Vandermonde-style design matrix for polynomial interpolation.

    Input:
        dist_vec - a (n,) vector of distance representation of the fiber
        degree - the polynomial degree wanted
    Output:
        dist_mat - a (n, degree+1) np.ndarray whose column i is dist_vec**i'''
    # Broadcasting replaces the per-column Python loop; accepting any
    # sequence (not just ndarray) is a backward-compatible generalisation.
    powers = np.arange(degree + 1)
    return np.asarray(dist_vec, dtype=float).reshape(-1, 1) ** powers
def least_squares_poly_rep(fi, comp, dist_mat):
    '''Least-squares polynomial coefficients for one coordinate of the fiber.

    Solves: poly_vec = pinv(dist_mat.T @ dist_mat) @ dist_mat.T @ fi[:, axis]

    Input:
        fi - a (n,3) np.ndarray of a single fiber
        comp - one of {'X','Y','Z'}: the coordinate to fit
        dist_mat - a (n, degree+1) design matrix from distance_powered_matrix
    Output:
        poly_vec - a (degree+1,) vector of polynomial coefficients
    Raises:
        KeyError - if comp is invalid (previously an invalid comp produced
        an UnboundLocalError from the if/elif chain).
    '''
    ax = {'X': 0, 'Y': 1, 'Z': 2}[comp]
    normal = np.matmul(dist_mat.T, dist_mat)
    inv_normal = pinv(normal)
    return np.matmul(np.matmul(inv_normal, dist_mat.T), fi[:, ax])
def poly_xyz_vec_calc(fi, degree=3):
    '''Fit per-axis polynomials to a fiber and concatenate their coefficients.

    Input:
        fi - a (n,3) np.ndarray of a single fiber
        degree - polynomial degree for the fit (default 3)
    Output:
        poly_xyz - a (3*(degree+1),) vector: x, then y, then z coefficients'''
    dist_vec = distance_vec_rep_of_fibers(fi)
    dist_mat = distance_powered_matrix(dist_vec,degree)
    poly_vec_x = least_squares_poly_rep(fi,'X',dist_mat)
    poly_vec_y = least_squares_poly_rep(fi,'Y',dist_mat)
    poly_vec_z = least_squares_poly_rep(fi,'Z',dist_mat)
    poly_xyz = np.concatenate([poly_vec_x,poly_vec_y,poly_vec_z],0)
    return poly_xyz
|
20,989 | 35be5a1351d1dfd28f94c5d890e30e03059280a7 | from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello():
    # Root route: plain-text greeting.
    return "hello world"
@app.route('/dojo')
def dojo():
    # Static demo route.
    return "Dojo"
@app.route('/say/<name>')
def say(name):
    # Greet the caller by the name captured from the URL.
    greeting = "hi " + name
    return greeting
@app.route('/repeat/<num>/<string>')
def repeat(num, string):
    # Echo *string* (each copy followed by a space) *num* times.
    times = int(num)
    return (string + " ") * times
if __name__=="__main__":
app.run(debug=True) |
20,990 | 01843b3f1aaa8fcc57a070bfc0057b58808f328c | import diceroll
class DiceParser:
    """Recursive-descent evaluator for arithmetic with dice terms.

    Grammar:
        expr   -> term (('+'|'-') term)*
        term   -> factor ('*' factor)*
        factor -> value | '(' expr ')'
        value  -> number (('d'|'D') number)?   # NdM rolls N M-sided dice

    The expression is NUL-terminated in __init__ so one-character lookahead
    never runs off the end.
    """
    def __init__(self, expression):
        self.str = expression + "\0"
        self.index = 0
    def evaluate(self):
        """Evaluate the expression; returns None on any parse/eval error."""
        try:
            return self.expr()
        except BaseException:
            pass
    def expr(self):
        # Bug fix: '+'/'-' are applied left-associatively.  The old code
        # recursed into expr() on the right-hand side, so "1-2-3" evaluated
        # as 1-(2-3)=2 instead of (1-2)-3=-4.
        e = self.term()
        while (self.str[self.index]=="+" or self.str[self.index]=="-"):
            if self.str[self.index] == "+":
                self.index+=1
                e+=self.term()
            else:
                self.index+=1
                e-=self.term()
        return e
    def term(self):
        # Multiply successive factors (left-associative; same result for
        # '*', but mirrors the corrected expr() structure).
        e = self.factor()
        while self.str[self.index]=="*":
            self.index += 1
            e *= self.factor()
        return e
    def factor(self):
        if self.str[self.index].isdecimal(): return self.value()
        self.index+=1 # skip '('
        e = self.expr()
        self.index+=1 # skip ')'
        return e
    def number(self):
        # Accumulate a decimal integer digit by digit.
        e = int(self.str[self.index])
        self.index+=1
        while self.str[self.index].isdecimal():
            e = e*10 + int(self.str[self.index])
            self.index+=1
        return e
    def value(self):
        e = self.number()
        if self.str[self.index] == "d" or self.str[self.index] == "D":
            self.index+=1
            n = self.number()
            # Roll e dice with n sides each and sum them.
            dice = diceroll.Dice.roll(e,n)
            print("roll-> " + str(dice))
            return sum(dice)
        else:
            return e
20,991 | 1139d07e62fc8edda15635e4e5c800db5e750bf4 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_pymongo import PyMongo
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root@localhost:3306/pathsharing'
app.config['MONGO_DBNAME'] = 'PathSharing'
db = SQLAlchemy(app)
mongo = PyMongo(app)
class Users(db.Model):
    """Account record: unique username and email, plus a password field."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String, unique=True)
    # NOTE(review): nothing here hashes the password — confirm hashing
    # happens before assignment elsewhere.
    password = db.Column(db.String)
    email = db.Column(db.String, unique=True)
class Groups(db.Model):
    """A sharing group with a unique name and a single admin user id."""
    __tablename__ = 'groups'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    description = db.Column(db.String, unique=False)
    # NOTE(review): admin holds a user id but no ForeignKey is declared.
    admin = db.Column(db.Integer, unique=False)
    name = db.Column(db.String(265), unique=True)
class Users_has_Groups(db.Model):
    """Many-to-many association between users and groups (no FK constraints
    are declared; integrity is enforced in application code, if at all)."""
    __tablename__ = 'users_has_groups'
    pkey = db.Column(db.Integer, primary_key=True, autoincrement=True)
    users_id = db.Column(db.Integer)
    groups_id = db.Column(db.Integer)
# NOTE(review): lowercase class name deviates from the CamelCase used by the
# sibling models; renaming would touch external callers, so left as-is.
class invitations(db.Model):
    """A pending invitation of a user into a group."""
    __tablename__ = 'invitations'
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    user_id = db.Column(db.Integer)
    group_id = db.Column(db.Integer)
|
20,992 | 60cbcf91d21a277b0ebc780efa6d0cdf9547bedf | b = (2,3,5,1)
import numpy as np
print(np.argmax(b))
print(sorted(b,reverse=True))
|
20,993 | 51941346c219471e34e99dfa4b64857ed2c56d18 | import sqlite3
from add_record import add_class, add_score
add_score(2, 1, 12012020, 99)
add_class(1,2020,1)
|
20,994 | 99ec872dc263bd9ad7a340fc9b4ea1f39e52f6f4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_messaging.rpc import dispatcher
from heat.common import exception
from heat.common import template_format
from heat.engine import service
from heat.engine import stack
from heat.engine import template as templatem
from heat.objects import stack as stack_object
from heat.tests import common
from heat.tests.engine import tools
from heat.tests import utils
class StackServiceActionsTest(common.HeatTestCase):
    """Engine-service tests for stack suspend/resume/check actions."""
    def setUp(self):
        super(StackServiceActionsTest, self).setUp()
        self.ctx = utils.dummy_context()
        self.man = service.EngineService('a-host', 'a-topic')
        self.man.create_periodic_tasks()
    @mock.patch.object(stack.Stack, 'load')
    @mock.patch.object(service.ThreadGroupManager, 'start')
    def test_stack_suspend(self, mock_start, mock_load):
        # Suspend dispatches the work to the thread-group manager and links
        # a completion callback onto the returned thread.
        stack_name = 'service_suspend_test_stack'
        t = template_format.parse(tools.wp_template)
        stk = utils.parse_stack(t, stack_name=stack_name)
        s = stack_object.Stack.get_by_id(self.ctx, stk.id)
        mock_load.return_value = stk
        thread = mock.MagicMock()
        mock_link = self.patchobject(thread, 'link')
        mock_start.return_value = thread
        result = self.man.stack_suspend(self.ctx, stk.identifier())
        self.assertIsNone(result)
        mock_load.assert_called_once_with(self.ctx, stack=s)
        mock_link.assert_called_once_with(mock.ANY)
        mock_start.assert_called_once_with(stk.id, mock.ANY, stk)
        stk.delete()
    @mock.patch.object(stack.Stack, 'load')
    @mock.patch.object(service.ThreadGroupManager, 'start')
    def test_stack_resume(self, mock_start, mock_load):
        # Mirror of test_stack_suspend for the resume action.
        stack_name = 'service_resume_test_stack'
        t = template_format.parse(tools.wp_template)
        stk = utils.parse_stack(t, stack_name=stack_name)
        mock_load.return_value = stk
        thread = mock.MagicMock()
        mock_link = self.patchobject(thread, 'link')
        mock_start.return_value = thread
        result = self.man.stack_resume(self.ctx, stk.identifier())
        self.assertIsNone(result)
        mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
        mock_link.assert_called_once_with(mock.ANY)
        mock_start.assert_called_once_with(stk.id, mock.ANY, stk)
        stk.delete()
    def test_stack_suspend_nonexist(self):
        # Suspending a stack that was never stored raises StackNotFound.
        stack_name = 'service_suspend_nonexist_test_stack'
        t = template_format.parse(tools.wp_template)
        tmpl = templatem.Template(t)
        stk = stack.Stack(self.ctx, stack_name, tmpl)
        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.man.stack_suspend, self.ctx,
                               stk.identifier())
        self.assertEqual(exception.StackNotFound, ex.exc_info[0])
    def test_stack_resume_nonexist(self):
        # Resuming a stack that was never stored raises StackNotFound.
        stack_name = 'service_resume_nonexist_test_stack'
        t = template_format.parse(tools.wp_template)
        tmpl = templatem.Template(t)
        stk = stack.Stack(self.ctx, stack_name, tmpl)
        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.man.stack_resume, self.ctx,
                               stk.identifier())
        self.assertEqual(exception.StackNotFound, ex.exc_info[0])
    def _mock_thread_start(self, stack_id, func, *args, **kwargs):
        # Run the dispatched work synchronously instead of in a thread.
        func(*args, **kwargs)
        return mock.Mock()
    @mock.patch.object(service.ThreadGroupManager, 'start')
    @mock.patch.object(stack.Stack, 'load')
    def test_stack_check(self, mock_load, mock_start):
        stack_name = 'service_check_test_stack'
        t = template_format.parse(tools.wp_template)
        stk = utils.parse_stack(t, stack_name=stack_name)
        stk.check = mock.Mock()
        mock_load.return_value = stk
        mock_start.side_effect = self._mock_thread_start
        self.man.stack_check(self.ctx, stk.identifier())
        self.assertTrue(stk.check.called)
        stk.delete()
class StackServiceUpdateActionsNotSupportedTest(common.HeatTestCase):
    """Scenario-driven test: updating a stack that is suspending/suspended
    or deleting must raise NotSupported (one run per scenario below)."""
    scenarios = [
        ('suspend_in_progress', dict(action='SUSPEND', status='IN_PROGRESS')),
        ('suspend_complete', dict(action='SUSPEND', status='COMPLETE')),
        ('suspend_failed', dict(action='SUSPEND', status='FAILED')),
        ('delete_in_progress', dict(action='DELETE', status='IN_PROGRESS')),
        ('delete_complete', dict(action='DELETE', status='COMPLETE')),
        ('delete_failed', dict(action='DELETE', status='FAILED')),
    ]
    def setUp(self):
        super(StackServiceUpdateActionsNotSupportedTest, self).setUp()
        self.ctx = utils.dummy_context()
        self.man = service.EngineService('a-host', 'a-topic')
    @mock.patch.object(stack.Stack, 'load')
    def test_stack_update_actions_not_supported(self, mock_load):
        # self.action / self.status are injected per scenario by testscenarios.
        stack_name = '%s-%s' % (self.action, self.status)
        t = template_format.parse(tools.wp_template)
        old_stack = utils.parse_stack(t, stack_name=stack_name)
        old_stack.action = self.action
        old_stack.status = self.status
        sid = old_stack.store()
        s = stack_object.Stack.get_by_id(self.ctx, sid)
        mock_load.return_value = old_stack
        params = {'foo': 'bar'}
        template = '{ "Resources": {} }'
        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.man.update_stack,
                               self.ctx, old_stack.identifier(), template,
                               params, None, {})
        self.assertEqual(exception.NotSupported, ex.exc_info[0])
        mock_load.assert_called_once_with(self.ctx, stack=s)
        old_stack.delete()
|
20,995 | 79df5b0e19c48adeb54d9207f502f57edd2525bc | # coding: utf-8
import qstylizer.descriptor.stylerule
class PseudoPropDescriptor(qstylizer.descriptor.stylerule.StyleRuleDescriptor):
    """Pseudo-property descriptor."""
    @property
    def rule_cls(self):
        # Deferred import — presumably to avoid a circular import between
        # qstylizer.style and this module; confirm before hoisting.
        import qstylizer.style
        return qstylizer.style.PseudoPropRule
class PseudoPropParent(qstylizer.descriptor.stylerule.StyleRuleParent):
    """Pseudo-property setter.
    Contains descriptors for all known pseudo-properties.
    """
    _descriptor_cls = PseudoPropDescriptor
    # One class-level descriptor per known pseudo-property name.
    left = _descriptor_cls("left")
    right = _descriptor_cls("right")
    top = _descriptor_cls("top")
    bottom = _descriptor_cls("bottom")
|
20,996 | 27072ccd3292c4f63d7b192467a96e38d4cb1c0a | import json
import requests
def data_upload(data):
    """Upload a report payload to the internal mk-collect endpoint.

    Returns 'success' when the service answers code 200, otherwise the
    service's Chinese error message (``cnmsg``).
    NOTE(review): the endpoint URL is hard-coded to an internal IP and a
    failed HTTP request / non-JSON body will raise — confirm callers handle
    that.
    """
    headers = {'Content-Type': 'application/json'}
    result = requests.post('http://172.16.165.46/mk-collect', json=data, headers=headers)
    res = json.loads(result.text)
    if res['code'] != 200:
        return res['cnmsg']
    else:
        return 'success'
if __name__ == '__main__':
data = '{"m": "/report/questionnaire", "v": 1, "p": {"common": {"user": 4566232, "request_id": "1617092390334566232", "sid": "6e85bf3f02a16af7a87669fca8af8b2795965f3c", "platform": "wechat", "ip": "221.226.177.146", "user_agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36", "time": 1617092390}, "event": {"gender": 2, "occupation": "\u4eba\u4e8b/\u8d22\u52a1/\u884c\u653f", "schooling": "\u5927\u5b66\u672c\u79d1", "province": "\u6c5f\u82cf\u7701", "city": "\u5357\u4eac\u5e02", "age": "", "profession_category": "", "studying_reason": "\u63d0\u9ad8\u6280\u672f\u5b9e\u529b\uff0c\u4e0d\u5728\u5927\u6570\u636e\u65f6\u4ee3\u843d\u4f0d ", "birth_year": "1975"}}}'
data = json.loads(data)
print(data)
res = data_upload(data)
print(res) |
20,997 | d3e1c35f4c4c4ffcd55f1caef81233c508f4082d | # -*- coding: utf-8 -*-
DEBUG = False
HTTP_PORT = 80
SECRET_KEY = "business_web-secret"
SQLALCHEMY_TRACK_MODIFICATIONS = False
SESSION_COOKIE_HTTPONLY = False
if DEBUG:
SENTRY_RELEASE = u"测试环境"
TESTING = True
WTF_CSRF_ENABLED = False
SQLALCHEMY_DATABASE_URI = "postgresql://business:1vbrcu2@localhost:5432/business"
else:
SENTRY_RELEASE = u"线上环境"
TESTING = False
WTF_CSRF_ENABLED = True
SQLALCHEMY_DATABASE_URI = "postgresql://business:1vbrcu2@localhost:5432/business"
|
20,998 | 77341f1b8c313ae43226abc83a976c2b9c18cd46 | from .base import *
from typing import List
__all__ = ['Rdp', 'RdpContainer']
class Rdp(ObjBase):
    """One Remote Destination Profile row (thin view over the CSV dict).

    (Fix: removed a stray ``pass`` left at the end of the class body.)
    """
    @property
    def name(self) -> str:
        """Profile name (alias for remote_destination_profile_name)."""
        return self.remote_destination_profile_name
    @property
    def description(self) -> str:
        return self.dict['DESCRIPTION']
    @property
    def mobility_user(self) -> str:
        """Owning mobility user (alias for mobility_user_id)."""
        return self.mobility_user_id
    @property
    def device_pool(self) -> str:
        return self.dict['DEVICE POOL']
    @property
    def css(self) -> str:
        return self.dict['CSS']
class RdpContainer(CsvBase):
    """Container of Rdp rows; narrows the base ``list`` type for tooling."""
    factory = Rdp
    @property
    def list(self) -> List[Rdp]:
        # Same data as the base class; only the annotation is narrowed.
        return super(RdpContainer, self).list
|
20,999 | ee998e7340729d3047e9b10ccf72c38b7c7d6281 | import views
import django.contrib.auth as django_auth
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^$', views.index, name='home'),
url(r'^login$', django_auth.views.login,
{'template_name': 'login.djhtml'},
name='login'
),
# Views that create new information
url(r'^nueva_persona', views.crear_persona,
name='nueva_persona'
),
url(r'^nueva_historia', views.crear_historia,
name='nueva_historia'
),
# Views that show information
url(r'^pacientes', django_auth.decorators.login_required(views.PacienteListView.as_view()),
name='lista_pacientes'),
url(r'^historias/(\d+)/', django_auth.decorators.login_required(views.HistoriaListView.as_view()),
name='historias_de_paciente'),
) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.