blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
def verifica_primos(L):
    """Return a dict mapping each number in L to True (prime) or False.

    Fixes over the original: `False` was misspelled `false` (NameError),
    the loop index was never advanced (infinite loop), and `divisor` /
    `primo` were never reset between elements, corrupting later results.
    """
    dicionario = {}
    for n in L:
        if n < 2:
            # 0, 1 and negatives are not prime.
            dicionario[n] = False
        elif n == 2:
            dicionario[n] = True
        else:
            # Trial division over every candidate divisor below n.
            primo = True
            divisor = 2
            while divisor < n:
                if n % divisor == 0:
                    primo = False
                divisor += 1
            dicionario[n] = primo
    return dicionario
c111012acbd9433dd66df2fbf25a1baed1ab43d5 | Python | Yamadads/piro3 | /Postprocessing.py | UTF-8 | 706 | 2.609375 | 3 | [] | no_license | import numpy as np
import cv2
import scipy
import DataLoader
def process_image(image):
    """Post-process a probability map into a cleaned, compressed binary image.

    Scales `image` (assumed to hold values in [0, 1] — TODO confirm with
    callers; note the scaling mutates the input array in place), removes
    small connected components, applies morphological opening and a median
    filter, then compresses the result via DataLoader.

    Returns the compressed image produced by DataLoader.get_compressed_image.
    """
    # Bug fix: the module only does `import scipy`, which does NOT load the
    # `signal` submodule; make sure scipy.signal is importable before use.
    import scipy.signal

    image *= 255
    im = np.array(image, np.uint8)
    # Label 8-connected components; stats[:, -1] holds component pixel counts.
    nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(im, connectivity=8)
    sizes = stats[1:, -1]
    nb_components = nb_components - 1  # drop the background component
    min_size = 150
    img2 = np.zeros((output.shape))
    # Keep only components with at least `min_size` pixels.
    for i in range(0, nb_components):
        if sizes[i] >= min_size:
            img2[output == i + 1] = 255
    kernel = np.ones((3, 3), np.uint8)
    # Morphological opening removes remaining speckle noise.
    img3 = cv2.morphologyEx(img2, cv2.MORPH_OPEN, kernel)
    # Median filter smooths jagged component borders.
    img4 = scipy.signal.medfilt(img3, kernel_size=3)
    final_image = DataLoader.get_compressed_image(img4, 1500)
    return final_image
| true |
05c3317d7b162143d11a523046b8e291124bf5ee | Python | MURDriverless/control_pysim | /src/utils/cubic_spline.py | UTF-8 | 4,088 | 3.59375 | 4 | [
"MIT"
] | permissive | import numpy as np
import bisect
class Spline:
    """One-dimensional piecewise-cubic interpolator.

    Each segment between consecutive knots is a cubic whose slope is zero
    at both of its endpoints (Hermite interpolation with f' = 0 at knots).
    """

    def __init__(self, t, x):
        self.t = t
        self.x = x
        # n knots define n - 1 cubic segments, one per inter-knot interval.
        durations = np.diff(t)
        self.spline_coefficients = [
            self.get_spline_coefficients(x[k], 0, x[k + 1], 0, durations[k])
            for k in range(len(x) - 1)
        ]

    def search_spline_index(self, t):
        """Return the index of the segment containing parameter `t`."""
        # bisect() yields the insertion point that keeps self.t sorted, so
        # the segment starting at or before `t` sits one position earlier.
        segment = bisect.bisect(self.t, t) - 1
        # There is no segment to the right of the final knot; at the end
        # knot, fall back to the last real segment.
        if segment == len(self.t) - 1:
            return segment - 1
        return segment

    def interpolate(self, t):
        """Return the interpolated value at `t`, or None outside [t0, tN]."""
        if t < self.t[0] or t > self.t[-1]:
            return None
        segment = self.search_spline_index(t)
        a0, a1, a2, a3 = self.spline_coefficients[segment]
        # Evaluate the cubic in local time measured from the segment start.
        dt = t - self.t[segment]
        return a0 + a1 * dt + a2 * dt ** 2 + a3 * dt ** 3

    @staticmethod
    def get_spline_coefficients(x_init, xdot_init, x_final, xdot_final, tf):
        """Solve for cubic coefficients [a0, a1, a2, a3].

        The cubic a0 + a1*t + a2*t^2 + a3*t^3 matches the given endpoint
        values and derivatives over a segment of duration `tf`.
        """
        tf2 = tf * tf
        tf3 = tf2 * tf
        # Rows encode f(0), f'(0), f(tf) and f'(tf) of the cubic.
        lhs = np.array([
            [1, 0, 0, 0],
            [0, 1, 0, 0],
            [1, tf, tf2, tf3],
            [0, 1, 2 * tf, 3 * tf2]
        ])
        rhs = np.array([x_init, xdot_init, x_final, xdot_final])
        return np.linalg.solve(lhs, rhs)
class Spline2D:
    """2-D path interpolator: chord-length parameterization, one Spline per axis."""

    def __init__(self, x, y):
        self.t = self.get_t_vector(x, y)
        self.sx = Spline(self.t, x)
        self.sy = Spline(self.t, y)

    @staticmethod
    def get_t_vector(x, y):
        """Build the parameter vector as cumulative chord length, starting at 0.

        Consecutive point distances (hypotenuse of dx, dy) are accumulated so
        the parameter grows monotonically along the path.
        """
        steps = np.hypot(np.diff(x), np.diff(y))
        # Prepend 0 so the first knot sits at parameter zero.
        return np.insert(np.cumsum(steps), 0, 0)

    def interpolate(self, t):
        """Return the (x, y) point at parameter `t` (None components outside range)."""
        return self.sx.interpolate(t), self.sy.interpolate(t)
| true |
def rate_movie(movie, rate):
    """Print the movie title together with the user's rating."""
    print(f"Movie:  {movie} ; Your rating:  {rate} .")


def age(name, current_age):
    """Greet `name` and report this year's and next year's age."""
    print(f"Hello  {name} , you are  {current_age}  years old.")
    print(f"Next year you will be  {current_age + 1} .")


rate_movie("The Meaning of Life", 4)
age("Eric", 67)
def selectionSort(list: list, key='') -> list:
    """In-place selection sort (ascending); pass key='reverse' for descending.

    Returns the same list object after sorting.
    """
    for i in range(len(list)):
        # Index of the first occurrence of the smallest remaining element.
        smallest = min(range(i, len(list)), key=list.__getitem__)
        list[i], list[smallest] = list[smallest], list[i]
    if key.casefold() == 'reverse':
        list.reverse()
    return list
| true |
import requests

# Each worker node exposes a /magic endpoint returning JSON like
# [{"result": <number>}]. Query every worker, echo each result, and
# print the grand total at the end.
#
# Fixes over the original: the five copy-pasted request blocks are folded
# into one loop, and the accumulator no longer shadows the builtin `sum`.
WORKERS = [
    '192.168.0.136',
    '192.168.0.142',
    '192.168.0.141',
    '192.168.0.140',
    '192.168.0.134',
]

total = 0
for host in WORKERS:
    r = requests.get('http://{}:5000/magic'.format(host))
    m = r.json()
    print(m[0]["result"])
    total += m[0]["result"]
print(total)
917515033eedb6cf3a8341c3c1f6e75ddff48d68 | Python | Kawdrin/Clayman | /src/map_creator.py | UTF-8 | 2,171 | 2.625 | 3 | [] | no_license | from pygame import Rect
from pygame.sprite import Sprite
from pygame.transform import scale
import json
from src.sprite_sheet import SpriteSheet
class OgmoMap:
    # Loads an Ogmo-Editor level (JSON) and builds pygame sprites/entities
    # from its layers.
    def __init__(self, level, tilemap_file):
        """Parse the level JSON at `level` and load the tile sprite sheet.

        NOTE(review): `tilemap_file` is accepted but ignored — the sheet
        path is hard-coded below. Confirm whether it should be
        SpriteSheet(tilemap_file).
        """
        with open(level, "r") as mapa_novo:
            self.mapa_atual = json.load(mapa_novo)
        self.tilemap_level = SpriteSheet("res/CreyMan.png")
    def create_tile(self, layer_id, group):
        """Create one 32x32 sprite per non-empty cell of a tile layer."""
        layer_level = self.mapa_atual["layers"][layer_id]
        index_item = -1
        for celula in layer_level["dataCoords"]:
            index_item += 1
            # [-1] marks an empty cell in Ogmo's dataCoords format.
            if celula == [-1]:
                continue
            # NOTE(review): row is computed with gridCellsY but column with
            # gridCellsX; for non-square grids the row should presumably be
            # index // gridCellsX — confirm against the Ogmo export.
            y = index_item // layer_level["gridCellsY"]
            x = index_item % layer_level["gridCellsX"]
            celula_bloco = Sprite(group)
            # Source tiles are 16x16; they are scaled up to 32x32 on screen.
            celula_bloco.image = scale(self.tilemap_level.clip_sprite(celula[0]*16, celula[1]*16, 16, 16), (32, 32))
            celula_bloco.rect = Rect(x*32, y*32, 32, 32)
    def create_grid(self, layer_id, group):
        """Create a solid 32x32 sprite for every '1' cell of a grid layer."""
        layer_level = self.mapa_atual["layers"][layer_id]
        index_item = -1
        for celula in layer_level["grid"]:
            index_item += 1
            # '0' marks an empty grid cell.
            if celula == '0':
                continue
            # NOTE(review): same gridCellsY/gridCellsX asymmetry as above.
            y = index_item // layer_level["gridCellsY"]
            x = index_item % layer_level["gridCellsX"]
            celula_bloco = Sprite(group)
            # Always uses the tile at sheet position (0, 0) for solid cells.
            celula_bloco.image = scale(self.tilemap_level.clip_sprite(0, 0, 16, 16), (32, 32))
            celula_bloco.rect = Rect(x*32, y*32, 32 ,32)
    def spawn_entities(self, layer_id, group, scale=1):
        """Instantiate known entities ("Argila") from an entity layer.

        `scale` multiplies the stored coordinates. NOTE(review): the local
        parameter `scale` shadows the pygame `scale` import used elsewhere
        in this class; `Hero` is imported but unused here.
        """
        from src.ent.hero import Hero
        from src.ent.argila import Argila
        entidades = self.mapa_atual["layers"][layer_id]["entities"]
        for ent in entidades:
            if ent["name"] == "Argila":
                Argila(ent["x"]*scale, ent["y"]*scale, group)
    def get_pos_entitie(self, name_entitie, layer_id, scale=1):
        """Return the scaled (x, y) of the first entity named `name_entitie`.

        Falls back to (0, 0) when the entity is not present in the layer.
        """
        from src.ent.hero import Hero
        entidades = self.mapa_atual["layers"][layer_id]["entities"]
        for ent in entidades:
            if ent["name"] == name_entitie:
                return (ent["x"]*scale, ent["y"]*scale)
        return (0, 0)
| true |
85edddfe031dd696f73a259dfba4681e421051ce | Python | ArthurNdy/Appli-web-ECL | /documentation/TD3-4/TD3-serveur1.py | UTF-8 | 4,091 | 2.703125 | 3 | [] | no_license | # TD3-serveur1.py
import http.server
import socketserver
from urllib.parse import urlparse, parse_qs, unquote
import json
# définition du handler
# Handler definition: static documents plus a few dynamic endpoints.
class RequestHandler(http.server.SimpleHTTPRequestHandler):
    """Serves static files from `static_dir` and the dynamic /coucou,
    /toctoc and /service endpoints."""

    # Root sub-directory of the static documents.
    static_dir = '/client'

    # Version string reported by the server.
    server_version = 'TD3-serveur1.py/0.1'

    def do_GET(self):
        """Handle GET: dispatch on the first path segment."""
        self.init_params()
        # First and last name taken from the access path.
        if self.path_info[0] == 'coucou':
            self.send_html('<p>Bonjour {} {}</p>'.format(*self.path_info[1:]))
        # First and last name taken from the query string.
        elif self.path_info[0] == "toctoc":
            self.send_toctoc()
        # Generic request: echo path and query string.
        elif self.path_info[0] == "service":
            self.send_html('<p>Path info : <code>{}</p><p>Chaîne de requête : <code>{}</code></p>' \
                           .format('/'.join(self.path_info), self.query_string))
        else:
            self.send_static()

    def do_HEAD(self):
        """Handle HEAD requests (delegated to the static handler)."""
        self.send_static()

    def do_POST(self):
        """Handle POST: /toctoc and /service are supported, others get 405."""
        self.init_params()
        # First and last name taken from the urlencoded body.
        if self.path_info[0] == "toctoc":
            self.send_toctoc()
        # Generic request: echo path, query string and body.
        elif self.path_info[0] == "service":
            self.send_html(('<p>Path info : <code>{}</code></p><p>Chaîne de requête : <code>{}</code></p>' \
                + '<p>Corps :</p><pre>{}</pre>').format('/'.join(self.path_info), self.query_string, self.body))
        else:
            self.send_error(405)

    def send_toctoc(self):
        """Reply with the first/last name as a JSON document.

        Bug fix: the original built the JSON body but never wrote any
        response, leaving the client hanging; the body is now sent with a
        JSON content type.
        """
        data = {
            'given_name': self.params['Prenom'][0],
            'family_name': self.params['Nom'][0]
        }
        body = json.dumps(data)
        self.send(body, [('Content-Type', 'application/json;charset=utf-8')])

    def send_static(self):
        """Serve the requested static document from `static_dir`."""
        # Prepend the static-directory prefix to the access path.
        self.path = self.static_dir + self.path
        # Delegate to the parent class (do_GET or do_HEAD)
        # according to the HTTP verb.
        if (self.command == 'HEAD'):
            http.server.SimpleHTTPRequestHandler.do_HEAD(self)
        else:
            http.server.SimpleHTTPRequestHandler.do_GET(self)

    def send_html(self, content):
        """Send `content` wrapped in a minimal dynamic HTML document."""
        headers = [('Content-Type', 'text/html;charset=utf-8')]
        html = '<!DOCTYPE html><title>{}</title><meta charset="utf-8">{}' \
               .format(self.path_info[0], content)
        self.send(html, headers)

    def send(self, body, headers=()):
        """Send a 200 response with the given headers and UTF-8 body.

        (The headers default is now an immutable tuple instead of a shared
        mutable list.)
        """
        encoded = bytes(body, 'UTF-8')
        self.send_response(200)
        for header in headers:
            self.send_header(*header)
        self.send_header('Content-Length', int(len(encoded)))
        self.end_headers()
        self.wfile.write(encoded)

    def init_params(self):
        """Parse the request into path_info, query_string, params and body."""
        # Parse the URL.
        info = urlparse(self.path)
        self.path_info = [unquote(v) for v in info.path.split('/')[1:]]
        self.query_string = info.query
        self.params = parse_qs(info.query)
        # Read the request body, if any; urlencoded bodies override params.
        length = self.headers.get('Content-Length')
        ctype = self.headers.get('Content-Type')
        if length:
            self.body = str(self.rfile.read(int(length)), 'utf-8')
            if ctype == 'application/x-www-form-urlencoded':
                self.params = parse_qs(self.body)
        else:
            self.body = ''
        # Debug traces.
        print('info_path =', self.path_info)
        print('body =', length, ctype, self.body)
        print('params =', self.params)
# Instantiate the TCP server on port 8080 and serve requests until killed.
httpd = socketserver.TCPServer(("", 8080), RequestHandler)
httpd.serve_forever()
| true |
9cd4df1241965e72288a797600fd0163a397d066 | Python | daicang/Euler | /util.py | UTF-8 | 173 | 2.75 | 3 | [
"MIT"
] | permissive | # util lib
def list2int(l):
    """Concatenate a list of ints positionally into a single integer.

    Each element shifts the accumulator one decimal place; an empty or
    falsy list yields 0. Non-int elements raise AssertionError.
    """
    if not l:
        return 0
    result = 0
    for digit in l:
        assert isinstance(digit, int)
        result = result * 10 + digit
    return result
| true |
4f405f27e9e3c5b601824260012164c67cbef08a | Python | Siva900/CS312-AI-Lab | /Resources/SVP/lab2/testcases/script.py | UTF-8 | 1,018 | 3.359375 | 3 | [
"MIT"
] | permissive | """
For generation random matrices, go to https://onlinemathtools.com/generate-random-matrix
Download them into the directory as input1.txt, input2.txt etc
Add a line in the beginning of the file with size of matrix
And to get the solutions, run
$python3 script.py > output.txt
"""
import os
import requests
# Iterate over files named "input1", "input2", ... in the current directory.
# NOTE(review): the docstring above mentions "input1.txt" but the code opens
# files without the .txt extension — confirm which naming is intended.
i = 1
files = os.listdir()
while f"input{i}" in files:
    with open(f"input{i}", "r") as file:
        # Re-encode the matrix: cells joined by '-', rows joined by '--',
        # skipping the first line (which holds the matrix size).
        text = []
        firstLine = True
        for line in file:
            if firstLine:
                firstLine = False
                continue
            line = "-".join(line.split())
            text.append(line)
        text = "--".join(text)
    # Submit the matrix to the online Hungarian-algorithm solver.
    url = f"http://www.hungarianalgorithm.com/solve.php?c={text}&random=1"
    # url = text[:46]+text[47:]
    text = requests.get(url).text
    # Scrape the optimal value out of the returned HTML page.
    index = text.index("optimal value equals ")+21
    text = text[index:]
    index = text.index(".")
    text = text[:index]
    print(text, url)
    i += 1
| true |
0df1decfba15bde489e1ecbc428e1d6d3ba819f4 | Python | matt-fielding8/ODI_Cricket | /src/data/gather_data.py | UTF-8 | 4,400 | 3.328125 | 3 | [
"MIT"
] | permissive | """
All the scripts required to gather missing data from
https://www.espncricinfo.com/
"""
from bs4 import BeautifulSoup
import requests
import numpy as np
def getSoup(url):
    '''
    Fetch `url` and return the response parsed as a BeautifulSoup document.
    '''
    response = requests.get(url)
    return BeautifulSoup(response.content, "html.parser")
def getMatchid(soup):
    ''' (html) -> list of str
    Return match_id as list of string from soup.

    Falls back to ['-'] when no "ODI no" anchor can be found.
    '''
    try:
        return soup.find(lambda tag: tag.name == 'a' and 'ODI no' in tag.get_text()).contents
    except Exception as e:
        # Bug fix: the original printed an undefined name `url` here, so the
        # fallback path itself raised NameError instead of returning ['-'].
        print("Match ID Extraction Error\n", e)
        return ['-']
# Gather missing score data
def getMissingData(url):
    ''' str -> dct
    Uses requests and bs4 libraries to extract and parse html data from url.
    Returns a dct with 'match_id', 'country', 'score', 'detailed_score' keys.
    '''
    # Fetch and parse the scorecard page.
    soup = getSoup(url)
    # Extract match_id
    try:
        match_id = soup.find(lambda tag: tag.name == 'a' and 'ODI no' in tag.get_text()).contents
    except Exception as e:
        print("Match ID Extraction Error\n", e, '\n', url)
        # NaN placeholder keeps the output dict shape consistent on failure.
        match_id = [np.NaN]
    print(match_id)
    # Extract score data from soup
    score = soup.find_all(class_='cscore_score')
    try:
        score_lst = [i.contents[0] for i in score]
    except Exception as e:
        print("Score Extraction Error\n", e, '\n', match_id, url)
        score_lst = [np.NaN]*2
    # Extract country data from soup
    country = soup.find_all(class_='cscore_name--long')
    try:
        country_lst = [i.contents[0] for i in country]
    except Exception as e:
        # NOTE(review): this prints the exception twice; presumably the second
        # `e` was meant to be `match_id`, matching the score branch above.
        print("Country Extraction\n", e, '\n', e, url)
        country_lst = [np.NaN]*2
    # Extract detailed score data from soup
    ## Find tags containg "TOTAL"
    tot_tags = soup.find_all(lambda tag: tag.name == 'div' and \
                             tag.get('class')==['cell'] and tag.get_text()=='TOTAL')
    # Exactly two TOTAL cells are expected (one per innings); anything else
    # is treated as a match with no usable result.
    if len(tot_tags) == 2:
        try:
            detailed_score = [i.findNext().contents[0] for i in tot_tags]
        except Exception as e:
            print("detailed_score Extraction Error\n", e, '\n', url)
            detailed_score = [np.NaN]*2
    else:
        print("No result likely", url)
        detailed_score = [np.NaN]*2
    # Write information to dct; lists are truncated/duplicated to length 2
    # so each key maps one value per innings/team.
    score_dct = {'match_id':match_id*2,
                 'country':country_lst[:2],
                 'score':score_lst[:2],
                 'detailed_score':detailed_score}
    return score_dct
# Get page links directing to all results per year
def yearPageLinks(soup):
    ''' (soup) -> list of str
    Extracts relative links in "QuoteSummary" class from soup.
    Returns relative url's as a list of str.
    '''
    link_list = []
    try:
        for i in soup.find_all(class_='QuoteSummary'):
            link_list.append(i['href'])
    # Bug fix: the bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; only real errors should be reported here.
    except Exception:
        print('Class "QuoteSummary" does not exist')
    return link_list
# Filter links based on criteria
def filterLinks(links, lst):
    """ (list of str, list of str) -> list of str
    Keep every element of `links` that contains some element of `lst` as a
    substring, grouped in `lst` order and flattened into a single list.
    """
    matched = []
    for needle in lst:
        for link in links:
            if needle in link:
                matched.append(link)
    return matched
# Turn relative url to absolute using prefix
def absoluteUrl(prefix, relative):
    '''
    Join `prefix` (trailing slashes stripped) with each relative link.
    Returns a list of absolute urls.
    '''
    base = prefix.rstrip('/')
    absolute = []
    for rel in relative:
        absolute.append(base + rel)
    return absolute
# Get scorecard links
def scorecardLinks(year_links, match_ids):
    ''' (lst of str, list of str) -> list of str
    Fetch every page in `year_links` and collect the relative links whose
    anchor text is one of `match_ids`.
    '''
    found = []
    for page_url in year_links:
        # Fetch and parse one year page, then scan its candidate anchors.
        page = getSoup(page_url)
        candidates = page.find_all(['tr', 'td', 'a'], class_=['data-link'], attrs=['href'])
        for anchor in candidates:
            if anchor.contents[0] in match_ids:
                found.append(anchor['href'])
    return found
def flattenList(lst):
    ''' (lst of lst) -> lst
    Flatten one level of nesting.
    '''
    flat = []
    for sub in lst:
        flat.extend(sub)
    return flat
| true |
# Read an available character pool and a bracketed, comma-separated list of
# quoted words; print the alphabetically-first word of the greatest length
# whose characters all appear in the pool.
chars = list(input())
raw_words = input()[1:-1].split(",")
# Map each word (quotes stripped) to its length.
length_by_word = {w[1:-1]: len(w[1:-1]) for w in raw_words}
# Sort (length, word) pairs ascending, reverse them, and rebuild a dict:
# duplicate lengths collapse so one word survives per length, longest first.
ordered = dict(reversed(sorted(zip(length_by_word.values(), length_by_word.keys()))))
candidates = []
best_len = 0
for word in ordered.values():
    # A word qualifies when every one of its characters occurs in the pool.
    if all(c in chars for c in word):
        if len(word) >= best_len:
            candidates.append(word)
            best_len = len(word)
candidates.sort()
print(candidates[0])
04bfc2734df8f8915c373a5691f6c8a871946bdd | Python | anilkonsal/python-flask-rsg | /app/models/User.py | UTF-8 | 2,065 | 2.671875 | 3 | [] | no_license | from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app
from flask_login import UserMixin
from .. import login_manager
from .. import db
class User(UserMixin, db.Model):
    """SQLAlchemy user model with hashed passwords and password-reset tokens."""
    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50))
    email = db.Column(db.String(120), unique=True)
    password_hash = db.Column(db.String(128))
    password_reset_token = db.Column(db.String(128))

    def __init__(self, name, email, password):
        # Bug fix: the original read `self.name = name,` — the trailing comma
        # stored the name as a 1-tuple instead of a string.
        self.name = name
        self.email = email
        self.password = password

    def __repr__(self):
        return '<User %r>' % self.name

    @property
    def password(self):
        """Write-only attribute: the plaintext password is never readable."""
        raise AttributeError('Password is not a readable property')

    @password.setter
    def password(self, password):
        # Only the salted hash is persisted.
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Return True if `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def generate_password_reset_token(self, expiration=3600):
        """Create, persist and return a signed, time-limited reset token."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        token = s.dumps({'token': self.id})
        self.password_reset_token = token
        db.session.add(self)
        return token

    def confirm_token(self, token):
        """Validate a reset token for this user; clears it on success."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        # Narrowed from a bare `except:` — any deserialization/signature
        # failure means the token is invalid.
        except Exception:
            return False
        if data.get('token') != self.id:
            return False
        self.password_reset_token = None
        db.session.add(self)
        return True

    @staticmethod
    def load_user_by_token(token):
        """Return the User encoded in `token`, or False if it is invalid."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        # Narrowed from a bare `except:` (see confirm_token).
        except Exception:
            return False
        user_id = data.get('token')
        return User.query.get(int(user_id))
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload a User from the session-stored id."""
    return User.query.get(int(user_id))
| true |
class Node:
    """A single element of a singly linked list."""

    def __init__(self, data):
        self.data = data  # payload stored in this node
        self.next = None  # reference to the following node, if any
class linkedList:
    """Singly linked list of Node objects, keeping only a `head` reference."""

    def __init__(self):
        self.head = None

    def append(self, data):
        """Add a node holding `data` at the tail (O(n) walk)."""
        newNode = Node(data)
        if self.head == None:
            self.head = newNode
            return
        else:
            lastNode = self.head
            while lastNode.next != None:
                lastNode = lastNode.next
            lastNode.next = newNode

    def prepend(self, data):
        """Add a node holding `data` at the head (O(1))."""
        newNode = Node(data)
        if self.head == None:
            self.head = newNode
            return
        else:
            newNode.next = self.head
            self.head = newNode

    def insertAfterNode(self, prevNode, data):
        """Insert a new node holding `data` right after `prevNode`."""
        newNode = Node(data)
        newNode.next = prevNode.next
        prevNode.next = newNode

    def printList(self):
        """Print every element, one per line, head to tail."""
        curNode = self.head
        while curNode != None:
            print(curNode.data)
            curNode = curNode.next

    def deleteNode(self, key):
        """Delete the first node whose data equals `key` (message if absent)."""
        curNode = self.head
        if curNode != None and curNode.data == key:
            self.head = curNode.next
            curNode = None
            return
        else:
            prev = None
            while curNode != None and curNode.data != key:
                prev = curNode
                curNode = curNode.next
            if curNode == None:
                print("The data is not found in the list")
                return
            else:
                prev.next = curNode.next
                curNode = None

    def deleteAtPos(self, pos):
        """Delete the node at 0-based position `pos` (message if out of range)."""
        curNode = self.head
        if pos == 0:
            self.head = curNode.next
            curNode = None
            return
        else:
            cnt = 0
            prev = None
            while curNode != None and cnt != pos:
                prev = curNode
                curNode = curNode.next
                cnt += 1
            if curNode == None:
                print("The node doesn't exist")
                return
            else:
                prev.next = curNode.next
                curNode = None

    def len_iterative(self):
        """Return the number of nodes by walking the list."""
        cnt = 0
        curNode = self.head
        while curNode != None:
            curNode = curNode.next
            cnt += 1
        return cnt

    def len_recursive(self, headNode):
        """Return the number of nodes from `headNode` onward, recursively."""
        if headNode is None:
            return 0
        else:
            return 1 + self.len_recursive(headNode.next)

    def swapNode(self, key1, key2):
        """Swap the nodes holding `key1` and `key2` by relinking them.

        Bug fix: the scan for the second key read `curNod2.next` (a typo),
        raising NameError whenever `key2` was not at the head of the list.
        """
        if key1 == key2:
            print("The two nodes are the same nodes, cannot be swapped")
            return
        # Locate the first key and its predecessor.
        prev1 = None
        curNode1 = self.head
        while curNode1 != None and curNode1.data != key1:
            prev1 = curNode1
            curNode1 = curNode1.next
        # Locate the second key and its predecessor.
        prev2 = None
        curNode2 = self.head
        while curNode2 != None and curNode2.data != key2:
            prev2 = curNode2
            curNode2 = curNode2.next
        if curNode1 == None or curNode2 == None:
            print("The nodes doesn't exist in the list")
            return
        else:
            # Relink predecessors (a None predecessor means the head).
            if prev1 == None:
                self.head = curNode2
                prev2.next = curNode1
            elif prev2 == None:
                self.head = curNode1
                prev1.next = curNode2
            else:
                prev1.next = curNode2
                prev2.next = curNode1
            # Exchange the successor links of the two nodes.
            temp1 = curNode1.next
            temp2 = curNode2.next
            curNode1.next = temp2
            curNode2.next = temp1

    def reverse_iterative(self):
        """Reverse the list in place by flipping each `next` pointer."""
        prev = None
        curNode = self.head
        while curNode != None:
            nxt_temp = curNode.next
            curNode.next = prev
            prev = curNode
            curNode = nxt_temp
        self.head = prev

    def remove_duplicates(self):
        """Keep only the first occurrence of each value, in order."""
        prev = None
        curNode = self.head
        data_freq = dict()
        while curNode != None:
            if curNode.data not in data_freq:
                data_freq[curNode.data] = 1
                prev = curNode
                curNode = curNode.next
            else:
                # Unlink the duplicate and continue after the survivor.
                prev.next = curNode.next
                curNode = None
                curNode = prev.next

    def print_nth_from_last(self, n):
        """Print and return the n-th node from the tail (1-based)."""
        total_len = self.len_iterative()
        distance = total_len - 1
        curNode = self.head
        while curNode != None:
            if distance == n - 1:
                print(curNode.data)
                return curNode
            else:
                distance -= 1
                curNode = curNode.next

    def occurences(self, data):
        """Return how many nodes hold `data`."""
        cnt = 0
        curNode = self.head
        while curNode != None:
            if curNode.data == data:
                cnt += 1
            curNode = curNode.next
        return cnt

    def rotate(self, k):
        """Rotate the list so the node at position k becomes the new head."""
        if k == 0 or k >= self.len_iterative():
            print("The list can't be rotated or is out of range")
            return
        p = self.head
        q = self.head
        prev = None
        cnt = 0
        # Advance p to the k-th node; q will then walk to the tail.
        while p != None and cnt < k:
            prev = p
            p = p.next
            q = q.next
            cnt += 1
        p = prev
        while q != None:
            prev = q
            q = q.next
        q = prev
        # Link the old tail to the old head and split at position k.
        q.next = self.head
        self.head = p.next
        p.next = None

    def tail_to_head(self):
        """Move the tail node to the front of the list."""
        lastNode = self.head
        secondLast = None
        while lastNode.next != None:
            secondLast = lastNode
            lastNode = lastNode.next
        lastNode.next = self.head
        self.head = lastNode
        secondLast.next = None
# Testing Section
# Build a list 1..5 and exercise the main operations by printing results.
lst = linkedList()
lst.append(1)
lst.append(2)
lst.append(3)
lst.append(4)
lst.append(5)
lst.printList()
# 4th node from the tail of [1..5] is 2.
lst.print_nth_from_last(4)
print(lst.occurences(3))
lst.append(3)
print(lst.occurences(3))
print(" ")
# Rotate so the node at position 2 becomes the head.
lst.rotate(2)
lst.printList()
print(" ")
# Move the tail node to the front.
lst.tail_to_head()
lst.printList()
lst.remove_duplicates()
print(" ")
lst.printList()
| true |
c1bbb9651f7dca5991396e76cb5a3ca04b1ee617 | Python | g10draw/chatbot | /chatbot.py | UTF-8 | 683 | 4.03125 | 4 | [] | no_license | import random
"""
Chat Bot 1.0:
This is a basic chat bot with zero training and with random response
"""
# Greetings and responses
keywords = ['hello', 'hai', 'greetings', 'what\'s up']
responses = ['hey', 'hello', 'what\'s up bro']


def check_for_greeting(message):
    """If any of the words in the user's input was a greeting, greet in return.

    Bug fix: the original returned 'hmmm' as soon as the FIRST word was not
    a greeting, so later words were never checked (contradicting this
    docstring). Now every word is inspected before giving up; an empty
    message also answers 'hmmm' instead of returning None.
    """
    for word in message.split():
        if word.lower() in keywords:
            return random.choice(responses)
    return 'hmmm'
if __name__ == '__main__':
    # Simple REPL: prompt with the user's name until they type 'bye'.
    msg = ''
    user = input('Enter your good name? ')
    print('Start by greeting robot')
    while msg != 'bye':
        msg = input('%s :' % user)
        print('bot: ' + check_for_greeting(msg))
| true |
e08ac3d61a169c756d1858a26df0525f2c626321 | Python | oprk/project-euler | /p040_champernownes_constant/champernownes_constant.py | UTF-8 | 1,060 | 4.125 | 4 | [] | no_license | # Champernowne's constant
# Problem 40
# An irrational decimal fraction is created by concatenating the positive integers:
# 0.123456789101112131415161718192021...
# .-----------^ 12
# .--------------------------------^ 33
#
# It can be seen that the 12th digit of the fractional part is 1.
# If dn represents the nth digit of the fractional part, find the value of the
# d1 × d10 × d100 × d1000 × d10000 × d100000 × d1000000
# 1-9, 9 1-digits
# 10-99, 90 2-digits
# 100-999, 900 3-digits
import operator
import time
def champernownes_constant_nth_digit(n):
    """Return the n-th digit (1-indexed) of 0.123456789101112...

    Fixes over the original: the region-skipping loop compared the offset
    against the number count (`num`) instead of the digit count
    (`num * num_digits`), producing wrong answers or IndexErrors for
    positions such as d150; and `/` is now the explicit floor division
    `//` so the function also works under Python 3.
    """
    # nth digit is 1-indexed; work 0-indexed within the digit stream.
    n -= 1
    num_digits = 1                      # digits per number in current region
    num = 9 * 10 ** (num_digits - 1)    # how many numbers the region holds
    # Skip whole regions: the k-digit region contributes num * k digits.
    while n >= num * num_digits:
        n -= num * num_digits
        num_digits += 1
        num *= 10
    div = n // num_digits  # offset of the number inside the region
    rem = n % num_digits   # digit position inside that number
    return int(str(10**(num_digits - 1) + div)[rem])
# Multiply d1 * d10 * ... * d1000000 and report the elapsed time.
# Fixes over the original: `reduce` and `xrange` were Python-2-only
# builtins; functools.reduce and range work on both Python 2 and 3.
from functools import reduce

t0 = time.time()
result = reduce(operator.mul,
                (champernownes_constant_nth_digit(10**i)
                 for i in range(7)))
t1 = time.time()
print(result)
print('time %f' % (t1 - t0))
# 210
# time 0.000039
| true |
# Volume of a sphere of radius 6: V = (4/3) * pi * r^3.
pi = 3.1415926535897931
r = 6.0
V = (4.0 / 3.0) * pi * r ** 3
print('The volume of the sphere is: ', V)
| true |
1d66adc1f294fecafd09e6b363281cc1d6611380 | Python | CS196Illinois/sp18-hw-ref | /hw5-test.py | UTF-8 | 1,139 | 3.375 | 3 | [] | no_license | from hw5 import *
def test_all():
    """Run every Stack and Queue check in sequence."""
    checks = (
        test_stack_push_with_size,
        test_stack_pop_and_peek,
        test_empty_stack_pop,
        test_queue_push_with_size,
        test_queue_pop_and_peek,
        test_empty_queue_pop,
    )
    for check in checks:
        check()
# Begin testing stack
def test_stack_push_with_size():
    """Stack size must grow with each push."""
    stack = Stack()
    stack.push(5)
    assert stack.size() == 1
    stack.push(7)
    stack.push(9)
    assert stack.size() == 3
def test_stack_pop_and_peek():
    """Peek shows the top without removing it; pop is LIFO."""
    stack = Stack()
    stack.push(11)
    top = stack.peek()
    popped = stack.pop()
    assert top == 11
    assert popped == 11
    stack.push(13)
    stack.push(15)
    popped = stack.pop()
    assert popped == 15
def test_empty_stack_pop():
    """A fresh stack is empty and popping it yields None."""
    stack = Stack()
    assert stack.is_empty()
    assert stack.size() == 0
    stack.pop()
    assert stack.pop() is None
# Begin testing queue
def test_queue_push_with_size():
    """Queue size must grow with each push."""
    queue = Queue()
    queue.push(5)
    assert queue.size() == 1
    queue.push(7)
    queue.push(9)
    assert queue.size() == 3
def test_queue_pop_and_peek():
    """Peek shows the front without removing it; pop is FIFO."""
    queue = Queue()
    queue.push(11)
    front = queue.peek()
    popped = queue.pop()
    assert popped == 11
    assert front == 11
    queue.push(13)
    queue.push(15)
    popped = queue.pop()
    assert popped == 13
def test_empty_queue_pop():
    """A fresh queue is empty and popping it yields None."""
    queue = Queue()
    assert queue.is_empty()
    assert queue.size() == 0
    queue.pop()
    assert queue.pop() is None
# Run the whole suite when the module is imported/executed.
test_all()
| true |
471bf96d1e9e80ca0a0d60f842908a22e9b8e316 | Python | maximus3/msu_nn_spring_2021 | /hw_05/train_utils.py | UTF-8 | 7,768 | 2.5625 | 3 | [] | no_license | # %load train_utils.py
import numpy as np
#from sklearn.datasets import fetch_mldata
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision import transforms
from IPython.display import clear_output
def collate_fn(batch):
    """Transpose a batch of samples into per-field tuples."""
    transposed = zip(*batch)
    return tuple(transposed)
def get_datasets(download=False, transform=None, test=True):
    """Return the MNIST train dataset, plus the test dataset when `test`.

    Bug fix: `return train_dataset, test_dataset if test else train_dataset`
    parsed as `(train_dataset, <conditional>)` and therefore ALWAYS returned
    a 2-tuple — with test=False it returned (train, train), which made the
    caller in get_loaders concatenate a Dataset with a tuple. Now a single
    dataset is returned when test=False, as that caller expects.
    """
    # Default preprocessing: tensor conversion + MNIST mean/std normalization.
    transform = transform or transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    train_dataset = MNIST('.', train=True, download=download, transform=transform)
    if not test:
        return train_dataset
    test_dataset = MNIST('.', train=False, transform=transform)
    return train_dataset, test_dataset
def get_loaders(download=False, new_transform=None, batch_size=32):
    """Build shuffled train/test DataLoaders for MNIST.

    When `new_transform` is given, a second copy of the training set using
    that transform is concatenated onto the original training set.
    """
    # NOTE(review): this local `transform` is never used — get_datasets
    # builds its own identical default. Dead code candidate.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    train_dataset, test_dataset = get_datasets(download)
    if new_transform:
        # NOTE(review): get_datasets(test=False) currently returns a 2-tuple
        # due to a precedence bug there, so this concatenation fails —
        # confirm after get_datasets is fixed to return a single dataset.
        new_train_dataset = get_datasets(download=True, transform=new_transform, test=False)
        train_dataset = train_dataset + new_train_dataset
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)# , collate_fn=collate_fn)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)# , collate_fn=collate_fn)
    return train_loader, test_loader
def _epoch(network, loss, loader,
backward=True,
optimizer=None,
device='cpu',
ravel_init=False):
losses = []
accuracies = []
for X, y in loader:
X = X.to(device)
y = y.to(device)
if ravel_init:
X = X.view(X.size(0), -1)
network.zero_grad()
prediction = network(X)
loss_batch = loss(prediction, y)
losses.append(loss_batch.cpu().item())
if backward:
loss_batch.backward()
optimizer.step()
prediction = prediction.max(1)[1]
accuracies.append((prediction == y).cpu().float().numpy().mean())
return losses, accuracies
def train(network, train_loader=None, test_loader=None, epochs=10,
          learning_rate=1e-3, plot=True, verbose=True, loss=None,
          optimizer=None, clear_data=True, get_loaders_func=None,
          ravel_init=False, device='cpu', tolerate_keyboard_interrupt=True):
    """Train `network` for `epochs` epochs, optionally evaluating and plotting.

    `loss` and `optimizer` are passed as CLASSES (factories) and are
    instantiated here; defaults are NLLLoss and Adam. Either `train_loader`
    or `get_loaders_func` (called each epoch to produce fresh loaders) must
    be supplied. Returns four lists: per-epoch train loss, test loss,
    train accuracy and test accuracy.
    """
    loss = loss() if loss else nn.NLLLoss()
    optimizer = optimizer(network.parameters(), learning_rate) if optimizer else torch.optim.Adam(network.parameters(), lr=learning_rate)
    if train_loader is None and get_loaders_func is None:
        raise RuntimeError("No train_loader")
    train_loss_epochs = []
    test_loss_epochs = []
    train_accuracy_epochs = []
    test_accuracy_epochs = []
    network = network.to(device)
    try:
        for epoch in range(epochs):
            # Optionally regenerate loaders every epoch (e.g. new augmentations).
            if get_loaders_func:
                train_loader, test_loader = get_loaders_func()
            if train_loader:
                network.train()
                losses, accuracies = _epoch(network,
                                            loss,
                                            train_loader,
                                            True,
                                            optimizer,
                                            device,
                                            ravel_init)
                train_loss_epochs.append(np.mean(losses))
                train_accuracy_epochs.append(np.mean(accuracies))
            if test_loader:
                # Evaluation pass: no backward/optimizer step.
                network.eval()
                losses, accuracies = _epoch(network,
                                            loss,
                                            test_loader,
                                            False,
                                            optimizer,
                                            device,
                                            ravel_init)
                test_loss_epochs.append(np.mean(losses))
                test_accuracy_epochs.append(np.mean(accuracies))
            if verbose:
                # Refresh the notebook cell output before printing progress.
                if clear_data:
                    clear_output(True)
                if test_loader:
                    print('Epoch {0}... (Train/Test) Loss: {1:.3f}/{2:.3f}\tAccuracy: {3:.3f}/{4:.3f}'.format(
                        epoch, train_loss_epochs[-1], test_loss_epochs[-1],
                        train_accuracy_epochs[-1], test_accuracy_epochs[-1]))
                else:
                    print('Epoch {0}... (Train) Loss: {1:.3f}\tAccuracy: {2:.3f}'.format(
                        epoch, train_loss_epochs[-1], train_accuracy_epochs[-1]))
            if plot:
                # Side-by-side loss and accuracy curves, redrawn each epoch.
                plt.figure(figsize=(12, 5))
                plt.subplot(1, 2, 1)
                plt.plot(train_loss_epochs, label='Train')
                if test_loader:
                    plt.plot(test_loss_epochs, label='Test')
                plt.xlabel('Epochs', fontsize=16)
                plt.ylabel('Loss', fontsize=16)
                plt.legend(loc=0, fontsize=16)
                plt.grid()
                plt.subplot(1, 2, 2)
                plt.plot(train_accuracy_epochs, label='Train accuracy')
                if test_loader:
                    plt.plot(test_accuracy_epochs, label='Test accuracy')
                plt.xlabel('Epochs', fontsize=16)
                plt.ylabel('Accuracy', fontsize=16)
                plt.legend(loc=0, fontsize=16)
                plt.grid()
                plt.show()
    except KeyboardInterrupt:
        # Allow interactive interruption to end training but keep results.
        if tolerate_keyboard_interrupt:
            pass
        else:
            raise KeyboardInterrupt
    return train_loss_epochs, \
           test_loss_epochs, \
           train_accuracy_epochs, \
           test_accuracy_epochs
def plot_comp(test_loss, test_accuracy, name_start='', name_end=''):
    """Plot loss and accuracy curves side by side for every run whose name
    starts with ``name_start`` and ends with ``name_end``.

    :param test_loss: dict mapping run name -> sequence of per-epoch losses
    :param test_accuracy: dict mapping run name -> sequence of per-epoch accuracies
    :param name_start: only plot runs whose name starts with this prefix
    :param name_end: only plot runs whose name ends with this suffix
    """
    plt.figure(figsize=(12, 5))
    plt.subplot(1, 2, 1)
    plt.title('Loss')
    for name in test_loss:
        if name.startswith(name_start) and name.endswith(name_end):
            plt.plot(test_loss[name], label=name)
    plt.xlabel('Epochs', fontsize=16)
    plt.ylabel('Loss', fontsize=16)
    plt.legend(loc=0, fontsize=16)
    plt.grid()
    plt.subplot(1, 2, 2)
    plt.title('Accuracy')
    for name in test_accuracy:
        if name.startswith(name_start) and name.endswith(name_end):
            plt.plot(test_accuracy[name], label=name)
    plt.xlabel('Epochs', fontsize=16)
    # Fix: this panel plots accuracy but was labelled 'Loss' (copy-paste).
    plt.ylabel('Accuracy', fontsize=16)
    plt.legend(loc=0, fontsize=16)
    plt.grid()
    plt.show()
def plot_analysis(network):
    """Show the 21 misclassified test images the model was most confident
    about (lowest probability assigned to the true class).

    NOTE(review): relies on the module-level ``test_loader`` — confirm it is
    defined before this is called.  Assumes 28x28 single-channel inputs
    (MNIST-like), as implied by the ``reshape(28, 28)`` below.
    """
    wrong_X = []
    correct_y = []
    predicted_y = []
    logits = []
    for X, y in test_loader:
        prediction = network(X)
        # Softmax by hand: exponentiate then row-normalize.  Presumably the
        # network emits log-probabilities — TODO confirm against the model.
        prediction = np.exp(prediction.data.numpy())
        prediction /= prediction.sum(1, keepdims=True)
        for i in range(len(prediction)):
            if np.argmax(prediction[i]) != y[i]:
                wrong_X.append(X[i])
                correct_y.append(y[i])
                predicted_y.append(np.argmax(prediction[i]))
                # probability the model assigned to the *true* label
                logits.append(prediction[i][y[i]])
    wrong_X = np.row_stack(wrong_X)
    correct_y = np.row_stack(correct_y)[:, 0]
    predicted_y = np.row_stack(predicted_y)[:, 0]
    logits = np.row_stack(logits)[:, 0]
    plt.figure(figsize=(10, 5))
    # ascending sort: most confidently-wrong examples come first
    order = np.argsort(logits)
    for i in range(21):
        plt.subplot(3, 7, i+1)
        plt.imshow(wrong_X[order[i]].reshape(28, 28), cmap=plt.cm.Greys_r)
        # title shows: true label (predicted label)
        plt.title('{}({})'.format(correct_y[order[i]], predicted_y[order[i]]), fontsize=20)
        plt.axis('off')
| true |
1bb7df3f99527568b85c779f000ace848748a421 | Python | ZviBaratz/research | /research/data_classes/sheets/xlsx_parser/sheet_parser.py | UTF-8 | 1,220 | 3.125 | 3 | [] | no_license | import pandas as pd
LEN_SUBJECT_ID = 9
class SheetParser:
    """Loads one sheet of an Excel workbook and normalizes its labels.

    Column and index-level names become lower-case snake_case; index values
    are zero-padded to the subject-id width (module constant LEN_SUBJECT_ID).
    """
    def __init__(self):
        pass
    def read_from_path(self, path: str, sheet_name: str):
        """Read a single sheet, using the first column as the index."""
        return pd.read_excel(path, sheet_name=sheet_name, index_col=0)
    def fix_column_name(self, name: str):
        """Normalize one label: spaces -> underscores, drop quotes, lowercase."""
        underscored = name.replace(' ', '_')
        unquoted = underscored.replace("'", '')
        return unquoted.lower()
    def create_fixed_column_names_dict(self, column_names: pd.Index):
        """Map each original column name to its normalized form."""
        return {original: self.fix_column_name(original) for original in column_names}
    def fix_column_names(self, df: pd.DataFrame):
        """Return *df* with every column label normalized."""
        return df.rename(columns=self.create_fixed_column_names_dict(df.columns))
    def fix_index_names(self, df: pd.DataFrame):
        """Normalized names for every index level of *df*."""
        return [self.fix_column_name(level) for level in df.index.names]
    def fix_index(self, value):
        """Zero-pad a subject id to the canonical width."""
        return str(value).zfill(LEN_SUBJECT_ID)
    def fix_index_values(self, df: pd.DataFrame):
        """Index of *df* with every value zero-padded."""
        return df.index.map(self.fix_index)
    def parse_sheet(self, path: str, sheet_name: str):
        """Read one sheet and normalize its index names, index values and columns."""
        sheet = self.read_from_path(path, sheet_name)
        sheet.index.names = self.fix_index_names(sheet)
        sheet.index = self.fix_index_values(sheet)
        return self.fix_column_names(sheet)
| true |
42b86fc639f697488a512db3c8fb7826ccd3e3c6 | Python | spezifisch/hocr-parser | /tests/test_hocr.py | UTF-8 | 9,489 | 3.25 | 3 | [
"Apache-2.0"
] | permissive | import json
import os
import sys
import unittest
from bs4 import BeautifulSoup
from bs4.element import NavigableString
from hocr_parser import parser
if sys.version_info < (3, 0):
from io import open
class BaseTestClass(unittest.TestCase):
    """Super class for all test cases"""
    @classmethod
    def setup_class(cls):
        """
        Sets up fixtures used during tests.
        Creates a parser instance and saves it in cls.document.
        Additionally, parses the hocr document again with BeautifulSoup and
        saves the result in cls.soup so the parsed document can later be
        checked against the original html.
        """
        own_dir = os.path.dirname(os.path.abspath(__file__))
        hocr_file = "output.tesseract.hocr"
        hocr_path = os.path.join(own_dir, "data", hocr_file)
        with open(hocr_path) as f:
            hocr_data = f.read()
        # expected results live next to the input: <basename>.expected.json
        expected_file = hocr_file.rsplit(".", 1)[0] + ".expected.json"
        expected_path = os.path.join(own_dir, "data", expected_file)
        with open(expected_path, encoding="utf-8") as f:
            expected_data = f.read()
        cls.document = parser.HOCRParser(hocr_path, is_path=True)
        cls.soup = BeautifulSoup(hocr_data, "html.parser")
        cls.expected = json.loads(expected_data)
    @staticmethod
    def get_children_of_node(node):
        # Only element nodes carrying an id count as hOCR children;
        # strings and anonymous tags are filtered out.
        def child_node_filter(node):
            if isinstance(node, NavigableString):
                return False
            if not node.has_attr("id"):
                return False
            return True
        return list(filter(child_node_filter, node.contents))
    def recursively_compare_tree_against_html(self, func):
        """
        Utility function for the common task of looping through the document
        and html trees and comparing the obj and html nodes to each other.
        Takes a comparator function as argument. Comparator functions receive
        the following keyword arguments when they get called:
        - obj: The current ocr object
        - node: The current node in the html tree
        Defines an inner function that takes obj, node as arguments.
        The inner function executes the comparator function with its input
        arguments. Then it loops through the children, calling itself
        with the child nodes as arguments.
        The inner function is invoked with the root nodes.
        :param func: A function object. Comparator function that gets called
            for each element on each level. The comparator function
            receives the previous arguments as keyword arguments
            on invocation
        """
        def inner(obj, node):
            # invoke comparator function
            func(obj=obj, node=node)
            # filter
            child_nodes = self.get_children_of_node(node)
            # same number of object children and html child nodes
            self.assertEqual(len(obj.children), len(child_nodes))
            # loop over children and call recursive compare on them
            for (child_obj, child_node) in zip(obj.children, child_nodes):
                inner(obj=child_obj, node=child_node)
        # call inner() with root elements
        inner(obj=self.document.root, node=self.soup.body)
class TreeStructureTests(BaseTestClass):
    """Structural-integrity checks: ids, html equality, parent/child links."""
    def test_equivalency(self):
        """
        test_equivalency (test_hocr.TreeStructureTests)
        Recursively compares an obj against the html node and checks different
        aspects to see if the generated object and the html node are
        equivalent, i.e. the object was generated from this node and all
        information was parsed correctly.
        Tests:
        - same id
        - same html
        - parents have same id
        - same number of children
        - children have same ids
        """
        def compare_func(obj, node):
            # same id
            self.assertEqual(obj.id, node.get("id"))
            # same html
            self.assertEqual(obj.html.prettify, node.prettify)
            # parents have same id (only for non-root elements)
            if not obj == self.document.root:
                self.assertEqual(obj.parent.id, node.parent.get("id"))
            # same number of children
            child_nodes = self.get_children_of_node(node)
            self.assertEqual(len(obj.children), len(child_nodes))
            # children have same ids
            for (child_obj, child_node) in zip(obj.children, child_nodes):
                self.assertEqual(child_obj.id, child_node.get("id"))
        self.recursively_compare_tree_against_html(compare_func)
    def test_parent_link(self):
        """
        test_parent_link (test_hocr.TreeStructureTests)
        Recursively compares the parent node of the current obj
        to the parent element of the html node.
        Tests for parent-child link
        The parent object in obj.parent must contain obj in its
        children list.
        """
        def compare_func(obj, node):
            # no need to test for parents on root level of the tree
            if obj == self.document.root:
                return
            # parent-child link. obj must be in obj.parent.children
            self.assertTrue(obj in obj.parent.children)
        self.recursively_compare_tree_against_html(compare_func)
    def test_child_link(self):
        """
        test_child_link (test_hocr.TreeStructureTests)
        Recursively compares the child elements of an object against the
        child nodes of the corresponding html node.
        Tests for parent-child link
        Child objects must have obj as their parent
        """
        def compare_func(obj, node):
            child_nodes = self.get_children_of_node(node)
            for (child_obj, child_node) in zip(obj.children, child_nodes):
                # parent-child link (children must have obj as their parent)
                self.assertEqual(child_obj.parent, obj)
        self.recursively_compare_tree_against_html(compare_func)
class HOCRParserTests(BaseTestClass):
    """Parsing behavior and internal-consistency checks for HOCRParser."""
    def test_parsing(self):
        # Strings next to other siblings shouldn't be parsed as nodes.
        html = BeautifulSoup("""
            <div id='node'>
                I am noise. Have some newlines.
                \n\n
                <p id='child'>I am content</p>
            </div>
        """, "html.parser")
        node = parser.HOCRNode(html.div)
        self.assertEqual(len(node.children), 1)
        self.assertEqual(node.ocr_text, "I am content")
        # Strings inside tags should be parsed as ocr_text but not as children
        html = BeautifulSoup("""
            <div id='node'>I am not noise</div>
        """, "html.parser")
        node = parser.HOCRNode(html.div)
        self.assertEqual(len(node.children), 0)
        self.assertEqual(node.ocr_text, "I am not noise")
        # tags without id should not be parsed
        html = BeautifulSoup("""
            <div id='node'>
                <p>I don't have an id</p>
                <p id='child'>I have an id</p>
            </div>
        """, "html.parser")
        node = parser.HOCRNode(html.div)
        self.assertEqual(len(node.children), 1)
        self.assertEqual(node.children[0].ocr_text, "I have an id")
    def test_consistency(self):
        """
        test_consistency (test_ocr.HOCRParserTests)
        - number of children must be consistent
          obj.nchildren == len(obj._children)
                        == len(obj.children)
        - obj.html equals node.prettify()
        - coordinates
          obj.__coordinates == obj.coordinates == expected_coordinates
        """
        def compare_func(obj, node):
            # number of children must be consistent
            self.assertEqual(
                len(obj.children),
                len(obj._children)
            )
            # obj.html equals node
            self.assertEqual(obj._html, node)
            # coordinates
            self.assertEqual(
                obj._coordinates,
                obj.coordinates,
                self.expected["coordinates"][obj.id or "document"]
            )
            # confidence
            self.assertAlmostEqual(
                obj.confidence,
                self.expected["confidence"][obj.id or "document"]
            )
        self.recursively_compare_tree_against_html(compare_func)
    def test_ocr_text(self):
        # Each node's extracted text must match the recorded fixture.
        expected_text = self.expected["ocr_text"]
        def compare_func(obj, node):
            if obj == self.document.root:
                expected = expected_text["document"]
            else:
                expected = expected_text[obj.id]
            self.assertEqual(obj.ocr_text, expected)
        self.recursively_compare_tree_against_html(compare_func)
    def test_page_coordinates(self):
        expected_coordinates = self.expected["coordinates"]
        def compare_func(obj, node):
            if obj == self.document.root:
                expected = expected_coordinates["document"]
            else:
                expected = expected_coordinates[obj.id]
            self.assertEqual(obj.coordinates, tuple(expected))
        self.recursively_compare_tree_against_html(compare_func)
    def test_creation_method_equality(self):
        # Parsing from a file path and from an html string must agree.
        doc1 = self.document
        doc2 = parser.HOCRParser(self.soup.prettify(), is_path=False)
        self.assertEqual(doc1.ocr_text, doc2.ocr_text)
| true |
5625b1835a885d93e9b48d6fa898b44d42dbbba7 | Python | gistable/gistable | /all-gists/6468146/snippet.py | UTF-8 | 1,551 | 2.5625 | 3 | [
"MIT"
] | permissive | import requests
import subprocess
import json
import sys
import threading
import time
from Queue import Queue
numberOfViewers = int(sys.argv[1])
builderThreads = int(sys.argv[2])
startTime = time.time()
numberOfSockets = 0
concurrent = 25
urls = []
urlsUsed = []
def getURL(): # Get tokens
output = subprocess.Popen(["livestreamer", "twitch.tv/CHANNEL_NAME", "-j"], stdout=subprocess.PIPE).communicate()[0]
return json.loads(output)['streams']['worst']['url'] # Parse json and return the URL parameter
def build(): # Builds a set of tokens, aka viewers
global numberOfSockets
global numberOfViewers
while True:
if numberOfSockets < numberOfViewers:
numberOfSockets += 1
print "Building viewers " + str(numberOfSockets) + "/" + str(numberOfViewers)
urls.append(getURL())
def view(): # Opens connections to send views
global numberOfSockets
while True:
url=q.get()
requests.head(url)
if (url in urlsUsed):
urls.remove(url)
urlsUsed.remove(url)
numberOfSockets -= 1
else:
urlsUsed.append(url)
q.task_done()
if __name__ == '__main__':
for i in range(0, builderThreads):
threading.Thread(target = build).start()
while True:
while (numberOfViewers != numberOfSockets): # Wait until sockets are built
time.sleep(1)
q=Queue(concurrent*2)
for i in range(concurrent):
try:
t=threading.Thread(target=view)
t.daemon=True
t.start()
except:
print 'thread error'
try:
for url in urls:
print url
q.put(url.strip())
q.join()
except KeyboardInterrupt:
sys.exit(1) | true |
07b4a2eae17488368200b42f61a763cba043792e | Python | rickcanham/Wishes | /wishes_app/models.py | UTF-8 | 4,031 | 2.59375 | 3 | [] | no_license | from django.db import models
from django.db.models.fields import BooleanField, CharField, DateTimeField, EmailField, TextField
import datetime
import re
import bcrypt
# Create your models here.
class UserManager(models.Manager):
    """Manager with login/registration form validation for ``User``."""
    def login_validator(self,postData):
        """Validate a login form.

        Returns ``(errors, user_id)``: ``errors`` maps field names to
        messages, ``user_id`` is the matched user's pk, or -1 if the e-mail
        was not found.
        """
        errors = {}
        all_users = User.objects.all()
        user_id = -1
        # Linear scan for the e-mail; break once a match is found.
        # NOTE(review): User.objects.filter(email=...) would avoid loading
        # every user.
        for user in all_users:
            if user.email == postData['login_email']:
                user_id = user.id
            if user_id > 0:
                break
        if user_id != -1:
            user_obj = User.objects.get(id=user_id)
            hash1 = user_obj.pw_hash
            if bcrypt.checkpw(postData['login_password'].encode(), hash1.encode()):
                # NOTE(review): ``logged_in`` is not a declared field on User,
                # so this attribute is not persisted by save() -- confirm
                # whether session state was intended instead.
                user.logged_in = True
                user.save()
            else:
                errors['user_password'] = "Error: Incorrect password."
        else:
            errors['user_email'] = "Error: User not found. Please check your email or register."
        return errors, user_id
    def register_validator(self,postData):
        """Validate a registration form; returns a dict of field errors."""
        regex = r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b'
        errors = {}
        if len(postData['register_first_name']) < 2:
            errors['first_name'] = "Error: First name must be at least 2 characters."
        if len(postData['register_last_name']) < 2:
            errors['last_name'] = "Error: Last name must be at least 2 characters."
        if not(re.match(regex, postData['register_email'])):
            errors['email'] = "Error: Invalid email address."
        else:
            # NOTE(review): iterating .all() never raises DoesNotExist; this
            # try/except is dead code, and filter(email=...).exists() would
            # be equivalent and cheaper.
            try:
                user_obj = User.objects.all()
                for i in user_obj:
                    if postData['register_email'] == i.email:
                        errors['email'] = "Error: E-mail address is already in database. Please enter a different e-mail."
                        break
            except User.DoesNotExist:
                pass
        if len(postData['register_password']) < 8:
            errors['password'] = "Error: Password must be at least 8 characters."
        if postData['register_password'] != postData['register_confirm_password']:
            errors['password_match'] = "Error: Password and confirm password do not match."
        return errors
class WishManager(models.Manager):
    """Manager providing form validation for ``Wish``."""
    def wish_validator(self,postData):
        # Returns a dict of field errors; an empty dict means the form is valid.
        errors = {}
        if len(postData['wish_for']) < 3:
            errors['wish_for'] = "Error: I wish for must be at least 3 characters."
        if len(postData['wish_desc']) < 3:
            errors['wish_desc'] = "Error: Wish description must be at least 3 characters."
        return errors
# Create your models here.
class User(models.Model):
    # Registered user; passwords are stored as bcrypt hashes in pw_hash.
    first_name = CharField(max_length=255)
    last_name = CharField(max_length=255)
    email = EmailField(max_length=254)
    pw_hash = CharField(max_length=255)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = UserManager()
    # Reverse relations created by Wish's foreign keys / m2m below:
    #wish_for =
    #wish_granted =
    #liked_wishes
    def __str__(self):
        # Human-readable "First Last" for admin/debug output.
        return "%s %s" % (self.first_name, self.last_name)
class Wish(models.Model):
    # A wish made by one user; optionally granted by another, likeable by many.
    item = CharField(max_length=255)
    desc = TextField()
    granted = BooleanField(default=False)
    # NOTE(review): datetime.time(0, 0) is a time-of-day, not a datetime --
    # confirm this default is valid for a DateTimeField.
    date_granted = DateTimeField(default=datetime.time(0,0))
    wisher = models.ForeignKey(User, on_delete=models.CASCADE, related_name="wish_for") #OneToMany - One user can make many wishes, but each wish can only be made by one user
    granted_by = models.ForeignKey(User, on_delete=models.CASCADE, related_name="wish_granted") #OneToMany - One user can grant many wishes, but each wish can only be granted by one user
    like_by = models.ManyToManyField(User, related_name="liked_wishes") #ManyToMany - Many users can like a wish, and many wishes can be liked by a user
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = WishManager()
| true |
13648e6563d75805ac14d72078b2a397f5e1c851 | Python | Llmoment/House-Price | /model_train.py | UTF-8 | 3,102 | 2.546875 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import torch.autograd as autograd
import numpy as np
from dlmodel import MLSTM
from data_org import *
from sklearn.metrics import r2_score
# Define hyperparameters
BATCH_SIZE = 80
LR = 0.0005
EPOCH = 25
DATA_PATH = "house_pos.csv"
dataorg = DataOrganizer(DATA_PATH)
# Organize the dataset: train/test split provided by DataOrganizer
x_train,x_test,y_train,y_test = dataorg.load_data()
print(x_train.shape)
print(y_train.shape)
x_train = torch.from_numpy(x_train).float()
x_test = torch.from_numpy(x_test).float()
y_train = torch.from_numpy(y_train).float()
y_test = torch.from_numpy(y_test).float()
train_dataset = data.TensorDataset(x_train,y_train)
test_dataset = data.TensorDataset(x_test,y_test)
# Split the training set into batches of BATCH_SIZE
train_loader = data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
test_loader = data.DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)
r2_list = []
r2_list_test = []
model = MLSTM()
if torch.cuda.is_available():
    model.cuda()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=LR)
for epoch in range(EPOCH):
    model.train()
    print("\n")
    print('*' * 10)
    print("epoch {}".format(epoch + 1))
    print('*' * 10)
    running_loss = 0.0
    running_r2 = 0.0
    plt_r2 = 0.0
    # Training loop
    # NOTE(review): the loop variable ``data`` shadows the
    # ``torch.utils.data`` module alias imported above; this works only
    # because the module is no longer referenced inside the loop.
    for i, data in enumerate(train_loader, 1):
        entrys, price = data
        if torch.cuda.is_available():
            entrys = entrys.cuda()
            price = price.cuda()
        out = model(entrys)
        out = out.squeeze()
        loss = criterion(out, price)
        out_cpu = out.cpu().detach().numpy()
        price_cpu = price.cpu().numpy()
        # Compute the r2 score for this batch
        r2score = r2_score(price_cpu, out_cpu)
        running_r2 += r2score
        plt_r2 += r2score
        running_loss += loss.item()
        if i % 20 == 0:
            # record the 20-batch mean r2 for later plotting
            r2_list.append(plt_r2/20)
            plt_r2 = 0.0
        if i % 50 == 0:
            print("Batch r2: {:.6f}".format(running_r2/i))
            print("Batch loss: {:.6f}".format(running_loss /i))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Model evaluation (once per epoch)
    # NOTE(review): runs without torch.no_grad(), so gradients are tracked
    # unnecessarily during evaluation.
    model.eval()
    eval_loss = 0.
    eval_r2 = 0.
    for i, data in enumerate(test_loader, 1):
        entrys, price = data
        if torch.cuda.is_available():
            entrys = entrys.cuda()
            price = price.cuda()
        out = model(entrys)
        out = out.squeeze()
        loss = criterion(out, price)
        out_cpu = out.cpu().detach().numpy()
        price_cpu = price.cpu().numpy()
        r2score = r2_score(price_cpu, out_cpu)
        eval_r2 += r2score
        eval_loss += loss.item()
    print("evaling module:")
    print('*'*10)
    print("eval r2: {:.6f}".format(eval_r2*BATCH_SIZE/len(test_dataset)))
    print("The eval loss is: {:.6f}".format((eval_loss * BATCH_SIZE)/len(test_dataset)))
    r2_list_test.append(eval_r2*BATCH_SIZE/len(test_dataset))
np.savetxt("r2_score.txt", r2_list)
np.savetxt("r2_score_test.txt", r2_list_test)
| true |
18519a0b552dba7d88e03d71ff228986c12548c4 | Python | PedroRamos360/PythonCourseUdemy | /kivy/aulas/Seção 9 - Tomada de Decisão/Exercicios/Ex11.py | UTF-8 | 262 | 3.65625 | 4 | [] | no_license | def verficiarDecimal(numero):
# Report whether *numero* is a decimal (float) value.
# isinstance() is the idiomatic type check and also accepts float
# subclasses, unlike the original ``type(numero) is float``.
if isinstance(numero, float):
    print("{} é decimal".format(numero))
else:
    print("{} não é decimal".format(numero))
# Exercise the checker with an int, a string, a float and a negative float.
for amostra in (9, "banana", 9.3, -0.1):
    verficiarDecimal(amostra)
| true |
79a2175c8b64af0c3624eff5870ede6db03585ff | Python | DAC-hub-101/Software_Academy_Python_Course_Semester_2 | /lab5/TASK_regex_basics.py | UTF-8 | 565 | 2.921875 | 3 | [] | no_license | import re
# Задача:
# За нас, валиден емайл адрес е всеки низ, който отговаря на следното условие:
#   prefix@domain.tld
#   където
#   prefix е:
#   - поне 3 символа (които и да с)
#   domain:
#   -
#   "<at least 3 symbols>@<at least 1 letter>.<at least 3 letters>"
# (Fix: the nine description lines above were bare text in the original
# file and made it a SyntaxError; they are now comments.)
user_mail = "alabala@test.com"  # sample address for the e-mail exercise (unused below)
# tel = r"\+359 [0-9]{8}"
pattern = r"[A-Z]{6}"  # exactly six consecutive uppercase ASCII letters
strings = [
    "IVANOV",  #
    "Ivanov"  # no
]
# Fix: loop variable renamed from ``str`` to avoid shadowing the builtin.
for s in strings:
    res = re.search(pattern, s)
    if res:
        print(f"{s} => {res}")
    else:
        print("no Match")
| true |
efc0b1fc587680401efed42f2717d8be3b3ffc29 | Python | jakobj/process-text | /process-text | UTF-8 | 3,728 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""process-text
Usage:
process-text <config> [--verbose]
process-text --version
Options:
-h --help Show this screen.
--version Show version.
-v --verbose Tell me what's happening.
"""
import docopt
import json
import os
if __name__ == '__main__':
    args = docopt.docopt(__doc__, version='0.0.1')
    with open(args['<config>'], 'r') as f:
        config = json.load(f)
    # use only one order if none are given
    if 'replace' in config and '0' not in config['replace']:
        tmp = config['replace'].copy()
        config['replace'].clear()
        config['replace']['0'] = tmp
    # each remaining top-level key is a source filename to process
    for fn in sorted(config.keys()):
        if fn == 'replace': # skip global replacement entry
            continue
        # if no outputfile is given, write to filename + -mod
        if 'structure' not in config[fn]:
            basename, ext = os.path.splitext(fn)
            config[fn]['structure'] = {'*': basename + '-mod' + ext}
        # write to /dev/null if not default given
        if '*' not in config[fn]['structure']:
            config[fn]['structure']['*'] = '/dev/null'
        # use only one order if none are given
        if 'replace' in config[fn] and '0' not in config[fn]['replace']:
            tmp = config[fn]['replace'].copy()
            config[fn]['replace'].clear()
            config[fn]['replace']['0'] = tmp
        if args['--verbose']:
            print('writing from {}[0] to {}'.format(fn, config[fn]['structure']['*']))
        touched_files = set() # need to keep track of files already written to to not overwrite content
        outfile = open(config[fn]['structure']['*'], 'w')
        touched_files.add(config[fn]['structure']['*'])
        with open(fn, 'r', errors='ignore') as f:
            for linum, l in enumerate(f):
                # set file pointer according to structure
                for key in config[fn]['structure']:
                    if key != '*' and key in l:
                        if args['--verbose']:
                            print('writing from {}[{}] to {}:'.format(fn, linum, config[fn]['structure'][key]))
                        outfile.close()
                        # append if already written to, otherwise clear content
                        if config[fn]['structure'][key] in touched_files:
                            outfile = open(config[fn]['structure'][key], 'a')
                        else:
                            outfile = open(config[fn]['structure'][key], 'w')
                            touched_files.add(config[fn]['structure'][key])
                # replace strings defined for this source file
                if 'replace' in config[fn]:
                    for order in sorted(config[fn]['replace']):
                        for key in config[fn]['replace'][order]:
                            if key in l:
                                if args['--verbose']:
                                    print('    {}[{}]:'.format(fn, linum), key, '->', config[fn]['replace'][order][key])
                                l = l.replace(key, config[fn]['replace'][order][key])
                # replace strings defined globally
                if 'replace' in config:
                    for order in sorted(config['replace']):
                        for key in config['replace'][order]:
                            if key in l:
                                if args['--verbose']:
                                    print('    {}[{}]:'.format(fn, linum), key, '->', config['replace'][order][key])
                                l = l.replace(key, config['replace'][order][key])
                # write line
                outfile.write(l)
        outfile.close()
| true |
3d14917940123d58dff22522aedddff8b54ce615 | Python | d-an/stats | /stats.py | UTF-8 | 3,417 | 2.90625 | 3 | [] | no_license | import statsmodels.api as sm
import patsy
import scipy
import numpy as np
def lm(formula, data):
    """Fit an ordinary-least-squares model described by a patsy formula.

    Variable names in *formula* refer to columns of the pandas DataFrame
    *data*. Prints the fit summary and returns the fitted results object.
    """
    response, design = patsy.dmatrices(formula, data, return_type='dataframe')
    fitted = sm.OLS(response, design).fit()
    print(fitted.summary())
    return fitted
def data(dataname = None, package = None, cache = False):
    """Fetch the R dataset *dataname* from R *package* via statsmodels."""
    dataset = sm.datasets.get_rdataset(dataname = dataname, package = package, cache = cache)
    return dataset.data
def submodel(model_formula, submodel_formula, data):
    """F-test of a nested submodel against the full model.

    Both formulas are patsy strings evaluated against *data*; the submodel's
    regressors must be a subset of the full model's. Prints a summary and
    returns ``(F, pvalue)``.
    """
    y_full, X_full = patsy.dmatrices(model_formula, data, return_type='dataframe')
    y_sub, X_sub = patsy.dmatrices(submodel_formula, data, return_type='dataframe')
    full_fit = sm.OLS(y_full, X_full).fit()
    sub_fit = sm.OLS(y_sub, X_sub).fit()
    # F = ((RSS_sub - RSS_full) / (df_sub - df_full)) / MSE_full
    F=((sub_fit.ssr-full_fit.ssr)/(sub_fit.df_resid-full_fit.df_resid))/full_fit.mse_resid
    df1, df2 = sub_fit.df_resid-full_fit.df_resid, full_fit.df_resid
    pvalue = 1-scipy.stats.f.cdf(F, df1, df2)
    message = """
    Null hypothesis: submodel holds
    F statistic: %(F)s
    df1, df1 = %(df1)s, %(df2)s
    p-value: %(pvalue)s
    """ % {'F': F, 'df1': int(df1), 'df2': int(df2), 'pvalue': pvalue}
    print(message)
    return F, pvalue
def chisq_test(observed):
    """Chi-squared test of independence on a contingency table.

    *observed* is a 2-D NumPy array of counts. Prints a report (plus a
    warning when any expected cell count falls below 5) and returns
    ``(chi2, pvalue)``.
    """
    n_rows, n_cols = observed.shape
    # marginal totals as a row vector / a column vector
    col_totals = observed.sum(axis=0).reshape(1,-1)
    row_totals = observed.sum(axis=1).reshape(-1,1)
    # expected counts under independence: outer product of margins over N
    expected = np.dot(row_totals, col_totals)/observed.sum()
    chi2 = (((observed-expected)**2)/expected).sum()
    pvalue = 1-scipy.stats.chi2.cdf(chi2, (n_rows-1)*(n_cols-1))
    message = """
    Performing the test of independence in a contingency table.
    test statistic: %(chi2)s
    degrees of freedom: %(df)s
    p-value: %(pvalue)s
    """ % {'chi2': chi2, 'df': (n_rows-1)*(n_cols-1), 'pvalue': pvalue}
    print(message)
    warning = """
    Warning message:
    Chi-squared approximation may be incorrect
    """
    if expected.min() < 5:
        print(warning)
    return chi2, pvalue
def predict(L, formula, data, level=0.95, interval="prediction", model_matrix = False):
    """
    L is either a model matrix or a data frame
    of the same structure like the data argument.
    formula and data describe the model.
    interval: "prediction" or "confidence"

    Returns an (m, 2) array of [lower, upper] interval bounds, one row per
    row of L.
    """
    y, X = patsy.dmatrices(formula, data, return_type='dataframe')
    model = sm.OLS(y, X).fit()
    if not model_matrix:
        L = patsy.dmatrices(formula, L, return_type="matrix")[1] # same columns like the model matrix now
    # pseudo-inverse of X'X, reused for every standard error below
    xtx_pinv = np.linalg.pinv(X.T.dot(X))
    if interval=="confidence":
        # s.e. of the fitted mean: sqrt(sigma^2 * l'(X'X)^- l)
        se = np.array([np.sqrt(model.mse_resid*vect.dot(xtx_pinv).dot(vect.T)) for vect in L])
    else:
        # prediction interval adds the observation noise: sigma^2 * (1 + l'(X'X)^- l)
        se = np.array([np.sqrt(model.mse_resid*(1+vect.dot(xtx_pinv).dot(vect.T))) for vect in L])
    t = scipy.stats.t.ppf((level+1)/2, model.df_resid)
    point_estimates = np.array([(vect*model.params).sum() for vect in L])
    lower = point_estimates - t*se
    upper = lower + 2*t*se
    return np.hstack([lower.reshape(-1,1), upper.reshape(-1,1)])
| true |
38bc5601f465532e921e684c1ac84c01b2fc1c76 | Python | Kang-bh/Coding_test_with_Python | /4. 구현/4-3.py | UTF-8 | 305 | 3.234375 | 3 | [] | no_license | # 판
# Read a chess square in algebraic notation (e.g. "a1") and print how many
# knight moves from it stay on the 8x8 board.
square = input()
column = ord(square[0]) - 96  # 'a' -> 1 ... 'h' -> 8
row = int(square[1])
knight_jumps = [(2, 1), (2, -1), (1, 2), (1, -2), (-1, -2), (-1, 2), (-2, 1), (-2, -1)]
count = 0
for dx, dy in knight_jumps:
    if 1 <= column + dx <= 8 and 1 <= row + dy <= 8:
        count += 1
print(count)
1289dd25eaaacaaab9c910e8e1fca00ab2f1f983 | Python | CrimsonVista/UTAustin-Courses | /2021fa_cs361s/labs/lab5/newsapp/newslister/utils.py | UTF-8 | 1,366 | 2.546875 | 3 | [] | no_license | import urllib.parse as urlparse
from django.core.exceptions import BadRequest
from urllib.parse import (
quote as _quote, unquote as _unquote, urlencode as _urlencode,
)
def list_to_scope(scope):
    """Join the entries of *scope* into one space separated string."""
    pieces = [str(entry) for entry in scope]
    return " ".join(pieces)
def scope_to_list(scope):
    """Convert a space separated scope string to a list of scope strings.

    Tuples/lists/sets are passed through element-wise; None stays None.
    """
    if scope is None:
        return None
    if isinstance(scope, (tuple, list, set)):
        return [str(entry) for entry in scope]
    return scope.strip().split(" ")
def encode_params_utf8(params):
    """Ensures that all parameters in a list of 2-element tuples are encoded to
    bytestrings using UTF-8.
    """
    return [(k.encode('utf-8'), v.encode('utf-8')) for k, v in params]
def urlencode(params):
    """URL-encode *params* (two-tuples), always returning a ``str``."""
    encoded = _urlencode(encode_params_utf8(params))
    if isinstance(encoded, str):
        return encoded
    # py2 compatibility path: _urlencode returned bytes
    return encoded.decode("utf-8")
def add_params_to_qs(query, params):
    """Extend a query with a list of two-tuples."""
    extra = params.items() if isinstance(params, dict) else params
    pairs = urlparse.parse_qsl(query, keep_blank_values=True)
    pairs.extend(extra)
    return urlencode(pairs)
359fc665a7fb36507ad47d07c4b1a2afa0e69081 | Python | alkaf499/Softuni | /Python Advanced/Python OOP/Encapsulation - Lab/03. Profile/unitest.py | UTF-8 | 838 | 3.078125 | 3 | [] | no_license | import unittest
class Tests(unittest.TestCase):
    """Unit tests for the Profile class.

    NOTE(review): ``Profile`` is never imported in this module; these tests
    will fail with NameError unless it is provided elsewhere -- confirm the
    intended import.
    """
    def test_invalid_password(self):
        # A password without a digit/uppercase must raise with this exact message.
        with self.assertRaises(ValueError) as ve:
            self.profile = Profile('My_username', 'My-password')
        self.assertEqual(str(ve.exception), "The password must be 8 or more characters with at least 1 digit and 1 uppercase letter.")
    def test_invalid_username(self):
        # Usernames longer than 15 characters are rejected.
        with self.assertRaises(ValueError) as ve:
            self.profile = Profile('Too_long_username', 'Any')
        self.assertEqual(str(ve.exception), "The username must be between 5 and 15 characters.")
    def test_correct_profile(self):
        # Valid credentials: __str__ masks the password.
        self.profile = Profile("Username", "Passw0rd")
        self.assertEqual(str(self.profile), 'You have a profile with username: "Username" and password: ********')
# Allow running this test module directly with ``python <file>``.
if __name__ == "__main__":
    unittest.main()
| true |
11d792b34f3b93136b18a2f01bf20a040f6d6631 | Python | marcovnyc/penguin-code | /Impractical-Python-Projects/chapter_8_9/missing_words_finder.py | UTF-8 | 3,123 | 3.734375 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 17 16:29:46 2019
@author: toddbilsborough
Project 15 - Counting Syllables from Impractical Python Projects
Subproject - Missing words finder
Objective
- (overall) Write a Python program that counts the number of syllables in an
English word or phrase
- This program finds words missing from the CMU corpus and creates a
dictionary of the missing words and their syllable counts
Notes
- Ignored some of the error correction; this is for me to create a syllable
exception dictionary, and that file will be available in the repository
"""
import sys
from string import punctuation
import pprint
import json
from nltk.corpus import cmudict
cmudict = cmudict.dict() # Carnegie Mellon University Pronouncing Dictionary
def load_haiku(filename):
    """Load the haiku training corpus and return its unique words as a set.

    Hyphens are treated as word separators before splitting on whitespace.
    """
    with open(filename) as corpus_file:
        text = corpus_file.read()
    return set(text.replace('-', ' ').split())
def cmudict_missing(word_set):
    """Return the words in *word_set* that are absent from the CMU dictionary.

    Words are lower-cased, stripped of punctuation, and have a trailing
    "'s" removed before lookup. Prints a membership report.
    """
    exceptions = set()
    for raw_word in word_set:
        cleaned = raw_word.lower().strip(punctuation)
        if cleaned.endswith("'s"):
            cleaned = cleaned[:-2]
        if cleaned not in cmudict:
            exceptions.add(cleaned)
    print("\nExceptions: ")
    print(*exceptions, sep='\n')
    print("\nNumber of unique words in haiku corpus = {}".format(len(word_set)))
    print("Number of words in corpus not in cmudict = {}".format(len(exceptions)))
    membership = (1 - (len(exceptions) / len(word_set))) * 100
    print("CMUdict membership = {:.1f}{}".format(membership, '%'))
    return exceptions
def make_exceptions_dict(exceptions_set):
    """Interactively build a {word: syllable_count} dict for *exceptions_set*.

    Prompts until the user enters a digit for each word; entering 'x'
    aborts the whole program. Pretty-prints and returns the dict.
    """
    missing_words = {}
    print("Input number of syllables in word")
    for word in exceptions_set:
        while True:
            answer = input("Enter number syllables in {}, x to exit: ".format(word))
            if answer == 'x':
                sys.exit()
            if answer.isdigit():
                break
        missing_words[word] = int(answer)
    print()
    pprint.pprint(missing_words, width=1)
    return missing_words
def save_exceptions(missing_words):
    """Save the exceptions dictionary as missing_words.json in the cwd."""
    json_string = json.dumps(missing_words)
    # Fix: use a context manager so the handle is closed even if the write
    # fails (original open/close pair leaked the handle on error).
    with open('missing_words.json', 'w') as f:
        f.write(json_string)
    print("\nFile saved as missing_words.json")
def main():
    """Load the training text, find cmudict exceptions, then build and save
    the syllable-exception dictionary for them."""
    corpus = load_haiku('train.txt')
    not_in_cmudict = cmudict_missing(corpus)
    exceptions_dict = make_exceptions_dict(not_in_cmudict)
    save_exceptions(exceptions_dict)
# Run the interactive builder only when executed as a script.
if __name__ == '__main__':
    main()
| true |
01c2227a2c462acc37bd7eaa86e8e94785639eec | Python | nunoyu/JAQS | /jaqs/data/align.py | UTF-8 | 8,415 | 3.25 | 3 | [
"Apache-2.0"
] | permissive | # encoding: utf-8
from __future__ import print_function
import numpy as np
import pandas as pd
def get_neareast(df_ann, df_value, date):
    """
    Get the value whose ann_date is earlier and nearest to date.

    Parameters
    ----------
    df_ann : np.ndarray
        announcement dates. shape = (n_quarters, n_securities)
    df_value : np.ndarray
        announcement values. shape = (n_quarters, n_securities)
    date : np.ndarray
        shape = (1,)

    Returns
    -------
    res : np.array
        For each security, the value whose announcement date is the latest
        one on or before ``date`` (NaN when nothing is announced yet).
        shape = (n_securities,)
    """
    # True wherever the quarterly value is already public at ``date``
    announced = date[0] >= df_ann
    n_securities = df_value.shape[1]
    nearest = np.empty(n_securities, dtype=df_value.dtype)
    for col in range(n_securities):
        known_values = df_value[:, col][announced[:, col]]
        # last announced value wins; NaN when none are announced yet
        nearest[col] = known_values[-1] if len(known_values) else np.nan
    return nearest
def align(df_value, df_ann, date_arr):
    """
    Expand a low-frequency DataFrame to the frequency of date_arr using
    announcement dates from df_ann.

    Parameters
    ----------
    df_value : pd.DataFrame
        Announcement values. shape = (n_quarters, n_securities)
    df_ann : pd.DataFrame
        Announcement dates. shape = (n_quarters, n_securities)
    date_arr : list or np.array
        Target date array (integer dates such as 20170625).

    Returns
    -------
    pd.DataFrame
        Expanded DataFrame. shape = (n_days, n_securities)
    """
    # Missing announcement dates are pushed far into the future so they can
    # never match any target date.
    ann_values = df_ann.fillna(99999999).astype(int).values
    dates = np.asarray(date_arr, dtype=int)
    # One row of nearest-announced values per target date.
    rows = [get_neareast(ann_values, df_value.values, np.array([d]))
            for d in dates]
    return pd.DataFrame(index=dates, columns=df_value.columns,
                        data=np.array(rows))
def demo_usage():
    """Demonstrate expansion of quarterly data to daily frequency via a parser.

    NOTE(review): this demo references `Parser` and `df_evaluate`, neither of
    which is defined or imported in this module as visible here — running it
    as-is raises NameError. Confirm the intended imports before use.
    """
    # -------------------------------------------------------------------------------------
    # input and pre-process demo data
    fp = '../output/test_align.csv'
    raw = pd.read_csv(fp)
    raw.columns = [u'symbol', u'ann_date', u'report_period', u'oper_rev', u'oper_cost']
    raw.drop(['oper_cost'], axis=1, inplace=True)
    # MultiIndex (report_period, symbol) so each metric unstacks to a
    # quarters-by-securities table below.
    idx_list = ['report_period', 'symbol']
    raw_idx = raw.set_index(idx_list)
    raw_idx.sort_index(axis=0, level=idx_list, inplace=True)
    # -------------------------------------------------------------------------------------
    # get DataFrames
    df_ann = raw_idx.loc[pd.IndexSlice[:, :], 'ann_date']
    df_ann = df_ann.unstack(level=1)
    df_value = raw_idx.loc[pd.IndexSlice[:, :], 'oper_rev']
    df_value = df_value.unstack(level=1)
    # -------------------------------------------------------------------------------------
    # get data array and align
    # date_arr = ds.get_trade_date(20160325, 20170625)
    # Hard-coded trade-date calendar (20160325..20170623) used as the daily target axis.
    date_arr = np.array([20160325, 20160328, 20160329, 20160330, 20160331, 20160401, 20160405, 20160406,
                         20160407, 20160408, 20160411, 20160412, 20160413, 20160414, 20160415, 20160418,
                         20160419, 20160420, 20160421, 20160422, 20160425, 20160426, 20160427, 20160428,
                         20160429, 20160503, 20160504, 20160505, 20160506, 20160509, 20160510, 20160511,
                         20160512, 20160513, 20160516, 20160517, 20160518, 20160519, 20160520, 20160523,
                         20160524, 20160525, 20160526, 20160527, 20160530, 20160531, 20160601, 20160602,
                         20160603, 20160606, 20160607, 20160608, 20160613, 20160614, 20160615, 20160616,
                         20160617, 20160620, 20160621, 20160622, 20160623, 20160624, 20160627, 20160628,
                         20160629, 20160630, 20160701, 20160704, 20160705, 20160706, 20160707, 20160708,
                         20160711, 20160712, 20160713, 20160714, 20160715, 20160718, 20160719, 20160720,
                         20160721, 20160722, 20160725, 20160726, 20160727, 20160728, 20160729, 20160801,
                         20160802, 20160803, 20160804, 20160805, 20160808, 20160809, 20160810, 20160811,
                         20160812, 20160815, 20160816, 20160817, 20160818, 20160819, 20160822, 20160823,
                         20160824, 20160825, 20160826, 20160829, 20160830, 20160831, 20160901, 20160902,
                         20160905, 20160906, 20160907, 20160908, 20160909, 20160912, 20160913, 20160914,
                         20160919, 20160920, 20160921, 20160922, 20160923, 20160926, 20160927, 20160928,
                         20160929, 20160930, 20161010, 20161011, 20161012, 20161013, 20161014, 20161017,
                         20161018, 20161019, 20161020, 20161021, 20161024, 20161025, 20161026, 20161027,
                         20161028, 20161031, 20161101, 20161102, 20161103, 20161104, 20161107, 20161108,
                         20161109, 20161110, 20161111, 20161114, 20161115, 20161116, 20161117, 20161118,
                         20161121, 20161122, 20161123, 20161124, 20161125, 20161128, 20161129, 20161130,
                         20161201, 20161202, 20161205, 20161206, 20161207, 20161208, 20161209, 20161212,
                         20161213, 20161214, 20161215, 20161216, 20161219, 20161220, 20161221, 20161222,
                         20161223, 20161226, 20161227, 20161228, 20161229, 20161230, 20170103, 20170104,
                         20170105, 20170106, 20170109, 20170110, 20170111, 20170112, 20170113, 20170116,
                         20170117, 20170118, 20170119, 20170120, 20170123, 20170124, 20170125, 20170126,
                         20170203, 20170206, 20170207, 20170208, 20170209, 20170210, 20170213, 20170214,
                         20170215, 20170216, 20170217, 20170220, 20170221, 20170222, 20170223, 20170224,
                         20170227, 20170228, 20170301, 20170302, 20170303, 20170306, 20170307, 20170308,
                         20170309, 20170310, 20170313, 20170314, 20170315, 20170316, 20170317, 20170320,
                         20170321, 20170322, 20170323, 20170324, 20170327, 20170328, 20170329, 20170330,
                         20170331, 20170405, 20170406, 20170407, 20170410, 20170411, 20170412, 20170413,
                         20170414, 20170417, 20170418, 20170419, 20170420, 20170421, 20170424, 20170425,
                         20170426, 20170427, 20170428, 20170502, 20170503, 20170504, 20170505, 20170508,
                         20170509, 20170510, 20170511, 20170512, 20170515, 20170516, 20170517, 20170518,
                         20170519, 20170522, 20170523, 20170524, 20170525, 20170526, 20170531, 20170601,
                         20170602, 20170605, 20170606, 20170607, 20170608, 20170609, 20170612, 20170613,
                         20170614, 20170615, 20170616, 20170619, 20170620, 20170621, 20170622, 20170623])
    # df_res = align(df_ann, df_evaluate, date_arr)
    # -------------------------------------------------------------------------------------
    # demo usage of parser
    # NOTE(review): `Parser` is not imported anywhere in this module as shown —
    # this raises NameError. Presumably it comes from a formula-engine module.
    parser = Parser()
    # expr_formula = 'Delta(signal, 1) / Delay(signal,1)'
    expr_formula = 'Delay(signal,0)'
    expression = parser.parse(expr_formula)
    df_res = parser.evaluate({'signal': df_value}, df_ann, date_arr)
    # -------------------------------------------------------------------------------------
    # print to validate results
    sec = '600000.SH'
    # print "\nValue:"
    # print df_value.loc[:, sec]
    print("\n======Expression Formula:\n{:s}".format(expr_formula))
    print("\n======Report date, ann_date and evaluation value:")
    # NOTE(review): `df_evaluate` is never defined (df_res above?) — NameError here.
    tmp = pd.concat([df_ann.loc[:, sec], df_evaluate.loc[:, sec]], axis=1)
    tmp.columns = ['ann_date', 'eval_value']
    print(tmp)
    print("\n======Selection of result of expansion:")
    print("20161028   {:.4f}".format(df_res.loc[20161028, sec]))
    print("20161031   {:.4f}".format(df_res.loc[20161031, sec]))
    print("20170427   {:.4f}".format(df_res.loc[20170427, sec]))
    print()
# Script entry point: run the demo once and report elapsed wall-clock time.
if __name__ == "__main__":
    import time
    t_start = time.time()
    demo_usage()
    t3 = time.time() - t_start
    print("\n\n\nTime lapsed in total: {:.1f}".format(t3))
| true |
73498213b15fde624a01a3593a04c869d1c7436e | Python | smsrikanthreddy/InterviewBit | /binary_search/sqrt.py | UTF-8 | 943 | 3.453125 | 3 | [] | no_license | '''
def sqrt(A):
i = 1
min = 1
max = 1
while i < A:
min = max
i = max + 10
max = i
i = i * i
while min <= max:
j = min
if j * j == A:
return j
if j * j < A:
min = j + 1
if j * j > A:
max = j -1
if j * j < A and (j+1)*(j+1) > A:
return j
A = 122
#A = 11
#A = 273189320
#import pdb
#pdb.set_trace()
print('sqrt is:-', sqrt(A))
'''
#with more optimized binary search
def binsearch(start, end, A):
    """Binary-search the integer (floor) square root of A within [start, end].

    Returns the exact root when A is a perfect square, otherwise the
    largest integer whose square is below A.
    """
    # Fallback so `ans` is always bound even if no mid*mid < A is ever seen
    # (the original could raise UnboundLocalError in that case).
    ans = start
    while start <= end:
        mid = (start + end) // 2
        if mid * mid == A:
            return mid
        if mid * mid < A:
            ans = mid          # best floor candidate so far
            start = mid + 1
        else:
            end = mid - 1
    return ans
def sqrt(A):
    """Return the integer (floor) square root of the non-negative integer A."""
    # 0 and 1 are their own square roots; everything else is delegated to a
    # binary search over [0, A].
    if A in (0, 1):
        return A
    return binsearch(0, A, A)
print('sqrt is:-', sqrt(11))
| true |
772d1fc0daa3cf50dea21598df9f54c879d5267a | Python | mtleis/Bioinformatics-Specialisation-UC | /03_GenomeSequencing/01_IntroductionOverlap_DeBruijnPath/04_Overlap.py | UTF-8 | 1,856 | 3.359375 | 3 | [] | no_license | # Input: A collection Patterns of k - mers.
# Output: The overlap graph Overlap(Patterns), in the form of an adjacency list (dictionary).
from collections import defaultdict
def Overlap(patterns):
    """Build the overlap graph of a k-mer collection.

    Returns an adjacency dict mapping each pattern to the patterns whose
    (k-1)-prefix equals its (k-1)-suffix.
    """
    graph = defaultdict(list)
    for pattern in patterns:
        tail = pattern[1:]
        for candidate in patterns:
            head = candidate[:-1]
            if tail == head:
                graph[pattern].append(candidate)
    # Deduplicate adjacency lists. Inserting at the front reverses the
    # discovery order, which reproduces the original output ordering.
    deduped = defaultdict(list)
    for node, neighbours in graph.items():
        for neighbour in neighbours:
            if neighbour not in deduped[node]:
                deduped[node].insert(0, neighbour)
    return deduped
# Demo input: five 4-mers.
dna = ['ACTG', 'CTGC', 'ATTC', 'CTGA', 'AAAA']
# print(*Overlap(dna), sep='\n')
result = Overlap(dna)
# Change format into XXX->YYY
formatted = []
for key, values in result.items():
    line = key
    for value in values:
        # First neighbour gets the '->' separator, later ones are comma-joined.
        if '->' in line:
            line = line + ',' + value
        else:
            line = line + '->' + value
    formatted.append(line)
print(*formatted, sep='\n')
# Equivalent output in "pattern -> a,b" form, skipping empty adjacency lists.
for pattern, adjacencies in result.items():
    if len(adjacencies) > 0:
        print(pattern, '->', ','.join(adjacencies))
# Fetch Input
#inputDirectory = '/Users/tleis/PycharmProjects/BioInformaticsI/03_GenomeSequencing/dataset_198_10.txt'
#inputFile = open(inputDirectory, 'r')
#dna = list()
#lines = inputFile.readlines()
#for line in lines:
# line = line.replace('\n', '')
# dna.append(line)
#inputFile.close()
#print(*Overlap(dna), sep='\n')
# This code worked for submission
#import sys
#Input = sys.stdin.readlines()
#patternList = [pattern.strip() for pattern in Input]
#overlapList = Overlap(patternList)
#for pattern, adjacencies in overlapList.items():
# if len(adjacencies) > 0:
# print(pattern, '->', ','.join(adjacencies)) | true |
fa76be2ce84f0bd7f280dbbab041ebae15427913 | Python | rmp918/CPG | /models/net_sphere.py | UTF-8 | 10,733 | 3.296875 | 3 | [
"BSD-3-Clause"
] | permissive | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn import Parameter
import math
import torchvision.models as models
class AngleLinear(nn.Module):
    """Angular-margin fully-connected layer (A-Softmax / SphereFace).

    Maps a (B, in_features) batch to per-class cosine scores and their
    margin-adjusted counterparts; `m` is the angular-margin multiplier.
    """
    def __init__(self, in_features, out_features, m = 4):
        super(AngleLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(in_features,out_features))
        # Uniform init, then renormalise each column toward unit L2 norm
        # (renorm_ clips norms above 1e-5, mul_ rescales back up).
        self.weight.data.uniform_(-1, 1).renorm_(2,1,1e-5).mul_(1e5)
        self.m = m
        # Polynomial expansions of cos(m*theta) in terms of cos(theta).
        self.mlambda = [
            lambda x: x**0, # cos(0*theta)=1
            lambda x: x**1, # cos(1*theta)=cos(theta)
            lambda x: 2*x**2-1, # cos(2*theta)=2*cos(theta)**2-1
            lambda x: 4*x**3-3*x, # cos(3*theta)=4*cos(theta)**3-3cos(theta)
            lambda x: 8*x**4-8*x**2+1,
            lambda x: 16*x**5-20*x**3+5*x
        ]
    def forward(self, input):
        # input: (B, C_in) feature batch; B = batch size.
        x = input # size=(B,F), F = feature length, e.g. 512
        w = self.weight # size=(F,Classnum) F=in_features Classnum=out_features
        # Normalise each COLUMN of w to unit L2 norm (columns are per-class
        # weights, hence dim=1 in renorm).
        ww = w.renorm(2,1,1e-5).mul(1e5)
        # Per-row L2 norm of x; x itself is intentionally NOT normalised,
        # the norm is only needed to compute cosines below.
        xlen = x.pow(2).sum(1).pow(0.5) # size=B
        # Per-column L2 norm of ww (theoretically ~1 after renorm).
        wlen = ww.pow(2).sum(0).pow(0.5) # size=Classnum
        cos_theta = x.mm(ww) # size=(B,Classnum), raw dot products
        # Divide by both norms to get true cosines, then clamp for safety.
        cos_theta = cos_theta / xlen.view(-1,1) / wlen.view(1,-1)
        cos_theta = cos_theta.clamp(-1,1)
        # ------------------------------------------------
        # cos(m*theta) via the polynomial expansion selected by m.
        cos_m_theta = self.mlambda[self.m](cos_theta)
        # Recover theta by arccos, (B,C).
        theta = Variable(cos_theta.data.acos())
        # Branch index k ensures theta lies in [k*pi/m, (k+1)*pi/m): floor(m*theta/pi).
        k = (self.m*theta/3.14159265).floor()
        # A (-1) tensor of the same shape as k.
        n_one = k*0.0 - 1
        # Paper's phi(theta) = (-1)^k * cos(m*theta) - 2k (monotone in theta), (B,C).
        phi_theta = (n_one**k) * cos_m_theta - 2*k
        # --------------------------------------------
        # Scale back by |x| since x was never normalised, (B,C).
        cos_theta = cos_theta * xlen.view(-1,1)
        phi_theta = phi_theta * xlen.view(-1,1)
        output = (cos_theta,phi_theta)
        return output # tuple of two (B,Classnum) tensors
class AngleLoss(nn.Module):
    """Angular-margin softmax loss (A-Softmax) with lambda annealing.

    `lamb` blends the plain cosine logit with the margin logit and is
    annealed from LambdaMax towards LambdaMin as iterations accumulate.
    gamma > 0 adds focal-loss style down-weighting of easy examples.
    """
    def __init__(self, gamma=0):
        super(AngleLoss, self).__init__()
        self.gamma   = gamma
        self.it = 0
        self.LambdaMin = 5.0
        self.LambdaMax = 1500.0
        self.lamb = 1500.0
    def forward(self, input, target):
        self.it += 1
        cos_theta,phi_theta = input # each of size (B,C)
        target = target.view(-1,1) #size=(B,1)
        index = cos_theta.data * 0.0 #size=(B,Classnum), all zeros
        # One-hot mask: row i has a 1 only at column target[i].
        index.scatter_(1,target.data.view(-1,1),1)
        index = index.byte()# float mask -> byte so it can index tensors
        index = Variable(index)
        # Anneal the blending factor with the iteration count.
        self.lamb = max(self.LambdaMin,self.LambdaMax/(1+0.1*self.it ))
        output = cos_theta * 1.0 #size=(B,Classnum)
        # *1.0 forces a copy so the in-place edits below do not corrupt cos_theta
        # (assigning output = cos_theta directly may fail to converge).
        # Replace the target-class logit with a lamb-weighted mix of cos and phi.
        output[index] -= cos_theta[index]*(1.0+0)/(1+self.lamb)
        output[index] += phi_theta[index]*(1.0+0)/(1+self.lamb)
        logpt = F.log_softmax(output,dim=1) # class log-probabilities
        logpt = logpt.gather(1,target) # log-probability of the target class
        logpt = logpt.view(-1)
        pt = Variable(logpt.data.exp()) # ln(e) = 1
        # Focal-loss weighting; reduces to cross-entropy when gamma == 0.
        loss = -1 * (1-pt)**self.gamma * logpt
        loss = loss.mean()
        # target = target.view(-1)  # simpler alternative: plain cross-entropy
        # loss = F.cross_entropy(cos_theta, target)
        return loss
class sphere(nn.Module):
    """SphereFace-style 20-layer residual CNN for 112x112 RGB face images.

    Produces an `embedding_size` feature vector; when `feature` is False the
    forward pass additionally applies the AngleLinear classification head.
    """
    def __init__(self,embedding_size,classnum,feature=False):
        super(sphere, self).__init__()
        self.embedding_size = embedding_size
        self.classnum = classnum
        self.feature = feature
        #input = B*3*112*112
        self.conv1_1 = nn.Conv2d(3,64,3,2,1) #=>B*64*56*56
        self.relu1_1 = nn.PReLU(64)
        self.conv1_2 = nn.Conv2d(64,64,3,1,1)
        self.relu1_2 = nn.PReLU(64)
        self.conv1_3 = nn.Conv2d(64,64,3,1,1)
        self.relu1_3 = nn.PReLU(64)
        self.conv2_1 = nn.Conv2d(64,128,3,2,1) #=>B*128*28*28
        self.relu2_1 = nn.PReLU(128)
        self.conv2_2 = nn.Conv2d(128,128,3,1,1)
        self.relu2_2 = nn.PReLU(128)
        self.conv2_3 = nn.Conv2d(128,128,3,1,1)
        self.relu2_3 = nn.PReLU(128)
        self.conv2_4 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*28
        self.relu2_4 = nn.PReLU(128)
        self.conv2_5 = nn.Conv2d(128,128,3,1,1)
        self.relu2_5 = nn.PReLU(128)
        self.conv3_1 = nn.Conv2d(128,256,3,2,1) #=>B*256*14*14
        self.relu3_1 = nn.PReLU(256)
        self.conv3_2 = nn.Conv2d(256,256,3,1,1)
        self.relu3_2 = nn.PReLU(256)
        self.conv3_3 = nn.Conv2d(256,256,3,1,1)
        self.relu3_3 = nn.PReLU(256)
        self.conv3_4 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*14
        self.relu3_4 = nn.PReLU(256)
        self.conv3_5 = nn.Conv2d(256,256,3,1,1)
        self.relu3_5 = nn.PReLU(256)
        self.conv3_6 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*14
        self.relu3_6 = nn.PReLU(256)
        self.conv3_7 = nn.Conv2d(256,256,3,1,1)
        self.relu3_7 = nn.PReLU(256)
        self.conv3_8 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*14
        self.relu3_8 = nn.PReLU(256)
        self.conv3_9 = nn.Conv2d(256,256,3,1,1)
        self.relu3_9 = nn.PReLU(256)
        self.conv4_1 = nn.Conv2d(256,512,3,2,1) #=>B*512*7*7
        self.relu4_1 = nn.PReLU(512)
        self.conv4_2 = nn.Conv2d(512,512,3,1,1)
        self.relu4_2 = nn.PReLU(512)
        self.conv4_3 = nn.Conv2d(512,512,3,1,1)
        self.relu4_3 = nn.PReLU(512)
        self.fc5 = nn.Linear(512*7*7,self.embedding_size)
        self.fc6 = AngleLinear(self.embedding_size,self.classnum)
    def l2_norm(self,input):
        """Row-wise L2 normalisation (currently unused in forward)."""
        input_size = input.size()
        buffer = torch.pow(input, 2)
        normp = torch.sum(buffer, 1).add_(1e-10)  # epsilon guards divide-by-zero
        norm = torch.sqrt(normp)
        _output = torch.div(input, norm.view(-1, 1).expand_as(input))
        output = _output.view(input_size)
        return output
    def forward(self, x):
        """Residual stacks; returns embedding, or AngleLinear logits when feature=False."""
        x = self.relu1_1(self.conv1_1(x))
        x = x + self.relu1_3(self.conv1_3(self.relu1_2(self.conv1_2(x))))
        x = self.relu2_1(self.conv2_1(x))
        x = x + self.relu2_3(self.conv2_3(self.relu2_2(self.conv2_2(x))))
        x = x + self.relu2_5(self.conv2_5(self.relu2_4(self.conv2_4(x))))
        x = self.relu3_1(self.conv3_1(x))
        x = x + self.relu3_3(self.conv3_3(self.relu3_2(self.conv3_2(x))))
        x = x + self.relu3_5(self.conv3_5(self.relu3_4(self.conv3_4(x))))
        x = x + self.relu3_7(self.conv3_7(self.relu3_6(self.conv3_6(x))))
        x = x + self.relu3_9(self.conv3_9(self.relu3_8(self.conv3_8(x))))
        x = self.relu4_1(self.conv4_1(x))
        x = x + self.relu4_3(self.conv4_3(self.relu4_2(self.conv4_2(x))))
        x = x.view(x.size(0),-1)  # flatten to (B, 512*7*7)
        x = self.fc5(x)
        #x = self.l2_norm(x)
        if self.feature:
            return x
        x = self.fc6(x)
        return x
class sphereVGG(nn.Module):
    """VGG16-BN backbone with an embedding layer and a plain linear head.

    NOTE(review): fc5 assumes the backbone output flattens to 512*5*5 —
    i.e. an input resolution smaller than VGG's usual 224x224; confirm
    against the training pipeline.
    """
    def __init__(self,embedding_size,classnum,feature=False):
        super(sphereVGG, self).__init__()
        self.embedding_size = embedding_size
        self.classnum = classnum
        self.feature = feature
        # load feature extractor from vgg16_bn pretrained-model
        #self.vgg16_bn_feat_extractor = models.vgg16_bn(pretrained=False).features
        self.vgg16_bn_feat_extractor = nn.Sequential(*list(models.vgg16_bn(pretrained=False).features))
        # concatenate the embedding layer
        self.fc5 = nn.Linear(512*5*5,self.embedding_size)
        #self.fc6 = AngleLinear(self.embedding_size,self.classnum)
        self.fc6 = nn.Linear(self.embedding_size,self.classnum)
    def l2_norm(self,input):
        """Row-wise L2 normalisation (currently unused in forward)."""
        input_size = input.size()
        buffer = torch.pow(input, 2)
        normp = torch.sum(buffer, 1).add_(1e-10)  # epsilon guards divide-by-zero
        norm = torch.sqrt(normp)
        _output = torch.div(input, norm.view(-1, 1).expand_as(input))
        output = _output.view(input_size)
        return output
    def forward(self, x):
        """Backbone features -> embedding; adds the linear head when feature=False."""
        x = self.vgg16_bn_feat_extractor(x)
        x = x.view(x.size(0),-1)  # flatten to (B, 512*5*5)
        x = self.fc5(x)
        #x = self.l2_norm(x)
        if self.feature:
            return x
        x = self.fc6(x)
        return x
a8d607786959eb653c8e692372ed6d6631bc4c78 | Python | gistable/gistable | /dockerized-gists/4049625/snippet.py | UTF-8 | 5,407 | 2.546875 | 3 | [
"MIT"
] | permissive | import os
import sys
import pickle
import console
# I moved 'dropboxlogin' into a sub folder so it doesn't clutter my main folder
sys.path += [os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib')]
import dropboxlogin # this code can be found here https://gist.github.com/4034526
STATE_FILE = '.dropbox_state'
class dropbox_state:
def __init__(self):
self.cursor = None
self.local_files = {}
self.remote_files = {}
# use ignore_path to prevent download of recently uploaded files
def execute_delta(self, client, ignore_path = None):
delta = client.delta(self.cursor)
self.cursor = delta['cursor']
for entry in delta['entries']:
path = entry[0][1:]
meta = entry[1]
# this skips the path if we just uploaded it
if path != ignore_path:
if meta != None:
path = meta['path'][1:] # caps sensitive
if meta['is_dir']:
print '\n\tMaking Directory:',path
self.makedir_local(path)
elif path not in self.remote_files:
print '\n\tNot in local'
self.download(client, path)
elif meta['rev'] != self.remote_files[path]['rev']:
print '\n\tOutdated revision'
self.download(client, path)
# remove file or directory
else:
if os.path.isdir(path):
print '\n\tRemoving Directory:', path
os.removedirs(path)
elif os.path.isfile(path):
print '\n\tRemoving File:', path
os.remove(path)
del self.local_files[path]
del self.remote_files[path]
else:
pass # file already doesn't exist localy
# makes dirs if necessary, downloads, and adds to local state data
def download(self, client, path):
print '\tDownloading:', path
# TODO: what if there is a folder there...?
head, tail = os.path.split(path)
# make the folder if it doesn't exist yet
if not os.path.exists(head) and head != '':
os.makedirs(head)
#open file to write
local = open(path,'w')
remote, meta = client.get_file_and_metadata(os.path.join('/',path))
local.write(remote.read())
#clean up
remote.close()
local.close()
# add to local repository
self.local_files[path] = {'modified': os.path.getmtime(path)}
self.remote_files[path] = meta
def upload(self, client, path):
print '\tUploading:', path
local = open(path,'r')
meta = client.put_file(os.path.join('/',path), local, True)
local.close()
self.local_files[path] = {'modified': os.path.getmtime(path)}
self.remote_files[path] = meta
# clean out the delta for the file upload
self.execute_delta(client, ignore_path=meta['path'])
def delete(self, client, path):
print '\tFile deleted locally. Deleting on Dropbox:',path
try:
client.file_delete(path)
except:
# file was probably already deleted
print '\tFile already removed from Dropbox'
del self.local_files[path]
del self.remote_files[path]
# safely makes local dir
def makedir_local(self,path):
if not os.path.exists(path): # no need to make a dir that exists
os.makedirs(path)
elif os.path.isfile(path): # if there is a file there ditch it
os.remove(path)
del self.files[path]
os.makedir(path)
# recursively list files on dropbox
def _listfiles(self, client, path = '/'):
meta = client.metadata(path)
filelist = []
for item in meta['contents']:
if item['is_dir']:
filelist += self._listfiles(client,item['path'])
else:
filelist.append(item['path'])
return filelist
def download_all(self, client, path = '/'):
filelist = self._listfiles(client)
for file in filelist:
self.download(client, file[1:]) # trim root slash
def check_state(self, client, path):
# lets see if we've seen it before
if path not in self.local_files:
# upload it!
self.upload(client, path)
elif os.path.getmtime(path) > self.local_files[path]['modified']:
# newer file than last sync
self.upload(client, path)
else:
pass # looks like everything is good
def loadstate():
    """Load the pickled sync state from STATE_FILE.

    Raises if the file is missing or corrupt; the caller treats any
    failure as a first run.
    """
    # Binary mode: pickle streams are bytes (text mode corrupts them on
    # Windows); the context manager guarantees the handle is closed.
    with open(STATE_FILE, 'rb') as fyle:
        return pickle.load(fyle)
def savestate(state):
    """Pickle the sync state to STATE_FILE."""
    # Binary mode is required for pickle; 'with' closes the file on error too.
    with open(STATE_FILE, 'wb') as fyle:
        pickle.dump(state, fyle)
# Script entry point: one full sync cycle (pull remote changes, push local
# changes, persist state).
if __name__ == '__main__':
    console.show_activity()
    print """
****************************************
*       Dropbox File Syncronization    *
****************************************"""
    client = dropboxlogin.get_client()
    print '\nLoading local state'
    # lets see if we can unpickle
    try:
        state = loadstate()
    except:
        print '\nCannot find state file. ***Making new local state***'
        # Aaaah, we have nothing, probably first run
        state = dropbox_state()
        print '\nDownloading everything from Dropbox'
        # no way to check what we have locally is newer, gratuitous dl
        state.download_all(client)
    print '\nUpdating state from Dropbox'
    state.execute_delta(client)
    print '\nChecking for new or updated local files'
    # back to business, lets see if there is anything new or changed localy
    filelist = []
    for root, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            if filename != STATE_FILE:
                # [2:] strips the leading './' from os.walk paths.
                filelist.append( os.path.join(root, filename)[2:])
    for file in filelist:
        state.check_state(client,file)
    print '\nChecking for deleted local files'
    # Anything tracked in the state but no longer on disk was deleted locally.
    old_list = state.local_files.keys()
    for file in old_list:
        if file not in filelist:
            state.delete(client, file)
    print '\nSaving local state'
    savestate(state)
    print '\nSync complete'
c66d0ac343d30012f47ec226f7c6cca203ef827f | Python | arbuz-team/medinox | /server/service/searcher/product/sort_accuracy.py | UTF-8 | 3,270 | 2.75 | 3 | [] | no_license | from server.manage.switch.website.base import *
from server.ground.product.models import *
class Product_Content:
    """Pre-computed, lower-cased textual content of a product used for scoring."""

    def Get_Product_Descriptions(self):
        """Concatenate every description header + paragraph of the product."""
        parts = []
        for description in SQL.Filter(
                Model_Description, product=self.product):
            parts.append(description.header)
            parts.append(description.paragraph)
        return ''.join(parts).lower()

    def Calculate_Priority(self, word):
        """Weighted occurrence count: name hits weigh 10, brand 3, description 1."""
        needle = word.lower()
        return (self.name.count(needle) * 10
                + self.brand.count(needle) * 3
                + self.description.count(needle))

    def __init__(self, product):
        self.product = product
        self.name = product.name.lower()
        # Products without a brand score 0 on the brand component.
        self.brand = product.brand.name.lower() if product.brand else ''
        self.description = self.Get_Product_Descriptions()
class Sort_List:
    """Pairs each product id with its searchable content for priority scoring."""

    def Generate_Priority_List(self):
        """Return [(priority, product_id), ...] scored against self.words."""
        priorities = []
        for product_id, content in self.sort_list:
            # Total accuracy of this product summed over every search word.
            score = sum(content.Calculate_Priority(word) for word in self.words)
            priorities.append((score, product_id))
        return priorities

    def Create_List(self):
        """Cache one (pk, content) pair per product so scoring hits the DB once."""
        for product in self.products:
            self.sort_list.append((product.pk, Product_Content(product)))

    def __init__(self, products, words):
        self.sort_list = []
        self.products = products
        self.words = words
        self.Create_List()
class Sort_By_Accuracy(Base):
    """Sorts the search engine's product set by word-match accuracy."""

    def __Get_Sorted_Products(self, sorted_priority_list):
        """Fetch products from the DB in the order given by the priority list."""
        ordered_ids = [pair[1] for pair in sorted_priority_list]
        # Build a SQL CASE expression mapping each product id to its rank.
        clauses = ''.join('WHEN id={0} THEN {1} '.format(product_id, rank)
                          for rank, product_id in enumerate(ordered_ids))
        sorting = 'CASE {0} END'.format(clauses)
        # Let the database return rows ordered by the synthetic rank column.
        return SQL.Filter(Model_Product, pk__in=self.products).extra(
            select={'sorting': sorting}, order_by=['sorting'])

    def Sort(self):
        """Return products sorted by accuracy; pass-through when nothing to sort."""
        # No search words or no products: nothing to score.
        if not self.words or not self.products:
            return self.products
        # Score every product, then order by the session's sort direction.
        priority_list = Sort_List(self.products, self.words).Generate_Priority_List()
        direction = self.request.session['searcher_sort_direction']
        if direction == 'descending':
            priority_list.sort(reverse=True)
        if direction == 'ascending':
            priority_list.sort()
        return self.__Get_Sorted_Products(priority_list)

    def __init__(self, search_engine):
        Base.__init__(self, search_engine)
        self.products = search_engine.products
        self.words = search_engine.words
| true |
c51f78ba501953da7c12015f96a2b0827f9973a9 | Python | lgtateos/gis540 | /example_scripts/buffer_clip.py | UTF-8 | 518 | 2.609375 | 3 | [] | no_license | #buffer_clip.py (hard-coded version)
#Purpose: Buffer a zone and use it to clip another file
import arcpy, sys
arcpy.env.overwriteOutput = True
arcpy.env.workspace = "C:/Temp/"
# Set buffer params
fireDamage = "special_regions.shp"
fireBuffer = fireDamage[:-4] + "_buffer.shp"
bufferDist = "1 mile"
# Set clip params
park = "COVER63p.shp"
clipOutput = park[:-4] + "_damageBuffer.shp"
arcpy.Buffer_analysis(fireDamage, fireBuffer,bufferDist)
arcpy.Clip_analysis(park, fireBuffer, clipOutput ) | true |
c1899fc2373814fe4734e0b51ea67729905bff0c | Python | KareliaConsolidated/Pandas | /Pandas_Tricks/26_PyCon_2019.py | UTF-8 | 10,857 | 2.5625 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
ted = pd.read_csv('Datasets/ted.csv')
print(ted.head())
# print(pd.show_versions())
print(ted.shape) # (2550, 17)
print(ted.isna().sum())
# comments 0
# description 0
# duration 0
# event 0
# film_date 0
# languages 0
# main_speaker 0
# name 0
# num_speaker 0
# published_date 0
# ratings 0
# related_talks 0
# speaker_occupation 6 <-
# tags 0
# title 0
# url 0
# views 0
# dtype: int64
print(ted.dtypes)
# comments int64
# description object
# duration int64
# event object
# film_date int64
# languages int64
# main_speaker object
# name object
# num_speaker int64
# published_date int64
# ratings object
# related_talks object
# speaker_occupation object
# tags object
# title object
# url object
# views int64
# dtype: object
##### WHICH TALKS PROVOKE THE MOST ONLINE DISCUSSION ?
# Consider the limitations and biases of your data when analyzing it.
print(ted.sort_values('comments').tail())
# comments description ... url views
# 1787 2673 Our consciousness is a fundamental aspect of o... ... https://www.ted.com/talks/david_chalmers_how_d... 2162764
# 201 2877 Jill Bolte Taylor got a research opportunity f... ... https://www.ted.com/talks/jill_bolte_taylor_s_... 21190883
# 644 3356 Questions of good and evil, right and wrong ar... ... https://www.ted.com/talks/sam_harris_science_c... 3433437
# 0 4553 Sir Ken Robinson makes an entertaining and pro... ... https://www.ted.com/talks/ken_robinson_says_sc... 47227110
# 96 6404 Richard Dawkins urges all atheists to openly s... ... https://www.ted.com/talks/richard_dawkins_on_m... 4374792
ted['comments_per_view'] = ted.comments / ted.views
print(ted.sort_values('comments_per_view').tail())
# comments description ... views comments_per_view
# 954 2492 Janet Echelman found her true voice as an arti... ... 1832930 0.001360
# 694 1502 Filmmaker Sharmeen Obaid-Chinoy takes on a ter... ... 1057238 0.001421
# 96 6404 Richard Dawkins urges all atheists to openly s... ... 4374792 0.001464
# 803 834 David Bismark demos a new system for voting th... ... 543551 0.001534
# 744 649 Hours before New York lawmakers rejected a key... ... 292395 0.002220
ted['views_per_comment'] = ted.views / ted.comments
print(ted.sort_values('views_per_comment').head())
# comments description duration ... views comments_per_view views_per_comment
# 744 649 Hours before New York lawmakers rejected a key... 453 ... 292395 0.002220 450.531587
# 803 834 David Bismark demos a new system for voting th... 422 ... 543551 0.001534 651.739808
# 96 6404 Richard Dawkins urges all atheists to openly s... 1750 ... 4374792 0.001464 683.134291
# 694 1502 Filmmaker Sharmeen Obaid-Chinoy takes on a ter... 489 ... 1057238 0.001421 703.886818
# 954 2492 Janet Echelman found her true voice as an arti... 566 ... 1832930 0.001360 735.525682
##### VISUALIZE THE DISTRIBUTION OF COMMENTS
# 1. Choose your plot type based on the question you are answering and the data type(s) you are working with.
# 2. Use pandas as one-liners to iterate through plots quickly
# 3. Try modifing the plot defaults
# 4. Creating Plots invloves decision-making
print(ted[ted.comments >= 1000].shape) # 32,9
################# WAY 01 #################
# ted[ted.comments < 1000].comments.plot(kind='hist')
# OR
################# WAY 02 #################
# ted.query('comments < 1000').comments.plot(kind='hist')
# OR
################# WAY 03 #################
ted.loc[ted.comments < 1000, 'comments'].plot(kind='hist',bins=20)
# plt.show()
# PLOT THE NUMBER OF TALKS THAT TOOK PLACE EACH YEAR
# 1. Read the Documentation
# 2. Use the datetime data type for dates and times
# 3. Check your work as you go
# 4. Consider excluding data if it might not be relevant
print(ted.event.sample(10)) # Random 10 Samples
print(ted.film_date.head()) # Unix TimeStamp
# 0 1140825600
# 1 1140825600
# 2 1140739200
# 3 1140912000
# 4 1140566400
# Name: film_date, dtype: int64
print(pd.to_datetime(ted.film_date, unit='s').head())
# 0 2006-02-25
# 1 2006-02-25
# 2 2006-02-24
# 3 2006-02-26
# 4 2006-02-22
# Name: film_date, dtype: datetime64[ns]
ted['film_datetime'] = pd.to_datetime(ted.film_date, unit='s')
print(ted[['event','film_datetime']].sample(5))
# event film_datetime
# 2140 TEDxCreativeCoast 2015-05-01
# 1906 TEDGlobal 2014 2014-10-06
# 148 TED2007 2007-03-03
# 1202 TEDxChange 2012-04-05
# 1492 TED2013 2013-02-26
print(ted.film_datetime.dt.year.head()) #dayofyear
# 0 2006
# 1 2006
# 2 2006
# 3 2006
# 4 2006
# Name: film_datetime, dtype: int64
print(ted.film_datetime.dt.year.value_counts().sort_index().plot())
# 2013 270
# 2011 270
# 2010 267
# 2012 267
# 2016 246
# 2015 239
# 2014 237
# 2009 232
# 2007 114
# 2017 98
# 2008 84
# 2005 66
# 2006 50
# 2003 33
# 2004 33
# 2002 27
# 1998 6
# 2001 5
# 1983 1
# 1991 1
# 1994 1
# 1990 1
# 1984 1
# 1972 1
# Name: film_datetime, dtype: int64
# plt.show()
##### What were the "BEST" events in TED history to attend ?
print(ted.event.value_counts().head())
print(ted.groupby('event').views.agg(['count','mean','sum']).sort_values('sum').tail())
# TEDxNorrkoping 6569493.0
# TEDxCreativeCoast 8444981.0
# TEDxBloomington 9484259.5
# TEDxHouston 16140250.5
# TEDxPuget Sound 34309432.0 <-
# Name: views, dtype: float64
# TEDxPuget Sound have 34 Mil. Views Per Talks
# count mean sum
# event
# TED2006 45 3.274345e+06 147345533
# TED2015 75 2.011017e+06 150826305
# TEDGlobal 2013 66 2.584163e+06 170554736
# TED2014 84 2.072874e+06 174121423
# TED2013 77 2.302700e+06 177307937
###### UNPACK THE RATINGS DATA
print(ted.ratings.head())
# Looking at First Row
print(ted.loc[0, 'ratings']) or print(ted.ratings[0])
#[{'id': 7, 'name': 'Funny', 'count': 19645}, {'id': 1, 'name': 'Beautiful', 'count': 4573}, {'id': 9, 'name': 'Ingenious', 'count': 6073}, {'id': 3, 'name': 'Courageous', 'count': 3253}, {'id': 11, 'name': 'Longwinded', 'count': 387}, {'id': 2, 'name': 'Confusing', 'count': 242}, {'id': 8, 'name': 'Informative', 'count': 7346}, {'id': 22, 'name': 'Fascinating', 'count': 10581}, {'id': 21, 'name': 'Unconvincing', 'count': 300}, {'id': 24, 'name': 'Persuasive', 'count': 10704}, {'id': 23, 'name': 'Jaw-dropping', 'count': 4439}, {'id': 25, 'name': 'OK', 'count': 1174}, {'id': 26, 'name': 'Obnoxious', 'count': 209}, {'id': 10, 'name': 'Inspiring', 'count': 24924}]
print(type(ted.ratings[0])) # str - String Representation of Dictionary
import ast # Abstract Syntax Tree, to unpack string of dictionary
print(type(ast.literal_eval('[1,2,3]'))) # List
def str_to_list(ratings_str):
    """Safely parse the string form of a Python literal.

    The raw ``ratings`` column stores each talk's ratings as the string
    representation of a list of dicts; ``ast.literal_eval`` turns it
    back into real Python objects without the dangers of ``eval``.
    """
    parsed = ast.literal_eval(ratings_str)
    return parsed
# Unpack the stringified ratings column into real Python lists.
print(str_to_list(ted.ratings[0]))
# [{'id': 7, 'name': 'Funny', 'count': 19645}, {'id': 1, 'name': 'Beautiful', 'count': 4573}, {'id': 9, 'name': 'Ingenious', 'count': 6073}, {'id': 3, 'name': 'Courageous', 'count': 3253}, {'id': 11, 'name': 'Longwinded', 'count': 387}, {'id': 2, 'name': 'Confusing', 'count': 242}, {'id': 8, 'name': 'Informative', 'count': 7346}, {'id': 22, 'name': 'Fascinating', 'count': 10581}, {'id': 21, 'name': 'Unconvincing', 'count': 300}, {'id': 24, 'name': 'Persuasive', 'count': 10704}, {'id': 23, 'name': 'Jaw-dropping', 'count': 4439}, {'id': 25, 'name': 'OK', 'count': 1174}, {'id': 26, 'name': 'Obnoxious', 'count': 209}, {'id': 10, 'name': 'Inspiring', 'count': 24924}]
print(ted.ratings.apply(str_to_list).head())
print(ted.ratings.apply(ast.literal_eval).head())
# Persist the parsed ratings as a new column of Python lists.
ted['ratings_list']=ted.ratings.apply(lambda x: ast.literal_eval(x))
# 0 [{'id': 7, 'name': 'Funny', 'count': 19645}, {...
# 1 [{'id': 7, 'name': 'Funny', 'count': 544}, {'i...
# 2 [{'id': 7, 'name': 'Funny', 'count': 964}, {'i...
# 3 [{'id': 3, 'name': 'Courageous', 'count': 760}...
# 4 [{'id': 9, 'name': 'Ingenious', 'count': 3202}...
# Name: ratings, dtype: object
print(type(ted['ratings_list'][0])) # list
#### COUNT THE TOTAL NUMBER OF RATINGS RECEIVED BY EACH TALK
# New column named "num_ratings"
# Bonus:
# For each talk, calculate the percentage of ratings that were negative.
# For each talk, calculate the average number of ratings it received per day since it was published.
print(ted.ratings_list[0])
def get_num_ratings(list_of_dicts):
    """Total number of ratings for a talk: the sum of every 'count'."""
    return sum(entry['count'] for entry in list_of_dicts)
# Attach the per-talk total rating count and summarise its distribution.
ted['num_ratings'] = ted.ratings_list.apply(get_num_ratings)
print(ted.num_ratings.describe())
# count 2550.000000
# mean 2436.408235
# std 4226.795631
# min 68.000000
# 25% 870.750000
# 50% 1452.500000
# 75% 2506.750000
# max 93850.000000
# Name: num_ratings, dtype: float64
def get_negative_ratings(list_of_dicts):
    """Sum the counts of the rating categories treated as negative.

    NOTE(review): 'Ingenious' is included in the negative set even
    though it reads as positive -- kept as-is to preserve behaviour;
    confirm whether that was intentional.
    """
    negative = {'Ingenious', 'Longwinded', 'Confusing', 'Unconvincing', 'Obnoxious'}
    return sum(entry['count'] for entry in list_of_dicts if entry['name'] in negative)
def calculate_percentage_neg(list_of_dicts):
    """Percentage of a talk's ratings that fall in the negative categories."""
    negative = get_negative_ratings(list_of_dicts)
    total = get_num_ratings(list_of_dicts)
    return negative / total * 100
# Per-talk negative-rating percentage.
print(ted.ratings_list.apply(calculate_percentage_neg))
##### Which occupations deliver the funniest TED talks on average?
# Bonus :
# For each talk, calculate the most frequent rating
# For each talk, clean the occupation data so that there's only once occupation per talk
def get_funny_ratings(list_of_dicts):
    """Number of 'Funny' ratings a talk received (0 when absent)."""
    return sum(entry['count'] for entry in list_of_dicts if entry['name'] == "Funny")
# Absolute 'Funny' counts per talk.
ted['funny_ratings'] = ted.ratings_list.apply(get_funny_ratings)
print(ted['funny_ratings'].head())
def calculate_percentage_funny(list_of_dicts):
    """Percentage of a talk's ratings that are 'Funny'."""
    return get_funny_ratings(list_of_dicts) / get_num_ratings(list_of_dicts) * 100
# Normalised funny share, comparable across talks of different popularity.
ted['funny_rate'] = ted.ratings_list.apply(calculate_percentage_funny)
print(ted.funny_rate.head())
print(ted.sort_values('funny_rate').speaker_occupation.tail(10)) | true |
dc85933a15bd0d65d7f2a39aaf30621e7cf58e6b | Python | achudy/JPWP_chudy | /dziurawy_komunikator-master/backend/resources/conversation.py | UTF-8 | 3,347 | 2.546875 | 3 | [] | no_license | from flask_restful import Resource, reqparse
from flask_jwt_extended import jwt_required, get_jwt_identity
from flask_socketio import SocketIO, emit
from datetime import datetime
import uuid
from models.conversation import ConversationModel
from models.conversationInfo import ConversationInfoModel
def maxLength(s):
    """Validate message content length for reqparse: 1..300 characters.

    Returns the string unchanged when within bounds, otherwise raises
    ValueError (which reqparse surfaces as a 400 response).
    """
    upper, lower = 300, 1
    length = len(s)
    if length > upper:
        raise ValueError(
            "Maximum length of content is {} characters.".format(upper))
    if length < lower:
        raise ValueError(
            "Minimum length of content is {} characters.".format(lower))
    return s
class MessageSender(Resource):
    """POST endpoint that appends a message to a conversation and
    notifies connected clients over Socket.IO."""
    def __init__(self,socket):
        # Socket.IO server handle, injected when the resource is registered.
        self.socket = socket;
    # Request parser shared by all instances (class attribute).
    parser_post = reqparse.RequestParser()
    parser_post.add_argument(
        "content", type=maxLength, required=True, help="Field Cannot be blank!"
    )
    @jwt_required
    def post(self, conversation_id):
        """Store a new message authored by the JWT-authenticated member.

        Returns 403 when the member does not belong to the conversation,
        201 on success.
        """
        member_id = get_jwt_identity()
        data = MessageSender.parser_post.parse_args()
        content = data['content']
        if not ConversationModel.check_if_user_is_a_member(
                member_id, conversation_id):
            return {'message': 'Conversation does not exist or you are not a member.'}, 403
        date = datetime.utcnow()
        conv = ConversationModel(conversation_id, member_id, date, content)
        conv.save_to_db()
        # Refresh the conversation's summary row (counters, last message).
        update(conversation_id, content, member_id, date)
        # Broadcast the conversation id so clients can fetch new messages.
        self.socket.emit('newMessage', conversation_id, broadcast=True)
        return {'message': "Message sucessfully sent!"},201
class MessagesFinder(Resource):
    """GET endpoint returning the messages newer than a given message id."""
    @jwt_required
    def get(self, conversation_id, last_message_id):
        # Only members of the conversation may read its messages.
        member_id = get_jwt_identity()
        if not ConversationModel.check_if_user_is_a_member(
                member_id, conversation_id):
            return {'message': 'Conversation does not exist or you are not a member.'}, 403
        new_messages = ConversationModel.find_by_conversation_id_and_last_message_id(conversation_id,last_message_id)
        return {
            "new_messages" : [new_message.messageJson() for new_message in new_messages]
        }
class ConversationList(Resource):
    """GET endpoint listing every conversation the caller belongs to."""
    @jwt_required
    def get(self):
        memberId = get_jwt_identity()
        convs = ConversationModel.find_by_member_id(memberId)
        return {'conversations': [conv.conversationJson() for conv in convs]}
def create_conversation(member_id_1, member_id_2):
    """Create a new two-person conversation and persist it.

    Generates a fresh conversation id, then stores one ConversationModel
    row per member plus a shared ConversationInfoModel row (member
    count 2, no last message yet).
    """
    created_on = datetime.utcnow()
    conversation_id = uuid.uuid4().hex
    info = ConversationInfoModel(conversation_id, 2, created_on)
    member_rows = [
        ConversationModel(conversation_id, member_id_1, created_on, None),
        ConversationModel(conversation_id, member_id_2, created_on, None),
    ]
    for row in member_rows:
        row.save_to_db()
    info.save_to_db()
# Updates the ConversationInfoModel summary row; called whenever a new
# message is sent.
def update(_id, last_message, last_message_user_id, sent_on, member_count=None):
    """Record a newly sent message on the conversation's summary row.

    Increments the message counter and refreshes the last-message
    fields; ``member_count`` is only written when explicitly supplied.
    """
    conv = ConversationInfoModel.find_by_id(_id)
    conv.message_count = conv.message_count + 1
    conv.last_message = last_message
    conv.last_message_user_id = last_message_user_id
    # Fixed: `if(member_count)` used truthiness, so a legitimate count
    # of 0 would silently be dropped; test against None explicitly.
    if member_count is not None:
        conv.member_count = member_count
    conv.last_message_sent_on = sent_on
    conv.save_to_db()
b3904fa2953b74b3bd6ed78d03c1bd44c878eeb1 | Python | ashish-5209/TreeDataStructure | /reverseLevelOrder.py | UTF-8 | 730 | 3.765625 | 4 | [] | no_license | from queue import Queue
class Node:
    """Binary tree node holding a value and left/right child links."""
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
def reverseOrder(root):
    """Print the reverse level-order traversal of a binary tree.

    Nodes are emitted bottom level first, left to right within each
    level, separated by single spaces (same output format as before,
    including the trailing space).
    """
    if root is None:
        return
    # collections.deque is the idiomatic single-threaded queue; the
    # original used queue.Queue, which carries needless locking overhead,
    # and then destructively popped a list to reverse it.
    from collections import deque
    pending = deque([root])
    output = deque()
    while pending:
        node = pending.popleft()
        # Prepending reverses the (right-child-first) level order, so a
        # plain left-to-right scan of `output` yields the bottom-up order.
        output.appendleft(node)
        if node.right:
            pending.append(node.right)
        if node.left:
            pending.append(node.left)
    for node in output:
        print(node.data, end=" ")
# Demo: build the complete tree
#        1
#      /   \
#     2     3
#    / \   / \
#   4   5 6   7
# and print its reverse level order: "4 5 6 7 2 3 1 ".
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
root.left.right = Node(5)
root.right.left = Node(6)
root.right.right = Node(7)
reverseOrder(root)
| true |
b1479bc095162c56fc040b6ceb06504e703f023d | Python | wzk1015/Data-Structure | /homework1/3.py | UTF-8 | 394 | 3.359375 | 3 | [] | no_license | s = input()
# Normalise a sentence read on the previous line: lower-case every word,
# then capitalise the first word and any word that follows a period.
s = s.split()
for i in range(len(s)):
    s[i] = s[i].lower()
    if i == 0:
        s[i] = s[i].capitalize()
    elif s[i-1][-1] == '.':
        s[i] = s[i].capitalize()
s = " ".join(s)
# Upper-case the pronoun "i": each split/join pair rewrites one textual
# context (" i,", " i ", ",i,", ",i ").
# NOTE(review): an "i" at the very end of the string (e.g. " i" or " i.")
# matches none of these patterns -- confirm whether that is intended.
s = s.split(' i,')
s = ' I,'.join(s)
s = s.split(' i ')
s = ' I '.join(s)
s = s.split(',i,')
s = ',I,'.join(s)
s = s.split(',i ')
s = ',I '.join(s)
print(s)
| true |
169b83db5a5330651df8be52dec15ce566cfe269 | Python | bruna/lab-de-programacao | /bruna_vasconcelos_E9.py | UTF-8 | 2,905 | 2.953125 | 3 | [] | no_license | class No:
    def __init__(self, data):
        # Singly-linked-list node: payload plus pointer to the next node.
        self.data = data
        self.nextNo = None
    def getData(self):
        """Return the node's payload."""
        return self.data
    def setData(self,data):
        """Replace the node's payload."""
        self.data=data
    def getNextNo(self):
        """Return the next node (or None at the tail)."""
        return self.nextNo
    def setNextNo(self,newNo):
        """Point this node at `newNo`."""
        self.nextNo = newNo;
class Lista:
    """Singly linked list used as a queue: insert at the head with
    inserirComeco, remove from the tail with removerFim."""
    def __init__(self):
        self.primNo = None  # head node
        self.ultNo = None   # tail node
    def isEmpty(self):
        """Return True when the list holds no nodes."""
        if self.primNo == None and self.ultNo == None:
            return True
        return False
    def inserirComeco(self,valor):
        """Insert a new node holding `valor` at the head."""
        novoNo = No(valor)
        if self.isEmpty():
            self.primNo = self.ultNo = novoNo
        else:
            novoNo.setNextNo(self.primNo)
            self.primNo = novoNo
    def removerFim(self):
        """Remove the tail node and return its value (None when empty).

        O(n): walks from the head to find the node before the tail.
        """
        if self.isEmpty():
            return
        ultNoValor = self.ultNo.getData()
        if self.primNo is self.ultNo:
            self.primNo = self.ultNo = None
        else:
            NoAtual = self.primNo
            while NoAtual.getNextNo() is not self.ultNo:
                NoAtual = NoAtual.getNextNo()
            NoAtual.setNextNo(None)
            self.ultNo = NoAtual
        return ultNoValor
    def imprime(self):
        """Write the tail element (front of the queue) to the output
        file, or "0 " when the queue is empty.

        NOTE(review): writes through the module-global file handle
        `fecha`, so this class is only usable from the script below.
        """
        ult = self.ultNo
        if self.isEmpty():
            fecha.write("0 ")
        if ult != None:
            string = ult.getData()+" " ## only the first of each queue, which here is the last element
            fecha.write(string)
        return None
import sys
# Batch queue simulation: argv[1] = input file, argv[2] = output file.
abre = open(sys.argv[1],"r")
fecha = open(sys.argv[2],"w")
numCasos = abre.readline()
for nCaso in range(int(numCasos)):
    qtComandos = abre.readline()
    caso = str(nCaso+1)
    fecha.write("Caso "+caso+":\n")
    # Two queues: priority (preferencial) and regular.
    preferencial = Lista()
    regular = Lista()
    for i in range(int(qtComandos)):
        comando = abre.readline().split()
        if comando[0] == "I":
            # Print the front of both queues (regular first).
            regular.imprime()
            preferencial.imprime()
            fecha.write("\n")
        elif comando[0] == "A":
            # Serve a regular customer; if none, serve a priority one.
            # (removerFim is a no-op on an empty queue, so the second
            # call is harmless when regular was empty.)
            if regular.isEmpty() == True:
                preferencial.removerFim()
            regular.removerFim()
        elif comando[0] == "B":
            # Serve a priority customer; if none, serve a regular one.
            if preferencial.isEmpty() == True:
                regular.removerFim()
            preferencial.removerFim()
        else:
            # "p <id>" enqueues into the priority queue, anything else
            # with an id goes into the regular queue.
            if len(comando) > 1:
                codigoPessoa = comando[1]
                if comando[0] == "p":
                    preferencial.inserirComeco(codigoPessoa)
                else:
                    regular.inserirComeco(codigoPessoa)
abre.close()
fecha.close()
| true |
843645b0a924d46b345b46044429d47324833683 | Python | kddor/PythonAction | /face_to_offer/13FindKthToTail.py | UTF-8 | 1,646 | 4.03125 | 4 | [] | no_license | # -*- coding:utf-8 -*-
class Node:
    """Singly-linked-list node: a value and a pointer to the next node."""
    def __init__(self, data):
        self.data = data
        self.next = None
# Define the linked list.
class LinkedList:
    def __init__(self):
        """
        Initialise an empty linked list.
        """
        self.length = 0
        self.head = None
    def is_empty(self):
        """
        Check whether the list is empty.
        :return: boolean
        """
        return self.length == 0
    def append(self, this_node):
        """
        Append a node/value at the end of the list; a plain value is
        wrapped in a Node object first.
        :param this_node: a value or a Node object
        :return: None
        """
        if isinstance(this_node, Node):
            pass
        else:
            this_node = Node(data=this_node)
        if self.is_empty():
            # Empty list: point the head at the new node.
            self.head = this_node
        else:
            node = self.head
            while node.next:
                node = node.next
            node.next = this_node
        self.length += 1
    def FindKthToTail(self, head, k):
        """Return the k-th node from the tail of the list rooted at
        `head` (k=1 is the last node), or None when k exceeds the
        list length.

        Two-pointer technique: `front` runs k nodes ahead, then both
        pointers advance until `front` reaches the tail.
        NOTE(review): k=0 skips the lead-in loop and ends up returning
        the tail node -- confirm whether that is intended.
        """
        front = head
        later = head
        for i in range(k):
            if front==None:
                return
            if front.next == None and i==k-1:
                return head
            front = front.next
        while front.next != None:
            front = front.next
            later = later.next
        return later.next
if __name__ == '__main__':
    #node1=ListNode(val='1')
    # Demo: build the list 0..9 and fetch the 2nd node from the tail.
    # NOTE(review): the name `list` shadows the built-in list type.
    list=LinkedList()
    for i in range(10):
        list.append(i)
    head=list.head
    print(head)  # prints the Node object's default repr
    k=list.FindKthToTail(head,2)
    print(k)  # the returned Node's default repr (its data is 8)
| true |
c2913285b413815ff3fa0acfe372fc214325656b | Python | Ankygurjar/DSA-Course | /Graphs/adjcencyList.py | UTF-8 | 614 | 3.828125 | 4 | [] | no_license |
class Graph:
    """Graph stored as an adjacency list: {node: [neighbours]}."""

    def __init__(self):
        # Bug fix: `root` used to be a *class* attribute, so every Graph
        # instance shared a single adjacency dict. It is now per-instance.
        self.root = dict()

    def nodes(self, nodes: list):
        """Register each node with an empty adjacency list if absent."""
        for node in nodes:
            if node not in self.root:
                self.root[node] = []

    def vertices(self, node, edges: list):
        """Append the given neighbours to `node`'s adjacency list.

        Unknown nodes are ignored and duplicate neighbours are skipped.
        (Name kept for caller compatibility, though these are edges.)
        """
        if node in self.root:
            cur = self.root[node]
            for edge in edges:
                if edge not in cur:
                    cur.append(edge)
# Demo: build a small undirected graph and print its adjacency lists.
g = Graph()
g.nodes([1, 0, 2, 3])
g.vertices(0, [1, 2])
g.vertices(1, [0, 2])
g.vertices(2, [0, 1, 3])
g.vertices(3, [2])
# NOTE(review): dict.values() returns a view object, not a list, so the
# `list` annotation below is inaccurate.
myList: list = g.root.values()
print(myList)
# print(g.root)
| true |
2d5fc0a0a55049455f5cea34cc81af2fd78fcbe3 | Python | zmichaelov/papyrus | /papyrus.py | UTF-8 | 13,390 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python
# This class is the main starting point for our application
# Initializes all of our GUI code
import wx, wx.richtext, wx.aui
import os.path, codecs
import scribe
class PapyrusApp(wx.App):
    """wx application subclass with macOS dock-icon reopen support."""
    def MacReopenApp(self):
        """Re-create the main window when the macOS dock icon is clicked
        while no window is currently open.

        (Fixed: this docstring used to sit inside the `if` body as a
        stray no-op string statement.)
        """
        if self.GetTopWindow() is None:
            frame = MainWindow()
            self.SetTopWindow(frame)
            frame.Center()
            frame.Show()
# Placeholder tab title for unsaved documents; referenced throughout MainWindow.
defaultname = '[No Name]'
# The single application object; created before MainWindow so handlers
# (e.g. OnQuit) can reference it as a module global.
app = PapyrusApp(redirect=False)
class MainWindow(wx.Frame):
    """Main editor frame: a tabbed (AuiNotebook) collection of
    RichTextCtrl editors with menu/toolbar file operations. Modified
    tabs are marked by a leading '*' in the tab title."""
    def __init__(self):
        super(MainWindow, self).__init__(None, size=(800, -1))
        # used as a temporary variable to store filenames, for files we are opening
        self.filename = defaultname
        self.dirname = '.'
        # to be appended to our file names
        self.extension = ".txt"
        # initialize GUI components
        self.CreateInteriorWindowComponents()
        self.CreateExteriorWindowComponents()
        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
        #self.control.SetCursor(wx.StockCursor(wx.CURSOR_POINT_LEFT))
    def NewScribe(self):
        '''Create a new RichTextCtrl page wired up with Scribe
        (autocomplete) behaviour and change/keystroke handlers.'''
        control = wx.richtext.RichTextCtrl(self.notebook, style=wx.TE_MULTILINE)
        control.SetBackgroundColour('#F6F6EF')
        newscribe = scribe.Scribe(control)
        control.Bind(wx.EVT_TEXT, self.OnTextChanged)
        control.Bind(wx.EVT_CHAR, newscribe.OnChar)
        return control
    def CreateInteriorWindowComponents(self):
        ''' Create "interior" window components. In this case it is just a
            simple multiline text control. '''
        self.panel = wx.Panel(self)
        self.notebook = wx.aui.AuiNotebook(self.panel)
        # create our RichTextCtrl as a child of the notebook
        # add our first page to the notebook
        self.notebook.AddPage(self.NewScribe(), defaultname, select=True)
        # listen for close and double-click events
        self.notebook.Bind(wx.aui.EVT_AUINOTEBOOK_BG_DCLICK, self.OnNewTab)
        self.notebook.Bind(wx.aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.OnCloseTab)
        # adjust sizing parameters
        sizer = wx.BoxSizer()
        sizer.Add(self.notebook, 1, wx.EXPAND)
        self.panel.SetSizer(sizer)
    def CreateExteriorWindowComponents(self):
        ''' Create "exterior" window components, such as menu and status
            bar. '''
        self.CreateMenu()
        self.CreateToolbar()
        self.CreateStatusBar()
        self.SetTitle()
    def CreateToolbar(self):
        """Build the toolbar; tool ids 801-808 are bound to handlers below."""
        toolbar = self.CreateToolBar( wx.TB_HORIZONTAL | wx.NO_BORDER )
        toolbar.AddSimpleTool(801, wx.Bitmap('assets/img/page.png'), 'New', 'Create a new document')
        toolbar.AddSimpleTool(802, wx.Bitmap('assets/img/folder.png'), 'Open', 'Open an existing document')
        toolbar.AddSimpleTool(803, wx.Bitmap('assets/img/save.png'), 'Save', 'Save the current document')
        toolbar.AddSeparator()
        # add undo and redo and print
        toolbar.AddSimpleTool(804, wx.Bitmap('assets/img/cut.png'), 'Cut', 'Cut')
        toolbar.AddSimpleTool(805, wx.Bitmap('assets/img/page_full.png'), 'Copy', 'Copy')
        toolbar.AddSimpleTool(806, wx.Bitmap('assets/img/glyphicons_029_paste.png'), 'Paste', 'Paste')
        toolbar.AddSeparator()
        toolbar.AddSimpleTool(807, wx.Bitmap('assets/img/undo.png'), 'Undo', 'Undo')
        toolbar.AddSimpleTool(808, wx.Bitmap('assets/img/redo.png'), 'Redo', 'Redo')
        #toolbar.AddCheckTool(807, wx.Bitmap('assets/img/glyphicons_102_bold.png'))
        #toolbar.AddCheckTool(808, wx.Bitmap('assets/img/glyphicons_101_italic.png'))
        #toolbar.AddCheckTool(809, wx.Bitmap('assets/img/glyphicons_103_text_underline.png'))
        # TODO: add left, center and right justified icons
        toolbar.Realize()
        self.Bind(wx.EVT_TOOL, self.OnNew , id=801)
        self.Bind(wx.EVT_TOOL, self.OnOpen, id=802)
        self.Bind(wx.EVT_TOOL, self.OnSave, id=803)
        self.Bind(wx.EVT_TOOL, self.OnCut, id=804)
        self.Bind(wx.EVT_TOOL, self.OnCopy, id=805)
        self.Bind(wx.EVT_TOOL, self.OnPaste,id=806)
        self.Bind(wx.EVT_TOOL, self.OnUndo, id=807)
        self.Bind(wx.EVT_TOOL, self.OnRedo, id=808)
    def CreateMenu(self):
        """Build the File and Edit menus; a (None, ...) tuple marks a separator."""
        fileMenu = wx.Menu()
        for id, label, helpText, handler in \
            [(wx.ID_NEW, '&New Window\tCtrl+N', 'New window', self.OnNew),
             (101, '&New Tab\tCtrl+T', 'New tab', self.OnNewTab),
             (wx.ID_ABOUT, '&About', 'Information about this program', self.OnAbout),
             (wx.ID_OPEN, '&Open\tCtrl+O', 'Open a new file', self.OnOpen),
             (wx.ID_SAVE, '&Save\tCtrl+S', 'Save the current file', self.OnSave),
             (wx.ID_SAVEAS, 'Save &As\tShift+Ctrl+S', 'Save the file under a different name', self.OnSaveAs),
             (None, None, None, None),
             (wx.ID_EXIT, 'E&xit', 'Terminate the program', self.OnQuit)]:
            if id == None:
                fileMenu.AppendSeparator()
            else:
                item = fileMenu.Append(id, label, helpText)
                self.Bind(wx.EVT_MENU, handler, item)
        menuBar = wx.MenuBar()
        menuBar.Append(fileMenu, '&File') # Add the fileMenu to the MenuBar
        # Edit Menu
        editMenu = wx.Menu()
        for id, label, helpText, handler in \
            [(wx.ID_UNDO, '&Undo\tCtrl+Z', 'Undo the previous action', self.OnUndo),
             (wx.ID_REDO, '&Redo\tShift+Ctrl+Z', 'Redo the previous action', self.OnRedo),
             (None, None, None, None),
             (102, '&Close Tab\tCtrl+W', 'Close the current tab', self.OnCloseTab),
             (103, '&Close Window\tShift+Ctrl+W','Close the current window', self.OnCloseWindow)]:
            if id == None:
                editMenu.AppendSeparator()
            else:
                item = editMenu.Append(id, label, helpText)
                self.Bind(wx.EVT_MENU, handler, item)
        menuBar.Append(editMenu, '&Edit')
        self.SetMenuBar(menuBar)  # Add the menuBar to the Frame
    def SetTitle(self):
        # MainWindow.SetTitle overrides wx.Frame.SetTitle, so we have to
        # call it using super:
        super(MainWindow, self).SetTitle('Papyrus')
    # Helper methods:
    def GetCurrentCtrl(self):
        '''Returns the RichTextCtrl of the currently active tab'''
        current = self.notebook.GetSelection()
        return self.notebook.GetPage(current)
    def defaultFileDialogOptions(self):
        ''' Return a dictionary with file dialog options that can be
            used in both the save file dialog as well as in the open
            file dialog. '''
        return dict(message='Choose a file', defaultDir=self.dirname,
                    wildcard='*'+self.extension)
    def askUserForFilename(self, **dialogOptions):
        """Show a file dialog; on OK, store the chosen name/dir on self
        and return True, otherwise return False."""
        dialog = wx.FileDialog(self, **dialogOptions)
        if dialog.ShowModal() == wx.ID_OK:
            userProvidedFilename = True
            self.filename = dialog.GetFilename()
            self.dirname = dialog.GetDirectory()
        else:
            userProvidedFilename = False
        dialog.Destroy()
        return userProvidedFilename
    # Event handlers:
    def OnNew(self, event):
        # Open a brand new top-level window.
        frame = MainWindow()
        frame.Show()
    def OnNewTab(self, event):
        # add our new page to the notebook
        self.notebook.AddPage(self.NewScribe(), defaultname, select=True)
    def OnAbout(self, event):
        dialog = wx.MessageDialog(self, 'A text editor inspired by Google Scribe', 'About Papyrus Editor', wx.OK | wx.ICON_QUESTION)
        dialog.ShowModal()
        dialog.Destroy()
    def OnCloseWindow(self, event):
        # iterate through those tabs that are still open
        # NOTE(review): xrange is Python-2-only; this file predates Python 3.
        size = self.notebook.GetPageCount()
        for i in xrange(0, size):
            self.OnCloseTab(event)
            #self.notebook.AdvanceSelection()
    def OnQuit(self, event):
        # destroy the app main loop
        app.Exit()
    def OnSave(self, event):
        """Save the active tab; prompts for a name if it is still unnamed.

        NOTE(review): for a new file the extension is appended to
        self.filename even if the user already typed one -- confirm
        whether double extensions (e.g. "a.txt.txt") are intended.
        """
        current = self.notebook.GetSelection()
        filename = self.notebook.GetPageText(current)
        # Strip the leading '*' modified marker from the tab title.
        if filename.startswith("*"):
            filename = filename[1:]
            self.notebook.SetPageText(current, filename)
        control = self.notebook.GetPage(current)
        if filename == defaultname:
            if self.askUserForFilename(defaultFile=filename, style=wx.SAVE, **self.defaultFileDialogOptions()):
                # get the updated filename
                textfile = codecs.open(os.path.join(self.dirname, self.filename+self.extension), 'w', 'utf-8', 'strict')
                textfile.write(control.GetValue())
                textfile.close()
                #control.SaveFile(os.path.join(self.dirname, self.filename+self.extension))
                self.notebook.SetPageText(current, self.filename+self.extension)
        else:
            textfile = codecs.open(os.path.join(self.dirname, filename), 'w','utf-8', 'strict')
            textfile.write(control.GetValue())
            textfile.close()
            #control.SaveFile(os.path.join(self.dirname, filename))
    def OnOpen(self, event):
        """Open a file into the current tab when it is pristine,
        otherwise into a fresh tab."""
        if self.askUserForFilename(style=wx.OPEN,
                                   **self.defaultFileDialogOptions()):
            # check and see if we have a currently opened tab that has not been modified
            current = self.notebook.GetSelection()
            control = self.NewScribe()
            if self.GetCurrentCtrl().GetValue() == "" and self.notebook.GetPageText(current) == defaultname:
                control = self.GetCurrentCtrl() # use the existing page
                current = self.notebook.GetSelection() # get the updated current tab
                self.notebook.SetPageText(current, self.filename) # give it the appropriate filename
            else:
                self.notebook.AddPage(control, self.filename, select=True) # add a new page
            textfile = open(os.path.join(self.dirname, self.filename), 'r')
            control.SetValue(textfile.read()) # this will fire our OnTextChanged event
            # we have to remove the asterisk that will be prepended to the filename
            current = self.notebook.GetSelection()
            filename = self.notebook.GetPageText(current)
            if filename.startswith("*"):
                filename = filename[1:]
                self.notebook.SetPageText(current, filename)
            textfile.close()
            #control.LoadFile(os.path.join(self.dirname,self.filename))
    def OnSaveAs(self, event):
        current = self.notebook.GetSelection()
        filename = self.notebook.GetPageText(current)
        if self.askUserForFilename(defaultFile=filename, style=wx.SAVE,
                                   **self.defaultFileDialogOptions()):
            self.OnSave(event)
    def OnCut(self, event):
        control = self.GetCurrentCtrl()
        control.Cut()
    def OnCopy(self, event):
        control = self.GetCurrentCtrl()
        control.Copy()
    def OnPaste(self, event):
        control = self.GetCurrentCtrl()
        control.Paste()
    def OnBold(self, event):
        # NOTE(review): not bound by any visible menu or toolbar entry.
        control = self.GetCurrentCtrl()
        control.BeginBold()
    def OnUndo(self, event):
        control = self.GetCurrentCtrl()
        control.Undo()
    def OnRedo(self, event):
        control = self.GetCurrentCtrl()
        control.Redo()
    def TabCloseHelper(self, event):
        """Returns False if the user presses cancel, True otherwise"""
        current = self.notebook.GetSelection() # get the updated current tab
        filename = self.notebook.GetPageText(current)
        modify = filename.startswith("*")
        if modify:
            dlg = wx.MessageDialog(self, 'Save before Close?', '', wx.YES_NO | wx.YES_DEFAULT |
                                   wx.CANCEL | wx.ICON_QUESTION)
            val = dlg.ShowModal()
            if val == wx.ID_YES:
                self.OnSave(event)
            elif val == wx.ID_CANCEL:
                return False
            else:
                return True
        return True
    def OnCloseTab(self, event):
        # prompt to save if tab has been modified
        still_close = self.TabCloseHelper(event)
        if not still_close:
            event.Veto() # if the user presses Cancel, don't close the tab
            return
        # close whole window if this is our last tab
        count = self.notebook.GetPageCount()
        if count == 1:
            self.Destroy()
        elif count > 1:
            current = self.notebook.GetSelection()
            self.notebook.DeletePage(current)
    def OnTextChanged(self, event):
        # Mark the active tab as modified by prefixing '*' to its title.
        current = self.notebook.GetSelection() # get the updated current tab
        filename = self.notebook.GetPageText(current)
        if filename.startswith("*"):
            pass
        else:
            self.notebook.SetPageText(current, '*'+filename) # give it the appropriate filename
        # get the notebook tab and append an asterisk to its title
        event.Skip()
# Initialize our application
# Build the first window, register it with the app and enter the event
# loop. SetExitOnFrameDelete(False) keeps the app alive after the last
# window closes (it can then be reopened via PapyrusApp.MacReopenApp).
frame = MainWindow()
app.SetTopWindow(frame)
app.SetExitOnFrameDelete(False)
frame.Centre()
frame.Show()
app.MainLoop()
| true |
612e5c2104fdca7b54bb0922b1e32e7c9266def1 | Python | jsmartin/report_aggregator | /CombineReports.py | UTF-8 | 3,312 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env python
import sys, csv, glob, re
def build_latencies(stats_arr, filename):
    """Merge one per-node latency CSV into the running aggregate.

    Row layout: elapsed, window, n, min, mean, median, 95th, 99th,
    99.9th, max, errors.  Counts (n, errors) are summed, min/max are
    widened, and every other column is folded in with a running
    pairwise average, matching the original aggregation scheme.
    Returns the updated ``stats_arr``.
    """
    i = 0
    # Fixed for Python 3: text mode with newline='' (per the csv docs)
    # instead of 'rb', next(reader) instead of the Py2-only
    # reader.next(), and a list comprehension instead of subscripting
    # a map() object.
    with open(filename, 'r', newline='') as summary_file:
        reader = csv.reader(summary_file)
        next(reader)  # skip header line
        for row in reader:
            vals = [float(col.strip()) for col in row]
            elapsed, window, n, minimum, mean, median, nine5, nine9, nine9_9, maximum, errors = vals[:11]
            if len(stats_arr) <= i:
                stats_arr.append([elapsed, window, n, minimum, mean, median, nine5, nine9, nine9_9, maximum, errors])
            else:
                stats_arr[i][0] = (stats_arr[i][0] + float(elapsed)) / 2
                stats_arr[i][1] = (stats_arr[i][1] + window) / 2
                stats_arr[i][2] = int(stats_arr[i][2] + n)
                stats_arr[i][3] = int(min(stats_arr[i][3], minimum))
                stats_arr[i][4] = (stats_arr[i][4] + mean) / 2
                stats_arr[i][5] = int((stats_arr[i][5] + median) / 2)
                stats_arr[i][6] = int((stats_arr[i][6] + nine5) / 2)
                stats_arr[i][7] = int((stats_arr[i][7] + nine9) / 2)
                stats_arr[i][8] = int((stats_arr[i][8] + nine9_9) / 2)
                stats_arr[i][9] = int(max(stats_arr[i][9], maximum))
                stats_arr[i][10] = int(stats_arr[i][10] + errors)
            i += 1
    return stats_arr
def build_summary(stats_arr, filename):
    """Merge one per-node summary CSV into the running aggregate.

    Row layout: elapsed, window, total, successful, failed.  The three
    counters are summed; elapsed/window are folded in with a running
    pairwise average.  Returns the updated ``stats_arr``.
    """
    i = 0
    # Fixed for Python 3 (same treatment as build_latencies): text mode
    # with newline='', next(reader), and a comprehension over the row.
    with open(filename, 'r', newline='') as summary_file:
        reader = csv.reader(summary_file)
        next(reader)  # skip header line
        for row in reader:
            vals = [float(col.strip()) for col in row]
            elapsed, window, total, successful, failed = vals[:5]
            if len(stats_arr) <= i:
                stats_arr.append([elapsed, window, total, successful, failed])
            else:
                stats_arr[i][0] = (stats_arr[i][0] + float(elapsed)) / 2
                stats_arr[i][1] = (stats_arr[i][1] + window) / 2
                stats_arr[i][2] = int(stats_arr[i][2] + total)
                stats_arr[i][3] = int(stats_arr[i][3] + successful)
                stats_arr[i][4] = int(stats_arr[i][4] + failed)
            i += 1
    return stats_arr
results_base_dir = sys.argv[1]

# Collect the distinct *latencies.csv basenames found under any node dir.
latency_dict = {}
for latency_file in glob.glob(results_base_dir + "/*/*latencies.csv"):
    matchObj = re.match(r'(.*)\/(.*)\/(.*)', latency_file, re.M | re.I)
    if matchObj:
        latency_dict[matchObj.group(3)] = []

# Write Latencies: one merged output file per latency basename.
for latency_name in latency_dict:
    stats_arr = latency_dict[latency_name]
    for latency_file in glob.glob(results_base_dir + "/*/" + latency_name):
        stats_arr = build_latencies(latency_dict[latency_name], latency_file)
    # Fixed: the original called `f.close` (missing parentheses), so the
    # handle was never closed/flushed; `with` guarantees it.
    with open(latency_name, 'w') as f:
        f.write("elapsed, window, n, min, mean, median, 95th, 99th, 99_9th, max, errors\n")
        for row in stats_arr:
            f.write(','.join(map(str, row)) + '\n')

# Write Summary: merge every node's summary.csv into one file.
stats_arr = []
for stat_file in glob.glob(results_base_dir + "/*/summary.csv"):
    stats_arr = build_summary(stats_arr, stat_file)
with open('summary.csv', 'w') as f:
    f.write("elapsed, window, total, successful, failed\n")
    for row in stats_arr:
        f.write(','.join(map(str, row)) + '\n')
eb3ccd1b2d69d71c2ae8d78ebcd7254242e857d5 | Python | rgliuca/pygame | /pygame_starter.py | UTF-8 | 821 | 3.296875 | 3 | [] | no_license | import pygame
# Open an 800x800 window and repeatedly draw three primitives until the
# window is closed. Mouse clicks print the click position to stdout.
screen = pygame.display.set_mode((800,800))
pygame.display.set_caption("Draw Basic Objects")
GREEN = (0, 255, 0) # (R, G, B)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
done = False
while not done:
    # Drain the event queue once per frame.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
            break
        if event.type == pygame.MOUSEBUTTONDOWN:
            print(pygame.mouse.get_pos())
    '''
    key_pressed = pygame.key.get_pressed()
    if key_pressed[pygame.K_q]:
        done = True
    '''
    #screen.fill((255, 255, 255))
    pygame.draw.line(screen, GREEN, (200, 200), (400, 400))
    pygame.draw.circle(screen, RED, (400, 200), 50, 0)
    pygame.draw.rect(screen, BLUE, pygame.Rect(300, 200, 350, 250), 5)
    pygame.display.flip()
pygame.quit()
| true |
7aa5ad405c85e04c9a7587c888dca7797ebe2f99 | Python | angkunz/python | /week3/test3.2.py | UTF-8 | 321 | 3.171875 | 3 | [] | no_license | #โปรแกรมหาผลรวม
# Read how many numbers to accept, then read and total them.
# Fixed: renamed the accumulator from `sum` (which shadowed the builtin)
# to `total`, replaced the manual while/counter with a for loop, and
# stripped the trailing " | true" dataset-dump residue from the last line.
total = 0
loop = int(input("กรุณากรอกจำนวนครั้งในการรับค่า : "))
for _ in range(loop):
    a = int(input("กรอกตัวเลข : "))
    total += a
print("ผลรวมทั้งหมด = ", total)
d04425d7e751b9267d1f893efe8bc7ba88cc2881 | Python | rubenleblancpressenda/IN104_leBlanc-Ruben_Arignavong-Mattheo | /in104.py | UTF-8 | 2,123 | 3.40625 | 3 | [] | no_license |
#la strategie de base est de priviligier seulement deux directions pour conserver les grandes cases dans un coin.
#mais, comment choisir entre ses deux directions ?
#la premiere strategie est de fusionner en priorite les deux cases identiques de plus grande valeur
def strat1(mat):
    """2048 move choice #1: merge the highest-valued adjacent pair.

    Returns 4 (move left) when the best mergeable pair lies in a row,
    5 (move up) when it lies in a column, and 0 when no pair of value
    greater than 2 is adjacent.
    """
    direction = 0  # 4 = left, 5 = up
    best = 2       # only tiles strictly greater than 2 are considered
    for a in range(4):
        for b in range(3):
            if mat[a][b] == mat[a][b + 1] and mat[a][b] > best:
                direction, best = 4, mat[a][b]
            elif mat[b][a] == mat[b + 1][a] and mat[b][a] > best:
                direction, best = 5, mat[b][a]
    return direction
# Second strategy: choose the direction that merges the most tiles.
def strat2(mat):
    """2048 move choice #2: count mergeable adjacent pairs.

    Returns 4 (left) when more pairs sit in rows, 5 (up) when column
    pairs are at least as numerous (and non-zero), 0 otherwise.
    NOTE: pairs of equal zero tiles are counted too, as in the original.
    """
    sens = 0
    lign = 0  # mergeable pairs within rows
    col = 0   # mergeable pairs within columns
    for i in range(4):
        for j in range(3):
            if mat[i][j] == mat[i][j + 1]:
                lign = lign + 1
            elif mat[j][i] == mat[j + 1][i]:
                col = col + 1
    # Syntax fix: the original read `col=>lign`, which is not valid
    # Python; `>=` is the intended operator.
    if col >= lign and col != 0:
        sens = 5
    if lign > col:
        sens = 4
    return sens
def tour(mat, strat):
    """Play one 2048 turn: pick a direction via the chosen strategy,
    apply it with maj(), spawn the new tile (fi.grille_finale) and
    print the board.

    NOTE(review): relies on maj, fi and print_mat being defined
    elsewhere in the project.
    """
    # Syntax fix: the original used `if strat=1` / `if strat=2`
    # (assignment, not comparison); also initialise sens so it is
    # defined even for an unexpected `strat` value.
    sens = 0
    if strat == 1:
        sens = strat1(mat)
    if strat == 2:
        sens = strat2(mat)
    if sens != 0:  # at least two identical adjacent tiles exist
        mat = maj(mat, sens)
    else:  # the strategy could not pick a direction: fall back to a default
        gauche = 0
        haut = 0
        # Count empty cells to decide the least disruptive fallback move.
        # NOTE(review): the indices [1][i] / [i][1] look like they were
        # meant to be row 0 / column 0 -- kept as written, confirm.
        for i in range(4):
            if mat[1][i] == 0:
                haut = haut + 1
            elif mat[i][1] == 0:  # syntax fix: the colon was missing
                gauche = gauche + 1
        if mat != maj(mat, 5):  # moving up changes the board
            mat = maj(mat, 5)
        elif mat != maj(mat, 4):
            mat = maj(mat, 4)
        elif mat != maj(mat, 2) and gauche <= haut:
            # Critical case: the base strategy cannot be respected, so
            # move in the direction that disturbs the grid the least
            # (down, when the top row has more empty cells than the
            # left column).
            mat = maj(mat, 2)
        else:
            mat = maj(mat, 3)
    mat = fi.grille_finale(mat)
    print_mat(mat)
12af922014473e4effe4745868ef99be2967b5dd | Python | python20180319howmework/homework | /zhangzhen/20180327/text2.py | UTF-8 | 190 | 3.171875 | 3 | [
"Apache-2.0"
] | permissive |
#判断从你出生到今年共有多少的闰年
num = 0
for i in range(1994,2019):
if (i % 4 == 0 and i % 100 != 0 )or i % 400 == 0:
num = num + 1
else:
continue;
print(num)
| true |
9253962dfcec43cd84d55c709a06e407bc290d1a | Python | g10guang/offerSword | /app/list/entry_node.py | UTF-8 | 1,763 | 3.421875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# author: Xiguang Liu<g10guang@foxmail.com>
# 2018-05-04 17:01
# 题目描述:https://www.nowcoder.com/practice/253d2c59ec3e4bc68da16833f79a38e4?tpId=13&tqId=11208&rp=1&ru=%2Fta%2Fcoding-interviews&qru=%2Fta%2Fcoding-interviews%2Fquestion-ranking
class ListNode:
    """Singly-linked-list node: a value and a pointer to the next node."""
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
def EntryNodeOfLoop(self, pHead):
"""
思路:
将所有已经访问过的元素加入到集和中,如果访问到某个元素在集和中,那么该元素就是环的入口
如果链表环比较长会导致该算法的空间利用率比较底下
"""
visited = set()
p = pHead
while p and p not in visited:
visited.add(p)
p = p.next
return p
class Solution2:
    def EntryNodeOfLoop(self, pHead):
        """Find the entry node of a linked-list cycle in O(1) space.

        Classic Floyd cycle detection: a slow pointer (1 step/iter) and
        a fast pointer (2 steps/iter) meet inside the cycle iff one
        exists; restarting one pointer from the head and advancing both
        one step at a time then makes them meet exactly at the entry.

        Bug fix: the original advanced the fast pointer *three* nodes
        per iteration (`p = p.next` followed by `p = p.next.next`),
        which can fail to meet the slow pointer (looping forever) on
        some cycle-length/gap combinations; Floyd's invariant requires
        the fast pointer to move exactly two nodes.
        """
        if pHead is None or pHead.next is None:
            return None
        slow = pHead
        fast = pHead
        while fast is not None and fast.next is not None:
            slow = slow.next
            fast = fast.next.next
            if slow is fast:
                # Distance from head to entry equals distance from the
                # meeting point to the entry (modulo cycle length).
                probe = pHead
                while probe is not slow:
                    probe = probe.next
                    slow = slow.next
                return probe
        return None
| true |
da9683050ec38858ec13b3a13048926d3c3c0963 | Python | sqiprasanna/coding-questions | /DynamicProgramming/edit_distance.py | UTF-8 | 1,851 | 3.984375 | 4 | [] | no_license | """
url : https://practice.geeksforgeeks.org/problems/edit-distance/0
Given two strings str1 and str2 and below operations that can performed on str1. Find minimum number of edits (operations) required to convert ‘str1′ into ‘str2′.
Insert
Remove
Replace
All of the above operations are of cost=1.
Both the strings are of lowercase.
Input:
The First line of the input contains an integer T denoting the number of test cases. Then T test cases follow. Each tese case consists of two lines. The first line of each test case consists of two space separated integers P and Q denoting the length of the strings str1 and str2 respectively. The second line of each test case coantains two space separated strings str1 and str2 in order.
Output:
Corresponding to each test case, pirnt in a new line, the minimum number of operations required.
Constraints:
1<=T<=50
1<= Length(str1) <= 100
1<= Length(str2) <= 100
Example:
Input:
1
4 5
geek gesek
Output:
1
"""
def lcs(str1, str2, i, j, matrix):
    """Memoised edit distance between str1[:i+1] and str2[:j+1].

    (Despite the name, this computes Levenshtein distance, not LCS.)
    `matrix` caches subproblem answers; -1 marks an uncomputed cell.
    """
    # One prefix exhausted: the remaining characters of the other must
    # all be inserted/removed.
    if i < 0:
        return j + 1
    if j < 0:
        return i + 1
    cached = matrix[i][j]
    if cached != -1:
        return cached
    if str1[i] == str2[j]:
        result = lcs(str1, str2, i - 1, j - 1, matrix)
    else:
        result = 1 + min(
            lcs(str1, str2, i - 1, j, matrix),      # delete from str1
            lcs(str1, str2, i, j - 1, matrix),      # insert into str1
            lcs(str1, str2, i - 1, j - 1, matrix),  # replace
        )
    matrix[i][j] = result
    return result
def main():
    """Read T test cases of (len1 len2, str1 str2) from stdin and print
    the edit distance for each."""
    t = int(input())
    for i in range(0,t):
        numbers = input().replace(" "," ").strip(" ").split(" ")
        n1 = int(numbers[0])
        n2 = int(numbers[1])
        strings = input().strip(" ").split(" ")
        str1 = strings[0]
        str2 = strings[1]
        # Fresh n1 x n2 memo table of -1 (uncomputed) per test case.
        # NOTE(review): the inner loop variable `i` shadows the outer
        # test-case counter (harmless here, but confusing).
        matrix = []
        for i in range(0,n1):
            row = [-1]*n2
            matrix.append(row)
        print(lcs(str1,str2,n1-1,n2-1,matrix))
if __name__ == '__main__':
    # (Fixed: the original line carried trailing " | true" dataset-dump
    # residue that would raise NameError at runtime.)
    main()
91ad7a4e0be7d0bae82ac9567999363aee0855a2 | Python | nicolepilsworth/tetris | /util.py | UTF-8 | 1,408 | 2.8125 | 3 | [] | no_license | import random
import numpy as np
# Given a list, return a random element from the list
def randChoice(l):
    """Return a uniformly random element of list `l`.

    Kept as an explicit randint-based index lookup so the consumed RNG
    stream matches the original implementation exactly.
    """
    idx = random.randint(0, len(l) - 1)
    return l[idx]
# For this implementation, concatenate board config and
# Tetromino config into a string for the state
def strState(board, tetromino):
    """Concatenate board then tetromino cells into one digit string.

    Each cell is formatted with '%d' (so booleans become 0/1) and rows
    are flattened in order, board digits first.
    """
    parts = []
    for grid in (board, tetromino):
        for row in grid:
            for cell in row:
                parts.append('%d' % cell)
    return ''.join(parts)
def networkState(board, tetromino):
    """Flatten board and tetromino into a single (1, N) feature row."""
    combined = np.concatenate((board.flatten(), tetromino.flatten()))
    return combined.reshape(1, -1)
def cnnState(b, tetromino):
    """Stack the (right-padded) tetromino on top of the board and add
    batch/channel axes, producing a CNN input of shape
    (1, b.nrows + piece_rows, b.ncols, 1)."""
    pad_cols = b.ncols - tetromino.shape[1]
    padded = np.pad(tetromino, ((0, 0), (0, pad_cols)), "constant",
                    constant_values=(False,))
    stacked = np.concatenate((padded, b.board), axis=0)
    return np.reshape(stacked, (1, b.nrows + tetromino.shape[0], b.ncols, 1))
def pgState(b, tetromino):
    """Return a flat feature vector: piece cells first, then the board."""
    return np.concatenate((tetromino.flatten(), b.board.flatten()))
def a3cState(b):
    """Reshape the board into a (batch, rows, cols, channel) tensor."""
    target_shape = (1, b.nrows, b.ncols, 1)
    return np.reshape(b.board, target_shape)
def epsilonGreedy(q, epsilon, possMoves):
    """With probability ``epsilon`` pick a random move from ``possMoves``;
    otherwise pick the move with the highest Q-value in table ``q``."""
    if random.random() < epsilon:
        return randChoice(possMoves)
    qValues = [q[row][col] for row, col in possMoves]
    return possMoves[np.argmax(qValues)]
| true |
4b8d1812b6f96293323db724093a1eae571caa09 | Python | RijuDasgupta9116/LintCode | /Minimum Adjustment Cost.py | UTF-8 | 1,411 | 3.640625 | 4 | [
"Apache-2.0"
] | permissive | """
Given an integer array, adjust each integer so that the difference between every pair of adjacent integers is not greater than a
given number, target.
If the array before adjustment is A, the array after adjustment is B, you should minimize the sum of |A[i]-B[i]|
Note
You can assume each number in the array is a positive integer and not greater than 100
Example
Given [1,4,2,3] and target=1, one of the solutions is [2,3,2,3], the adjustment cost is 2 and it's minimal. Return 2.
"""
__author__ = 'Danyang'
class Solution:
    def MinAdjustmentCost(self, A, target):
        """
        Minimise sum(|A[i] - B[i]|) over arrays B whose adjacent elements
        differ by at most ``target``.

        DP over states: f[i][j] = min cost of adjusting the first i
        elements with B[i-1] == j, where j ranges over the value domain
        1..100 (similar to the Viterbi algorithm for HMMs).

        Fix: replaced the Python-2-only ``xrange`` with ``range`` (works
        on both 2 and 3) and collapsed the final minimum scan.

        :param A: An integer array (values assumed in 1..100).
        :param target: An integer, max allowed adjacent difference.
        """
        S = 100  # upper bound on any value in A, per the problem statement
        n = len(A)
        INF = 1 << 31
        f = [[INF for _ in range(S + 1)] for _ in range(n + 1)]
        for j in range(S + 1):
            f[0][j] = 0
        for i in range(1, n + 1):
            for j in range(1, S + 1):
                # Previous value k must be within ``target`` of j.
                for k in range(max(1, j - target), min(S, j + target) + 1):
                    f[i][j] = min(f[i][j], f[i - 1][k] + abs(A[i - 1] - j))
        return min(f[n][1:])
if __name__ == "__main__":
    # Regression check using the known answer for this input.
    assert Solution().MinAdjustmentCost([12, 3, 7, 4, 5, 13, 2, 8, 4, 7, 6, 5, 7], 2) == 19
| true |
f674143dc6fd958da26b4cbfc6a2eb420e188e57 | Python | AlejandroArgueta/Tesis-Licenciatura | /GraphDayRawAvgMin.py | UTF-8 | 906 | 3.28125 | 3 | [] | no_license | #UNIVERSIDAD NACIONAL AUTÓNOMA DE MÉXICO
#Argueta Hernandez, Fidel Alejandro alejo_tigres@yahoo.com
#Program that plots one day of raw data averaged per minute
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as md
#Read the EFM (electric field mill) CSV file
data = pd.read_csv('Data E Geofisica avg min/May2019/EFavgmin05202019.csv')
#Name the columns
data.columns = ['datetime', 'field']
y = data['field']
#Convert the time column to datetime (it is initially just object dtype)
data['datetime'] = pd.to_datetime(data['datetime'], format='%Y-%m-%d %H:%M:%S')
fig, ax = plt.subplots(figsize=(10,10))
ax.plot(data['datetime'], y, color = 'purple')
#Set the H:M format for the x axis ticks
xfmt = md.DateFormatter('%H:%M')
ax.xaxis.set_major_formatter(xfmt)
ax.set_title('Campo eléctrico atmosférico 19-05-2018')
ax.set_xlabel('Hora')
ax.set_ylabel('Campo eléctrico [kV/m]')
plt.grid()
plt.show()
| true |
54990a595247a933f11ecc786b689c5d0389ef6a | Python | kellibudd/code-challenges | /removefromll.py | UTF-8 | 1,323 | 4.0625 | 4 | [] | no_license | # Singly-linked lists are already defined with this interface:
# class ListNode(object):
# def __init__(self, x):
# self.value = x
# self.next = None
#
def removeKFromList(l, k):
    """
    Remove every node whose value equals k from the singly linked list l
    and return the (possibly new) head, or None if nothing remains.

    For l = [3, 1, 2, 3, 4, 5] and k = 3 the result is [1, 2, 4, 5];
    for l = [1000, 1000] and k = 1000 the result is [].

    Bug fix: the original crashed (NameError / AttributeError) on inputs
    such as an empty list or [k, x], because ``current`` could be unbound
    or None when the second loop dereferenced ``current.next``.
    """
    # Skip any leading nodes holding k; the first survivor is the new head.
    while l is not None and l.value == k:
        l = l.next
    # Unlink k-nodes from the remainder of the list in a single pass.
    node = l
    while node is not None and node.next is not None:
        if node.next.value == k:
            node.next = node.next.next
        else:
            node = node.next
    return l
| true |
cc8aa35f6209c866c8d4f158cce2878ae3e9c9df | Python | breezy1812/MyCodes | /LeetCode/492-Construct the Rectangle/solution.py | UTF-8 | 460 | 2.953125 | 3 | [] | no_license | #!/usr/local/bin/python3
# coding: UTF-8
# Author: David
# Email: youchen.du@gmail.com
# Created: 2017-02-13 13:58
# Last modified: 2017-02-13 13:59
# Filename: solution.py
# Description:
class Solution(object):
    def constructRectangle(self, area):
        """
        Return [L, W] with L * W == area, L >= W and L - W minimal.

        Bug fix: ``area / f1`` is float division on Python 3 (the file's
        shebang is python3), so the length came back as a float such as
        2.0; integer division ``//`` keeps the result an int.

        :type area: int
        :rtype: List[int]
        """
        from math import sqrt
        # Walk down from the square root: the first divisor at or below it
        # yields the most square-like rectangle.
        for f1 in range(int(sqrt(area)), 0, -1):
            if area % f1 == 0:
                return [area // f1, f1]
| true |
a99bb107e8e65936e8cb5456bf9bb621b0b7bf63 | Python | Amaayezing/ECS-10 | /change/change.py | UTF-8 | 815 | 3.6875 | 4 | [] | no_license | #Maayez Imam 10/2/2017
#Change program
moneyWithdrawn = int(input('Please enter the amount of money you wish to withdraw: '))
ones = 0
fives = 0
tens = 0
twenties = 0
fifties = 0
hundreds = 0
modhundreds = 0
modfifties = 0
modtwenties = 0
modtens = 0
modfives = 0
# Greedy change-making: take as many of each denomination as possible and
# carry the remainder (the mod* variables) down to the next smaller bill.
hundreds = moneyWithdrawn / 100
modhundreds = moneyWithdrawn % 100
fifties = modhundreds / 50
modfifties = modhundreds % 50
twenties = modfifties / 20
modtwenties = modfifties % 20
tens = modtwenties / 10
modtens = modtwenties % 10
fives = modtens / 5
modfives = modtens % 5
# NOTE(review): on Python 3 the divisions above are float division; the %d
# formats below truncate them. modfives is already < 5, so "% 5" is a no-op.
ones = modfives % 5
print("You received %d hundred(s)" %hundreds)
print("You received %d fifty(s)" %fifties)
print("You received %d twenty(s)" %twenties)
print("You received %d ten(s)" %tens)
print("You received %d five(s)" %fives)
print("You received %d one(s)" %ones)
| true |
20dc4d2b45a00c67c5e3fdb5fbd09f8d42c3b643 | Python | tiandiyijian/CTCI-6th-Edition | /01.01.py | UTF-8 | 422 | 2.953125 | 3 | [] | no_license | class Solution:
def isUnique(self, astr: str) -> bool:
if len(astr) > 26:
return False
mask = 0
for c in astr:
tmp = ord(c) - ord('a')
tmp = 1 << tmp
if mask & tmp > 0:
return False
else:
mask |= tmp
return True
if __name__ == "__main__":
    s = Solution()
    # 'leetcode' repeats 'e', so this prints False.
    print(s.isUnique('leetcode'))
| true |
af093f645a1f208ec6b1f3186ec0a6ec45a32889 | Python | fervillarce/reggaeton-lyrics-generator-lstm | /Source/nicky.py | UTF-8 | 1,325 | 2.953125 | 3 | [] | no_license | import numpy as np
from keras.models import load_model
import pickle
def load_obj(name):
    """Unpickle and return the object stored at ../obj/<name>.pkl."""
    path = '../obj/' + name + '.pkl'
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def sample(preds, temperature=1.0):
    """Sample an index from probability array ``preds`` after temperature
    scaling (low temperature sharpens the distribution)."""
    preds = np.asarray(preds).astype('float64')
    # Rescale in log space, exponentiate, then renormalise to sum to 1.
    scaled = np.exp(np.log(preds) / temperature)
    scaled = scaled / np.sum(scaled)
    draw = np.random.multinomial(1, scaled, 1)
    return np.argmax(draw)
def predict(sentence):
    """Generate 40 characters after ``sentence`` with the trained LSTM and
    return at most the first 8 space-separated words of the output.

    NOTE(review): ``sentence`` is never extended/shifted with the generated
    characters inside the loop, so every step conditions on the same seed
    text — confirm this is intended.
    """
    model = load_model('../Output/model.h5')
    MAXLEN = 40
    chars = load_obj('chars_list')
    char_indices = load_obj('char_indices_dict')
    indices_char = load_obj('indices_char_dict')
    prediction = ''
    for i in range(40):
        # One-hot encode the seed text into the model's input tensor.
        x_pred = np.zeros((1, MAXLEN, len(chars)))
        for t, char in enumerate(sentence):
            x_pred[0, t, char_indices[char]] = 1.
        # Make probability predictions with the model
        preds = model.predict(x_pred, verbose=0)[0]
        next_index = sample(preds, 0.2)
        next_char = indices_char[next_index]
        prediction += next_char
    pred = prediction.split(" ")[:8] # Prevent from half words. 8 is the number of total words in the prediction.
    pred = " ".join(pred)
    return pred
| true |
7808b6e1847f55df9d759d8d7771dcd7ca04469a | Python | ViniciusTLR/Codigo_em_Python | /Informacao da variavel.py | UTF-8 | 168 | 3.578125 | 4 | [] | no_license | variavel = input('Digite algo: ')
# Inspect the value read into ``variavel`` on the previous line.
print(type(variavel))
print(variavel.isalnum())  # only letters/digits?
print(variavel.isalpha())  # only letters?
print(variavel.isascii())  # only ASCII characters?
print(variavel.isdecimal())  # only decimal digits?
| true |
1e97870c8e6e175b9d42a10073ba58a95c488476 | Python | DeepeshYadav/AutomationMarch2020 | /PythonPractice/Decorators/property_decor_example2.py | UTF-8 | 525 | 4.0625 | 4 | [] | no_license | # Will continue from example , will solve same probplem using property decorator
class Student:
    """Student whose ``msg`` property derives from name and grade."""

    def __init__(self, name, grade):
        self.name = name
        self.grade = grade

    @property
    def msg(self):
        """Human-readable summary: "<name> got the grade <grade>"."""
        return self.name + " got the grade " + self.grade

    @msg.setter
    def msg(self, new_msg):
        # Parse the sentence back: first word is the name, last the grade.
        words = new_msg.split(" ")
        self.name = words[0]
        self.grade = words[-1]
obj = Student("Mohit", "B")
# Assigning to the property re-parses the sentence into name and grade.
obj.msg = "Atriyo got the grade A"
print(obj.grade)
print(obj.name)
print(obj.msg) | true |
7f6bb8b3b79d4d532de2663461989f5f67e2e473 | Python | TBooker/Castaneus_Recombination_and_Phasing | /sites_2_fasta.py | UTF-8 | 2,401 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python
import argparse,vcf, textwrap
parser = argparse.ArgumentParser(description="This script generates a fasta file, or several fasta files(LATER) from a full tabixed GZVCF and a file of sites with the haplotype info ")
parser.add_argument("combined", type = str, help="name of the combined file that has positions and haplotypes")
parser.add_argument("region", type = str, help="The chromosomal position, in tabix format, for the chromosome and position you used")
parser.add_argument("GZVCF",type = str, help = "The name of the tabixed, G-zipped VCF file that contains info on all sites")
parser.add_argument("--output", type = str, help="The name of the output fasta, default =[haplotypes.fasta]",default = "haplotypes.fasta")
args = parser.parse_args()
# Parse a "chrN:start-end" style region string.
chrom = args.region.split(":")[0].split("r")[1]
start = int(args.region.split(":")[1].split("-")[0])
end = int(args.region.split(":")[1].split("-")[1])
# Grab only the header (first line) of the combined file.
with open(args.combined) as FILE:
    for line in FILE:
        header = line
        break
# full_haps: haplotype name -> growing sequence string.
# hap_index: column index in the combined file -> haplotype name.
full_haps = {}
hap_index = {}
for i in header.strip("\n").split("\t")[7:]:
    full_haps[i] = ""
    hap_index[header.strip("\n").split("\t").index(i)] =i
vcf = vcf.Reader(open(args.GZVCF))
with open(args.combined) as FILE:
    index = 0
    previous_pos = start
    for line in FILE:
        if line == header:
            continue
        items = line.strip("\n").split("\t")
        var_pos = int(items[0])
        print var_pos
        ### The water gets a little choppy here.
        ## what this section does is to grab the chunk of the genome before the
        # NOTE(review): the fetch below always starts from ``start`` rather
        # than ``previous_pos`` — confirm that is intended for inter-variant gaps.
        if var_pos - previous_pos >1:
            chunk =vcf.fetch(chrom,start,var_pos-1)
            ref_chunk = "".join([j.REF for j in chunk])
            for key in hap_index.keys():
                full_haps[hap_index[key]]+= ref_chunk+items[key]
        elif var_pos-previous_pos ==1:
            for key in hap_index.keys():
                full_haps[hap_index[key]]+= items[key]
        previous_pos = var_pos
        index +=1
####
### Var_pos should now represent the last line in the file...
# Append the reference tail between the last variant and the region end.
if end - var_pos >1:
    chunk =vcf.fetch(chrom,var_pos+1,end)
    ref_chunk = "".join([j.REF for j in chunk])
    for key in hap_index.keys():
        full_haps[hap_index[key]]+= ref_chunk
elif var_pos-previous_pos ==1:
    pass
## You should now have a dictionary with n haplotypes
## You now need to write these to a FASTA file...
out_fasta = open(args.output,"w")
for i in full_haps.keys():
    out_fasta.write(">" + i + "\n" + "".join(textwrap.wrap(full_haps[i],width =70)) +"\n")
| true |
da64d0fe6cbe95da4c6e4c633f7b549ac78067d8 | Python | ghobs91/Data-Structures | /heap/heap.py | UTF-8 | 1,298 | 3.546875 | 4 | [] | no_license | class Heap:
    def __init__(self):
        # Backing array for the binary heap; the top element lives at index 0.
        self.storage = []
def insert(self, value):
self.storage.append(value)
return self._bubble_up(self.get_size() - 1)
def delete(self):
returnvalue = self.storage[1]
self.storage[1] = self.storage[self.get_size()]
self.get_size = self.get_size() - 1
self.storage.pop()
self._sift_down(1)
return returnvalue
    def get_max(self):
        # In a max-heap the largest element sits at the root (index 0).
        return self.storage[0]
    def get_size(self):
        # Number of elements currently stored in the heap.
        return len(self.storage)
def _bubble_up(self, index):
while index // 2 > 0:
if self.storage[index] < self.storage[index // 2]:
tmp = self.storage[index // 2]
self.storage[index // 2] = self.storage[index]
self.storage[index] = tmp
index = index // 2
def _sift_down(self, index):
max_child = None
if index * 2 + 1 >= self.get_size():
return
elif index * 2 + 2 >= self.get_size():
max_child = index * 2 + 1
elif self.storage[index * 2 + 1] > self.storage[index * 2 + 2]:
max_child = index * 2 + 1
else:
max_child = index * 2 + 2
if self.storage[index] < self.storage[max_child]:
self.storage[index], self.storage[max_child] = self.storage[max_child], self.storage[index]
self._sift_down(max_child)
else:
return
| true |
52a60a755857a5b2ea3e02db790d9f87a4371d75 | Python | ptsiampas/Exercises_Learning_Python3 | /04_Functions/exercise_4.9.2.py | UTF-8 | 902 | 3.921875 | 4 | [] | no_license | __author__ = 'petert'
import turtle
import math
def draw_rectangle(t, w, h):
    """
    Draw a w-by-h rectangle with a turtle-like object.

    :param t: turtle object to move
    :param w: width of rectangle
    :param h: height of rectangle
    :return: none
    """
    # Two passes of (width edge, 90-degree turn, height edge, 90-degree turn).
    for _ in range(2):
        t.forward(w)
        t.left(90)
        t.forward(h)
        t.left(90)

def draw_square(tx, sz):
    """Make turtle ``tx`` draw a square with side length ``sz``."""
    draw_rectangle(tx, sz, sz)
def move_me(tx):
    """Lift the pen, step diagonally by the module-level ``anglmv`` length,
    then put the pen back down, restoring the original heading."""
    tx.penup()
    tx.right(135)
    tx.forward(anglmv)  # anglmv: diagonal step defined at module level
    tx.pendown()
    tx.left(135)
turtle.setup(600,400) # Set the size of the window to 600x400
wn = turtle.Screen() # Set up the window and its attributes
wn.bgcolor("lightgreen")
wn.title("Alex meets function")
alex=turtle.Turtle()
alex.pensize(3)
size=10
anglmv=math.hypot(size,size)  # diagonal step used by move_me()
step=1
# Draw ten squares of growing side length, stepping diagonally between them.
for i in range(10):
    draw_square(alex,size*step)
    move_me(alex)
    step+=2
turtle.mainloop() | true |
75edf4ea117b694da920f175e92df75b409f012e | Python | aTakatoNakamura/TrainingProject_DA_TeamOseti | /WordCounter.py | UTF-8 | 593 | 3.875 | 4 | [] | no_license | class WordCounter():
#{"word", num}
counts_ = {}
def addNumOfWord(this,word, num):
if(word in this.counts_):
this.counts_[word] += num
else:
this.counts_[word] = num
def getRanking(this):
return sorted(this.counts_.items(), key=lambda x:x[1], reverse=True)
    def printAll(this):
        # Dump the raw {word: count} mapping for debugging.
        print(this.counts_)
if __name__ == "__main__":
    # Smoke test: repeated words accumulate ("aaa" ends up with 200).
    hoge = WordCounter()
    hoge.addNumOfWord("aaa", 100)
    hoge.addNumOfWord("bbb", 100)
    hoge.addNumOfWord("aaa", 100)
    hoge.addNumOfWord("ccc", 150)
print(hoge.getRanking()) | true |
041ee6b7d7d6230016394c5555028f1306e2e67d | Python | luyashuangoo/taobao | /taobao/Taobao_scraping.py | UTF-8 | 2,590 | 2.546875 | 3 | [] | no_license | # usr/bin/env python3
# -*-coding:UTF-8-*-
import pymongo
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from pyquery import PyQuery as pq
import csv
import random
import time
# Module-wide headless Chrome driver plus a 10-second explicit-wait helper.
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
browser = webdriver.Chrome(chrome_options=chrome_options)
wait = WebDriverWait(browser, 10)
def index_page(page):
    """
    Scrape one index (listing) page of the shop.
    :param page: page number to open (1-based)
    """
    print('正在爬取第', page, '页')
    try:
        url = 'https://daphne.tmall.com/search.htm?spm=a1z10.1-b-s.w5001-16530736392.6.e57c3a0MYNRyo&scene=taobao_shop'
        browser.get(url)
        if page > 1:
            # Locate the page-number input box and the submit button.
            input = wait.until(
                EC.presence_of_element_located((By.CSS_SELECTOR, 'div.pagination>form>input:nth-of-type(4)')))
            print(11111)
            submit = wait.until(
                EC.element_to_be_clickable((By.CSS_SELECTOR, 'div.pagination form button')))
            print(111)
            input.clear()
            input.send_keys(page)
            submit.click()
            # Wait until the pagination widget shows the requested page.
            wait.until(
                EC.text_to_be_present_in_element((By.CSS_SELECTOR, 'div.pagination a.page-cur'), str(page)))
            print(11111)
        wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'div.J_TItems div.item5line1 dl.item')))
        get_products()
    except TimeoutException:
        # On timeout, sleep a random few seconds and retry the same page.
        print('wrong')
        time.sleep(random.randint(3,6))
        index_page(page)
def get_products():
    """
    Extract the product rows from the current page and append them
    (title, price, sales count) to data.csv.
    """
    html = browser.page_source
    doc = pq(html)
    items = doc('div.J_TItems div.item5line1 dl.item dd.detail').items()
    # Fix: use a context manager so the CSV handle is always closed; the
    # original opened the file and never closed it (descriptor leak when
    # called once per page).
    with open('data.csv', 'a', newline='') as out:
        csv_write = csv.writer(out, dialect='excel')
        for item in items:
            title= item.find('a').text(),
            price= item.find('.cprice-area').text().replace('¥ ',''),
            sale=item.find('.sale-area').text().replace('总销量:','')
            product=[title[0],price[0],sale]
            print(product)
            csv_write.writerow(product)
            print('写入成功')
def main():
    """
    Iterate over every page in the range and scrape it.
    """
    for i in range(7,18):
        # Randomised delay so the crawl looks like normal browsing.
        time_wait=random.randint(10,20)
        time.sleep(time_wait)
        index_page(i)
    browser.close()
if __name__ == '__main__':
main() | true |
dcc9e6fddfb094520058056a468b4f134a7e4e48 | Python | pweb6304/two-qubit-simulator | /two_qubit_simulator/initial_state.py | UTF-8 | 471 | 3.34375 | 3 | [
"MIT"
def random_quantum_state():
    """
    Return a random, normalised single-qubit state.

    The qubit is a 2x1 column vector with complex amplitudes a+ib and
    c+id, where a, b, c, d are drawn uniformly at random; dividing by the
    norm guarantees the state is normalised.
    """
    import numpy as np
    import random
    re0 = random.random()
    im0 = random.random()
    re1 = random.random()
    im1 = random.random()
    amp0 = re0 + 1j * im0
    amp1 = re1 + 1j * im1
    norm = np.sqrt(re0 ** 2 + im0 ** 2 + re1 ** 2 + im1 ** 2)
    return np.array([[amp0], [amp1]]) / norm
35d2e8f1eabe8473475f107acd8c8afd733252d4 | Python | leeo1116/PyCharm | /Algorithms/LintCode/140_fast_power.py | UTF-8 | 310 | 3.203125 | 3 | [] | no_license | class Solution(object):
def fast_power(self, a, b, n):
if n == 1:
return a%b
if n == 0:
return 1%b
p = self.fast_power(a, b, n//2)
p = (p*p)%b
if n%2 == 1:
p = (p*a)%b
return p
# Smoke test: 3**5 % 7 == 243 % 7 == 5.
s = Solution()
print(s.fast_power(3, 7, 5))
| true |
da3e011fb4836680918b11680daac114c11867f3 | Python | DaTino/CS4500 | /HW1/HW1Maiocco.py | UTF-8 | 4,348 | 4.09375 | 4 | [] | no_license | # HW1 by Alberto Maiocco
# CS4500 9/4/2019
# This program plays a game on a strongly directed graph (diagraph).
# Using a file that specifies a diagraph, this
# program will determine the following:
# 1. The number of circles used in the game.
# 2. The number of arrows used in the game.
# 3. The total number of checks on all the circles combined.
# 4. The average number of checks in a circle marked during the game.
# 5. The maximum number of checks in any one circle.
# We assume the input file describes a diagraph, but we will check
# that it is correctly formatted.
#import sys to exit from exceptions
import sys;
#Open the input file in read mode. Exit if file doesn't exist.
try:
    infile = open("HW1infile.txt", "r");
# NOTE(review): a bare except also swallows KeyboardInterrupt/SystemExit.
except:
    sys.exit("Could not open file. Does it exist? Exiting.");
#Read lines from infile and initialize relevant variables.
#specs is a list containing the lines of the infile.
#n is the number of circles between 2 and 10 inclusive
#k is the number of arrows between circles
#kList is a list containing the arrow specifications.
specs = infile.readlines();
infile.close();
#Check that file has correct format.
if int(specs[0]) < 2 or int(specs[0]) > 10:
    sys.exit("Incorrect number of vertices in file. Must be between 2 and 10 inclusive. Exiting.");
if len(specs[2:]) != int(specs[1]):
    sys.exit("Number of arrows does not match number of arrow specifications. Exiting.");
#Set n as nodes in game graph and k as arrows in game. kList holds the
#arrow specifications.
n = int(specs[0]);
k = int(specs[1]);
kList = [];
#for the list of arrows, we create ordered pairs by stripping the whitespace
#from the file's lines and push the numbers into tuples. kList becomes a
#list of tuples signifying the arrows' directions.
for i in specs[2:]:
    i = i.replace("\n", "");
    temp = i.split(" ");
    kList.append((int(temp[0]), int(temp[1])));
#creating an adjacency matrix. we'll use depth first search to visit each node.
graph = {};
#parsing tuple list to make matrix.
for i in range(n):
    graph[i+1] = [];
for i in kList:
    graph[i[0]].append(i[1]);
#Since we have a directed diagraph, we can use depth first search to determine
#which nodes we visit. We use iterave as opposed to recursive depth first search
#because, as a diagraph, the recursive algorithm will not end.
#This version of the algorithm based off of Koder Dojo
#https://www.koderdojo.com/blog/depth-first-search-in-python-recursive-and-non-recursive-programming
#modified by me to generate the number of times a node is visited on our graph.
def depthFirstIterative(graph, start):
    """Iteratively traverse ``graph`` from ``start`` and return, per node,
    the number of times it was popped from the stack ("checks").

    Relies on the module-level ``n`` for the node count.  Already-visited
    nodes still get their pop counted before being skipped, which is what
    produces the game's per-circle check totals.
    """
    stack = [start];
    path = [];
    #Create a list of zeros to hold visited node information
    nVisited = [];
    for i in range(n):
        nVisited.append(int(0));
    while stack:
        vertex = stack.pop();
        nVisited[vertex-1] += 1;
        if vertex in path:
            continue;
        path.append(vertex);
        for neighbor in graph[vertex]:
            stack.append(neighbor);
    return nVisited;
#returned list of number of times each node visited.
gameVals = depthFirstIterative(graph, 1);
#using the list of node visits to find our game data
totalChecks = 0;
for i in gameVals:
    totalChecks += i;
avgChecks = totalChecks/n;
maxChecks = max(gameVals);
#Setting the output data. We use append() purely for code readability.
#This is all output formatting.
outputData = [["1. Number of Circles:", f"{n}"]];
outputData.append(["2. Number of Arrows:", f"{k}"]);
outputData.append(["3. Total # of Checks:", f"{totalChecks}"]);
outputData.append(["4. Average # of Checks:", f"{avgChecks}"]);
outputData.append(["5. Maximum # of Checks:", f"{maxChecks}"]);
col_width = max(len(word) for row in outputData for word in row) + 2; # padding
#More screen output formatting.
print(f"Game Results:");
print(f"*************************");
for row in outputData:
    print("".join(word.ljust(col_width) for word in row));
print(f"*************************");
#outputting to file
outfile = open("HW1MaioccoOutfile.txt", "w");
outfile.write(f"Game Results\n");
outfile.write(f"*************************\n");
for row in outputData:
    outfile.write("".join(word.ljust(col_width) for word in row));
    outfile.write("\n");
outfile.write(f"*************************\n");
outfile.close();
input("Press Enter to terminate.");
| true |
397869269206883b3bf0ceae9b94b785d53d49b2 | Python | mks-learning/intro-to-python | /func_lab.py | UTF-8 | 570 | 3.859375 | 4 | [] | no_license | # define a function that countrs the number of letters in a given string
def countLetters(words):
    """Return the total number of characters across all strings in ``words``."""
    total = 0
    for word in words:
        total += len(word)
    return total
def first(word):
    """Return the first character of ``word``."""
    return word[0]

def acro(word):
    """Return the acronym (upper-cased initials) of the words in ``word``.

    Bug fix: the original ignored its parameter and always read the
    module-level ``sentence`` list instead.
    """
    return ''.join(map(first, word)).upper()
sentence = ['All', 'good', 'and', 'bad', 'things', 'come', 'to', 'an', 'end']
# firstlet = list(map(first, sentence))
# acro = acro.upper()
print(sentence)
# print(firstlet)
# Note: this rebinds the module name ``acro`` from the function to its result.
acro = acro(sentence)
print(acro)
# print(ACRO)
| true |
e6a22230a7b55e9cb1a3c09aa17f8aaa4e7ca5fd | Python | poposhi/ibm_milp | /milp_py/milp_py/milp_py.py | UTF-8 | 2,492 | 2.640625 | 3 | [] | no_license |
# coding=utf-8
# 很重要的教學網站 https://medium.com/opex-analytics/optimization-modeling-in-python-pulp-gurobi-and-cplex-83a62129807a
import pandas as pd
from pandas import DataFrame, Series
# make matplotlib plots appear inside the notebook
import matplotlib.pyplot as plt
#%matplotlib inline
from pylab import rcParams
rcParams['figure.figsize'] = 20, 10 ############################ <-Use this to change the plot
#from IPython.core.display import HTML
#HTML("<style>.container { width:100%; }</style>")
'''準備資料 有四種發電來源 利用pandas做成表格 不同發電來源排碳成本的表格 '''
energies = ["coal", "gas", "diesel", "wind"]
df_energy = DataFrame({"co2_cost": [30, 5, 15, 0]}, index=energies)
'''有很多部機組 不同的單位有不同的特性 variable_cost 不知道是什麼東西
變成表格 key 會變成row 直軸的index 每個機組的名稱 最後變成直軸是每個機組的名稱 橫軸是機組特性 (最大最小功率)
'''
all_units = ["coal1", "coal2",
"gas1", "gas2", "gas3", "gas4",
"diesel1", "diesel2", "diesel3", "diesel4"]
ucp_raw_unit_data = {
"energy": ["coal", "coal", "gas", "gas", "gas", "gas", "diesel", "diesel", "diesel", "diesel"],
"initial" : [400, 350, 205, 52, 155, 150, 78, 76, 0, 0],
"min_gen": [100, 140, 78, 52, 54.25, 39, 17.4, 15.2, 4, 2.4],
"max_gen": [425, 365, 220, 210, 165, 158, 90, 87, 20, 12],
"operating_max_gen": [400, 350, 205, 197, 155, 150, 78, 76, 20, 12],
"min_uptime": [15, 15, 6, 5, 5, 4, 3, 3, 1, 1],
"min_downtime":[9, 8, 7, 4, 3, 2, 2, 2, 1, 1],
"ramp_up": [212, 150, 101.2, 94.8, 58, 50, 40, 60, 20, 12],
"ramp_down": [183, 198, 95.6, 101.7, 77.5, 60, 24, 45, 20, 12],
"start_cost": [5000, 4550, 1320, 1291, 1280, 1105, 560, 554, 300, 250],
"fixed_cost": [208.61, 117.37, 174.12, 172.75, 95.353, 144.52, 54.417, 54.551, 79.638, 16.259],
"variable_cost": [22.536, 31.985, 70.5, 69, 32.146, 54.84, 40.222, 40.522, 116.33, 76.642],
}
df_units = DataFrame(ucp_raw_unit_data, index=all_units)
print(df_units.index)
print(df_units["coal1"])
# Add a derived co2-cost column by merging with df_energies
# Use energy key from units and index from energy dataframe
df_up = pd.merge(df_units, df_energy, left_on="energy", right_index=True)
df_up.index.names=['units']
# Display first rows of new 'df_up' Data Frame
df_up.head()
| true |
2b088b8cfe9eefd1e3d3e6c5fd288cc2019c2e0f | Python | Alex-McEvoy/Sprint-Challenge--Graphs | /graph_dfs_debug/graph.py | UTF-8 | 1,840 | 3.59375 | 4 | [] | no_license | """
Simple graph implementation compatible with BokehGraph class.
"""
class Vertex:
    """Graph node carrying a display label and a connected-component id."""

    def __init__(self, label, component=-1):
        self.label = str(label)  # always stored as text for display
        self.component = component  # -1 means "not yet assigned"

    def __repr__(self):
        return 'Vertex: ' + self.label
"""Trying to make this Graph class work..."""
class Graph:
    """Adjacency-set graph keyed by vertex objects; can label connected
    components onto each vertex's ``component`` attribute."""

    def __init__(self):
        self.vertices = {}
        self.components = 0

    def add_vertex(self, vertex, edges=()):
        """Register ``vertex`` with an initial set of neighbours."""
        self.vertices[vertex] = set(edges)

    def add_edge(self, start, end, bidirectional=True):
        """Add edge start->end (and end->start unless bidirectional=False)."""
        self.vertices[start].add(end)
        if bidirectional:
            self.vertices[end].add(start)

    def dfs(self, start, target=None):
        """Iterative depth-first traversal; returns vertices in visit order.

        ``target`` is accepted for interface compatibility but unused.
        """
        stack = [start]
        visited = []
        while len(stack) > 0:
            current = stack.pop()
            visited.append(current)
            stack.extend([vertex for vertex in self.vertices[current] if vertex not in visited and vertex not in stack])
        return visited

    def graph_rec(self, start, visited=None):
        """Recursive DFS returning the visit list.

        Bug fix: ``visited`` used to default to a shared mutable list, so
        results leaked between independent calls; default to None and
        create a fresh list per top-level call instead.
        """
        if visited is None:
            visited = []
        visited.append(start)
        for vertex in self.vertices[start]:
            print("Visited", visited)
            if vertex not in visited:
                self.graph_rec(vertex, visited)
        return visited

    def find_components(self):
        """Assign a component id to every vertex object and record the
        total number of components in ``self.components``."""
        visited = set()
        current_component = 0
        for vertex in self.vertices:
            if vertex not in visited:
                reachable = self.dfs(vertex)
                for other_vertex in reachable:
                    other_vertex.component = current_component
                current_component += 1
                visited.update(reachable)
        self.components = current_component
ad1c3bb9903c229ba95550e609f8d214ae4e4455 | Python | Gabriel3421/RBF_separator_plane | /Plano_Separador_RBF.py | UTF-8 | 4,054 | 3.25 | 3 | [
"MIT"
] | permissive | '''
Aluno: Gabriel de Souza Nogueira da Silva
Matricula: 398847
'''
import time
import re
import random
import numpy as np
import math
from scipy import stats
from sklearn.cluster import KMeans
import matplotlib.pyplot as plp
# --- Hyper-parameter and global state for the RBF network ---
num_neuronio_oculto = 100  # number of hidden (RBF) neurons
centroides = []  # RBF centres, filled in by cria_centroides()
cont = 0
x1 = []
x2 = []
y = []
mat_att_treino = np.ones((1001, 2))
mat_resp_treino = np.ones((1001, 1))
mat_plano_separador = np.ones((100, 2))
# Read the two-moons dataset: two feature columns plus one label column.
dados = open("twomoons.dat", "r")
for line in dados:
    # split the features (x) from the desired output (d)
    line = line.strip() # drop the trailing \n
    line = re.sub('\s+', ',', line) # replace whitespace runs with commas
    xa, xb, y1 = line.split(",") # split on commas into 3 values
    x1.append(float(xa))
    x2.append(float(xb))
    y.append(float(y1))
dados.close()
def cria_mat_all():#build one matrix holding every sample from the data file
    """Return the full 1001x3 dataset (x1, x2, label) with rows shuffled."""
    mat = np.ones((1001, 3))
    for i in range(0, 1001):
        mat[i][0] = x1[i]
        mat[i][1] = x2[i]
        mat[i][2] = y[i]
    #shuffle all rows of the matrix
    mat = np.random.permutation(mat)
    return mat
def cria_centroides():#compute the RBF centres with k-means
    """Cluster the inputs into num_neuronio_oculto centres; stores the
    result in the global ``centroides``."""
    global centroides
    mat_all = cria_mat_all()
    mat_dados = np.ones((1001, 2))
    #keep only the two feature columns of the full matrix
    for i in range(0, 1001):
        for j in range(0, 2):
            mat_dados[i][j] = mat_all[i][j]
    kmeans = KMeans(n_clusters=num_neuronio_oculto,
                    random_state=0).fit(mat_dados)
    centroides = kmeans.cluster_centers_
def cria_mat_att_e_resp_treino_e_teste():
    """Fill the global training attribute/response matrices from the dataset."""
    global mat_att_treino, mat_resp_treino
    mat_all = cria_mat_all()
    # TRAINING split: columns 0-1 are attributes, column 2 is the response.
    for i in range(0, 1001):
        for j in range(0, 2):
            mat_att_treino[i][j] = mat_all[i][j]
    for i in range(0, 1001):
        mat_resp_treino[i][0] = mat_all[i][2]
def neuronios_ocultos():#run every input through the hidden activations
    """Return G (1001 x hidden+1): Gaussian RBF outputs; column 0 is a
    constant bias of 1."""
    global centroides
    G = np.ones((1001, num_neuronio_oculto+1))
    #compute the outputs of the hidden neurons
    for j in range(0, 1001):
        for k in range(1, num_neuronio_oculto+1):
            G[j][k] = math.exp((-1)*(((mat_att_treino[j][0] - centroides[k-1][0])**2
                                      + (mat_att_treino[j][1] - centroides[k-1][1])**2)))
    return G
def neuronio_saida_W():# solve for the output-layer weights W
    """Least-squares output weights via the normal equations:
    W = (G^T G)^-1 G^T d."""
    global mat_resp_treino
    G = neuronios_ocultos()
    d = mat_resp_treino
    W = np.dot(np.dot(np.linalg.inv(
        np.dot(np.transpose(G), G)), np.transpose(G)), d)
    return W
def testa():
    """Sweep a grid over the input space, collect points where the network
    output is near zero (the decision boundary), and plot them over the data."""
    global num_neuronio_oculto
    G = np.ones((1, num_neuronio_oculto+1))
    x = []
    y = []
    aux = 0
    print('Calculando valores para varrer a area!!!')
    # Build the grid of x values (0..7) and y values (2.3..4.8).
    while aux <= 7:
        aux += 0.028
        x.append(aux)
    aux = 2.3
    while aux <= 4.8:
        aux += 0.01
        y.append(aux)
    #time.sleep(0.8)
    print('Valores Calculados')
    print('Gerando grafico de saida...')
    W = neuronio_saida_W()
    x_ = []
    y_ = []
    for i in range(0, len(x)):
        for j in range(0, len(y)):
            # Evaluate every hidden RBF at the grid point (x[i], y[j]).
            for k in range(1, num_neuronio_oculto+1):
                G[0][k] = math.exp((-1)*((x[i] - centroides[k-1][0])**2
                                         + (y[j] - centroides[k-1][1])**2))
            resp_rede = np.dot(G, W)
            # If we get here the network is "undecided" about this point,
            # so keep it as part of the separating curve to display later.
            if resp_rede <= 0.05 and resp_rede >= -0.05:
                x_.append(x[i])
                y_.append(y[j])
    #take the dataset values for plotting (first/second moon halves)
    X1 = x1[0:501]
    Y1 = x2[0:501]
    X2 = x1[502:]
    Y2 = x2[502:]
    #Plot the separating "hyperplane"
    plp.title("Num. neurônios ocultos: "+ str(num_neuronio_oculto))
    plp.plot(x_, y_, color='black')
    plp.scatter(X1, Y1, marker=".", color='red')
    plp.scatter(X2, Y2, marker=".")
    plp.show()
# Train (centres + output weights) and then plot the decision boundary.
cria_centroides()
cria_mat_att_e_resp_treino_e_teste()
testa()
| true |
4c43694c8b4609adcb7bd40b1a78a4e07c1b705f | Python | evg-cv/StampDetectorArduino | /src/stamp/detector.py | UTF-8 | 3,402 | 2.5625 | 3 | [] | no_license | import tensorflow as tf
import cv2
import numpy as np
import time
from settings import STAMP_MODEL_PATH, CONFIDENCE, CUR_DIR, DETECTION_REGION
class StampDetector:
    """Wraps a frozen TensorFlow 1.x object-detection graph for finding
    stamps in images; the model path comes from ``settings``."""
    def __init__(self):
        # Load the frozen inference graph from STAMP_MODEL_PATH and keep a
        # live session plus handles to the input/output tensors.
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(STAMP_MODEL_PATH, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        self.sess = tf.Session(graph=detection_graph)
        self.image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        self.boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
        self.scores = detection_graph.get_tensor_by_name('detection_scores:0')
        self.classes = detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = detection_graph.get_tensor_by_name('num_detections:0')
    def detect_objects(self, image_np):
        """Run one inference pass; returns (boxes, scores, classes, num)."""
        # Expand dimensions since the models expects images to have shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image_np, axis=0)
        # Actual detection.
        return self.sess.run([self.boxes, self.scores, self.classes, self.num_detections],
                             feed_dict={self.image_tensor: image_np_expanded})
    def detect_from_images(self, frame, stamp_top_ret=False):
        """Detect stamps in a BGR ``frame``.

        With ``stamp_top_ret`` True, only DETECTION_REGION is scanned and
        the returned boxes are shifted back into full-frame coordinates.
        Returns (list of [left, top, right, bottom], matching score list).
        """
        if stamp_top_ret:
            frame = frame[DETECTION_REGION[1]:DETECTION_REGION[3], DETECTION_REGION[0]:DETECTION_REGION[2]]
        [frm_height, frm_width] = frame.shape[:2]
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        st_time = time.time()
        (boxes, scores, classes, _) = self.detect_objects(frame_rgb)
        print(f"detection time: {time.time() - st_time}")
        print(scores[0][:3])
        detected_rect_list = []
        detected_scores = []
        for i in range(len(scores[0])):
            if scores[0][i] >= CONFIDENCE:
                # Boxes come back normalised as [ymin, xmin, ymax, xmax].
                left, top = int(boxes[0][i][1] * frm_width), int(boxes[0][i][0] * frm_height)
                right, bottom = int(boxes[0][i][3] * frm_width), int(boxes[0][i][2] * frm_height)
                if stamp_top_ret:
                    detected_rect_list.append([left + DETECTION_REGION[0], top + DETECTION_REGION[1],
                                               right + DETECTION_REGION[0], bottom + DETECTION_REGION[1]])
                else:
                    detected_rect_list.append([left, top, right, bottom])
                detected_scores.append(scores[0][i])
                # cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 1)
                # cv2.imshow("Stamps", cv2.resize(frame, None, fx=0.5, fy=0.5))
                # cv2.waitKey()
        # max_detected_stamp_rect = detected_rect_list[detected_scores.index(max(detected_scores))]
        return detected_rect_list, detected_scores
if __name__ == '__main__':
    import glob
    import os
    # Run the detector over every PNG in the test folder and warn when a
    # single image yields two or more stamp boxes.
    stamp_detector = StampDetector()
    # rect_len = stamp_detector.detect_from_images(frame=cv2.imread("))
    img_files = glob.glob(os.path.join(CUR_DIR, 'new model', 'Bottom', "*.png"))
    for i_file in img_files:
        rect_len, _ = stamp_detector.detect_from_images(frame=cv2.imread(i_file))
        if len(rect_len) >= 2:
            print(f"[WARN] {i_file}: {rect_len}")
| true |
d2485c46bd96acec73b3bfc39ac1c049e0b83d31 | Python | lnarasim/250_problems | /pyproblems/sep_odd_eve.py | UTF-8 | 739 | 3.890625 | 4 | [] | no_license | '''This program seperates odd and even number from the list'''
from pyproblems.utility import is_int
def sep_odd_eve(list_int):
    '''Split a list of integers into (odd_tuple, even_tuple).

    Raises TypeError when the argument is not a list or contains non-integers.
    Original element order is preserved within each tuple.
    '''
    if not isinstance(list_int, list):
        raise TypeError("unsupported format, pass a list")
    for item in list_int:
        if not is_int(item):
            raise TypeError("unsupported format, pass integers inside the list")
    odds = [value for value in list_int if value % 2 != 0]
    evens = [value for value in list_int if value % 2 == 0]
    return tuple(odds), tuple(evens)
| true |
03ba6ec1fbdc8812d0b629f9cfef7610919b3060 | Python | Air-xin/exercise_ftp | /ftp_server.py | UTF-8 | 2,804 | 3.078125 | 3 | [] | no_license | """
ftp 文件服务器
【1】 分为服务端和客户端,要求可以有多个客户端同时操作。
【2】 客户端可以查看服务器文件库中有什么文件。
【3】 客户端可以从文件库中下载文件到本地。
【4】 客户端可以上传一个本地文件到文件库。
【5】 使用print在客户端打印命令输入提示,引导操作
"""
# 服务端
from socket import *
from threading import Thread
import sys, os
import time
ADDR = ("0.0.0.0", 8800)
DIR = "./file_dir/"
class MyThread(Thread):
    """Serve one connected FTP client.

    Single-letter protocol from the client: L=list files, G=download a file,
    P=upload a file, Q=quit.
    """

    def __init__(self, tcp_connect):
        super().__init__()
        self.tcp_connect = tcp_connect
        # NOTE(review): the listing is snapshotted once per connection, so files
        # uploaded later (by this or another client) will not appear in do_list.
        self.file_list = os.listdir(DIR)

    def do_list(self):
        """Send the file listing: 'Y' then newline-joined names, or 'N' when empty."""
        if not self.file_list:
            self.tcp_connect.send(b"N")
        else:
            self.tcp_connect.send(b"Y")
            time.sleep(0.1)  # crude separation so the flag and payload arrive apart
            data = "\n".join(self.file_list)
            self.tcp_connect.send(data.encode())

    def put_file(self):
        """Send a requested file to the client (serves the client's download)."""
        data = self.tcp_connect.recv(20).decode()
        if data not in self.file_list:
            self.tcp_connect.send(b"N")
        else:
            f = open(DIR + data, 'rb')
            self.tcp_connect.send(b"Y")
            time.sleep(0.1)
            while True:
                data = f.read(1024)
                if len(data) < 1024:
                    # A short read marks the final chunk.
                    self.tcp_connect.send(data)
                    f.close()
                    break
                else:
                    self.tcp_connect.send(data)

    def get_file(self):
        """Receive an uploaded file from the client (serves the client's upload)."""
        data = self.tcp_connect.recv(20).decode()
        if data in self.file_list:
            self.tcp_connect.send(b"Y")
            msg = self.tcp_connect.recv(20).decode()
            if msg == 'N':
                return
        else:
            self.tcp_connect.send(b"N")
        f = open(DIR + data, 'wb')
        while True:
            file = self.tcp_connect.recv(1024)
            # BUGFIX: the original tested len(data) -- the file *name*, which is
            # always shorter than 1024 -- so the loop stopped after the first
            # received chunk.  The end-of-transfer test must look at the chunk.
            if len(file) < 1024:
                f.write(file)
                f.close()
                break
            f.write(file)

    def run(self):
        """Dispatch single-letter commands until the client disconnects."""
        while True:
            data = self.tcp_connect.recv(20).decode()
            if not data:
                break
            if data == "L":
                self.do_list()
            elif data == 'G':
                self.put_file()
            elif data == 'P':
                self.get_file()
            elif data == 'Q':
                break
def main():
    """Listen on ADDR and hand every accepted connection to a daemon MyThread."""
    tcp_sock = socket()
    tcp_sock.bind(ADDR)
    tcp_sock.listen(2)
    while True:
        try:
            tcp_connect, addr = tcp_sock.accept()
            print("客户端地址:", addr)
        except:
            # Any accept failure (e.g. Ctrl-C) shuts the server down.
            sys.exit("服务退出")
        # Daemon threads die with the main process; no join bookkeeping needed.
        t = MyThread(tcp_connect)
        t.setDaemon(True)
        t.start()
if __name__ == '__main__':
    main()
| true |
e9c18b214a13ba757f72ac24a4f03c4b9ad6054c | Python | WeberJulian/3D-scanner | /Optimization.py | UTF-8 | 1,085 | 2.84375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 29 13:27:34 2018
@author: Julian Weber
"""
import PointCloudHandler as pch
import numpy as np
def moveVect(w, n):
    """Return a zero vector the same length as `w` with component n set to w[n].

    Used to probe one coordinate at a time for finite-difference gradients.
    Generalised from the original hard-coded 6-dimensional vector so any
    parameter length works; 6-vector behaviour is unchanged.
    """
    vect = [0] * len(w)
    vect[n] = w[n]
    return vect
def gradiant(cloud, cloud2, every, w, prev):
    """Finite-difference gradient of the cloud-alignment loss around w.

    `prev` is the loss at the current w; each component perturbs one
    coordinate via moveVect and divides the loss delta by that coordinate.
    """
    components = []
    for axis in range(6):
        moved = pch.move(cloud2, moveVect(w, axis))
        slope = (prev - pch.evaluation(cloud, moved, every)) / w[axis]
        components.append(slope)
    return np.array(components)
def gradiantDescent(w, n, threshold, every, cloud, cloud2, maxIteration):
    """Minimise the cloud-alignment loss by finite-difference gradient descent.

    :param w: 6-component transform parameter vector
    :param n: learning rate
    :param threshold: stop once the loss drops to this value
    :param every: subsampling parameter forwarded to the gradient evaluation
    :param maxIteration: hard cap on iterations
    :return: the parameter vector from *before* the last step, i.e. the last
        one whose loss had not increased.
    """
    i = 0
    newDist = pch.evaluation(cloud, pch.move(cloud2,w), 100)
    prevDist = newDist
    history = [w]
    print("itération %d, loss : %f"%(i, newDist))
    # Loop while the loss is non-increasing, under the iteration cap and
    # still above the target threshold.
    while prevDist >= newDist and i < maxIteration and newDist > threshold:
        w = w - n * gradiant(cloud, cloud2, every, w, newDist)
        prevDist = newDist
        newDist = pch.evaluation(cloud, pch.move(cloud2,w), 100)
        history.append(w)
        i += 1
        print("itération %d, loss : %f"%(i, newDist))
    # history[i] is the latest (possibly worse) candidate; history[i-1] is the
    # previous one.  NOTE(review): if the loop never runs (i == 0) this returns
    # history[-1], which is the initial w -- presumably intentional; confirm.
    return history[i-1]
| true |
5263f34f84791ccd314e528b7afa75043e53724a | Python | SarthakPati-programmer/Best_Out_Of_Best_In_Python-Calculator | /Calculator(version-9.0).py | UTF-8 | 14,977 | 3.78125 | 4 | [] | no_license | from tkinter import *
from math import *
def btnclick(nums):
    """Append the pressed key's value to the pending expression and echo it.

    `operator` is the module-level expression string; `text_input` is the Tk
    StringVar backing the calculator display.
    """
    global operator
    try:
        operator += str(nums)
        text_input.set(operator)
    except Exception:
        text_input.set("Input Error!!!")
def btncleardis():
    """Clear both the pending expression and the calculator display."""
    global operator
    try:
        operator = ""
        text_input.set("")
    except Exception:
        text_input.set("Input Error!!!")
def btndecimal():
    """Append a decimal point to the pending expression and show it."""
    global operator
    try:
        operator += "."
        text_input.set(operator)
    except Exception:
        text_input.set("Input Error!!!")
def square():
    """Immediately square the pending expression (rounded to 5 d.p.)."""
    global operator
    try:
        operator = str(round(eval(operator + "**2"), 5))
        text_input.set(operator)
    except Exception:
        text_input.set("Input Error!!!")
def power():
    """Queue an exponentiation operator; the display shows '^'."""
    global operator
    try:
        operator = operator + "**"
        text_input.set("^")
    except Exception:
        text_input.set("Input Error!!!")
def sqroot():
    """Queue a square-root operation (x**(1/2)); the display shows '√'."""
    global operator
    try:
        operator = operator + "**(1/2)"
        text_input.set("√")
    except Exception:
        text_input.set("Input Error!!!")
def percent():
    """Queue a percent operation (divide by 100); the display shows '%'."""
    global operator
    try:
        operator = operator + "/100"
        text_input.set("%")
    except Exception:
        text_input.set("Input Error!!!")
def tenpow():
    """Queue scientific notation: multiply by 10 raised to the next entry."""
    global operator
    try:
        operator = operator + "*(10)**"
        text_input.set("*10^x")
    except Exception:
        text_input.set("Input Error!!!")
def facto():
    """Replace the pending expression with its factorial and show the result.

    Empty, non-integer or negative input displays "Input Error!!!".  The
    pending expression is always cleared afterwards, like the other
    unary-result buttons.
    """
    global operator, op
    try:
        value = int(operator)
        if value >= 0:
            # `factorial` comes from the file's `from math import *`.  This
            # replaces the original hand-rolled loop, whose `operator == 0`
            # branch compared a string against an int and was dead code.
            sumup = factorial(value)
            text_input.set(sumup)
            op = sumup
        else:
            text_input.set("Input Error!!!")
    except Exception:
        text_input.set("Input Error!!!")
    operator = ""
def pie():
    """Append the constant pi to the pending expression; display shows 'π'."""
    global operator
    import math
    try:
        operator += str(math.pi)
        text_input.set("π")
    except Exception:
        text_input.set("Input Error!!!")
def tpie():
    """Append the constant 2*pi to the pending expression; display shows '2π'."""
    global operator
    import math
    try:
        operator += str(2 * math.pi)
        text_input.set("2π")
    except Exception:
        text_input.set("Input Error!!!")
def eul():
    """Append Euler's number e to the pending expression; display shows 'e'."""
    global operator
    import math
    try:
        operator += str(math.e)
        text_input.set("e")
    except Exception:
        text_input.set("Input Error!!!")
def sine():
    """Show sin(x) of the pending value, with x in degrees (5 d.p.); clears input."""
    global operator, op
    try:
        result = str(round(sin(radians(float(operator))), 5))
        op = result
        text_input.set(result)
    except Exception:
        text_input.set("Input Error!!!")
    operator = ""
def cosine():
    """Show cos(x) of the pending value, with x in degrees (5 d.p.); clears input."""
    global operator, op
    try:
        result = str(round(cos(radians(float(operator))), 5))
        op = result
        text_input.set(result)
    except Exception:
        text_input.set("Input Error!!!")
    operator = ""
def tangent():
    """Show tan(x) of the pending value, with x in degrees (5 d.p.); clears input."""
    global operator, op
    try:
        result = str(round(tan(radians(float(operator))), 5))
        op = result
        text_input.set(result)
    except Exception:
        text_input.set("Input Error!!!")
    operator = ""
def isine():
    """Show arcsin of the pending value in degrees (3 d.p.); clears input.

    Values outside [-1, 1] raise in asin and display "Input Error!!!".
    """
    global operator, op
    try:
        result = str(round(degrees(asin(float(operator))), 3))
        op = result
        text_input.set(result)
    except Exception:
        text_input.set("Input Error!!!")
    operator = ""
def icosine():
    """Show arccos of the pending value in degrees (3 d.p.); clears input.

    Values outside [-1, 1] raise in acos and display "Input Error!!!".
    """
    global operator, op
    try:
        result = str(round(degrees(acos(float(operator))), 3))
        op = result
        text_input.set(result)
    except Exception:
        text_input.set("Input Error!!!")
    operator = ""
def itangent():
    """Show arctan of the pending value in degrees (3 d.p.); clears input."""
    global operator, op
    try:
        result = str(round(degrees(atan(float(operator))), 3))
        op = result
        text_input.set(result)
    except Exception:
        text_input.set("Input Error!!!")
    operator = ""
def log_10():
    """Show log base 10 of the pending value (5 d.p.); clears input."""
    global operator, op
    try:
        result = str(round(log10(float(operator)), 5))
        op = result
        text_input.set(result)
    except Exception:
        text_input.set("Input Error!!!")
    operator = ""
def hsine():
    """Show the hyperbolic sine of the pending value (5 d.p.); clears input."""
    global operator, op
    try:
        result = str(round(sinh(float(operator)), 5))
        op = result
        text_input.set(result)
    except Exception:
        text_input.set("Input Error!!!")
    operator = ""
def hcosine():
    """Show the hyperbolic cosine of the pending value (5 d.p.); clears input."""
    global operator, op
    try:
        result = str(round(cosh(float(operator)), 5))
        op = result
        text_input.set(result)
    except Exception:
        text_input.set("Input Error!!!")
    operator = ""
def htangent():
    """Show the hyperbolic tangent of the pending value (5 d.p.); clears input."""
    global operator, op
    try:
        result = str(round(tanh(float(operator)), 5))
        op = result
        text_input.set(result)
    except Exception:
        text_input.set("Input Error!!!")
    operator = ""
def log_e():
    """Show the natural logarithm of the pending value (5 d.p.); clears input."""
    global operator, op
    import math
    try:
        result = str(round(log(float(operator), math.e), 5))
        op = result
        text_input.set(result)
    except Exception:
        text_input.set("Input Error!!!")
    operator = ""
def croot():
    """Replace the pending expression with its cube root (5 d.p.).

    Consistency fix: square() writes its result back into `operator`, but
    croot originally left `operator` untouched, so keys pressed afterwards
    operated on the pre-root value while the display showed the root.
    """
    global operator, op
    try:
        operator = str(round(eval(operator + "**(1/3)"), 5))
        op = operator
        text_input.set(operator)
    except Exception:
        text_input.set("Input Error!!!")
def btnEqualsInput():
    """Evaluate the pending expression, display it rounded to 5 d.p., store it.

    NOTE(review): eval() executes whatever string is in `operator`; this is
    only safe while input comes exclusively from the calculator's own buttons.
    The try/except also guards against division by zero and malformed input.
    """
    global operator, op
    try:
        result = str(round(eval(operator), 5))
        text_input.set(result)
        op = result
    except Exception:
        text_input.set("Input Error!!!")
    operator = ""
def His():
    """Append the most recent result (`op`, the 'history' value) to the expression."""
    global op, operator
    try:
        operator += str(op)
        text_input.set(operator)
    except Exception:
        text_input.set("Input Error!!!")
cal = Tk()
cal.title("Calculator")
# Window title.
operator = ""
# `operator` accumulates the expression string that btnEqualsInput() will eval().
n=""
# NOTE(review): `n` is never read anywhere below -- looks like leftover state.
text_input = StringVar()
# Tk variable bound to the display Entry; every button handler writes through it.
txtDisplay=Entry(cal,font=('aerial',20,'bold'),textvariable=text_input,bd=30,fg="red",insertwidth=1,bg="Indigo",width=84,justify='right').grid(columnspan=16)
# The calculator screen: a right-justified Entry spanning the top row.
# NOTE(review): .grid() returns None, so txtDisplay holds None, not the Entry.
# fg = text colour of the output
# font = font family, size and weight
# bd = border size of the display box
# insertwidth = cursor width inside the entry
# bg = background colour
# justify = text alignment inside the entry
# columnspan = number of grid columns the display spans
# width = width of the input box in characters
# Every calculator key: (label text, callback, grid row, grid column).
# This table replaces 44 near-identical hand-written Button(...) lines, several
# of which reused the same btnNN variable names (btn33/btn34/btn35 were each
# defined twice).  Those btnNN variables never held the widgets anyway, since
# .grid() returns None.  The per-key lambdas are stored in the table, so there
# is no late-binding closure issue in the loop below.
_BUTTON_SPECS = [
    ("0 ", lambda: btnclick(0), 1, 0),
    ("1 ", lambda: btnclick(1), 1, 1),
    ("2 ", lambda: btnclick(2), 1, 2),
    (" + ", lambda: btnclick("+"), 1, 3),
    ("3 ", lambda: btnclick(3), 2, 0),
    ("4 ", lambda: btnclick(4), 2, 1),
    ("5 ", lambda: btnclick(5), 2, 2),
    (" - ", lambda: btnclick("-"), 2, 3),
    ("6 ", lambda: btnclick(6), 3, 0),
    ("7 ", lambda: btnclick(7), 3, 1),
    ("8 ", lambda: btnclick(8), 3, 2),
    (" * ", lambda: btnclick("*"), 3, 3),
    ("9 ", lambda: btnclick(9), 4, 0),
    ("00", lambda: btnclick("00"), 4, 1),
    (" . ", lambda: btndecimal(), 4, 2),
    (" / ", lambda: btnclick("/"), 4, 3),
    (" C ", lambda: btncleardis(), 1, 4),
    (" = ", lambda: btnEqualsInput(), 2, 4),
    (" ( ", lambda: btnclick("("), 3, 4),
    (" ) ", lambda: btnclick(")"), 4, 4),
    (" ^ ", lambda: power(), 1, 5),
    ("^2", lambda: square(), 2, 5),
    ("2√", lambda: sqroot(), 3, 5),
    ("3√", lambda: croot(), 4, 5),
    ("*10^x", lambda: tenpow(), 1, 6),
    (" % ", lambda: percent(), 2, 6),
    (" MOD ", lambda: btnclick("%"), 3, 6),
    (" Fact! ", lambda: facto(), 4, 6),
    ("G.I.F", lambda: btnclick("//"), 1, 7),
    (" π ", lambda: pie(), 2, 7),
    (" e ", lambda: eul(), 3, 7),
    ("2π__", lambda: tpie(), 4, 7),
    ("sin", lambda: sine(), 1, 8),
    ("cos", lambda: cosine(), 2, 8),
    ("tan", lambda: tangent(), 3, 8),
    ("His", lambda: His(), 4, 8),
    ("inv(sin)", lambda: isine(), 1, 9),
    ("inv(cos)", lambda: icosine(), 2, 9),
    ("inv(tan)", lambda: itangent(), 3, 9),
    ("log_10()", lambda: log_10(), 4, 9),
    ("sinh()", lambda: hsine(), 1, 10),
    ("cosh()", lambda: hcosine(), 2, 10),
    ("tanh()", lambda: htangent(), 3, 10),
    ("log_e()", lambda: log_e(), 4, 10),
]
for _text, _cmd, _row, _col in _BUTTON_SPECS:
    # Identical styling to the original hand-written Button lines:
    # padx/pady = button size, bd = border, fg/bg = colours, grid = placement.
    Button(cal, padx=16, pady=16, bd=8, fg='Red', font=('aerial', 20, 'bold'),
           text=_text, bg='Dark Blue', command=_cmd).grid(row=_row, column=_col)
cal.mainloop()
# Enter the Tk event loop; this call returns only once the window is closed.
quit(0)
# Exit the interpreter after the window closes.
| true |
48647814a800e541865354a98b1462791875f29f | Python | ndjman7/fastcampus_web | /python/Day4/Compare.py | UTF-8 | 215 | 3.171875 | 3 | [] | no_license | # 1. 정확한 비교 => type
# 2. 상속? => isinstance(객체, 클래스) True or False 값 반환
# 3. 상속? => issubclass(클래스, 클래스) True or False 값 반환
print(isinstance("RaDaeJin",str))
| true |
c0c4b6d7ca202b163122c06b3e6e0f17a98bb6d8 | Python | klaus2015/py_base | /code/day11/作业2老师版.py | UTF-8 | 1,167 | 4.5 | 4 | [] | no_license | """
4. 请用面向对象思想,描述以下场景:
玩家(攻击力)攻击敌人(血量),敌人受伤(掉血),还可能死亡(掉装备,加分)。
敌人(攻击力)攻击玩家,玩家(血量)受伤(掉血/碎屏),还可能死亡(游戏结束)。
"""
class Player:
    """The player: can strike an enemy and take damage; dying ends the game."""

    def __init__(self, atk, hp):
        self.atk = atk
        self.hp = hp

    def attack(self, other):
        """Strike `other`, dealing this player's attack value to it."""
        print("玩家攻击敌人")
        other.damage(self.atk)

    def damage(self, value):
        """Lose `value` hit points; at zero or below the player dies."""
        print("玩家受伤")
        self.hp = self.hp - value
        if self.hp > 0:
            return
        self.__death()

    def __death(self):
        print("玩家死亡")
        print("游戏结束")
class Enemy:
    """An enemy: takes damage from the player and can strike back; dying drops loot."""

    def __init__(self, atk, hp):
        self.atk = atk
        self.hp = hp

    def damage(self, value):
        """Lose `value` hit points; at zero or below the enemy dies."""
        print("敌人受伤")
        self.hp = self.hp - value
        if self.hp > 0:
            return
        self.__death()

    def __death(self):
        print("死亡")
        print("掉装备")
        print("加分")

    def attack(self, other):
        """Strike `other`, dealing this enemy's attack value to it."""
        print("敌人攻击玩家")
        other.damage(self.atk)
# Demo: a 100-attack/1000-hp player strikes a 10-attack/200-hp enemy once.
p01 = Player(100,1000)
e01 = Enemy(10,200)
p01.attack(e01)
| true |
e2f651f3b364b37c175fd257ecc2006ef724c785 | Python | gotostack/python-design-pattern | /samples/behavior_pattern/09_visitor.py | UTF-8 | 1,811 | 3.59375 | 4 | [] | no_license | print '---------------------------------1--------------------------------'
class Person:
    """Abstract visitor-pattern element; subclasses route to a visitor hook."""
    def Accept(self, visitor):
        # Intentionally a no-op; Man/Woman override with the concrete dispatch.
        pass
class Man(Person):
    def Accept(self, visitor):
        # Double dispatch: hand ourselves to the visitor's man-specific hook.
        visitor.GetManConclusion(self)
class Woman(Person):
    def Accept(self, visitor):
        # Double dispatch: hand ourselves to the visitor's woman-specific hook.
        visitor.GetWomanConclusion(self)
class Action:
    """Abstract visitor with one hook per concrete element type."""
    def GetManConclusion(self, concreteElementA):
        pass
    def GetWomanConclusion(self, concreteElementB):
        pass
class Success(Action):
    """Visitor implementing the 'success' behaviour.

    Print statements converted to call form: the original Python 2 syntax
    (`print "A man"`) is a syntax error under Python 3.
    """
    def GetManConclusion(self, concreteElementA):
        print("A man")
    def GetWomanConclusion(self, concreteElementB):
        print("A woman")
class Failure(Action):
    """Visitor implementing the 'failure' behaviour.

    Print statements converted to call form: the original Python 2 syntax
    is a syntax error under Python 3.
    """
    def GetManConclusion(self, concreteElementA):
        print("A failed man")
    def GetWomanConclusion(self, concreteElementB):
        print("A failed woman")
class ObjectStructure:
    """Holds visitable elements and replays a visitor over all of them."""
    def __init__(self):
        self.plist = []
    def Add(self, p):
        # list.append is O(1); the original rebuilt the whole list on every
        # Add via `self.plist + [p]`, which is quadratic overall.
        self.plist.append(p)
    def Display(self, act):
        """Dispatch the visitor `act` to every stored element in insertion order."""
        for p in self.plist:
            p.Accept(act)
def test1():
    # Build the object structure, then run both visitors over every element.
    os = ObjectStructure()
    os.Add(Man())
    os.Add(Woman())
    sc = Success()
    os.Display(sc)
    fl = Failure()
    os.Display(fl)
test1()
# Call-form print is valid in both Python 2 (parenthesised expression) and
# Python 3; the original statement syntax fails to parse under Python 3.
print('---------------------------------2--------------------------------')
class Visitor(object):
    """Abstract visitor for the second (generic) example."""
    def visit(self, sub):
        pass
class MyVisitor(Visitor):
    """Concrete visitor: announces the visited subject.

    Print converted to call form; the original Python 2 statement syntax
    is a syntax error under Python 3.
    """
    def visit(self, sub):
        print("visit the subject: " + sub.getSubject())
class Subject(object):
    """Abstract element: accepts a visitor and exposes a subject string."""
    def accept(self, visitor):
        pass
    def getSubject(self):
        pass
class MySubject(Subject):
    def accept(self, visitor):
        # Double dispatch back into the visitor with ourselves as the element.
        visitor.visit(self)
    def getSubject(self):
        return "love"
def test2():
    # Wire a concrete visitor to a concrete subject and trigger the dispatch.
    visitor = MyVisitor()
    sub = MySubject()
    sub.accept(visitor)
test2()
| true |
152e4e6d4c1b20a2cca9c11e3d93ab166fc298e0 | Python | carldnelson/untitled2 | /audio test.py | UTF-8 | 846 | 3.765625 | 4 | [] | no_license | from pydub import AudioSegment
from pydub.playback import play
song = AudioSegment.from_file("westside.mp3", format("mp3"))
# play(song[1500:3000])
# play(song[:1500])
# play(song[1500:3000])
# play(song[1500:3000])
# initializing number
num = 2519
# printing number
print("The original number is " + str(num))
# using list comprehension
# to convert number to list of integers
digits = [int(x) for x in str(num)]
# printing result
print("The list from number is " + str(digits))
# Build a sentence from the list
# Thousands
Thousands = int(digits[-4])
print(Thousands)
# Hundreds
Hundreds = digits[-3]
print(Hundreds)
# Tens
Tens = 10 * digits[-2] + digits[-1]
print(Tens)
Sentence = ""
if Thousands != 0:
Sentence += str(Thousands) + " Thousand"
if Hundreds != 0:
Sentence += str(Hundreds) + " Hundred"
print(Sentence)
| true |
6d02bb051f26db7b8026851d014dc00e0a65e0fc | Python | AnarCoSol/WildCat | /Components/Deprecated/html_monitor/toolbox/Modules/status_bar.py | UTF-8 | 3,323 | 2.859375 | 3 | [] | no_license | import sys
import time
import signal
class Status_bar():
    """Console spinner with an optional interruptible wait between frames.

    NOTE(review): this class targets Python 2 (`raw_input`) and relies on
    SIGALRM, which is Unix-only.  The `progress` and `dots` list defaults are
    shared mutable defaults; harmless here because they are never mutated.
    """
    def __init__(self,
                 comment = str(),
                 b_comment = str(),
                 progress = ["|","/" ,"-","\\"],
                 dots = [".","..","...","...."],
                 i = int(),
                 k = 3
                 ):
        # progress/dots: frame characters cycled by `i`; `k` is the last index.
        # comment: text after the spinner; b_comment: text before it.
        self.progress = progress
        self.b_comment = b_comment
        self.comment = comment
        self.dots = dots
        self.k = k
        self.i = i
    def input_timeout(self, prompt = str(), time_out = int(), func = None):
        """Read a line (via raw_input or `func`) with a SIGALRM-based timeout.

        Returns the input text, "KeyboardInterrupt" on Ctrl-C, or None when
        the timeout fires or EOF is reached.
        """
        class AlarmException(Exception):
            pass
        def alarmHandler(signum, frame):
            raise AlarmException
        def nonBlockingRawInput(prompt=str(), timeout=int(), func = None):
            signal.signal(signal.SIGALRM, alarmHandler)
            signal.alarm(timeout)
            try:
                if not func:
                    text = raw_input(prompt)
                else:
                    text = func(prompt)
                signal.alarm(0)
                return text
            except AlarmException:
                pass
            except KeyboardInterrupt:
                return "KeyboardInterrupt"
            except EOFError:
                pass
            # Only reached on timeout/EOF: ignore any still-pending alarm.
            signal.signal(signal.SIGALRM, signal.SIG_IGN)
            return None
        text = nonBlockingRawInput(prompt, time_out, func)
        return text
    def rotate(self):
        """Draw one spinner frame in place and advance the frame index."""
        rotation = "\r" + self.b_comment + "[%s] " % self.progress[self.i] + self.comment + "%s" % self.dots[self.i]
        sys.stdout.write(rotation)
        sys.stdout.flush()
        if self.i < self.k:
            self.i += 1
        else:
            self.i = 0
        return rotation
    def rotate_in(self, wait_time = int()):
        """Draw one frame, then wait up to `wait_time` seconds for input.

        Ctrl-C during the wait exits the process.
        """
        rotation = "\r" + self.b_comment + "[%s] " % self.progress[self.i] + self.comment + "%s" % self.dots[self.i]
        sys.stdout.write(rotation)
        sys.stdout.flush()
        key_in = self.input_timeout("", int(wait_time))
        if key_in == "KeyboardInterrupt":
            exit()
        if self.i < self.k:
            self.i += 1
        else:
            self.i = 0
        #return rotation
    def __test__(self):
        """Manual demo loop: spin forever, refreshing the timestamp each second."""
        while True:
            self.b_comment = time.ctime() + " "
            self.rotate()
            time.sleep(1)
if __name__ == "__main__":
s = Status_bar("rotating")
s.__test__()
| true |
4e31b4bfab55515d343bc912e15071bcdd254905 | Python | jombooth/wicycle | /scripts/showNearbyNetworks.py | UTF-8 | 1,355 | 2.53125 | 3 | [] | no_license | #!/usr/bin/python
import subprocess, os, sys, json
def py_grep(buf, s):
    """Return every 'match-to-end-of-line' fragment for substring `s` in `buf`.

    For each occurrence of `s`, the text from the match up to (excluding) the
    next newline is collected, and scanning resumes after that line.  At most
    one fragment is produced per line.

    Rewritten iteratively with str.find: the original recursed once per match
    and stripped `buf` one character at a time, which is quadratic in the
    buffer size and can exhaust the recursion limit on large scan output.
    """
    outputs = []
    pos = buf.find(s)
    while pos != -1:
        newline = buf.find('\n', pos)
        if newline == -1:
            # Match on the final (unterminated) line: take the rest and stop.
            outputs.append(buf[pos:])
            break
        outputs.append(buf[pos:newline])
        buf = buf[newline + 1:]
        pos = buf.find(s)
    return outputs
# NOTE(review): this script uses Python 2 print-statement syntax below and
# will not parse under Python 3.
dev_null = open(os.devnull, 'w')
# Run a wireless scan; everything after the first "wlan1" marker is discarded.
networks_raw = subprocess.check_output(["iwlist", "scan"], stderr=dev_null).split("wlan1")[0]
# iwlist prefixes each network with "Cell NN"; the first split chunk is the header.
networks_raw_list = networks_raw.split("Cell")[1:]
networks_as_dicts = []
for network in networks_raw_list:
    d = {}
    try:
        # ESSID line looks like ESSID:"name"; strip the surrounding quotes.
        d["SSID"] = py_grep(network, "ESSID:")[0].split(':')[1][1:-1]
        if d["SSID"] == "":
            d["SSID"] = "<hidden>"
        d["Encrypted"] = "off" not in py_grep(network, "Encryption key:")[0]
        if d["Encrypted"]:
            d["EncryptionType"] = py_grep(network, "IE:")[0].split(':')[1].strip()
        else:
            d["EncryptionType"] = "Unencrypted"
        # Quality line looks like Quality=NN/70 ...; keep the value after '='.
        d["SignalStrength"] = py_grep(network, "Quality=")[0].split(' ')[0].split('=')[1]
        networks_as_dicts.append(d)
    except:
        # NOTE(review): bare except hides all parse failures for a cell;
        # the exception info is only dumped to stderr.
        print >> sys.stderr, sys.exc_info()
print json.dumps(networks_as_dicts)
| true |
55d62020a7f4fb71cc7306e6bce5397718e11f53 | Python | dw-liedji/spiking-space-radio | /utils/modulator.py | UTF-8 | 3,068 | 2.671875 | 3 | [] | no_license | import numpy as np
from scipy.interpolate import interp1d
class AsynchronousDeltaModulator():
    """Asynchronous delta modulator: turns an analog trace into UP/DN events.

    Typical workflow: interpolate() resamples the input onto a uniform grid,
    encode() produces the boolean up/dn event trains, and decode() rebuilds a
    staircase approximation of the input from those events.
    """

    def __init__(self, thrup, thrdn, resampling_factor):
        # thrup/thrdn: rise/fall thresholds that trigger UP/DN events.
        # resampling_factor: oversampling applied during interpolation.
        self.thrup = thrup
        self.thrdn = thrdn
        self.resampling_factor = resampling_factor
        self.time_length = None
        self.time_resampled = None
        self.vin = None
        self.rec = None
        self.up = None
        self.dn = None
        self.time_step = None

    def interpolate(self, time, vin):
        """Linearly resample (time, vin) onto a uniform grid of len(vin)*factor points."""
        num_points = len(vin) * self.resampling_factor
        self.time_resampled, self.time_step = np.linspace(
            np.min(time), np.max(time), num=num_points, endpoint=True, retstep=True)
        self.vin = interp1d(time, vin, kind='linear')(self.time_resampled)
        self.time_length = len(self.vin)

    def encode(self):
        """Emit an UP (DN) event whenever the input rises (falls) past its threshold."""
        self.up = np.zeros(self.time_length, dtype=bool)
        self.dn = np.zeros(self.time_length, dtype=bool)
        level = self.vin[0]
        for idx, sample in enumerate(self.vin):
            if sample > level + self.thrup:
                self.up[idx] = True
                level = sample
            elif sample < level - self.thrdn:
                self.dn[idx] = True
                level = sample

    def decode(self):
        """Rebuild a staircase signal by integrating the UP/DN event trains."""
        level = self.vin[0]
        self.rec = np.zeros_like(self.vin)
        for idx in range(self.time_length):
            if self.up[idx]:
                level = level + self.thrup
            if self.dn[idx]:
                level = level - self.thrdn
            self.rec[idx] = level
def modulate(admI, admQ, time, sample, resampling_factor=1, stretch_factor=1, reconstruct=False):
    """Delta-modulate a two-channel (I/Q) sample into spike indices and times.

    Channel mapping: 0 = I-up, 1 = I-dn, 2 = Q-up, 3 = Q-dn.  Spike times are
    scaled by `stretch_factor`.  When `reconstruct` is True a staircase
    reconstruction of both channels is appended to the returned tuple.
    """
    admI.interpolate(time, sample[0, :])
    admQ.interpolate(time, sample[1, :])
    admI.encode()
    admQ.encode()
    indices = []
    times = []
    time_stim = np.linspace(np.min(time), np.max(time), num=len(time) * resampling_factor, endpoint=True)
    for i in range(admI.time_length):
        # Preserve the original per-sample event ordering: I-up, I-dn, Q-up, Q-dn.
        for channel, fired in ((0, admI.up[i]), (1, admI.dn[i]), (2, admQ.up[i]), (3, admQ.dn[i])):
            if fired:
                indices.append(channel)
                times.append(time_stim[i])
    signal = np.array([admI.vin, admQ.vin])
    indices = np.array(indices)
    times = np.array(times) * stretch_factor
    time_stim = time_stim * stretch_factor
    if not reconstruct:
        return indices, times, time_stim, signal
    admI.decode()
    admQ.decode()
    reconstruction = np.array([admI.rec, admQ.rec])
    return indices, times, time_stim, signal, reconstruction
def reconstruction_error(signal, reconstruction):
    """Per-channel mean squared error between a signal and its reconstruction.

    Both arguments are (dim, N) arrays; entry i of the returned length-dim
    array is mean((signal[i] - reconstruction[i])**2) -- identical to the
    original per-row loop, but vectorised.

    :raises ValueError: when the channel counts differ.  ValueError is a
        narrowing of the original bare Exception, so existing
        ``except Exception`` handlers still match.
    """
    if len(signal) != len(reconstruction):
        raise ValueError("Signal and reconstruction must have same length")
    return np.mean((np.asarray(signal) - np.asarray(reconstruction)) ** 2, axis=1)
| true |
240371e087586597cdb32931b08eafe644dea942 | Python | yaptide/converter | /converter/fluka/parser.py | UTF-8 | 1,048 | 2.796875 | 3 | [] | no_license | from converter.common import Parser
from converter.fluka.input import Input
class FlukaParser(Parser):
    """A simple placeholder that ignores the json input and prints example (default) configs."""
    def __init__(self) -> None:
        super().__init__()
        # Identify this parser in the shared info dict inherited from Parser.
        self.info['simulator'] = 'fluka'
        self.info['version'] = 'unknown'
        self.input = Input()
    def parse_configs(self, json: dict) -> None:
        """Parse energy and number of particles from json.

        Expects json["beam"] with "energy" (MeV) and "numberOfParticles" keys.
        """
        # Since energy in json is in MeV and FLUKA uses GeV, we need to convert it.
        self.input.energy_GeV = float(json["beam"]["energy"]) * 1e-3
        self.input.number_of_particles = json["beam"]["numberOfParticles"]
    def get_configs_json(self) -> dict:
        """
        Return a dict representation of the config files. Each element has
        the config files name as key and its content as value.
        """
        configs_json = super().get_configs_json()
        # The Input object renders itself to FLUKA input-card text via __str__.
        configs_json["fl_sim.inp"] = str(self.input)
        return configs_json
| true |
6671eaf8efe0f7d3f9a3c3df89604eb79ef32bfe | Python | yehudit96/event_entity_coref_ecb_plus | /src/all_models/models.py | UTF-8 | 11,640 | 2.59375 | 3 | [] | no_license | import math
import numpy as np
import torch
import torch.nn as nn
from model_utils import *
import torch.nn.functional as F
import torch.autograd as autograd
class CDCorefScorer(nn.Module):
'''
An abstract class represents a coreference pairwise scorer.
Inherits Pytorch's Module class.
'''
def __init__(self, word_embeds, word_to_ix, vocab_size, char_embedding, char_to_ix, char_rep_size,
dims, use_mult, use_diff, feature_size, coreferability_type, atten_hidden_size=None):
'''
C'tor for CorefScorer object
:param word_embeds: pre-trained word embeddings
:param word_to_ix: a mapping between a word (string) to
its index in the word embeddings' lookup table
:param vocab_size: the vocabulary size
:param char_embedding: initial character embeddings
:param char_to_ix: mapping between a character to
its index in the character embeddings' lookup table
:param char_rep_size: hidden size of the character LSTM
:param dims: list holds the layer dimensions
:param use_mult: a boolean indicates whether to use element-wise multiplication in the
input layer
:param use_diff: a boolean indicates whether to use element-wise differentiation in the
input layer
:param feature_size: embeddings size of binary features
'''
super(CDCorefScorer, self).__init__()
self.embed = nn.Embedding(vocab_size, word_embeds.shape[1])
self.embed.weight.data.copy_(torch.from_numpy(word_embeds))
self.embed.weight.requires_grad = False # pre-trained word embeddings are fixed
self.word_to_ix = word_to_ix
self.char_embeddings = nn.Embedding(len(char_to_ix.keys()), char_embedding.shape[1])
self.char_embeddings.weight.data.copy_(torch.from_numpy(char_embedding))
self.char_embeddings.weight.requires_grad = True
self.char_to_ix = char_to_ix
self.embedding_dim = word_embeds.shape[1]
self.char_hidden_dim = char_rep_size
self.char_lstm = nn.LSTM(input_size=char_embedding.shape[1], hidden_size=self.char_hidden_dim, num_layers=1,
bidirectional=False)
self.coreferability_type = coreferability_type
# binary features for coreferring arguments/predicates
self.coref_role_embeds = nn.Embedding(2, feature_size)
self.use_mult = use_mult
self.use_diff = use_diff
self.input_dim = dims[0]
self.hidden_dim_1 = dims[1]
self.hidden_dim_2 = dims[2]
self.out_dim = 1
self.hidden_layer_1 = nn.Linear(self.input_dim, self.hidden_dim_1)
self.hidden_layer_2 = nn.Linear(self.hidden_dim_1, self.hidden_dim_2)
self.out_layer = nn.Linear(self.hidden_dim_2, self.out_dim)
if self.coreferability_type == 'linear':
self.coref_input_dim = dims[3]
self.coref_second_dim = dims[4]
self.coref_third_dim = dims[5]
self.hidden_layer_coref_1 = nn.Linear(self.coref_input_dim, self.coref_second_dim)
self.hidden_layer_coref_2 = nn.Linear(self.coref_second_dim, self.coref_third_dim)
self.dropout_coref = nn.Dropout(p=0.2)
elif self.coreferability_type == 'attention':
self.trasformer = FeaturesSelfAttention(vocab_size=20001, hidden_size=atten_hidden_size)
self.attention_features = [0, 3, 5, 6] + list(range(8, 17))
self.model_type = 'CD_scorer'
    def forward(self, clusters_pair_tensor):
        '''
        Pass the input tensor through the feed-forward scoring network.

        The first 17 columns of ``clusters_pair_tensor`` are assumed to be
        the coreferability features; the remaining columns hold the
        concatenated mention-pair representation (element-wise products,
        differences and embedded binary features) — TODO confirm against the
        caller that builds this tensor.

        :param clusters_pair_tensor: an input tensor consisting of a
            concatenation of the two mention representations, their
            element-wise multiplication and a vector of binary features
            (each feature embedded as 50 dimensional embeddings), preceded
            by 17 coreferability-feature columns when enabled
        :return: a predicted confidence score (between 0 and 1) of the
            mention pair belonging to the same coreference chain (cluster)
        '''
        if self.coreferability_type == 'linear':
            # Run the 17 coreferability features through a small MLP and
            # replace the raw features with its (dropout-regularised) output.
            coref_features = clusters_pair_tensor[:, :17]
            coref_first_hidden = F.relu(self.hidden_layer_coref_1(coref_features))
            coref_second_hidden = F.relu(self.hidden_layer_coref_2(coref_first_hidden))
            coref_dropout = self.dropout_coref(coref_second_hidden)
            clusters_tensor = torch.cat([clusters_pair_tensor[:, 17:], coref_dropout], dim=1)
        elif self.coreferability_type == 'attention':
            # Only a subset of the 17 features participates in the
            # self-attention encoder (indices in self.attention_features).
            coref_features = clusters_pair_tensor[:, :17]
            features_vector = coref_features[:, self.attention_features]
            #features_vector = features_vector.type(torch.IntTensor)
            attention = self.trasformer(features_vector)
            clusters_tensor = torch.cat([clusters_pair_tensor[:, 17:], attention], dim=1)
        else:
            # No coreferability preprocessing: score the raw pair tensor.
            clusters_tensor = clusters_pair_tensor
        first_hidden = F.relu(self.hidden_layer_1(clusters_tensor))
        # first_hidden = F.relu(self.hidden_layer_1(clusters_pair_tensor))
        second_hidden = F.relu(self.hidden_layer_2(first_hidden))
        # NOTE(review): F.sigmoid is deprecated in modern PyTorch in favour
        # of torch.sigmoid; kept as-is to avoid behaviour/version changes.
        out = F.sigmoid(self.out_layer(second_hidden))
        return out
def init_char_hidden(self, device):
'''
initializes hidden states the character LSTM
:param device: gpu/cpu Pytorch device
:return: initialized hidden states (tensors)
'''
return (torch.randn((1, 1, self.char_hidden_dim), requires_grad=True).to(device),
torch.randn((1, 1, self.char_hidden_dim), requires_grad=True).to(device))
    def get_char_embeds(self, seq, device):
        '''
        Runs an LSTM over the character embeddings of *seq* and returns the
        last output state, i.e. a fixed-size character-level encoding of the
        word/phrase.

        :param seq: a string representing a word or a phrase (iterated
            character by character)
        :param device: gpu/cpu Pytorch device
        :return: the LSTM's last output state (shape (1, char_hidden_dim))
        '''
        char_hidden = self.init_char_hidden(device)
        input_char_seq = self.prepare_chars_seq(seq, device)
        # (seq_len, batch=1, embed_dim) layout expected by the LSTM
        char_embeds = self.char_embeddings(input_char_seq).view(len(seq), 1, -1)
        char_lstm_out, char_hidden = self.char_lstm(char_embeds, char_hidden)
        # keep only the output at the final time step
        char_vec = char_lstm_out[-1]
        return char_vec
def prepare_chars_seq(self, seq, device):
'''
Given a string represents a word or a phrase, this method converts the sequence
to a list of character embeddings
:param seq: a string represents a word or a phrase
:param device: device: gpu/cpu Pytorch device
:return: a list of character embeddings
'''
idxs = []
for w in seq:
if w in self.char_to_ix:
idxs.append(self.char_to_ix[w])
else:
lower_w = w.lower()
if lower_w in self.char_to_ix:
idxs.append(self.char_to_ix[lower_w])
else:
idxs.append(self.char_to_ix['<UNK>'])
print('can find char {}'.format(w))
tensor = torch.tensor(idxs, dtype=torch.long).to(device)
return tensor
# Upper bounds (vocabulary sizes) per coreferability feature.
# NOTE(review): currently only referenced from the commented-out
# per-feature embedding dict inside FeaturesSelfAttention — confirm whether
# this table is still needed.
max_value = {
    'NE_0.26': 3400,
    'chirps_days': 1500,
    'chirps_num': 20000,
    'chirps_rules_num': 20,
    'component_num': 1500,
    'day_num': 600,
    'entity_ipc': 2000,
    'entity_pc': 7000,
    'entity_wc': 1000,
    'event_ipc': 9000,
    'event_pc': 7000,
    'in_clique': 5000,
    'pairs_num': 9000
}
class FeaturesSelfAttention(nn.Module):
    """Single-layer self-attention encoder over numerical feature vectors.

    Each feature value is embedded (shared numerical embedding averaged with
    a per-position feature embedding), attended over, and mean-pooled into
    one vector per input row.
    """
    def __init__(self, vocab_size, hidden_size, num_attention_heads=1, attention_probs_dropout_prob=0.1,
                 hidden_dropout_probs=0.1):
        super(FeaturesSelfAttention, self).__init__()
        if hidden_size % num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (hidden_size, num_attention_heads))
        # Shared embedding over raw feature values; the last index serves as
        # padding / "missing value" slot (see create_features_embedding).
        self.numerical_embedding = nn.Embedding(vocab_size, hidden_size, padding_idx=vocab_size-1)
        # Positional embedding identifying which of the 13 features a value is.
        self.features_embedding = nn.Embedding(13, hidden_size)
        # self.embeddings = {i: nn.Embedding(dim, hidden_size) for i, dim in enumerate(max_value.values())}
        self.num_attention_heads = num_attention_heads
        self.attention_head_size = int(hidden_size / num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(hidden_size, self.all_head_size)
        self.key = nn.Linear(hidden_size, self.all_head_size)
        self.value = nn.Linear(hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(attention_probs_dropout_prob)
        # self.LayerNorm = nn.LayerNorm(hidden_size, eps=1e-12)
        # NOTE(review): torch.cuda.current_device() returns a CUDA device
        # index and raises without a GPU — confirm CPU-only runs are not
        # expected to reach this module.
        self.device = torch.cuda.current_device()
    def transpose_for_scores(self, x):
        # Reshape (batch, seq, all_head) -> (batch, heads, seq, head_size).
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)
    def forward(self, hidden_states):
        """Embed the feature rows, self-attend, and mean-pool per row."""
        embedding = self.create_features_embedding(hidden_states)
        mixed_query_layer = self.query(embedding)
        mixed_key_layer = self.key(embedding)
        mixed_value_layer = self.value(embedding)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        # attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        # Mean-pool over the feature (sequence) dimension -> one vector/row.
        batch_vectors = torch.mean(context_layer, dim=1)
        return batch_vectors
    def create_features_embedding(self, features_vector):
        """Embed each (position, value) pair of every row.

        Values of -1 (missing) are mapped to the padding index of the
        numerical embedding; each cell embedding is the mean of the value
        embedding and the feature-position embedding.
        """
        batch_vectors = []
        for row in features_vector:
            row_features = []
            for feature_inx, feature_val in enumerate(row):
                feature_val = int(feature_val) if feature_val > -1 else self.numerical_embedding.num_embeddings-1
                feature_val = torch.tensor(feature_val).to(self.device)
                feature_inx = torch.tensor(feature_inx).to(self.device)
                feature_tensor = (self.numerical_embedding(feature_val) + self.features_embedding(feature_inx))/2
                feature_tensor = feature_tensor.reshape(1, -1)
                row_features.append(feature_tensor)
            batch_vectors.append(torch.cat(row_features, dim=-2).unsqueeze(0))
        return torch.cat(batch_vectors, dim=0)
| true |
b6970df2b14a12229ed2e2c954416f16b6dfe9fc | Python | RichardFord10/bonus-item-swap | /bonus_item_swap.py | UTF-8 | 3,301 | 2.609375 | 3 | [] | no_license | from datetime import datetime
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import NoSuchWindowException
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
from selenium.webdriver import ActionChains
from getpass import getpass
import pandas as pd
import sys
import csv
import os
import re
# configure webdriver & headless chrome (no visible browser window)
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--window-size=1920x1080")
# NOTE(review): hard-coded Windows chromedriver path — breaks on other
# machines; consider webdriver_manager (already imported) instead.
driver = webdriver.Chrome(options = chrome_options, executable_path=r'C:/Users/rford/Desktop/chromedriver/chromedriver.exe')
# current day in YYYY-MM-DD format
currentDate = datetime.today().strftime('%Y-%m-%d')
#login function
def login(user, pword: str = ""):
    """Log in to the manager panel using the module-level Selenium driver.

    Bug fix: the original signature ``login(user, pword = str)`` bound the
    built-in ``str`` *type* as the default value (a misplaced annotation).
    Callers always pass both arguments, so an empty-string default with a
    proper annotation is backward compatible.

    :param user: account user name typed into the login form
    :param pword: account password typed into the login form
    """
    driver.get("https://######.com/manager")
    Username = driver.find_element_by_id("bvuser")
    Password = driver.find_element_by_id("bvpass")
    Login = driver.find_element_by_xpath('//*[@id="form1"]/div/div[2]/input')
    Username.send_keys(user)
    Password.send_keys(pword)
    Login.click()
    print("Logging In...")
# Sale ids whose bonus item should be swapped.
sale_ids = [
    '70280',
    '70281',
    '72551',
    '72552',
    '74968',
    '74969',
    '75729',
    '70271',
    '70274',
    '70273',
]
# Item id being replaced and its replacement (single-element lists).
old_bonus_item = ['238438']
new_bonus_item = ['238605']
# Result collection (currently unused downstream).
item_results = []
# Swap the bonus item on every sale in sale_ids via the admin UI.
def switch_bonus_item():
    """For each sale id, open the bonus-item-swap page, search for the new
    item, set quantity 1 and submit — replacing the hard-coded old item.
    Relies on the module-level Selenium ``driver`` being logged in."""
    print('Gathering Information...')
    for sale_id in sale_ids:
        ignored_exceptions=(NoSuchElementException, StaleElementReferenceException)
        # Old item id 238438 is baked into the URL query string.
        driver.get("https://www.#######.com/manager/bonus-item-swap.php?sale_id={}&item_id=238438".format(sale_id))
        WebDriverWait(driver, 10)
        #Navigate to item
        search_text_id = "search_text"
        WebDriverWait(driver, 10, ignored_exceptions=ignored_exceptions).until(expected_conditions.presence_of_element_located((By.ID, search_text_id)))
        driver.find_element_by_xpath('//*[@id="search_text"]').send_keys('item to search for')
        action = ActionChains(driver)
        # NOTE(review): option index 2392 is position-dependent in the
        # select list — confirm it still targets the intended item.
        action.double_click(driver.find_element_by_xpath('/html/body/div[2]/div/select/option[2392]')).perform()
        new_qty_id = "new_qty"
        WebDriverWait(driver, 10, ignored_exceptions=ignored_exceptions).until(expected_conditions.presence_of_element_located((By.ID, new_qty_id)))
        driver.find_element_by_xpath('//*[@id="new_qty"]').send_keys('1')
        WebDriverWait(driver, 10)
        driver.find_element_by_xpath('//*[@id="submit-new-bonus"]').click()
        print("Sale {} with bonus item {} has been swapped with bonus item {}".format(sale_id, old_bonus_item, new_bonus_item))
    print('All Sale Bonus Items Switched')
#Run: prompt interactively for credentials (password hidden via getpass),
# log in, then perform the swap for every sale.
login(input("Enter Username: "), getpass("Enter Password: "))
switch_bonus_item()
| true |
6b5c262d5a951575cd0535b7df8050c50907d2c7 | Python | trevohearn/AppMarketing | /webscraping/webscraping.py | UTF-8 | 7,375 | 2.703125 | 3 | [] | no_license | #Trevor O'Hearn
#5/6/2020
#Python file for webscraping methods
#installs
#!pip install Selenium
import Selenium as sl
from bs4 import BeautifulSoup
import requests
#Get webpage (module-level demo fetch) and parse it into a soup object.
page = requests.get("http://dataquestio.github.io/web-scraping-pages/simple.html")
bs = BeautifulSoup(page.content, 'html.parser')
#requests attributes
#page.status_code
#page.content
#
### REQUESTS ###
#session handling from requests library
#session = requets.Session()
#session.auth = ('user', 'pass')
#session.headers.update({'x-test' : 'true'})
# #cookies example
# with requests.Session() as s:
# s.get('https://httpbin.org/cookies/set/sessioncookie/123456789')
#or
#session.get('https://example.com/headers', headers = {'x-test2' : true})
#page.headers returns headers
#example of sending a prepped request
# from requests import Request, Session
#
# s = Session()
#
# req = Request('POST', url, data=data, headers=headers)
# prepped = req.prepare()
#
# # do something with prepped.body
# prepped.body = 'No, I want exactly this as the body.'
#
# # do something with prepped.headers
# del prepped.headers['Content-Type']
#
# resp = s.send(prepped,
# stream=stream,
# verify=verify,
# proxies=proxies,
# cert=cert,
# timeout=timeout
# )
# create a session
def session():
    """Return a fresh :class:`requests.Session`.

    Bug fix: the original body referenced ``reqeusts`` (typo), which raised
    NameError on every call; corrected to ``requests``.
    """
    return requests.Session()
def authenticate(session, username, password):
    """Attach HTTP basic-auth credentials to *session* in place."""
    credentials = (username, password)
    session.auth = credentials
#get new page
def getPage(url):
    """Fetch *url* with requests and return the raw Response.

    TODO: add error handling (timeouts, connection errors) — currently any
    network failure propagates to the caller.
    """
    return requests.get(url)
### SELENIUM METHODS ###
### BEAUTIFULSOUP METHODS ###
#https://www.restapitutorial.com/httpstatuscodes.html
# Lookup tables mapping (status_code % 100) to the human-readable message
# for each status-code class (status_code // 100).  Messages are kept
# byte-for-byte identical to the original chain (including its typos, e.g.
# 'reqest entity too large', 'unkown ...') so existing callers comparing
# against them keep working.
_INFORMATIONAL = {
    0: 'Continue',
    1: 'switching protocols',
    2: 'processing (webDAV)',
}
_SUCCESS = {
    0: 'Success',
    1: 'Success - created',
    2: 'Success - Accepted',
    3: 'Success - Non-Authoritative Information',
    4: 'Success - No content',
    5: 'Success - Reset Content',
    6: 'Success - partial content',
    7: 'Success - Multi-Status (WebDAV)',
    8: 'Success - Already Reported (WebDAV)',
    26: 'Success - IM Used',
}
_REDIRECTION = {
    0: 'multiple choices',
    1: 'moved permanently',
    2: 'found',
    3: 'see other',
    4: 'not modified',
    5: 'use proxy',
    6: 'unused',
    7: 'temporary redirect',
    8: 'permanent redirect (experimental)',
}
_CLIENT_ERROR = {
    0: 'Client Error',
    1: 'Unauthorized',
    2: 'payment required',
    3: 'forbidden',
    4: 'not found',
    5: 'method not found',
    6: 'not acceptable',
    7: 'proxy authentication required',
    8: 'request timeout',
    9: 'conflict',
    10: 'gone',
    11: 'length required',
    12: 'precondition failed',
    13: 'reqest entity too large',
    14: 'request-URI too long',
    15: 'Unsupported Media Type',
    16: 'requested range not satisfiable',
    17: 'expectation failed',
    18: 'im a teapot (RFC 2324)',
    20: 'enhance your calm (twitter)',
    22: 'Unprocessable Entity (WebDAV)',
    23: 'locked (webDAV)',
    24: 'Failed dependency (WebDAV)',
    25: 'reserved for webdav',
    26: 'upgrade required',
    28: 'precondition requried',
    29: 'too many requests',
    31: 'request header fields too large',
    44: 'no response (nginx)',
    49: 'retry with microsoft',
    50: 'blocked by windows parental controls (microsoft)',
    51: 'unavailable for legal reasons',
    99: 'client closed request (nginx)',
}
_SERVER_ERROR = {
    0: 'Internal Server Error',
    1: 'not implemented',
    2: 'bad gateway',
    3: 'service unavailable',
    4: 'gateway timeout',
    5: 'HTTP Version Not Supported',
    6: 'variant also negotiates (experimental)',
    7: 'insufficient storage (WebDAV)',
    8: 'Loop Detected (WebDAV)',
    9: 'Bandwidth Limit Exceeded (Apache)',
    10: 'Not extended',
    11: 'Network Authentication Required',
    98: 'Network read timeout error',
    99: 'network connect timeout error',
}


def requestResponse(page):
    """Translate ``page.status_code`` into a human-readable message.

    Bug fix: the original 160-line if/elif chain contained a SyntaxError
    (an ``else`` missing its colon in the 2xx branch), which made the whole
    module unimportable.  The chain is replaced with per-class lookup
    tables; every returned string is unchanged.

    :param page: any object exposing an integer ``status_code`` attribute
        (e.g. a ``requests.Response``)
    :return: the message string for the status code
    """
    code = page.status_code
    cat = code // 100      # status-code class (1xx..5xx)
    spec = code % 100      # code within the class
    if cat == 1:           # informational
        return _INFORMATIONAL.get(spec, 'general informational other subcategory')
    if cat == 2:           # success
        return _SUCCESS.get(spec, 'Success - other status code involved')
    if cat == 3:           # redirection
        return _REDIRECTION.get(spec, 'redirect with other subcategory')
    if cat == 4:           # client error
        return _CLIENT_ERROR.get(spec, 'unkown client error response')
    if cat == 5:           # server error
        return _SERVER_ERROR.get(spec, 'server error - unknown subcategory')
    return 'unknown code error'
#beautiful soup methods
#bs.prettify()
#bs.children
#bs.find_all('p')
#bs.find_all('p', class_='example-stuff')
#bs.find('p')
#bs.find() -> finds entire page
#bs.select('div p') -> uses css tags
#given HTML element
#get parent elements
#return specific child elements
#parse text out of given elements
#change webpage to scrape
| true |
4f6209b5a3914dc4d0661638066d003fa6792c44 | Python | mfigurski80/HydroErosion | /hydro_erosion/erodeLandscape.py | UTF-8 | 2,238 | 2.78125 | 3 | [
"MIT"
] | permissive | #! /home/miko/python/HydroErosion/env/bin/python3
from . utilities import mean
# from viewLandscape import viewMap
def getDeltaHeight(map, x, y):
    """Return the 3x3 grid of height differences around cell (x, y).

    Out-of-bounds neighbours receive a large sentinel (10000) so they are
    never selected as the lowest neighbour.  Diagonal neighbours are
    down-weighted by 0.65 (~1/sqrt(2)) to account for the longer travel
    distance.

    Bug fix: the original diagonal test ``abs(i + j) == 2 and i != 0`` only
    matched the (-1,-1) and (1,1) offsets, leaving the (-1,1) and (1,-1)
    diagonals unweighted despite the "weight corners less" intent.  A
    diagonal is any offset with both i != 0 and j != 0.
    """
    deltaHeight = []
    for i in [-1, 0, 1]:
        deltaHeight.append([])
        for j in [-1, 0, 1]:
            if (
                (x + i >= len(map))
                or (x + i < 0)
                or (y + j >= len(map[0]))
                or (y + j < 0)
            ):
                # out of bounds: sentinel so this cell never gets picked
                deltaHeight[i + 1].append(10000)
            else:
                deltaHeight[i + 1].append(map[x + i][y + j] - map[x][y])
                if i != 0 and j != 0:  # all four diagonals weighted less
                    deltaHeight[i + 1][j + 1] *= 0.65  # ~1/sqrt(2)
    return deltaHeight
# Perform actual erosion operation on map
def erodeWithDrop(map, rockmap, hydrationMap, x, y, carry):
    """Simulate one water droplet starting at (x, y) for up to 25 steps.

    Each step moves the droplet to its lowest neighbour, transfers a
    ``carry`` fraction of the height difference downhill (damped x0.1 when
    digging into bedrock tracked by *rockmap*), and increments the visit
    counter in *hydrationMap*.  All three maps are mutated in place.
    """
    for time in range(25):  # drop lifespan = 25
        # find lowest surrounding point
        deltaHeight = getDeltaHeight(map, x, y)
        d_x = 0
        d_y = 0
        # NOTE(review): d_x/d_y hold raw offsets in {-1,0,1} but are used
        # directly as indices into deltaHeight (which is 0..2); negative
        # offsets wrap to the opposite row/column, so the "current best"
        # cell read here may not be the one chosen earlier — looks like an
        # off-by-one bug (should compare deltaHeight[d_x+1][d_y+1]).
        # Confirm intended behaviour before changing the simulation output.
        for i in [-1, 0, 1]:
            for j in [-1, 0, 1]:
                if deltaHeight[i + 1][j + 1] < deltaHeight[d_x][d_y]:
                    d_x = i
                    d_y = j
        fu_x = x + d_x
        fu_y = y + d_y
        # Perform droplet move
        ch_height = map[x][y] - map[fu_x][fu_y]  # get base delta height
        rockMult = 1  # get multiplier due to bedrock
        if map[x][y] - carry * ch_height < rockmap[x][y]:
            # digging below the bedrock level erodes much more slowly
            rockMult = 0.1
        rockmap[x][y] = map[x][y] - carry * ch_height * rockMult
        map[x][y] -= carry * ch_height * rockMult
        map[fu_x][fu_y] += carry * ch_height
        x = fu_x
        y = fu_y
        # set hydrationMap
        hydrationMap[x][y] += 1
def erodeMap(heightmap, rockmap, iterate=400, carry=0.15):
    """Run *iterate* full erosion passes, dropping one droplet from every
    cell per pass.  Mutates *heightmap*/*rockmap* in place and returns
    (heightmap, hydrationMap), where hydrationMap counts droplet visits."""
    hydrationMap = [[0] * len(row) for row in heightmap]
    rows = len(heightmap)
    cols = len(heightmap[0])
    for _ in range(iterate):
        for x in range(rows):
            for y in range(cols):
                erodeWithDrop(heightmap, rockmap, hydrationMap, x, y, carry)
    return (heightmap, hydrationMap)
| true |
03189d0b87eea254ed27b94056945ad97d28ee16 | Python | MrQubo/wwi-ctf | /tasks/router-stegano/task3.py | UTF-8 | 1,010 | 2.53125 | 3 | [] | no_license | #!/usr/bin/python3
FLAG_FILE = "Flag3-1.webp.gz.xz.lzma"
DELAY = 'delay delay-time=0.030;'
#TWO_BIT = [
# 'beep frequency=200 length=0.01;',
# 'beep frequency=320 length=0.01;',
# 'beep frequency=512 length=0.01;',
# 'beep frequency=820 length=0.01;',
# ]
# Script fragment emitted for each 2-bit symbol value (00, 01, 10, 11).
TWO_BIT = ['$a;', '$b;', '$c;', '$d;']


def encode_byte(x):
    """Encode one byte as four 2-bit symbols, most-significant pair first."""
    pairs = ((x >> shift) & 3 for shift in (6, 4, 2, 0))
    return ''.join(TWO_BIT[pair] for pair in pairs)
# Read the flag file and emit a RouterOS script: four beep macros (one per
# 2-bit symbol) followed by the encoded byte stream.
with open(FLAG_FILE, 'rb') as f:
    data = f.read()
print(':global a do={beep frequency=1000 length=0.02;delay delay-time=0.030}')
print(':global b do={beep frequency=2000 length=0.02;delay delay-time=0.030}')
print(':global c do={beep frequency=4000 length=0.02;delay delay-time=0.030}')
print(':global d do={beep frequency=8000 length=0.02;delay delay-time=0.030}')
for byte in data:
    print(encode_byte(byte))
# 0.12 s/byte: presumably 4 symbols x 0.030 s delay each — TODO confirm
print("# Total time =", len(data) * 0.12, "s.")
| true |
7dbf46bd02c145b37568dfa4d243851a99736637 | Python | Nukesor/encarne | /encarne/db.py | UTF-8 | 746 | 2.640625 | 3 | [
"MIT"
] | permissive | """Helper class to get a database engine and to get a session."""
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm.session import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_utils.functions import database_exists, create_database
engine = create_engine('sqlite:////var/lib/encarne/encarne.db')
base = declarative_base(bind=engine)
def get_session():
    """Create and return a new scoped SQLAlchemy session bound to the
    module-level engine."""
    factory = sessionmaker(bind=engine)
    return scoped_session(factory)
def create_db():
    """Create the database if it doesn't exist yet, then ensure all tables
    declared on ``base`` exist (create_all is a no-op for existing tables)."""
    db_url = engine.url
    if not database_exists(db_url):
        create_database(db_url)
    base.metadata.create_all()
| true |
18024dbb74c16760cec27f2026484357f8e67093 | Python | ibrahim85/LSTM_TimeSeriesRegression | /Final/RiotAPI.py | UTF-8 | 10,685 | 2.515625 | 3 | [] | no_license | import requests
from collections import deque
import simplejson as json
import glob
import time
import os
import sys
# Based code off of this project: https://github.com/pseudonym117/Riot-Watcher/blob/master/riotwatcher/riotwatcher.py
# Used a smaller version, since I only needed certain parts of the API
class LoLException(Exception):
    """Raised for Riot API error responses.

    Carries the human-readable error message and the HTTP response headers
    (used e.g. to read rate-limit back-off hints).  Instances compare equal
    to plain strings so call sites can test against the ``error_*``
    message constants.
    """

    def __init__(self, error, response):
        self.error = error
        self.headers = response.headers

    def __str__(self):
        return self.error

    def __eq__(self, other):
        if isinstance(other, str):
            # legacy call sites compare directly against message strings
            return self.error == other
        elif isinstance(other, self.__class__):
            return self.error == other.error and self.headers == other.headers
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Bug fix: the original hashed an unbound super() proxy object
        # (identity-based), so two equal exceptions hashed differently and
        # the __eq__/__hash__ contract was broken.  Hashing the error
        # message is consistent with string equality above.
        return hash(self.error)
error_400 = "Bad request"
error_401 = "Unauthorized"
error_403 = "Blacklisted key"
error_404 = "Game data not found"
error_429 = "Too many requests"
error_500 = "Internal server error"
error_503 = "Service unavailable"
error_504 = 'Gateway timeout'
def raise_status(response):
    """Raise a LoLException for known Riot API error codes; defer any other
    non-2xx status to requests' own raise_for_status()."""
    known_errors = {
        400: error_400,
        401: error_401,
        403: error_403,
        404: error_404,
        429: error_429,
        500: error_500,
        503: error_503,
        504: error_504,
    }
    message = known_errors.get(response.status_code)
    if message is not None:
        raise LoLException(message, response)
    response.raise_for_status()
def possible(request):
    """Classify an API response into an action keyword.

    Returns one of "skip" (permanent per-request failure), "wait"
    (transient — retry), "success", "quit" (blacklisted key) or "unknown".
    For 429 responses this also sleeps for the server-advised back-off
    (or 5 s when the headers give no usable hint).
    """
    code = request.status_code
    if code in (400, 404, 415):
        return "skip"
    if code == 429:
        print(request.headers)
        type_limit = request.headers.get("X-Rate-Limit-Type", "proxy")
        if type_limit == "proxy":
            print("PROXY PROBLEM")
            time.sleep(5)
            return "wait"
        wait_time = request.headers.get("Retry-After", -42)
        if wait_time == -42:
            print("PROXY PROBLEM")
            time.sleep(5)
            return "wait"
        print(type_limit + " HAS EXCEEDED AND MUST WAIT " + str(wait_time) + " SECONDS." )
        time.sleep(int(wait_time))
        return "wait"
    if code in (500, 503):
        return "wait"
    if code == 200:
        return "success"
    if code == 403:
        return "quit"
    return "unknown"
# Sliding-window rate limiter.  Remembers the expiry time of each request
# made and drops expired entries from the front of the deque on demand.
class RateLimiter(object):
    def __init__(self, n_made, n_requests, seconds):
        """Allow *n_requests* per *seconds*-second window; pre-load the
        window with *n_made* already-made requests."""
        self.allowed_requests = n_requests
        self.seconds = seconds
        self.made_requests = deque()
        for _ in range(n_made):
            self.add_request()

    def __reload(self):
        # Discard entries whose expiry time has already passed.
        now = time.time()
        window = self.made_requests
        while len(window) > 0 and window[0] < now:
            window.popleft()

    def add_request(self):
        """Record one request as made now (expires *seconds* from now)."""
        self.made_requests.append(time.time() + self.seconds)

    def request_available(self):
        """True when another request fits inside the current window."""
        self.__reload()
        return len(self.made_requests) < self.allowed_requests
class RiotAPI(object):
    """Thin Riot API client that honours a set of RateLimiter windows.

    All endpoint methods previously duplicated the identical rate-limit /
    retry discipline; it is now factored into the private ``_request``
    helper (public method signatures and behaviour are unchanged).
    """

    def __init__(self, key, limits):
        self.api_key = key
        self.limits = limits

    def can_make_request(self):
        """True when every configured rate-limit window has headroom."""
        for lim in self.limits:
            if not lim.request_available():
                return False
        return True

    def _request(self, url, label):
        """Fetch *url* with rate limiting and retries; return parsed JSON.

        Returns None for permanently failed requests ("skip"/"unknown"),
        exits the process on a blacklisted key ("quit"), and retries until
        success on transient failures ("wait" — possible() already sleeps
        for the server-advised back-off on 429s).  *label* is only used in
        the diagnostic message.
        """
        while not self.can_make_request():
            time.sleep(1)
        request = requests.get(url)
        check = possible(request)
        if check == "skip" or check == "unknown":
            print(str(label) + " unsuccessful with error" + str(request.status_code))
            return None
        elif check == "quit":
            print("CARE OF BLACKLIST")
            sys.exit(0)
        elif check == "wait":
            while check != "success":
                while not self.can_make_request():
                    time.sleep(1)
                request = requests.get(url)
                check = possible(request)
        for lim in self.limits:
            lim.add_request()
        return request.json()

    def getMatch(self, matchID):
        """Fetch full match data for *matchID* (None on permanent failure)."""
        url = ("https://na.api.pvp.net/api/lol/na/v2.2/match/" + str(matchID)
               + "?api_key=" + self.api_key)
        return self._request(url, matchID)

    def getExtraGames(self, matchJSON, playerID):
        """From a match, find the TOP-lane participant who is NOT *playerID*
        and return their earlier ranked matches on the same champion
        (created strictly before this match)."""
        timestamp = matchJSON["matchCreation"]
        # participant ids of everyone who played TOP lane in this match
        game_ids = []
        for participant in matchJSON["participants"]:
            if participant["timeline"]["lane"] == "TOP":
                game_ids.append(participant["participantId"])
        wantedID = 0
        pID = 0
        champID = 0
        # the opposing TOP laner's summoner id and participant id
        for identity in matchJSON["participantIdentities"]:
            if identity["participantId"] in game_ids and str(identity["player"]["summonerId"]) != str(playerID):
                wantedID = identity["player"]["summonerId"]
                pID = identity["participantId"]
                break
        # the champion that player used in this match
        for participant in matchJSON["participants"]:
            if participant["participantId"] == pID:
                champID = participant["championId"]
                break
        return self.getSpecificMatchList(wantedID, champID, timestamp - 1)

    def getSpecificMatchList(self, summonerID, champID, timestamp):
        """Ranked Season-2016 match list for *summonerID* on *champID*,
        limited to matches ending before *timestamp*."""
        url = ("https://na.api.pvp.net/api/lol/na/v2.2/matchlist/by-summoner/"
               + str(summonerID) + "?championIds=" + str(champID)
               + "&rankedQueues=TEAM_BUILDER_DRAFT_RANKED_5x5,RANKED_TEAM_5x5"
               + "&seasons=SEASON2016&endTime=" + str(timestamp)
               + "&api_key=" + str(self.api_key))
        return self._request(url, summonerID)

    def getMatchList(self, summonerID):
        """Fetch the full recent match list for *summonerID*."""
        url = ("https://na.api.pvp.net/api/lol/na/v2.2/matchlist/by-summoner/"
               + str(summonerID) + "?api_key=" + self.api_key)
        return self._request(url, summonerID)
return request.json()
def getMatches(API):
    """Download ranked Season-2016 TOP-lane matches for a fixed player list,
    saving each match as ./<summonerId>/<matchId>.json.

    NOTE(review): range(8, ...) skips the first eight players — presumably a
    manual resume point from an interrupted run; confirm before reuse.
    """
    players = [66271229, 44207669, 47063254, 56723622, 34399881, 47837396, 19305039, 37348109, 51635691, 28360391]
    for i in range(8, len(players)):
        directory = "./" + str(players[i])
        if not os.path.exists(directory):
            os.makedirs(directory)
        playerID = players[i]
        matchlist = API.getMatchList(playerID)
        for match in matchlist["matches"]:
            # keep only ranked 5v5 Season-2016 games played in the TOP lane
            poss_queues = ["TEAM_BUILDER_DRAFT_RANKED_5x5", "RANKED_TEAM_5x5"]
            season = "SEASON2016"
            lane = "TOP"
            matchId = match["matchId"]
            if match["queue"] in poss_queues and match["season"] == season and match["lane"] == lane:
                realMatch = API.getMatch(matchId)
                print(matchId)
                with open("./" + str(playerID) + "/" + str(matchId) + ".json", "w") as f:
                    json.dump(realMatch,f)
def getExtras(API):
    """For each already-downloaded match of the first player, fetch up to 24
    earlier TOP-lane matches of that game's opposing top laner and save them
    under ./<player>/<matchId>/.

    NOTE(review): filename.split("\\") assumes Windows path separators from
    glob — confirm before running on other platforms.
    """
    players = [66271229, 44207669, 47063254, 56723622, 34399881, 47837396, 19305039, 37348109, 51635691, 28360391]
    for i in range(0,1):
        player = players[i]
        DATA_FILES = glob.glob('./' + str(player) + '/*.json')
        for filename in DATA_FILES:
            # match id taken from the file name (strip directory + ".json")
            new_folder = filename.split("\\")[1].split(".")[0]
            print("~~~~~~~~~~~~~~~")
            print(str(player) + " - " + new_folder)
            games = None
            with open(filename) as data_file:
                data = json.load(data_file)
                games = API.getExtraGames(data, player)
            if games is None or games.get("matches") is None:
                continue
            count = 0
            if not os.path.exists("./" + str(player) + "/" + str(new_folder)):
                os.makedirs("./" + str(player) + "/" + str(new_folder))
            for match in games["matches"]:
                if count>=24:
                    # cap at 24 extra games per source match
                    break
                if match["lane"] == "TOP":
                    count += 1
                    matchId = match["matchId"]
                    print(matchId)
                    realMatch = API.getMatch(matchId)
                    with open("./" + str(player) + "/" + str(new_folder) + "/" + str(matchId) + ".json", "w") as f:
                        json.dump(realMatch,f)
def fixNulls(API):
    """Re-download matches listed in nulls.txt (one file path per line),
    replacing any existing (null/corrupt) JSON file at that path."""
    null_file = open("nulls.txt")
    for line in null_file:
        # strip the trailing newline to get the file path
        directory = line[:-1]
        if os.path.exists(directory):
            os.remove(directory)
            print(directory)
        # match id is the file's base name without the ".json" suffix
        matchId = directory.split("/")[-1].split(".")[0]
        json_data = API.getMatch(matchId)
        with open(directory, "w") as f:
            json.dump(json_data,f)
if __name__ == "__main__":
config = open("config.txt")
api_key = config.readline()[:-1]
print(api_key)
API = RiotAPI(api_key, limits = (RateLimiter(1,10,10), RateLimiter(10,500,600)))
# getMatches(API)
fixNulls(API)
# myID = 39550290
# matchIds = open("matchIDs.txt")
# for ID in matchIds:
# ID = ID[:-1]
# if ID != "":
# match = API.getMatch(ID)
# with open("./Sensen/" + ID + ".json", "w") as f:
# json.dump(match,f)
# DATA_FILES = glob.glob('./Sensen/*.json')
# for filename in DATA_FILES:
# print(filename)
# file = open(filename)
# json_data = json.load(file)
# file.close()
# if filename == "./Sensen/2079308613.json":
# print(json_data)
# participantIdentities = json_data["participantIdentities"]
# player_IDs = {}
# for participant in participantIdentities:
# player_IDs[participant["participantId"]] = participant["player"]["summonerId"]
# participantData = json_data["participants"]
# desiredId = 0
# for participant in participantData:
# timeline = participant["timeline"]
# lane = timeline["lane"]
# if lane == "TOP" and player_IDs[participant["participantId"]]!=myID:
# desiredId = player_IDs[participant["participantId"]]
# break
# if desiredId == 0:
# continue
# timestamp = json_data["matchCreation"]
# matchList = API.getMatchList(desiredId)
# matchList = matchList["matches"]
# for match in matchList:
# matchTime = match["timestamp"]
# queue = "TEAM_BUILDER_DRAFT_RANKED_5x5"
# season = "SEASON2016"
# lane = "TOP"
# matchId = match["matchId"]
# if matchTime<timestamp and match["queue"] == queue and match["season"] == season and match["lane"] == lane:
# realMatch = API.getMatch(matchId)
# with open("./Sensen/" + str(desiredId) + "/" + str(matchId) + ".json", "w") as f:
# json.dump(realMatch,f)
| true |
87287ceaceec349fba4d350012302407f22d53ba | Python | WitoldMarciniak/ThyroidDiseaseZiwM | /ThyroidDiseaseZiwM/main.py | UTF-8 | 4,266 | 2.890625 | 3 | [] | no_license | import pandas as pd
import random
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.model_selection import StratifiedKFold
from sklearn.neighbors import KNeighborsClassifier
# 20 - other
# 0 - SVHC
# 1 - SVI
# 2 - STMW
# 3 - SVHD
# 4 - WEST
# Load the feature-name headers from files/features.txt (one name per line).
def get_features():
    """Return the list of feature names read from files/features.txt."""
    header_frame = pd.read_csv('files/features.txt', header=None)
    return list(header_frame[0])
# Load feature values and diagnoses from files/data.csv.
def get_data(features):
    """Read files/data.csv, label its columns with *features*, and split it.

    Returns (features_data, diagnosis): the first 16 columns as the feature
    frame and the last column as a one-column DataFrame named "Result".
    """
    raw = pd.read_csv('files/data.csv', sep=',', header=None)
    raw.columns = features
    feature_part = raw.iloc[:, :16]
    result_part = pd.DataFrame(raw.iloc[:, -1])
    result_part.columns = ["Result"]
    return feature_part, result_part
# Select the K features with the highest chi-squared score.
def get_best_features(features, results, k):
    """Return the *k* columns of *features* most correlated (chi2) with
    *results*."""
    selector = SelectKBest(score_func=chi2, k=k)
    selector.fit(features, results)
    chosen_columns = selector.get_support(indices=True)
    return features.iloc[:, chosen_columns]
# Fit feature selection on the training split only, then project both the
# training and test frames onto the chosen columns (no test-set leakage).
def split_for_train_and_test_features(data_train, results_train, data_test, k):
    """Pick the *k* best chi2 features on the training data and return the
    (train, test) frames restricted to those columns."""
    selector = SelectKBest(score_func=chi2, k=k)
    selector.fit(data_train, results_train)
    chosen_columns = selector.get_support(indices=True)
    return data_train.iloc[:, chosen_columns], data_test.iloc[:, chosen_columns]
# Display-only helper: ranks features by chi-squared score; not used in the
# kNN computation itself.
def get_ranking(features, results):
    """Return a DataFrame of feature names sorted by descending chi2 score."""
    (chi, pval) = chi2(features, results)
    result = pd.DataFrame(features.columns, columns=['Feature name'])
    result["chi"] = chi
    result.sort_values(by=['chi'], ascending=False, inplace=True)
    return result
# Two-fold (stratified) cross-validation.
def cross_valid(features, diagnosis, n, k_best_features, metric, offset):
    """Score a kNN classifier with stratified 2-fold cross-validation.

    For each fold, the *k_best_features* columns are selected on the
    training half only and applied to both halves, then a kNN with *n*
    neighbours and the given *metric* is fitted and scored.  *offset* seeds
    the fold shuffling for reproducibility.  Returns the list of two fold
    scores.
    """
    split = StratifiedKFold(n_splits=2, random_state=offset, shuffle=True).split(features, diagnosis)
    scores = []
    for train_samples_indexes, test_samples_indexes in split:
        features_train = features.iloc[train_samples_indexes]
        diagnosis_train = diagnosis.iloc[train_samples_indexes]
        features_test = features.iloc[test_samples_indexes]
        diagnosis_test = diagnosis.iloc[test_samples_indexes]
        train, test = split_for_train_and_test_features(features_train, diagnosis_train, features_test,
                                                        k_best_features)
        knn = KNeighborsClassifier(n_neighbors=n, metric=metric)
        knn.fit(train, diagnosis_train.values.ravel())
        scores.append(knn.score(test, diagnosis_test))
    return scores
def run_knn(features, diagnosis):
    """Grid-search (metric, feature count, neighbour count) for kNN.

    Each combination is evaluated with five repeated two-fold
    cross-validations (shared random seeds across combinations); mean
    estimated and real fold scores plus their relative error are collected
    into the returned DataFrame.

    Bug fixes vs. the original:
    - ``0l`` (Python 2 long literals) were a SyntaxError under Python 3;
      replaced with ``0.0``.
    - the declared ``columns`` list ("metric", "k_best_features", ...) did
      not match the keys used when appending rows ("Metric", "K", ...), so
      the frame accumulated empty duplicate columns; the declared columns
      now match the appended keys.

    NOTE(review): DataFrame.append was removed in pandas 2.0 — kept for
    compatibility with the pandas version this project pins; migrate to
    pd.concat when upgrading.
    """
    columns = ["Metric", "K", "N", "Scores", "Relative error"]
    results = pd.DataFrame(columns=columns)
    randoms = [random.randint(0, 10000000) for _ in range(5)]
    for metric in ["euclidean", "manhattan"]:
        for k_best_features in range(1, 16):
            for n_neighbors in [1, 5, 10]:
                estimated_score = 0.0
                real_score = 0.0
                for run in range(5):
                    score = cross_valid(features, diagnosis, n_neighbors, k_best_features, metric,
                                        randoms[run])
                    estimated_score += score[0]
                    real_score += score[1]
                error = abs(estimated_score - real_score) / real_score
                results = results.append({"Metric": metric, "K": k_best_features, "N": n_neighbors,
                                          "Scores": [estimated_score / 5, real_score / 5], "Relative error": error},
                                         ignore_index=True)
    return results
# >>>>>>> MAIN
features_headers = get_features()
(features, diagnosis) = get_data(features_headers)
# Print the chi2 feature ranking (informational only).
feature_ranking = get_ranking(features, diagnosis)
print(feature_ranking)
results = run_knn(features, diagnosis)
# NOTE(review): local name shadows the built-in sorted(); consider renaming.
sorted = results.sort_values(by='Relative error')
sorted.to_csv("result2.csv")
print(sorted)
| true |
3fbcd82bb3b71a29ab95b44255fb16b4fa838ac4 | Python | RomanPolishchenko/Python-practice | /Queue_tasks/10_4/main10_4.py | UTF-8 | 551 | 3.921875 | 4 | [] | no_license | import queue
from random import randint

q = queue.Queue()
nums = input('Enter beginning numbers: ')
n = abs(int(input('Enter quantity of tests: ')))
# Seed the queue with the space-separated starting numbers.
for i in nums.split():
    q.put(int(i))
# Perform n random operations: a draw of 1 enqueues a user-supplied number,
# a draw of 0 dequeues one.
for i in range(n):
    inst = randint(0, 1)
    if inst:
        q.put(int(input('Got 1. Add a number: ')))
    elif q.empty():
        # Guard: the original called q.get() unconditionally, which blocks
        # forever once the queue is empty.
        print('Got 0. Queue is empty, nothing to take.')
    else:
        print('Got 0. From queue got {}'.format(q.get()))
print('Queue size – {}'.format(q.qsize()))  # 10.2(a)
print('Queue – {}'.format(list(q.queue)))
print('Reversed queue – {}'.format(list(reversed(list(q.queue)))))  # 10.2(b)
| true |
61ccb0025466c2864ab1fcbca1b8c0762b9d390a | Python | Multifacio/Moldel | /moldel/Layers/MultiLayer/MultiLayer.py | UTF-8 | 2,065 | 3.1875 | 3 | [] | no_license | from Data.Player import Player
from typing import Dict, NamedTuple, Set
import numpy as np
MultiLayerResult = NamedTuple("MultiLayerResult", [("predictions", np.array), ("exclusion", bool)])
class MultiLayer:
""" A Multi Layer does multiple predictions how likely someone is the 'Mol'. """
def predict(self, predict_season: int, latest_episode: int, train_seasons: Set[int]) -> Dict[Player, MultiLayerResult]:
""" Do multiple predictions about how likely a player is the 'Mol'.
Parameters:
predict_season (int): The season number for which the predictions are made (the season started at
19 november 1999 is considered as season number 1).
latest_episode (int): From the predict_season we only use episode data from episodes with numbers until the
latest_episode number as observation data. This also includes the entire episode data from the episode
with the latest_episode number. <br>
- Set this value to sys.maxsize if you want to use all episodes from the predict_season as observation
data.
- Set this value to 0 if you want to use no episodes from the predict_season as observation data.
(Which can be used to check the performance of only the pre-layers)
train_seasons (Set[int]): A set of season numbers (int) which are used for training this layer.
Returns:
Dict[Player, MultiLayerResult]: A dictionary that contains the predictions for each player how likely they
are the 'Mol'. The key of this dictionary is the Player for which the predictions are made and the value
is a MultiLayerResult which consists of an array of floats that indicates how likely the player is the
'Mol' and an exclusion value which is True if this MultiLayer determined that that player cannot be the
'Mol' anymore and False if there is still a possibility that the player is the 'Mol'.
"""
pass | true |
6f6e2f00d6538487d6449d41c2031c44bd1d5a73 | Python | kimbumsoo0820/codeup | /20200713codeup/codeup2_num_asciimoonja.py | UTF-8 | 36 | 3.015625 | 3 | [] | no_license | a=input()
# Interpret the typed value as a character code and show that character.
code_point = int(a)
print(chr(code_point))