seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
31977888925 | # import libtcodpy as libtcod
# import EngineSettings
from Scene import Scene
import Audio
from core import EngineSettings
from gui.MainWindow import MainWindow
audio = None
mainWin = None
mainScene = None
logWin = None
def init(scene=None):
    """Initialise the engine singletons: main scene, main window and audio.

    :param scene: optional pre-built Scene; when omitted a default Scene
        sized from EngineSettings.ViewWidth/ViewHeight is created.
    """
    global mainWin, mainScene, logWin, audio
    mainScene = scene
    # 'is None' rather than '== None': identity is the idiomatic (and
    # safer) test for a missing argument.
    if mainScene is None:
        mainScene = Scene(mapW=EngineSettings.ViewWidth, mapH=EngineSettings.ViewHeight)
    mainWin = mainScene.MainWindow
    audio = Audio.Audio()
    # audio.play_music('hkblue')  # startup music left disabled on purpose
40785686071 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 28 17:02:32 2020
@author: elvinagovendasamy
"""
from scipy import spatial
from sklearn.model_selection import KFold
import numpy as np
import pandas as pd
from operator import itemgetter
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn import preprocessing
from collections import Counter
from scipy.spatial.distance import cdist
from random import seed
from random import random
from random import randrange
class KNN:
    """Hand-rolled k-nearest-neighbours classifier plus helpers for K-Fold
    splitting, bootstrap resampling and accuracy measurement.

    Convention: the LAST column/element of every dataset/row is the class
    label; distance computations skip it.
    """

    @staticmethod
    def dist_euclid(x, y):
        """Euclidean distance between two rows, ignoring the final element
        (assumed to be the class label)."""
        distance = 0.0
        for i in range(len(x) - 1):
            distance += ((x[i] - y[i]) ** 2)
        return np.sqrt(distance)

    @staticmethod
    def CV_KFold(nsplits, dataset):
        """K-Fold split of *dataset*; returns only the LAST fold as
        (X_train, X_test, y_train, y_test), as in the original design.

        Bug fix: features/labels are now sliced from *dataset* itself
        instead of the module-level globals X/Y, so the split is correct
        when a bootstrap sample (different rows/indices) is passed in.
        """
        X = dataset.iloc[:, :-1]
        Y = dataset.iloc[:, -1:]
        kf = KFold(n_splits=nsplits, random_state=1, shuffle=True)
        for train_index, test_index in kf.split(dataset):
            X_train, X_test = X.iloc[train_index], X.iloc[test_index]
            y_train, y_test = Y.iloc[train_index], Y.iloc[test_index]
        return X_train, X_test, y_train, y_test

    @staticmethod
    def transforming_datasets(nsplits, dataset):
        """Split, re-join X/y, and return (test, train) as lists of float
        rows (label last) to ease the distance computation."""
        X_train, X_test, y_train, y_test = KNN.CV_KFold(nsplits, dataset)
        train_data = pd.concat([X_train, y_train], axis=1)
        test_data = pd.concat([X_test, y_test], axis=1)
        test_data = test_data.astype(float).values.tolist()
        train_data = pd.DataFrame(train_data).astype(float).values.tolist()
        return test_data, train_data

    @staticmethod
    def get_neighbours(train_dataset, test_row, k):
        """Return the k training rows closest to *test_row*."""
        distance = []
        for idx in train_dataset:
            dist = KNN.dist_euclid(idx, test_row)
            distance.append((idx, dist))
        # Keep only the k smallest distances, then strip the distances.
        distance = sorted(distance, key=itemgetter(1))[:k]
        neighbours = [n[0] for n in distance]
        return neighbours

    @staticmethod
    def predict(train_dataset, test_row, k):
        """Majority vote over the labels of the k nearest neighbours.

        Decision rule: since y is categorical, the predicted class is the
        one with the largest proportion among the neighbours.
        """
        neighbours = KNN.get_neighbours(train_dataset, test_row, k)
        classes = {}
        for i in range(k):
            y_prediction = neighbours[i][-1]
            if y_prediction in classes:
                classes[y_prediction] += 1
            else:
                classes[y_prediction] = 1
        sorted_classes = sorted(classes.items(), key=itemgetter(1), reverse=True)
        return sorted_classes[0][0]

    @staticmethod
    def manual_knn(dataset, num_splits, k):
        """Predict labels for the test fold with the hand-rolled KNN."""
        test_data, train_data = KNN.transforming_datasets(num_splits, dataset)
        predictions = []
        for row in test_data:
            response = KNN.predict(train_data, row, k)
            predictions.append(response)
        return predictions

    @staticmethod
    def sklearn_kNN(dataset, num_splits, k):
        """Predict labels for the test fold with scikit-learn's KNN."""
        X_train, X_test, y_train, y_test = KNN.CV_KFold(num_splits, dataset)
        model = KNeighborsClassifier(k)
        model.fit(X_train, y_train)
        y_pred = model.predict(X_test)
        return y_pred

    @staticmethod
    def y_actual(dataset, num_splits, k):
        """True labels (last element of each row) of the test fold.
        *k* is unused but kept for interface compatibility."""
        test_data, train_data = KNN.transforming_datasets(num_splits, dataset)
        actual = []
        for row in test_data:
            actual.append(row[-1])
        return actual

    @staticmethod
    def accuracy_sklearn(dataset, num_splits, k):
        """Percentage accuracy of the scikit-learn predictor."""
        actual = KNN.y_actual(dataset, num_splits, k)
        predicted_sklearn = KNN.sklearn_kNN(dataset, num_splits, k)
        correct = 0
        for i in range(len(actual)):
            if actual[i] == predicted_sklearn[i]:
                correct += 1
        return correct / float(len(actual)) * 100.0

    @staticmethod
    def accuracy_manual_knn(dataset, num_splits, k):
        """Percentage accuracy of the hand-rolled predictor."""
        actual = KNN.y_actual(dataset, num_splits, k)
        predicted_manual_knn = KNN.manual_knn(dataset, num_splits, k)
        correct = 0
        for i in range(len(actual)):
            if actual[i] == predicted_manual_knn[i]:
                correct += 1
        return correct / float(len(actual)) * 100.0

    @staticmethod
    def subsample(dataset, ratio):
        """Bootstrap sample (with replacement) of size round(ratio * len)."""
        sample = []
        N = round(len(dataset) * ratio)
        while len(sample) < N:
            index = randrange(len(dataset))  # uniform draw, with replacement
            sample.append(dataset.iloc[index, :])
        return pd.DataFrame(sample)

    @staticmethod
    def subsample1(dataset):
        """numpy-based bootstrap: (X, y) resampled with replacement."""
        sample_index = np.random.choice(dataset.index, len(dataset))
        data_boostrap = dataset.loc[sample_index]
        y_bs = data_boostrap.iloc[:, -1:]
        X_bs = data_boostrap.iloc[:, :-1]
        return X_bs, y_bs
# Goal: estimate classifier accuracy over bootstrap samples (and, eventually,
# find the optimal k).
if __name__=="__main__":
    path='winequality.csv'
    # Wine-quality CSV is semicolon-separated; last column is the label.
    data=pd.read_csv(path,sep=';')
    Y=data.iloc[:,-1:]
    X=data.iloc[:,:-1]
    np.random.seed(0)
    r=0.5                  # bootstrap sample size as a fraction of the data
    num_neighbours=5       # k for the KNN vote
    num_splits=3           # K-Fold splits
    acc_bs=[]
    #    data1=KNN.subsample(data,0.2)
    # choosing boostrap samples of size 100:
    #    for s in [100]:
    scores=[]
    sample_means=[]
    # 100 bootstrap replicates; each one is split and scored independently.
    for i in range(100):
        sample=KNN.subsample(data,r)
        score=KNN.accuracy_manual_knn(sample,num_splits,num_neighbours)
        scores.append(score)
    print('Scores: %.3f' % np.mean(scores))
    # TODO: compute the sample mean and compare against it.
| elvinaeury/KNN_from_scratch | KNN_Bagging_final.py | KNN_Bagging_final.py | py | 7,069 | python | en | code | 0 | github-code | 13 |
import logging

# Shared record layout: timestamp, logger name, message.
FORMAT = "%(asctime)s %(name)s %(message)s"
logging.basicConfig(level=logging.INFO, format=FORMAT)

# Parent logger 's' is lowered to DEBUG; the child 's.s1' has no level of
# its own, so it inherits DEBUG from its parent and the message is emitted.
log1 = logging.getLogger('s')
log1.setLevel(logging.DEBUG)
log2 = logging.getLogger('s.s1')
log2.debug('log2 debug')

# Full 9x9 multiplication table, "j*i=product" with i varying slowest.
data = []
for i in range(1, 10):
    for j in range(1, 10):
        data.append("{}*{}={}".format(j, i, i * j))
print(data)
from twilio.rest import Client
import os

# Send a single SMS via Twilio. Credentials come from the environment so
# they never live in source control; KeyError is caught by the except below.
try:
    account_sid = os.environ['TWILIO_ACCOUNT_SID']
    auth_token = os.environ['TWILIO_AUTH_TOKEN']
    client = Client(account_sid, auth_token)
    # NOTE(review): from_/to are placeholders and must be replaced with
    # real E.164 phone numbers before this can actually send.
    message = client.messages \
        .create(
            body="Hi Maaz.",
            from_='+xxxxxxxxxxx',
            to='+92xxxxxxxxxx'
        )
    # SID of the queued message, useful for tracking delivery.
    print(message.sid)
except Exception as e:
    # Broad catch: any failure (missing env vars, network, auth) is
    # reported on stdout rather than raised.
    print(ValueError("Failed to access twilio credentials." + str(e)))
| maazsabahuddin/Twilio-SMS-service | Twilio_sms.py | Twilio_sms.py | py | 529 | python | en | code | 1 | github-code | 13 |
import subprocess
import platform

input_file = "ips.txt"
output_file = "ping_results.txt"

# ping's "number of echo requests" switch differs by OS: -n on Windows,
# -c on POSIX. The original hard-coded -n, which only works on Windows.
count_flag = "-n" if platform.system().lower() == "windows" else "-c"

# Read the target IP addresses, one per line.
with open(input_file, "r") as file:
    ip_addresses = file.read().splitlines()

# Ping each address four times and append the raw output to the report.
with open(output_file, "w") as file:
    for ip_address in ip_addresses:
        # argv list (shell=False) avoids shell injection via the input file.
        ping_process = subprocess.Popen(["ping", count_flag, "4", ip_address], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        output, error = ping_process.communicate()

        # Record the IP address and its ping output.
        file.write(f"IP Address: {ip_address}\n")
        file.write(output)
        file.write("\n")

# Print a success message
print("Ping results recorded successfully!")
| kotha070/Thesis | ping_ips.py | ping_ips.py | py | 832 | python | en | code | 0 | github-code | 13 |
41057607409 | from locale import currency
import sys
from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
from chatterbot.trainers import ChatterBotCorpusTrainer
import re
import datetime
import glob
from sqlalchemy import false
def get_corpus_from_file(filename):
    """Parse a chat-log export into a list of short conversations.

    Each log line looks like "[YYYY-MM-DD HH:MM:SS] speaker: message".
    A conversation starts at a greeting word or a question and is closed
    once roughly three speaker turns have accumulated.
    """
    conversations = []
    curr_conversation = []
    # Words that can open a conversation (greetings / openers).
    conv_init_words = ["hi", "hey", "heyy", "heyyy", "how", "yo", "yoo", "yooo", "going", "wyd", "been"]
    depth = 0                 # number of speaker turns in the open conversation
    past_speaker = ""
    in_conversation = False
    lock = set()              # last (init words, has_question) trigger; not used afterwards
    with open(filename, "r") as f:
        text = f.readlines()
        # Timestamp of the first line seeds the elapsed-time tracking.
        c_date = re.search(r'.(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})] ([a-zA-Z\d+]*):', text[0])
        last_date = datetime.datetime.strptime(c_date.group(1), '%Y-%m-%d %H:%M:%S')
        for line in text:
            # Normalise curly quotes and strip non-ASCII characters.
            line = line.replace("’", "\'")
            line = ''.join([i if ord(i) < 128 else ' ' for i in line]).strip()
            match = re.search(r'.(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})] ([a-zA-Z\d+]*):', line)
            if match:
                speaker = match.group(2)
                date = datetime.datetime.strptime(match.group(1), '%Y-%m-%d %H:%M:%S')
                # Message body = line minus the "[timestamp] speaker:" prefix.
                message = line.replace(match.group(0), '')
                message = message.strip()
                if message:
                    time_delta = (date - last_date).total_seconds() / 3600  # hours; currently unused
                    list_of_message_words = [word.lower() for word in re.split('\s+', message)]
                    matched_init_word = set(conv_init_words) & set(list_of_message_words)
                    has_init_word = True if matched_init_word else False
                    has_question = True if "?" in message else False
                    new_speaker = True if speaker != past_speaker else False
                    # State 1: not in a conversation — a greeting or question opens one.
                    if depth >= 0 and not in_conversation:
                        if (has_init_word or has_question):
                            lock = (matched_init_word, has_question)
                            in_conversation = True
                            curr_conversation.append(message)
                            depth += 1
                            continue
                    # State 2: mid-conversation — accumulate; a speaker change
                    # counts as one more turn.
                    if (depth < 3 and depth >= 1) and in_conversation:
                        if not new_speaker:
                            curr_conversation.append(message)
                        else:
                            curr_conversation.append(message)
                            depth += 1
                        continue
                    # State 3: enough turns — close out the conversation.
                    if depth >= 3 and in_conversation:
                        curr_conversation.append(message)
                        conversations.append(curr_conversation)
                        curr_conversation = []
                        depth = 0
                        in_conversation = False
                    # NOTE(review): speaker/date tracking only advances on
                    # non-empty messages that fall through the states above
                    # (the 'continue's skip it) — confirm this is intended.
                    if new_speaker:
                        past_speaker = speaker
                    last_date = date
    return conversations
def main():
    """Build a conversation corpus from ./training_sets and train the bot."""
    master_convo = []
    # Easter-egg exchange appended at the end of training.
    stormlight = ["Kaladin, say the words", "Life before death...Strength before weakness...Journey before destination"]
    training_files = glob.glob("./training_sets/*.txt")
    for f in training_files:
        print("Generating corpus for " + str(f))
        temp_convo = get_corpus_from_file(f)
        master_convo += temp_convo
    # Dump the extracted conversations for manual inspection.
    with open("test.txt", "w") as f:
        for conv in master_convo:
            f.write("-----------New Convo-------------\n")
            f.write(str(conv) + "\n")
    chatbot = ChatBot("Kaladin")
    # Train on each extracted conversation individually.
    trainer = ListTrainer(chatbot)
    count = 0
    total = str(len(master_convo))
    for convo in master_convo:
        print("Training Count: " + str(count) + " : " + total)
        trainer.train(convo)
        count += 1
    trainer.train(stormlight)
    # Finish with the generic English corpus shipped with chatterbot.
    trainer = ChatterBotCorpusTrainer(chatbot)
    trainer.train(
        "chatterbot.corpus.english"
    )
main() | volts-inventory/Kaladin | train.py | train.py | py | 4,643 | python | en | code | 0 | github-code | 13 |
from typing import List
from cloudleak.app import create_app
from cloudleak.models.objectid import PydanticObjectId
from cloudleak.models.target import Scan
from flask_pymongo import PyMongo
from ..models.scan_status import ScanStatus

# Standalone app instance (no HTTP routes registered) used purely so the
# Mongo connection string can be read from the Flask app config.
app = create_app(register_blueprints=False)
mongo_client = PyMongo(app, uri=app.config["MONGO_URI"])
# Module-level database handle shared by the helpers below.
db = mongo_client.db
def add_scan(scan_info):
    """Persist a new Scan document and return its generated id.

    :param scan_info: dict of Scan model fields.
    :returns: PydanticObjectId of the inserted document.
    """
    scan = Scan(**scan_info)
    insert_result = db.scans.insert_one(scan.to_bson())
    # Reflect Mongo's generated _id back onto the model before returning.
    scan.id = PydanticObjectId(str(insert_result.inserted_id))
    return scan.id
def get_scan(scan_id=None) -> List:
    """Fetch one scan (by id) or all scans, serialised to JSON dicts.

    The numeric status field is replaced by its ScanStatus enum name.
    """
    if scan_id is None:
        cursor = db.scans.find()
    else:
        cursor = db.scans.find({"_id": scan_id})
    if not cursor:
        return []
    results = []
    for raw_doc in cursor:
        serialised = Scan(**raw_doc).to_json()
        serialised["status"] = ScanStatus(serialised["status"]).name
        results.append(serialised)
    return results
def save_bucket(scan_id, buckets_results):
    """Attach bucket scan results to an existing scan document ($set only
    touches the 'buckets' field; other fields are preserved)."""
    db.scans.update_one(
        {"_id": PydanticObjectId(scan_id)}, {"$set": {"buckets": buckets_results}}
    )
| DanielAzulayy/CloudLeak | cloudleak_backend/cloudleak/common/scans.py | scans.py | py | 1,118 | python | en | code | 2 | github-code | 13 |
21322648186 | import sys
import numpy
import numpy.linalg
class BaseImage(object):
    """Common behaviour for montage images: a named set of 2-D points."""

    def __init__(self):
        # point name -> (x, y) pixel coordinates
        self.points = {}

    def add_point(self, name, px, py):
        """Register (or overwrite) control point *name* at (px, py)."""
        self.points[name] = px, py

    def union_points(self, other):
        """All points from both images; *other*'s coordinates win on clashes."""
        merged = dict(self.points)
        merged.update(other.points)
        return merged

    def interesect_points(self, other):
        """Points present in both images, with *other*'s coordinates."""
        return {name: other.points[name]
                for name in self.points if name in other.points}

    def get_images(self):
        """Leaf images contained in this node (just *self* for a base image)."""
        return [self]
class Image(BaseImage):
    """A concrete source image on disk with known pixel dimensions."""

    def __init__(self, filename, width, height):
        super(Image, self).__init__()
        self.filename = filename  # path written into the SVG href
        self.width = width        # pixels
        self.height = height      # pixels
class CombinedImage(BaseImage):
    """Binary-tree node pairing two (possibly already combined) images
    whose control points have been merged after registration."""

    def __init__(self, im1, im2):
        super(CombinedImage, self).__init__()
        self.im1 = im1
        self.im2 = im2
        # im2's coordinates win for point names present in both children.
        self.points = im1.union_points(im2)

    def get_images(self):
        """Flatten the tree into its list of leaf images."""
        return self.im1.get_images() + self.im2.get_images()
def matrix_sqrt(m):
    """Square root of a diagonalisable matrix via its eigendecomposition:
    m = V D V^-1  =>  sqrt(m) = V sqrt(D) V^-1.

    Fix: uses the '@' matrix-multiplication operator, so the result is
    correct for plain ndarrays as well as numpy.matrix inputs. With the
    original '*' the ndarray path computed an ELEMENTWISE product.
    May return a complex-valued result when m has negative/complex
    eigenvalues (numpy.linalg.eig behaviour).
    """
    ev, evv = numpy.linalg.eig(m)
    d = numpy.diag(numpy.sqrt(ev))
    evvi = numpy.linalg.inv(evv)
    m2 = evv @ d @ evvi
    return m2
class Montage(object):
    """Registers a set of images into one coordinate frame by repeatedly
    merging pairs that share enough control points."""

    def __init__(self):
        self.images = set()

    def add_image(self, image):
        self.images.add(image)

    def combine(self, im1, im2):
        """Fit the affine map taking im1's shared points onto im2's, split
        it evenly between the two images via its matrix square root, and
        return a CombinedImage holding both.
        """
        common_points = im1.interesect_points(im2)
        # Homogeneous 3xN coordinate matrices of the shared points.
        a_rows = [[], [], []]
        b_rows = [[], [], []]
        for name in common_points:
            a_rows[0].append(im1.points[name][0])
            a_rows[1].append(im1.points[name][1])
            a_rows[2].append(1)
            b_rows[0].append(im2.points[name][0])
            b_rows[1].append(im2.points[name][1])
            b_rows[2].append(1)
        # NOTE(review): numpy.matrix is deprecated; ndarray + '@' is the
        # modern equivalent.
        a = numpy.matrix(a_rows)
        b = numpy.matrix(b_rows)
        # m maps im1-coordinates onto im2-coordinates. inv(a) needs 'a' to
        # be square, i.e. exactly 3 common points — TODO confirm behaviour
        # when more than 3 points are shared.
        m = b * numpy.linalg.inv(a)
        # "Half" the transform each way so both images meet in a shared
        # intermediate frame.
        m2 = matrix_sqrt(m)
        im1.transform = m2
        im2.transform = numpy.linalg.inv(m2)
        new_im = CombinedImage(im1, im2)
        return new_im

    def process_image(self):
        """Merge one pair of images sharing >= 3 points.

        Returns True when a merge happened, False when no pair qualifies.
        """
        for im1 in self.images:
            for im2 in self.images:
                if im1 == im2:
                    continue
                common_points = im1.interesect_points(im2)
                if len(common_points) >= 3:
                    # Mutating the set is safe here because we return
                    # immediately and never advance the live iterators.
                    self.images.remove(im1)
                    self.images.remove(im2)
                    new_im = self.combine(im1, im2)
                    self.images.add(new_im)
                    return True
        return False

    def process(self):
        """Keep merging until a single combined image remains."""
        while len(self.images) > 1:
            if not self.process_image():
                raise Exception('Cannot find image pair to combine')

    def get_images(self):
        """All leaf images across every (possibly combined) entry."""
        all_images = []
        for im in self.images:
            all_images.extend(im.get_images())
        return all_images
return all_images
def save_svg(montage, filename):
    """Write an SVG placing every leaf image with its computed affine
    transform; images never combined simply have no transform attribute."""
    image_parts = []
    for image in montage.get_images():
        try:
            m = image.transform
            # SVG matrix(a,b,c,d,e,f) is column-major: columns (a b), (c d), (e f).
            extra = """transform="matrix(%f, %f, %f, %f, %f, %f)" """ % (m[0,0], m[1,0], m[0,1], m[1,1], m[0,2], m[1,2])
        except AttributeError:
            # Leaf was never merged, so it stays untransformed.
            extra = ''
        image_svg = """<image y="0.0" x="0.0" xlink:href="%s" width="%d" height="%d" %s />""" % (image.filename, image.width, image.height, extra)
        image_parts.append(image_svg)
    svg = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">
    %s
</svg>
""" % '\n    '.join(image_parts)
    with open(filename, 'wt') as f:
        f.write(svg)
def main():
    """Demo: register two overlapping photos via three shared points."""
    montage = Montage()
    # Shared points 'a', 'b', 'd' appear in both photos; point 'c' is
    # deliberately left disabled (combine() needs exactly 3 points).
    im1 = Image('test1.jpg', 2592, 1936)
    im1.add_point('a', 2566, 1510)
    im1.add_point('b', 1982, 1397)
    #im1.add_point('c', 2402, 826)
    im1.add_point('d', 1993, 620)
    montage.add_image(im1)
    im2 = Image('test2.jpg', 2592, 1936)
    im2.add_point('a', 610, 1752)
    im2.add_point('b', 33, 1648)
    #im2.add_point('c', 456, 1075)
    im2.add_point('d', 49, 868)
    montage.add_image(im2)
    montage.process()
    save_svg(montage, 'output.svg')

if __name__ == '__main__':
    main()
| ejrh/image-tools | montage/montage.py | montage.py | py | 4,548 | python | en | code | 0 | github-code | 13 |
73207501138 | import sys
from load import load_strings
# Load the (sorted) word list whose path is given on the command line.
names = load_strings(sys.argv[1])

# Fixed probe set: names to look up in the loaded list.
search_names = ["Titus", "Harv", "Wolfgang", "Moshe", "Len", "Cosmo", "Bernd", "Tray", "Derrin", "Garry", "Tomlin", "Pace", "Wilfrid", "Ulysses", "Uli", "Ave", "Val", "Todd", "Chrissy", "Terry", "Mischa", "Elwood", "Earl", "Alec", "Demetrius", "Fulton", "Zechariah", "Wolfram", "Jeth", "Sergent", "Jotham", "Ferguson", "Andreas", "Thaine", "Eduardo", "Thorny", "Ambrosi", "Keil", "Ephrem", "Socrates", "Reinhard", "Gary", "Joaquin", "Beowulf", "Ritchie", "Denis", "Abraham", "Wynton", "Robinson", "Solly",
"Dell", "Rahul", "Archibald", "Robbert", "Melvyn", "Virgie", "Douggie", "Anton", "Janus", "Ajai", "Obadias", "Winslow", "Tobie", "Corby", "Drew", "Sascha", "Ferdinand", "Shaine", "Harman", "Elvis", "Bruno", "Germaine", "Halvard", "Urson", "Wolfy", "Duffy", "Hervey", "Phillip", "Merry", "Geoffry", "Burke", "Thadeus", "Immanuel", "Ansel", "Horatius", "Randall", "Lawton", "Aguste", "Felipe", "Derrick", "Torre", "Sanson", "Winford", "Pasquale", "Vance", "Neddie", "Sterling", "Wait", "Davoud", "Guthrey"]
def binary_search(list, target):
    """Classic iterative binary search over a sorted sequence.

    Returns an index at which *target* occurs, or None when absent.
    (The parameter name 'list' shadows the builtin but is kept for
    interface compatibility with existing callers.)
    """
    lo, hi = 0, len(list) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        probe = list[mid]
        if probe == target:
            return mid
        if probe < target:
            lo = mid + 1          # target is in the upper half
        else:
            hi = mid - 1          # target is in the lower half
    return None
# Look up each probe name; prints its index within *names*, or None
# when the name is absent.
for n in search_names:
    result = binary_search(names, n)
    print(result)
| HazeeqHaikal/learning-data-structure-with-python | binary_search.py | binary_search.py | py | 1,520 | python | hr | code | 0 | github-code | 13 |
1744887552 | from ..config.Mongodb import Mongodb
from ..models.entity.Restaurant import Restaurant
from bson.objectid import ObjectId
class Restaurant_dao:
    """Data-access object for the 'restaurant' Mongo collection."""

    def __init__(self):
        self.mongodb = Mongodb.getInstance()
        self.client = self.mongodb.client

    def searchRestaurant(self, search):
        """Return all restaurants (when *search* is None) or those whose
        name matches *search* case-insensitively, best ranking first."""
        response = []
        restaurant_db = self.mongodb.db.restaurant
        if search is None:
            tempList = restaurant_db.find().sort([("ranking", -1)])
        else:
            tempList = restaurant_db.find({"name": {"$regex": search, "$options" : "i"}}).sort([("ranking", -1)])
        for item in tempList:
            response.append(Restaurant(item))
        return response

    def getRestaurant(self, restaurant_id):
        """Fetch a single restaurant by its ObjectId string."""
        restaurant_db = self.mongodb.db.restaurant
        temp = restaurant_db.find_one({"_id": ObjectId(restaurant_id)})
        return Restaurant(temp)

    def updateRestaurant(self, data):
        """Update the mutable fields of an existing restaurant in place.

        Fixes two defects in the original implementation:
        * Collection.update() is deprecated -> update_one();
        * the update document lacked the "$set" operator, so the whole
          document was REPLACED (silently dropping any field not listed)
          instead of being updated field-by-field.
        """
        restaurant_db = self.mongodb.db.restaurant
        restaurant_db.update_one(
            { "_id": data.id },
            {"$set": {
                "name": data.name,
                "address": data.address,
                "region": data.region,
                "district": data.district,
                "ranking": data.ranking,
                "open_close": data.open_close,
                "comment_negative": data.comment_negative,
                "comment_positive": data.comment_positive
            }}
        )
32484822203 | #!/usr/bin/env python
import numpy as np
import logging
import gomill
import gomill.common
from gomill import gtp_engine, gtp_states
from gomble import MoveProbBot
import kombilo_book
from kombilo_book import MoveFinder, MoveFinderRet, MoveValue
def make_engine(player):
    """Return a Gtp_engine_protocol which runs the specified player."""
    # Only 19x19 is accepted: the underlying fuseki database is 19x19.
    gtp_state = gtp_states.Gtp_state(move_generator=player.genmove,
                                     acceptable_sizes=(19,))
    engine = gtp_engine.Gtp_engine_protocol()
    engine.add_protocol_commands()
    # Expose both the state handlers and the player's custom commands.
    engine.add_commands(gtp_state.get_handlers())
    engine.add_commands(player.get_handlers())
    return engine
class KombiloFusekiPlayer(object):
    """GTP player that answers genmove from a Kombilo fuseki database."""

    def __init__(self):
        # GTP command name -> handler method.
        self.handlers = {'name': self.handle_name,
                         'move_probabilities': self.handle_move_probabilities,
                         'kombilofuseki-weight': self.handle_weight,
                         }
        self.name = "Kombilo Fuseki Bot, v0.1"
        # Move finder ranks candidate moves by frequency ('freq').
        self.mf = MoveFinder('freq')
        # Result of the most recent by_the_book() lookup (empty initially).
        self.mps = MoveFinderRet(None, None, None, None)

    def genmove(self, state, player):
        """
        :returns: gomill.Move_generator_result
        """
        logging.debug("KombiloFusekiPlayer.genmove()")
        result = gtp_states.Move_generator_result()
        if state.board.side != 19:
            logging.warn("Unsupported board size '%d'"%state.board.side )
            result.pass_move = True
            return result
        self.mps = self.mf.by_the_book(state.board, player)
        if self.mps.move:
            result.move = gomill.common.move_from_vertex(self.mps.move,
                                                         state.board.side)
        else:
            # No book move available -> pass.
            result.pass_move = True
        return result

    def handle_name(self, args=[]):
        # NOTE(review): mutable default argument, and the fallback returns
        # the class object rather than a string; unreachable in practice
        # because __init__ always sets self.name.
        if self.name is None:
            return self.__class__
        return self.name

    def handle_move_probabilities(self, args):
        """One "<vertex> <probability>" line per candidate move."""
        logging.debug("KombiloFusekiPlayer.handle_move_probabilities()")
        if not self.mps.probs:
            return ''
        return '\n'.join( "%s %.6f"%(move, prob) for move, prob in self.mps.probs )

    def handle_weight(self, args):
        """Weight of the last book lookup, or '' when none is available."""
        logging.debug("KombiloFusekiPlayer.handle_weight()")
        if self.mps.weight is None:
            return ''
        return '%.3f'%(self.mps.weight)

    def get_handlers(self):
        return self.handlers
if __name__ == "__main__":
logging.basicConfig(format='KP %(asctime)s %(levelname)s: %(message)s',
#level=logging.INFO)
level=logging.DEBUG)
# player def
player = KombiloFusekiPlayer()
# player => engine => RUN
engine = make_engine(player)
gomill.gtp_engine.run_interactive_gtp_session(engine)
| jmoudrik/gomble | kombilo_player.py | kombilo_player.py | py | 2,802 | python | en | code | 0 | github-code | 13 |
import argparse
from pyteomics import mzid

# CLI: filter.py <input.mzid> <q-value threshold> <output.mzid>
parser = argparse.ArgumentParser(description='Filter an MzIdentML file by q-value')
parser.add_argument('input', help='input mzid file')
parser.add_argument('threshold', type=float, help='maximum q value')
parser.add_argument('output', help='location of filtered mzid file')

# XML namespace prefix used by mzIdentML 1.1 elements.
namespace = '{http://psidev.info/psi/pi/mzIdentML/1.1}'

args = parser.parse_args()

m = mzid.MzIdentML(args.input)
m.build_tree()
tree = m._tree  # NOTE(review): relies on a private pyteomics attribute

identifications = tree.findall('./*/%sAnalysisData/*/%sSpectrumIdentificationResult' % (namespace, namespace))

num_identifications = 0
num_removals = 0
for ident in identifications:
    items = ident.findall('%sSpectrumIdentificationItem' % namespace)
    for item in items:
        # MS-GF+ q-value reported as a cvParam on each identification item.
        q_value = float(item.find('%scvParam[@name=\'MS-GF:QValue\']' % namespace).attrib['value'])
        num_identifications += 1
        # Drop the whole spectrum result as soon as one item fails the cut.
        if q_value > args.threshold:
            # NOTE(review): '..' parent lookup from an element is limited
            # in ElementTree — confirm it resolves on the tree in use.
            ident.find('..').remove(ident)
            num_removals += 1
            break

tree.write(args.output, xml_declaration=True)
print('num identifications: %d, num removals: %d, num confident matches: %d' % (num_identifications, num_removals, num_identifications - num_removals))
| mrForce/tiny_scripts | python/mzid_qvalue_filter/filter.py | filter.py | py | 1,213 | python | en | code | 0 | github-code | 13 |
4320562471 | ##############################################################################
# Copyright (C) 2018, 2019, 2020 Dominic O'Kane
##############################################################################
from numba import njit, float64, int64
from scipy import integrate
from math import exp, log, pi
import numpy as np # I USE NUMPY FOR EXP, LOG AND SQRT AS THEY HANDLE IMAGINARY PARTS
from ..utils.global_vars import gDaysInYear
from ..utils.global_types import OptionTypes
from ..utils.math import norminvcdf
from ..utils.error import FinError
##########################################################################
# Heston Process
# dS = rS dt + sqrt(V) * S * dz
# dV = kappa(theta-V) dt + sigma sqrt(V) dz
# corr(dV,dS) = rho dt
# Rewritten as
# dS = rS dt + sqrt(V) * S * (rhohat dz1 + rho dz2)
# dV = kappa(theta-V) dt + sigma sqrt(V) dz2
# where rhohat = sqrt(1-rho*rho)
###############################################################################
# TODO - DECIDE WHETHER TO OO MODEL
# TODO - NEEDS CHECKING FOR MC CONVERGENCE
###############################################################################
from enum import Enum
class HestonNumericalScheme(Enum):
    # Monte-Carlo discretisation schemes for the Heston SDE pair.
    EULER = 1     # first-order Euler with variance truncation
    EULERLOG = 2  # Euler on log-spot (better positivity behaviour)
    QUADEXP = 3   # Andersen (2006) quadratic-exponential scheme
###############################################################################
@njit(float64[:, :](float64, float64, float64, float64, float64, float64,
                    float64, float64, float64, float64, int64, int64, int64),
      cache=True, fastmath=True)
def get_paths(s0, r, q, v0, kappa, theta, sigma, rho, t, dt, num_paths,
              seed, scheme):
    """Simulate Heston spot paths.

    Returns a (num_paths, num_steps) array of spot prices; column 0 is s0.
    *scheme* is the integer value of a HestonNumericalScheme member.
    Numba-jitted, so only njit-compatible constructs are used.
    """
    np.random.seed(seed)
    num_steps = int(t / dt)
    sPaths = np.zeros(shape=(num_paths, num_steps))
    sPaths[:, 0] = s0
    sdt = np.sqrt(dt)
    # Decompose the correlated Brownians: dzS = rho*dzV + rhohat*dz_perp.
    rhohat = np.sqrt(1.0 - rho * rho)
    sigma2 = sigma * sigma

    if scheme == HestonNumericalScheme.EULER.value:
        # Basic scheme to first order with truncation on variance
        for iPath in range(0, num_paths):
            s = s0
            v = v0
            for iStep in range(1, num_steps):
                z1 = np.random.normal(0.0, 1.0) * sdt
                z2 = np.random.normal(0.0, 1.0) * sdt
                zV = z1
                zS = rho * z1 + rhohat * z2
                # Full truncation: negative variance is floored at zero.
                vplus = max(v, 0.0)
                rtvplus = np.sqrt(vplus)
                v += kappa * (theta - vplus) * dt + sigma * \
                    rtvplus * zV + 0.25 * sigma2 * (zV * zV - dt)
                s += (r - q) * s * dt + rtvplus * s * \
                    zS + 0.5 * s * vplus * (zV * zV - dt)
                sPaths[iPath, iStep] = s

    elif scheme == HestonNumericalScheme.EULERLOG.value:
        # Basic scheme to first order with truncation on variance,
        # applied to x = log(S) for better positivity behaviour.
        for iPath in range(0, num_paths):
            x = log(s0)
            v = v0
            for iStep in range(1, num_steps):
                zV = np.random.normal(0.0, 1.0) * sdt
                zS = rho * zV + rhohat * np.random.normal(0.0, 1.0) * sdt
                vplus = max(v, 0.0)
                rtvplus = np.sqrt(vplus)
                x += (r - q - 0.5 * vplus) * dt + rtvplus * zS
                v += kappa * (theta - vplus) * dt + sigma * \
                    rtvplus * zV + sigma2 * (zV * zV - dt) / 4.0
                sPaths[iPath, iStep] = exp(x)

    elif scheme == HestonNumericalScheme.QUADEXP.value:
        # Quadratic-exponential scheme due to Leif Andersen (2006).
        Q = exp(-kappa * dt)
        psic = 1.50          # switching threshold between the two samplers
        gamma1 = 0.50        # central discretisation weights
        gamma2 = 0.50
        K0 = -rho * kappa * theta * dt / sigma
        K1 = gamma1 * dt * (kappa * rho / sigma - 0.5) - rho / sigma
        K2 = gamma2 * dt * (kappa * rho / sigma - 0.5) + rho / sigma
        K3 = gamma1 * dt * (1.0 - rho * rho)
        K4 = gamma2 * dt * (1.0 - rho * rho)
        A = K2 + 0.5 * K4
        mu = (r - q)
        c1 = sigma2 * Q * (1.0 - Q) / kappa
        c2 = theta * sigma2 * ((1.0 - Q)**2) / 2.0 / kappa
        for iPath in range(0, num_paths):
            x = log(s0)
            vn = v0
            for iStep in range(1, num_steps):
                zV = np.random.normal(0, 1)
                zS = rho * zV + rhohat * np.random.normal(0, 1)
                # Conditional moments of V(t+dt) given V(t)=vn.
                m = theta + (vn - theta) * Q
                m2 = m * m
                s2 = c1 * vn + c2
                psi = s2 / m2
                u = np.random.uniform(0.0, 1.0)
                if psi <= psic:
                    # Quadratic sampler: V' = a (b + Z)^2.
                    b2 = 2.0 / psi - 1.0 + \
                        np.sqrt((2.0 / psi) * (2.0 / psi - 1.0))
                    a = m / (1.0 + b2)
                    b = np.sqrt(b2)
                    zV = norminvcdf(u)
                    vnp = a * ((b + zV)**2)
                    # Martingale correction for the drift term K0.
                    d = (1.0 - 2.0 * A * a)
                    M = exp((A * b2 * a) / d) / np.sqrt(d)
                    K0 = -log(M) - (K1 + 0.5 * K3) * vn
                else:
                    # Exponential sampler with a point mass at zero.
                    p = (psi - 1.0) / (psi + 1.0)
                    beta = (1.0 - p) / m
                    if u <= p:
                        vnp = 0.0
                    else:
                        vnp = log((1.0 - p) / (1.0 - u)) / beta
                    M = p + beta * (1.0 - p) / (beta - A)
                    K0 = -log(M) - (K1 + 0.5 * K3) * vn
                x += mu * dt + K0 + (K1 * vn + K2 * vnp) + \
                    np.sqrt(K3 * vn + K4 * vnp) * zS
                sPaths[iPath, iStep] = exp(x)
                vn = vnp
    else:
        raise FinError("Unknown FinHestonNumericalSchme")

    return sPaths
###############################################################################
class Heston():
def __init__(self, v0, kappa, theta, sigma, rho):
verbose = False
if 2.0 * kappa * theta <= sigma and verbose:
print("Feller condition not satisfied. Zero Variance possible")
self._v0 = v0
self._kappa = kappa
self._theta = theta
self._sigma = sigma
self._rho = rho
###############################################################################
def value_mc(self,
valuation_date,
option,
stock_price,
interest_rate,
dividend_yield,
num_paths,
num_steps_per_year,
seed,
scheme=HestonNumericalScheme.EULERLOG):
tau = (option._expiry_date - valuation_date) / gDaysInYear
K = option._strike_price
dt = 1.0 / num_steps_per_year
schemeValue = float(scheme.value)
sPaths = get_paths(stock_price,
interest_rate,
dividend_yield,
self._v0,
self._kappa,
self._theta,
self._sigma,
self._rho,
tau,
dt,
num_paths,
seed,
schemeValue)
if option._option_type == OptionTypes.EUROPEAN_CALL:
path_payoff = np.maximum(sPaths[:, -1] - K, 0.0)
elif option._option_type == OptionTypes.EUROPEAN_PUT:
path_payoff = np.maximum(K - sPaths[:, -1], 0.0)
else:
raise FinError("Unknown option type.")
payoff = np.mean(path_payoff)
v = payoff * exp(-interest_rate * tau)
return v
###############################################################################
def value_lewis(self,
valuation_date,
option,
stock_price,
interest_rate,
dividend_yield):
tau = (option._expiry_date - valuation_date) / gDaysInYear
rho = self._rho
sigma = self._sigma
v0 = self._v0
kappa = self._kappa
theta = self._theta
r = interest_rate
q = dividend_yield
S0 = stock_price
K = option._strike_price
F = S0 * exp((r - q) * tau)
V = sigma * sigma
def phi(k_in,):
k = k_in + 0.5 * 1j
b = kappa + 1j * rho * sigma * k
d = np.sqrt(b**2 + V * k * (k - 1j))
g = (b - d) / (b + d)
T_m = (b - d) / V
Q = np.exp(-d * tau)
T = T_m * (1.0 - Q) / (1.0 - g * Q)
W = kappa * theta * (tau * T_m - 2.0 *
np.log((1.0 - g * Q) / (1.0 - g)) / V)
phi = np.exp(W + v0 * T)
return phi
def phi_transform(x):
def integrand(k): return 2.0 * np.real(np.exp(-1j *
k * x) * phi(k)) / (k**2 + 1.0 / 4.0)
return integrate.quad(integrand, 0, np.inf)[0]
x = log(F / K)
I1 = phi_transform(x) / (2.0 * pi)
v1 = F * exp(-r * tau) - np.sqrt(K * F) * exp(-r * tau) * I1
# v2 = S0 * exp(-q*tau) - K * exp(-r*tau) * I1
return(v1)
###############################################################################
def value_lewis_rouah(self,
valuation_date,
option,
stock_price,
interest_rate,
dividend_yield):
tau = (option._expiry_date - valuation_date) / gDaysInYear
rho = self._rho
sigma = self._sigma
v0 = self._v0
kappa = self._kappa
theta = self._theta
q = dividend_yield
r = interest_rate
V = sigma * sigma
def f(k_in):
k = k_in + 0.5 * 1j
b = (2.0 / V) * (1j * k * rho * sigma + kappa)
e = np.sqrt(b**2 + 4.0 * k * (k - 1j) / V)
g = (b - e) / 2.0
h = (b - e) / (b + e)
q = V * tau / 2.0
Q = np.exp(-e * q)
H = np.exp((2.0 * kappa * theta / V) * (q * g - np.log((1.0 -
h * Q) / (1.0 - h))) + v0 * g * (1.0 - Q) / (1.0 - h * Q))
integrand = H * np.exp(-1j * k * X) / (k * k - 1j * k)
return integrand.real
S0 = stock_price
F = S0 * exp((r - q) * tau)
K = option._strike_price
X = log(F / K)
integral = integrate.quad(f, 0.0, np.inf)[0] * (1.0 / pi)
v = S0 * exp(-q * tau) - K * exp(-r * tau) * integral
return (v)
###############################################################################
# Taken from Nick Weber's VBA Finance book
###############################################################################
def value_weber(self,
valuation_date,
option,
stock_price,
interest_rate,
dividend_yield):
tau = (option._expiry_date - valuation_date) / gDaysInYear
rho = self._rho
sigma = self._sigma
v0 = self._v0
kappa = self._kappa
theta = self._theta
q = dividend_yield
r = interest_rate
S0 = stock_price
K = option._strike_price
V = sigma**2
def f(s, b):
def integrand(u):
beta = b - 1j * rho * sigma * u
d = np.sqrt((beta**2) - V * u * (s * 1j - u))
g = (beta - d) / (beta + d)
Q = np.exp(-d * tau)
B = (beta - d) * (1.0 - Q) / (1.0 - g * Q) / V
A = kappa * ((beta - d) * tau - 2.0 *
np.log((1.0 - g * Q) / (1.0 - g))) / V
v = np.exp(A * theta + B * v0 + 1j * u *
np.log(S0 / (K * np.exp(-(r - q) * tau)))) / (u * 1j)
return v.real
area = 0.50 + (1.0 / pi) * integrate.quad(integrand, 0, np.inf)[0]
return area
v = S0 * exp(-q * tau) * f(1.0, kappa - rho * sigma) - \
exp(-r * tau) * K * f(-1.0, kappa)
return(v)
###############################################################################
# Gatheral book page 19 with definition of x given on page 16 and noting
# that the value C is a forward value and so needs to be discounted
###############################################################################
    def value_gatheral(self,
                       valuation_date,
                       option,
                       stock_price,
                       interest_rate,
                       dividend_yield):
        """Heston option value following Gatheral's formulation (see the
        banner comment above: 'The Volatility Surface' p.19, with x defined
        on p.16); the integral yields a forward value, discounted here.
        """
        tau = (option._expiry_date - valuation_date) / gDaysInYear
        rho = self._rho
        sigma = self._sigma
        v0 = self._v0
        kappa = self._kappa
        theta = self._theta
        q = dividend_yield
        r = interest_rate
        S0 = stock_price
        K = option._strike_price
        F = S0 * exp((r - q) * tau)
        # Log-moneyness of the forward against the strike.
        x0 = log(F / K)
        def ff(j):
            # Pseudo-probability P_j (j is 0 or 1), recovered by integrating
            # the real part of exp(C*theta + D*v0 + i*u*x0)/(i*u).
            def integrand(u):
                V = sigma * sigma
                A = -u * u / 2.0 - 1j * u / 2.0 + 1j * j * u
                B = kappa - rho * sigma * j - rho * sigma * 1j * u
                G = V / 2.0
                d = np.sqrt(B**2 - 4.0 * A * G)
                rplus = (B + d) / 2.0 / G
                rminus = (B - d) / 2.0 / G
                R = rminus / rplus
                Q = np.exp(-d * tau)
                D = rminus * (1.0 - Q) / (1.0 - R * Q)
                C = kappa * (rminus * tau - (2.0 / V) *
                             np.log((1.0 - R * Q) / (1.0 - R)))
                phi = np.exp(C * theta + D * v0 + 1j * u * x0) / (1j * u)
                return phi.real
            area = 0.50 + 1.0 / pi * integrate.quad(integrand, 0.0, np.inf)[0]
            return area
        v = S0 * exp(-q * tau) * ff(1) - K * exp(-r * tau) * ff(0)
        return(v)
###############################################################################
| domokane/FinancePy | financepy/models/heston.py | heston.py | py | 14,264 | python | en | code | 1,701 | github-code | 13 |
5255652282 | from numpy.typing import NDArray
import numpy as np
class NaiveBayes:
    """Gaussian naive Bayes classifier.

    Assumes features are conditionally independent given the class and
    normally distributed; fit() estimates per-class feature means,
    variances and class priors, predict() returns the MAP class label.
    """

    def fit(self, X: NDArray, y: NDArray) -> None:
        """Estimate per-class Gaussian parameters and priors from (X, y)."""
        n_samples, n_features = X.shape
        self._classes: NDArray = np.unique(y)
        n_classes = len(self._classes)
        # One row of mean/variance per class, plus one prior per class.
        self._mean: NDArray = np.zeros((n_classes, n_features), dtype=np.float64)
        self._var: NDArray = np.zeros((n_classes, n_features), dtype=np.float64)
        self._priors: NDArray = np.zeros(n_classes, dtype=np.float64)
        for idx, c in enumerate(self._classes):
            X_c = X[y == c]
            self._mean[idx, :] = X_c.mean(axis=0)
            self._var[idx, :] = X_c.var(axis=0)
            self._priors[idx] = X_c.shape[0] / float(n_samples)

    def predict(self, X: NDArray) -> NDArray:
        """Return the predicted class label for each row of X."""
        return np.array([self._predict(x) for x in X])

    def _predict(self, x: NDArray):
        """Return the class with the highest log-posterior for sample x.

        (The original annotated this as returning NDArray, but it returns
        a single class label.)
        """
        posteriors = []
        for idx, c in enumerate(self._classes):
            # log P(c) + sum_i log P(x_i | c); log-space avoids underflow.
            likelihood = np.sum(np.log(self._pdf(idx, x)))
            prior = np.log(self._priors[idx])
            posteriors.append(prior + likelihood)
        return self._classes[np.argmax(posteriors)]

    def _pdf(self, class_idx: int, x: NDArray) -> NDArray:
        """Elementwise Gaussian likelihood of features x under class_idx.

        (The original annotated this as np.float64, but the result is a
        per-feature array.)
        """
        mean = self._mean[class_idx]
        var = self._var[class_idx]
        numerator = np.exp(-(x - mean) ** 2 / (2 * var))
        denominator = np.sqrt(2 * np.pi * var)
        return numerator / denominator
6200520072 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 16 20:47:35 2020
@author: vijetadeshpande
"""
import torch
import sys
sys.path.insert(1, r'/Users/vijetadeshpande/Documents/GitHub/Sequence2Sequence model for CEPAC prediction/Data processing, runs generator and utility file')
import utils
import pandas as pd
import os
from copy import deepcopy
def evaluate(model, data, criterion, device, seqpath):
    """Run the seq2seq model over `data` without gradient updates.

    For every (src, trg) example this collects the raw predictions, the
    predictions/targets denormalized with the mean/sd CSV stored under
    `seqpath`, the attention weights, and the average criterion loss.
    Teacher forcing is whatever model(src, trg) does by default.
    """
    # initialize
    model.eval()
    epoch_loss = 0
    # collect all the outputs
    outputs = []
    denorm_outputs = []
    denorm_targets = []
    # import denormalization parameters (row 0 = mean, row 1 = sd, see below)
    mean_sd = pd.read_csv(os.path.join(seqpath, 'output_mean_and_sd.csv'), header = 0, index_col = 0)
    # initialize tensor to store attention weights
    # NOTE(review): assumes every batch has the same src/trg lengths as
    # data[0]; a smaller final batch is handled via CUR_SIZE below.
    BATCH_SIZE, SRC_LEN, _ = data[0][0].shape
    _, TRG_LEN, _ = data[0][1].shape
    attention_ws = torch.zeros((len(data), BATCH_SIZE, TRG_LEN-1, SRC_LEN))
    with torch.no_grad():
        idx = -1
        for example in data:
            idx += 1
            # access the source and target sequence
            src = example[0].to(device)
            trg = example[1].to(device)
            # predict output and append
            output, attention_w = model(src, trg)#, 0) # switch off teacher forcing
            outputs.append(output.numpy())
            CUR_SIZE, _, _ = attention_w.shape
            # drop the first target step's weights (presumably the <sos>
            # position — mirrors the [1:, ...] loss slicing below)
            attention_ws[idx, 0:CUR_SIZE, :, :] = attention_w[:, 1:, :]
            # denormalize prediction and append
            denorm_output = utils.denormalize(deepcopy(output.numpy()), mean_sd.iloc[0, :].values, mean_sd.iloc[1, :].values)
            denorm_outputs.append(denorm_output)
            # denormalize target and save (permute to match output layout)
            trg = trg.permute(1, 0, 2)
            denorm_target = utils.denormalize(deepcopy(trg.numpy()), mean_sd.iloc[0, :].values, mean_sd.iloc[1, :].values)
            denorm_targets.append(denorm_target)
            # dimension check:
            # trg = [target_len, batch_size, out dim]
            # output = [target_len, batch_size, out dim]
            # calculate error: skip position 0 and flatten to 2-D for criterion
            TRG_LEN, BATCH_SIZE, OUTPUT_DIM = output.shape
            output = torch.reshape(output[1:, :, :], ((TRG_LEN - 1)*BATCH_SIZE, OUTPUT_DIM))
            trg = torch.reshape(trg[1:, :, :], ((TRG_LEN - 1)*BATCH_SIZE, OUTPUT_DIM))
            loss = criterion(output, trg)
            # update error
            epoch_loss += loss.item()
    # return a dictionary
    all_metrics = {'average epoch loss': epoch_loss/len(data),
                   'normalized prediction': outputs,
                   'denormalized prediction': denorm_outputs,
                   'denormalized target': denorm_targets,
                   'attention weights': attention_ws}
    return all_metrics
| vijetadeshpande/meta-environment | Encoder decoder with attention/evaluate_EncDec.py | evaluate_EncDec.py | py | 2,914 | python | en | code | 0 | github-code | 13 |
37921616089 | #!/usr/bin/env python
"""Train foreground segmentation network."""
from __future__ import division
import numpy as np
import os
from utils import logger
from utils import BatchIterator, ConcurrentBatchIterator
from utils import plot_utils as pu
from utils.lazy_registerer import LazyRegisterer
from utils.step_counter import StepCounter
from utils.time_series_logger import TimeSeriesLogger
from cmd_args_parser import TrainArgsParser, DataArgsParser, CmdArgsParser
from experiment import TrainingExperimentBase
from runner import RunnerBase
from fg_model import get_model
class Runner(RunnerBase):
    """Driver that feeds dataset batches to the foreground model.

    Wraps the dataset in a cycling, shuffled BatchIterator (optionally
    prefetched by worker threads) and delegates the run loop to RunnerBase.
    """
    def __init__(self,
                 sess,
                 model,
                 dataset,
                 num_batch,
                 train_opt,
                 model_opt,
                 outputs,
                 step=StepCounter(0),
                 loggers=None,
                 phase_train=True,
                 increment_step=False):
        # NOTE(review): step=StepCounter(0) is a shared mutable default;
        # callers in this file always pass an explicit step.
        self.dataset = dataset
        self.log = logger.get()
        self.loggers = loggers
        self.add_orientation = model_opt['add_orientation']
        self.num_orientation_classes = model_opt['num_orientation_classes']
        # Must come after add_orientation is set (get_input_variables reads it).
        self.input_variables = self.get_input_variables()
        num_ex = dataset.get_dataset_size()
        batch_iter = BatchIterator(
            num_ex,
            batch_size=train_opt['batch_size'],
            get_fn=self.get_batch,
            cycle=True,
            progress_bar=False,
            shuffle=True,
            log_epoch=-1)
        if train_opt['prefetch']:
            # Wrap with a threaded iterator so batch loading overlaps compute.
            batch_iter = ConcurrentBatchIterator(
                batch_iter,
                max_queue_size=train_opt['queue_size'],
                num_threads=train_opt['num_worker'],
                log_queue=-1)
        super(Runner, self).__init__(
            sess,
            model,
            batch_iter,
            outputs,
            num_batch=num_batch,
            step=step,
            phase_train=phase_train,
            increment_step=increment_step)
    def get_input_variables(self):
        """Return the set of dataset variable names this runner needs."""
        variables = ['x', 'c_gt']
        if self.add_orientation:
            variables.append('d_gt')  # orientation labels only when enabled
        return set(variables)
    def get_batch(self, idx):
        """Transform a dataset get_batch into a dictionary to feed."""
        batch_ = self.dataset.get_batch(idx, variables=self.input_variables)
        batch = {}
        batch['x'] = batch_['x']
        batch['y_gt'] = batch_['c_gt']
        # Optional per-pixel weights pass through only when both the dataset
        # provides them and the model declares the matching input.
        if 'sem_weights' in batch_ and 'sem_weights' in self.model:
            batch['sem_weights'] = batch_['sem_weights']
        if 'ori_weights' in batch_ and 'ori_weights' in self.model:
            batch['ori_weights'] = batch_['ori_weights']
        if self.add_orientation:
            batch['d_gt'] = batch_['d_gt']
        return batch
class Trainer(Runner):
    """Runner that performs optimization steps and logs the training loss."""

    def __init__(self,
                 sess,
                 model,
                 dataset,
                 train_opt,
                 model_opt,
                 step=StepCounter(0),
                 loggers=None,
                 steps_per_log=10):
        super(Trainer, self).__init__(
            sess,
            model,
            dataset,
            steps_per_log,  # num_batch: log every `steps_per_log` steps
            train_opt,
            model_opt,
            ['loss', 'train_step'],  # fetch the loss and the update op
            step=step,
            loggers=loggers,
            phase_train=True,
            increment_step=True)

    def write_log(self, results):
        """Emit the current loss and step time to the console and CSV logs."""
        step_number = self.step.get()
        loss_value = results['loss']
        step_time = results['step_time']
        self.log.info('{:d} loss {:.4f} t {:.2f}ms'.format(step_number,
                                                           loss_value,
                                                           step_time))
        self.loggers['loss'].add(step_number, [loss_value, ''])
        self.loggers['step_time'].add(step_number, step_time)
class Evaluator(Runner):
    """Runner that measures IoU / loss metrics without taking optimizer steps.

    The original repeated the "put the value in the train column or the
    valid column" list-building logic five times; it is factored into the
    _columns() helper here.
    """

    def __init__(self,
                 sess,
                 model,
                 dataset,
                 train_opt,
                 model_opt,
                 step=StepCounter(0),
                 num_batch=10,
                 loggers=None,
                 phase_train=True):
        outputs = ['iou_soft', 'iou_hard', 'foreground_loss', 'loss']
        if model_opt['add_orientation']:
            outputs.extend(['orientation_ce', 'orientation_acc'])
        super(Evaluator, self).__init__(
            sess,
            model,
            dataset,
            num_batch,
            train_opt,
            model_opt,
            outputs,
            step=step,
            loggers=loggers,
            phase_train=phase_train,
            increment_step=False)

    def _columns(self, *values):
        """Interleave values with '' so each lands in the train column
        (when phase_train) or the valid column of the CSV logger."""
        line = []
        for value in values:
            if self.phase_train:
                line.extend([value, ''])
            else:
                line.extend(['', value])
        return line

    def write_log(self, results):
        """Log loss/IoU (and orientation metrics when enabled) at this step."""
        if self.loggers is not None:
            self.log.info('{:d} loss {:.4f} t {:.2f}ms'.format(
                self.step.get(), results['loss'], results['step_time']))
            if 'loss' in self.loggers:
                self.loggers['loss'].add(self.step.get(),
                                         self._columns(results['loss']))
            if 'iou' in self.loggers:
                self.loggers['iou'].add(
                    self.step.get(),
                    self._columns(results['iou_soft'], results['iou_hard']))
            if 'foreground_loss' in self.loggers:
                self.loggers['foreground_loss'].add(
                    self.step.get(),
                    self._columns(results['foreground_loss']))
            if self.add_orientation:
                if 'orientation_ce' in self.loggers:
                    self.loggers['orientation_ce'].add(
                        self.step.get(),
                        self._columns(results['orientation_ce']))
                if 'orientation_acc' in self.loggers:
                    self.loggers['orientation_acc'].add(
                        self.step.get(),
                        self._columns(results['orientation_acc']))
class Plotter(Runner):
    """Runner that renders one batch of inputs/outputs as thumbnail PNGs
    and registers them as image loggers on the dashboard."""
    def __init__(self,
                 sess,
                 model,
                 dataset,
                 train_opt,
                 model_opt,
                 logs_folder,
                 step=StepCounter(0),
                 split='train',
                 phase_train=False):
        outputs = ['x_trans', 'y_gt_trans', 'y_out']
        if model_opt['add_orientation']:
            outputs.extend(['d_out', 'd_gt_trans'])
        num_batch = 1
        self.split = split
        self.logs_folder = logs_folder
        # RGB palette: one colour per orientation bin.
        self.ori_color_wheel = np.array(
            [[255, 17, 0], [255, 137, 0], [230, 255, 0], [34, 255, 0],
             [0, 255, 213], [0, 154, 255], [9, 0, 255], [255, 0, 255]],
            dtype='uint8')
        # Semantic palette: same colours with a leading black background entry.
        self.sem_color_wheel = np.array(
            [[0, 0, 0], [255, 17, 0], [255, 137, 0], [230, 255, 0], [34, 255, 0],
             [0, 255, 213], [0, 154, 255], [9, 0, 255], [255, 0, 255]],
            dtype='uint8')
        loggers = self.get_loggers(model_opt['add_orientation'], split)
        super(Plotter, self).__init__(
            sess,
            model,
            dataset,
            num_batch,
            train_opt,
            model_opt,
            outputs,
            step=step,
            loggers=loggers,
            phase_train=phase_train,
            increment_step=False)
    def get_loggers(self, add_orientation, split):
        """Build one lazily-registered image logger per plotted panel."""
        loggers = {}
        labels = ['input', 'gt_segmentation', 'output_segmentation']
        if add_orientation:
            labels.extend(['gt_orientation', 'output_orientation'])
        for name in labels:
            key = '{}_{}'.format(name, split)
            loggers[name] = LazyRegisterer(
                os.path.join(self.logs_folder, '{}.png'.format(key)), 'image',
                'Samples {} {}'.format(name, split))
        return loggers
    def check_register(self):
        """Register every image logger once the first image files exist."""
        # NOTE(review): dict.keys()[0] and iterkeys() are Python 2 only.
        if not self.loggers[self.loggers.keys()[0]].is_registered():
            for name in self.loggers.iterkeys():
                self.loggers[name].register()
    @staticmethod
    def get_max_items_per_row(inp_height, inp_width):
        """Square images pack 8 thumbnails per row; non-square pack 4."""
        if inp_height == inp_width:
            return 8
        else:
            return 4
    def write_log(self, results):
        """Plot the batch: input thumbnails, GT/predicted segmentation and,
        when orientation is enabled, GT/predicted orientation maps."""
        x = results['x_trans']
        y_gt = results['y_gt_trans']
        y_out = results['y_out']
        max_items = self.get_max_items_per_row(x.shape[1], x.shape[2])
        if self.loggers is not None:
            if 'input' in self.loggers:
                pu.plot_thumbnails(
                    self.loggers['input'].get_fname(),
                    results['x_trans'],
                    axis=0,
                    max_items_per_row=max_items)
            if 'gt_segmentation' in self.loggers:
                if y_gt.shape[3] == 1:
                    # Single class: plot the mask channel directly.
                    plot_img = np.squeeze(y_gt, axis=3)
                    y_gt_mask = y_gt
                else:
                    # Multi-class: foreground mask = max over non-bg classes.
                    y_gt_mask = y_gt[:, :, :, 1:].max(axis=3, keepdims=True)
                    plot_img = self.build_orientation_img(y_gt, None,
                                                          self.sem_color_wheel)
                pu.plot_thumbnails(
                    self.loggers['gt_segmentation'].get_fname(),
                    plot_img,
                    axis=0,
                    max_items_per_row=max_items)
            if 'output_segmentation' in self.loggers:
                # Single class segmentation
                if y_gt.shape[3] == 1:
                    plot_img = np.squeeze(y_out, 3)
                else:
                    plot_img = self.build_orientation_img(y_out, None,
                                                          self.sem_color_wheel)
                pu.plot_thumbnails(
                    self.loggers['output_segmentation'].get_fname(),
                    plot_img,
                    axis=0,
                    max_items_per_row=max_items)
            if self.add_orientation:
                # Orientation maps are masked by the GT foreground mask
                # computed in the gt_segmentation branch above.
                d_gt = results['d_gt_trans']
                d_out = results['d_out']
                if 'gt_orientation' in self.loggers:
                    img = self.build_orientation_img(d_gt, y_gt_mask,
                                                     self.ori_color_wheel)
                    pu.plot_thumbnails(
                        self.loggers['gt_orientation'].get_fname(),
                        img,
                        axis=0,
                        max_items_per_row=max_items)
                if 'output_orientation' in self.loggers:
                    img = self.build_orientation_img(d_out, y_gt_mask,
                                                     self.ori_color_wheel)
                    pu.plot_thumbnails(
                        self.loggers['output_orientation'].get_fname(),
                        img,
                        axis=0,
                        max_items_per_row=max_items)
        self.check_register()
        self.batch_iter.reset()
    def build_orientation_img(self, d, y, cw):
        """Colour-code the per-pixel argmax over d's last axis with palette
        cw; if mask y is given it scales the RGB image elementwise."""
        d2 = np.expand_dims(d, 4)  # NOTE(review): unused local
        did = np.argmax(d, -1)
        c2 = cw[did.reshape([-1])].reshape(d.shape[0], d.shape[1], d.shape[2], 3)
        if y is not None:
            img = (c2 * y).astype('uint8')
        else:
            img = (c2).astype('uint8')
        return img
class FGExperiment(TrainingExperimentBase):
    """Experiment wiring: builds the CSV time-series loggers and the
    train/eval/plot runners around the foreground model."""
    def get_ts_loggers(self):
        """Create per-metric CSV loggers, resuming plots at the restored step."""
        model_opt = self.model_opt
        loggers = {}
        restore_step = self.step.get()
        loggers['loss'] = TimeSeriesLogger(
            os.path.join(self.logs_folder, 'loss.csv'), ['train', 'valid'],
            name='Loss',
            buffer_size=1,
            restore_step=restore_step)
        loggers['iou'] = TimeSeriesLogger(
            os.path.join(self.logs_folder, 'iou.csv'),
            ['train soft', 'valid soft', 'train hard', 'valid hard'],
            name='IoU',
            buffer_size=1,
            restore_step=restore_step)
        loggers['foreground_loss'] = TimeSeriesLogger(
            os.path.join(self.logs_folder, 'foreground_loss.csv'),
            ['train', 'valid'],
            name='Foreground loss',
            buffer_size=1,
            restore_step=restore_step)
        # Orientation metrics are only logged when the model predicts them.
        if model_opt['add_orientation']:
            loggers['orientation_ce'] = TimeSeriesLogger(
                os.path.join(self.logs_folder, 'orientation_ce.csv'),
                ['train', 'valid'],
                name='Orientation CE',
                buffer_size=1,
                restore_step=restore_step)
            loggers['orientation_acc'] = TimeSeriesLogger(
                os.path.join(self.logs_folder, 'orientation_acc.csv'),
                ['train', 'valid'],
                name='Orientation accuracy',
                buffer_size=1,
                restore_step=restore_step)
        loggers['step_time'] = TimeSeriesLogger(
            os.path.join(self.logs_folder, 'step_time.csv'),
            'step time (ms)',
            name='Step time',
            buffer_size=1,
            restore_step=restore_step)
        return loggers
    def get_runner_trainval(self):
        """Evaluator over the training split (metrics in the train columns)."""
        return Evaluator(
            self.sess,
            self.model,
            self.dataset['train'],
            self.opt,
            self.model_opt,
            step=self.step,
            loggers=self.loggers,
            phase_train=True)
    def get_runner_train(self):
        """Trainer that performs the optimization steps."""
        return Trainer(
            self.sess,
            self.model,
            self.dataset['train'],
            self.opt,
            self.model_opt,
            step=self.step,
            loggers=self.loggers)
    def get_runner_valid(self):
        """Evaluator over the validation split."""
        return Evaluator(
            self.sess,
            self.model,
            self.dataset['valid'],
            self.opt,
            self.model_opt,
            step=self.step,
            loggers=self.loggers,
            phase_train=False)
    def get_runner_plot_train(self):
        """Plotter that renders one training batch to PNG panels."""
        return Plotter(
            self.sess,
            self.model,
            self.dataset['train'],
            self.opt,
            self.model_opt,
            step=self.step,
            logs_folder=self.logs_folder,
            split='train',
            phase_train=True)
    def get_runner_plot_valid(self):
        """Plotter that renders one validation batch to PNG panels."""
        return Plotter(
            self.sess,
            self.model,
            self.dataset['valid'],
            self.opt,
            self.model_opt,
            step=self.step,
            logs_folder=self.logs_folder,
            split='valid',
            phase_train=False)
    def get_model(self):
        """Build the foreground model graph from model_opt."""
        return get_model(self.model_opt)
class FGModelArgsParser(CmdArgsParser):
    """Command-line options describing the FG model architecture, and
    their translation into the model_opt dictionary."""

    def add_args(self):
        """Register the architecture / training-schedule CLI flags."""
        self.parser.add_argument('--cnn_filter_size', default='3,3,3,3,3,3,3,3,3,3')
        self.parser.add_argument(
            '--cnn_depth', default='8,8,16,16,32,32,64,64,128,128')
        self.parser.add_argument('--cnn_pool', default='1,2,1,2,1,2,1,2,1,2')
        self.parser.add_argument(
            '--dcnn_filter_size', default='3,3,3,3,3,3,3,3,3,3,3')
        self.parser.add_argument(
            '--dcnn_depth', default='128,128,64,64,32,32,16,16,8,8,1')
        self.parser.add_argument('--dcnn_pool', default='2,1,2,1,2,1,2,1,2,1,1')
        self.parser.add_argument('--add_skip_conn', action='store_true')
        # From the input image all the way to the last second layer of CNN.
        self.parser.add_argument('--cnn_skip_mask', default='1,0,0,0,0,0,1,0,1,0')
        self.parser.add_argument('--dcnn_skip_mask', default='0,1,0,1,0,0,0,0,0,1')
        self.parser.add_argument('--segm_loss_fn', default='iou')
        self.parser.add_argument('--add_orientation', action='store_true')
        self.parser.add_argument('--num_orientation_classes', default=8, type=int)
        self.parser.add_argument('--num_semantic_classes', default=1, type=int)
        self.parser.add_argument('--base_learn_rate', default=1e-3, type=float)
        self.parser.add_argument('--learn_rate_decay', default=0.96, type=float)
        self.parser.add_argument(
            '--steps_per_learn_rate_decay', default=5000, type=int)
        self.parser.add_argument('--rnd_colour', action='store_true')
        self.parser.add_argument('--padding', default=16, type=int)
        self.parser.add_argument('--optimizer', default='adam')

    @staticmethod
    def _int_list(csv_str):
        """Parse '1,2,3' into [1, 2, 3]."""
        return [int(tok) for tok in csv_str.split(',')]

    @staticmethod
    def _mask_list(csv_str):
        """Parse '1,0,1' into [True, False, True]."""
        return [tok == '1' for tok in csv_str.split(',')]

    def make_opt(self, args):
        """Build the model_opt dict from the parsed CLI args.

        Bug fix: the parsed --cnn_filter_size / --dcnn_filter_size lists
        were computed and then discarded, with [3]*len(depth) hard-coded in
        their place, so those flags silently had no effect. They are now
        honoured; the defaults are all 3s, so default behaviour is unchanged.
        """
        cnn_fsize_list = self._int_list(args.cnn_filter_size)
        cnn_depth_list = self._int_list(args.cnn_depth)
        cnn_pool_list = self._int_list(args.cnn_pool)
        cnn_skip_mask_list = self._mask_list(args.cnn_skip_mask)
        dcnn_fsize_list = self._int_list(args.dcnn_filter_size)
        dcnn_depth_list = self._int_list(args.dcnn_depth)
        dcnn_pool_list = self._int_list(args.dcnn_pool)
        dcnn_skip_mask_list = self._mask_list(args.dcnn_skip_mask)
        inp_height, inp_width, timespan = self.get_inp_dim(args.dataset)
        model_opt = {
            'inp_height': inp_height,
            'inp_width': inp_width,
            'inp_depth': 3,
            'padding': args.padding,
            'cnn_filter_size': cnn_fsize_list,
            'cnn_depth': cnn_depth_list,
            'cnn_pool': cnn_pool_list,
            'cnn_skip_mask': cnn_skip_mask_list,
            'dcnn_filter_size': dcnn_fsize_list,
            'dcnn_depth': dcnn_depth_list,
            'dcnn_pool': dcnn_pool_list,
            'dcnn_skip_mask': dcnn_skip_mask_list,
            'weight_decay': 5e-5,
            'use_bn': True,
            'segm_loss_fn': args.segm_loss_fn,
            'rnd_hflip': False,
            'rnd_vflip': False,
            'rnd_transpose': False,
            'rnd_colour': args.rnd_colour,
            'add_skip_conn': args.add_skip_conn,
            'base_learn_rate': args.base_learn_rate,
            'learn_rate_decay': args.learn_rate_decay,
            'steps_per_learn_rate_decay': args.steps_per_learn_rate_decay,
            'add_orientation': args.add_orientation,
            'num_orientation_classes': args.num_orientation_classes,
            'num_semantic_classes': args.num_semantic_classes,
            'optimizer': args.optimizer
        }
        return model_opt
if __name__ == '__main__':
  # Entry point: combine the generic training/data arg parsers with the
  # FG-model-specific one and launch the 'fg_model' training experiment.
  parsers = {
      'default': TrainArgsParser(),
      'data': DataArgsParser(),
      'model': FGModelArgsParser()
  }
  FGExperiment.create_from_main(
      'fg_model', parsers=parsers, description='training').run()
14957777310 | # CBV 방식으로 변경하기
# from django.shortcuts import render
from django.views.generic import ListView, DetailView, CreateView, UpdateView # Detail을 불러오겠음
# django -> views -> generic 안의 CreateView를 불러오겠음.
# 로그인 관련해서 django에서 지원해주는 라이브러리
# 로그인되어있을때만 보여줌
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
# UserPassesTestMixin : 스태프
from .models import Post, Category, Tag, Comment
from django.shortcuts import render, redirect, get_object_or_404
from django.core.exceptions import PermissionDenied
from django.utils.text import slugify
from .forms import CommentForm
from django.db.models import Q
# --------------------------------------------------------------------------------------------------------------
class CommentUpdate(LoginRequiredMixin, UpdateView):
    """Edit view for a comment; only the comment's author may use it."""
    model = Comment
    form_class = CommentForm

    def dispatch(self, request, *args, **kwargs):
        """Reject any request that is not from the authenticated author."""
        user = request.user
        if not (user.is_authenticated and user == self.get_object().author):
            raise PermissionDenied
        return super(CommentUpdate, self).dispatch(request, *args, **kwargs)
class PostUpdate(LoginRequiredMixin, UpdateView):
    """Edit an existing Post; only its author may access it.

    Renders blog/post_update_form.html and, on save, rebuilds the post's
    tags from the free-text 'tags_str' form field.
    """
    model = Post
    fields = ['title', 'hook_text', 'content', 'head_image', 'file_upload', 'category', 'tags']
    # Use post_update_form.html instead of the default *_form.html.
    template_name = 'blog/post_update_form.html'
    # Add extra values to the template context.
    def get_context_data(self, **kwargs):
        """Pre-fill the tag input with the post's current tags ('a;b;c')."""
        context = super(PostUpdate, self).get_context_data()
        if self.object.tags.exists():
            tags_str_list = list()
            for t in self.object.tags.all():
                tags_str_list.append(t.name)
            context['tags_str_default'] = ';'.join(tags_str_list)
        return context
    def form_valid(self, form):
        """Save the post, then rebuild its tag set from 'tags_str'."""
        response = super(PostUpdate, self).form_valid(form)
        self.object.tags.clear()
        tags_str = self.request.POST.get('tags_str')  # value from the POSTed form
        if tags_str:
            tags_str = tags_str.strip()  # strip(): remove surrounding whitespace
            tags_str = tags_str.replace(',', ';').replace(' ', '')  # unify ',' to ';' and drop inner spaces
            while ';;' in tags_str:
                tags_str = tags_str.replace(';;', ';')
            tags_list = tags_str.split(';')  # split into individual tag names on ';'
            for t in tags_list:
                t = t.strip()
                if len(t) < 1: continue
                tag, is_tag_created = Tag.objects.get_or_create(name=t)
                # Reuse an existing tag with the same name, otherwise create it.
                if is_tag_created:
                    tag.slug = slugify(t, allow_unicode=True)
                    # allow_unicode so non-ASCII (e.g. Korean) names slugify.
                    tag.save()
                self.object.tags.add(tag)
        return response
    # Gate access before normal dispatch.
    def dispatch(self, request, *args, **kwargs):
        if request.user.is_authenticated and request.user == self.get_object().author:
            return super(PostUpdate, self).dispatch(request, *args, **kwargs)
        else:
            raise PermissionDenied
            # 403: the requester is not the post's author.
class PostCreate(LoginRequiredMixin, UserPassesTestMixin, CreateView):
    """Create a new Post; restricted to staff/superusers.

    Bug fix: UserPassesTestMixin must precede CreateView in the base list,
    otherwise its dispatch() — and therefore test_func() — never runs.
    """
    model = Post
    fields = ['title', 'hook_text', 'content', 'head_image', 'file_upload', 'category']

    def test_func(self):
        # Only staff or superusers may create posts.
        return self.request.user.is_superuser or self.request.user.is_staff

    def form_valid(self, form):
        """Stamp the author, save the post, then attach tags parsed from
        the free-text 'tags_str' field ('a;b' or 'a,b')."""
        current_user = self.request.user
        if current_user.is_authenticated and (current_user.is_staff or current_user.is_superuser):
            form.instance.author = current_user
            response = super(PostCreate, self).form_valid(form)
            self._attach_tags(self.request.POST.get('tags_str'))
            return response
        else:
            return redirect('/blog/')

    def _attach_tags(self, tags_str):
        """Parse 'a;b;c' / 'a,b,c' (spaces ignored) and attach each Tag to
        the saved post, creating missing tags with unicode-aware slugs."""
        if not tags_str:
            return
        # Normalise separators: commas become semicolons, inner spaces are
        # dropped, and runs of ';' collapse so empty names cannot result
        # (this also fixes inputs such as ',;,').
        tags_str = tags_str.strip().replace(',', ';').replace(' ', '')
        while ';;' in tags_str:
            tags_str = tags_str.replace(';;', ';')
        for name in tags_str.split(';'):
            name = name.strip()
            if len(name) < 1:
                continue
            # Reuse an existing tag with the same name, otherwise create it.
            tag, created = Tag.objects.get_or_create(name=name)
            if created:
                # allow_unicode so non-ASCII (e.g. Korean) names slugify.
                tag.slug = slugify(name, allow_unicode=True)
                tag.save()
            self.object.tags.add(tag)
class PostList(ListView):
    """Paginated list of posts, newest first, with category sidebar data."""
    model = Post
    ordering = '-pk'
    paginate_by = 5  # posts per page (drives the Older/Newer links)

    def get_context_data(self, **kwargs):
        """Add the category list and the unfiled-post count for the sidebar."""
        context = super(PostList, self).get_context_data()
        context.update({
            'categories': Category.objects.all(),
            'no_category_post_count': Post.objects.filter(category=None).count(),
        })
        return context
class PostDetail(DetailView):
    """Single-post page with sidebar data and an empty comment form."""
    model = Post

    def get_context_data(self, **kwargs):
        context = super(PostDetail, self).get_context_data()
        context.update({
            'categories': Category.objects.all(),
            'no_category_post_count': Post.objects.filter(category=None).count(),
            'comment_form': CommentForm,
        })
        return context
# path('category/<str:slug>/',views.category_page),에서 옴
def category_page(request, slug):
    """Render the post list filtered to one category.

    The special slug 'no_category' shows posts without a category.
    """
    if slug == 'no_category':
        category = "미분류"  # label shown for unfiled posts
        post_list = Post.objects.filter(category=None)
    else:
        category = Category.objects.get(slug=slug)
        post_list = Post.objects.filter(category=category)
    context = {
        'post_list': post_list,
        'categories': Category.objects.all(),       # sidebar category cards
        'no_category_post_count': Post.objects.filter(category=None).count(),
        'category': category,                       # shown next to the title
    }
    return render(request, 'blog/post_list.html', context)
# 태그 페이지
def tag_page(request, slug):
    """Render the post list restricted to posts carrying tag `slug`."""
    tag = Tag.objects.get(slug=slug)
    context = {
        'post_list': tag.post_set.all(),
        'tag': tag,
        'categories': Category.objects.all(),
        'no_category_post_count': Post.objects.filter(category=None).count(),
    }
    return render(request, 'blog/post_list.html', context)
def new_comment(request, pk):
    """Create a comment on post `pk` from a POSTed CommentForm.

    Requires an authenticated user. GET requests and invalid forms fall
    back to the post's page.
    """
    if not request.user.is_authenticated:
        raise PermissionDenied
    post = get_object_or_404(Post, pk=pk)
    if request.method == 'POST':
        comment_form = CommentForm(request.POST)
        if comment_form.is_valid():
            comment = comment_form.save(commit=False)  # defer save to set FK/author
            comment.post = post
            comment.author = request.user
            comment.save()
            return redirect(comment.get_absolute_url())
    # GET request, or an invalid form: go back to the post.
    # (Bug fix: the original returned None for an invalid POST, which makes
    # Django raise "view didn't return an HttpResponse".)
    return redirect(post.get_absolute_url())
def delete_comment(request, pk):
    """Delete comment `pk` (author only) and return to its post."""
    comment = get_object_or_404(Comment, pk=pk)
    post = comment.post
    # Only the logged-in author may delete the comment.
    if not (request.user.is_authenticated and request.user == comment.author):
        raise PermissionDenied
    comment.delete()
    return redirect(post.get_absolute_url())
class PostSearch(PostList):
    """Post list filtered by a search term found in titles or tag names."""
    paginate_by = None  # show every match on a single page

    def get_queryset(self):
        q = self.kwargs['q']
        # distinct(): a post matching several tags must appear only once.
        return Post.objects.filter(
            Q(title__contains=q) | Q(tags__name__contains=q)
        ).distinct()

    def get_context_data(self, **kwargs):
        context = super(PostSearch, self).get_context_data()
        q = self.kwargs['q']
        context['search_info'] = f'Search : {q} ({self.get_queryset().count()})'
        return context
# category=category : category로 필터링 한 것만 가지고 온다.
# from django.shortcuts import render
# from .models import Post
# # 함수 방식으로 만들기
# # render : '템플릿'이라고 하는 폴더를 찾으러 감
# def index(request):
# # objects.all() : DB Query 명령어
# # DB에 있는 것들을 전부 가져온다.
# posts = Post.objects.all().order_by('-pk')
# return render(
# request,
# 'blog/index.html',
# {
# 'posts':posts,
# }
# )
# # urls에서 views.single_post_page로 이동했음.
# # request와 pk를 보내야 함
#def single_post_page(request, pk):
# post = Post.objects.get(pk=pk)
#
# return render(
# request,
# 'blog/single_post_page.html',
# {
# 'post':post,
# }
# )
# # 들어온 pk의 값들을 가져오는 것임
# # Create your views here.
| Sgkeoi/Goorm_Django | blog/views.py | views.py | py | 11,212 | python | ko | code | 0 | github-code | 13 |
3039568331 | #! python3
# ExcelToCSV.py - Converts all excel spreadsheets in the working directory to CSV files
import csv, openpyxl, os
def main():
ExcelToCSV()
def ExcelToCSV():
for excelFile in os.listdir('.'):
# Skip non-xlsx files
if not excelFile.endswith('.xlsx'):
continue
wb = openpyxl.load_workbook(excelFile)
# Loop through each sheet
for sheetName in wb.sheetnames:
# Load sheet
sheet = wb[sheetName]
# Create csv file and writer
fileName = excelFile[:-5] + '_' + sheetName + '.csv'
csvFile = open(fileName, 'w', newline='')
csvWriter = csv.writer(csvFile)
# Loop through the rows in the spreadsheet
for rowNum in range(1, sheet.max_row):
rowData = []
for colNum in range(1, sheet.max_column):
rowData.append(sheet.cell(row = rowNum, column = colNum).value)
# Write list to csv file
csvWriter.writerow(rowData)
csvFile.close()
if __name__ == '__main__':
main() | cjam3/AutomateTheBoringStuffPractice | Chapter 16/ExcelToCSV.py | ExcelToCSV.py | py | 1,161 | python | en | code | 0 | github-code | 13 |
39031703852 | # Recursive implementation
class Solution(object):
def combo(self, n, ans, s=0, c=0, p=""):
if c==n: # All starting parantheses have been closed with equal numbers of valid closing parantheses for current permutation
ans.append(p) # Append latest calulated permutation to final answer list
return
if s<n: # New starting parantheses can be added to start permutation
self.combo(n, ans, s+1, c, p+"(")
if c<s: # New closing parantheses can be added to catch up with starting parantheses count
self.combo(n, ans, s, c+1, p+")")
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
ans = []
self.combo(n, ans)
return ans
| sarvesh10491/Leetcode | Pattern_Based/3_Generate_Parentheses.py | 3_Generate_Parentheses.py | py | 829 | python | en | code | 0 | github-code | 13 |
29578613364 | import math
import os
import pygame
gameStage = 1
startMenuButtons = ["Continue", "New Game", "Settings", "Exit"]
# Initializes PyGame #
pygame.init()
os.environ["SDL_VIDEO_CENTERED"] = "1"
screen = pygame.display.set_mode((1088, 768))
pygame.display.set_caption("Menu Testing")
font = pygame.font.SysFont("ariel", 35)
pygame.mouse.set_visible(False)
menuButton = 0
color = (255, 0, 0)
while gameStage == 1:
for event in pygame.event.get():
# Close game
if event.type == pygame.QUIT:
gameStage = 0
running = False
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_s:
if menuButton + 1 > len(startMenuButtons) - 1:
menuButton = 0
else:
menuButton += 1
if event.key == pygame.K_w:
if menuButton - 1 < 0:
menuButton = 3
else:
menuButton -= 1
if event.key == pygame.K_RETURN:
if menuButton == 0:
print("Continue Coming Soon...")
elif menuButton == 1:
gameStage = 2
elif menuButton == 2:
print("Settings Coming Soon...")
elif menuButton == 3:
gameStage = 0
running = False
menuY = math.ceil(384 / (len(startMenuButtons) / 2))
for button in startMenuButtons:
if button == startMenuButtons[menuButton]:
color = (255, 255, 255)
else:
color = (255, 0, 0)
text = font.render(button, True, color)
screen.blit(text,
(544 - text.get_width() // 2, (menuY - text.get_height() // 2)))
menuY += 64
pygame.display.update()
| Benjamin-Fever/Box-Shifter-I | gameTesting/menu.py | menu.py | py | 1,811 | python | en | code | 0 | github-code | 13 |
22561465809 | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X = [1,2,3,4,5,6,7,8,9,10]
Y = [5,6,2,3,13,4,1,2,4,8]
Z = [2,3,3,3,5,7,9,11,9,10]
label = [1,1,1,1,1,0,0,0,0,0]
for i in xrange(10):
if label[i]==1:
ax.scatter(X[i],Y[i],Z[i], c='r', marker='o')
else:
ax.scatter(X[i],Y[i],Z[i], c='b', marker='o')
ax.set_xlabel('x axis')
ax.set_ylabel('y axis')
ax.set_zlabel('z axis')
plt.show() | xiawang/HPCDA | src/test5.py | test5.py | py | 475 | python | en | code | 0 | github-code | 13 |
11188204995 | import numpy as np
#np.set_printoptions(suppress=True)
import os
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.pyplot as plt
def resolution(path,n,savepath):
dir = path
data = np.loadtxt(dir)
j = 1
label = []
x = data[:, 0]
y = data[:, 1]
for i in range(len(y) - 2):
if y[j] < y[j - 1] and y[j] < y[j + 1]:
label.append(j)
# print(j,y[j])
j += 1
else:
j += 1
# print(label)
# label2记录了极小值点小于1.5的数值
k = 0
label2 = []
for i in range(len(label) - 1):
a = label[k]
b = label[k + 1]
if x[b] - x[a] <= n:
# print(x[b]-x[a],a,b)
label2.append(a)
label2.append(b)
k += 1
else:
k += 1
# 包含了相邻的两组数
# print(label2)
m = 0
n = 1
np.set_printoptions(threshold=np.inf)
# 对label2内的极小值点归零
for i in range(len(label2) // 2):
a = label2[m]
b = label2[n]
for j in range(a, b + 1):
y[j] = 0
m += 2
n += 2
y[0] = 0
np.savetxt(savepath, np.column_stack((x, y)), fmt='%0.4f')
# plt.plot(x, y)
# plt.title("1.5nm")
# plt.show()
if __name__ == "__main__":
dir1 = "E:\\2019-1\大气\Hg10mm-拟合基底\\原始去除波谷拟合数据1.txt"
savedir="E:\\2019-1\大气\Hg10mm-拟合基底\\原始去除波谷拟合数据1.5分辨率.txt"
resolution(dir1,2,savedir) | gym918/Air-Spectrum | 预处理/Resolution.py | Resolution.py | py | 1,598 | python | en | code | 0 | github-code | 13 |
10934043036 | # -*- coding: utf-8 -*-
from bson import ObjectId
import models
import logging
import threading
from qmongo import helpers
logger = logging.getLogger(__name__)
global lock
lock = threading.Lock()
def get_list_with_searchtext(args):
    """Return a page of Excel-template summaries, optionally filtered.

    ``args['data']`` may contain: ``search`` (substring of template_name),
    ``pageIndex``/``pageSize`` (paging), ``sort``, and ``where`` (dict with
    ``function_id``).

    BUG FIX: the original read ``pageSize`` with default 0 and ``pageIndex``
    with default 20 (values swapped), so a request without paging info asked
    for page 20 of size 0; it also fell back to ``sort = 20``.  Defaults are
    now pageIndex=0, pageSize=20 and no sorting, matching the intent of the
    normalization lambdas that followed.
    """
    data = args['data']
    searchText = data.get('search', '')
    pageIndex = data.get('pageIndex')
    pageSize = data.get('pageSize')
    sort = data.get('sort')
    where = data.get('where', None)
    pageIndex = 0 if pageIndex is None else pageIndex
    pageSize = 20 if pageSize is None else pageSize
    ret = models.HCSSYS_ExcelTemplate().aggregate().project(
        function_id=1,
        template_code=1,
        template_name=1,
        is_default=1,
        view_name=1
    )
    if where is not None:
        ret.match("function_id == @func_id", func_id=where['function_id'])
    if searchText is not None:
        # An empty search string matches everything, as before.
        ret.match("contains(template_name, @name)", name=searchText)
    if sort is not None:
        ret.sort(sort)
    return ret.get_page(pageIndex, pageSize)
def get_datail_with_searchtext(args):
    """Return a page of the ``detail`` rows of the first matching template.

    Same filtering contract as :func:`get_list_with_searchtext`; when a
    template matches, the page's ``items`` are unwrapped to that template's
    ``detail`` list.

    BUG FIX: as in get_list_with_searchtext, the ``pageIndex``/``pageSize``
    defaults were swapped (page 20 of size 0) and ``sort`` defaulted to 20;
    corrected to pageIndex=0, pageSize=20, no default sort.
    """
    data = args['data']
    searchText = data.get('search', '')
    pageIndex = data.get('pageIndex')
    pageSize = data.get('pageSize')
    sort = data.get('sort')
    where = data.get('where', None)
    pageIndex = 0 if pageIndex is None else pageIndex
    pageSize = 20 if pageSize is None else pageSize
    ret = models.HCSSYS_ExcelTemplate().aggregate().project(
        function_id=1,
        detail=1
    )
    if where is not None:
        ret.match("function_id == @func_id", func_id=where['function_id'])
    if searchText is not None:
        ret.match("contains(template_name, @name)", name=searchText)
    if sort is not None:
        ret.sort(sort)
    rs = ret.get_page(pageIndex, pageSize)
    if len(rs['items']) > 0:
        # Unwrap: callers expect the detail rows of the first matched template.
        return {
            'total_items': rs['total_items'],
            'items': rs['items'][0]['detail'],
            'page_index': rs['page_index'],
            'page_size': rs['page_size']
        }
    return rs
def insert(args):
    """Insert a new Excel-template document built from ``args['data']``.

    Serialized with the module-level lock.  The lock is now managed with a
    ``with`` statement instead of a manual acquire/release on every exit
    path, so it can never be left held (the original had to repeat
    ``lock.release()`` in three places).
    """
    with lock:
        if args['data'] is not None:
            data = set_dict_insert_data(args)
            return models.HCSSYS_ExcelTemplate().insert(data)
        return dict(
            error="request parameter is not exist"
        )
def update(args):
    """Update the template whose ``_id`` matches ``args['data']['_id']``.

    Serialized with the module-level lock via ``with`` (see :func:`insert`
    for why the manual acquire/release pattern was replaced).
    """
    with lock:
        if args['data'] is not None:
            data = set_dict_update_data(args)
            return models.HCSSYS_ExcelTemplate().update(
                data, "_id == {0}", _id=args['data']['_id'])
        return dict(
            error="request parameter is not exist"
        )
def delete(args):
    """Delete every template whose ``_id`` appears in ``args['data']``.

    ``args['data']`` is a list of dicts each carrying an ``_id`` string.
    Serialized with the module-level lock via ``with`` (see :func:`insert`).
    """
    with lock:
        if args['data'] is not None:
            return models.HCSSYS_ExcelTemplate().delete(
                "_id in {0}", [ObjectId(x["_id"]) for x in args['data']])
        return dict(
            error="request parameter is not exist"
        )
def remove_detail(args):
    """Pull one detail row (matched by field_name) out of a template document.

    NOTE(review): ``id`` below is initialised to "" and never assigned from
    ``args``, so the update filters on ``_id == ""`` and presumably matches
    nothing — it likely should come from args['data']['_id'].  Left as-is
    because the correct source of the id cannot be confirmed from here.
    """
    try:
        lock.acquire()
        ret = {}
        id = ""
        if args['data'] != None:
            # Build a $pull filter targeting the first detail entry's field_name.
            filter_value=helpers.filter("detail.field_name == {0}", args['data']['detail'][0]['field_name']).get_filter()
            ret = models.HCSSYS_ExcelTemplate().update({
                "$pull": filter_value
            },"_id == {0}",id)
            lock.release()
            return ret
        lock.release()
        return dict(
            error = "request parameter is not exist"
        )
    except Exception as ex:
        # Release before propagating so the module lock is never left held.
        lock.release()
        raise(ex)
def set_dict_insert_data(args):
    """Build the document dict for an insert from ``args['data']``.

    Top-level template fields and the nested ``detail`` dict are copied from
    the request; any missing key defaults to ``None``.

    BUG FIX: the original used the Python-2-only ``dict.has_key`` (wrapped in
    per-field lambdas); ``dict.get`` is equivalent and runs on Python 3 too.
    """
    data = args['data']
    # Keys that live inside the nested ``detail`` sub-document.
    detail_keys = (
        'field_name', 'lookup_data', 'lookup_key_field', 'lookup_result',
        'allow_null', 'is_key', 'language', 'header_text', 'is_visible',
        'ordinal',
    )
    ret_dict = dict(
        function_id=data.get('function_id'),
        template_code=data.get('template_code'),
        template_name=data.get('template_name'),
        is_default=data.get('is_default'),
        view_name=data.get('view_name'),
        detail={key: data.get(key) for key in detail_keys},
    )
    return ret_dict
def set_dict_update_data(args):
    """Build the document dict for an update: same shape as the insert dict,
    with function_id and template_code removed so an update cannot alter
    them."""
    ret_dict = set_dict_insert_data(args)
    del ret_dict['function_id']
    del ret_dict['template_code']
return ret_dict | nttlong2018/hr-python | apps/performance/api/HCSSYS_ExcelTemplate.py | HCSSYS_ExcelTemplate.py | py | 6,552 | python | en | code | 0 | github-code | 13 |
# Sentence-level sentiment scoring of ResearchGate comments with Stanza.
# Each comment gets one aggregate score in [-10, 10]: the mean of its
# per-sentence sentiments (0/1/2), recentered to [-1, 1] and scaled by 10.
import pandas as pd
import stanza

nlp = stanza.Pipeline(lang='en', processors='tokenize, sentiment')

df = pd.read_csv('researchGate_Covid19ImpactAcademic.csv')


def _comment_score(doc):
    """Aggregate one Stanza document's sentence sentiments into a score."""
    sentiments = [sentence.sentiment for sentence in doc.sentences]
    return round(sum(sentiments) / len(sentiments) - 1, 3) * 10


# BUG FIX: the original rewrote resultList in place via list.index() while
# iterating over it — an O(n^2) pattern that relies on replaced entries no
# longer comparing equal.  Scores are now computed directly, one per comment.
df['SentimentOfComment'] = [_comment_score(nlp(comment)) for comment in df['Comment']]

df.to_csv('sentimentAnalysis_researchGate.csv', encoding='UTF-8')
| j56810186/python_little_part_of_research_project | sentimentAnalysis.py | sentimentAnalysis.py | py | 783 | python | en | code | 0 | github-code | 13 |
42027174869 | # -*- coding: utf-8 -*-
__author__ = 'Xuesong Wang'
import sys
from data_helper import *
from sklearn.model_selection import train_test_split
import json
import logging
import tensorflow as tf
from text_cnn import TextCNN
import time
import os
if __name__ == '__main__':
""" Step 0: reload coding systems to display correct Chinese characters"""
reload(sys)
sys.setdefaultencoding('utf-8')
""" Step 1: read data ,model parameters """
# data = readFile('./data/txt/')
data = pd.read_csv('../DataSource/valid.csv',encoding="utf-8")
parameter_file = '../CNN/data/parameters.json'
params = json.loads(open(parameter_file).read())
""" Step 2: Select top 4000 shuffled samples as total data sets and ...
build word embedding vocabulary therein """
shuffle_indice = range(0,len(data))
np.random.shuffle(shuffle_indice)
x,vocab_processor = wordSeg(data['content'].values[shuffle_indice],initial_vocab=4000)
# seg_list = pd.read_csv('./data/feature.csv',encoding='utf-8',header=None).values[:, 0]
# x, vectorizer, transformer = tfIdf(seg_list)
""" Step 3: map labels into one hot vector """
y,enc,enc2 = labelEncoding(data['label'].values[shuffle_indice[0:4000]])
""" Step 4: split into training and testing"""
x_, x_test, y_, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
""" Step 5: shuffle train sets and split them into train and dev sets"""
shuffle_indices = np.random.permutation(np.arange(len(y_)))
x_shuffled = x_[shuffle_indices]
y_shuffled = y_[shuffle_indices]
x_train, x_dev, y_train, y_dev = train_test_split(x_shuffled, y_shuffled, test_size=0.1)
logging.info('x_train: {}, x_dev: {}, x_test: {}'.format(len(x_train), len(x_dev), len(x_test)))
logging.info('y_train: {}, y_dev: {}, y_test: {}'.format(len(y_train), len(y_dev), len(y_test)))
"""Step 6: build a graph and cnn object"""
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
sess = tf.Session(config=session_conf)
with sess.as_default():
cnn = TextCNN(
sequence_length=x_train.shape[1],
num_classes=y_train.shape[1],
vocab_size = len(vocab_processor.vocabulary_),
embedding_size=params['embedding_dim'],
filter_sizes=list(map(int, params['filter_sizes'].split(","))),
num_filters=params['num_filters'],
l2_reg_lambda=params['l2_reg_lambda'],
labelencoder=enc)
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(1e-3)
grads_and_vars = optimizer.compute_gradients(cnn.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
timestamp = str(int(time.time()))
out_dir = "trained_model_" + timestamp
# decide path to save model
checkpoint_dir = os.path.join(out_dir, "checkpoints")
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables())
# One training step: train the model with one batch
def train_step(x_batch, y_batch,i):
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: params['dropout_keep_prob']}
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('./CNN/graphs/train',sess.graph)
summary,_, step, loss, acc = sess.run([merged,train_op, global_step, cnn.loss, cnn.accuracy], feed_dict)
writer.add_summary(summary,i)
writer.close()
# the following run without building a graph for tensorboard
# _, step, loss, acc = sess.run([train_op, global_step, cnn.loss, cnn.accuracy], feed_dict)
# One evaluation step: evaluate the model with one batch
def dev_step(x_batch, y_batch, i=None):
feed_dict = {cnn.input_x: x_batch, cnn.input_y: y_batch, cnn.dropout_keep_prob: 1.0}
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('../CNN/graphs/valid', sess.graph)
summary,step, loss, acc, num_correct,confusion = sess.run([merged,global_step, cnn.loss, cnn.accuracy, cnn.num_correct, cnn.confusion], feed_dict)
if i:
writer.add_summary(summary,i)
# step, loss, acc, num_correct, confusion = sess.run(
# [global_step, cnn.loss, cnn.accuracy, cnn.num_correct, cnn.confusion], feed_dict)
return num_correct,confusion
# Save the word_to_id map since predictLabel.py needs it
# vocab_processor.save(os.path.join(out_dir, "vocab.pickle"))
# transformer.save(os.path.join(out_dir, "vocab.pickle"))
# Initialize the graph
sess.run(tf.global_variables_initializer())
# Training starts here
# split training data into batch
train_batches = batch_iter(list(zip(x_train, y_train)), params['batch_size'], params['num_epochs'])
best_accuracy, best_at_step = 0, 0
"""Step 7: train the cnn model with x_train and y_train (batch by batch)"""
for batchindex, train_batch in enumerate(train_batches):
x_train_batch, y_train_batch = zip(*train_batch)
train_step(x_train_batch, y_train_batch, batchindex)
current_step = tf.train.global_step(sess, global_step)
"""Step 7.1: evaluate the model with x_dev and y_dev (batch by batch)"""
if current_step % params['evaluate_every'] == 0:
dev_batches = batch_iter(list(zip(x_dev, y_dev)), params['batch_size'], 1)
total_dev_correct = 0
for dev_batch in dev_batches:
x_dev_batch, y_dev_batch = zip(*dev_batch)
num_dev_correct,confusion_matrix = dev_step(x_dev_batch, y_dev_batch,current_step)
total_dev_correct += num_dev_correct
dev_accuracy = float(total_dev_correct) / len(y_dev)
logging.critical('Accuracy on dev set: {}'.format(dev_accuracy))
"""Step 7.2: save the model if it is the best based on accuracy of the dev set"""
if dev_accuracy >= best_accuracy:
best_accuracy, best_at_step = dev_accuracy, current_step
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
logging.critical('Saved model {} at step {}'.format(path, best_at_step))
logging.critical('Best accuracy {} at step {}'.format(best_accuracy, best_at_step))
"""Step 8: predict x_test (batch by batch)"""
test_batches = batch_iter(list(zip(x_test, y_test)), params['batch_size'], 1)
total_test_correct = 0
correctlist = np.zeros(len(enc.classes_),)
incorrectlist = np.zeros(len(enc.classes_),)
for test_batch in test_batches:
x_test_batch, y_test_batch = zip(*test_batch)
num_test_correct, confusion_matrix= dev_step(x_test_batch, y_test_batch)
total_test_correct += num_test_correct
for i in range(0, confusion_matrix.shape[0]):
correctlist[i] += confusion_matrix[i,i]
incorrectlist[i] += np.sum(confusion_matrix[i, :], axis=0) - confusion_matrix[i,i]
df = DataFrame([correctlist, incorrectlist], index=['correct', 'incorrect'], columns=enc.classes_)
df.to_csv('data/result.csv')
# draw confusion matrix
confusionmatri_show(df)
test_accuracy = float(total_test_correct) / len(y_test)
logging.critical('Accuracy on test set is {} '.format(test_accuracy))
logging.critical('The training is complete')
| xuesongwang/Chinese-forum-mining | CNN/inclassCNN_train.py | inclassCNN_train.py | py | 8,330 | python | en | code | 0 | github-code | 13 |
33938347478 | import glob, os, re
def UtilObserver(sourcepath,file_mask):
    """Scan an Unreal project's SHOTS/EPWHH tree for assets matching
    ``file_mask``, print those under 100 MB, and return a list of names.

    NOTE(review): ``name_assets``, ``all_assets`` and ``all_native_assets``
    all alias the SAME list object, and the final loop writes base names at
    the indices of the size-*filtered* list — so when any asset is filtered
    out, the returned list mixes stripped names with untouched full paths.
    Confirm whether callers rely on that before changing it.
    """
    unreal_source_path = sourcepath+'/Content/SHOTS/EPWHH'
    all_native_assets = glob.glob(unreal_source_path + file_mask, recursive=True)
    #all_optimized_assets = glob.glob(unreal_source_path + '/**/OPT/**/*.fbx', recursive=True)
    name_assets = all_native_assets
    all_assets = all_native_assets
    # Normalize Windows path separators in place.
    for index, item in enumerate(all_assets) :
        all_assets[index] = item.replace('\\','/')
    #FBX vs OPT
    #Restrict size
    # Keep only assets smaller than 100 (decimal) MB, printing each size.
    list_of_assets=[]
    for asset in all_assets:
        asset_size = os.stat(asset).st_size / (1000 * 1000)
        if asset_size < 100:
            print(asset + ' --> ' + str(asset_size))
            list_of_assets.append(asset)
    #Isolate with a string
    #list_of_assets = [x for x in list_of_assets if 'PROPS' in x]
    for index, name in enumerate(list_of_assets):
        #asset_name = name.split('/')[-1].split('.')[0].split('_', 1)[1]
        asset_name = name.split('/')[-1]
        # Basename without extension (the line above is immediately overwritten).
        asset_name = name.split('/')[-1].split('.')[0]
        # NOTE(review): asset_path / static_mesh_fbx / json_file_path /
        # asset_size are computed but never used below — presumably leftovers.
        asset_path = "/Game/Assets/StaticMeshes/" + asset_name
        static_mesh_fbx = name
        json_file_path = name.replace('fbx','json')
        asset_size = os.stat(name).st_size / (1000 * 1000)
        name_assets[index] = asset_name
        print(str(index) + ' --> ' +asset_name + ' --> ' + str(os.stat(name).st_size / (1024 * 1024)).split('.')[0] + ' Mb')
    return name_assets
print(len(UtilObserver('C:/GIT/ProjectOazis', '/**/*.umap'))) | denfrost/UnrealPyClient | Content/Python/UtilObserver.py | UtilObserver.py | py | 1,451 | python | en | code | 6 | github-code | 13 |
406574802 | """
SIGNAL PROCESSING
"""
# import essentia
from essentia.standard import FrameGenerator, Spectrum, Windowing
import numpy as np
from .utils import timer
@timer
def get_spectrogram(audio_data):
    """Compute a spectrogram of ``audio_data`` with Essentia.

    Frames of 2048 samples (hop 1024) are Hann-windowed and passed through
    Spectrum(); the stacked result is transposed so rows are frequency bins
    and columns are frames.
    """
    spectrogram = []  # NOTE(review): dead assignment, overwritten below.
    spectrum = Spectrum()
    w = Windowing(type='hann')
    spectrogram = np.array(list(map(
        lambda x: spectrum(w(x)),
        list(FrameGenerator(
            audio_data,
            frameSize=2048,
            hopSize=1024,
            startFromZero=True))
    ))).T
    # Offset every bin by the smallest nonzero magnitude — presumably to
    # avoid exact zeros downstream (e.g. before a log); confirm with callers.
    spectrogram += np.min(spectrogram[np.nonzero(spectrogram)])
    return spectrogram
@timer
def equalize_spectrum(data):
    """Histogram-equalize an array into uint8 levels (0..255).

    NOTE(review): the ``-=`` and ``*=`` below mutate the caller's array in
    place before the local rebinding to ``np.rint`` — confirm callers expect
    their input to be modified.
    """
    # Move minimum value to zero
    data -= data.min()
    # Scale max to 255
    data *= (255.0/data.max())
    # Round to int values
    data = np.rint(data)
    # Create histogram and bins over the 256 intensity levels
    hist, bins = np.histogram(data.flatten(), 256, [0, 256])
    # Find Cumulative Distribution Function
    cdf = hist.cumsum()
    # Normalised cdf (computed but unused — kept for reference/debugging)
    cdf_normalized = cdf * hist.max() / cdf.max()
    # Mask zero entries so empty bins don't distort the equalisation
    cdf_m = np.ma.masked_equal(cdf, 0)
    # Stretch the masked CDF to the full 0..255 range
    cdf_m = (cdf_m - cdf_m.min())*255/(cdf_m.max()-cdf_m.min())
    cdf = np.ma.filled(cdf_m, 0).astype('uint8')
    # Return equalized data values according to cdf mapping
    return cdf[data.astype('uint8')]
@timer
def group_avg(data, N=2):
    """Average ``data`` along axis 0 over consecutive, non-overlapping groups
    of ``N`` rows; rows of an incomplete trailing group are dropped.

    Implemented via a cumulative sum: taking every N-th running total and
    differencing adjacent entries yields each group's sum, divided by N.
    """
    group_totals = np.cumsum(data, 0)[N - 1::N]
    means = group_totals / float(N)
    means[1:] = means[1:] - means[:-1]
    return means
| conjectures/art-cgan | core/sp/signal_processing.py | signal_processing.py | py | 1,619 | python | en | code | 0 | github-code | 13 |
# Read "name:id:course" lines until a line containing no uppercase letters
# arrives; that terminating line names the course to report, with
# underscores standing in for spaces.
data = input()
students_info = {}
while not data == data.lower():
    split = data.split(":")
    student_name = split[0]
    student_id = split[1]
    student_course = split[2]
    if student_course not in students_info:
        students_info[student_course] = {student_name: student_id}
    else:
        students_info[student_course][student_name] = student_id
    data = input()
course = " ".join(data.split("_"))
# Print "name - id" for every student registered in the requested course.
for key, value in students_info.items():
    if key == course:
        for name, id in value.items():
print(f"{name} - {id}") | DimitarDimitr0v/Python-Fundamentals | 06. Dictionaries/Lab/06. students.py | 06. students.py | py | 559 | python | en | code | 2 | github-code | 13 |
22244632338 |
import tensorflow as tf
import pathlib
import sys
import datetime
import time
import yfinance as yf
import copy
from baseSignal import Signal
import pandas as pd
scriptpath = pathlib.Path(__file__).parent.resolve()
sys.path.append(str(scriptpath.parent.parent/'utils'))
from dict_utils import Df_to_Dict, get_future_days_stock
from postgres import db_conn, MachineLearningTables
from tables.StockPrice import MainMLPrice
class MLSignal(Signal):
table_dict = {
'MTPrice': {
'Day1': 0,
'Day2': 1
},
'TWPrice': {
'Day1': 1,
'Day2': 2
},
'WTPrice': {
'Day1': 2,
'Day2': 3
},
'TFPrice': {
'Day1': 3,
'Day2': 4
},
}
table_names = ['MTPrice', 'TFPrice', 'TWPrice', 'WTPrice']
def __init__(self, dataobj,mlclass=None) -> None:
self.db_conn = db_conn()
super().__init__(conn=self.db_conn, name='MLSignal',
function=self.defaultFunction, dataclass=dataobj,ml=mlclass)
self.dataobj = dataobj
if not mlclass:
self.mlclass = MachineLearningTables(self.table_names)
self.mlclass.createMachineLearningTable()
self.tables = self.mlclass.tables
if mlclass:
self.mlclass = mlclass
self.tables = self.mlclass.tables
def defaultFunction(self):
return
def GetDfWithSignal(self,data=None):
if not data:
data = yf.download(tickers='SPY',start= "2023-01-01", end="2023-03-31")
# data = self.get_price_from_yf(period='5d')
data['Previous_Close'] = (data.Close.shift(1))
data['signal'] = 0.0
for d in self.table_dict:
num_minus = 1
if self.table_dict[d]['Day1'] == 0:
num_minus = 3
mldata =self.db_conn.SelectAll(self.tables[d])
df = pd.DataFrame.from_dict(mldata)
df.set_index('Date',inplace=True)
data = pd.concat([data,df],axis='columns')
for index,row in data.iterrows():
if not pd.isna(row.Predicted_Close) and index.dayofweek == self.table_dict[d]['Day1']:
string_date = pd.to_datetime(index)-datetime.timedelta(days=num_minus)
query=string_date.strftime('%Y-%m-%d')
if row.Predicted_Close > row.Previous_Close:
data.loc[query,'signal'] += 1
else:
data.loc[query,'signal'] -= 1
if not pd.isna(row.Predicted_Close) and index.dayofweek == self.table_dict[d]['Day2']:
string_date = pd.to_datetime(index)-datetime.timedelta(days=1)
query=string_date.strftime('%Y-%m-%d')
if row.Predicted_Close > row.Previous_Close:
data.loc[query,'signal'] += 0.5
else:
data.loc[query,'signal'] -= 0.5
data.drop('Predicted_Close',inplace=True,axis='columns')
return data
def getTableData(self):
return self.db_conn.SelectAll(self.dataobj)
def RunDatesWithSignal(self,data=None):
if not data:
data = yf.download(tickers='SPY',start= "2023-01-01", end="2023-03-31")
| jamesyeogz/FYP_project | engine/engine/indicator_engine/MLSignal.py | MLSignal.py | py | 3,382 | python | en | code | 0 | github-code | 13 |
37988597174 | import sys
def txt_importer(path_file: str):
    """Read a ``.txt`` file and return its content as a list of lines.

    Prints an error message to stderr and returns ``None`` when the path does
    not end in ``.txt`` or the file does not exist.
    """
    if not path_file.endswith(".txt"):
        print("Formato inválido", file=sys.stderr)
        return None

    try:
        handle = open(path_file, "r")
    except FileNotFoundError:
        print(f"Arquivo {path_file} não encontrado", file=sys.stderr)
        return None

    with handle:
        return handle.read().split("\n")
# Reference of the '.endswith()' method used above
# https://stackoverflow.com/questions/5899497/how-can-i-check-the-extension-of-a-file
| JOAO-LEE/project_ting_trybe_is_not_google | ting_file_management/file_management.py | file_management.py | py | 533 | python | en | code | 0 | github-code | 13 |
14423489517 | import requests
import tkinter as tk
class Box(object):
    """One cell of the battleship board: tracks hit state and owns the Tk
    label widget used to render it."""
    # Class-level defaults, shadowed per instance once a box is hit/marked.
    isChecked = False      # this cell has already been shot at
    isSubmarine = False    # a submarine segment occupies this cell
    btnTxt = ""
    lblBox = ""
    def __init__(self, boxID, lblBox, GameBoard):
        # boxID: cell number reported to the server; lblBox: Tk label widget;
        # GameBoard: owning board (turn state, game id, player name).
        self.boxID = boxID
        self.lblBox = lblBox
        self.GB = GameBoard
    def ShootBox(self, event):
        """Click handler: shoot this cell on my turn, report the shot to the
        server, then hand the turn to the opponent."""
        if self.GB.MyTurn and not self.isChecked:
            self.isChecked = True
            if self.isSubmarine:
                self.lblBox.config(bg='red')
                self.GB.CheckWin()
            else:
                self.lblBox.config(text='X')
            # Notify the server which cell this player shot in this game.
            PARAMS = {'GameId': self.GB.GameId, 'BoxNm': self.boxID, 'nmMe': self.GB.NmMe}
            myURL = 'http://oniken.c1.biz/server/actions/SendBoxNm.php?'
            requests.get(url=myURL, params=PARAMS)
            self.GB.SetMyTurnThread()
    def setHit(self):
        """Apply an opponent's shot to this cell (no server round-trip)."""
        self.isChecked = True
        if self.isSubmarine:
            self.lblBox.config(bg='Red')
            self.GB.CheckOpponentWin()
        else:
| oniken18/SubmarineWar_Python | BoxClass.py | BoxClass.py | py | 1,035 | python | en | code | 0 | github-code | 13 |
# Environment-dispatching Django settings: import the settings module that
# matches the ENVIRONMENT variable (DEVELOPMENT is the fallback).
from decouple import config

ENVIRONMENT = config('ENVIRONMENT', default='DEVELOPMENT')

# BUG FIX: the production/staging imports were crossed — PRODUCTION loaded
# the staging settings and STAGING loaded the production ones.
if ENVIRONMENT.upper() == 'PRODUCTION':
    from project_name.settings.production import *
elif ENVIRONMENT.upper() == 'STAGING':
    from project_name.settings.staging import *
else:
    from project_name.settings.development import *

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
STATIC_URL = 'static/'

# Ensure the static directory exists (BASE_DIR, Path and DEBUG come from the
# environment-specific settings module imported above).
Path(BASE_DIR / 'static/').mkdir(parents=True, exist_ok=True)
if DEBUG:
    # Development: serve from source static dirs.
    STATIC_DIR = BASE_DIR / 'static/'
    STATICFILES_DIRS = [
        STATIC_DIR
    ]
else:
    # Staging/production: collectstatic target.
    STATIC_ROOT = BASE_DIR / 'static/'
| CodingArmenia/django-rest-api-project-template | project_name/settings/__init__.py | __init__.py | py | 670 | python | en | code | 0 | github-code | 13 |
def add_to_inventory(inventory, added_items):
    """Count each item of ``added_items`` into ``inventory`` (in place).

    ``inventory`` maps item name -> count; the mutated dict is also returned
    for convenience.  Iterates the items directly with a single ``dict.get``
    per item instead of the original ``range(len(...))`` index loop plus
    ``setdefault`` + increment (two lookups per item).
    """
    for item in added_items:
        inventory[item] = inventory.get(item, 0) + 1
    return inventory
def display_inventory(display):
    """Print an inventory report: a header, one "<count> <item>" line per
    entry, and a final total-count line."""
    print("Inventory:")
    for item_name, count in display.items():
        print(str(count) + " " + item_name)
    print("Total number of items: " + str(sum(display.values())))
# Demo: seed a small inventory, add the dragon loot, then print the counts.
inv = {'gold coin': 42, 'rope': 1}
dragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']
inv = add_to_inventory (inv, dragonLoot)
display_inventory (inv)
| kestena/automatetheboringstuffwithpython | chapter_5_add_to_inventory.py | chapter_5_add_to_inventory.py | py | 599 | python | en | code | 0 | github-code | 13 |
11045600210 | import logging
import sys
import time
import json
import jieba.analyse
jieba.setLogLevel(logging.ERROR)
jieba.initialize()
from titletrigger.api import load_model, abs_summarize, tag_classify, extract_keywords, ext_summarize
sys.path.append('titletrigger/textsum')
sys.path.append('titletrigger/textclf')
if __name__ == "__main__":
sum_model_path = "/home/hjpan/projects/ML-Camp-BurnMyGpu/titletrigger/textsum/cache/copynet/model/best_model.pt"
clf_model_path = "/home/hjpan/projects/ML-Camp-BurnMyGpu/titletrigger/textclf/cache/rcnn/model/best_model.pt"
content = """国务院总理李克强21日下午在中南海紫光阁会见中印边界问题印方特别代表、印度国家安全顾问多瓦尔。
李克强表示,中印边界问题特别代表会晤机制为双方增进互信、扩大共识发挥了建设性作用。
我们要继续从中印关系大局出发,探讨通过外交途径以和平方式妥善解决边界问题。
在找到公平合理、双方都能接受的解决方案前,一定要管控好分歧,共同致力于维护边境地区的和平与安宁。
这也可以为两国深入推进经贸合作提供稳定的预期。李克强指出,当前世界经济复苏乏力,地缘政治动荡更加突出。
中印作为两个最大的新兴经济体,经济保持中高速增长,对世界是鼓舞,对亚洲是带动。
双方要珍惜和维护好两国关系发展势头,充分发挥经济互补优势,开展多领域务实合作,
密切在国际和地区事务中的沟通协调,发出中印携手维护和平稳定、促进发展进步的积极信号。
多瓦尔表示,印中关系取得了积极进展,两国既面临发展经济的艰巨挑战,也拥有开展合作的巨大机遇。
印方愿同中方加强高层交往,深化经济、安全等各领域合作,妥善处理边界问题,推动两国关系取得更大发展。"""
content_list = [" ".join(jieba.cut(content))]
ext_headline = " ".join(ext_summarize(content_list)[0])
print("Loading model...\n")
sum_model_file = load_model(sum_model_path)
clf_model_file = load_model(clf_model_path)
# print("="*50)
st = time.time()
result_dict = abs_summarize(content_list, sum_model_file)
print("Textsum Finished in {:.4f} s".format(time.time() - st))
preds = result_dict["preds"]
st = time.time()
tag = tag_classify(content_list, clf_model_file)
print("Textclf Finished in {:.4f} s".format(time.time() - st))
print("News Content: ")
print("".join(content_list[0].split()))
print()
print("Headline: ")
print("EXT:")
print(ext_headline)
print()
print("ABS:")
for pred, score in result_dict["all_preds"][0]:
print("{:.2f}: {}".format(score, "".join(pred)))
print()
print("Tag: ")
print(tag[0])
print()
print("Keywords: ")
keywords = " ".join(
extract_keywords("".join(content_list[0].split()))).replace("\n", "")
print(keywords)
| ScarletPan/ML-Camp-BurnMyGpu | example.py | example.py | py | 3,029 | python | zh | code | 2 | github-code | 13 |
26530006772 | import cv2
import time
import sys
import numpy as np
sys.path.append('..')
from libs import FaceGateway
from libs import EyesGateway
from libs import EyeDirectionGateway
from libs import PupilsGateway
#The issue with OpenCV track bars is that they require a function that will happen on each track bar
# movement. We don’t need any sort of action, we only need the value of our track bar, so we create a nothing() function:
def nothing(x):
    """No-op trackbar callback: OpenCV requires a callable per trackbar, but
    this code only polls the value with getTrackbarPos, so nothing happens
    here."""
    return None
def videoCapture(static):
    """Main webcam loop: detect the face, eyes, pupils and gaze direction on
    each frame until 'q' is pressed.

    ``static`` is forwarded unchanged to PupilsGateway.detect_pupil
    (presumably a static/calibration flag — confirm in PupilsGateway).
    The pupil threshold is read live from the 'threshold' trackbar.
    """
    cap = cv2.VideoCapture(0)  # default webcam
    cv2.namedWindow('image')
    cv2.createTrackbar('threshold', 'image', 0, 255, nothing)
    while True:
        _, img = cap.read()
        face_coordinates = FaceGateway.detectFace(img)
        # Only look for eyes/pupils once a face region was found.
        if np.any(face_coordinates):
            eyes = EyesGateway.detect_eyes(img, face_coordinates)
            if np.any(eyes):
                threshold = cv2.getTrackbarPos('threshold', 'image')
                PupilsGateway.detect_pupil(img, threshold, face_coordinates, eyes, static)
                EyeDirectionGateway.detectEyeDirection(eyes, threshold, img, face_coordinates)
        cv2.imshow("image", img)
        # Throttle the loop to roughly 10 fps.
        time.sleep(0.1)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # After the loop release the cap object
    cap.release()
cv2.destroyAllWindows() | ritamouraribeiro/eye-tracker-opencv | libs/WebCamGateway.py | WebCamGateway.py | py | 1,287 | python | en | code | 0 | github-code | 13 |
1316905772 | __all__ = ["AMTrainer", "build_model", "_parser", "main"]
from ..shared import Manager
from ..shared import coreutils
from ..shared import encoder as model_zoo
from ..shared.data import (
KaldiSpeechDataset,
sortedPadCollateASR
)
import os
import argparse
from typing import *
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.cuda.amp import autocast
# NOTE:
# 1/4 subsampling is used for Conformer model defaultly
# for other sampling ratios, you may need to modify the values
# commonly, you can use larger value for allowing some margin.
SUBSAMPLING = 4


def check_label_len_for_ctc(tupled_mat_label: Tuple[torch.FloatTensor, torch.LongTensor]):
    """Return True when the (feature, label) pair is long enough for CTC/CRF.

    After the encoder's 1/SUBSAMPLING time reduction, CTC training needs the
    subsampled feature length to strictly exceed the label length; shorter
    pairs must be filtered out.
    """
    feats, labels = tupled_mat_label
    subsampled_len = feats.shape[0] // SUBSAMPLING
    return subsampled_len > labels.shape[0]


def filter_hook(dataset):
    """Webdataset hook: keep only utterances that pass the CTC length check."""
    return dataset.select(check_label_len_for_ctc)
def main_worker(gpu: int, ngpus_per_node: int, args: argparse.Namespace):
    """Per-process DDP worker: seed, join the process group, build the data
    manager, filter CTC-incompatible utterances and run training on ``gpu``."""
    coreutils.set_random_seed(args.seed)
    args.gpu = gpu
    # Global rank = node rank * GPUs-per-node + local GPU index.
    args.rank = args.rank * ngpus_per_node + gpu
    torch.cuda.set_device(args.gpu)
    dist.init_process_group(
        backend=args.dist_backend, init_method=args.dist_url,
        world_size=args.world_size, rank=args.rank)
    manager = Manager(
        KaldiSpeechDataset,
        sortedPadCollateASR(flatten_target=True),
        args,
        func_build_model=build_model,
        _wds_hook=filter_hook
    )
    # NOTE: for CTC training, the input feat len must be longer than the label len
    # ... when using webdataset (--largedataset) to load the data, we deal with
    # ... the issue by `_wds_hook`; if not, we filter the unqualified utterances
    # ... before training start.
    tr_dataset = manager.trainloader.dl.dataset
    if isinstance(tr_dataset, KaldiSpeechDataset):
        orilen = len(tr_dataset)
        tr_dataset.filt_by_len(lambda x, y: x//SUBSAMPLING > y)
        if len(tr_dataset) < orilen:
            coreutils.distprint(
                f"warning: filtered {orilen-len(tr_dataset)} utterances.",
                args.gpu
            )
    # training
    manager.run(args)
class AMTrainer(nn.Module):
    """Wraps an acoustic encoder with a CTC or CTC-CRF training criterion."""
    def __init__(
            self,
            am: model_zoo.AbsEncoder,
            use_crf: bool = False,
            lamb: Optional[float] = 0.01,
            **kwargs):
        # am: acoustic encoder producing per-frame logits.
        # use_crf: train with the CTC-CRF loss instead of plain nn.CTCLoss.
        # lamb: CRF loss weight, only used when use_crf is True.
        super().__init__()
        self.am = am
        self.is_crf = use_crf
        if use_crf:
            # Imported lazily so plain-CTC setups don't need ctc_crf installed.
            from ctc_crf import CTC_CRF_LOSS as CRFLoss
            self._crf_ctx = None
            self.criterion = CRFLoss(lamb=lamb)
        else:
            self.criterion = nn.CTCLoss()
    def register_crf_ctx(self, den_lm: Optional[str] = None):
        """Register the CRF context on model device."""
        assert self.is_crf
        from ctc_crf import CRFContext
        self._crf_ctx = CRFContext(den_lm, next(
            iter(self.am.parameters())).device.index)
    def forward(self, feats, labels, lx, ly):
        # feats: padded feature batch; labels: flattened target ids;
        # lx / ly: per-utterance feature / label lengths.
        logits, lx = self.am(feats, lx)
        logits = torch.log_softmax(logits, dim=-1)
        # Both criteria below take targets/lengths as CPU int32 tensors.
        labels = labels.cpu()
        lx = lx.cpu()
        ly = ly.cpu()
        if self.is_crf:
            assert self._crf_ctx is not None
            # Autocast is disabled so the CRF loss runs in full precision.
            with autocast(enabled=False):
                loss = self.criterion(
                    logits.float(), labels.to(torch.int),
                    lx.to(torch.int), ly.to(torch.int))
        else:
            # [N, T, C] -> [T, N, C]
            logits = logits.transpose(0, 1)
            loss = self.criterion(logits, labels.to(torch.int), lx.to(
                torch.int), ly.to(torch.int))
        return loss
def build_model(
        cfg: dict,
        args: Optional[Union[argparse.Namespace, dict]] = None,
        dist: bool = True,
        wrapper: bool = True) -> Union[nn.parallel.DistributedDataParallel, AMTrainer, model_zoo.AbsEncoder]:
    """Build the acoustic model from ``cfg``.

    Returns, depending on the flags: the bare encoder (wrapper=False), an
    AMTrainer (dist=False), or a CUDA DistributedDataParallel-wrapped
    AMTrainer (default; ``args`` must then carry the GPU id).
    """
    if 'ctc-trainer' not in cfg:
        cfg['ctc-trainer'] = {}
    assert 'encoder' in cfg
    netconfigs = cfg['encoder']
    net_kwargs = netconfigs['kwargs']   # type:dict
    # when migrating a configure from RNN-T to CTC,
    # one usually forgets to set `with_head=True` and 'num_classes'
    if not net_kwargs.get('with_head', False):
        print("warning: 'with_head' in field:encoder:kwargs is False/not set. "
              "If you don't know what this means, set it to True.")
    if 'num_classes' not in net_kwargs:
        raise Exception("error: 'num_classes' in field:encoder:kwargs is not set. "
                        "You should specify it according to your vocab size.")
    # Instantiate the encoder class named by cfg['encoder']['type'].
    am_model = getattr(model_zoo, netconfigs['type'])(
        **net_kwargs)  # type: model_zoo.AbsEncoder
    if not wrapper:
        return am_model
    model = AMTrainer(am_model, **cfg['ctc-trainer'])
    if not dist:
        return model
    assert args is not None, f"You must tell the GPU id to build a DDP model."
    if isinstance(args, argparse.Namespace):
        args = vars(args)
    elif not isinstance(args, dict):
        raise ValueError(f"unsupport type of args: {type(args)}")
    # make batchnorm synced across all processes
    model = coreutils.convert_syncBatchNorm(model)
    model.cuda(args['gpu'])
    # CTC-CRF needs its denominator LM context registered before DDP wrapping.
    if 'use_crf' in cfg['ctc-trainer'] and cfg['ctc-trainer']['use_crf']:
        assert 'den-lm' in cfg['ctc-trainer']
        model.register_crf_ctx(cfg['ctc-trainer']['den-lm'])
    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[args['gpu']])
    return model
def _parser():
    """Create the command-line argument parser for the CTC trainer."""
    return coreutils.basic_trainer_parser("CTC trainer.")
def main(args: argparse.Namespace = None):
    """CLI entry point: parse arguments (unless supplied), set up output
    paths and spawn one main_worker process per GPU."""
    if args is None:
        parser = _parser()
        args = parser.parse_args()
    coreutils.setup_path(args)
    coreutils.main_spawner(args, main_worker)
if __name__ == "__main__":
print(
"NOTE:\n"
" since we import the build_model() function in cat.ctc,\n"
" we should avoid calling `python -m cat.ctc.train`, instead\n"
" running `python -m cat.ctc`"
)
| NLPvv/Transducer-dev-1 | cat/ctc/train.py | train.py | py | 6,089 | python | en | code | 0 | github-code | 13 |
73091567697 | # These are primarily intended to be used with simulate.calc_lens
import metrics
import pandas
import decimal
from decimal import Decimal as D
def calc_pwa0(annual_data):
    """metrics.pwa over the real returns with second argument 0 — presumably
    the perfect-withdrawal amount with a zero final-balance target; confirm
    the parameter meaning in metrics.pwa."""
    return metrics.pwa(1, 0, [n.returns_r for n in annual_data])
def calc_pwa1(annual_data):
    """metrics.pwa over the real returns with second argument 1 — presumably
    a capital-preserving final-balance target; confirm in metrics.pwa."""
    return metrics.pwa(1, 1, [n.returns_r for n in annual_data])
def calc_success(annual_data):
    """A retirement counts as a success when the final real withdrawal is no
    lower than the initial one — i.e. spending was never cut along the way.
    A $1 slack absorbs floating-point noise in the withdrawal amounts."""
    first = annual_data[0].withdraw_r
    final = annual_data[-1].withdraw_r
    return final >= first - 1
def calc_shortfall_years(annual):
    """Count the years whose real withdrawal fell more than $1 below the
    first year's withdrawal (the $1 covers floating-point noise)."""
    frame = pandas.DataFrame(annual)
    withdrawals = frame['withdraw_r']
    shortfall_mask = withdrawals < withdrawals[0] - 1
    return int(shortfall_mask.sum())
def calc_years_sustained(annual):
    """Years the plan was sustained beyond (or short of) the simulation.

    Negative: minus the number of shortfall years (withdrawal cut more than
    $1 below the first year's amount, the $1 covering float noise).
    Otherwise: the final real post-withdrawal balance expressed in multiples
    of the first withdrawal.
    """
    frame = pandas.DataFrame(annual)
    first_wd = frame['withdraw_r'][0]
    shortfall_years = int((frame['withdraw_r'] < first_wd - 1).sum())
    if shortfall_years:
        return -shortfall_years
    final_balance = frame['portfolio_post'].iloc[-1].value_r
    return final_balance / first_wd
def calc_ulcer(annual):
    """Ulcer index of the real pre-withdrawal portfolio values; delegates the
    drawdown math to metrics.ulcer."""
    df = pandas.DataFrame(annual)
    # NOTE(review): Series.get_values() was removed in pandas 1.0 — modern
    # pandas needs .to_numpy() / .values here.
    vals = df['portfolio_pre'].get_values()
    ulcer = metrics.ulcer([p.value_r for p in vals])
    return ulcer
def calc_bond_pct(annual):
df = pandas.DataFrame(annual)
with decimal.localcontext(decimal.ExtendedContext) as context:
vals = df['portfolio_pre'].get_values()
# this arrives as an ndarray but pandas wants a real list
p = pandas.DataFrame(data=list(vals))
bonds_pct = p['bonds'] / p['value_n']
return bonds_pct.mean()
def calc_hreff(annual, floor=D('.04')):
df = pandas.DataFrame(annual)
withdrawals = df['withdraw_pct_orig'].tolist()
returns = df['returns_r'].tolist()
return metrics.hreff(withdrawals, returns, floor=floor)
def calc_max_wd(annual):
df = pandas.DataFrame(annual)
return df['withdraw_pct_cur'].max()
def calc_cew(annual):
df = pandas.DataFrame(annual)
return metrics.cew(df['withdraw_r'].tolist())
def calc_dras(series, years):
L = years
# how many had shortfall years?
failures = series[series < 0]
successes = series[series >= 0]
p_fail = len(failures) / len(series)
s_y = failures.mean()
b_y = successes.mean()
e_ys = (p_fail * (L + s_y)) + ((1 - p_fail) * (L + b_y))
# semi-deviation with respect to length of retirement
ssd_l_ys = (p_fail * s_y * s_y) ** 1/2
d_ras = e_ys / ssd_l_ys
return d_ras
def calc_coverage_ratio(annual, years):
s_y = calc_years_sustained(annual)
L = years
c = s_y / L
def u(c, risk_aversion=D('0.9999'), penalty_coeff=D(10)):
c = D(c)
if c >= 1:
numerator = (c ** (1 - risk_aversion)) - 1
denominator = 1 - risk_aversion
return numerator / denominator
else:
numerator = (1 ** (1 - risk_aversion)) - 1
denominator = 1 - risk_aversion
penalty = penalty_coeff * (1 - c)
return (numerator / denominator) - penalty
return u(c)
| hoostus/prime-harvesting | lens.py | lens.py | py | 3,399 | python | en | code | 26 | github-code | 13 |
37296083485 |
import matplotlib.pyplot as plt
import numpy as np
import sys
import json
from util import Log, ConstellationToXY
def main():
if len(sys.argv) != 3 and len(sys.argv) != 5:
Log("ERR. Incorrent number of args.")
return
if sys.argv[1] != "-f":
Log("ERR. No '-f' param given.")
return
wp = -1
if len(sys.argv) > 3:
if sys.argv[3] != "-n":
Log("ERR. No -n param given")
return
wp = int(sys.argv[4])
f = open(sys.argv[2], "r")
rep = json.load(f)
# Log(json.dumps(rep, indent=2, sort_keys=True))
Log("Plotting generation " + str(rep["Reports"][wp]["Generation"]))
for i,r in enumerate(rep["Reports"][wp]["Reports"]):
x,y = ConstellationToXY(r["constellation"])
plt.subplot(331+i); plt.plot(x,y,"*"); plt.xlim([-2,2]); plt.ylim([-2,2]); plt.grid(); plt.title("Individual " + str(i))
plt.show()
if __name__ == "__main__":
main()
| skyhoffert/ENEE623_Project | report.py | report.py | py | 961 | python | en | code | 0 | github-code | 13 |
31071192149 | # -*- encoding: utf-8 -*-
"""
PyCharm show
2022年08月14日
by littlefean
"""
from typing import *
空 = None
真 = True
def 主函数():
打印 = print
打印(123)
return 空
if __name__ == "__main__":
主函数()
| Littlefean/SmartPython | python迷惑行为/中文编程/show.py | show.py | py | 240 | python | zh | code | 173 | github-code | 13 |
672494652 | import logging
from sawtooth_sdk.protobuf import state_context_pb2
from sawtooth_sdk.processor.exceptions import InvalidTransaction
from rbac_addressing import addresser
from rbac_processor.common import get_state_entry
from rbac_processor.common import is_in_role_attributes_container
from rbac_processor.common import validate_list_of_user_are_users
from rbac_processor.protobuf import role_state_pb2
from rbac_processor.protobuf import role_transaction_pb2
from rbac_processor.state import get_state
from rbac_processor.state import set_state
LOGGER = logging.getLogger(__name__)
def apply_create_role(header, payload, state):
create_role = role_transaction_pb2.CreateRole()
create_role.ParseFromString(payload.content)
_validate_create_role_data(create_role)
_validate_create_role_state(create_role, state)
_handle_role_state_set(create_role, state)
def _validate_create_role_data(create_role):
if not len(create_role.name) > 4:
raise InvalidTransaction("Role name {} must be greater than 4 "
"characters.".format(create_role.name))
if not create_role.admins:
raise InvalidTransaction("Role must have at least one admin")
if not create_role.owners:
raise InvalidTransaction("Role must have at least one owner")
def _validate_create_role_state(create_role, state):
state_return = get_state(
state,
[addresser.make_role_attributes_address(create_role.role_id)])
if _role_already_exists(state_return, create_role.role_id):
raise InvalidTransaction("Role id {} is already in state".format(
create_role.role_id))
users = list(create_role.admins) + list(create_role.owners)
user_state_return = get_state(
state,
[addresser.make_user_address(u) for u in users])
validate_list_of_user_are_users(user_state_return, users)
def _handle_role_state_set(create_role, state):
role_container = role_state_pb2.RoleAttributesContainer()
role = role_container.role_attributes.add()
role.role_id = create_role.role_id
role.name = create_role.name
role.metadata = create_role.metadata
entries_to_set = {
addresser.make_role_attributes_address(create_role.role_id):
role_container.SerializeToString()
}
pubkeys_by_address = {}
for admin in list(create_role.admins):
admin_address = addresser.make_role_admins_address(
role_id=create_role.role_id,
user_id=admin)
if admin_address in pubkeys_by_address:
pubkeys_by_address[admin_address].append(admin)
else:
pubkeys_by_address[admin_address] = [admin]
for owner in list(create_role.owners):
owner_address = addresser.make_role_owners_address(
role_id=create_role.role_id,
user_id=owner)
if owner_address in pubkeys_by_address:
pubkeys_by_address[owner_address].append(owner)
else:
pubkeys_by_address[owner_address] = [owner]
state_returns = get_state(
state,
[addresser.make_role_admins_address(
role_id=create_role.role_id,
user_id=a) for a in create_role.admins] +
[addresser.make_role_owners_address(
role_id=create_role.role_id,
user_id=o) for o in create_role.owners])
for addr, pubkeys in pubkeys_by_address.items():
try:
state_entry = get_state_entry(state_returns, addr)
container = role_state_pb2.RoleRelationshipContainer()
container.ParseFromString(state_entry.data)
except KeyError:
container = role_state_pb2.RoleRelationshipContainer()
_add_role_rel_to_container(
container,
create_role.role_id,
pubkeys)
entries_to_set[addr] = container.SerializeToString()
set_state(state, entries_to_set)
def _add_role_rel_to_container(container, role_id, identifiers):
role_relationship = container.relationships.add()
role_relationship.role_id = role_id
role_relationship.identifiers.extend(identifiers)
def _role_already_exists(state_return, role_id):
if not state_return:
return False
role_attr_container = role_state_pb2.RoleAttributesContainer()
role_attr_container.ParseFromString(
state_return[0].data)
return is_in_role_attributes_container(
container=role_attr_container,
identifier=role_id)
| hyperultra-zz/selenium | processor/rbac_processor/role/role_apply.py | role_apply.py | py | 4,490 | python | en | code | 0 | github-code | 13 |
3873729152 |
def swap(mylist,i):
# that sit at indices i and (i+1).
if (i >= 0 and i+1 <= len(mylist) - 1): # testing that the two indices are valid for the list
temp = mylist[i+1]
mylist[i+1] = mylist[i]
mylist[i] = temp
else:
print('error: index out of bounds')
def scan_once(l):
# it returns True if it has performed at least one swap, False otherwise (meaning that the input list was sorted)
at_least_one_swap = False
length = len(l)
for i in range(length - 1): # we perform at most (length - 1) swaps, e.g. 3 swaps in a list of 4 elements
if l[i] > l[i+1]: # we do not perform a swap in case of equality: that would be totally useless
swap(l,i)
at_least_one_swap = True
return at_least_one_swap # to let the caller know whether the input list was already sorted
def bubble_sort(l):
do_i_have_to_continue = True
while (do_i_have_to_continue):
do_i_have_to_continue = scan_once(l)
return l
## QUESTION 4 ##
def bubble_sort(l):
localcopy = l[:] # this creates a list called localcopy that is a copy of the list l. l is unchanged by this bubble_sort.
do_i_have_to_continue = True
while (do_i_have_to_continue):
do_i_have_to_continue = scan_once(localcopy)
return localcopy
def binary_search(value, mylist):
# in case the search is successful, the function returns the index where the element is, otherwise it returns -1.
length = len(mylist)
if length == 0:
return -1
else: # the list has at least one element
first = 0
last = length-1
# and length is the length of the current chunk (already set)
while length > 1:
midpoint = int((first+last)/2) # this is the floor of the average, e.g. int(3.5) == 3
if mylist[midpoint] == value:
return midpoint # we have found the value sought
elif mylist[midpoint] < value:
# if present, the value is in the second half of the list
first = midpoint + 1 # we don't modify last, the last possible position
else: # if present, the value is in the first half of the list
last = midpoint - 1 # we don't modify first, the first possible position
length = last - first + 1 # importantly, this is OUTSIDE the if/elif/else, so computed in all cases
# when the execution reaches this point, l == 1 (which means last == first) and we have to test whether the element is there:
if mylist[first] == value:
return first
else:
return -1
def get_first_index(mylist, k):
if k < 0 or k >= len(mylist):
print("error in get_first_index: index out of bounds.")
return -1 # kind of an error code
else:
elem = mylist[k] # the element for which we are going to find the first occurrence
# k is readily a local variable. We are going to decrement it as long as the currrent element in the list remains equal to elem.
while k > 0: # which means there exists a mylist[k-1] element
if mylist[k-1] == elem:
k -= 1
else:
break
return k # either because k == 0 or because the break instruction has been reached
# and then we can rewrite the return statements of our bubble sort function above, to return the return value of get_first_index(mylist,k)
# instead of k itself, which gives:
def binary_search(value, mylist):
# this function performs a binary search, looking for value in mylist.
# in case the search is successful, the function returns the FIRST index where the element is, otherwise it returns -1.
length = len(mylist)
if length == 0:
return -1
else: # the list has at least one element
first = 0
last = length-1
# and length is the length of the current chunk (already set)
while length > 1:
midpoint = int((first+last)/2) # this is the floor of the average, e.g. int(3.5) == 3
if mylist[midpoint] == value:
return get_first_index(mylist,midpoint) # we have found the value sought
elif mylist[midpoint] < value:
# if present, the value is in the second half of the list
first = midpoint + 1 # we don't modify last, the last possible position
else: # if present, the value is in the first half of the list
last = midpoint - 1 # we don't modify first, the first possible position
length = last - first + 1 # importantly, this is OUTSIDE the if/elif/else, so computed in all cases
# when the execution reaches this point, l == 1 (which means last == first) and we have to test whether the element is there:
if mylist[first] == value:
return get_first_index(mylist,first)
else:
return -1
list1 = [4,665,69,-12.4,5,2,0,66,23,11,-3,2,452,2,289,2,232]
print(list1)
sorted = bubble_sort(list1)
print(sorted)
print(list1) # to see whether it has been modified
print("Searching for 2: index", binary_search(2,sorted))
print("Searching for 100: index", binary_search(-1,sorted))
| Nxumalo/Sort-Methods | Bubble Sort.py | Bubble Sort.py | py | 4,675 | python | en | code | 0 | github-code | 13 |
28127352990 | """
最大值减去最小值小于或者等于num的子数组数量
题目:
给定数组arr和整数num,共返回有多少个子数组满足如下情况:
max(arr[i..j]) - min(arr[i..j]) <=num
max(arr[i..j]}表示子数组arr[i..j]中的最大值,min(arr[i..j])表示子数组arr[i..j]中的最小值
要求:
如果数组的长度为N,请实现时间复杂度为O(N)的解法
"""
from development.chapter7.LinkedDeque import LinkedDeque
def get_num(arr, num):
"""最大值减去最小值等于num的数量的具体实现"""
if len(arr) == 0 or num is None:
return
qmin = LinkedDeque()
qmax = LinkedDeque()
i, j, res = 0, 0, 0
while i < len(arr):
while j < len(arr):
"""一个递增的栈元素"""
while not qmin.is_empty() and arr[qmin.last()] >= arr[j]:
qmin.delete_last()
qmin.insert_last(j)
"""一个递减的栈元素"""
while not qmax.is_empty() and arr[qmax.last()] <= arr[j]:
qmax.delete_last()
qmax.insert_last(j)
if arr[qmax.first()] - arr[qmin.first()] > num:
break
j += 1
if qmin.first() == i:
qmin.delete_first()
if qmax.first() == i:
qmax.delete_first()
res += j - i
i += 1
return res
if __name__ == '__main__':
num = get_num([6, 3, 9, 17, 9], 10)
print(num)
| liruileay/data_structure_in_python | data_structure_python/question/chapter1_stack_queue_question/question11.py | question11.py | py | 1,244 | python | zh | code | 0 | github-code | 13 |
9483519716 | '''
Created on 27 mar 2016
@author: linky
'''
import Funzioni_PyTumblr
import pytumblr
from time import sleep
from _Poster import _poster as ps
from Funzioni_PyTumblr import *
class Poster(ps):
'''
classdocs
Questa classe avra' il compito di gestire la ricezione delle immagini da postare
'''
def __init__(self, c_key, s_key, token, sec_token, API_key, nome_blog, querry_tumblr, tags):
'''
Constructor
'''
self.stuff = Funzioni_PyTumblr.get_stuff_from_tumblr(querry_tumblr)
self.tags = tags
self.FiltroTag = ['art', 'myart', 'my art', 'own', 'own edit', 'myown']
# Autorizzazioni per tumblr
self.Customer_key = c_key
self.Secret_key = s_key
self.Token = token
self.Secre_token = sec_token
self.API_key = API_key
self.name = nome_blog
self.query = querry_tumblr
self.client = pytumblr.TumblrRestClient(self.Customer_key, self.Secret_key, self.Token, self.Secre_token)
def posta(self):
""" questa f() posta su tumblr gli elementi presi dal dizionario {"url" : [tags]}"""
self.filtra()
for k in self.stuff.keys():
if(len(self.stuff[k]) > 2): # <- verifico che ci siano tag!
# Filtro, per evitare i repost
self.client.create_photo(self.name, state="queue", tags=self.stuff[
k], source=k) # <--- FUCK YEAH WORKA!!!!
print( "Postato qualcosa by" + "\t" + self.name + "\n")
sleep(2)
def filtra(self):
""" Filtro i post con alcuni TAG"""
for k in list(self.stuff.keys()):
for f in self.FiltroTag:
try:
if(f in list(self.stuff[k])):
print(k + " FUCK\t GGWP, PERICOLO SCAMPATO!!!")
try:
del self.stuff[k]
except KeyError:
print ("NOn ho potuto eliminare la chiave\n")
pass
except KeyError:
print("Provo a passare roba POTENZIALMENTE CIT eliminata")
pass
def likes(self):
#infos = Funzioni_PyTumblr.smart_likes(Funzioni_PyTumblr.get_querry())
infos = Funzioni_PyTumblr.smart_likes(Funzioni_PyTumblr.get_searching_tags(self.tags))
for k in infos:
self.client.like(k[0], k[1])
print ("Messi %d Likes" % (len(infos)))
def reblog(self):
lista = Funzioni_PyTumblr.smart_reblog(self.query)
for k in lista:
self.client.reblog(self.name, id=k[0], reblog_key=k[1], tags=k[2])
print ("Reblogged: %d posts" % (len(lista)))
def reblog_adv(self):
lista_url = "https://thenaturalscenery.tumblr.com/tagged/14871e7"
lista_items = smart_reblog_adv(lista_url)
id = lista_items[0].split("/")[0]
key= lista_items[0].split("/")[1]
self.client.reblog(self.name, id=id, key=key, tags=["fashion", "clothes", 'trendy'])
def posta_quotes(self):
cits = Funzioni_PyTumblr.get_titoli_from_reddit('quotes')
tagsss = ['quote', 'cit', 'citation', 'famous words']
print('Ci sono %d elementi\t %s' % (len(cits), self.name))
for cit in cits:
self.client.create_quote(
self.name, state="queue", tags=tagsss, quote=cit)
print ("Fatto")
print ("Finished")
| Linkinax/PyTumblr | Poster/Poster.py | Poster.py | py | 3,668 | python | it | code | 0 | github-code | 13 |
4693902191 | import textwrap
def merge_the_tools(string, n):
# Calculate the length of each substring
substring_length = n
# Split the string into substrings
substrings = textwrap.wrap(string, substring_length)
result_list = []
for string in substrings:
unique_chars = []
for char in string:
if char not in unique_chars:
unique_chars.append(char)
unique_string = ''.join(unique_chars)
result_list.append(unique_string)
print('\n'.join(result_list))
print(unique_chars)
print(unique_string)
"""
def merge_the_tools(string, k):
result = ""
for i, c in enumerate(string, 1):
if not c in result:
result += c
if i%k==0:
print(result)
result = ""
best solution
"""
if __name__ == '__main__':
string, k = input(), int(input())
merge_the_tools(string, k) | ifte110/Python-hackerrank | merge_the_tools.py | merge_the_tools.py | py | 930 | python | en | code | 0 | github-code | 13 |
35220089434 | import torch
import torch.nn as nn
import torch.nn.functional as F
class DuelingCNN(nn.Module):
def __init__(self, img_dim, w, h, input_dim, output_dim, dueling_type='mean'):
super().__init__()
self.dueling_type = dueling_type
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.conv1 = nn.Conv2d(img_dim, 16, kernel_size=5, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.bn3 = nn.BatchNorm2d(32)
def conv2d_size_out(size, kernel_size = 5, stride = 2):
return (size - (kernel_size - 1) - 1) // stride + 1
convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
linear_input_size = convw * convh * 32
self.fc_value = nn.Linear(linear_input_size, 1)
self.fc_action_adv = nn.Linear(linear_input_size, output_dim)
def forward(self, x):
x.to(self.device)
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = x.view(x.size(0), -1)
v = self.fc_value(x)
a = self.fc_action_adv(x)
if self.dueling_type == 'max':
q = v + a - a.max()
else:
q = v + a - a.mean()
return q | vinaykudari/maze-solver | cnn_dueling.py | cnn_dueling.py | py | 1,552 | python | en | code | 0 | github-code | 13 |
19124576657 | # Note: This is a model directly from the https://pytorch.org/tutorials/intermediate/speech_command_classification_with_torchaudio_tutorial.html
# This model reflects the model described in the following paper: https://arxiv.org/pdf/1610.00087.pdf
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchaudio
import sys
import matplotlib.pyplot as plt
# import IPython.display as ipd
from tqdm import tqdm
class M5(nn.Module):
def __init__(self, n_input, n_output, stride=16, n_channel=32):
super().__init__()
self.conv1 = nn.Conv1d(n_input, n_channel, kernel_size=80, stride=stride)
self.bn1 = nn.BatchNorm1d(n_channel)
self.pool1 = nn.MaxPool1d(4)
self.conv2 = nn.Conv1d(n_channel, n_channel, kernel_size=3)
self.bn2 = nn.BatchNorm1d(n_channel)
self.pool2 = nn.MaxPool1d(4)
self.conv3 = nn.Conv1d(n_channel, 2 * n_channel, kernel_size=3)
self.bn3 = nn.BatchNorm1d(2 * n_channel)
self.pool3 = nn.MaxPool1d(4)
self.conv4 = nn.Conv1d(2 * n_channel, 2 * n_channel, kernel_size=3)
self.bn4 = nn.BatchNorm1d(2 * n_channel)
self.pool4 = nn.MaxPool1d(4)
self.fc1 = nn.Linear(2 * n_channel, n_output)
def forward(self, x):
# make data [batch_size, input_channels, signal_length]
# x = torch.unsqueeze(x, dim=-2)
# x = x.permute(0,2,1)
# batch len, by channel length, by tensor for each channel
x = self.conv1(x)
x = F.relu(self.bn1(x))
x = self.pool1(x)
x = self.conv2(x)
x = F.relu(self.bn2(x))
x = self.pool2(x)
x = self.conv3(x)
x = F.relu(self.bn3(x))
x = self.pool3(x)
x = self.conv4(x)
x = F.relu(self.bn4(x))
x = self.pool4(x)
x = F.avg_pool1d(x, x.shape[-1])
x = x.permute(0, 2, 1)
x = self.fc1(x)
return F.log_softmax(x, dim=2) | achandlr/Music-Genre-Classifier | src/models/M5_Audio_Classifier.py | M5_Audio_Classifier.py | py | 1,963 | python | en | code | 0 | github-code | 13 |
19420524977 | import shutil
import subprocess
from dataclasses import dataclass
def does_program_exist(prog_name):
if shutil.which(prog_name) is None:
return False
else:
return True
@dataclass(frozen=True, order=True)
class Opts:
steps: int = 1000000
sterics: bool = False
extra_pdbs: str = ""
connect_str : str = "0,A222-A251"
class BuildMotifGraphWrapper(object):
def __init__(self):
pass
def setup(self):
if not does_program_exist("build_motif_graph"):
raise ValueError("must have build_motif_graph in path!")
def run(self, sequence, build_file, ens_file, opts : Opts):
cmd = (
f"build_motif_graph --build {build_file} --ensembles {ens_file } "
f"--seq {sequence} "
)
if opts.extra_pdbs != "":
cmd += f"--pdbs {opts.extra_pdbs} "
if opts.connect_str != "":
cmd += f"--connect \"{opts.connect_str}\" "
if opts.steps != 1000000:
cmd += f"--steps {opts.steps} "
if opts.sterics:
cmd += "--sterics "
try:
output = subprocess.check_output(cmd, shell=True).decode("utf8")
lines = output.split("\n")
return int(lines[-2])
except:
return -1
| jyesselm/rnamake_ens_gen | rnamake_ens_gen/wrapper.py | wrapper.py | py | 1,296 | python | en | code | 0 | github-code | 13 |
38190607909 | from databases import Database
from fastapi import HTTPException
from sqlite3 import IntegrityError
import json
from models.term_definition import TermDefinition
class LexicalException(Exception):
pass
# global cache of Lexicon
class Lexicon():
resolve_query = "SELECT Params, Definition, Line FROM Term WHERE Name=:name;"
resolve_doc_query = "SELECT Doc FROM Term WHERE Name=:name;"
insert_query = "INSERT INTO Term (Name, Params, ParamNum, Definition, Line, Creator, Doc) VALUES (:name, :params, :paramcount, :def, :line, :creator, :doc);"
def __init__(self, db:Database):
self.database = db
async def resolve(self, name:str):
values = {"name": name}
results = await self.database.fetch_all(query=Lexicon.resolve_query, values=values)
if len(results) == 1:
params = results[0]["Params"]
if params == "null":
params = []
else:
params = json.loads(params)
definition = json.loads(results[0]["Definition"])
retpacket = {"name":name,"params":params,"definition":definition,"line":results[0]["Line"]}
return json.dumps(retpacket)
elif len(results) == 0:
raise HTTPException(status_code=404, detail="Term is unknown")
else:
# log error here
raise LexicalException
async def resolve_doc(self, name:str):
values = {"name": name}
results = await self.database.fetch_all(query=Lexicon.resolve_doc_query, values=values)
if len(results) == 1:
return json.dumps({"doc":str(results[0])})
elif len(results) == 0:
raise HTTPException(status_code=404, detail="Term is unknown")
else:
# log error here
raise LexicalException
async def assign(self, termdef:TermDefinition):
if not termdef.params or termdef.params == "[]":
params = "null"
param_count = 0
else:
params = termdef.params
param_count = termdef.params.count(",")+1
values = {"name": termdef.term, "params": params, "paramcount": param_count, "def": termdef.definition, "line": termdef.line, "creator": termdef.creator, "doc": termdef.doc}
try:
results = await self.database.execute(query=Lexicon.insert_query, values=values)
print(results)
except IntegrityError as ie:
# get the real definition of the term, since assignment could not happen
old_def = await self.database.fetch_all(query=Lexicon.resolve_query, values={"name": termdef.term})
# only get the line, nothing else
old_def = old_def[0][2]
raise HTTPException(status_code=409, detail=old_def)
except Exception as ex:
# log error here
raise HTTPException(status_code=500, detail=ex)
return "{complete}"
| rottytooth/Babble | lexicon_dao.py | lexicon_dao.py | py | 2,955 | python | en | code | 0 | github-code | 13 |
7377922214 | import numpy as np
import scipy
from matplotlib import pyplot as plt
from numpy.random import randint
from ikrlib import train_gmm, logpdf_gmm
from projekt_lib import wav16khz2mfcc,png2fea
train_n = wav16khz2mfcc('train_data/non_target_train').values()
train_t = wav16khz2mfcc('train_data/target_train').values()
test_n = wav16khz2mfcc('train_data/non_target_dev').values()
test_t = wav16khz2mfcc('train_data/target_dev').values()
# non_target_png_train = png2fea('train_data/non_target_train').values()
# target_png_train = png2fea('train_data/target_train').values()
# non_target_png_dev = png2fea('train_data/non_target_dev').values()
# target_png_dev = png2fea('train_data/target_dev').values()
train_t = np.vstack(train_t)
train_n = np.vstack(train_n)
#two gmm models to train and test
M_t = 3
MUs_t = train_t[randint(1, len(train_t), M_t)]
#COVs_t = [np.var(train_t, axis=0)] * M_t
COVs_t = [np.cov(train_t.T)] * M_t
Ws_t = np.ones(M_t) / M_t
M_n = 20
MUs_n = train_n[randint(1, len(train_n), M_n)]
#COVs_t = [np.var(train_n, axis=0)] * M_n
COVs_n = [np.cov(train_n.T)] * M_n
Ws_n = np.ones(M_n) / M_n
# Run 30 iterations of EM algorithm to train the two GMMs from males and females
for jj in range(30):
[Ws_t, MUs_t, COVs_t, TTL_t] = train_gmm(train_t, Ws_t, MUs_t, COVs_t);
[Ws_n, MUs_n, COVs_n, TTL_n] = train_gmm(train_n, Ws_n, MUs_n, COVs_n);
print('Iteration:', jj, ' Total log-likelihood:', TTL_t, 'for males;', TTL_n, 'for females')
P_t=0.5
P_n=1.0-P_t
score = []
for tst in test_t:
ll_m = logpdf_gmm(tst, Ws_t, MUs_t, COVs_t)
ll_f = logpdf_gmm(tst, Ws_n, MUs_n, COVs_n)
score.append((sum(ll_m) + np.log(P_t)) - (sum(ll_f) + np.log(P_n)))
print(score)
print('Fraction of correctly recognized targets: %f' % (np.mean(np.array(score) > 0)))
score = []
for tst in test_n:
ll_m = logpdf_gmm(tst, Ws_t, MUs_t, COVs_t)
ll_f = logpdf_gmm(tst, Ws_n, MUs_n, COVs_n)
score.append((sum(ll_m) + np.log(P_t)) - (sum(ll_f) + np.log(P_n)))
print(score)
print('Fraction of correctly recognized targets: %f' % (np.mean(np.array(score) < 0)))
# train_target = np.vstack(target_wav_train)
# train_non = np.vstack(non_target_wav_train)
# dim = train_target.shape[1]
#
# # PCA reduction to 2 dimensions
#
# cov_tot = np.cov(np.vstack([train_non, train_target]).T, bias=True)
# # take just 2 largest eigenvalues and corresponding eigenvectors
# d, e = scipy.linalg.eigh(cov_tot, eigvals=(dim - 2, dim - 1))
#
# train_n_pca = train_non.dot(e)
# train_t_pca = train_target.dot(e)
# plt.plot(train_n_pca[:, 1], train_n_pca[:, 0], 'b.', ms=1)
# plt.plot(train_t_pca[:, 1], train_t_pca[:, 0], 'r.', ms=1)
# plt.show()
#
# #print(non_target_wav_train) | xgalba03/SUR---person-recognition-NN- | IKR_demos_py/projekt.py | projekt.py | py | 2,686 | python | en | code | 0 | github-code | 13 |
28008586690 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 06:14:25 2019
#pythonprogramming.net python3.7 basics tutorial - Making a simple TicTacToe game.
@author: RB
"""
import itertools
def win(current_game):
def all_same(l):
if l.count(l[0]) == len(l) and l[0] != 0:
return True
else:
return False
#horizonal
for row in game:
print(row)
if all_same(row):
print(f'Player {row[0]} is the winner horizontally!')
return True
#diagonal
diags = []
for col, row in enumerate(reversed(range(len(game)))):
diags.append(game[row][col])
if all_same(diags):
print(f'Player {diags[0]} is the winner diagonally! (/)')
return True
diags = []
for ix in range(len(game)):
diags.append(game[ix][ix])
if all_same(diags):
print(f'Player {diags[0]} is the winner diagonally! (\\)')
return True
#vertical
for col in range(len(game)):
check = []
for row in game:
check.append(row[col])
if all_same(check):
print(f'Player {check[0]} is the winner vertically!')
return True
return False
def game_board(game_map, player=0, row=0, column=0, just_display=False):
try: #try statement to handle for errors to begin bracketing of funct
#parameters with defaults in the function
if game_map[row][column] != 0:
print('This position is occupado! Choose another!')
return game_map, False
print(' '+' '.join([str(i) for i in range(len(game_map))])) #top row numbers
if not just_display:
game_map[row][column] = player
for count, row in enumerate(game_map): #inbuilt func enumerate
print(count, row)
return game_map, True
except IndexError as e: #if error (what type) happens do this.
print('Error row/column input must be 0, 1 or 2', e)
return game_map, False
except Exception as e: #any error you didn't think about.
print('Something went very wrong!.', e)
return game_map, False
#else: another option
#finally: another option that is very rare
play = True
players = [1,2]
while play:
game_size = int(input("What size of tic tac toe? "))
game = [[0 for i in range(game_size)] for i in range(game_size)]
game_won = False
game, _ = game_board(game, just_display=True) #underscore means doesn't matter true or false
player_choice = itertools.cycle([1,2])
while not game_won:
current_player = next(player_choice)
print(f'Current Player: {current_player}')
played = False
while not played:
column_choice = int(input("What column do you want to play? (0, 1, 2): "))
row_choice = int(input("What row do you want to play? (0, 1, 2): "))
game, played = game_board(game, current_player, row_choice, column_choice)
if win(game):
game_won = True
again = input('The game is over. Do you want to play again? (y/n) ')
if again.lower() == 'y':
print('restarting...')
elif again.lower() == 'n':
print('byeee!')
play = False
else:
print('Not a valid answer. So...c u l8r aligator.')
play = False
| Ravenblack7575/Exercises-Testing | tictactoe4.py | tictactoe4.py | py | 3,671 | python | en | code | 0 | github-code | 13 |
71606898577 |
MENU = {
"espresso": {
"ingredients": {
"water": 50,
"coffee": 18,
},
"cost": 1.5,
},
"latte": {
"ingredients": {
"water": 200,
"milk": 150,
"coffee": 24,
},
"cost": 2.5,
},
"cappuccino": {
"ingredients": {
"water": 250,
"milk": 100,
"coffee": 24,
},
"cost": 3.0,
}
}
resources = {
"water": 300,
"milk": 200,
"coffee": 100,
}
def calculate():
print("please insert coins: ")
total += float(input("How many quarters? ")) * 0.25
total += float(input("How many nickels? ")) *0.1
total += float(input("How many dimes? ")) *0.05
total += float(input("How many pennies? ")) *0.01
return total
def resource_calculate():
resources['water'] = MENU[order_id]['ingredients']['water']
resources['milk'] = MENU[order_id]['ingredients']['milk']
resources['coffee'] = MENU[order_id]['ingredients']['coffee']
return resources
def processing(payment, order_id, balance, change):
if payment < MENU[order_id]['cost']:
print("Sorry that's not enough money. Money refunded.")
else:
balance += MENU[order_id]['cost']
change = payment - MENU[order_id]['cost']
print(f"Here is your {order_id} and change of ${change}")
resource_calculate()
return balance, change
status = True
balance = 0
change = 0
while status:
order_id=(input("What would you like? (espresso/latte/cappuccino): "))
if order_id == "off" :
print("Machine will be terminated")
status = False
elif order_id == "report":
print(f"Water: {resources['water']}ml")
print(f"Milk: {resources['milk']}ml")
print(f"Coffee: {resources['coffee']}g")
print(f"Balance is ${balance}")
else:
payment = calculate()
processing(payment, order_id, balance, change)
| hlee0995/Python_review | Coffee Machine.py | Coffee Machine.py | py | 2,083 | python | en | code | 0 | github-code | 13 |
27802796412 | # 이미 푼 문제
# 해결 방법만 떠올리고 skip
# 브루트포스
# combination으로 팀을 뽑으면 쉬울거같은데?
# 시간복잡도도 충분 20C10
# -- 이전 코드를 본 후 --
# 스타트, 링크팀 분리 후 각 팀의 능력치를 구할 때, set, permu를 사용하네 좋은 코드다
# 잘 살펴보자
#20c10 하면 20만 시간복잡도는 충분하다
from itertools import combinations,permutations
N = int(input())
arr = [list(map(int, input().split())) for _ in range(N)]
min_val = 1e9
for comb in list(combinations(range(N), N//2)):
sum_1 = 0
sum_2 = 0
for i, j in permutations(comb, 2):
sum_1 += arr[i][j]
for i, j in permutations(set(range(N)) - set(comb), 2):
sum_2 += arr[i][j]
min_val = min(min_val, abs(sum_1-sum_2))
print(min_val)
| tkdgns8234/DataStructure-Algorithm | Algorithm/백준/백준강의/알고리즘_중급_1/브루트포스/순열-연습/스타트와_링크.py | 스타트와_링크.py | py | 815 | python | ko | code | 0 | github-code | 13 |
32674133645 | import numpy as np
from dt import *
from datasets import *
from sklearn.model_selection import cross_validate
from plots import read_csv, write_csv
import copy
def eval(algorithm, dataset):
    """Fit the named tree algorithm on the named dataset 10 times and return
    the smallest node count observed.

    NOTE: this shadows the builtin ``eval``; the name is kept because the
    driver code below calls it by this name.
    """
    classifier_by_name = {
        "LocalInformationGain": LocalInformationGainDecisionTreeClassifier,
        "GlobalInformationGain": GlobalInformationGainDecisionTreeClassifier,
        "LocalGainRatio": LocalGainRatioDecisionTreeClassifier,
        "GlobalGainRatio": GlobalGainRatioDecisionTreeClassifier,
        "LocalNVI": LocalNVIDecisionTreeClassifier,
        "GlobalNVI": GlobalNVIDecisionTreeClassifier,
        "LocalGiniImpurity": LocalGiniImpurityDecisionTreeClassifier,
        "GlobalGiniImpurity": GlobalGiniImpurityDecisionTreeClassifier,
        "LocalJaccard": LocalJaccardDecisionTreeClassifier,
        "GlobalJaccard": GlobalJaccardDecisionTreeClassifier,
        "LocalAccuracy": LocalAccuracyDecisionTreeClassifier,
        "GlobalAccuracy": GlobalAccuracyDecisionTreeClassifier,
    }
    loader_by_name = {
        "monks": monks,
        "iris": iris,
        "mfeat": mfeat,
        "square": square,
        "circle": circle,
        "digits": digits,
        "wine": wine,
        "diabetes": diabetes,
    }

    make_classifier = classifier_by_name[algorithm]
    data = loader_by_name[dataset]()

    def _fit_once():
        model = make_classifier()
        model.fit(data.data, data.target)
        return model.tree.get_n_nodes()

    return min(_fit_once() for _ in range(10))
filename = "treesize_bestof.csv"
data = read_csv(filename)
for d in data:
algorithm = d["algorithm"]
for dataset, v in d.items():
if v is None:
d[dataset] = eval(algorithm, dataset)
print(algorithm, dataset)
write_csv(filename, data)
| 285714/DecisionTrees | Figures/treesize_bestof.py | treesize_bestof.py | py | 1,697 | python | en | code | 2 | github-code | 13 |
71819385618 | import numpy as np
import tensorflow as tf
def WALS(R,I,It,bm,bg,bu,_N):
    """Factorize the bias-corrected matrix R with weighted alternating least squares.

    Uses TF1's ``tf.contrib.factorization.WALSModel``: only the entries where
    ``I`` is True are fit; entries where ``It`` is True are used to report a
    held-out test loss after each sweep.  Runs 32 full ALS sweeps.

    Args:
        R: dense rating matrix, shape (_GL, _UL).
        I: boolean mask of observed training entries (same shape as R).
        It: boolean mask of held-out test entries (same shape as R).
        bm: global bias term.
        bg: per-row bias vector (expanded over axis 1 below).
        bu: per-column bias — presumably broadcastable over rows; confirm
            against callers.
        _N: number of latent factors.

    Returns:
        Tuple of numpy arrays ``(row_factors, col_factors.T)``.

    NOTE(review): depends on ``tf.contrib`` and ``tf.Session``, i.e. this only
    runs under TensorFlow 1.x.
    """
    _GL, _UL = I.shape
    _IL = np.sum(I)   # number of observed training entries
    _TL = np.sum(It)  # number of held-out test entries
    zero=np.zeros_like(R)
    # additive bias model, subtracted before factorizing the residual
    bias = bm+np.expand_dims(bg,1)+bu
    idx=tf.where(I)
    print("idx ready")
    # sparse tensor holding only the observed, bias-corrected entries
    input_tensor = tf.SparseTensor(indices=idx,
                                   values=tf.gather_nd(R-bias,idx),
                                   dense_shape=(_GL,_UL)
                                  )
    print("sparse ready")
    model = tf.contrib.factorization.WALSModel(input_rows=_GL, input_cols=_UL,
                                               n_components=_N,
                                               unobserved_weight=0,
                                               regularization=0.5*_GL*_UL/_IL,
                                               row_init='random',
                                               col_init='random',
                                               row_weights=1,
                                               col_weights=1 )
    row_factor = model.row_factors[0]
    col_factor = model.col_factors[0]
    row_update_op = model.update_row_factors(sp_input=input_tensor)[1]
    col_update_op = model.update_col_factors(sp_input=input_tensor)[1]
    sess=tf.Session()
    sess.run(model.initialize_op)
    sess.run(model.worker_init)
    # reconstruction with the biases added back, and masked MSE losses
    Rp = row_factor@tf.transpose(col_factor)+bias
    sqd = tf.squared_difference(R,Rp)
    print("...")
    loss = tf.math.reduce_sum( tf.where(I, sqd, zero) )/_IL
    print("...")
    test_loss= tf.math.reduce_sum( tf.where(It, sqd, zero) )/_TL
    print("...")
    #reg_cost = ( tf.math.reduce_mean(row_factor**2) + tf.math.reduce_mean(col_factor**2) )*_GL*_UL/_IL
    print("initiated")
    for i in range(32):
        # one full ALS sweep: refresh Gramians, then solve rows, then columns
        sess.run(model.row_update_prep_gramian_op)
        print("",end="-")
        sess.run(model.initialize_row_update_op)
        print("",end="-")
        sess.run(row_update_op)
        print("",end="-")
        sess.run(model.col_update_prep_gramian_op)
        print("",end="-")
        sess.run(model.initialize_col_update_op)
        print("",end="-")
        sess.run(col_update_op)
        print()
        print(sess.run(loss),sess.run(test_loss))
    return row_factor.eval(session=sess), col_factor.eval(session=sess).transpose()
return row_factor.eval(session=sess), col_factor.eval(session=sess).transpose() | greg3566/BoardgameRating | MF.py | MF.py | py | 2,334 | python | en | code | 0 | github-code | 13 |
35230779392 | #!/usr/bin/python3
"""
This script lists all State objects from the database passed to the program.
Using SQLAlchemy, it connects to a MySQL server running on
localhost at port 3306.
It takes 3 command-line arguments: the MySQL username,
the MySQL password, and the database name.
These arguments are used to connect to the MySQL server.
"""
import sys
from model_state import Base, State
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
if __name__ == "__main__":
engine = create_engine('mysql+mysqldb://{}:{}@localhost:3306/{}'
.format(sys.argv[1], sys.argv[2], sys.argv[3]))
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
states = session.query(State) \
.order_by(State.id.asc()) \
.all()
for state in states:
print("{}: {}".format(state.id, state.name))
| fernandogmo/holbertonschool-higher_level_programming | 0x0F-python-object_relational_mapping/7-model_state_fetch_all.py | 7-model_state_fetch_all.py | py | 896 | python | en | code | 1 | github-code | 13 |
1346866891 | import math
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
from pyhealth.datasets import SampleEHRDataset
from pyhealth.models import BaseModel
from pyhealth.models.utils import get_last_visit
class FinalAttentionQKV(nn.Module):
    """Attention pooling over time, using the last time step as the query.

    Projects the input sequence to queries/keys/values and collapses the time
    dimension into one context vector per sample.

    Args:
        attention_input_dim: feature size of each time step.
        attention_hidden_dim: projection size used for the attention scores
            and for the value vectors.
        attention_type: score function, one of "add", "mul", "concat".
        dropout: dropout applied to the attention weights.
    """

    def __init__(
        self,
        attention_input_dim: int,
        attention_hidden_dim: int,
        attention_type: str = "add",
        dropout: float = 0.5,
    ):
        super(FinalAttentionQKV, self).__init__()

        self.attention_type = attention_type
        self.attention_hidden_dim = attention_hidden_dim
        self.attention_input_dim = attention_input_dim

        # shared query/key/value projections (used by every attention_type)
        self.W_q = nn.Linear(attention_input_dim, attention_hidden_dim)
        self.W_k = nn.Linear(attention_input_dim, attention_hidden_dim)
        self.W_v = nn.Linear(attention_input_dim, attention_hidden_dim)

        self.W_out = nn.Linear(attention_hidden_dim, 1)

        self.b_in = nn.Parameter(
            torch.zeros(
                1,
            )
        )
        # NOTE(review): b_out is registered but never used in forward; kept
        # for state-dict/checkpoint compatibility.
        self.b_out = nn.Parameter(
            torch.zeros(
                1,
            )
        )

        nn.init.kaiming_uniform_(self.W_q.weight, a=math.sqrt(5))
        nn.init.kaiming_uniform_(self.W_k.weight, a=math.sqrt(5))
        nn.init.kaiming_uniform_(self.W_v.weight, a=math.sqrt(5))
        nn.init.kaiming_uniform_(self.W_out.weight, a=math.sqrt(5))

        # parameters for the "concat" score function
        # NOTE(review): the concat path feeds projected (hidden-dim) q/k into
        # Wh, which is sized from attention_input_dim — it only works when
        # attention_input_dim == attention_hidden_dim.
        self.Wh = nn.Parameter(
            torch.randn(2 * attention_input_dim, attention_hidden_dim)
        )
        self.Wa = nn.Parameter(torch.randn(attention_hidden_dim, 1))
        self.ba = nn.Parameter(
            torch.zeros(
                1,
            )
        )

        nn.init.kaiming_uniform_(self.Wh, a=math.sqrt(5))
        nn.init.kaiming_uniform_(self.Wa, a=math.sqrt(5))

        self.dropout = nn.Dropout(p=dropout)
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax(dim=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, input):
        """Pool ``input`` of shape (batch, time, input_dim) into one vector.

        Returns:
            v: context vector of shape (batch, attention_hidden_dim).
            a: attention weights of shape (batch, time).
        """
        (
            batch_size,
            time_step,
            input_dim,
        ) = input.size()  # batch_size * input_dim + 1 * hidden_dim(i)
        input_q = self.W_q(input[:, -1, :])  # b h
        input_k = self.W_k(input)  # b t h
        input_v = self.W_v(input)  # b t h

        if self.attention_type == "add":  # B*T*I  @ H*I
            q = torch.reshape(
                input_q, (batch_size, 1, self.attention_hidden_dim)
            )  # B*1*H
            h = q + input_k + self.b_in  # b t h
            h = self.tanh(h)  # B*T*H
            e = self.W_out(h)  # b t 1
            e = torch.reshape(e, (batch_size, time_step))  # b t
        elif self.attention_type == "mul":
            q = torch.reshape(
                input_q, (batch_size, self.attention_hidden_dim, 1)
            )  # B*h 1
            # BUG FIX: squeeze only the trailing score dim; a bare .squeeze()
            # also dropped the batch dim when batch_size == 1.
            e = torch.matmul(input_k, q).squeeze(-1)  # b t
        elif self.attention_type == "concat":
            q = input_q.unsqueeze(1).repeat(1, time_step, 1)  # b t h
            k = input_k
            c = torch.cat((q, k), dim=-1)  # B*T*2I
            h = torch.matmul(c, self.Wh)
            h = self.tanh(h)
            e = torch.matmul(h, self.Wa) + self.ba  # B*T*1
            e = torch.reshape(e, (batch_size, time_step))  # b t
        else:
            raise ValueError(
                "Unknown attention type: {}, please use add, mul, concat".format(
                    self.attention_type
                )
            )

        a = self.softmax(e)  # B*T
        if self.dropout is not None:
            a = self.dropout(a)
        # BUG FIX: squeeze dim 1 explicitly so a batch of size 1 keeps its
        # batch dimension (bare .squeeze() returned shape (h,) for b == 1).
        v = torch.matmul(a.unsqueeze(1), input_v).squeeze(1)  # B*I
        return v, a
class PositionwiseFeedForward(nn.Module):  # new added
    """Two-layer position-wise feed-forward block: W2(dropout(relu(W1 x)))."""

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        hidden = torch.relu(self.w_1(x))
        out = self.w_2(self.dropout(hidden))
        # the second element mirrors the attention sublayer's (output, extra)
        # return contract; this block has no extra term
        return out, None
class PositionalEncoding(nn.Module):  # new added / not use anymore
    """Sinusoidal positional encoding added to the input sequence.

    Precomputes the encodings once in log space for up to ``max_len``
    positions and adds the first ``seq_len`` rows to the input.
    """

    def __init__(self, d_model, dropout, max_len=400):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        position = torch.arange(0.0, max_len).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0.0, d_model, 2) * -(math.log(10000.0) / d_model)
        )
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(position * div_term)  # even dims: sine
        pe[:, 1::2] = torch.cos(position * div_term)  # odd dims: cosine
        self.register_buffer("pe", pe.unsqueeze(0))

    def forward(self, x):
        pos = self.pe[:, : x.size(1)].clone().requires_grad_(False)
        return self.dropout(x + pos)
class MultiHeadedAttention(nn.Module):
    """Multi-head self-attention that also returns a DeCov regularizer.

    The extra scalar penalizes the off-diagonal entries of the covariance of
    the attended hidden dimensions (see ``DeCov_loss`` in ``forward``).
    """

    def __init__(self, h, d_model, dropout=0):
        "Take in model size and number of heads."
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        # We assume d_v always equals d_k
        self.d_k = d_model // h
        self.h = h
        # three input projections: query, key, value (output proj is separate)
        self.linears = nn.ModuleList(
            [nn.Linear(d_model, self.d_k * self.h) for _ in range(3)]
        )
        self.final_linear = nn.Linear(d_model, d_model)
        self.attn = None  # last attention map, kept for inspection
        self.dropout = nn.Dropout(p=dropout)

    def attention(self, query, key, value, mask=None, dropout=None):
        "Compute 'Scaled Dot Product Attention'"
        d_k = query.size(-1)  # b h t d_k
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)  # b h t t
        if mask is not None:  # 1 1 t t
            scores = scores.masked_fill(mask == 0, -1e9)  # b h t t lower-triangular mask
        p_attn = torch.softmax(scores, dim=-1)  # b h t t
        if dropout is not None:
            p_attn = dropout(p_attn)
        return torch.matmul(p_attn, value), p_attn  # b h t v (d_k)

    def cov(self, m, y=None):
        # Sample covariance of the rows of m (optionally stacked with y).
        if y is not None:
            m = torch.cat((m, y), dim=0)
        m_exp = torch.mean(m, dim=1)
        x = m - m_exp[:, None]
        cov = 1 / (x.size(1) - 1) * x.mm(x.t())
        return cov

    def forward(self, query, key, value, mask=None):
        if mask is not None:
            # Same mask applied to all h heads.
            mask = mask.unsqueeze(1)  # 1 1 t t
        nbatches = query.size(0)  # b
        input_dim = query.size(1)  # i+1
        feature_dim = query.size(1)  # i+1
        # input size -> # batch_size * d_input * hidden_dim
        # d_model => h * d_k
        query, key, value = [
            l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
            for l, x in zip(self.linears, (query, key, value))
        ]  # b num_head d_input d_k
        x, self.attn = self.attention(
            query, key, value, mask=mask, dropout=self.dropout
        )  # b num_head d_input d_v (d_k)
        x = (
            x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
        )  # batch_size * d_input * hidden_dim
        # DeCov: per input position, accumulate the off-diagonal Frobenius
        # norm of the hidden-dimension covariance computed across the batch.
        DeCov_contexts = x.transpose(0, 1).transpose(1, 2)  # I+1 H B
        Covs = self.cov(DeCov_contexts[0, :, :])
        DeCov_loss = 0.5 * (
            torch.norm(Covs, p="fro") ** 2 - torch.norm(torch.diag(Covs)) ** 2
        )
        for i in range(feature_dim - 1):
            Covs = self.cov(DeCov_contexts[i + 1, :, :])
            DeCov_loss += 0.5 * (
                torch.norm(Covs, p="fro") ** 2 - torch.norm(torch.diag(Covs)) ** 2
            )
        return self.final_linear(x), DeCov_loss
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with a learnable affine."""

    def __init__(self, features, eps=1e-7):
        super(LayerNorm, self).__init__()
        # learnable per-feature scale (a_2) and shift (b_2)
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        centered = x - x.mean(-1, keepdim=True)
        scale = x.std(-1, keepdim=True) + self.eps
        return self.a_2 * centered / scale + self.b_2
class SublayerConnection(nn.Module):
    """
    Pre-norm residual wrapper: returns x + dropout(sublayer(norm(x))).

    The wrapped ``sublayer`` must return an (output, extra) pair; the extra
    value (e.g. a DeCov loss, or None) is passed through unchanged.
    """

    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        "Apply residual connection to any sublayer with the same size."
        out, extra = sublayer(self.norm(x))
        return x + self.dropout(out), extra
class SingleAttention(nn.Module):
    """Attention over one feature channel's sequence of hidden states.

    The last valid hidden state (per the mask) is the query; scores over all
    time steps use one of four score functions ("add", "mul", "concat",
    "new").  When ``time_aware`` is set (or with type "new"), older steps are
    additionally penalized by their distance from the final step.
    """

    def __init__(
        self,
        attention_input_dim,
        attention_hidden_dim,
        attention_type="add",
        time_aware=False,
    ):
        super(SingleAttention, self).__init__()

        self.attention_type = attention_type
        self.attention_hidden_dim = attention_hidden_dim
        self.attention_input_dim = attention_input_dim
        self.time_aware = time_aware

        # batch_time = torch.arange(0, batch_mask.size()[1], dtype=torch.float32).reshape(1, batch_mask.size()[1], 1)
        # batch_time = batch_time.repeat(batch_mask.size()[0], 1, 1)

        if attention_type == "add":
            if self.time_aware:
                # self.Wx = nn.Parameter(torch.randn(attention_input_dim+1, attention_hidden_dim))
                self.Wx = nn.Parameter(
                    torch.randn(attention_input_dim, attention_hidden_dim)
                )
                self.Wtime_aware = nn.Parameter(torch.randn(1, attention_hidden_dim))
                nn.init.kaiming_uniform_(self.Wtime_aware, a=math.sqrt(5))
            else:
                self.Wx = nn.Parameter(
                    torch.randn(attention_input_dim, attention_hidden_dim)
                )
            self.Wt = nn.Parameter(
                torch.randn(attention_input_dim, attention_hidden_dim)
            )
            self.bh = nn.Parameter(
                torch.zeros(
                    attention_hidden_dim,
                )
            )
            self.Wa = nn.Parameter(torch.randn(attention_hidden_dim, 1))
            self.ba = nn.Parameter(
                torch.zeros(
                    1,
                )
            )

            # BUG FIX: removed `nn.init.kaiming_uniform_(self.Wd, ...)` —
            # no `Wd` parameter is ever created, so constructing this module
            # with attention_type="add" raised AttributeError.
            nn.init.kaiming_uniform_(self.Wx, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.Wt, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.Wa, a=math.sqrt(5))
        elif attention_type == "mul":
            self.Wa = nn.Parameter(
                torch.randn(attention_input_dim, attention_input_dim)
            )
            self.ba = nn.Parameter(
                torch.zeros(
                    1,
                )
            )

            nn.init.kaiming_uniform_(self.Wa, a=math.sqrt(5))
        elif attention_type == "concat":
            if self.time_aware:
                self.Wh = nn.Parameter(
                    torch.randn(2 * attention_input_dim + 1, attention_hidden_dim)
                )
            else:
                self.Wh = nn.Parameter(
                    torch.randn(2 * attention_input_dim, attention_hidden_dim)
                )

            self.Wa = nn.Parameter(torch.randn(attention_hidden_dim, 1))
            self.ba = nn.Parameter(
                torch.zeros(
                    1,
                )
            )

            nn.init.kaiming_uniform_(self.Wh, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.Wa, a=math.sqrt(5))
        elif attention_type == "new":
            self.Wt = nn.Parameter(
                torch.randn(attention_input_dim, attention_hidden_dim)
            )
            self.Wx = nn.Parameter(
                torch.randn(attention_input_dim, attention_hidden_dim)
            )
            # learnable decay rate for the time-aware denominator in forward
            self.rate = nn.Parameter(torch.zeros(1) + 0.8)
            nn.init.kaiming_uniform_(self.Wx, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.Wt, a=math.sqrt(5))
        else:
            raise RuntimeError(
                "Wrong attention type. Please use 'add', 'mul', 'concat' or 'new'."
            )

        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax(dim=1)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()

    def forward(self, input, mask, device):
        """Score each time step against the last valid step and pool.

        Args:
            input: tensor of shape (batch, time, attention_input_dim).
            mask: (batch, time) validity mask; masked positions score -1e9.
            device: device on which the time-decay tensor is built.

        Returns:
            v: attended vector of shape (batch, attention_input_dim).
            a: attention weights of shape (batch, time).
        """
        (
            batch_size,
            time_step,
            input_dim,
        ) = input.size()  # batch_size * time_step * hidden_dim(i)

        # time_decays[t] = distance from t to the final step (older -> larger)
        time_decays = (
            torch.tensor(range(time_step - 1, -1, -1), dtype=torch.float32)
            .unsqueeze(-1)
            .unsqueeze(0)
            .to(device=device)
        )  # 1*t*1
        b_time_decays = time_decays.repeat(batch_size, 1, 1) + 1  # b t 1

        if self.attention_type == "add":  # B*T*I  @ H*I
            last_visit = get_last_visit(input, mask)
            q = torch.matmul(last_visit, self.Wt)  # b h
            q = torch.reshape(q, (batch_size, 1, self.attention_hidden_dim))  # B*1*H
            if self.time_aware == True:
                k = torch.matmul(input, self.Wx)  # b t h
                time_hidden = torch.matmul(b_time_decays, self.Wtime_aware)  # b t h
            else:
                k = torch.matmul(input, self.Wx)  # b t h
            h = q + k + self.bh  # b t h
            if self.time_aware:
                h += time_hidden
            h = self.tanh(h)  # B*T*H
            e = torch.matmul(h, self.Wa) + self.ba  # B*T*1
            e = torch.reshape(e, (batch_size, time_step))  # b t
        elif self.attention_type == "mul":
            last_visit = get_last_visit(input, mask)
            e = torch.matmul(last_visit, self.Wa)  # b i
            e = (
                torch.matmul(e.unsqueeze(1), input.permute(0, 2, 1)).reshape(
                    batch_size, time_step
                )
                + self.ba
            )  # b t
        elif self.attention_type == "concat":
            last_visit = get_last_visit(input, mask)
            q = last_visit.unsqueeze(1).repeat(1, time_step, 1)  # b t i
            k = input
            c = torch.cat((q, k), dim=-1)  # B*T*2I
            if self.time_aware:
                c = torch.cat((c, b_time_decays), dim=-1)  # B*T*2I+1
            h = torch.matmul(c, self.Wh)
            h = self.tanh(h)
            e = torch.matmul(h, self.Wa) + self.ba  # B*T*1
            e = torch.reshape(e, (batch_size, time_step))  # b t
        elif self.attention_type == "new":
            last_visit = get_last_visit(input, mask)
            q = torch.matmul(last_visit, self.Wt)  # b h
            q = torch.reshape(q, (batch_size, 1, self.attention_hidden_dim))  # B*1*H
            k = torch.matmul(input, self.Wx)  # b t h
            dot_product = torch.matmul(q, k.transpose(1, 2)).reshape(
                batch_size, time_step
            )  # b t
            # older steps get a larger denominator, i.e. a decayed score
            denominator = self.sigmoid(self.rate) * (
                torch.log(2.72 + (1 - self.sigmoid(dot_product)))
                * (b_time_decays.reshape(batch_size, time_step))
            )
            e = self.relu(self.sigmoid(dot_product) / (denominator))  # b * t
        else:
            raise ValueError(
                "Wrong attention type. Please use 'add', 'mul', 'concat' or 'new'."
            )

        if mask is not None:
            e = e.masked_fill(mask == 0, -1e9)
        a = self.softmax(e)  # B*T
        v = torch.matmul(a.unsqueeze(1), input).reshape(batch_size, input_dim)  # B*I
        return v, a
class ConCareLayer(nn.Module):
    """ConCare layer.
    Paper: Liantao Ma et al. Concare: Personalized clinical feature embedding via capturing the healthcare context. AAAI 2020.
    This layer is used in the ConCare model. But it can also be used as a
    standalone layer.
    Args:
        input_dim: dynamic feature size.
        static_dim: static feature size, if 0, then no static feature is used.
        hidden_dim: hidden dimension of the channel-wise GRU and of the
            transformer blocks, default 128.
        num_head: number of heads in the transformer, default 4.
        pe_hidden: hidden dimension of the position-wise feed-forward block,
            default 64.
        dropout: dropout rate, default 0.5.
    Examples:
        >>> from pyhealth.models import ConCareLayer
        >>> input = torch.randn(3, 128, 64)  # [batch size, sequence len, feature_size]
        >>> layer = ConCareLayer(64)
        >>> c, _ = layer(input)
        >>> c.shape
        torch.Size([3, 128])
    """

    def __init__(
        self,
        input_dim: int,
        static_dim: int = 0,
        hidden_dim: int = 128,
        num_head: int = 4,
        pe_hidden: int = 64,
        dropout: int = 0.5,
    ):
        super(ConCareLayer, self).__init__()

        # hyperparameters
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim  # d_model
        self.transformer_hidden = hidden_dim
        self.num_head = num_head
        self.pe_hidden = pe_hidden
        self.dropout = dropout
        self.static_dim = static_dim

        # layers
        self.PositionalEncoding = PositionalEncoding(
            self.transformer_hidden, dropout=0, max_len=400
        )

        # one single-channel GRU per input feature ("channel-wise" encoding)
        self.GRUs = nn.ModuleList(
            [
                nn.GRU(1, self.hidden_dim, batch_first=True)
                for _ in range(self.input_dim)
            ]
        )
        self.LastStepAttentions = nn.ModuleList(
            [
                SingleAttention(
                    self.hidden_dim,
                    8,
                    attention_type="new",
                    time_aware=True,
                )
                for _ in range(self.input_dim)
            ]
        )

        self.FinalAttentionQKV = FinalAttentionQKV(
            self.hidden_dim,
            self.hidden_dim,
            attention_type="mul",
            dropout=self.dropout,
        )

        self.MultiHeadedAttention = MultiHeadedAttention(
            self.num_head, self.transformer_hidden, dropout=self.dropout
        )
        self.SublayerConnection = SublayerConnection(
            self.transformer_hidden, dropout=self.dropout
        )
        self.PositionwiseFeedForward = PositionwiseFeedForward(
            self.transformer_hidden, self.pe_hidden, dropout=0.1
        )

        if self.static_dim > 0:
            self.demo_proj_main = nn.Linear(self.static_dim, self.hidden_dim)

        # NOTE: self.dropout is rebound here from the float rate to the module
        self.dropout = nn.Dropout(p=self.dropout)
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax()
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()

    def concare_encoder(self, input, static=None, mask=None):
        # input shape [batch_size, timestep, feature_dim]
        if self.static_dim > 0:
            demo_main = self.tanh(self.demo_proj_main(static)).unsqueeze(
                1
            )  # b hidden_dim

        batch_size = input.size(0)
        time_step = input.size(1)
        feature_dim = input.size(2)

        if self.transformer_hidden % self.num_head != 0:
            raise ValueError("transformer_hidden must be divisible by num_head")

        # run each feature channel through its own GRU, then pool it over time
        # with its time-aware attention; the result is one vector per channel
        GRU_embeded_input = self.GRUs[0](
            input[:, :, 0].unsqueeze(-1).to(device=input.device),
            torch.zeros(batch_size, self.hidden_dim)
            .to(device=input.device)
            .unsqueeze(0),
        )[
            0
        ]  # b t h
        Attention_embeded_input = self.LastStepAttentions[0](
            GRU_embeded_input, mask, input.device
        )[0].unsqueeze(
            1
        )  # b 1 h

        for i in range(feature_dim - 1):
            embeded_input = self.GRUs[i + 1](
                input[:, :, i + 1].unsqueeze(-1),
                torch.zeros(batch_size, self.hidden_dim)
                .to(device=input.device)
                .unsqueeze(0),
            )[
                0
            ]  # b t h
            embeded_input = self.LastStepAttentions[i + 1](
                embeded_input, mask, input.device
            )[0].unsqueeze(
                1
            )  # b 1 h
            Attention_embeded_input = torch.cat(
                (Attention_embeded_input, embeded_input), 1
            )  # b i h
        if self.static_dim > 0:
            # the projected static (demographic) vector joins as an extra "channel"
            Attention_embeded_input = torch.cat(
                (Attention_embeded_input, demo_main), 1
            )  # b i+1 h

        posi_input = self.dropout(
            Attention_embeded_input
        )  # batch_size * d_input+1 * hidden_dim

        # cross-channel self-attention with a DeCov regularizer, then FFN
        contexts = self.SublayerConnection(
            posi_input,
            lambda x: self.MultiHeadedAttention(
                posi_input, posi_input, posi_input, None
            ),
        )  # # batch_size * d_input * hidden_dim

        DeCov_loss = contexts[1]
        contexts = contexts[0]

        contexts = self.SublayerConnection(
            contexts, lambda x: self.PositionwiseFeedForward(contexts)
        )[0]

        weighted_contexts, a = self.FinalAttentionQKV(contexts)
        return weighted_contexts, DeCov_loss

    def forward(
        self,
        x: torch.tensor,
        static: Optional[torch.tensor] = None,
        mask: Optional[torch.tensor] = None,
    ) -> Tuple[torch.tensor]:
        """Forward propagation.
        Args:
            x: a tensor of shape [batch size, sequence len, input_dim].
            static: a tensor of shape [batch size, static_dim].
            mask: an optional tensor of shape [batch size, sequence len], where
                1 indicates valid and 0 indicates invalid.
        Returns:
            output: a tensor of shape [batch size, fusion_dim] representing the
                patient embedding.
            decov: the decov loss value
        """
        # BUG FIX: removed a dead pre-allocation of `out` (a zeros tensor that
        # was immediately overwritten by the encoder call below).
        out, decov = self.concare_encoder(x, static, mask)
        out = self.dropout(out)
        return out, decov
class ConCare(BaseModel):
    """ConCare model.
    Paper: Liantao Ma et al. Concare: Personalized clinical feature embedding via capturing the healthcare context. AAAI 2020.
    Note:
        We use separate ConCare layers for different feature_keys.
        Currently, we automatically support different input formats:
            - code based input (need to use the embedding table later)
            - float/int based value input
        If you need the interpretable feature correlations provided by the ConCare model, we do not recommend using embeddings for the input features.
        We follow the current convention for the ConCare model:
            - case 1. [code1, code2, code3, ...]
                - we will assume the code follows the order; our model will encode
                each code into a vector and apply ConCare on the code level
            - case 2. [[code1, code2]] or [[code1, code2], [code3, code4, code5], ...]
                - we will assume the inner bracket follows the order; our model first
                use the embedding table to encode each code into a vector and then use
                average/mean pooling to get one vector for one inner bracket; then use
                ConCare on the bracket level
            - case 3. [[1.5, 2.0, 0.0]] or [[1.5, 2.0, 0.0], [8, 1.2, 4.5], ...]
                - this case only makes sense when each inner bracket has the same length;
                we assume each dimension has the same meaning; we run ConCare directly
                on the inner bracket level, similar to case 1 after embedding table
            - case 4. [[[1.5, 2.0, 0.0]]] or [[[1.5, 2.0, 0.0], [8, 1.2, 4.5]], ...]
                - this case only makes sense when each inner bracket has the same length;
                we assume each dimension has the same meaning; we run ConCare directly
                on the inner bracket level, similar to case 2 after embedding table
    Args:
        dataset: the dataset to train the model. It is used to query certain
            information such as the set of all tokens.
        feature_keys: list of keys in samples to use as features,
            e.g. ["conditions", "procedures"].
        label_key: key in samples to use as label (e.g., "drugs").
        mode: one of "binary", "multiclass", or "multilabel".
        static_key: the key in samples to use as static features, e.g. "demographics". Default is None.
            we only support numerical static features.
        use_embedding: list of bools indicating whether to use embedding for each feature type,
            e.g. [True, False].
        embedding_dim: the embedding dimension. Default is 128.
        hidden_dim: the hidden dimension. Default is 128.
        **kwargs: other parameters for the ConCare layer.
    Examples:
        >>> from pyhealth.datasets import SampleEHRDataset
        >>> samples = [
        ...         {
        ...             "patient_id": "patient-0",
        ...             "visit_id": "visit-0",
        ...             "list_codes": ["505800458", "50580045810", "50580045811"],  # NDC
        ...             "list_vectors": [[1.0, 2.55, 3.4], [4.1, 5.5, 6.0]],
        ...             "list_list_codes": [["A05B", "A05C", "A06A"], ["A11D", "A11E"]],  # ATC-4
        ...             "list_list_vectors": [
        ...                 [[1.8, 2.25, 3.41], [4.50, 5.9, 6.0]],
        ...                 [[7.7, 8.5, 9.4]],
        ...             ],
        ...             "demographic": [0.0, 2.0, 1.5],
        ...             "label": 1,
        ...         },
        ...         {
        ...             "patient_id": "patient-0",
        ...             "visit_id": "visit-1",
        ...             "list_codes": [
        ...                 "55154191800",
        ...                 "551541928",
        ...                 "55154192800",
        ...                 "705182798",
        ...                 "70518279800",
        ...             ],
        ...             "list_vectors": [[1.4, 3.2, 3.5], [4.1, 5.9, 1.7], [4.5, 5.9, 1.7]],
        ...             "list_list_codes": [["A04A", "B035", "C129"]],
        ...             "list_list_vectors": [
        ...                 [[1.0, 2.8, 3.3], [4.9, 5.0, 6.6], [7.7, 8.4, 1.3], [7.7, 8.4, 1.3]],
        ...             ],
        ...             "demographic": [0.0, 2.0, 1.5],
        ...             "label": 0,
        ...         },
        ...     ]
        >>> dataset = SampleEHRDataset(samples=samples, dataset_name="test")
        >>>
        >>> from pyhealth.models import ConCare
        >>> model = ConCare(
        ...         dataset=dataset,
        ...         feature_keys=[
        ...             "list_codes",
        ...             "list_vectors",
        ...             "list_list_codes",
        ...             "list_list_vectors",
        ...         ],
        ...         label_key="label",
        ...         static_key="demographic",
        ...         use_embedding=[True, False, True, False],
        ...         mode="binary"
        ...     )
        >>>
        >>> from pyhealth.datasets import get_dataloader
        >>> train_loader = get_dataloader(dataset, batch_size=2, shuffle=True)
        >>> data_batch = next(iter(train_loader))
        >>>
        >>> ret = model(**data_batch)
        >>> print(ret)
        {
            'loss': tensor(9.5541, grad_fn=<AddBackward0>),
            'y_prob': tensor([[0.5323], [0.5363]], grad_fn=<SigmoidBackward0>),
            'y_true': tensor([[1.], [0.]]),
            'logit': tensor([[0.1293], [0.1454]], grad_fn=<AddmmBackward0>)
        }
        >>>
    """

    def __init__(
        self,
        dataset: SampleEHRDataset,
        feature_keys: List[str],
        label_key: str,
        mode: str,
        use_embedding: List[bool],
        static_key: Optional[str] = None,
        embedding_dim: int = 128,
        hidden_dim: int = 128,
        **kwargs,
    ):
        super(ConCare, self).__init__(
            dataset=dataset,
            feature_keys=feature_keys,
            label_key=label_key,
            mode=mode,
        )
        self.embedding_dim = embedding_dim
        self.use_embedding = use_embedding
        self.hidden_dim = hidden_dim

        # validate kwargs for ConCare layer
        if "feature_size" in kwargs:
            raise ValueError("feature_size is determined by embedding_dim")

        # the key of self.feat_tokenizers only contains the code based inputs
        self.feat_tokenizers = {}
        self.static_key = static_key
        self.label_tokenizer = self.get_label_tokenizer()
        # the key of self.embeddings only contains the code based inputs
        self.embeddings = nn.ModuleDict()
        # the key of self.linear_layers only contains the float/int based inputs
        self.linear_layers = nn.ModuleDict()
        self.static_dim = 0
        if self.static_key is not None:
            self.static_dim = self.dataset.input_info[self.static_key]["len"]

        # one ConCare encoder per feature stream
        self.concare = nn.ModuleDict()
        # add feature ConCare layers
        for idx, feature_key in enumerate(self.feature_keys):
            input_info = self.dataset.input_info[feature_key]
            # sanity check
            if input_info["type"] not in [str, float, int]:
                raise ValueError(
                    "ConCare only supports str code, float and int as input types"
                )
            elif (input_info["type"] == str) and (input_info["dim"] not in [2, 3]):
                raise ValueError(
                    "ConCare only supports 2-dim or 3-dim str code as input types"
                )
            elif (input_info["type"] == str) and (use_embedding[idx] == False):
                raise ValueError(
                    "ConCare only supports embedding for str code as input types"
                )
            elif (input_info["type"] in [float, int]) and (
                input_info["dim"] not in [2, 3]
            ):
                raise ValueError(
                    "ConCare only supports 2-dim or 3-dim float and int as input types"
                )
            # for code based input, we need Type
            # for float/int based input, we need Type, input_dim
            if use_embedding[idx]:
                self.add_feature_transform_layer(feature_key, input_info)
                self.concare[feature_key] = ConCareLayer(
                    input_dim=embedding_dim,
                    static_dim=self.static_dim,
                    hidden_dim=self.hidden_dim,
                    **kwargs,
                )
            else:
                self.concare[feature_key] = ConCareLayer(
                    input_dim=input_info["len"],
                    static_dim=self.static_dim,
                    hidden_dim=self.hidden_dim,
                    **kwargs,
                )

        output_size = self.get_output_size(self.label_tokenizer)
        # final classifier over the concatenated per-feature embeddings
        self.fc = nn.Linear(len(self.feature_keys) * self.hidden_dim, output_size)

    def forward(self, **kwargs) -> Dict[str, torch.Tensor]:
        """Forward propagation.
        The label `kwargs[self.label_key]` is a list of labels for each patient.
        Args:
            **kwargs: keyword arguments for the model. The keys must contain
                all the feature keys and the label key.
        Returns:
            A dictionary with the following keys:
                loss: a scalar tensor representing the final loss.
                loss_task: a scalar tensor representing the task loss.
                loss_decov: a scalar tensor representing the decov loss.
                y_prob: a tensor representing the predicted probabilities.
                y_true: a tensor representing the true labels.
        """
        # encode each feature stream separately, then concatenate the results
        patient_emb = []
        decov_loss = 0
        for idx, feature_key in enumerate(self.feature_keys):
            input_info = self.dataset.input_info[feature_key]
            dim_, type_ = input_info["dim"], input_info["type"]

            # for case 1: [code1, code2, code3, ...]
            if (dim_ == 2) and (type_ == str):
                x = self.feat_tokenizers[feature_key].batch_encode_2d(
                    kwargs[feature_key]
                )
                # (patient, event)
                x = torch.tensor(x, dtype=torch.long, device=self.device)
                # (patient, event, embedding_dim)
                x = self.embeddings[feature_key](x)
                # (patient, event)
                # positions whose embedding is all-zero (padding) are masked out
                mask = torch.any(x !=0, dim=2)

            # for case 2: [[code1, code2], [code3, ...], ...]
            elif (dim_ == 3) and (type_ == str):
                x = self.feat_tokenizers[feature_key].batch_encode_3d(
                    kwargs[feature_key]
                )
                # (patient, visit, event)
                x = torch.tensor(x, dtype=torch.long, device=self.device)
                # (patient, visit, event, embedding_dim)
                x = self.embeddings[feature_key](x)
                # (patient, visit, embedding_dim)
                x = torch.sum(x, dim=2)
                # (patient, visit)
                mask = torch.any(x !=0, dim=2)

            # for case 3: [[1.5, 2.0, 0.0], ...]
            elif (dim_ == 2) and (type_ in [float, int]):
                x, mask = self.padding2d(kwargs[feature_key])
                # (patient, event, values)
                x = torch.tensor(x, dtype=torch.float, device=self.device)
                # (patient, event, embedding_dim)
                if self.use_embedding[idx]:
                    x = self.linear_layers[feature_key](x)
                # (patient, event)
                mask = mask.bool().to(self.device)

            # for case 4: [[[1.5, 2.0, 0.0], [1.8, 2.4, 6.0]], ...]
            elif (dim_ == 3) and (type_ in [float, int]):
                x, mask = self.padding3d(kwargs[feature_key])
                # (patient, visit, event, values)
                x = torch.tensor(x, dtype=torch.float, device=self.device)
                # (patient, visit, embedding_dim)
                x = torch.sum(x, dim=2)

                if self.use_embedding[idx]:
                    x = self.linear_layers[feature_key](x)
                # (patient, event)
                mask = mask[:, :, 0]
                mask = mask.bool().to(self.device)
            else:
                raise NotImplementedError

            if self.static_dim > 0:
                static = torch.tensor(
                    kwargs[self.static_key], dtype=torch.float, device=self.device
                )
                x, decov = self.concare[feature_key](x, static=static, mask=mask)
            else:
                x, decov = self.concare[feature_key](x, mask=mask)
            patient_emb.append(x)
            # the per-feature DeCov regularizers are summed into the loss below
            decov_loss += decov

        patient_emb = torch.cat(patient_emb, dim=1)
        # (patient, label_size)
        logits = self.fc(patient_emb)
        # obtain y_true, loss, y_prob
        y_true = self.prepare_labels(kwargs[self.label_key], self.label_tokenizer)
        loss_task = self.get_loss_function()(logits, y_true)
        loss = decov_loss + loss_task
        y_prob = self.prepare_y_prob(logits)
        results = {
            "loss": loss,
            "y_prob": y_prob,
            "y_true": y_true,
            'logit': logits,
        }
        if kwargs.get('embed', False):
            results['embed'] = patient_emb
        return results
if __name__ == "__main__":
from pyhealth.datasets import SampleEHRDataset
samples = [
{
"patient_id": "patient-0",
"visit_id": "visit-0",
# "single_vector": [1, 2, 3],
"list_codes": ["505800458", "50580045810", "50580045811"], # NDC
"list_vectors": [[1.0, 2.55, 3.4], [4.1, 5.5, 6.0]],
"list_list_codes": [["A05B", "A05C", "A06A"], ["A11D", "A11E"]], # ATC-4
"list_list_vectors": [
[[1.8, 2.25, 3.41], [4.50, 5.9, 6.0]],
[[7.7, 8.5, 9.4]],
],
"label": 1,
"demographic": [1.0, 2.0, 1.3],
},
{
"patient_id": "patient-0",
"visit_id": "visit-1",
# "single_vector": [1, 5, 8],
"list_codes": [
"55154191800",
"551541928",
"55154192800",
"705182798",
"70518279800",
],
"list_vectors": [[1.4, 3.2, 3.5], [4.1, 5.9, 1.7], [4.5, 5.9, 1.7]],
"list_list_codes": [["A04A", "B035", "C129"]],
"list_list_vectors": [
[[1.0, 2.8, 3.3], [4.9, 5.0, 6.6], [7.7, 8.4, 1.3], [7.7, 8.4, 1.3]],
],
"label": 0,
"demographic": [1.0, 2.0, 1.3],
},
]
# dataset
dataset = SampleEHRDataset(samples=samples, dataset_name="test")
# data loader
from pyhealth.datasets import get_dataloader
train_loader = get_dataloader(dataset, batch_size=2, shuffle=True)
# model
model = ConCare(
dataset=dataset,
feature_keys=[
"list_codes",
"list_vectors",
"list_list_codes",
# "list_list_vectors",
],
static_key="demographic",
label_key="label",
use_embedding=[True, False, True],
mode="binary",
hidden_dim=64,
)
# data batch
data_batch = next(iter(train_loader))
# try the model
ret = model(**data_batch)
print(ret)
# try loss backward
ret["loss"].backward()
| sunlabuiuc/PyHealth | pyhealth/models/concare.py | concare.py | py | 37,784 | python | en | code | 778 | github-code | 13 |
21575492145 | #!usr/bin/env python3
from collections import defaultdict
from collections import deque
from heapq import heappush, heappop
import sys
import math
import bisect
import random
# Competitive-programming stdin helpers:
#   LI -> list of ints from one line, I -> single int,
#   LS -> list of char-lists from one line, S -> chars of one line (newline dropped)
def LI(): return list(map(int, sys.stdin.readline().split()))
def I(): return int(sys.stdin.readline())
def LS():return list(map(list, sys.stdin.readline().split()))
def S(): return list(sys.stdin.readline())[:-1]
def IR(n):
    """Read n lines from stdin, each containing a single int; return them as a list."""
    # Comprehension replaces the original preallocated [None]*n fill loop.
    return [I() for _ in range(n)]
def LIR(n):
    """Read n lines from stdin, each a whitespace-separated list of ints."""
    # Comprehension replaces the original preallocated [None]*n fill loop.
    return [LI() for _ in range(n)]
def SR(n):
    """Read n lines from stdin, each as a list of characters (newline dropped)."""
    # Comprehension replaces the original preallocated [None]*n fill loop.
    return [S() for _ in range(n)]
def LSR(n):
    """Read n lines from stdin, each a list of char-lists (one per token)."""
    # Comprehension replaces the original preallocated [None]*n fill loop.
    return [LS() for _ in range(n)]
sys.setrecursionlimit(1000000)
mod = 1000000007
# ARC037B: count connected components of an undirected graph that contain
# no cycle (tree components). Input: N vertices, M edges (1-indexed).
N, M = LI()
edge = list(map(lambda x: [x[0]-1, x[1]-1], LIR(M)))
table = [[] for i in range(N)]
for e in edge:
    table[e[0]].append(e[1])
    table[e[1]].append(e[0])
checked = [0]*N
q = deque()
ans = 0
for i in range(N):
    # Start a stack-based DFS (deque used as a LIFO via pop()) at every
    # still-unvisited vertex; each start explores one whole component.
    if (not checked[i]):
        checked[i] = 1
        for v in table[i]:
            q.append((v, i))
    else:
        continue
    flag = True
    while q:
        # Each stack entry is (vertex, parent-it-was-reached-from).
        v = q.pop()
        checked[v[0]] = 1
        for u in table[v[0]]:
            # Reaching an already-visited vertex other than the parent
            # means the component contains a cycle -> not a tree.
            if (checked[u]) and (u != v[1]):
                flag = False
            elif (u != v[1]):
                q.append((u, v[0]))
    if (flag) :
        ans += 1
print(ans)
"""
for e in edge:
utov[e[0]].append(e[1])
check = [0]*N
def dfs(v, before, check):
check[v] = 1
print(check)
for i in utov[v]:
if (check[i]) and (i != before):
return False
dfs(i, v, check)
check[v] = 0
return True
first_v = utov[edge[0][0]][0]
if (not dfs(first_v, first_v, check)):
print("No")
else:
print("Yes")
"""
| hppRC/competitive-programming-solutions | ARC/ARC-B/ARC037B.py | ARC037B.py | py | 1,848 | python | en | code | 3 | github-code | 13 |
import torch
from torch.nn import functional as F
def scalar_to_support(x, support_size):
    """Encode scalars as "two-hot" categorical distributions.

    Each value in ``x`` (shape [B, L]) is clamped to [-support_size,
    support_size] and spread over the two nearest integer bins of a
    (2 * support_size + 1)-wide support, weighted by its fractional part
    (MuZero paper, appendix "Network Architecture").
    Returns a tensor of shape [B, L, 2 * support_size + 1].
    """
    clamped = torch.clamp(x, -support_size, support_size)
    lower = clamped.floor()
    upper_weight = clamped - lower            # fractional part in [0, 1)
    n_bins = 2 * support_size + 1
    out = torch.zeros(clamped.shape[0], clamped.shape[1], n_bins, device=clamped.device)
    # Weight (1 - frac) goes to the lower bin.
    lower_idx = (lower + support_size).long()
    out.scatter_(2, lower_idx.unsqueeze(-1), (1 - upper_weight).unsqueeze(-1))
    # Weight frac goes to the next bin up, unless that would fall off the support.
    upper_idx = lower + support_size + 1
    overflow = n_bins - 1 < upper_idx
    upper_weight = upper_weight.masked_fill(overflow, 0.0)
    upper_idx = upper_idx.masked_fill(overflow, 0.0)
    out.scatter_(2, upper_idx.long().unsqueeze(-1), upper_weight.unsqueeze(-1))
    return out
def support_to_scalar(logits):
    """Collapse support logits back to their scalar expectation.

    Inverse of :func:`scalar_to_support`: softmax over the last dimension,
    then the probability-weighted sum of the integer bin values
    [-support_size, ..., support_size].
    """
    probs = torch.softmax(logits, dim=-1)
    half = logits.shape[-1] // 2
    bins = (
        torch.arange(-half, half + 1, dtype=torch.float32, device=probs.device)
        .expand(probs.shape)
    )
    return (bins * probs).sum(dim=-1)
def logit_regression_loss(pred, true, mask=None):
    """Cross-entropy between support logits and the two-hot encoding of `true`.

    `pred` carries 2 * support_size + 1 logits in its last dimension.
    Note the per-element loss averages (rather than sums) over the bins,
    which only rescales the loss by a constant factor.
    With a `mask`, the mean is taken over unmasked positions only.
    """
    support_size = (pred.shape[-1] - 1) // 2
    if true.dim() == 3:
        # Flatten (batch, n) so scalar_to_support sees a 2-D input, then restore.
        b, n, l = true.shape
        flat_dist = scalar_to_support(true.flatten(0, 1), support_size)
        true_dist = flat_dist.reshape(b, n, *flat_dist.shape[1:])
    else:
        true_dist = scalar_to_support(true, support_size)
    log_probs = F.log_softmax(pred, dim=-1)
    per_elem = -(true_dist * log_probs).mean(dim=-1)
    if mask is None:
        return per_elem.mean()
    return (per_elem * mask).sum() / mask.sum().clip(1)
def logit_regression_mae(pred, true, mask=None):
    """Mean absolute error after decoding support logits back to scalars.

    With a `mask`, averages the error over unmasked positions only.
    """
    decoded = support_to_scalar(pred)
    abs_err = (true - decoded).abs()
    if mask is None:
        return abs_err.mean()
    return (abs_err * mask).sum() / mask.sum().clip(1)
def masked_loss(loss_func, pred, true, mask=None):
    """Apply a torch loss function, averaging only over masked-in positions.

    `loss_func` must accept a `reduction` keyword (e.g. F.mse_loss).
    Without a mask this is simply the mean-reduced loss.
    """
    if mask is None:
        return loss_func(pred, true, reduction='mean')
    # NOTE(review): torch.Size comparison is lexicographic — this assumes one
    # tensor broadcasts cleanly over the other; confirm against the callers.
    if pred.shape > true.shape:
        true = true.expand_as(pred)
    else:
        pred = pred.expand_as(true)
    losses = loss_func(pred, true, reduction='none')
    return (losses * mask).sum() / mask.sum().clip(1)
def masked_mean(values, mask=None):
    """Average `values`, optionally restricted to positions where `mask` is set."""
    if mask is None:
        return values.mean()
    return (values * mask).sum() / mask.sum().clip(1)
from keycloakManager.keycloakConnection import Connection
import asyncio
class Login(Connection):
    """### Login (send the user's credentials and receive a token)
    - `email`
    - `secret`
    ##### Child class: inherits the connection objects from the parent
    ##### Connection class and returns a Keycloak token.
    """
    def __init__(self, email: str, secret: str):
        self.email = email
        self.secret = secret
        super().__init__()  # initialise the parent class (sets up the Keycloak connection)
    async def getToken(self):
        """### Get a token (send the user's credentials and receive a token)
        - `email`
        - `secret`
        """
        # Log into the realm as this user via the inherited OpenID client.
        await asyncio.sleep(0)  # yield to the event loop before the blocking token call
        token = self.openID.token(self.email, self.secret)
        return token
# login 0.5 logout 0.2
import world_of_supply_rllib as wsr
import random
import numpy as np
import time
import ray
from ray.tune.logger import pretty_print
from ray.rllib.utils import try_import_tf
import ray.rllib.agents.trainer_template as tt
from ray.rllib.models.tf.tf_action_dist import MultiCategorical
from ray.rllib.models import ModelCatalog
from functools import partial
import ray.rllib.agents.ppo.ppo as ppo
from ray.rllib.agents.ppo.ppo_tf_policy import PPOTFPolicy
from ray.rllib.agents.ppo.ppo_torch_policy import PPOTorchPolicy
import ray.rllib.agents.qmix.qmix as qmix
from ray.rllib.agents.qmix.qmix_policy import QMixTorchPolicy
import ray.rllib.env.multi_agent_env
from gym.spaces import Box, Tuple, MultiDiscrete, Discrete
from world_of_supply_rllib import Utils
from world_of_supply_rllib_models import FacilityNet
import ray.rllib.models as models
tf = try_import_tf()
# Reset any previous Ray session before starting a fresh local cluster.
ray.shutdown()
ray.init()
# Configuration ===============================================================================
# Environment parameters shared by every trainer below.
env_config = {
    'episod_duration': 1000,
    'global_reward_weight_producer': 0.90,
    'global_reward_weight_consumer': 0.90,
    'downsampling_rate': 20
}
# Prototype env instance, used only to read observation/action spaces.
env = wsr.WorldOfSupplyEnv(env_config)
# Trainer settings common to the baseline and PPO runs.
base_trainer_config = {
    'env_config': env_config,
    'timesteps_per_iteration': 25000,
    # == Environment Settings ==
    #'lr': 0.0005,
    'gamma': 0.99,
    # === Settings for the Trainer process ===
    'train_batch_size': 2000,
    'batch_mode': 'complete_episodes',
    'rollout_fragment_length': 50,
}
# PPO network sizes for the two agent roles (LSTM variants left disabled).
ppo_policy_config_producer = {
    "model": {
        "fcnet_hiddens": [128, 128],
        #"custom_model": "facility_net"
    }
}
ppo_policy_config_consumer = {
    "model": {
        "fcnet_hiddens": [256, 256],
        #"custom_model": "facility_net",
        # == LSTM ==
        #"use_lstm": True,
        #"max_seq_len": 8,
        #"lstm_cell_size": 128,
        #"lstm_use_prev_action_reward": False,
    }
}
# Model Configuration ===============================================================================
models.ModelCatalog.register_custom_model("facility_net", FacilityNet)
def print_model_summaries():
    """Instantiate the custom facility network once and print its Keras summary."""
    model_config = models.MODEL_DEFAULTS.copy()
    model_config.update({"custom_model": "facility_net"})
    net = models.ModelCatalog.get_model_v2(
        obs_space=env.observation_space,
        action_space=env.action_space_consumer,
        num_outputs=1,
        model_config=model_config)
    net.rnn_model.summary()
# Policy Configuration ===============================================================================
# Policy registry: policy id -> (policy class, obs space, action space, config).
policies = {
    'baseline_producer': (wsr.ProducerSimplePolicy, env.observation_space, env.action_space_producer, wsr.SimplePolicy.get_config_from_env(env)),
    'baseline_consumer': (wsr.ConsumerSimplePolicy, env.observation_space, env.action_space_consumer, wsr.SimplePolicy.get_config_from_env(env)),
    'ppo_producer': (PPOTFPolicy, env.observation_space, env.action_space_producer, ppo_policy_config_producer),
    'ppo_consumer': (PPOTFPolicy, env.observation_space, env.action_space_consumer, ppo_policy_config_consumer)
}
def filter_keys(d, keys):
    """Return a copy of dict `d` restricted to the entries whose key is in `keys`."""
    filtered = {}
    for key, value in d.items():
        if key in keys:
            filtered[key] = value
    return filtered
# Static agent -> policy assignment: the three toy factories train with PPO,
# every other facility keeps the hand-coded baseline policies.
policy_mapping_global = {
    'SteelFactoryCell_1p': 'baseline_producer',
    'SteelFactoryCell_1c': 'baseline_consumer',
    'LumberFactoryCell_2p': 'baseline_producer',
    'LumberFactoryCell_2c': 'baseline_consumer',
    'ToyFactoryCell_3p': 'ppo_producer',
    'ToyFactoryCell_3c': 'ppo_consumer',
    'ToyFactoryCell_4p': 'ppo_producer',
    'ToyFactoryCell_4c': 'ppo_consumer',
    'ToyFactoryCell_5p': 'ppo_producer',
    'ToyFactoryCell_5c': 'ppo_consumer',
    'WarehouseCell_6p': 'baseline_producer',
    'WarehouseCell_6c': 'baseline_consumer',
    'WarehouseCell_7p': 'baseline_producer',
    'WarehouseCell_7c': 'baseline_consumer',
    'RetailerCell_8p': 'baseline_producer',
    'RetailerCell_8c': 'baseline_consumer',
    'RetailerCell_9p': 'baseline_producer',
    'RetailerCell_9c': 'baseline_consumer',
}
def update_policy_map(policy_map, i = 0, n_iterations = 0): # apply all changes by default
    """Curriculum hook: mutate the agent->policy map as training progresses.

    Currently a no-op; the commented code below shows how the warehouse agents
    would be switched over to the PPO policies part-way through training.
    """
    pass
    # if i == int(n_iterations/100*25):
    #     policy_map['WarehouseCell_6p'] = 'ppo_producer'
    #     policy_map['WarehouseCell_6c'] = 'ppo_consumer'
    # if i == int(n_iterations/100*35):
    #     policy_map['WarehouseCell_7p'] = 'ppo_producer'
    #     policy_map['WarehouseCell_7c'] = 'ppo_consumer'
def create_policy_mapping_fn(policy_map):
    """Build an agent_id -> policy-name mapper closed over `policy_map`.

    The first key of `policy_map` that is a substring of the agent id wins
    (RLlib samples this mapping once per episode).
    """
    def mapping_fn(agent_id):
        for substring, policy_name in policy_map.items():
            if substring in agent_id:
                return policy_name
        return None  # no filter matched (the original fell through, returning None)
    return mapping_fn
# Training Routines ===============================================================================
def print_training_results(result):
    """Pretty-print the subset of RLlib trainer metrics we care about."""
    metrics = ('date', 'episode_len_mean', 'episodes_total', 'episode_reward_max',
               'episode_reward_mean', 'episode_reward_min', 'timesteps_total',
               'policy_reward_max', 'policy_reward_mean', 'policy_reward_min')
    for metric in metrics:
        print(f"- {metric}: {result[metric]}")
def play_baseline(n_iterations):
    """Run the hand-coded baseline policies for `n_iterations` training iterations."""
    HandCodedTrainer = tt.build_trainer("HandCoded", wsr.SimplePolicy)
    baseline_names = ['baseline_producer', 'baseline_consumer']
    multiagent_conf = {
        "multiagent": {
            "policies": filter_keys(policies, baseline_names),
            "policy_mapping_fn": lambda agent_id: 'baseline_producer' if Utils.is_producer_agent(agent_id) else 'baseline_consumer',
            "policies_to_train": baseline_names,
        }
    }
    trainer = HandCodedTrainer(
        env=wsr.WorldOfSupplyEnv,
        config=dict(base_trainer_config, **multiagent_conf))
    for iteration in range(n_iterations):
        print("== Iteration", iteration, "==")
        print_training_results(trainer.train())
    return trainer
def train_ppo(n_iterations):
    """Train the PPO producer/consumer policies for `n_iterations` iterations.

    The mutable policy map is shared with `create_policy_mapping_fn` so
    `update_policy_map` can re-route agents to new policies mid-run.
    """
    policy_map = policy_mapping_global.copy()
    conf = ppo.DEFAULT_CONFIG.copy()
    conf.update({
        "num_workers": 16,
        "num_gpus": 1,
        "vf_share_layers": True,
        "vf_loss_coeff": 20.00,
        "vf_clip_param": 200.0,
        "lr": 2e-4,
        "multiagent": {
            "policies": filter_keys(policies, set(policy_mapping_global.values())),
            "policy_mapping_fn": create_policy_mapping_fn(policy_map),
            "policies_to_train": ['ppo_producer', 'ppo_consumer']
        }
    })
    print(f"Environment: action space producer {env.action_space_producer}, action space consumer {env.action_space_consumer}, observation space {env.observation_space}")
    trainer = ppo.PPOTrainer(
        env=wsr.WorldOfSupplyEnv,
        config=dict(conf, **base_trainer_config))
    training_start_time = time.process_time()
    for i in range(n_iterations):
        print(f"\n== Iteration {i} ==")
        update_policy_map(policy_map, i, n_iterations)
        print(f"- policy map: {policy_map}")
        # Push the current iteration into every remote environment copy.
        trainer.workers.foreach_worker(
            lambda ev: ev.foreach_env(
                lambda env: env.set_iteration(i, n_iterations)))
        iter_start = time.process_time()
        result = trainer.train()
        print(f"Iteration {i} took [{(time.process_time() - iter_start):.2f}] seconds")
        print_training_results(result)
        print(f"Training ETA: [{(time.process_time() - training_start_time)*(n_iterations/(i+1)-1)/60/60:.2f}] hours to go")
    return trainer
import heapq
def solution(operations):
    """Double-ended priority queue driven by "I n" / "D 1" / "D -1" commands.

    "I n" inserts n, "D 1" removes the current maximum, "D -1" removes the
    current minimum (deletions on an empty queue are ignored).
    Returns [max, min] of the remaining values, or [0, 0] when empty.
    """
    heap = []
    for operation in operations:
        command, number = operation.split()
        number = int(number)
        if command == "I":
            heapq.heappush(heap, number)
        elif number == 1:
            # Delete the maximum. list.remove breaks the heap invariant
            # (subsequent heappop could return a non-minimum), so restore it.
            if heap:
                heap.remove(max(heap))
                heapq.heapify(heap)
        else:
            # "D -1": delete the minimum.
            if heap:
                heapq.heappop(heap)
    if not heap:
        return [0, 0]
    # Return a list for consistency with the empty case above.
    return [max(heap), heap[0]]
| Jinnie-J/Algorithm-study | programmers/이중우선순위큐.py | 이중우선순위큐.py | py | 487 | python | en | code | 0 | github-code | 13 |
import torch
import torch.nn.functional as F
from config import config
_config = config()  # module-level config instance (not referenced below; presumably used elsewhere — confirm)
print('asdasd')  # NOTE(review): leftover debug print — consider removing
def evaluate(golden_list, predict_list):
    """Entity-level F1 between gold and predicted BIO tag sequences.

    An entity is a 'B-*' tag plus its following 'I-*' tags. A gold entity
    counts as a true positive only when the predicted span matches exactly
    and the prediction does not extend it with an extra 'I-*' tag.
    Returns 0 when there are no true positives.

    Note: the precision/recall denominators are swapped relative to the
    usual definitions, but F1 is symmetric in P and R, so the result is
    unchanged; the original computation is kept.
    """
    num_sentences = len(golden_list)
    num_gt = 0   # gold entity count ('B-' tags)
    num_pre = 0  # predicted entity count ('B-' tags)
    for i in range(num_sentences):
        for j in range(len(predict_list[i])):
            if golden_list[i][j][0] == 'B':
                num_gt += 1
            if predict_list[i][j][0] == 'B':
                num_pre += 1
    # Pad copies with a '$' sentinel so the inner scan can look one past the
    # end; the original mutated the caller's lists in place (bug fixed here).
    golden_list = [sentence + ['$'] for sentence in golden_list]
    tp = 0
    for i in range(num_sentences):
        B_index = 0
        end_index = 0
        current_index = 0
        while current_index < len(golden_list[i]):
            if golden_list[i][current_index][0] == 'B':
                B_index = current_index
                end_index = B_index + 1
                while golden_list[i][end_index][0] == 'I':
                    end_index += 1
                if golden_list[i][B_index:end_index] == predict_list[i][B_index:end_index]:
                    if current_index == len(predict_list[i]) - 1:
                        tp += 1
                    # Bounds check fixes an IndexError when a matched
                    # multi-token entity ends exactly at the sequence end.
                    elif end_index >= len(predict_list[i]) or predict_list[i][end_index][0] != 'I':
                        tp += 1
                current_index = end_index
            else:
                current_index += 1
    if tp == 0:
        return 0
    P = tp / num_gt
    R = tp / num_pre
    F1 = (2 * P * R) / (P + R)
    return F1
def new_LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
    """One step of a coupled input-forget-gate LSTM cell.

    Unlike a standard LSTM, the cell update weights the candidate state by
    (1 - forgetgate) instead of the input gate, so the separately computed
    input gate is unused.
    Returns the pair (hy, cy) for this time step.
    """
    hx, cx = hidden
    # Single fused affine transform yields all four gate pre-activations.
    gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)
    ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
    # torch.sigmoid / torch.tanh replace the deprecated F.sigmoid / F.tanh.
    ingate = torch.sigmoid(ingate)  # NOTE(review): computed but unused — confirm the coupled-gate variant is intended
    forgetgate = torch.sigmoid(forgetgate)
    cellgate = torch.tanh(cellgate)
    outgate = torch.sigmoid(outgate)
    cy = (forgetgate * cx) + ((1 - forgetgate) * cellgate)
    hy = outgate * torch.tanh(cy)
    return hy, cy
##n = [['B-TAR','I-TAR','O','B-HYP','O']]
##o = [['B-TAR','O','O','B-HYP','I-HYP']]
##
##p = [['B-TAR','B-TAR','B-TAR','I-TAR']]
##q = [['B-TAR','B-TAR','B-TAR','O']]
##
##aa = [['B-TAR','I-TAR','O','B-HYP'],['B-TAR','O','O','B-HYP']]
##bb = [['O','O','B-HYP','I-HYP'],['O','O','O','O']]
##
##cc = [['B-TAR','O', 'I-TAR', 'O','B-TAR'],['B-TAR','B-TAR','I-TAR','O']]
##dd = [['B-TAR','O', 'O', 'O','O'],['O','O','O','O']]
##
##
##print(evaluate(n,o))
##print(evaluate(p,q))
##print(evaluate(aa,bb))
##print(evaluate(cc,dd))
def get_char_sequence(model, batch_char_index_matrices, batch_word_len_lists):
    """Encode every word character-by-character with the model's char BiLSTM.

    Args:
        model: provides `char_embeds`, `char_lstm` and `sort_input`.
        batch_char_index_matrices: char-index tensor, shape [batch, words, chars].
        batch_word_len_lists: per-word lengths, shape [batch, words].
    Returns:
        Tensor of shape [batch, words, 100] — the concatenated final
        forward/backward hidden states for each word.
    """
    # pack_padded_sequence was never imported at module level, so the original
    # function raised NameError on every call; import it locally here.
    from torch.nn.utils.rnn import pack_padded_sequence
    x, y, z = batch_char_index_matrices.size()
    flat_chars = batch_char_index_matrices.view(-1, z)
    batch_word_len_lists = batch_word_len_lists.view(-1)
    input_char_embeds = model.char_embeds(flat_chars)
    # Sort words by length (required by pack_padded_sequence) and remember
    # the permutation so the original order can be restored afterwards.
    perm_idx, sorted_batch_word_len_lists = model.sort_input(batch_word_len_lists)
    input_char_embeds = input_char_embeds[perm_idx]
    _, desorted_indices = torch.sort(perm_idx, descending=False)
    output_sequence = pack_padded_sequence(input_char_embeds, sorted_batch_word_len_lists.data.tolist(), batch_first=True)
    output_sequence, (h1, h2) = model.char_lstm(output_sequence)
    # Restore the original word order for both LSTM directions.
    h1[0] = h1[0][desorted_indices]
    h1[1] = h1[1][desorted_indices]
    new = torch.cat((h1[0], h1[1]), dim=-1)
    # NOTE(review): 100 hard-codes 2 * hidden_size (= 2 * 50) — confirm against the model config.
    new = new.view(x, y, 100)
    return new
| Elijahlen/Python_Project_BiLSTM_Hyponymy-Classification | todo.py | todo.py | py | 3,960 | python | en | code | 1 | github-code | 13 |
#! /usr/bin/env python3
'''Letters to words challenge assistant.
This program takes a collection of letters entered as a string and returns
all possible valid English words.
'''
# Standard libraries:
from collections import defaultdict
from itertools import permutations
from pathlib import Path
import sys
# Third part libraries:
from english_words import english_words_lower_alpha_set as english_dict
# Global Constants:
# Available from https://github.com/dwyl/english-words.git
WORD_FILE = 'C:\working\github\english-words\words_alpha.txt'
# Minimum word length:
WORD_MIN = 2
def create_permutations(letters, min_length=None):
    """Return every string formed by permuting `min_length`..len(letters) letters.

    Args:
        letters: the pool of letters to permute.
        min_length: smallest permutation length to generate; defaults to the
            module-level WORD_MIN (new optional parameter, backward compatible).
    Returns:
        A set of candidate strings.
    """
    if min_length is None:
        min_length = WORD_MIN
    return {
        ''.join(letter_set)
        for size in range(min_length, len(letters) + 1)
        for letter_set in permutations(letters, size)
    }
def find_words(word_list, word_set):
    """Return the candidates from `word_list` that are real words in `word_set`."""
    valid = set()
    for candidate in word_list:
        if candidate in word_set:
            valid.add(candidate)
    return valid
def initialize(word_file=None):
    """Load the dictionary file into a set of words (one word per line).

    Args:
        word_file: path to the word list; defaults to WORD_FILE. The default
            is now resolved at call time (the old `word_file=WORD_FILE`
            default was bound at definition time), and the file is opened
            with a context manager so it is always closed.
    """
    if word_file is None:
        word_file = WORD_FILE
    with open(word_file) as word_file_handle:
        return {word.strip() for word in word_file_handle}
def usage(invocation):
    """Print command-line usage help, then exit with status 1 (never returns)."""
    prog = Path(invocation).name
    message = (f'{prog} <letters>\n\tWhere letters is a string of letters you'
               f' want to build words from.\n\te.g., {prog} "oxefaqz"\n'
               f'\n\tNote: Only American letters (a-z) are allowed.\n')
    print(message)
    sys.exit(1)
def main(argv):
    """Entry point: validate CLI arguments, then print every valid word found."""
    if len(argv) != 2 or argv[1].lower() in ('-h', '--help'):
        usage(argv[0])  # exits; never returns
    letters = argv[1].lower()
    if not letters.isalpha():
        usage(argv[0])
    # Word list from the english_words package (initialize() reads a file instead).
    word_set = english_dict
    candidates = create_permutations(letters)
    results = find_words(candidates, word_set)
    print(f'Found {len(results)} words:')
    # Group alphabetically-sorted words by length, then print shortest first.
    by_length = defaultdict(list)
    for word in sorted(results):
        by_length[len(word)].append(word)
    for length in range(WORD_MIN, len(letters) + 1):
        for word in by_length[length]:
            print(word)
# Standard entry-point guard: run main() only when executed as a script.
if __name__ == '__main__':
    main(sys.argv)
| sockduct/Weekly | letters2words.py | letters2words.py | py | 2,285 | python | en | code | 0 | github-code | 13 |
""" The "Reader" class was created for the conversion of JSON data to a dictionary
Where the dictionary's keys are input strings and values are the "Result" class objects.
"""
import json
from EGS_task.Result import Result
class Reader:
    """Convert a JSON results file into a dict of input string -> Result."""

    @staticmethod
    def json2dict(json_path):
        """Parse `json_path`; map each entry's key to a Result carrying its expected value."""
        with open(json_path) as json_file:
            data = json.load(json_file)
        input_dct = {}
        for entry in data:
            result = Result()
            result.expected_res_setter(entry["result"]["expected"])
            input_dct[entry["key"]] = result
        return input_dct
| Azatyan0/EGS-Task | EGS_task/Reader.py | Reader.py | py | 632 | python | en | code | 0 | github-code | 13 |
import datetime
from transaction import Transaction
from Crypto.Hash import SHA256
class Block:
    """A block in the chain: a list of transactions plus mining metadata.

    Fixes: method descriptions were written as string statements *before*
    each def (discarded expressions, not docstrings) — they are real
    docstrings now; annotations referencing Transaction/SHA256 are quoted
    so they are no longer evaluated eagerly at class-creation time.
    """
    def __init__(self, transaction_list: 'list[Transaction]', hash_key: str = None, nonce: bytes = None, prev_hash: str = None):
        # When we create a block we know its previous block.
        self.previous_hash = prev_hash
        self.timestamp = str(datetime.datetime.now())
        # List of Transaction objects contained in this block.
        self.list_of_transactions = transaction_list
        # These values become known once the block is mined by some node.
        self.hash_key = hash_key
        self.nonce = nonce

    def bytearray_before_nonce(self) -> bytearray:
        """Serialize transaction ids, previous hash and timestamp into the
        bytearray that is hashed together with a candidate nonce."""
        block_bytearray = bytearray()
        # Use the transaction ids, not the Transaction objects themselves.
        for trans in self.list_of_transactions:
            block_bytearray.extend(trans.transaction_id.hexdigest().encode('utf-8'))
        block_bytearray.extend(self.previous_hash.encode('utf-8'))
        block_bytearray.extend(self.timestamp.encode('utf-8'))
        return block_bytearray

    def get_transactions(self, only_ids=False) -> list:
        """Return the block's transactions; with only_ids=True return just the
        hex digests of their transaction ids."""
        if only_ids:
            return [trans.transaction_id.hexdigest() for trans in self.list_of_transactions]
        else:
            return self.list_of_transactions

    def to_dict(self) -> dict:
        """Convert the block to a JSON-serializable dict for transfer to other nodes."""
        return {
            'previousHashKey': self.previous_hash,
            'hashKey': self.hash_key,
            'nonce': self.nonce.decode('ISO-8859-1') if self.nonce is not None else None,
            'timestamp': self.timestamp,
            'transactions': [trans.to_dict() for trans in self.list_of_transactions]
        }

    def add_transaction(self, new_transaction: 'Transaction') -> None:
        """Append a transaction unless one with the same id is already present."""
        my_trans_ids = [x.transaction_id.hexdigest() for x in self.list_of_transactions]
        if new_transaction.transaction_id.hexdigest() not in my_trans_ids:
            self.list_of_transactions.append(new_transaction)
        else:
            print('Transaction is already in this block.')

    def is_mined(self, nonce: bytes, hash_key: str) -> None:
        """Record the nonce and hash key found by a miner for this block."""
        # To view the nonce as an int: int.from_bytes(nonce, 'big')
        self.nonce = nonce
        self.hash_key = hash_key

    def remove_common_transactions(self, transactions: 'list[dict]') -> None:
        """Drop transactions already validated elsewhere.

        Called when a new block arrives or when we adopt a new chain."""
        # Convert to sets so the difference is cheap to compute.
        curr_transactions_ids = set(self.get_transactions(only_ids=True))
        other_transactions_ids = {trans['id'] for trans in transactions}
        # Keep only the transactions not validated by the other block/chain.
        unique_transactions_ids = curr_transactions_ids - other_transactions_ids
        self.list_of_transactions = [trans for trans in self.list_of_transactions
                                     if trans.transaction_id.hexdigest() in unique_transactions_ids]

    @staticmethod
    def find_hash_key(nonce: str, timestamp: str, previous_hash: str, transactions: 'list[dict]') -> 'SHA256':
        """Recompute a candidate block's SHA256 hash during validation."""
        block_bytearray = bytearray()
        # The given block's transactions arrive as dicts; use their id strings.
        for trans in transactions:
            block_bytearray.extend(trans['id'].encode('utf-8'))
        block_bytearray.extend(previous_hash.encode('utf-8'))
        block_bytearray.extend(timestamp.encode('utf-8'))
        block_bytearray.extend(nonce.encode('ISO-8859-1'))
        return SHA256.new(data=block_bytearray)
| LightingSpider/Blockchain-NBC | block.py | block.py | py | 4,828 | python | en | code | 1 | github-code | 13 |
# -*- coding: utf-8 -*-
'''
Created on Mon Jun 17 11:34:02 2019
@author: Patrik_Zelena
'''
from DatabaseHelper import DatabaseHelper
from HotelKNNAlgorithm import HotelKNNAlgorithm
from Evaluator import Evaluator
from surprise import Dataset, Reader
import random
import numpy as np
import logging
log = logging.getLogger('RecSys.Recommender')
def LoadData():
    """Load the full ratings dataset and the popularity rankings from the database."""
    db = DatabaseHelper()
    return db.loadDataset(), db.getPopularityRanks()
def LoadDataForLocation(lat, lon, user):
    """Load the ratings dataset filtered to a location (plus popularity rankings)."""
    db = DatabaseHelper()
    return db.loadDatasetForLocation(lat, lon, user), db.getPopularityRanks()
def BuildRecModel():
    """Evaluate the KNN recommender on the full dataset, then train and persist it."""
    # Fixed seeds keep the evaluation reproducible.
    np.random.seed(0)
    random.seed(0)
    evaluation_data, rankings = LoadData()
    evaluator = Evaluator(evaluation_data, rankings)
    algorithm = HotelKNNAlgorithm()
    log.info('Algorithm=K-Nearest Neighbour')
    evaluator.SetAlgorithm(algorithm, 'HotelKNNAlgorithm')
    evaluator.Evaluate(False)
    evaluator.TrainAndSaveAlgorithm()
def GetRecommendations(user, k, lat, lon):
    """Return the top-k hotel recommendations for `user` near (lat, lon).

    Returns an empty list when too little rating data exists for the area.
    """
    # Fixed seeds keep the recommendation run reproducible.
    np.random.seed(0)
    random.seed(0)
    evaluation_data, rankings = LoadDataForLocation(lat, lon, user)
    if evaluation_data.size <= 100:
        log.error('Not enough hotel in range')
        return []
    reader = Reader(rating_scale=(0, 3))
    filtered_data = Dataset.load_from_df(evaluation_data, reader=reader)
    evaluator = Evaluator(filtered_data, rankings)
    algorithm = HotelKNNAlgorithm()
    evaluator.SetAlgorithm(algorithm, 'HotelKNNAlgorithm')
    return evaluator.GetTopNRecs(user, k)
BuildRecModel()
# Filename: test001.py
import string
def reverse(text):
    """Return `text` with its characters in reverse order."""
    return ''.join(reversed(text))
def is_palindrome(text):
    """Return True if `text` is a palindrome, ignoring case, spaces and punctuation.

    The three intermediate print() calls in the original were leftover debug
    output and have been removed; the trivial reverse() helper is inlined
    as a slice so the function is self-contained.
    """
    text = text.lower()
    text = text.replace(' ', '')
    for char in string.punctuation:
        text = text.replace(char, '')
    return text == text[::-1]
def main():
    """Prompt for a line of text and report whether it is a palindrome."""
    text = input('Enter text:')
    if is_palindrome(text):
        print("Yes, it is")
    else:
        print("No, it is not")
# Run interactively when executed as a script; announce when imported.
if __name__ == '__main__':
    main()
else:
    print("it was imported!")  # typo fix: message previously read "improted"
| likaiharry/First | test001.py | test001.py | py | 547 | python | en | code | 0 | github-code | 13 |
import os
import logging
import time
from logging import handlers
# Shared formats for all log handlers.
LOG_FORMAT = "%(asctime)s - %(levelname)s: %(message)s"
DATE_FORMAT = "%m/%d/%Y %H:%M:%S %p"
# Timestamped log file paths, created fresh on every program start.
debug_file_path = './data/track_{0}.log'.format(time.strftime('%Y%m%d_%H%M%S'))
app_file_path = './data/app_{0}.log'.format(time.strftime('%Y%m%d_%H%M%S'))
# Make sure the ./data directory exists before any handler opens a file.
folder_path = os.path.dirname(debug_file_path)
if not os.path.exists(folder_path):
    os.makedirs(folder_path)
# logging.basicConfig(filename=file_path, filemode='w+',
#                     level=logging.INFO, format=LOG_FORMAT)
#log_runtime = logging.getLogger("scapy.runtime")
# Two module-level loggers: raw tracking data vs. application messages.
log_debug = logging.getLogger("ins401-log.debug")
log_app = logging.getLogger("ins401-log.app")
# Each logger writes to its own file; 'w+' truncates the file at startup.
debug_file_output = logging.FileHandler(
    filename=debug_file_path, mode='w+', encoding='utf-8')
debug_file_output.setFormatter(logging.Formatter(LOG_FORMAT))
log_debug.addHandler(debug_file_output)
log_debug.setLevel(logging.INFO)
app_file_output = logging.FileHandler(
    filename=app_file_path, mode='w+', encoding='utf-8')
app_file_output.setFormatter(logging.Formatter(LOG_FORMAT))
log_app.addHandler(app_file_output)
log_app.setLevel(logging.INFO)
def track_log_status(message, *args):
    """Write `message` (with lazy %-style args) to the tracking debug log.

    Empty or falsy messages are ignored. The original also tested
    `len(message) == 0`, which is redundant after the truthiness check
    (an empty string is already falsy).
    """
    if not message:
        return
    log_debug.info(message, *args)
| yiweisong/ins401-log | app/debug.py | debug.py | py | 1,328 | python | en | code | 0 | github-code | 13 |
#Joe Hester
#Asteroids
#final project
import pygame
from pygame.locals import *
from math import cos,sin,pi,hypot
import random
class Ship(object):
    """The player's triangular ship; its position wraps around the 1200x700 screen."""

    def __init__(self, x, y, vx, vy):
        self.x = x
        self.y = y
        self.vx = vx
        self.vy = vy
        self.angle = -pi / 2              # facing straight up
        self.v = 0                        # current scalar speed
        # Triangle outline in ship-local coordinates (nose at the top).
        self.pointlist = [(0, -10), (-5, 10), (5, 10)]

    def draw(self, screen):
        """Draw the ship as a red triangle translated to its world position."""
        world_points = [(px + self.x, py + self.y) for px, py in self.pointlist]
        pygame.draw.polygon(screen, (255, 0, 0), world_points)

    def move(self):
        """Advance one frame along the current heading, wrapping at the screen edges."""
        self.vy = self.v * sin(self.angle)
        self.vx = self.v * cos(self.angle)
        self.x = (self.x + self.vx) % 1200
        self.y = (self.y + self.vy) % 700

    def rotate(self, dangle):
        """Turn by `dangle` radians and rotate the local outline to match."""
        self.angle += dangle
        self.pointlist = [
            [px * cos(dangle) - py * sin(dangle),
             px * sin(dangle) + py * cos(dangle)]
            for px, py in self.pointlist
        ]
class Faser(object):
    """A laser bolt: flies in a straight line and cycles through random colours."""

    v = 20  # default bolt speed, used by the firing code

    def __init__(self, x, y, r, vx, vy):
        self.x = x
        self.y = y
        self.r = r
        self.vx = vx
        self.vy = vy
        self.angle = -pi / 2
        self.red = 255
        self.green = 255
        self.blue = 255

    def draw(self, surface):
        """Draw the bolt as a filled circle in its current colour."""
        colour = (self.red, self.green, self.blue)
        pygame.draw.circle(surface, colour, (int(self.x), int(self.y)), self.r)

    def move(self):
        """Advance one frame (no screen wrapping; bolts die off-screen)."""
        self.x += self.vx
        self.y += self.vy

    def change_color(self):
        """Pick a fresh random RGB colour for the flicker effect."""
        self.red = random.randint(0, 255)
        self.green = random.randint(0, 255)
        self.blue = random.randint(0, 255)
class Rock(object):
    """An asteroid: a white circle drifting at constant velocity, wrapping at the edges."""

    def __init__(self, x, y, vx, vy, r):
        self.x = x
        self.y = y
        self.vx = vx
        self.vy = vy
        self.r = r

    def draw(self, surface):
        """Draw the asteroid as a filled white circle."""
        pygame.draw.circle(surface, (255, 255, 255), (int(self.x), int(self.y)), self.r)

    def move(self):
        """Advance one frame, wrapping around the 1200x700 screen."""
        self.x = (self.x + self.vx) % 1200
        self.y = (self.y + self.vy) % 700
# --- Game setup ----------------------------------------------------------
# Fixes: xrange (Python 2 only) replaced with range; the bullet loop now
# iterates snapshots, since removing from a list while iterating it skipped
# elements and a bolt leaving both screen bounds at once was removed twice
# (raising ValueError).
pygame.init()
screen = pygame.display.set_mode((1200,700))
clock = pygame.time.Clock()
ship = Ship(300,300,0,0)
rightpress = False
leftpress = False
upup = True        # thrust key released
updown = False     # thrust key held
vmax = 10          # ship speed cap
bull = []          # live laser bolts
ast = []           # live asteroids
for i in range(6):
    ast.append(Rock(random.randint(0,1700), random.randint(0,700), random.uniform(-3,3), random.uniform(-3,3), 30))
playing = True
going = True
dead = False
# --- Main loop -----------------------------------------------------------
while going == True:
    screen.fill((0,0,0))
    for event in pygame.event.get():
        if event.type == QUIT:
            going = False
        if event.type == KEYDOWN:
            if event.key == K_RIGHT:
                rightpress = True
            elif event.key == K_LEFT:
                leftpress = True
            elif event.key == K_UP:
                ship.move()
                upup = False
                updown = True
            elif event.key == K_SPACE:
                # Fire a bolt from the ship along its current heading.
                bull.append(Faser(ship.x,ship.y,7,Faser.v*cos(ship.angle),Faser.v*sin(ship.angle)))
        if event.type == KEYUP:
            if event.key == K_RIGHT:
                rightpress = False
            if event.key == K_LEFT:
                leftpress = False
            if event.key == K_UP:
                upup = True
                updown = False
        elif event.type == MOUSEBUTTONDOWN:
            if dead == True:
                # Click to restart after dying: reset the ship and respawn rocks.
                dead = False
                ship.v = 0
                ship.x = 600; ship.y = 350
                ast = []
                for i in range(6):
                    ast.append(Rock(random.randint(0,1700), random.randint(0,700), random.uniform(-3,3), random.uniform(-3,3), 30))
    if dead == False:
        # Iterate a snapshot so removals don't disturb the iteration.
        for circ in bull[:]:
            circ.draw(screen)
            circ.move()
            circ.change_color()
            if circ.x >= 1207 or circ.x <= -7 or circ.y >= 707 or circ.y <= -7:
                bull.remove(circ)   # off screen: retire the bolt exactly once
                continue
            for rock in ast[:]:
                if hypot(rock.x-circ.x,rock.y-circ.y)<rock.r + circ.r:
                    if rock.r > 10:
                        # Big rocks shatter into three smaller ones.
                        for i in range(3):
                            new_rock = Rock(rock.x,rock.y,random.uniform(-3,3),random.uniform(-3,3),rock.r/2)
                            ast.append(new_rock)
                    ast.remove(rock)
                    bull.remove(circ)
                    break
        for rock in ast:
            rock.draw(screen)
            rock.move()
            if hypot(rock.x-ship.x,rock.y-ship.y) < rock.r+9:
                dead = True
        if rightpress == True:
            ship.rotate(.05)
        if leftpress == True:
            ship.rotate(-.05)
        if upup == True:
            ship.v = ship.v/1.01      # coasting: gradual drag
        if updown == True:
            ship.v = ship.v + .5      # thrust
        if ship.v >= vmax:
            ship.v = vmax
        if playing == True:
            clock.tick(50)
        ship.draw(screen)
        ship.move()
    else:
        screen.fill((255,0,0))       # death screen: solid red until the player clicks
    pygame.display.update()
| csharrison/Student-Final-Projects | joseph_hester/asteroids_2.py | asteroids_2.py | py | 5,462 | python | en | code | 1 | github-code | 13 |
import numpy as np
import random
import time
from math import sqrt
import pandas as pd
import glob
from numpy import random
import sys
import pathlib
import math
from math import *
import time
import argparse
import logging
import apache_beam as beam
from apache_beam.dataframe.convert import to_dataframe
from apache_beam.dataframe.convert import to_pcollection
from apache_beam.io import ReadFromText
from apache_beam.options.pipeline_options import PipelineOptions
def SafetyMeasures(row):
    """Derive per-vehicle safety indicators from one CSV row of the combined dataset.

    `row` is a comma-separated string; a fixed set of column indices is used
    (assumes the combined_dataset.csv layout — confirm if the schema changes).
    Returns a dict of indicator values keyed by measure name.
    """
    columns = row.split(',')
    track_id = columns[1]
    recording_id = columns[0]
    vehicle_density = columns[28]
    total_velocity = columns[32]
    total_acceleration = columns[33]
    speed_limit = columns[26]
    num_collision_vehicles = columns[37]
    class_type = columns[23]
    slip_angle = columns[27]
    # Speed-limit compliance: True when the vehicle is not exceeding the limit.
    compliance = float(total_velocity) <= float(speed_limit)
    # Driver awareness: a vehicle stopped with zero acceleration is flagged low.
    if total_velocity == '0' and total_acceleration == '0':
        driver_awareness = "low awareness"
    else:
        driver_awareness = "high awareness"
    # Collision safety: more than one vehicle sharing the same position/time.
    potential_collision = int(num_collision_vehicles) > 1
    # Pedestrian presence, and vulnerable road users more broadly.
    pedestrian_presence = class_type == "pedestrian"
    vulnerable_user = class_type in ("pedestrian", "bicycle", "motorcycle")
    # Slip angle: the angle between where a wheel points and where it travels.
    # Below 20 degrees (absolute) is considered safe; the original's extra
    # `abs(...) >= 0` test was always true and has been dropped.
    if abs(float(slip_angle)) < 20:
        slip_angle_safety = "safe slip angle"
    else:
        slip_angle_safety = "unsafe slip angle"
    return {
        "track_id": track_id,
        "recording_id": recording_id,
        "vehicle_density": vehicle_density,
        "driver_awareness": driver_awareness,
        "compliance": compliance,
        "potential_collision": potential_collision,
        "pedestrian_presence": pedestrian_presence,
        "vulnerable_user": vulnerable_user,
        "slip_angle_safety": slip_angle_safety,
    }
# Batch pipeline: read the combined dataset CSV (skipping the header row),
# map every record through SafetyMeasures, and write the resulting dicts
# as text shards named output-*.txt.
with beam.Pipeline() as pipeline:
    (
        pipeline
        | 'ReadFromText' >> beam.io.ReadFromText('combined_dataset.csv', skip_header_lines=1)
        | 'ProcessData' >> beam.Map(SafetyMeasures)
        | 'WriteToText' >> beam.io.WriteToText('output', file_name_suffix='.txt')
    )
| SaarucaK/cloud-project | project_combined_pipeline.py | project_combined_pipeline.py | py | 3,148 | python | en | code | 0 | github-code | 13 |
17781804063 | import requests
from bs4 import BeautifulSoup
# Target page: Hacker News front page.
url = 'https://news.ycombinator.com/news'
# Fetch the raw HTML and parse it with the built-in html.parser backend.
url_htmltext_request = requests.get(url).text
soup_object = BeautifulSoup(url_htmltext_request, 'html.parser')
# Story anchors and their matching metadata rows (same order on the page).
titlelink = soup_object.select('.titlelink')
subtext = soup_object.select('.subtext')
def my_news(links, subtext):
    """Pair each story link with its score block and collect the results.

    Parameters
    ----------
    links : list
        Anchor tags (`.titlelink` elements); each supplies a title via
        ``getText()`` and a URL via ``get('href')``.
    subtext : list
        Matching `.subtext` elements; each may contain a `.score` child.

    Returns
    -------
    list[dict]
        One dict per story with ``title``, ``href`` and ``votes`` keys.
        ``votes`` stays an empty list when no score element is present
        (preserving the original behaviour).
    """
    stories = []
    for index, link in enumerate(links):
        # Use the `links` parameter — the original iterated it but then read
        # the module-level `titlelink` global, so the argument was ignored.
        title = link.getText()
        href = link.get('href', None)
        point = subtext[index].select('.score')
        if len(point) > 0:
            point = point[0].getText()
        stories.append({'title': title, 'href': href, 'votes': point})
    return stories
# Render the scraped stories (title, link, votes) to stdout.
print(my_news(titlelink, subtext))
| adeagbaje/webscraping | webscraping.py | webscraping.py | py | 748 | python | en | code | 0 | github-code | 13 |
17114961134 | import logging
from agr_literature_service.api.models import ReferenceModel, CrossReferenceModel, \
ReferencefileModel, AuthorModel, MeshDetailModel
from agr_literature_service.lit_processing.utils.db_read_utils import \
get_references_by_curies, get_curie_to_title_mapping, \
get_pmid_list_without_pmc_package, get_pmid_to_reference_id_mapping, \
get_reference_id_by_curie, retrieve_newly_added_pmids, retrieve_all_pmids, \
get_reference_id_by_pmid, get_cross_reference_data, get_reference_ids_by_pmids, \
get_doi_data, get_author_data, get_mesh_term_data, \
get_mod_corpus_association_data_for_ref_ids, get_cross_reference_data_for_ref_ids, \
get_author_data_for_ref_ids, get_mesh_term_data_for_ref_ids, \
get_mod_reference_type_data_for_ref_ids, get_mod_abbreviations
from agr_literature_service.lit_processing.data_ingest.utils.db_write_utils import \
insert_referencefile, insert_referencefile_mod_for_pmc
from ...fixtures import db, load_sanitized_references, populate_test_mod_reference_types # noqa
logging.basicConfig(format='%(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class TestDbReadUtils:
    """Integration tests for the lit_processing DB read/write helpers.

    Each test receives the ``db`` session plus reference/mod-reference-type
    fixtures (see ``...fixtures``) and checks the helper functions against
    the sanitized reference rows those fixtures load.
    """

    def test_db_read_functions(self, db, load_sanitized_references, populate_test_mod_reference_types): # noqa
        """Curie-keyed lookups: reference fields, titles, and reference_id."""
        refs = db.query(ReferenceModel).order_by(ReferenceModel.curie).all()
        db_entries = get_references_by_curies(db, [refs[0].curie, refs[1].curie])
        assert db_entries[refs[0].curie]['issue_name'] == '1'
        assert db_entries[refs[0].curie]['volume'] == '888'
        assert db_entries[refs[0].curie]['page_range'] == '88'
        assert db_entries[refs[1].curie]['issue_name'] == '66'
        assert db_entries[refs[1].curie]['volume'] == '4'
        assert db_entries[refs[1].curie]['page_range'] == '937'
        missing_agr_in_mod = {}
        missing_agr_in_mod['ZFIN'] = [refs[0].curie, refs[1].curie]
        agr_to_title = get_curie_to_title_mapping(missing_agr_in_mod['ZFIN'])
        assert agr_to_title[refs[0].curie] == refs[0].title
        assert agr_to_title[refs[1].curie] == refs[1].title
        reference_id = get_reference_id_by_curie(db, refs[0].curie)
        assert reference_id == refs[0].reference_id
    def test_db_read_functions2(self, db, load_sanitized_references, populate_test_mod_reference_types): # noqa
        """PMID retrieval and PMID<->reference_id / DOI mappings."""
        pmids = retrieve_newly_added_pmids(db)
        pmids_db = []
        test_pmid = None
        test_reference_id = None
        for x in db.query(CrossReferenceModel).filter_by(curie_prefix='PMID').all():
            if test_pmid is None:
                test_pmid = x.curie.replace("PMID:", '')
                test_reference_id = x.reference_id
            pmids_db.append(x.curie.replace("PMID:", ''))
        pmids.sort()
        pmids_db.sort()
        assert pmids == pmids_db
        all_pmids = retrieve_all_pmids(db)
        all_pmids.sort()
        assert all_pmids == pmids_db
        reference_id = get_reference_id_by_pmid(db, test_pmid)
        assert reference_id == test_reference_id
        pmid_to_reference_id = {}
        reference_id_to_pmid = {}
        get_reference_ids_by_pmids(db, "|".join(pmids), pmid_to_reference_id, reference_id_to_pmid)
        doi_to_reference_id = get_doi_data(db)
        # NOTE(review): `x.curie.replace == 'PMID'` compares the bound method,
        # never True, so the loop body below is effectively dead — confirm the
        # intended check was on x.curie_prefix.
        for x in db.query(CrossReferenceModel).all():
            if x.curie.replace == 'PMID':
                pmid = x.curie.replace("PMID:", "")
                assert pmid_to_reference_id[pmid] == x.reference_id
                assert reference_id_to_pmid[x.reference_id] == pmid
            if x.curie.replace == 'DOI':
                assert doi_to_reference_id[x.curie] == x.reference_id
    def test_db_read_functions3(self, db, load_sanitized_references, populate_test_mod_reference_types): # noqa
        """Cross-reference, author, and MeSH-term data keyed by reference_id."""
        refs = db.query(ReferenceModel).order_by(ReferenceModel.curie).all()
        ref_ids = [refs[0].reference_id, refs[1].reference_id]
        (reference_id_to_doi, reference_id_to_pmcid) = get_cross_reference_data(db, None, ref_ids)
        for x in db.query(CrossReferenceModel).filter_by(reference_id=refs[0].reference_id).all():
            if x.curie_prefix == 'DOI':
                assert x.curie.replace("DOI:", "") == reference_id_to_doi[refs[0].reference_id]
            if x.curie_prefix == 'PMCID':
                assert x.curie.replace("PMCID:", "") == reference_id_to_pmcid[refs[0].reference_id]
        refs = db.query(ReferenceModel).order_by(ReferenceModel.curie).all()
        reference_id_to_authors = get_author_data(db, 'ZFIN', ref_ids, 500)
        authors = reference_id_to_authors[refs[0].reference_id]
        x = db.query(AuthorModel).filter_by(reference_id=refs[0].reference_id, order=1).one_or_none()
        assert x.last_name == authors[0]['last_name']
        assert x.name == authors[0]['name']
        reference_id_to_mesh_terms = get_mesh_term_data(db, 'ZFIN', ref_ids, 500)
        mesh_terms = reference_id_to_mesh_terms[refs[0].reference_id]
        (heading_term, qualifier_term) = mesh_terms[0]
        mesh_terms = db.query(MeshDetailModel).filter_by(reference_id=refs[0].reference_id).all()
        assert mesh_terms[0].heading_term == heading_term
        assert mesh_terms[0].qualifier_term == qualifier_term
    def test_db_read_functions4(self, db, load_sanitized_references, populate_test_mod_reference_types): # noqa
        """Bulk (comma-separated id string) variants: mod-corpus, xrefs, authors."""
        refs = db.query(ReferenceModel).order_by(ReferenceModel.curie).all()
        ref_ids = str(refs[0].reference_id) + ", " + str(refs[1].reference_id)
        reference_id_to_mod_corpus_data = get_mod_corpus_association_data_for_ref_ids(db, ref_ids)
        mca_data = reference_id_to_mod_corpus_data[refs[0].reference_id]
        assert mca_data[0]['mod_abbreviation'] == 'ZFIN'
        assert mca_data[0]['corpus'] is True
        assert mca_data[0]['mod_corpus_sort_source'] == 'Dqm_files'
        reference_id_to_xrefs = get_cross_reference_data_for_ref_ids(db, ref_ids)
        xref_data = reference_id_to_xrefs[refs[0].reference_id]
        for x in xref_data:
            if x['curie'].startswith('PMID:'):
                assert x['curie'] == 'PMID:33622238'
            elif x['curie'].startswith('DOI:'):
                assert x['curie'] == 'DOI:10.1186/s12576-021-00791-4'
        reference_id_to_authors = get_author_data_for_ref_ids(db, ref_ids)
        author_data = reference_id_to_authors[refs[0].reference_id]
        assert author_data[0]['name'] == 'Shin-Ichiro Karaki'
        assert author_data[0]['orcid'] == 'ORCID:0000-0002-8525-2965'
    def test_db_read_functions5(self, db, load_sanitized_references, populate_test_mod_reference_types): # noqa
        """Bulk MeSH terms, mod reference types, and the list of mod abbreviations."""
        refs = db.query(ReferenceModel).order_by(ReferenceModel.curie).all()
        ref_ids = str(refs[0].reference_id) + ", " + str(refs[1].reference_id)
        reference_id_to_mesh_terms = get_mesh_term_data_for_ref_ids(db, ref_ids)
        mesh_data = reference_id_to_mesh_terms[refs[0].reference_id]
        assert mesh_data[0]['heading_term'] == 'Animals'
        assert mesh_data[1]['heading_term'] == 'Carbachol'
        assert mesh_data[1]['qualifier_term'] == 'pharmacology'
        reference_id_to_mod_reference_types = get_mod_reference_type_data_for_ref_ids(db, ref_ids)
        mrt_data = reference_id_to_mod_reference_types[refs[0].reference_id]
        assert mrt_data[0]['reference_type'] == 'Journal'
        assert mrt_data[0]['source'] == 'ZFIN'
        mods = get_mod_abbreviations(db)
        mods.sort()
        assert mods == ['FB', 'MGI', 'RGD', 'SGD', 'WB', 'XB', 'ZFIN']
    def test_pmc_read_and_write_functions(self, db, load_sanitized_references, populate_test_mod_reference_types): # noqa
        """Referencefile insert helpers and the PMC-package bookkeeping queries."""
        pmid = "33622238"
        crossRef = db.query(CrossReferenceModel).filter_by(curie='PMID:' + pmid).one_or_none()
        reference_id = crossRef.reference_id
        file_class = 'supplement'
        file_publication_status = 'final'
        file_name_with_suffix = "test_suppl.txt"
        md5sum = "d5073c77841aa7ae1066dbd2323dcd56"
        referencefile_id = insert_referencefile(db, pmid, file_class,
                                                file_publication_status,
                                                file_name_with_suffix,
                                                reference_id, md5sum,
                                                logger)
        insert_referencefile_mod_for_pmc(db, pmid, file_name_with_suffix,
                                         referencefile_id, logger)
        db.commit()
        refFile = db.query(ReferencefileModel).filter_by(reference_id=reference_id).one_or_none()
        assert refFile.display_name == 'test_suppl'
        assert refFile.file_extension == 'txt'
        assert refFile.md5sum == md5sum
        pmids = get_pmid_list_without_pmc_package(['ZFIN'], db)
        pmid_list = ['34354223', '35151207']
        pmids.sort()
        pmid_list.sort()
        assert pmids == pmid_list
        pmid_to_reference_id = get_pmid_to_reference_id_mapping(db)
        assert pmid_to_reference_id.get(pmid) == reference_id
| alliance-genome/agr_literature_service | tests/lit_processing/utils/test_db_read_utils.py | test_db_read_utils.py | py | 9,089 | python | en | code | 1 | github-code | 13 |
6810068590 | #!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
def g1(v):
    """First constraint: value of 2*x^2 + y^2 - 2 at point v = (x, y)."""
    x = v[0]
    y = v[1]
    return 2 * x * x + y * y - 2
def g2(v):
    """Second constraint: circle of radius 1/2 centred at (1/2, 1)."""
    dx = v[0] - 1/2
    dy = v[1] - 1
    return dx * dx + dy * dy - 1/4
def g(v):
    """Stack both constraint values into a length-2 numpy vector."""
    return np.array([g1(v), g2(v)])
def G(v):
    """Merit function: half the squared norm of the residual, G = ||g(v)||^2 / 2."""
    residual = g(v)
    return 1/2 * residual.dot(residual)
def J(v):
    """Analytic Jacobian of g at v (rows: dg1, dg2; columns: d/dx, d/dy)."""
    x, y = v[0], v[1]
    jac = np.empty((2, 2))
    jac[0, 0] = 4 * x
    jac[0, 1] = 2 * y
    jac[1, 0] = 2 * (x - 1/2)
    jac[1, 1] = 2 * (y - 1)
    return jac
# Convergence tolerance for the Newton iteration below.
eps=10**-8
def znajdz(v):
    # Damped Newton's method for the 2x2 nonlinear system g(v) = 0.
    # Iterate until successive iterates move by less than eps.
    vold=np.array([1000,1000])
    while(np.linalg.norm(vold-v)>eps):
        vold=v
        # Full Newton step: solve J(v) z = g(v).
        z=np.linalg.solve(J(v),g(v))
        # Backtracking line search: halve the step until the merit
        # function G decreases.
        w=1
        vtest=v-w*z
        while(G(vtest)>G(v)):
            w=w/2
            vtest=v-w*z
        v=vtest
    return v
# One deterministic run from (1, 1), then five runs from random starts
# near (1/2, 1); a start converging with G below eps is a true root,
# otherwise the iteration stalled in a local minimum of G.
s=znajdz(np.array([1,1]))
initial=np.array([1/2,1])+ 1*np.random.rand(5,2)
for v in initial:
    a=znajdz(v)
    if(G(a)<eps):
        print("Rozwiązanie",a)
    else:
print("Minimum lokalne",a) | matstep0/metody_numeryczne | zad14/zad14.py | zad14.py | py | 774 | python | en | code | 0 | github-code | 13 |
13344724191 | from tkinter import *
from datetime import datetime
import pytz
# Tkinter app: pick a timezone from a scrollable list and watch its clock.
root = Tk()
root.title('—')

frame = Frame()
Label(root, text='Choose any Timezone', fg='navyblue').pack()
frame.pack()

# Scrollable listbox holding every timezone name pytz knows about.
time_scrollbar = Scrollbar(frame, orient=VERTICAL)
time_listbox = Listbox(frame, yscrollcommand=time_scrollbar.set)
time_scrollbar.configure(command=time_listbox.yview)
for timezone in pytz.all_timezones:
    time_listbox.insert(END, timezone)
time_scrollbar.pack(side=RIGHT, fill=Y)
time_listbox.pack(side=LEFT, fill=BOTH, expand=1)

# Label whose text tracks the clock of the selected timezone.
time_variable = StringVar()
time_label = Label(root, textvariable=time_variable)
time_label.pack(fill=X)

# Manual event loop: poll the selection and refresh the displayed time.
while True:
    selection = time_listbox.curselection()
    if len(selection) > 0:
        timezone = pytz.timezone(time_listbox.get(selection[0]))
        current = datetime.now(timezone)
        # %I is the 12-hour clock field; the original '%1' was a typo that
        # rendered a literal '1' instead of the hour.
        time_variable.set(current.strftime('%Y-%m-%d %I:%M:%S %p %Z'))
    root.update()
| aaravdave/YoungWonks | Level 3/Tkinter/4) Listbox and Notebook/Question 2.py | Question 2.py | py | 908 | python | en | code | 0 | github-code | 13 |
def custom_sort(string):
    """Sort key: compare words by their case-swapped form."""
    return string.swapcase()
def generator(text, sep="", option=None):
    """Yield the pieces of *text* split on *sep*, optionally post-processed.

    *option* may be None (plain split), "unique" (order-preserving dedupe),
    "ordered" (sorted with the case-swapped ``custom_sort`` key) or
    "shuffle" (ten rounds of an odd/even interleave).  A non-string *text*,
    an empty or non-string *sep*, or any other truthy option yields the
    single token "ERROR".
    """
    if not isinstance(text, str) or not isinstance(sep, str) or not sep:
        yield "ERROR"
        return

    words = text.split(sep)

    if option == "unique":
        # dict.fromkeys keeps first-occurrence order while dropping dupes.
        words = list(dict.fromkeys(words))
    elif option == "ordered":
        words = sorted(words, key=custom_sort)
    elif option == "shuffle":
        # Deterministic "shuffle": ten rounds of odd-indexed elements
        # followed by even-indexed elements.
        for _ in range(10):
            words = words[1::2] + words[0::2]
    elif option:
        yield "ERROR"
        return

    yield from words
# Manual demo: exercise each generator option plus an invalid one.
if __name__ == '__main__':
    text = "Le Lorem Ipsum est simplement du faux texte."\
        + "Le Lorem est une base de simplemet"
    # text = "1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18"
    print("######test######")
    for word in generator(text, sep=" "):
        print(word)
    print("######test_shuffle######")
    for word in generator(text, sep=" ", option="shuffle"):
        print(word)
    print("######test_ordered######")
    for word in generator(text, sep=" ", option="ordered"):
        print(word)
    print("######test_unique######")
    for word in generator(text, sep=" ", option="unique"):
        print(word)
    print("######test error######")
    for word in generator("hello", sep=" ", option="uniqu"):
        print(word)
| Cizeur/Bootcamp_Python | day01/ex03/generator.py | generator.py | py | 1,574 | python | en | code | 0 | github-code | 13 |
31724254292 |
def f(stack):
    """Remove and return the BOTTOM element of *stack*.

    Pops elements recursively until the bottom is reached, then pushes
    every other element back in its original order.
    """
    top = stack.pop()
    if not stack:
        # `top` was the only element left, i.e. the bottom one.
        return top
    bottom = f(stack)
    stack.append(top)
    return bottom


def reverse(stack):
    """Reverse *stack* in place using only recursion plus pop/append."""
    if not stack:
        return
    bottom = f(stack)
    reverse(stack)
    stack.append(bottom)
6485178565 | """
https://www.codeeval.com/open_challenges/82/
"""
def isArmstrongNumber(line):
    """Print (and return) whether the number on *line* is an Armstrong number.

    An n-digit number is an Armstrong number when the sum of its digits,
    each raised to the n-th power, equals the number itself (e.g. 153).

    Parameters
    ----------
    line : str
        Text containing one non-negative integer; surrounding whitespace
        (including the trailing newline) is ignored.

    Returns
    -------
    bool
        True when the number is an Armstrong number.  The result is also
        printed, preserving the original behaviour.
    """
    digits = line.strip()
    if not digits:
        # The original accumulator logic reported True for whitespace-only
        # input (0 == 0); keep that behaviour explicit instead of crashing.
        print(True)
        return True
    number = int(digits)
    powered_sum = sum(int(d) ** len(digits) for d in digits)
    result = number == powered_sum
    print(result)
    return result
# Read candidate numbers (one per line) and report Armstrong status for each.
fileName = "Practice/Armstrong.txt"
with open(fileName, 'r') as file:
    # `with` guarantees the handle is closed; the original leaked it.
    for line in file:
        if len(line) > 0:
            isArmstrongNumber(line)
| elexie/Codeeval-python | easy/ArmstrongNumbers.py | ArmstrongNumbers.py | py | 491 | python | en | code | 0 | github-code | 13 |
26963586445 | # coding: utf-8
from http.client import IncompleteRead
from typing import List, Tuple
import tweepy
from loguru import logger
from tweepy import API, OAuthHandler, Stream
from urllib3.exceptions import ProtocolError, ReadTimeoutError
from .authapi import AuthApi
from .listener import ListenerConsole, ListenerDB
LOGGER_ROOT = "./logs/"
logger.add(LOGGER_ROOT + "general.log", level="DEBUG", rotation="5 MB")
# TODO Use "extended" mode https://github.com/tweepy/tweepy/issues/974
class MinerStream(object):
    """Receive tweets from the Twitter Streaming API and forward them to a
    configured sink (MongoDB, console, or bot).

    Typical usage: configure with ``search(...)``, ``to(...)`` and
    ``db_config(...)``, then call ``mine()`` to start streaming.
    """
    @logger.catch()
    def __init__(self):
        # Attributes related to Auth management
        self.auth_keys: List[AuthApi] = list()
        self.current_auth_idx: int = 0
        self.current_auth_handler: OAuthHandler = None
        # Attributes related to the Stream API
        self.keywords: List[str] = list()
        self.locations: Tuple[float] = ()
    @logger.catch()
    def to(self, output):
        """
        Define where the data will be sent: "database", "console" or "bot".

        Arguments:
            output {str} -- Where the data will be directed.
        """
        # NOTE(review): only the "database" branch returns self, so method
        # chaining works solely for that mode — confirm whether the other
        # branches should return self as well.
        if output == "database":
            logger.info("Output mode set to database")
            self._output = output
            return self
        elif output == "console":
            logger.info("Output mode set to console")
            self._output = output
        elif output == "bot":
            logger.info("Output mode set to bot")
            self._output = output
        else:
            logger.error("Invalid output mode passed")
    @logger.catch()
    def mine(self):
        """
        Method to collect tweets.
        If a rate limit error is raised, switch the account used and restart the collection
        """
        if not (self.keywords or self.locations):
            raise ValueError("No keywords or location provided")
        auth_key: AuthApi = self.auth_keys[self.current_auth_idx]
        logger.debug("Generating the API handler")
        logger.debug(
            "Creds provided :: {access_token} :://:: {consumer_token}",
            access_token=self.auth_keys[0].access_token,
            consumer_token=self.auth_keys[0].consumer_api_key,
        )
        self.current_auth_handler = auth_key.generate_api
        api = self.current_auth_handler  # NOTE(review): unused local — confirm it can be removed.
        if self._output == "console":
            self._streamer_console(self.current_auth_handler)
        elif self._output == "database":
            # Network hiccups are logged and the stream is restarted by
            # recursing into mine().
            try:
                self._streamer_db(self.config, self.current_auth_handler)
            except ReadTimeoutError:
                logger.error("Raised a ReadTimeoutError :: Restart the service")
                self.mine()
            except ProtocolError:
                logger.error("Raised a ProtocolError :: Restart the service")
                self.mine()
            except IncompleteRead:
                logger.error("Raised an IncompleteRead error :: Restart the service")
                self.mine()
    def search(self, *args) -> None:
        """
        Define the keywords or locations sent to the Twitter API to get the tweets.

        *Args:
            List[str]: Strings that are going to be asked to the API.
            _or_
            Tuple[float]: Tuple of 4 floats that will delimit the collection area.
        """
        logger.info("Search arguments definition")
        for elt in args:
            if type(elt) == str:
                self.keywords.append(elt)
            elif type(elt) == tuple and len(elt) == 4:
                self.locations = elt
            else:
                logger.error("Invalid keywords or locations provided to .search()")
        logger.debug(f"Keywords used to search :: {self.keywords}")
        logger.debug(f"Locations used to search :: {self.locations}")
    def db_config(
        self, host="localhost", port=27017, db="twitter", collection="tweet"
    ) -> None:
        """
        Configuration of the Mongo database used to store the tweets retrieved.

        Keyword Arguments:
            host {str} -- Host's name (default: {"localhost"})
            port {int} -- Port used (default: {27017})
            db {str} -- The name of the database (default: {"twitter"})
            collection {str} -- The name of the collection used (default: {"tweet"})
        """
        logger.info("DB configuration")
        config = {"host": host, "port": port, "db": db, "collection": collection}
        logger.debug("Database configuration set to: {config}", config=config)
        self.config = config
    @logger.catch()
    def _streamer_db(self, config, auth_handler):
        """Stream tweets into the MongoDB sink described by *config*."""
        logger.debug("Connecting to the API")
        api: API = tweepy.API(auth_handler)
        stream = Stream(auth_handler, ListenerDB(api, config))
        try:
            self._filter(stream)
        except IncompleteRead:
            logger.error("Raised an IncompleteRead error :: Restart the service")
            self.mine()
        except KeyboardInterrupt:
            logger.info("Stopped.")
            stream.disconnect()
    @logger.catch()
    def _streamer_console(self, auth_handler):
        """Stream tweets to stdout via ListenerConsole."""
        logger.debug("Generating the API")
        api: API = tweepy.API(auth_handler)
        # NOTE(review): indexes [0] into current_auth_handler although mine()
        # assigns it a single handler (not a tuple); _streamer_db uses the
        # handler directly — confirm which form is correct.
        stream = Stream(self.current_auth_handler[0], ListenerConsole(api))
        self._filter(stream)
    def _filter(self, stream: Stream):
        """Start *stream* asynchronously with the configured keywords or locations."""
        logger.debug("Locations passed :: {locations}", locations=self.locations)
        if self.keywords:
            logger.debug("Passing keywords to the Streamer")
            stream.filter(track=self.keywords, is_async=True)
        elif self.locations:
            logger.debug("Passing locations to the Streamer")
            stream.filter(locations=self.locations, is_async=True)
        else:
            logger.debug("Failed to start stream.")
        logger.info("...Stream started...")
    def _auth_next_account(self):
        """
        Internal function that shouldn't be called outside of mine, it tries to
        grab the next account and if it reaches the end, it wraps back around to the
        first set of keys.
        :return: the new api, but it also sets self.api so unnecessary
        """
        self.current_auth_idx = self.current_auth_idx + 1
        if len(self.auth_keys) <= self.current_auth_idx:
            self.current_auth_idx = 0
        auth_key: AuthApi = self.auth_keys[self.current_auth_idx]
        # NOTE(review): uses `_generate_api` while mine() uses `generate_api`
        # — confirm which AuthApi attribute is the intended one.
        self.current_auth_handler: Tuple[OAuthHandler, str] = auth_key._generate_api
22477766653 | from typing import Optional
import pytorch_lightning as pl
import segmentation_models_pytorch as smp
import torch.nn as nn
import torch.nn.functional
from src.metric.detection_f1 import DetectionF1Metric
from src.losses.negative_loss import NegativeLoss
from src.losses.combine_loss import CombineLoss
from src.losses.dot_detection_loss import DotDetectionLoss
from src.metric.counting import CountingMetric, CountingMAEMetric
class Regressor(pl.LightningModule):
    """LightningModule for dot-detection / object-counting heatmap regression.

    Wraps one of several backbones (custom v1/v2 models or smp UNet /
    DeepLabV3Plus), a configurable loss ("Combine" or "MSE"), and the
    detection/counting metrics.  ``mask_mode`` selects point ("dot") vs
    per-image count ("count") supervision.
    """
    def __init__(self,
                 model_name: str,
                 encoder_name: str,
                 input_channels: int,
                 output_channels: int,
                 spatial_mode: str,
                 loss_function: str,
                 lr: float,
                 train_steps: int,
                 visualize_test_images: bool,
                 obj_threshold: float,
                 mask_mode: str,
                 mask_size: tuple[int, int]
                 ):
        super().__init__()
        self._model_name = model_name
        self._encoder_name = encoder_name
        self._input_channels = input_channels
        self._output_channels = output_channels
        self._spatial_mode = spatial_mode
        self._loss_function = loss_function
        self._lr = lr
        self._train_steps = train_steps
        self._visualize_test_images = visualize_test_images
        self._obj_threshold = obj_threshold
        self._mask_mode = mask_mode
        self._mask_size = mask_size
        # Backbone selection; v1/v2 are project-local models, the rest come
        # from segmentation_models_pytorch.
        if self._model_name == 'v1':
            from src.model.mymodel import MyModel
            self.network = MyModel(encoder_name=self._encoder_name,
                                   classes=self._output_channels, reduce_spatial_mode=self._spatial_mode)
        elif self._model_name == 'v2':
            from src.model.mymodelv2 import MyModelV2
            self.network = MyModelV2(encoder_name=self._encoder_name,
                                     classes=self._output_channels, reduce_spatial_mode=self._spatial_mode)
        elif self._model_name in ['UNet', 'DeepLabV3Plus']:
            if self._model_name == 'UNet':
                network = smp.Unet
            elif self._model_name == 'DeepLabV3Plus':
                network = smp.DeepLabV3Plus
            else:
                raise NotImplementedError(
                    f'Unsupported model: {self._model_name}')
            self.network = network(
                encoder_name=self._encoder_name,
                encoder_weights='imagenet',
                in_channels=self._input_channels,
                classes=self._output_channels,
                activation=None,
            )
        if self._loss_function == 'Combine':
            self.loss = CombineLoss()
        elif self._loss_function == 'MSE':
            self.loss = torch.nn.MSELoss()
        else:
            raise NotImplementedError(
                f'Unsupported loss function: {self._loss_function}')
        # Metrics depend on the supervision mode.
        if self._mask_mode == 'dot':
            self.val_f1 = DetectionF1Metric()
            self.val_count = CountingMetric(self._output_channels)
        elif self._mask_mode == 'count':
            self.val_count = CountingMAEMetric(self._output_channels)
        self.test_f1_10 = DetectionF1Metric(correct_distance=1.0)
        self.test_f1_30 = DetectionF1Metric(correct_distance=3.0)
        self.test_f1_50 = DetectionF1Metric(correct_distance=5.0)
        self.test_f1_70 = DetectionF1Metric(correct_distance=7.0)
        self.test_count = CountingMetric(self._output_channels)
        self.save_hyperparameters()
    def optimizer_zero_grad(self, epoch, batch_idx, optimizer):
        # set_to_none=True frees gradient memory instead of filling with zeros.
        optimizer.zero_grad(set_to_none=True)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # The v2 backbone returns three scales; each is upsampled to the
        # target mask size and all three are returned for deep supervision.
        if self._model_name == 'v2':
            out, out_x2, out_x4 = self.network(x)
            out = nn.functional.interpolate(out, size=(self._mask_size[1], self._mask_size[0]), mode='bilinear', align_corners=True)
            out_x2 = nn.functional.interpolate(out_x2, size=(self._mask_size[1], self._mask_size[0]), mode='bilinear', align_corners=True)
            out_x4 = nn.functional.interpolate(out_x4, size=(self._mask_size[1], self._mask_size[0]), mode='bilinear', align_corners=True)
            return out, out_x2, out_x4
        return self.network(x)
    def calculate_loss(self, y_pred, mask, gt, is_stage=False):
        # Combine loss also receives decoded detections; the detection
        # threshold warms up from 0.1 toward _obj_threshold over ~epochs.
        if self._loss_function == 'Combine':
            return self.loss(y_pred, self.postprocessing(y_pred, thresh=min(0.1 + (self.current_epoch/25), self._obj_threshold)), mask, gt, is_stage=is_stage)
        elif self._loss_function == 'MSE':
            return self.loss(y_pred, mask), {}
        else:
            return self.loss(torch.sigmoid(y_pred), mask), {}
    def training_step(self, batch: torch.Tensor, batch_idx: int) -> Optional[torch.Tensor]:
        image, mask, gt = batch
        if self._model_name == 'v2':
            # Deep supervision: auxiliary scales are down-weighted.
            out, out_x2, out_x4 = self.forward(image)
            loss_x2, loss_d = self.calculate_loss(out, mask, gt)
            loss_x4, _ = self.calculate_loss(out_x2, mask, gt, is_stage=True)
            loss_x8, _ = self.calculate_loss(out_x4, mask, gt, is_stage=True)
            loss = loss_x2 + loss_x4 * 1/2 + loss_x8 * 1/4
        else:
            out = self.forward(image)
            loss, loss_d = self.calculate_loss(out, mask, gt)
        self.log('train_loss', loss, on_step=True,
                 on_epoch=True, sync_dist=True)
        for key, value in loss_d.items():
            self.log(f'train_{key}', value,
                     on_step=True, on_epoch=True, sync_dist=True)
        return loss
    def validation_step(self, batch: torch.Tensor, batch_idx: int) -> None:
        image, mask, gt = batch
        if self._model_name == 'v2':
            y_pred, _, _ = self.forward(image)
        else:
            y_pred = self.forward(image)
        assert y_pred.shape == mask.shape, f'Predicted shape: {y_pred.shape}, mask shape: {mask.shape}'
        if self._mask_mode == 'dot':
            predicted_points = self.postprocessing(y_pred)
            f1, precission, recall = self.val_f1(predicted_points, gt)
            self.log('val_f1', f1, on_step=False,
                     on_epoch=True, sync_dist=True)
            self.log('val_P', precission, on_step=False,
                     on_epoch=True, sync_dist=True)
            self.log('val_R', recall, on_step=False,
                     on_epoch=True, sync_dist=True)
            mae, mae_norm = self.val_count(predicted_points, gt)
        elif self._mask_mode == 'count':
            mae, mae_norm = self.val_count(y_pred, gt)
        self.log('val_mae', mae, on_step=False, on_epoch=True, sync_dist=True)
        self.log('val_mae_norm', mae_norm, on_step=False, on_epoch=True, sync_dist=True)
    def test_step(self, batch: torch.Tensor, batch_idx: int):
        image, mask, gt, _, _ = batch
        if self._model_name == 'v2':
            y_pred, _, _ = self.forward(image)
        else:
            y_pred = self.forward(image)
        if self._mask_mode == 'dot':
            predicted_points = self.postprocessing(y_pred)
            # Report F1/P/R at several matching-distance tolerances.
            for test, correct_distance in zip([self.test_f1_10, self.test_f1_30, self.test_f1_50, self.test_f1_70], [1.0, 3.0, 5.0, 7.0]):
                f1, precission, recall = test(predicted_points, gt)
                self.log(f'test_f1_{correct_distance}', f1,
                         on_step=False, on_epoch=True, sync_dist=True)
                self.log(f'test_P_{correct_distance}', precission,
                         on_step=False, on_epoch=True, sync_dist=True)
                self.log(f'test_R_{correct_distance}', recall,
                         on_step=False, on_epoch=True, sync_dist=True)
            mae, mae_norm = self.val_count(predicted_points, gt)
        elif self._mask_mode == 'count':
            mae, mae_norm = self.val_count(y_pred, gt)
        self.log(f'test_mae', mae, on_step=False,
                 on_epoch=True, sync_dist=True)
        self.log(f'test_mae_norm', mae_norm, on_step=False,
                 on_epoch=True, sync_dist=True)
    def configure_optimizers(self):
        # AdamW with a half-period cosine schedule stepped per batch.
        optimizer = torch.optim.AdamW(self.parameters(), lr=self._lr)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=self._train_steps//2,
        )
        schedule = {
            'scheduler': scheduler,
            'interval': 'step',
        }
        return [optimizer], [schedule]
    def postprocessing(self, y_pred_raw: torch.Tensor, thresh: float = None) -> torch.Tensor:
        # Decode raw heatmap logits into per-image point lists:
        # sigmoid -> 3x3 max-pool NMS -> threshold -> rows of
        # [class_id, x, y, raw_logit_score].
        y_pred = torch.sigmoid(y_pred_raw)
        y_pred = self._nms(y_pred)
        return_values = []
        thresh = self._obj_threshold if thresh is None else thresh
        for batch_id in range(y_pred.shape[0]):
            pred_b = y_pred[batch_id]
            predictions = []
            for class_id in range(pred_b.shape[0]):
                yx = torch.argwhere(pred_b[class_id] > thresh)
                if yx.shape[0] > 0:
                    predictions.append(
                        torch.cat([
                            torch.full((yx.shape[0], 1), class_id, dtype=torch.int32).to(
                                y_pred.device),
                            yx[:, [1, 0]],
                            y_pred_raw[batch_id][class_id][yx[:, 0],
                                                           yx[:, 1]][:, None],
                        ], 1),
                    )
            if len(predictions) > 0:
                predictions = torch.cat(predictions, dim=0)
            else:
                predictions = torch.zeros(
                    (0, 4), dtype=torch.float32).to(y_pred.device)
            return_values.append(predictions)
        return return_values
    @staticmethod
    def _nms(heat, kernel=3):
        # Keep only local maxima: a pixel survives iff it equals the max of
        # its kernel-sized neighbourhood.
        pad = (kernel - 1) // 2
        hmax = torch.nn.functional.max_pool2d(
            heat, (kernel, kernel), stride=1, padding=pad)
        keep = (hmax == heat).float()
        return heat * keep
44466297211 | from binance.client import Client
import json
from datetime import datetime, timedelta, time
import time
import schedule
#Binance apì keys
api_key= ''
api_secret= ''
client = Client(api_key, api_secret)
# Symbol you wish to trade and price
symbol = 'BNBUSDT'
price = client.get_avg_price(symbol=symbol)
# initialize bot lol
print('Copernico TWAP BOT initializing...')
time.sleep(1)
print('{} Price: {}'.format(symbol, price['price']))
## TWAP BUY
# HOW MUCH OF THE ASSET YOU WISH TO BUY
quantity = 0.04
def job():
t = time.localtime()
current_time = time.strftime("%D:%H:%M:%S", t)
print(current_time)
order_buy = client.create_order(symbol=symbol, side=Client.SIDE_BUY, type=Client.ORDER_TYPE_MARKET, quantity=quantity)
print(order_buy)
# Schedule a job to run for the next 24 hours
schedule.every(1).hours.until(timedelta(hours=24)).do(job)
all_jobs = schedule.get_jobs()
print(all_jobs)
while True:
schedule.run_pending()
time.sleep(1)
| PixelNoob/copernico | twap.py | twap.py | py | 975 | python | en | code | 0 | github-code | 13 |
class TicTacToe:
    """Console two-player tic-tac-toe board (nine turns, no win detection)."""

    def __init__(self):
        # Board cells start out numbered 1-9 and are overwritten with
        # 'x' / 'o' marks as squares are claimed.
        self.numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9]
        self.divider = '_+_+_'
        self.playerX = 'x'
        self.playerO = 'o'
        self.prompt = ' turn to chose a square (1-9): '
        self.input = ''
        self.turns = 0

    def clear_input(self):
        """Reset the buffered user input."""
        self.input = ''

    def is_input_valid(self):
        """Return True when the buffered input is an integer from 1 to 9.

        The original check ``1 > int(self.input) > 9`` can never be true,
        so EVERY integer (e.g. "42") was accepted and later crashed the
        board with an IndexError.
        """
        try:
            return 1 <= int(self.input) <= 9
        except ValueError:
            return False

    def get_user_input(self, turn):
        """Prompt the player whose mark is *turn* and buffer the answer."""
        if turn == 'x':
            self.input = input(f'{self.playerX}\'s{self.prompt}')
        else:
            self.input = input(f'{self.playerO}\'s{self.prompt}')

    def display_game_interface(self):
        """Print the 3x3 board with dividers between the rows."""
        print(f'{self.numbers[0]}|{self.numbers[1]}|{self.numbers[2]}')
        print(self.divider)
        print(f'{self.numbers[3]}|{self.numbers[4]}|{self.numbers[5]}')
        print(self.divider)
        print(f'{self.numbers[6]}|{self.numbers[7]}|{self.numbers[8]}')

    def start_game(self):
        """Alternate turns (x first) until nine valid moves have been played.

        NOTE(review): an already-claimed square can be re-chosen; the
        original allows this too, so it is preserved.
        """
        while self.turns < 9:
            turn = 'x' if self.turns % 2 == 0 else 'o'
            self.display_game_interface()
            self.get_user_input(turn)
            if self.is_input_valid():
                i = int(self.input)
                self.numbers[i - 1] = turn
                self.turns = self.turns + 1
            else:
                print(f'value {self.input} is not valid, try again')
# Entry point: run an interactive game when executed as a script.
if __name__ == '__main__':
    game = TicTacToe()
    game.start_game()
| vitorbarros/byu-cse210 | w01/prove_developer_solo_code_submission.py | prove_developer_solo_code_submission.py | py | 1,580 | python | en | code | 0 | github-code | 13 |
17387250812 | import socket
# Minimal UDP "Hello" server: prefixes every datagram with b"Hello " and
# echoes it back to the sender.  Listens on all interfaces, port 9999.
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('', 9999))
try:
    while True:
        data, addr = sock.recvfrom(1000)
        # recvfrom returns bytes on Python 3; the original concatenated a
        # str and raised TypeError on the first datagram received.
        reply = b"Hello " + data
        sock.sendto(reply, addr)
except KeyboardInterrupt:
    pass
finally:
    # Close the socket on any exit path, not only on Ctrl-C.
    sock.close()
| bhawiyuga/sister2016 | ipc/udp_simple_server.py | udp_simple_server.py | py | 241 | python | en | code | 0 | github-code | 13 |
74176772179 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 15 09:49:22 2022
@author: gregz
"""
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
import os.path as op
from astropy.convolution import convolve, Gaussian2DKernel, Gaussian1DKernel
from astropy.table import Table
from scipy.interpolate import interp1d, griddata
from astropy.modeling.models import Gaussian2D
from astropy.modeling.fitting import LevMarLSQFitter
from sklearn.decomposition import PCA
import seaborn as sns
# Rest-frame wavelengths (Angstrom) of optical emission lines of interest —
# presumably [OII] 3726/3729 through [SII] 6731; confirm against downstream use.
_lines = [3726.1, 3729.1, 3889., 4101.76, 4340.5, 4363.2, 4471., 4861.3,
          4958.9, 5006.8, 5875.7, 6300.3, 6312.1, 6548., 6562.8, 6583.4,
          6678., 6716.5, 6730.8]
# Global plotting style; amend as needed.
sns.set_context('poster')
sns.set_style('ticks')
plt.rcParams["font.family"] = "Times New Roman"
def get_continuum(y, sel, bins=25):
    """Estimate a smooth continuum under spectrum *y*.

    Pixels flagged in *sel* (e.g. emission/sky lines) are masked to NaN
    before per-bin medians are taken; a linear interpolation through the
    (bin centre, bin median) points is then evaluated at every pixel.

    Parameters
    ----------
    y : 1d numpy array
        Spectrum values.
    sel : 1d boolean numpy array
        True for pixels to EXCLUDE from the continuum estimate.
    bins : int
        Number of bins the spectrum is split into.

    Returns
    -------
    1d numpy array
        Continuum evaluated at every pixel of *y*.
    """
    # Work on a float copy so masking does not touch the caller's array.
    yz = y * 1.
    yz[sel] = np.nan
    x = np.array(np.arange(len(y)), dtype=float)
    xc = np.array([np.nanmean(xi) for xi in np.array_split(x, bins)])
    # Bin the MASKED copy — the original accidentally binned the unmasked
    # input, so flagged pixels still biased the medians.
    yc = np.array([np.nanmedian(xi) for xi in np.array_split(yz, bins)])
    good = np.isfinite(yc)
    I = interp1d(xc[good], yc[good], kind='linear', bounds_error=False,
                 fill_value='extrapolate')
    return I(x)
def pca_fit(H, data, sel):
    """Least-squares fit of the basis rows of *H* to *data* on selected pixels.

    Only pixels where *sel* is True and *data* is finite enter the fit;
    the fitted model is evaluated over the full pixel range.
    """
    good = sel * np.isfinite(data)
    coeffs = np.linalg.lstsq(H.T[good], data[good])[0]
    return np.dot(H.T, coeffs)
def get_sky_pixels(sky, init_sel):
    # Iteratively grow a mask of sky-line pixels.  Each of the two passes:
    # subtracts a coarse continuum, flags pixels more than 5 MAD above it,
    # dilates the flags by up to 5 pixels on each side, and folds them back
    # into ``init_sel`` (mutated in place) before the next pass.
    # Returns the mask from the LAST pass only.
    for j in np.arange(2):
        cont = get_continuum(sky, init_sel, bins=15)
        # robust scatter estimate: median absolute deviation of the residual
        mad = np.nanmedian(np.abs(sky-cont - np.nanmedian(sky-cont)))
        mask = sky-cont > 5. * mad
        # dilate the mask by +/- i pixels; note the in-place |= on the
        # shifted views deliberately lets flags propagate outward
        for i in np.arange(1, 6):
            mask[i:] += mask[:-i]
            mask[:-i] += mask[i:]
        init_sel += mask
    return mask
def make_new_cube(cube, xgrid, ygrid, xc, yc, size=7):
    """Resample ``cube`` onto a regular grid centered on (``xc``, ``yc``).

    The output grid spans +/- ``size`` (same units as the input grids) at
    0.25-unit spacing.  Channels with 20 or fewer finite spaxels are left
    entirely NaN.

    Returns (resampled cube, x grid, y grid).
    """
    # Source coordinates, expressed as offsets from the requested center.
    coords = np.zeros((xgrid.size, 2))
    coords[:, 0] = xgrid.ravel() - xc
    coords[:, 1] = ygrid.ravel() - yc
    npix = size * 2 * 4 + 1
    xg, yg = np.meshgrid(np.linspace(-size, size, npix),
                         np.linspace(-size, size, npix))
    resampled = np.full((cube.shape[0], npix, npix), np.nan)
    for chan in np.arange(cube.shape[0]):
        values = cube[chan].ravel()
        good = np.isfinite(values)
        if good.sum() > 20:  # need enough spaxels for a stable interpolation
            resampled[chan] = griddata(coords[good], values[good], (xg, yg),
                                       method='linear')
    return resampled, xg, yg
def get_data_and_grid(filename, xc, yc, sky=False):
    """Load an LRS2 data cube, its error cube, and its coordinate grids.

    Parameters
    ----------
    filename : str
        Path to the ``*_cube.fits`` file.
    xc, yc : float
        Unused here; kept for call-site compatibility.
    sky : bool
        If True, add the matching ``*_sky_cube.fits`` back into the data.

    Returns
    -------
    tuple
        (wavelength, xgrid, ygrid, data, error, primary header)
    """
    hdulist = fits.open(filename)
    hdr = hdulist[0].header
    # Reconstruct the linear world coordinates from the FITS WCS keywords.
    x = np.arange(hdr['NAXIS1']) * hdr['CDELT1'] + hdr['CRVAL1']
    y = np.arange(hdr['NAXIS2']) * hdr['CDELT2'] + hdr['CRVAL2']
    w = np.arange(hdr['NAXIS3']) * hdr['CDELT3'] + hdr['CRVAL3']
    xgrid, ygrid = np.meshgrid(x, y)
    data = hdulist[0].data
    if sky:
        # Re-add the subtracted sky so it can be re-estimated locally later.
        skylist = fits.open(filename.replace('_cube.fits', '_sky_cube.fits'))
        data = data + skylist[0].data
    errlist = fits.open(filename.replace('_cube.fits', '_error_cube.fits'))
    return w, xgrid, ygrid, data, errlist[0].data, hdr
def write_cube(wave, xgrid, ygrid, Dcube, outname, he):
    """Write an ADR-corrected data cube to ``outname`` as a FITS file.

    ``wave``/``xgrid``/``ygrid`` define a simple linear pixel WCS for the
    three cube axes, and ``he`` supplies the original header cards to
    carry over into the output.
    """
    hdu = fits.PrimaryHDU(np.array(Dcube, dtype='float32'))
    # Simple linear WCS: reference pixel 1 on each axis.
    wcs_cards = {'CRVAL1': xgrid[0, 0], 'CRVAL2': ygrid[0, 0],
                 'CRVAL3': wave[0],
                 'CRPIX1': 1, 'CRPIX2': 1, 'CRPIX3': 1,
                 'CTYPE1': 'pixel', 'CTYPE2': 'pixel', 'CTYPE3': 'pixel',
                 'CDELT1': xgrid[0, 1] - xgrid[0, 0],
                 'CDELT2': ygrid[1, 0] - ygrid[0, 0],
                 'CDELT3': wave[1] - wave[0]}
    for card, value in wcs_cards.items():
        hdu.header[card] = value
    for key in he.keys():
        # Keep the cards set above, and skip section/scaling keywords that
        # no longer apply to the resampled cube.
        if key in hdu.header:
            continue
        if ('CCDSEC' in key) or ('DATASEC' in key):
            continue
        if ('BSCALE' in key) or ('BZERO' in key):
            continue
        hdu.header[key] = he[key]
    hdu.writeto(outname, overwrite=True)
# --- Reduction configuration -------------------------------------------
base = '/Users/gregz/cure/Remedy/for_ashley/'
bfile = op.join(base, 'SN2022erw_20220314_LRS2B_cube.fits')
rfile = op.join(base, '2022gnp_20220411_LRS2R_cube.fits')
objname = '2022gnp_20220411'
#posb = (-2.00, -2.5)
posr = (0.25, -0.75)  # (x, y) position of the target in the red cube
#posr = (-1.6, -0.8)
#posbh = (-3.75, -0.25)
posrh = (-3.75, -0.25)  # position used for the background aperture
#posrh = (-3.35, 1.45)
radius = 2.0  # extraction aperture radius
redshift = 0.01086
allwave = np.arange(3650., 10500.7, 0.7)
# NOTE(review): the second assignment overrides the first, restricting the
# output wavelength grid to the red channel — presumably intentional here.
allwave = np.arange(6450., 10500.7, 0.7)
spec, SKY, BACK, error = ([], [], [], [])  # per-channel extraction products
for filename, pos, posh in zip([rfile], [posr], [posrh]):
xc, yc = pos
xc2, yc2 = posh
wave, xgrid, ygrid, data, datae, header = get_data_and_grid(filename, xc, yc,
sky=True)
orign = np.zeros(wave.shape, dtype=bool)
for line in _lines:
orign[np.abs(wave-line*(1+redshift))<10.] = True
uvmask = np.abs(wave-3736.0) < 1.6
data[uvmask] = np.nan
datae[uvmask] = np.nan
d = np.sqrt((xgrid-xc)**2 + (ygrid-yc)**2)
d2 = np.sqrt((xgrid-xc2)**2 + (ygrid-yc2)**2)
skysel = (d > 4.) * (xgrid > -1.) * (np.isfinite(data).sum(axis=0) > 3000.)
detwave = (1+redshift) * 6563.
wsel = np.abs(wave-detwave) < 5.
image = np.nanmean(data[wsel], axis=0)
skysel = (np.isfinite(data).sum(axis=0) > 3800.)* (image < np.nanpercentile(image, 25))
plt.figure()
plt.imshow(image, origin='lower')
plt.imshow(skysel, origin='lower', alpha=0.5)
plt.show()
sky = np.nanmedian(data[:, skysel], axis=1)
sky[wave<4650.] = sky[wave<4650]*0.92
skydata = data * 1.
skydata[:] = sky[:, np.newaxis, np.newaxis]
skydata[np.isnan(data)] = np.nan
data = data - sky[:, np.newaxis, np.newaxis]
# New smoothed/interpolated spectra
for i in np.arange(data.shape[1]):
for j in np.arange(data.shape[2]):
sel = np.isnan(data[:, i, j])
if (~sel).sum() > 100.:
data[:, i, j] = np.interp(wave, wave[~sel], data[~sel, i, j], left=np.nan,
right=np.nan)
datae[:, i, j] = np.interp(wave, wave[~sel], datae[~sel, i, j], left=np.nan,
right=np.nan)
datae[sel, i, j] = datae[sel, i, j]*1.5
cont_cube = data * np.nan
for i in np.arange(data.shape[1]):
for j in np.arange(data.shape[2]):
if np.isfinite(data[:,i,j]).sum() > 1000.:
fsel = np.isnan(data[:, i, j])
cont_cube[:, i, j] = get_continuum(data[:, i, j], orign, bins=100)
cont_cube[fsel, i, j] = np.nan
cont_sub = data - cont_cube
cont_sub[-1] = 0.0
objsel = np.isfinite(cont_sub).sum(axis=0) == len(cont_sub)
objsel = objsel * (d > radius)
# Fit PCA Model
pcawave = np.ones(wave.shape, dtype=bool)
pca = PCA(n_components=5).fit(cont_sub[pcawave][:, objsel].T)
Hk = pca.components_
em_model = data * 0.
# Fit residuals for PCA eigenvalues and subtract model
for i in np.arange(data.shape[1]):
for j in np.arange(data.shape[2]):
yp = cont_sub[:, i, j]
res = pca_fit(Hk, yp[pcawave],
np.ones((pcawave.sum(),), dtype=bool))
ycopy = np.nan * yp
ycopy[pcawave] = res
ycopy[np.isnan(yp)] = np.nan
em_model[:, i, j] = ycopy
background = cont_cube * 0.
pixsize = xgrid[0, 1] - xgrid[0, 0]
for i in np.arange(cont_cube.shape[0]):
image = cont_cube[i] * 1.
nanmask = np.isnan(image)
mask = d <= radius
image[mask] = np.nan
smooth = convolve(image, Gaussian2DKernel(radius*1.0/2.35/pixsize),
boundary='fill', fill_value=0.0)
smooth[nanmask] = np.nan
background[i] = smooth
outname = op.join(base, '%s_sky.fits' % op.basename(filename).split('_cube')[0])
write_cube(wave, xgrid, ygrid, skydata, outname, header)
outname = op.join(base, '%s_pca.fits' % op.basename(filename).split('_cube')[0])
write_cube(wave, xgrid, ygrid, em_model, outname, header)
outname = op.join(base, '%s_smooth.fits' % op.basename(filename).split('_cube')[0])
write_cube(wave, xgrid, ygrid, background, outname, header)
outname = op.join(base, '%s_sub.fits' % op.basename(filename).split('_cube')[0])
sub = data-background-em_model
back = background + em_model
write_cube(wave, xgrid, ygrid, sub, outname, header)
fitter = LevMarLSQFitter()
image = np.nanmedian(sub[100:300], axis=0) * 1e18
sel = (d < 3.) * np.isfinite(image)
G = Gaussian2D(amplitude=np.nanmax(image), x_mean=xc, y_mean=yc)
fit = fitter(G, xgrid[sel], ygrid[sel], image[sel])
for n in fit.param_names:
print('%s: %0.2f' % (n, getattr(fit, n).value))
W = fit(xgrid, ygrid)
W = W / W.sum()
ispec = np.zeros((sub.shape[0],))
iback = np.zeros((sub.shape[0],))
isky = np.zeros((sub.shape[0],))
ierror = np.zeros((sub.shape[0],))
for i in np.arange(sub.shape[0]):
dsel = (d<=radius) * np.isfinite(sub[i])
d2sel = (d2<=radius) * np.isfinite(sub[i])
cor = W[dsel].sum()
ispec[i] = (np.nansum(sub[i, dsel] * W[dsel]) /
np.sum(W[dsel]**2)) / cor
isky[i] = np.nansum(skydata[i, dsel])
iback[i] = np.nansum(back[i, d2sel])
ierror[i] = (np.sqrt(np.nansum(datae[i, dsel]**2 *
W[dsel]**2)) /
np.sum(W[dsel]**2)) / cor
spec.append(np.interp(allwave, wave, ispec, left=np.nan, right=np.nan))
error.append(np.interp(allwave, wave, ierror, left=np.nan, right=np.nan))
SKY.append(np.interp(allwave, wave, isky, left=np.nan, right=np.nan))
BACK.append(np.interp(allwave, wave, iback, left=np.nan, right=np.nan))
# overlap = np.isfinite(spec[0]) * np.isfinite(spec[1])
# norm = np.nanmedian(spec[0][overlap] / spec[1][overlap])
# normback = np.nanmedian(BACK[0][overlap] / BACK[1][overlap])
# normsky = np.nanmedian(SKY[0][overlap] / SKY[1][overlap])
# G = Gaussian1DKernel(2.5)
# spec[1] = convolve(spec[1], G) * norm
# error[1] = error[1] * norm
# SKY[1] = convolve(SKY[1], G) * normsky
# BACK[1] = convolve(BACK[1], G) * normback
# avg = np.nanmean(spec, axis=0)
# sky = np.nanmean(SKY, axis=0)
# avgerror = np.nanmean(error, axis=0)
# back = np.nanmean(BACK, axis=0)
avg = spec[0]
sky = SKY[0]
back = BACK[0]
avgerror = error[0]
T = Table([allwave, avg, avgerror, back, sky], names=['wavelength', 'f_lam', 'e_lam', 'b_lam', 's_lam'])
T.write(op.join(base, '%s_spec.dat' % objname), format='ascii.fixed_width_two_line')
wran = [[3650, 10500], [3650, 3900], [3900, 5200], [6600, 6950],
[8000, 10000]]
wran = [[6450, 10500], [6600, 6950], [7300, 7800], [7900, 8800],
[8800, 9500]]
fig, ax = plt.subplots(5, 1, figsize=(20, 10))
ax[0].set_position([0.1, 0.53, 0.86, 0.42])
ax[1].set_position([0.1, 0.08, 0.19, 0.42])
ax[2].set_position([0.323, 0.08, 0.19, 0.42])
ax[3].set_position([0.546, 0.08, 0.19, 0.42])
ax[4].set_position([0.77, 0.08, 0.19, 0.42])
for i, wr in enumerate(wran):
wsel = (allwave>wr[0]) * (allwave<wr[1])
ax[i].plot(allwave[wsel], sky[wsel]*1e17, color='olive', lw=0.5, alpha=0.5, label='Sky Model')
ax[i].plot(allwave[wsel], back[wsel]*1e17, color='salmon', lw=0.5, label='Background Galaxy + HII region + sky residuals')
ax[i].plot(allwave[wsel], avg[wsel]*1e17, color='k', lw=0.5, label='Target')
f_ax10 = ax[i]
f_ax10.tick_params(axis='both', which='both', direction='in')
f_ax10.tick_params(axis='y', which='both', left=True, right=True)
f_ax10.tick_params(axis='x', which='both', bottom=True, top=True)
f_ax10.tick_params(axis='both', which='major', length=8, width=2)
f_ax10.tick_params(axis='both', which='minor', length=5, width=1)
f_ax10.minorticks_on()
ax[i].set_xlim([wr[0], wr[1]])
ax[i].set_ylim([-5, 75])
ax[0].legend()
ax[0].set_ylabel(r'F$_{\lambda}$ (10$^{-17}$erg s$^{-1}$ cm$^{-1}$ ${\AA}^{-1}$)', labelpad=10, fontsize=22)
plt.savefig(op.join(base, '%s_plot.png' % objname), dpi=150)
| grzeimann/Panacea | sn_reduction_example.py | sn_reduction_example.py | py | 12,577 | python | en | code | 8 | github-code | 13 |
72752780177 | """
SJTU-AU333-数字图像处理-作业2边缘检测
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
from numpy import *
import os
from Smoothing import medianSmooth
# 支持中文标题
plt.rcParams['font.sans-serif']=['SimHei'] #显示中文标签
plt.rcParams['axes.unicode_minus']=False #这两行需要手动设置
# 输入输出目录与算子列表
srcpath = './MSGaussRes'
respath = './EdgeDetectRes'
bipath = './BiRes'
opeLst = ['sobel', 'lap', 'LoG', 'canny']
def edgeDetect(filename, ope):
    """Run edge detection on an image from ``srcpath``, show and save it.

    The operator work is delegated to :func:`edgeDetectNoWrite` so the two
    entry points cannot drift apart (they previously duplicated every
    operator branch).

    :param filename: image file name inside ``srcpath``
    :param ope: operator to use; one of 'sobel', 'lap', 'LoG', 'canny'
    :return: tuple of (edge image, operator name)
    :raises ValueError: if ``ope`` is not a supported operator
    """
    if ope not in opeLst:
        raise ValueError('算子参数错误,尝试使用\'sobel\', \'lap\', \'LoG\', \'canny\'')
    # Read the input file from the source directory.
    file = os.path.join(srcpath, filename)
    filehead = os.path.splitext(filename)[0]
    filetail = os.path.splitext(filename)[1]
    img = cv2.imread(file)
    # Reuse the shared operator implementation instead of duplicating it.
    res = edgeDetectNoWrite(img, ope)
    outfilehead = filehead + '-' + ope
    cv2.imshow(outfilehead, res)
    cv2.waitKey(100)
    cv2.imwrite(os.path.join(respath, outfilehead + filetail), res)
    return res, ope
def edgeDetectNoWrite(img, ope):
    """Apply a 3x3 edge-detection operator to ``img`` and return the result.

    Supported operators: 'sobel', 'lap' (Laplacian), 'LoG'
    (Gaussian-smoothed Laplacian, i.e. the Marr operator) and 'canny'.

    :param img: image data (numpy array)
    :param ope: operator name, one of ``opeLst``
    :return: edge-response image
    :raises ValueError: if ``ope`` is not a supported operator
    """
    if ope not in opeLst:
        raise ValueError('算子参数错误,尝试使用\'sobel\', \'lap\', \'LoG\', \'canny\'')
    res = None
    if ope == 'sobel':
        # Gradient magnitude approximated by (|dx| + |dy|) / 2.
        grad_x = cv2.Sobel(img, cv2.CV_16S, 1, 0, ksize=3)
        grad_y = cv2.Sobel(img, cv2.CV_16S, 0, 1, ksize=3)
        grad_x = cv2.convertScaleAbs(grad_x)
        grad_y = cv2.convertScaleAbs(grad_y)
        res = cv2.addWeighted(grad_x, 0.5, grad_y, 0.5, 0)
    elif ope == 'lap':
        res = cv2.convertScaleAbs(cv2.Laplacian(img, cv2.CV_16S, ksize=3))
    elif ope == 'LoG':
        # Smooth first so the Laplacian is less sensitive to noise.
        smoothed = cv2.GaussianBlur(img, (3, 3), 0)
        res = cv2.convertScaleAbs(cv2.Laplacian(smoothed, cv2.CV_16S, ksize=3))
    elif ope == 'canny':
        res = cv2.Canny(img, 40, 110)
    return res
def binaryzation(filename, threshold):
    """Binarize an edge image from ``respath`` with a fixed threshold.

    Pixels >= ``threshold`` become 255, all others 0.  The result is shown
    briefly and written to ``bipath`` under the same file name.

    :param filename: file name inside ``respath``
    :param threshold: cut value in [0, 255]
    :return: the binarized image (same shape as the input)
    """
    img = cv2.imread(os.path.join(respath, filename))
    binary = img.copy()
    binary[binary >= threshold] = 255
    binary[binary < threshold] = 0
    cv2.imshow(filename, binary)
    cv2.waitKey(100)
    cv2.imwrite(os.path.join(bipath, filename), binary)
    return binary
if __name__ == '__main__':
# print('Hello world')
#
# # 所有算子测试
# for ope in opeLst:
# for filename in os.listdir(srcpath):
# res, op = edgeDetect(filename, ope)
# cv2.destroyAllWindows()
#
# # 所有算子测试结果二值化
# for ope in opeLst:
# for filename in os.listdir(respath):
# bires = binaryzation(filename, 30)
# cv2.destroyAllWindows()
# 在sobel、Canny算子中选用不同的梯度阈值测试
fname = os.path.join(respath, 'gn(5, 5)-sobel.jpg')
img = cv2.imread(fname)
thLst = [40, 70, 100, 130, 160]
lowThLst = [10, 40, 70, 40, 40]
highLst = [110, 110, 110, 80, 140]
# # sobel不同阈值
# for th in thLst:
# tmp = img.copy()
# tmp[tmp >= th] = 255
# tmp[tmp < th] = 0
# cv2.imshow(fname, tmp)
# cv2.waitKey(0)
# cv2.imwrite('./MThRes/' + str(th) + '-gn(5, 5)-sobel.jpg', tmp)
# canny不同阈值
# fname = os.path.join(srcpath, 'gn(5, 5).jpg')
# img = cv2.imread(fname)
# for i in range(5):
# low = lowThLst[i]
# high = highLst[i]
# res = cv2.Canny(img, low, high)
# cv2.imshow(fname, res)
# cv2.waitKey(0)
# cv2.imwrite('./MThRes/' + str(low) + '-' + str(high) + '-gn(5, 5)-sobel.jpg', res)
# 噪声影响测试
f1 = './EdgeDetect/noise-no.bmp'
f2 = './EdgeDetect/noise-salt.jpg'
img1 = cv2.imread(f1)
img2 = cv2.imread(f2)
# res1 = edgeDetectNoWrite(img1, 'sobel')
# res2 = edgeDetectNoWrite(img1, 'lap')
# res3 = edgeDetectNoWrite(img2, 'sobel')
# res4 = edgeDetectNoWrite(img2, 'lap')
# cv2.imwrite('./NoiseRes/' + 'sobel-' + 'noise-no.bmp', res1)
# cv2.imwrite('./NoiseRes/' + 'lap-' + 'noise-no.bmp', res2)
# cv2.imwrite('./NoiseRes/' + 'sobel-' + 'noise-salt.bmp', res3)
# cv2.imwrite('./NoiseRes/' + 'lap-' + 'noise-salt.bmp', res4)
# 中值平滑+canny解决椒盐噪声
res5 = medianSmooth(img2, (7, 7))
res5 = edgeDetectNoWrite(res5, 'canny')
cv2.imwrite('./NoiseRes/' + 'median+canny-' + 'noise-salt.bmp', res5)
| hahhforest/SmallProjects | DigitalImageProcessing/边缘检测/EdgeDetect.py | EdgeDetect.py | py | 5,706 | python | en | code | 0 | github-code | 13 |
41633358786 | """import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from collections import namedtuple
n_groups = 3
means_alg = (20, 35, 30, 35, 27)
std_men = (2, 3, 4, 1, 2)
means_women = (25, 32, 34, 20, 25)
std_women = (3, 5, 2, 3, 3)
index = np.arange(n_groups)
bar_width = 0.35
opacity = 0.4
error_config = {'ecolor': '0.3'}
rects1 = ax.bar(index, means, bar_width,
alpha=opacity, color='b',
yerr=std_men, error_kw=error_config,
label='Men')
rects2 = ax.bar(index + bar_width, means_women, bar_width,
alpha=opacity, color='r',
yerr=std_women, error_kw=error_config,
label='Women')
ax.set_xlabel('Group')
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.set_xticks(index + bar_width / 2)
ax.set_xticklabels(('Luxemburg', 'Cologne', 'New York'))
#ax.legend()
fig.tight_layout()
plt.show()
"""
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 13})
plt.rc('legend', fontsize=12.5)
plt.rc('xtick', labelsize=12)
plt.rc('ytick', labelsize=11.5)
plt.rc('axes', titlesize=11)
plt.rc('axes', labelsize=16.8)
N = 4
'''
means_infected_low = (0.9128-(305.71/787), 0.9542-(143.53/220), 0.9405-(155.61/433))
means_frw_low = (355.48/790, 143.53/220, 155.61/433)
means_infected = (0.9917-(355.48/790), 0.9463-(222.75/436), 0.9547-(118.63/600))
means_frw = (305.71/787, 222.75/436, 118.63/600)
'''
''' FULL RESULTS LUXEMBURG
x = np.asarray((100,10,5,3,2,1))
frw = np.asarray((330.136, 345.506, 356.647, 357.763, 362.719, 384.708)) / cnum
recv = np.asarray((0.9323, 0.934, 0.9467, 0.9365, 0.9432, 0.9494)) - frw
'''
cnum_lux = 790 #cars number lux
x_lux = np.asarray((100,10,5,1))
frw_lux = np.asarray((330.136, 345.506, 356.647, 384.708))*0.55 / cnum_lux
recv_lux = np.asarray((0.9323, 0.934, 0.9467, 0.9494)) - frw_lux
x_lux, frw_lux, recv_lux = np.flip(x_lux), np.flip(frw_lux), np.flip(recv_lux)
cnum_ny = 600 #cars number ny
x_ny = np.asarray((100,10,5,1))
frw_ny = np.asarray((94.32, 96.06, 103.15, 134.31)) / cnum_ny
recv_ny = np.asarray((0.9266, 0.9429, 0.9244, 0.974)) - frw_ny
x_ny, frw_ny, recv_ny = np.flip(x_ny), np.flip(frw_ny), np.flip(recv_ny)
ind = np.arange(N) # the x locations for the groups
width = 0.37 # the width of the bars: can also be len(x) sequence
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)
gap = 0.07
p1 = ax1.bar(ind, frw_lux, width, color='b', edgecolor='k')
p2 = ax1.bar(ind, recv_lux, width,
bottom=frw_lux, color='#ffa500', edgecolor='k')
p3 = ax2.bar(ind, frw_ny, width, color='b', edgecolor='k')
p4 = ax2.bar(ind, recv_ny, width,
bottom=frw_ny, color='#ffa500', edgecolor='k')
#p1 = ax.plot(x, frw)
#p1 = ax.plot(x, recv)
'''
# Shrink current axis's height by 10% on the bottom
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
# Put a legend below current axis
#ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),
# fancybox=True, shadow=True, ncol=5)
plt.ylabel('Nodes (%)')
plt.title('')
plt.xticks(ind+width+gap, ('Luxemburg', 'Cologne', 'New York'))
plt.yticks(np.arange(0, 1.1, 0.1))
plt.legend((p2[0], p4[0], p3[0]), ('Receivers high density', 'Receivers low density', 'Forwarders'),
loc='upper center', bbox_to_anchor=(0.5, -0.08),
fancybox=True, shadow=True, ncol=5)
'''
#plt.ylabel('Nodes (%)')
plt.title('New York')
plt.xticks(ind, ('1', '5', '10', 'Unlimited'))
plt.yticks(np.arange(0, 1.1, 0.1))
plt.sca(ax1)
plt.ylabel('Nodes (%)')
plt.title('Luxembourg')
plt.xticks(ind, ('1', '5', '10', 'Unlimited'))
plt.yticks(np.arange(0, 1.1, 0.1))
plt.ylim((0.0, 1.0))
fig.text(0.5, 0.019, 'Emitters Length Limit', ha='center', va='center')
ax1.legend((p1[0], p2[0]), ('Relay', 'EPIC'), loc='lower left', fancybox=True)
plt.tight_layout()
plt.gcf().subplots_adjust(bottom=0.1)
#plt.show()
plt.savefig('grafici/top_car/emitters.png', dpi=300) | MrSpadala/Epic_VANET | grafici/top_car/graph-TC-emitters.py | graph-TC-emitters.py | py | 4,027 | python | en | code | 2 | github-code | 13 |
"""Demonstrate torchvision transform composition on one face-landmarks sample."""
from matplotlib import pyplot as plt
from torchvision import transforms

from PyTorch_2.Faces import Faces_dataset
from PyTorch_2.Faces.Faces_show_landmarks import show_landmarks
from PyTorch_2.Faces.models.Transform import Rescale, RandomCrop

# Rescale the short side of the image to 256 pixels.
scale = Rescale(256)
# Randomly crop a 128x128 square.  (The original comment claimed 224; the
# composed pipeline below is the one that crops to 224.)
crop = RandomCrop(128)
composed = transforms.Compose([Rescale(256), RandomCrop(224)])

# --- Apply each of the transforms above to a single sample ---
plt.figure()
face_dataset = Faces_dataset.FaceLandmarksDataset(csv_file='F:/faces/face_landmarks.csv', root_dir='F:/faces/')
sample = face_dataset[39]

for i, tsform in enumerate([scale, crop, composed]):
    transformed_sample = tsform(sample)

    ax = plt.subplot(1, 3, i + 1)  # arrange the 3 results side by side
    plt.tight_layout()
    ax.set_title(type(tsform).__name__)
    show_landmarks(**transformed_sample)

plt.show()
12762850396 | '''APARTMENT BUILDING ADMINISTRATOR undolist.append(deepcopy(l))'''
def testInit(expenseslist): #10 items in the list at the begining of the execution
expenseslist.append((1, "gas", 100))
expenseslist.append((2, "water", 80))
expenseslist.append((2, "heating", 200))
expenseslist.append((2, "electricity", 170))
expenseslist.append((5, "gas", 225))
expenseslist.append((6, "water", 400))
expenseslist.append((7, "electricity", 855))
expenseslist.append((8, "heating", 90))
expenseslist.append((9, "water", 432))
expenseslist.append((10, "gas", 184))
#print(expenseslist)
def printmenu():
    """Print the numbered menu of available operations."""
    options = [
        "Add apartment",
        "Remove apartment",
        "Remove apartments from a to b",
        "Remove apartment type",
        "Replace apartment type expense with",
        "List",
        "List expenses for apartment:",
        "List expenses <,=,> than the value:",
        "Sum the expenses for the type:",
        "Write the maximum amount per each expense type for apartment:",
        "Sort apartments by total expenses",
        "Sort the types of expenses by amountS",
        "Keep only expenses for type:",
        "Keep only expenses < than the value:",
        "Undo the last operation",
    ]
    for number, option in enumerate(options, start=1):
        print("%d.%s" % (number, option))
def help_command():
    """Print the list of commands accepted by the console UI."""
    commands = [
        "add <apartment> <type> <amount>",
        "remove <apartment>",
        "remove <start apartment> to <end apartment>",
        "remove <type>",
        "replace <apartment> <type> with <amount>",
        "list",
        "list <apartment>",
        "list [ < | = | > ] <amount>",
        "sum <type>",
        "max <apartment>",
        "sort apartment",
        "sort type",
        "filter <type>",
        "filter <value>",
        "undo",
    ]
    print("\t These are the valid commands:")
    for command in commands:
        print("\t " + command)
def readCommand(cmd):
    """Split a raw command line into (command word, parameter list).

    For a bare command (no space) the parameter part is the empty string.
    Otherwise ``params`` is a list whose FIRST element is always '' (the
    split of the leading space) and whose later elements are the stripped
    arguments -- downstream handlers rely on this 1-based layout.

    :raises ValueError: "Incomplete command" when the first argument is empty
    """
    if cmd.find(" ") == -1:
        command = cmd
        '''
        Fara parametrii. help sau exit
        '''
        params = ""
        return (command, params)
    else:
        command = cmd[0:cmd.find(" ")] #extract the command word, e.g. add, remove
        '''
        Cu parametrii
        '''
        params = cmd[cmd.find(" "):]
        params = params.split(" ")
        # strip whitespace from every argument (index 0 stays '')
        for i in range(1, len(params)):
            params[i] = params[i].strip()
        # NOTE(review): entries were already stripped to '' above, so this
        # ' ' check can never fire; a double space instead produces an ''
        # argument and triggers the error below -- confirm this is intended.
        i=1
        while i<len(params):
            if params[i]==' ':
                params.pop(i)
                i=i-1
            i=i+1
        if params[1]=='':
            raise ValueError("Incomplete command")
        else:
            return (command, params)
def findById(expenseslist, expense_id, expense_tip):
    """Return the index of the expense with the given apartment id AND
    expense type, or -1 when no such expense exists."""
    for position, expense in enumerate(expenseslist):
        if expense[0] == expense_id and expense[1] == expense_tip:
            return position
    return -1
def findbyid(expenseslist, expense_id):
    """Return the index of the first expense of apartment ``expense_id``,
    or -1 when the apartment has no expenses."""
    for position, expense in enumerate(expenseslist):
        if expense[0] == expense_id:
            return position
    return -1
def addexpense(expenseslist, expense):
    """Append ``expense`` (id, type, amount) to ``expenseslist``.

    Duplicates are allowed: an apartment may hold several expenses of the
    same type.
    """
    expenseslist.append(expense)
def add_command(expenseslist, cmd, undolist):
    """Handle ``add <apartment> <type> <amount> [...]``.

    ``cmd`` is the parameter list produced by ``readCommand`` (index 0 is
    always ''), so arguments come in triplets starting at index 1.  All
    triplets are validated BEFORE anything is modified, so a bad argument
    can no longer leave the list half-updated or push a spurious undo
    snapshot.

    :raises ValueError: for a malformed triplet (non-positive apartment id,
        empty type or negative amount)
    """
    if (len(cmd) - 1) % 3 != 0:
        raise ValueError("Invalid input. Expense was not added")
    fin = int(len(cmd) / 3)
    expenses = []
    for i in range(0, fin):
        nrap = int(cmd[3 * i + 1])
        tip = cmd[3 * i + 2]
        amount = int(cmd[3 * i + 3])
        # BUG FIX: the type of EVERY triplet is checked now; previously only
        # cmd[2] (the first triplet's type) was validated.
        if nrap <= 0 or len(tip) == 0 or amount < 0:
            raise ValueError("Invalid input. Expense was not added")
        expenses.append((nrap, tip, amount))
    # Snapshot for undo only once the whole input is known to be valid.
    undolist.append(expenseslist[:])
    expenseslist.extend(expenses)
def listall(expenseslist):
    """Print every expense, or 'List is empty.' when there are none."""
    if not expenseslist:
        print("List is empty.")
        return
    for nrap, tip, amount in expenseslist:
        print("Apartment", nrap, "type of expense", tip, "amount", amount)
def listbyapartment(expenseslist, id):
    """Print every expense of apartment ``id``, or a not-found message."""
    if not any(expense[0] == id for expense in expenseslist):
        print("Apartment", id, "does not exist")
        return
    print("Apartment", id, "has the following expenses:")
    for expense in expenseslist:
        if expense[0] == id:
            print(expense[1], "amount", expense[2])
def listbyexpenses(expenseslist, sign, money):
    """Print the apartments whose TOTAL expenses compare to ``money``
    according to ``sign`` ('<', '=' or '>').

    The three near-identical branches of the original were consolidated.
    One original quirk is kept on purpose: for '<' an apartment must also
    have a strictly positive total (so apartments without any expense are
    never listed), while '>' and '=' use the plain comparison.

    :raises ValueError: for any other ``sign``
    """
    if sign not in ('<', '=', '>'):
        raise ValueError("Sign is no compatible")
    print("Al the apartments with total expenses", sign, money, ":")
    top = max([0] + [expense[0] for expense in expenseslist])
    found = False
    for nrap in range(1, top + 1):
        total = sum(int(expense[2]) for expense in expenseslist
                    if expense[0] == nrap)
        if sign == '>':
            matches = total > money
        elif sign == '<':
            matches = total < money and total > 0
        else:
            matches = total == money
        if matches:
            found = True
            print("Apartment", nrap)
    if not found:
        print("There are no apartments with total expenses", sign, money)
def list_command(expenseslist, cmd):
    """Dispatch the three forms of ``list``:

    - ``list``                 -> print everything
    - ``list <apartment>``     -> print one apartment's expenses
    - ``list <sign> <amount>`` -> print apartments by total expenses

    :raises ValueError: when more arguments are supplied
    """
    if len(cmd) < 2:
        listall(expenseslist)
    elif len(cmd) == 2:
        listbyapartment(expenseslist, int(cmd[1]))
    elif len(cmd) == 3:
        money = int(cmd[2])
        if money > 0:
            listbyexpenses(expenseslist, cmd[1], money)
        else:
            print("Money is 0")
    else:
        raise ValueError("Invalid command")
def checkid(expenseslist, id):
    """Return the index of the first expense belonging to apartment ``id``,
    or -1 when the apartment is absent."""
    for position, expense in enumerate(expenseslist):
        if expense[0] == id:
            return position
    return -1
def checktype(expenseslist, tip):
    """Return the index of the first expense of type ``tip``,
    or -1 when that type does not occur."""
    for position, expense in enumerate(expenseslist):
        if expense[1] == tip:
            return position
    return -1
def remove__expenses(expenseslist, id, undolist):
    """Remove every expense of apartment ``id``.

    On success a snapshot of the previous list is pushed to ``undolist``
    and True is returned; when the apartment has no expenses nothing is
    touched and False is returned.
    """
    if not any(expense[0] == id for expense in expenseslist):
        return False
    undolist.append(expenseslist[:])
    expenseslist[:] = [expense for expense in expenseslist
                       if expense[0] != id]
    return True
def remove__expenses_type(expenseslist, tip):
    """Remove every expense of type ``tip``; return True when at least one
    existed, False otherwise (nothing is modified in that case)."""
    if not any(expense[1] == tip for expense in expenseslist):
        return False
    expenseslist[:] = [expense for expense in expenseslist
                       if expense[1] != tip]
    return True
def remove_expense_fromto(expenseslist, start, stop, undolist):
    """Remove the expenses of every apartment STRICTLY between ``start``
    and ``stop`` (both endpoints are kept), saving an undo snapshot."""
    undolist.append(expenseslist[:])
    expenseslist[:] = [expense for expense in expenseslist
                       if not (start < expense[0] < stop)]
def remove_command(expenseslist, cmd, undolist):
    """Dispatch the three forms of ``remove``:

    - ``remove <a> to <b>``  -> drop apartments strictly between a and b
    - ``remove <apartment>`` -> drop one apartment's expenses
    - ``remove <type> ...``  -> drop one or more expense types

    BUG FIX: ``remove <digit> <something-other-than-'to'>`` used to fall
    through the original nested ``if`` and silently do nothing; it now
    raises "Invalid command" so the user gets feedback.

    :raises ValueError: on malformed input
    """
    if len(cmd) < 2:
        raise ValueError("Invalid command")
    if cmd[1].isdigit():
        if len(cmd) == 4 and cmd[2] == 'to':
            remove_expense_fromto(expenseslist, int(cmd[1]), int(cmd[3]),
                                  undolist)
        elif len(cmd) == 2:
            nrap = int(cmd[1])
            if nrap <= 0:
                raise ValueError("Invalid input. Wrong id")
            if remove__expenses(expenseslist, nrap, undolist) == False:
                print("Inexistent apartment")
        else:
            raise ValueError("Invalid command")
    else:
        undolist.append(expenseslist[:])
        for i in range(1, len(cmd)):
            if remove__expenses_type(expenseslist, cmd[i]) == False:
                print("Inexistent type of expense")
def replace_command(expenseslist, cmd, undolist):
    """Handle ``replace <apartment> <type> with <amount>``: overwrite the
    amount of the first matching expense, saving an undo snapshot first.
    Prints a message when no matching expense exists."""
    nrap = int(cmd[1])
    tip = cmd[2]
    amount = int(cmd[4])
    for pos, expense in enumerate(expenseslist):
        if expense[0] == nrap and expense[1] == tip:
            undolist.append(expenseslist[:])
            expenseslist[pos] = (nrap, tip, amount)
            return
    print("There is no such apartment with such expense")
def sum_command(expenseslist, cmd):
    """Handle ``sum <type>``: print the total amount for that expense type.

    :raises ValueError: when no expense of the given type exists
    """
    tip = cmd[1]
    amounts = [expense[2] for expense in expenseslist if expense[1] == tip]
    if not amounts:
        raise ValueError("The type is inexistent")
    print("The total amount of", tip, "expenses is", sum(amounts))
def find(tiplist, expense_type):
    """Return the index of the first pair whose FIRST element equals
    ``expense_type`` (used for both (type, amount) and (id, total) pairs),
    or -1 when absent."""
    for position, tip in enumerate(tiplist):
        if tip[0] == expense_type:
            return position
    return -1
def max_command(expenseslist, cmd):
    """Handle ``max <apartment>``: print, for each expense type, the
    largest single amount that apartment has paid.

    BUG FIX: the original raised ValueError("Inexistent apartment")
    unconditionally after printing the results (the ``raise`` sat inside
    the success branch); it is now raised only when the apartment truly
    has no expenses.

    :raises ValueError: when ``cmd[1]`` is not an integer, or the
        apartment has no expenses
    """
    if not cmd[1].isdigit():
        raise ValueError("You need an integer parameter")
    nrap = int(cmd[1])
    maxima = {}  # type -> maximum amount, in order of first appearance
    for expense in expenseslist:
        if expense[0] == nrap:
            tip, amount = expense[1], expense[2]
            if tip not in maxima or amount > maxima[tip]:
                maxima[tip] = amount
    if not maxima:
        raise ValueError("Inexistent apartment")
    print("The maximum amounts per each expense are:")
    for tip, amount in maxima.items():
        print("The maximum amount of", tip, "expenses is", amount)
def maximum(expenseslist):
    """Return the largest apartment id present, or 0 for an empty list."""
    return max([0] + [expense[0] for expense in expenseslist])
def sort_apartments(expenseslist):
    """Return [(apartment id, total expenses)] for every apartment that has
    at least one expense, in increasing id order (totals are NOT sorted
    here; see ``sort``)."""
    totals = []
    top = max([0] + [expense[0] for expense in expenseslist])
    for nrap in range(1, top + 1):
        amounts = [int(expense[2]) for expense in expenseslist
                   if expense[0] == nrap]
        if amounts:
            totals.append((nrap, sum(amounts)))
    return totals
def sort(s):
    """Sort the list of (key, amount) pairs in place by ascending amount
    and return it.

    The hand-written bubble sort was replaced with the built-in stable
    ``list.sort`` (same ordering for equal amounts, O(n log n) instead of
    O(n^2)).
    """
    s.sort(key=lambda pair: pair[1])
    return s
def sort_by_type(expenseslist):
    """Return [(type, total amount)] pairs, one per expense type, in order
    of first appearance (totals are NOT sorted here; see ``sort``)."""
    totals = {}
    for expense in expenseslist:
        totals[expense[1]] = totals.get(expense[1], 0) + expense[2]
    return list(totals.items())
def sort_command(expenseslist, cmd):
    """Dispatch ``sort apartment`` / ``sort type`` and print the result
    sorted ascending by total amount.

    :raises ValueError: for any other argument
    """
    target = cmd[1]
    if target == 'apartment':
        pairs = sort(sort_apartments(expenseslist))
        print("The apartments sorted ascending by total expenses are:")
        for nrap, total in pairs:
            print("Apartment", nrap, "total expenses", total)
    elif target == 'type':
        pairs = sort(sort_by_type(expenseslist))
        print("The types of expenses sorted ascending by total amount are:")
        for tip, total in pairs:
            print("Expense", tip, "total amount", total)
    else:
        raise ValueError("Invalid command!")
def remove(expenseslist, cash):
    """Keep only the expenses whose amount is strictly below ``cash``
    (in-place filter)."""
    expenseslist[:] = [expense for expense in expenseslist
                       if expense[2] < cash]
def remove_type(expenseslist, tip):
    """Keep only the expenses of type ``tip`` (in-place filter).

    :raises ValueError: when the type does not occur at all
    """
    if not any(expense[1] == tip for expense in expenseslist):
        raise ValueError("Inexistent type")
    expenseslist[:] = [expense for expense in expenseslist
                       if expense[1] == tip]
def filter_command(expenseslist, cmd, undolist):
    """Dispatch ``filter <value>`` / ``filter <type>``: keep only the
    expenses below the value, or only the expenses of the type.  An undo
    snapshot is pushed before any change.

    :raises ValueError: for an unknown (non-numeric, non-type) argument
    """
    cuv = cmd[1]
    if cuv.isdigit():
        undolist.append(expenseslist[:])
        remove(expenseslist, int(cuv))
    elif checktype(expenseslist, cuv) != -1:
        undolist.append(expenseslist[:])
        remove_type(expenseslist, cuv)
    else:
        raise ValueError("Invalid command")
def undo_command(undolist, expenseslist):
    """Return the most recent snapshot from ``undolist`` (removing it), or
    the unchanged ``expenseslist`` when there is nothing to undo."""
    if not undolist:
        print("There is no operation before that!")
        return expenseslist
    return undolist.pop()
| danalrds/FP | ddd/functions.py | functions.py | py | 15,070 | python | en | code | 0 | github-code | 13 |
6997733750 | # Indexable skip list
# __getitem__: returns k-th element, 0-indexed
# update: will not be used outside the class
# find: if used outside the class, returns the maximal element up to val
# insert: insert val
# remove: remove val
# iterate: show the structure of the list, use this to debug
class Node:
    """One tower of the indexable skip list.

    ``adj[i]`` is the next node at level ``i`` and ``width[i]`` is the
    number of bottom-level links that pointer skips over.
    """
    def __init__(self, height=0, key=None):
        self.key = key  # stored value (None for the head sentinel)
        self.adj = [None] * height  # forward pointers, one per level
        self.width = [float('inf')] * height  # link widths, one per level
import random
class IndSkipList:
    """Indexable skip list: an ordered set with O(log n) expected insert,
    remove, and access by rank via ``__getitem__`` (0-indexed).  The
    ``width`` stored on each link counts the bottom-level nodes the link
    skips, which is what makes rank queries possible."""
    def __init__(self):
        self.head = Node()
    def __getitem__(self, idx):
        # Walk down the levels, consuming link widths until idx is spent.
        x = self.head; idx+= 1
        for i in reversed(range(len(self.head.adj))):
            while x.width[i] <= idx and x.adj[i]: idx-= x.width[i]; x = x.adj[i]
        return x.key
    def update(self, val):
        # For every level, find the rightmost node with key < val.
        # Returns those predecessor nodes plus the widths walked per level.
        update = [None]*len(self.head.adj)
        wd = [0]*len(self.head.adj)
        x = self.head
        for i in reversed(range(len(self.head.adj))):
            while x.adj[i] != None and x.adj[i].key < val:
                wd[i]+= x.width[i]; x = x.adj[i]
            update[i] = x
        return update, wd
    def find(self, val, update = None, exact = False):
        # exact=True: return the Node with key == val, or None.
        # exact=False: return (key, rank) of the greatest key <= val.
        if not update: update, wd = self.update(val)
        if len(update) == 0: return None
        cand = update[0].adj[0]
        matched = cand and cand.key == val
        if exact and matched: return cand
        if not exact: return (val, sum(wd)) if matched else (update[0].key, sum(wd)-1)
    def insert(self, val):
        # Geometric random height (p = 1/2), as usual for skip lists.
        h, d = 1, 0
        while random.random() < 0.5: h+= 1
        node = Node(h, val)
        # Grow the head tower when the new node is taller than any so far.
        while len(self.head.adj) < len(node.adj):
            self.head.adj.append(None)
            self.head.width.append(float('inf'))
        update, wd = self.update(val)
        if self.find(val, update, True): raise KeyError
        nl = len(node.adj)
        # Splice the node in at every level it spans, fixing link widths;
        # d accumulates the node's rank as we move up the levels.
        for i in range(nl):
            node.adj[i] = update[i].adj[i]; node.width[i] = update[i].width[i]-d
            update[i].adj[i] = node; update[i].width[i] = d+1; d+= wd[i]
        # Levels above the node's height only get one unit wider.
        for i in range(nl, len(self.head.adj)): update[i].width[i]+= 1
    def remove(self, val):
        update, wd = self.update(val)
        x = self.find(val, update, True)
        if not x: raise KeyError
        nl = len(x.adj)
        # Unlink at every level the node spans, merging the two widths.
        for i in range(nl):
            update[i].adj[i] = x.adj[i]; update[i].width[i]+= x.width[i]-1
        for i in range(nl, len(self.head.adj)): update[i].width[i]-= 1
    def iterate(self):
        # use this to debug
        x = self.head
        while x: print(x.key,
            [((x.adj[i].key,x.width[i]) if x.adj[i] else None)
            for i in range(len(x.adj))]); x = x.adj[0]
# Smoke test: insert a few keys out of order, then dump the level structure.
s = IndSkipList()
s.insert(234)
s.insert(2003)
s.insert(3)
s.insert(532)
s.iterate()
for i in range(4): assert s[i] == [3,234,532,2003][i] | hongjun7/PythonAlgorithms | SkipList&Index.py | SkipList&Index.py | py | 2,925 | python | en | code | null | github-code | 13 |
10642385163 | import pyglet
from pyglet.window import key
import ratcave as rc
# Create Window
window = pyglet.window.Window(resizable=True)
# Key-state handler is registered but not read elsewhere in this file.
keys = key.KeyStateHandler()
window.push_handlers(keys)
def update(dt):
    # No per-frame logic needed; scheduling a no-op keeps pyglet's clock ticking.
    pass
pyglet.clock.schedule(update)
# Insert filename into WavefrontReader.
obj_filename = rc.resources.obj_primitives
obj_reader = rc.WavefrontReader(obj_filename)
# Create Meshes -- one sphere per body, scaled relative to the sun.
sun = obj_reader.get_mesh("Sphere", name='sun')
merkury = obj_reader.get_mesh("Sphere", scale =.1, name='merkury')
venus = obj_reader.get_mesh("Sphere", scale =.2, name='venus')
earth = obj_reader.get_mesh("Sphere", scale =.2, name='earth')
mars = obj_reader.get_mesh("Sphere", scale =.2, name='mars')
jupyter = obj_reader.get_mesh("Sphere", scale =.4, name='jupyter')
moon = obj_reader.get_mesh("Sphere", scale =.5, name='moon')
# Create Empty Entities: invisible pivots parented to the sun.  Each planet
# hangs off one at a z-offset, so rotating a pivot orbits its planet.
empty_merkury = rc.EmptyEntity(name='sun_merkury')
empty_venus = rc.EmptyEntity(name='sun_venus')
empty_earth = rc.EmptyEntity(name='sun_earth')
empty_mars = rc.EmptyEntity(name='sun_mars')
empty_jupyter = rc.EmptyEntity(name='sun_jupyter')
# Define Relationships (scene graph: sun -> pivots -> planets; earth -> moon)
sun.add_children(empty_merkury, empty_earth, empty_venus, empty_mars, empty_jupyter)
empty_merkury.add_child(merkury)
empty_venus.add_child(venus)
empty_earth.add_child(earth)
empty_mars.add_child(mars)
empty_jupyter.add_child(jupyter)
earth.add_child(moon)
# Define Relative Positions (orbit radii, in parent-local coordinates)
sun.rotation.x = 50
sun.position.xyz = 0, 0, -12
merkury.position.z += 1
venus.position.z += 2
earth.position.z += 3
mars.position.z += 4
jupyter.position.z += 5
moon.position.z += 1
# Add Texture
sun.textures.append(rc.Texture.from_image(rc.resources.img_colorgrid))
# Create Scene
scene = rc.Scene(meshes=sun, bgColor=(0,0,0))
scene.camera.projection.z_far = 20
@window.event
def on_draw():
    """Advance every body's rotation by its per-frame step, then render."""
    # (entity, degrees-per-frame) pairs: spinning a pivot orbits its planet,
    # spinning a mesh rotates the body itself.
    spins = (
        (sun, 0.5),
        (earth, 0.5),
        (empty_merkury, 2),
        (empty_venus, 1.5),
        (empty_earth, 1),
        (empty_mars, 0.75),
        (empty_jupyter, 0.5),
    )
    for entity, step in spins:
        entity.rotation.y += step
    with rc.default_shader:
        scene.draw()

pyglet.app.run()
| ratcave/ratcave | examples/solar_system.py | solar_system.py | py | 2,102 | python | en | code | 110 | github-code | 13 |
219611823 | """
import sys
from collections import deque
# sys.stdin = open("input.txt", 'r')
must = input()
n = int(input())
for i in range(1, n + 1):
course = input()
queue = deque()
for x in course:
if x in must and x not in queue:
queue.append(x)
if len(queue) != len(must):
print(f"#{i} NO")
continue
for x in must:
tmp = queue.popleft()
if tmp != x:
print(f"#{i} NO")
break
else:
print(f"#{i} YES")
"""
import sys
from collections import deque
# sys.stdin = open("input.txt", 'r')
# ``must`` lists required course characters; a candidate course is valid when
# every required character appears in it, in the given order.
must = input()
n = int(input())
for i in range(n):
    course = input()
    need = deque(must)   # required characters still awaited, in order
    for x in course:
        if x in need:
            # A required character encountered out of order fails the course.
            if x != need.popleft():
                print(f"#{i + 1} NO")
                break
    # for/else: runs only when the inner loop completed without break.
    else:
        if len(need) == 0:
            print(f"#{i + 1} YES")
        else:
            print(f"#{i + 1} NO")
| ignis535/baekjoon | 자료구조(스택, 큐, 해쉬, 힙)/교육과정 설계.py | 교육과정 설계.py | py | 948 | python | en | code | 0 | github-code | 13 |
29204065835 | from math import sqrt
def is_square_pairs(n: int, cuts: list, squares: list) -> bool:
    """Depth-first search for a circular arrangement of 1..n (first element
    fixed to 1) in which every adjacent pair sums to a perfect square.

    *cuts* is the partial arrangement built so far; *squares* is the set of
    admissible sums.  Prints and returns True on the first complete
    arrangement found, False if none exists from this prefix.
    """
    last = cuts[-1]
    if len(cuts) == n:
        # Arrangement complete -- it must also close the circle, i.e. the
        # last piece plus the fixed first piece (1) sums to a square.
        if (1 + last) in squares:
            print(f'Count: {n}, Cuts: {cuts}')
            return True
        return False
    # Try every unused number whose sum with the last piece is a square.
    for candidate in (x for x in range(1, n + 1) if x not in cuts):
        if (last + candidate) in squares and is_square_pairs(n, cuts + [candidate], squares):
            return True
    return False
# Search upward for the smallest n admitting a valid circular arrangement.
n = 2
while True:
    # Perfect squares up to ~2n: the largest possible adjacent sum is
    # n + (n-1) = 2n - 1, so squares of 2..floor(sqrt(2n)) cover every case.
    sqrts = [x ** 2 for x in range(2, int(sqrt(n * 2) + 1))]
    if is_square_pairs(n, [1], sqrts):
        break
    n += 1
print(n)
| Sunao-Yoshii/StudyDocs | Books/math_pazzle/18_cut_cake.py | 18_cut_cake.py | py | 901 | python | ja | code | 0 | github-code | 13 |
27302787185 | import re
from converter.markdown.tabular import Tabular
class Tabularx(Tabular):
    """Markdown converter for LaTeX ``tabularx`` environments.

    Reuses all of ``Tabular``'s conversion machinery and only swaps in a
    pattern matching ``\\begin{tabularx}{<settings>}...\\end{tabularx}``.
    """
    def __init__(self, latex_str, caret_token):
        super().__init__(latex_str, caret_token)
        # Named groups: ``settings`` is the column spec, ``block_contents``
        # the table body.  DOTALL lets ``.*?`` span newlines; VERBOSE makes
        # the regex engine ignore the literal indentation inside this
        # triple-quoted pattern.
        self._table_re = re.compile(r"""\\begin{(?P<block_name>tabularx)}{(?P<settings>.*?)}
                                    (?P<block_contents>.*?)
                                    \\end{(?P=block_name)}""",
                                    flags=re.DOTALL + re.VERBOSE)
| codio/book-converter | converter/markdown/tabularx.py | tabularx.py | py | 465 | python | en | code | 2 | github-code | 13 |
2040469946 | import random
import time
import pygame
import numpy as np
# CONSTANTS
BLUE = (0,0,255)
# NOTE(review): despite its name this is RGB *black*; it is used both as the
# clear colour (via BACKGROUND) and as the text background in
# _display_message, so renaming/fixing it changes the game's look.
WHITE = (0, 0, 0)
BACKGROUND = WHITE
FRAME_REFRESH_RATE = 60            # target frames per second
DISPLAY_WIDTH = 640
DISPLAY_HEIGHT = 480
STARSHIP_SPEED = 3                 # pixels per frame per axis
# Lower-case because it is mutated at runtime (Game.play raises it by 1%).
max_meteor_speed = 4
INITIAL_NUMBER_OF_METEORS = 10
MAX_NUMBER_OF_CYCLES = 1000        # surviving this many frames wins
NEW_METEOR_CYCLE_INTERVAL = 20     # frames between speed-ceiling increases
class Game:
    """Top-level game object: owns the window, the starship and the meteors."""

    def __init__(self):
        print('Initialising PyGame')
        pygame.init()
        self.display_surface = pygame.display.set_mode((DISPLAY_WIDTH, DISPLAY_HEIGHT))
        pygame.display.set_caption("Starship Meteors")
        self.clock = pygame.time.Clock()
        self.starship = Starship(self)
        self.starship.draw()
        self.meteors = []
        for _ in range(INITIAL_NUMBER_OF_METEORS):
            self._new_meteor()

    def play(self):
        """Run the main loop until the player wins, collides or quits."""
        global max_meteor_speed
        is_running = True
        starship_collided = False
        cycle_count = 0
        move_x = 0
        move_y = 0
        while is_running and not starship_collided:
            cycle_count += 1
            if cycle_count == MAX_NUMBER_OF_CYCLES:
                # Surviving MAX_NUMBER_OF_CYCLES frames wins the game.
                self._display_message("WINNER!")
                break
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self._display_message("Closing the game")
                    is_running = False
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_q:
                        print("Closing the game")
                        is_running = False
                    # WASD commands
                    elif event.key == pygame.K_w or event.key == pygame.K_UP:
                        move_y = -1
                    elif event.key == pygame.K_a or event.key == pygame.K_LEFT:
                        move_x = -1
                    elif event.key == pygame.K_s or event.key == pygame.K_DOWN:
                        move_y = 1
                    elif event.key == pygame.K_d or event.key == pygame.K_RIGHT:
                        move_x = 1
                    elif event.key == pygame.K_p:
                        self._pause()
                elif event.type == pygame.KEYUP:
                    move_x = 0
                    move_y = 0
            # Polling the key state lets WASD movement repeat while held down.
            keyState = pygame.key.get_pressed()
            if keyState[pygame.K_w]:
                move_y = -1
            elif keyState[pygame.K_a]:
                move_x = -1
            elif keyState[pygame.K_s]:
                move_y = 1
            elif keyState[pygame.K_d]:
                move_x = 1
            self.display_surface.fill(BACKGROUND)
            self.starship.move(move_x, move_y)
            self.starship.draw()
            # Iterate over a snapshot: the loop body removes off-screen
            # meteors and appends replacements, and mutating a list while
            # iterating over it silently skips elements.
            for meteor in list(self.meteors):
                meteor.move()
                meteor.draw()
                starship_collided = self._check_collision(meteor)
                if starship_collided:
                    self._display_message("Game Over!")
                    break
                if meteor.is_out():
                    self.meteors.remove(meteor)
                    self._new_meteor()
            pygame.display.update()
            # Periodically raise the speed ceiling for newly spawned meteors.
            if cycle_count % NEW_METEOR_CYCLE_INTERVAL == 0:
                max_meteor_speed *= 1.01
            self.clock.tick(FRAME_REFRESH_RATE)
        time.sleep(1)   # leave the final message on screen briefly
        pygame.quit()

    def _check_collision(self, meteor):
        """Return True when the starship's hit box overlaps *meteor*."""
        return self.starship.rect().colliderect(meteor.rect())

    def _pause(self):
        """Show a pause screen and block until 'p' is pressed again."""
        self._display_message("Pause...")
        paused = True
        while paused:
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_p:
                        paused = False
                        break

    def _display_message(self, message):
        """Print *message* and show it centred on a cleared screen."""
        print(message)
        text_font = pygame.font.Font('freesansbold.ttf', 48)
        text_surface = text_font.render(message, True, BLUE, WHITE)
        text_rectangle = text_surface.get_rect()
        text_rectangle.center = (DISPLAY_WIDTH/2, DISPLAY_HEIGHT/2)
        self.display_surface.fill(WHITE)
        self.display_surface.blit(text_surface, text_rectangle)
        pygame.display.update()

    def _new_meteor(self):
        """Spawn a meteor near the top, x-biased towards the starship."""
        meteor = Meteor(self)
        x = np.random.normal(self.starship.x, DISPLAY_WIDTH/4)
        x = np.clip(x, 0, DISPLAY_WIDTH)
        y = 10
        meteor.set_position(x, y)
        self.meteors.append(meteor)
class GameObject():
    """Base class for drawable sprites: position, a scaled image, movement."""
    def __init__(self, game, image_filename=None):
        self.game = game
        if image_filename is not None:
            self.load_image(image_filename)
    def set_position(self, x=0, y=0):
        self.x = x
        self.y = y
    def load_image(self, filename):
        """Load *filename* and scale it so its longest side is 30 px."""
        self.image = pygame.image.load(filename).convert()
        self.width = self.image.get_width()
        self.height = self.image.get_height()
        # Preserve the aspect ratio; int() truncates the shorter side.
        if self.width > self.height:
            self.height = int(30 * self.height/self.width)
            self.width = 30
        else:
            self.width = int(30 * self.width/self.height)
            self.height = 30
        self.image = pygame.transform.scale(self.image, (self.width, self.height))
    def rect(self):
        """Bounding box used for collision detection."""
        return pygame.Rect(self.x, self.y, self.width, self.height)
    def draw(self):
        self.game.display_surface.blit(self.image, (self.x, self.y))
    def move(self, dx, dy, speed):
        """Move by (dx, dy) * speed, clamped to stay fully on screen."""
        self.x += dx * speed
        self.y += dy * speed
        if self.x < 0:
            self.x = 0
        elif self.x + self.width > DISPLAY_WIDTH:
            self.x = DISPLAY_WIDTH - self.width
        if self.y < 0:
            self.y = 0
        elif self.y + self.height > DISPLAY_HEIGHT:
            self.y = DISPLAY_HEIGHT - self.height
class Starship(GameObject):
    """The player's ship, started bottom-centre of the screen."""
    def __init__(self, game):
        super().__init__(game, "images/img.png")
        self.set_position(DISPLAY_WIDTH / 2, DISPLAY_HEIGHT - 40)
    def move(self, dx, dy):
        # dx/dy are -1, 0 or 1; the base class applies the speed and clamps.
        super().move(dx, dy, STARSHIP_SPEED)
    def rect(self):
        # Forgiving hit box: trims 10 px off the left and top edges only
        # (x+10 with width-10 keeps the right edge at x+width).
        return pygame.Rect(self.x+10, self.y+10, self.width-10, self.height - 10)
class Meteor(GameObject):
    """A falling obstacle with a random per-instance speed."""
    def __init__(self, game):
        super().__init__(game, "images/img_1.png")
        # NOTE(review): max_meteor_speed becomes a float once Game.play
        # scales it by 1.01; this appears to rely on np.random.randint
        # truncating a float upper bound -- confirm against the numpy docs.
        self.speed = np.random.randint(1, max_meteor_speed)
    def move(self):
        self.y += self.speed
    def is_out(self):
        """True once the meteor has fallen below the bottom of the screen."""
        return self.y > DISPLAY_HEIGHT
def main():
    """Create the game and run it to completion."""
    print("Starting game")
    game = Game()
    game.play()
    print("Game Over")
if __name__ == '__main__':
    main()
40918582748 | #Max Millar
#SoftDev1 pd06
#k25 -- Getting More REST
#2018-11-14
from flask import Flask, render_template
import json
from urllib import request
app = Flask(__name__)
@app.route('/')
def render_test():
    # Fetch the Barack Obama article through the MediaWiki "parse" API;
    # data["parse"]["text"]["*"] holds the rendered article HTML.
    data = json.loads((request.urlopen("https://en.wikipedia.org/w/api.php?action=parse&page=Barack_Obama&format=json")).read())
    # NOTE(review): debug prints of the full payload on every request.
    print(data)
    print("==================================================================")
    print(data["parse"]["text"]["*"])
    return render_template("index.html", text=data["parse"]["text"]["*"], title=data["parse"]["title"])
if __name__ == '__main__':
    app.debug = True
    app.run()
| stuymmillar/SoftDev | 25_rest/app.py | app.py | py | 635 | python | en | code | 0 | github-code | 13 |
5835105101 | import os
import sys
from dataclasses import dataclass
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from exception import CustomException
from logger import logging
from utils import save_object,evaluate_models
@dataclass
class ModelTrainerConfig:
    """Configuration for ModelTrainer: where the fitted model is persisted."""
    # The annotation is required for @dataclass to treat this as a field:
    # an unannotated assignment is a plain class attribute and is ignored
    # by the generated __init__/__repr__/__eq__.
    trained_model_file_path: str = os.path.join("artifacts", "model.pkl")
class ModelTrainer:
    """Trains a Random Forest via grid search (evaluate_models), persists the
    best estimator and returns its test-set accuracy."""
    def __init__(self):
        self.model_trainer_config=ModelTrainerConfig()
    def initiate_model_trainer(self,train_array,test_array):
        """Split the arrays (last column = target), tune, refit, save and
        score the model.  Wraps any failure in CustomException."""
        try:
            logging.info("Split training and test input data")
            # Features are all columns but the last; the last column is y.
            X_train,y_train,X_test,y_test=(
                train_array[:,:-1],
                train_array[:,-1],
                test_array[:,:-1],
                test_array[:,-1]
            )
            models = {
                "Random Forest": RandomForestClassifier(),
            }
            params={
                "Random Forest":{
                    # 'criterion':['squared_error', 'friedman_mse', 'absolute_error', 'poisson'],
                    # 'max_features':['sqrt','log2',None],
                    'n_estimators': [8,16,32,64,128,256,300],
                    'n_jobs': [-1]
                },
            }
            model_name = "Random Forest"
            selected_models = models[model_name]
            selected_params = params[model_name]
            # evaluate_models (project util) is expected to run the search and
            # report per-model results including 'best_params'.
            model_report:dict=evaluate_models(X_train=X_train,y_train=y_train,X_test=X_test,y_test=y_test,
                                             models={model_name: selected_models}, param={model_name: selected_params})
            best_params = model_report[model_name]['best_params']
            best_model = selected_models.set_params(**best_params) # set_params mutates the estimator in place and returns it
            best_model.fit(X_train, y_train)
            save_object(
                file_path=self.model_trainer_config.trained_model_file_path,
                obj=best_model
            )
            predicted = best_model.predict(X_test)
            score = accuracy_score(y_test, predicted)
            return score
        except Exception as e:
            raise CustomException(e, sys)
41231002119 | import os
import nextcord
from nextcord.ext import commands
PREFIX = os.environ['PREFIX']  # command prefix (e.g. "!"), read from the environment
intents = nextcord.Intents.default()
intents.members = True  # enable the (privileged) members intent
bot = commands.Bot(command_prefix=PREFIX, intents=intents)
@bot.event
async def on_ready():
    # Fired once the gateway connection is established and ready.
    print(f'{bot.user} is online!')
@bot.command()
async def ping(ctx):
    # Latency smoke-test command.
    await ctx.reply('Pong!')
bot.run(os.environ['TOKEN'])  # blocks until the bot shuts down
| uhIgnacio-zz/heroku-example | src/main.py | main.py | py | 381 | python | en | code | 3 | github-code | 13 |
37451127020 | #defining data
#x colour is V-z k-corrected to z=0.9
#y colour is J-[3.6] k-corrected to z=0.9
#xcolour = (geec2['MAG_V']-geec2['KCORR09_V'])-(geec2['MAG_z']-geec2['KCORR09_z'])
#ycolour = (geec2['MAG_J']-geec2['KCORR09_J'])-(geec2['MAG_I1']-geec2['KCORR09_I1'])
xcolour = balogh_photoz['V-z']
ycolour = balogh_photoz['J-I1']
#2 indices, one for balogh2 one for geec2
# Cross-match the two catalogues on Galaxy_ID:
#   b2index -- boolean mask over balogh_data2 rows that have a photo-z match
#   g2index -- integer indices of those matches within balogh_photoz
g2index = []
b2index = []
for i in range(len(balogh_data2)):
    a = np.where(balogh_photoz['Galaxy_ID']==balogh_data2['Galaxy_ID'][i])[0]
    if a.size == 0:
        #no match
        b2index.append(False)
    else:
        #yes match (only the first match is kept if an ID repeats)
        b2index.append(True)
        g2index.append(a[0])
g2index = np.array(g2index)
b2index = np.array(b2index)
#by column
#choosecol = (balogh_data2['zp']>=0.8)*(balogh_data2['zp']<=1.0)
#Restricting by redshift
#g2index = g2index*(choosecol[b2index])
#b2index = b2index*choosecol
xdata, ydata = (xcolour[g2index], ycolour[g2index])
#PLOTTING==============================================================
pl.close()
fig, ax = pl.subplots(1, 1, figsize=(10, 10))
ms = 4.0
#ax.plot(xdata, ydata, 'o', ms=1.0, label=str(len(index))+' entries')
# One scatter series per catalogued Class: 'p' (red, labelled 'q entries'),
# 'int' (green) and 'sf' (blue).
ax.plot(xdata[balogh_data2['Class'][b2index]=='p'], ydata[balogh_data2['Class'][b2index]=='p'], 'o', color='red', ms=ms, label='q entries')
ax.plot(xdata[balogh_data2['Class'][b2index]=='int'], ydata[balogh_data2['Class'][b2index]=='int'], 'o', color='green', ms=ms, label='int entries')
ax.plot(xdata[balogh_data2['Class'][b2index]=='sf'], ydata[balogh_data2['Class'][b2index]=='sf'], 'o', color='blue', ms=ms, label='sf entries')
#ax.plot(xdata[geec2['Type'][index]==0], ydata[geec2['Type'][index]==0], 'o', ms=4.0, color='blue', label='sf entries')
#ax.plot(xdata[geec2['Type'][index]==1], ydata[geec2['Type'][index]==1], 'o', ms=4.0, color='green', label='int entries')
#ax.plot(xdata[balogh_data1['Class']=='p'], ydata[balogh_data1['Class']=='p'], 'o', ms=4.0, color='red', label='q entries')
ax.set_xlim(0.0, 4.0)
ax.set_ylim(0.0, 4.0)
ax.set_xlabel(r'$(V-z)^{0.9}$')
ax.set_ylabel(r'$(J-[3.6])^{0.9}$')
# Matplotlib locators hold a reference to the axis they are attached to, so
# a single AutoMinorLocator instance must not be shared between the x- and
# y-axis -- give each axis its own instance.
ax.xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator())
ax.yaxis.set_minor_locator(mpl.ticker.AutoMinorLocator())
legend=ax.legend(loc='upper right')
legend.draw_frame(False)
ax.set_title('balogh_data2 '+str(len(xdata))+' entries for 0.8<zp<1.0')
#DRAW CUT LINES
#sf cuts, q cuts
# Piecewise selection boundary in colour-colour space: verticals at x=2 and
# x=3, connected by slanted segments of slope 0.856.
qx1 = 2*np.ones(100)
qy1 = np.linspace(-150.0, 0.6311, 100)
sfx1 = 2*np.ones(100)
sfy1 = np.linspace(0.6311, 1.008, 100)
sfx2 = np.linspace(2.0, 3.0, 100)
sfy2 = 0.856*(sfx2-2)+1.008
qx2 = np.linspace(2.0, 3.0, 100)
qy2 = 0.856*(qx2-2)+0.6311
qx3 = 3*np.ones(100)
qy3 = np.linspace(1.4871, 150.0, 100)
#sf
ax.plot(sfx1, sfy1, '-', color='black', lw=2.0)
ax.plot(sfx2, sfy2, '-', color='black', lw=2.0)
ax.plot(qx1, qy1, '-', color='black', lw=2.0)
ax.plot(qx2, qy2, '-', color='black', lw=2.0)
ax.plot(qx3, qy3, '-', color='black', lw=2.0)
#producing cuts, start with int and change to q, sf
Type = np.empty(b2index.sum(), dtype='U10')
Type.fill('int')
# On boolean arrays ``+`` acts as logical OR and ``*`` as logical AND.
qindex = (xdata > 3.0) + ((xdata > 2.0) * (ydata < (0.856 * (xdata - 2.0) + 0.6311)))
#Added (xdata < 3.0) AND condition to sfindex, otherwise, doesn't make sense
sfindex = (xdata < 2.0) + (ydata > (0.856 * (xdata - 2.0) + 1.008))* (xdata < 3.0)
Type[qindex]='p'
Type[sfindex]='sf'
#Checking if types are done correctly: compare the colour-cut classification
#against the catalogued spectroscopic Class column.
matches = (balogh_data2['Class'][b2index]==Type).sum()
print('There are ', matches, ' matches out of ', len(xdata), ' entries')
71948829457 | # 데이터 삽입 구현
katok = ['a', 'b', 'c', 'd', 'e']

def insert_data(position, friend):
    """Insert *friend* at index *position* of the global ``katok`` list,
    shifting every later entry one slot to the right.

    (Equivalent to ``katok.insert(position, friend)`` for valid positions;
    written out as an explicit shift for the data-structures exercise.)
    """
    katok.append(None)                      # grow by one empty slot
    for i in range(len(katok) - 1, position, -1):
        katok[i] = katok[i - 1]             # shift right; no need to blank the source slot
    katok[position] = friend
insert_data(2, '솔라') # insert '솔라' (Solar) at index 2
print(katok)
insert_data(6, '문별') # insert '문별' (Moonbyul) at index 6
print(katok)
| handhak0/2021_python_multicampus | 00.Special_Lecture/Algorithm/Code03_02.py | Code03_02.py | py | 440 | python | ko | code | 1 | github-code | 13 |
28609562473 | #Question Link: https://takeuforward.org/data-structure/fractional-knapsack-problem-greedy-approach/
#Solution Link (Python3): https://practice.geeksforgeeks.org/viewSol.php?subId=05e09acc984333ecb672d8168ef5d475&pid=701365&user=tiabhi1999
#For complete code snippet and question, please refer GFG link (https://practice.geeksforgeeks.org/problems/fractional-knapsack-1587115620/1#)
'''
class Item:
def __init__(self,val,w):
self.value = val
self.weight = w
'''
class Solution:
    def fractionalknapsack(self, W, Items, n):
        """Greedy fractional knapsack.

        Ranks the first n of *Items* by value/weight ratio (best first),
        takes whole items while they fit, then a fraction of the next item,
        and returns the total value collected within capacity W.
        """
        # Decorate with (ratio, value, weight) tuples so the descending sort
        # order matches a plain reverse tuple sort.
        ranked = sorted(
            ((Items[k].value / Items[k].weight, Items[k].value, Items[k].weight)
             for k in range(n)),
            reverse=True,
        )
        total = 0
        remaining = W
        for _, value, weight in ranked:
            if remaining > weight:
                # The whole item fits.
                remaining -= weight
                total += value
            else:
                # Take only the fraction that fills the knapsack, then stop.
                fraction = remaining / weight
                total = total + fraction * value
                remaining = 0
                break
        return total
| AbhiWorkswithFlutter/StriverSDESheet-Python3-Solutions | Striver SDE Sheet/Day 8/Fractional Knapsack Problem.py | Fractional Knapsack Problem.py | py | 1,174 | python | en | code | 3 | github-code | 13 |
11732811823 | from userProfileHandler import *
import userProfile
import time
class controller:
    """Builds the user's profile at import time and exposes QR-code actions.

    NOTE(review): the methods below take ``user`` as their first positional
    parameter instead of ``self``; as written they only work when invoked as
    ``controller.pauseCode(some_user)`` (calling them on an instance would
    pass the instance itself as ``user``).
    """
    # These class-level calls execute once, when the class body is run
    # (i.e. at import time).
    firstName = userProfile.getFirstName()
    lastName = userProfile.getLastName()
    phone = userProfile.getPhoneNumber()
    address = userProfile.getAddress()
    email = userProfile.getEmail()
    user = makeUserProfile(firstName, lastName, phone, address, email)
    def pauseCode(user):
        # Temporarily disable the code by deleting the rendered QR image.
        user.deleteQR()
    def resumeCode(user):
        # Re-render the QR image to re-enable the code.
        user.generateQR()
    def changeTimeRemaining(user, timeMin):
        # NOTE(review): ``countdownToRemove`` is referenced unqualified;
        # class attributes are not in scope inside method bodies, so this
        # raises NameError at call time unless a module-level function of
        # the same name is supplied by the wildcard import -- confirm.
        timeInSeconds = timeMin * 60
        countdownToRemove(timeInSeconds)
    def countdownToRemove(timeSec):
        # Sleeps one second per tick; ``timer`` is formatted but never
        # displayed or returned.
        while timeSec > 0:
            minute, second = divmod(timeSec, 60)
            timer = '{:02d}:{:02d}'.format(minute, second)
            time.sleep(1)
            timeSec -= 1
        # NOTE(review): ``user`` is neither a parameter nor a local here;
        # this likely raises NameError unless the wildcard import from
        # userProfileHandler supplies a module-level ``user`` -- confirm.
        user.deleteQR()
| Oddant1/QR-MeNow | qrCodeController.py | qrCodeController.py | py | 829 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.