blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9c92d56c644fa2633d165d90a120254998f7b4e4 | Python | justdoitqxb/BTC | /ml/textclassifier.py | UTF-8 | 5,970 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python
#-*- coding: UTF-8 -*-
'''
@author: qxb
@copyright: 2014 organization_name. All rights reserved.
@license: license
@contact: user_email
@deffield updated: Updated
'''
import numpy as np
import jieba,chardet
import copy
import sys,os
sys.path.append("..")
import ioio.txtoperater as to
class TextClassifier:
def __init__(self):
self.stop_words = []
self.train_txt_path = ""
self.test_txt_path = ""
self.classDict = {}
self.classNum = 1
def train_path_init(self,tpath):
self.train_txt_path = tpath
def test_path_init(self,ttpath):
self.test_txt_path = ttpath
def init_class_dict(self):
file_list = to.get_file_list(self.train_txt_path, '*.txt')
self.classNum = len(file_list)
for fl in file_list:
fl = fl.decode('GB18030')
print 'deal with ' + fl + '......'
fl = self.train_txt_path + '/' + fl
seg_list = self.split_text(fl) #list
dict_words_count ={}
for word in seg_list:
if not(word.strip() in self.stop_words) and len(word.strip())>1:
dict_words_count.setdefault(word.strip(),0.0)
dict_words_count[word.strip()]+=1
self.get_prior(dict_words_count)
filename = os.path.basename(fl).split('.')[0]
print filename
self.classDict.setdefault(filename,dict_words_count)
def set_stop_words(self):
f_stop = open("../res/stopwords.txt")
try:
f_stop_text = f_stop.read()
f_stop_text = unicode(f_stop_text,'utf-8')
finally:
f_stop.close()
self.stop_words = f_stop_text.split('\n')
def get_cossimi(self,x,y):
np_x = np.array(x)
np_y = np.array(y)
x_y = np.sum(np_x * np_y)
x_x = np.sum(np_x * np_x)
y_y = np.sum(np_y * np_y)
return x_y / float(np.sqrt(x_x)*np.sqrt(y_y))
def split_text(self,filename):
f = open(filename) #open the labeled file
try:
fl_text = f.read()
#fl_text = unicode(fl_text,'utf-8') #change the encoding
txtencode = chardet.detect(fl_text).get('encoding','utf-8')
fl_text = fl_text.decode(txtencode,'ignore').encode('utf-8')
finally:
f.close()
seg_list = jieba.cut(fl_text)#text divide into words
return seg_list
def generate_dict(self,seg_list):
# remove the words stopping to use
test_words ={}
for word in seg_list:
#print ".",
if not(word.strip() in self.stop_words) and len(word.strip())>1:
test_words.setdefault(word.strip(),0.0)
return test_words
def text_remove_a_count(self,seg_list,test_words):
new_test_words = copy.deepcopy(test_words)
test_vect = []
for word in seg_list:
#print ".",
if not(word.strip() in self.stop_words) and len(word.strip())>1:
if new_test_words.has_key(word.strip()):
new_test_words[word.strip()]+=1
for key in new_test_words:
test_vect.append(new_test_words[key])
return test_vect
def get_prior(self,txt_word):
allword = 0.0;
basegl = 1e-8
for key in txt_word:
allword += txt_word[key]
for key in txt_word:
if allword > 0:
txt_word[key] = basegl + txt_word[key] / allword
else:
txt_word[key] = basegl
def get_post(self,txt_word,test_word,numb):
itag = 0
postprob = 1.0 / numb
basegl = 1e-8
for key in test_word:
if txt_word.has_key(key):
postprob *= test_word[key]
postprob *= txt_word[key]
else:
postprob *= basegl
if postprob < 1e-100:
itag+=1
postprob*=1e50
return postprob,itag
def train_cos_simi(self,labeld_file,test_file):
tc.set_stop_words()
smp_seg_list = tc.split_text(labeld_file)
test_words = tc.generate_dict(smp_seg_list)
smp_seg_list = tc.split_text(labeld_file)
test_seg_list = tc.split_text(test_file)
sample_vect = tc.text_remove_a_count(smp_seg_list, test_words)
test_vect = tc.text_remove_a_count(test_seg_list, test_words)
testsimi = tc.get_cossimi(sample_vect, test_vect)
print testsimi.encode('utf-8')
return testsimi
def train_pp(self,label_dir,test_file):
test_class = ''
max_pro = 0.0
tag = float('Inf')
tc.set_stop_words()
tc.train_path_init(label_dir)
tc.init_class_dict()
test_seg_list = tc.split_text(test_file)
test_dict = {}
for word in test_seg_list:
if not(word.strip() in self.stop_words) and len(word.strip())>1:
test_dict.setdefault(word.strip(),0.0)
test_dict[word.strip()]+=1
for key in self.classDict:
post,itag = self.get_post(self.classDict[key], test_dict, self.classNum)
print key,post,itag
if itag <= tag and post > max_pro:
max_pro = post
tag = itag
test_class = key
print 'predict is : ' + test_class
return test_class
if __name__ == '__main__':
    # The triple-quoted block below is the disabled cosine-similarity demo.
    '''
    tc = TextClassifier()
    cos_simi1 = tc.train_cos_simi("../data/APEC1.txt","../data/APEC2.txt")
    cos_simi2 = tc.train_cos_simi("../data/APEC1.txt","../data/other.txt")
    '''
    # Naive-Bayes demo: classify ../data/other.txt against news_class/.
    tc = TextClassifier()
tc.train_pp("../data/news_class", "../data/other.txt") | true |
431fb4079fe4a809b3c49313f999457abccba0c8 | Python | samansafiaghdam/python-class | /EX11-gt-ge-le-lt.py | UTF-8 | 2,007 | 2.96875 | 3 | [] | no_license | class time:
    def __init__(obj,h,m,s):
        """Store hours, minutes and seconds (no range validation)."""
        obj.hour = h
        obj.minute = m
        obj.second = s
    def show(obj,ruz):
        """Remember *ruz* as obj.date and return 'H:M:S' as a string."""
        obj.date = ruz
        return str(obj.hour)+':'+str(obj.minute)+':'+str(obj.second)
    def sanieshomar(obj):
        """Return the total number of seconds represented by this time."""
        return obj.minute*60+obj.hour*3600+obj.second
    def sub(obj,newtime):
        """Subtract another time's fields from this one, in place.

        NOTE(review): fields may go negative; no borrow normalisation
        is performed here.
        """
        obj.hour -= newtime.hour
        obj.minute -= newtime.minute
        obj.second -= newtime.second
    def kam_kardan(obj,hh,mm,ss):
        """Subtract raw hour/minute/second amounts, in place (no borrow)."""
        obj.hour -= hh
        obj.minute -= mm
        obj.second -= ss
def __add__(obj, obj2):
obj.hour += obj2.hour
obj.minute += obj2.minute
obj.second += obj2.second
if obj.second>60:
obj.second=obj.second-60
obj.minute=obj.minute+1
if obj.minute>60:
obj.minute = obj.minute - 60
obj.hour = obj.hour + 1
def __ge__(obj, obj2):
if obj.minute*60+obj.hour*3600+obj.second>=obj2.minute*60+obj2.hour*3600+obj2.second:
return True
else:
return False
    def __gt__(obj, obj2):
        """Return True when this time is strictly after obj2 (total seconds).

        NOTE(review): the original class body contains a second
        ``def __gt__`` further down which shadows this definition at
        class-creation time.
        """
        if obj.minute*60+obj.hour*3600+obj.second>obj2.minute*60+obj2.hour*3600+obj2.second:
            return True
        else:
            return False
def __le__(obj, obj2):
if obj.minute*60+obj.hour*3600+obj.second<=obj2.minute*60+obj2.hour*3600+obj2.second:
return True
else:
return False
def __gt__(obj, obj2):
if obj.minute*60+obj.hour*3600+obj.second<obj2.minute*60+obj2.hour*3600+obj2.second:
return True
else:
return False
def __sub__(obj, obj2):
obj.hour -= obj2.hour
obj.minute -= obj2.minute
obj.second -= obj2.second
if obj.second>60:
obj.second=obj.second-60
obj.minute=obj.minute+1
if obj.minute>60:
obj.minute = obj.minute - 60
obj.hour = obj.hour + 1
| true |
6fbdae176410266369feb4f998ceb83bd35dbb47 | Python | dky718/codex-pythonweb | /pa1-urlparse/fetch.py | UTF-8 | 1,515 | 3.515625 | 4 | [] | no_license | #
# Assignment: Write a function to parse URLs.
#
# Fill in the body of the 'urlparse()' function in this module. If you wish,
# you may define and use additional helper functions; these generally will make
# your code clearer.
#
# To test your solution, use the unit test script provided along with this file.
# The unit test script will import your version of this module, and test your
# solution by running it with specific inputs and checking the answers. It will
# tell you if all the tests produce the expected results or not.
#
from collections import namedtuple
from urllib import request
ParseResult = namedtuple('ParseResult', 'scheme, netloc, path, params, query, fragment')

def urlparse(url):
    '''
    Parses a URL and returns a 'ParseResult' of its components
    (scheme, netloc, path, params, query, fragment).

    The splitting order follows RFC 3986: the fragment starts at the
    first '#', the query at the first '?' before it, the scheme is the
    token before the first ':' (letters, digits, '+', '-', '.'), the
    authority follows '//' and ends at the next '/', '?' or '#', and
    the legacy params component is whatever follows ';' in the last
    path segment.

    This was an unimplemented assignment stub; it now actually parses.
    '''
    scheme = netloc = path = params = query = fragment = ''
    # Fragment first: '#' may legally follow the query, never precede it.
    if '#' in url:
        url, fragment = url.split('#', 1)
    if '?' in url:
        url, query = url.split('?', 1)
    # Scheme: must start with a letter and use only [A-Za-z0-9+-.].
    head, sep, rest = url.partition(':')
    if sep and head and head[0].isalpha() and \
            all(c.isalnum() or c in '+-.' for c in head):
        scheme, url = head, rest
    # Authority (netloc) follows '//' and runs to the next delimiter.
    if url.startswith('//'):
        url = url[2:]
        end = len(url)
        for delim in '/?#':
            pos = url.find(delim)
            if pos != -1:
                end = min(end, pos)
        netloc, url = url[:end], url[end:]
    path = url
    # Params: after ';' in the last path segment only.
    if ';' in path.rsplit('/', 1)[-1]:
        path, params = path.rsplit(';', 1)
    return ParseResult(scheme, netloc, path, params, query, fragment)
| true |
b62ca7de9ad2ab975e5b8dbad9f1be58f5d3c648 | Python | dimuccidm/automation | /source-files/tests/test_finding_latest_run/test_keep_valid_run_names.py | UTF-8 | 1,504 | 2.5625 | 3 | [] | no_license | import unittest
from bin.querydirectories import keep_valid_run_names
class TestJob(unittest.TestCase):
    """Unit tests for querydirectories.keep_valid_run_names()."""

    def test_keep_valid_run_names(self):
        "Test that directories with valid names can be found"
        # All candidates live under the same test fixture directory.
        base = "/Users/ddimucci/PycharmProjects/Synthetic_Biology_Pipeline/source-files/tests/test_finding_latest_run/RUNDIR2"
        candidates = sorted(
            base + "/" + name
            for name in (
                "SAMPLE_SHEETS",
                "RUN4_34_900_DKJCN",
                "@NOTRIGHT",
                "RUN_WITH_TOO_MANY_UNDER_SCORES",
                "RUN_1_34_DJD",
            )
        )
        # Only the two RUN*-pattern directories should survive filtering.
        expected = [
            base + "/RUN4_34_900_DKJCN",
            base + "/RUN_1_34_DJD",
        ]
        self.assertEqual(keep_valid_run_names(candidates), expected)


if __name__ == "__main__":
    unittest.main()
| true |
b733ad05b8f6e1de4f0e1647d973078a0efdc1fd | Python | Mellak/Segementation_IVOCT_Plaque | /dice_help.py | UTF-8 | 773 | 3.0625 | 3 | [] | no_license | import keras.backend as K
import numpy as np
# Subroutine that computes the Dice coefficient
# from true and predicted binary images
def dice_coef(y_true, y_pred):
    """Global Dice coefficient between two (flattened) masks.

    NOTE(review): there is no smoothing term, so two all-zero masks
    divide by zero -- kept exactly as in the original.
    """
    flat_true = K.flatten(y_true)
    flat_pred = K.flatten(y_pred)
    overlap = K.sum(flat_true * flat_pred)
    return 2 * overlap / (K.sum(flat_true) + K.sum(flat_pred))
# INSERT CODE
# Subroutine that computes the Dice coefficient loss
def dice_coef_loss(y_true, y_pred):
    """Dice loss: 1 - dice_coef, minimised when the overlap is perfect."""
    return 1. - dice_coef(y_true, y_pred)
# Dice coefficient for a multi-channel image (per-channel reduction)
def dice_coefv3(y_true, y_pred):
    """Dice coefficient for multi-channel images.

    Reduces over the last axis only, so a Dice value per remaining
    position is returned instead of a single scalar.
    """
    overlap = K.sum(y_true * y_pred, axis=-1)
    total = K.sum(y_true, axis=-1) + K.sum(y_pred, axis=-1)
    return 2 * overlap / total
def dice_coef_lossv3(y_true, y_pred):
    """Channel-wise Dice loss: 1 - dice_coefv3."""
    return 1. - dice_coefv3(y_true, y_pred)
816d8082594980296a2379f0260fe077e105674e | Python | karbowskim/python | /hex2base64.py | UTF-8 | 193 | 3.203125 | 3 | [] | no_license | #Cryptopals challenge 1
import binascii
import base64
# Cryptopals set 1, challenge 1: hex string -> Base64 (Python 2 script:
# raw_input() returns str, and b64encode returns str, so '+' works).
hex_text = raw_input("Input hex string: ")
encoded = base64.b64encode(binascii.unhexlify(hex_text))
print("Base64: " + encoded)
8f61aab709e77a202011dffd83818ce64b0c06b7 | Python | LucasRGoes/ports-adapters-sample | /app/domain/ports.py | UTF-8 | 6,092 | 3.25 | 3 | [
"MIT"
] | permissive | """
Ports
=====
These are the tools used by the application to communicate with external
services (or adapters on the architecture terms). It offers messages buses for
the exchange of commands with driver adapters and events with driven adapters.
It also offers interfaces for repositories to implement for database storage
and querying.
ABCs: BookRepository, BookView, UnitOfWork, UnitOfWorkManager, Sender
Classes: MessageBus
"""
import abc
from collections import defaultdict
from . import messages
from .model import Book
from .errors import CommandAlreadySubscribedError
"""
These are the abstract base class for repository driven adapters to
implement, or you could say it is the application's SPI for data storage and
query.
For data storage the Repository design pattern is used, while for data
querying the CQRS design pattern is considered, separating queries from data
creation, update and deletion.
"""
class BookRepository(abc.ABC):
    """Abstract base class (SPI) for repositories that mutate book data.

    Driven adapters implement it to persist books; BookView is the
    query-side counterpart of the CQRS split.
    Methods: save
    """
    @abc.abstractmethod
    def save(self, book: Book):
        """Persist *book* in the application's database.
        Params
        ------
        book: Book -- the book to be inserted on the application's database
        """
        pass
class BookView(abc.ABC):
    """Abstract base class (SPI) for read-only book queries (CQRS).

    Methods: get_all, get_by_isbn, get_by_name, get_by_author
    """
    @abc.abstractmethod
    def get_all(self) -> list:
        """Fetch all books from the database.
        Returns
        -------
        books: list -- a list of all books in the database
        """
        pass
    @abc.abstractmethod
    def get_by_isbn(self, isbn: str) -> Book:
        """Fetch a single book by its ISBN.
        Params
        ------
        isbn: str -- the ISBN of the book to be fetched
        Returns
        -------
        book: Book -- the book with the chosen ISBN
        """
        pass
    @abc.abstractmethod
    def get_by_name(self, name: str) -> list:
        """Fetch all books with a given title.
        Params
        ------
        name: str -- the name of the books to be fetched
        Returns
        -------
        books: list -- all books with the chosen name
        """
        pass
    @abc.abstractmethod
    def get_by_author(self, author: str) -> list:
        """Fetch all books written by a given author.
        Params
        ------
        author: str -- the name of the author of the books to be fetched
        Returns
        -------
        books: list -- all books written by the author
        """
        pass
class UnitOfWork(abc.ABC):
    """Abstract base class for the Unit of Work pattern: a batch of
    repository operations committed or rolled back together.
    Methods: __enter__, __exit__, commit, rollback, books
    """
    @abc.abstractmethod
    def __enter__(self):
        """Entered at the start of a 'with' block; should open the session."""
        pass
    @abc.abstractmethod
    def __exit__(self, type, value, traceback):
        """Run when the 'with' block ends; should release the session."""
        pass
    @abc.abstractmethod
    def commit(self):
        """Flush all pending changes to the database."""
        pass
    @abc.abstractmethod
    def rollback(self):
        """Discard all uncommitted changes."""
        pass
    @property
    @abc.abstractmethod
    def books(self) -> BookRepository:
        """Convenient access to a BookRepository bound to this unit of work.
        Returns
        -------
        books: BookRepository -- an instance of a BookRepository for data
                                 mutation
        """
        pass
class UnitOfWorkManager(abc.ABC):
    """Abstract factory for units of work (Unit of Work pattern).
    Methods: start
    """
    @abc.abstractmethod
    def start(self) -> UnitOfWork:
        """Create and return a fresh UnitOfWork for database usage.
        Returns
        -------
        unit_of_work: UnitOfWork -- a unit of work for database usage
        """
        pass
"""
These are the abstract base class for senders to implement, or you could
say it is the application's SPI for sending messages.
An application can have different types of abstract base classes for
different types of senders like email, queues and logging. For this simple
app we are gonna implement only one base type, a queue type of sender.
"""
class QueueSender(abc.ABC):
    """Abstract base class for adapters that publish application events
    to a message queue.
    Methods: send
    """
    @abc.abstractmethod
    def send(self, msg):
        """Build a queue message for *msg* and dispatch it.
        Params
        ------
        msg -- the msg to be sent
        """
        pass
class MessageBus(object):
    """Message bus (Message Bus pattern): routes each incoming command or
    event to the handlers subscribed to its type.

    Methods: handle, subscribe
    """

    def __init__(self):
        """Create an empty subscriber registry keyed by message-type name."""
        self.subscribers = defaultdict(list)

    def handle(self, msg):
        """Run every handler subscribed to *msg*'s type, in order.

        Params
        ------
        msg -- a command or event instance that needs to be handled
        """
        for listener in self.subscribers[type(msg).__name__]:
            listener.handle(msg)

    def subscribe(self, msg, handler):
        """Register *handler* for the command/event class *msg*.

        Commands keep a 1:1 relationship with handlers, so a second
        subscription to the same command raises
        CommandAlreadySubscribedError.

        Params
        ------
        msg -- the command or event class that the handler wants to subscribe
        handler -- the handler that wants to subscribe
        """
        name = msg.__name__
        registered = self.subscribers[name]
        if name in messages.COMMANDS and registered:
            raise CommandAlreadySubscribedError(
                'The command \'{0}\' already has a handler subscribed to it.' \
                .format(name))
        registered.append(handler)
| true |
a521f20aea8bf64df25b6d32fcd5300fe66a7590 | Python | billm79/COOP2018 | /Chapter05/U05_Ex09_WordCount.py | UTF-8 | 821 | 4.375 | 4 | [] | no_license | # U05_Ex09_WordCount.py
#
# Author: Bill Montana
# Course: Coding for OOP
# Section: A3
# Date: 11 Oct 2017
# IDE: PyCharm Community Edition
#
# Assignment Info
# Exercise: 9
# Source: Python Programming
# Chapter: 5
#
# Program Description
# Counts the number of words in a sentence entered by the user
#
# Algorithm (pseudocode)
# Print intro
# Get sentence from user
# Count words with len(str.split(' '))
# Print word count
def _count_words(sentence):
    """Return the number of whitespace-separated words in *sentence*.

    BUGFIX: the original used len(sentence.split(' ')), which also counts
    the empty strings produced by consecutive, leading or trailing
    spaces; split() with no argument collapses whitespace runs instead.
    """
    return len(sentence.split())

def main():
    """Prompt the user for a sentence and print its word count."""
    # Print intro
    print('This program counts the number of words in a sentence entered by the user.')
    # Get sentence from user
    inputStr = input('Please enter a sentence: ')
    # Count words
    wordCount = _count_words(inputStr)
    # Print word count
    print('Word count: {0}'.format(wordCount))

main()
23cd2ca9bd87065375fabfcc4e0753c5fad18b7c | Python | karbekk/Python_Data_Structures | /Interview/DSA/Tree/sum_of_childes.py | UTF-8 | 803 | 3.65625 | 4 | [] | no_license | from Balanced_Trees.Binary_Search_Trees.Node import Node
# Build the fixture tree:
#            1
#          /   \
#         2     3
#        / \     \
#       4   5     8
#                / \
#               6   7
root = Node(1)
root.leftChild = Node(2)
root.rightChild = Node(3)
root.leftChild.leftChild = Node(4)
root.leftChild.rightChild = Node(5)
root.rightChild.rightChild = Node(8)
root.rightChild.rightChild.leftChild = Node(6)
root.rightChild.rightChild.rightChild = Node(7)
def sum_of_child(root):
    """Replace every internal node's value with itself plus the (already
    updated) values of its subtrees, and return the value at *root*.

    Leaves are returned unchanged; an empty subtree contributes 0.
    """
    if root is None:
        return 0
    if root.leftChild is None and root.rightChild is None:
        return root.data  # leaf: nothing to accumulate
    root.data += sum_of_child(root.leftChild) + sum_of_child(root.rightChild)
    return root.data
def in_order(root):
    """Print node values with an in-order traversal (Python 2 prints);
    returns None."""
    if root is None:
        return
    in_order(root.leftChild)
    print root.data
    in_order(root.rightChild)
# Demo: print the total accumulated into the root minus its original
# value, then dump the mutated tree (the final print shows None, since
# in_order returns nothing).
original_val = root.data
print sum_of_child(root) - original_val
print in_order(root)
| true |
a63075f769d70d22d62586198243f1dcf2969c90 | Python | Ibarguen/Histogram-Generator-Opencv | /his_app_tkinter.py | UTF-8 | 2,970 | 3.015625 | 3 | [] | no_license | import tkinter
import tkfilebrowser
import cv2
import imutils
from PIL import Image
from PIL import ImageTk
from matplotlib import pyplot as plt
def cargar_imagen():
    """Open a file dialog, load the chosen image as RGB and display it.

    Side effects: sets the module globals ``path`` (chosen file),
    ``img1`` (RGB array used by histogram_generator) and ``image1``
    (Tk-compatible photo shown in the window).
    """
    global image1, img1, path
    path = tkfilebrowser.askopenfilename(initialdir="/", title="Select file",
                    filetypes = [( "png" , "* .png" ), ( "jpeg" , "* .jpg" ), ( "all files" , "*" )])
    # OpenCV loads BGR; convert so colours display correctly via PIL/Tk.
    image1 = cv2.imread(path)
    image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
    img1 = image1
    image1 = Image.fromarray(image1).resize((500,480))
    image1 = ImageTk.PhotoImage(image1)
    show = tkinter.Label(image=image1)
    show.image = image1  # keep a reference so Tk does not GC the photo
    show.place(x=150, y=0)
def histogram_generator():
    """Plot the blue/green/red intensity histograms of the loaded image.

    Reads the module-level ``img1`` set by cargar_imagen().
    BUGFIX: the original wrapped the plotting in ``while a <= 1``, which
    computed and drew every channel twice; one pass is enough.  The
    unused ``global image1`` declaration was dropped as well.
    """
    for i, c in enumerate(('b', 'g', 'r')):
        hist = cv2.calcHist([img1], [i], None, [256], [0, 256])
        plt.plot(hist, color=c)
    # NOTE(review): the axis labels look swapped (x is pixel intensity,
    # y is the pixel count); kept unchanged to preserve the output.
    plt.xlabel('Number of pixels')
    plt.ylabel('pixel intensity')
    plt.show()
def rgb_to_bw():
    """Reload the previously chosen file as grayscale and display it.

    Side effects: sets the module globals ``img2`` (grayscale array used
    by hist_black_and_white) and ``image2`` (Tk photo).
    """
    global path, image2, img2
    image2 = cv2.imread(path)
    image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    img2 = image2
    image2 = Image.fromarray(image2).resize((500, 480))
    image2 = ImageTk.PhotoImage(image2)
    show2 = tkinter.Label(image=image2)
    show2.image = image2  # keep a reference so Tk does not GC the photo
    show2.place(x=150, y=0)
def hist_black_and_white():
    """Plot the single-channel histogram of the grayscale image ``img2``."""
    # NOTE(review): ``global`` is unnecessary for reads; kept as-is.
    global image2, img2
    hist = cv2.calcHist([img2], [0], None, [256], [0,256])
    plt.plot(hist, color='gray')
    plt.show()
# Main window: fixed-size blue frame with a row of image buttons at the
# bottom and captions underneath each one.
root = tkinter.Tk()
root.title('Histogram Generator')
frame = tkinter.Frame(root)
frame.config(width = 820, height=650)
frame.config(cursor='pirate')
frame.config(bg='blue')
frame.pack()
# Button row: load image / RGB histogram / grayscale convert / BW histogram / readme.
imagen1 = tkinter.PhotoImage(file='cargar.gif')
button1 = tkinter.Button(frame, image=imagen1, command=cargar_imagen)
button1.place(x=5, y=500)
imagen2 = tkinter.PhotoImage(file='boton_hist_1_.gif')
button2 = tkinter.Button(frame, image=imagen2, command=histogram_generator)
button2.place(x=170, y =500)
imagen3 = tkinter.PhotoImage(file="boton-b_a_w.gif")
button3 = tkinter.Button(frame, image=imagen3, command=rgb_to_bw)
button3.place(x=335, y =500)
imagen4 = tkinter.PhotoImage(file="histo_bn.gif")
button4 = tkinter.Button(frame, image=imagen4, command=hist_black_and_white)
button4.place(x=495, y=500)
imagen5 = tkinter.PhotoImage(file="readme.gif")
# NOTE(review): no command is bound to the ReadMe button.
button5 = tkinter.Button(frame, image=imagen5)
button5.place(x=660, y=500)
# Captions under each button.
text1=tkinter.Label(text="Load image")
text1.place(x=50, y=478)
text2= tkinter.Label(text="Generate Hist RGB")
text2.place(x=190, y=478)
text3= tkinter.Label(text="Converter RGB to B/W")
text3.place(x=340, y=478)
text4 = tkinter.Label(text="Generate hist B/W")
text4.place(x=520, y=478)
text5 = tkinter.Label(text="ReadMe")
text5.place(x=710, y=478)
root.mainloop()
| true |
06166b38249c667e190892aa87328f231e5383b5 | Python | M-S-Saurabh/RandomTrees-Kmeans-5525 | /adaboost.py | UTF-8 | 1,546 | 2.890625 | 3 | [] | no_license | import numpy as np
from sklearn.metrics import accuracy_score
from utils import display_results, read_data
from decision_tree import get_adaboost_learner
def adaboost_fit(X_train, y_train, num_learners):
    """Fit *num_learners* boosted weak learners (decision stumps).

    Sample weights start uniform; each call to get_adaboost_learner
    returns a fitted stump plus the re-weighted samples for the next
    round.  Returns the list of stumps in training order.
    """
    n_samples, _n_features = X_train.shape
    weights = np.full(n_samples, 1 / n_samples)
    stumps = []
    for _ in range(num_learners):
        stump, weights = get_adaboost_learner(X_train, y_train, weights)
        stumps.append(stump)
    return stumps
def adaboost_predict(model, X_test):
    """Predict +/-1 labels as the sign of the alpha-weighted stump votes.

    Each stump is a dict with keys 'attribute' (feature index),
    'threshold', 'flip' (+/-1 orientation) and 'alpha_t' (vote weight).
    """
    n_samples, _ = X_test.shape
    scores = np.zeros(n_samples)
    for stump in model:
        votes = np.where(X_test[:, stump['attribute']] < stump['threshold'],
                         -1.0, 1.0)
        scores += stump['alpha_t'] * (stump['flip'] * votes)
    return np.sign(scores)
def adaboost(dataset: str) -> None:
    """Train a 100-stump AdaBoost ensemble on *dataset* (50/50 split) and
    plot train/test error rates for every ensemble prefix via
    display_results()."""
    X_train, X_test, y_train, y_test = read_data(dataset, test_size=0.5)
    error_rates = []
    max_learners = 100
    model = adaboost_fit(X_train, y_train, max_learners)
    for num_learners in range(0, max_learners+1):
        # Evaluate the ensemble truncated to its first num_learners
        # stumps; 0 is mapped to 1 so the first point uses one stump.
        if num_learners == 0: num_learners = 1
        y_train_pred = adaboost_predict(model[:num_learners], X_train)
        y_test_pred = adaboost_predict(model[:num_learners], X_test)
        err_rate = ( num_learners,
                     1-accuracy_score(y_train, y_train_pred),
                     1-accuracy_score(y_test, y_test_pred) )
        error_rates.append(err_rate)
    display_results(error_rates)
if __name__ == "__main__":
    # Fixed seed so boosting runs are reproducible.
    np.random.seed(42)
    adaboost('breast-cancer-wisconsin.data')
3011ec209f7a7f5dcd74961a0db2ff7f8bcb74bb | Python | AnushaV1/blogly | /models.py | UTF-8 | 2,572 | 2.625 | 3 | [] | no_license | from flask_sqlalchemy import SQLAlchemy
import datetime
# Shared SQLAlchemy handle; bound to the Flask app by connect_db().
db = SQLAlchemy()
# Avatar used when a user supplies no image of their own.
DEFAULT_IMAGE_URL = "https://cdn1.iconfinder.com/data/icons/user-pictures/100/unknown-512.png"
def connect_db(app):
    """Attach the shared SQLAlchemy handle to a Flask *app*."""
    db.app = app
    db.init_app(app)
# models go below !
class User(db.Model):
    """User account: a first/last name pair plus an avatar URL; owns Posts."""
    __tablename__ = "users"
    # def __repr__(self):
    #     u = self
    #     return f"<User id = {u.id} first_name= {u.first_name} last_name={u.last_name} image_url={u.image_url}>"
    # Auto-incrementing surrogate primary key.
    id = db.Column(db.Integer,
                   primary_key=True,
                   autoincrement=True)
    first_name = db.Column(db.String(20),
                           nullable=False)
    last_name = db.Column(db.String(20),
                          nullable=False)
    # Falls back to the generic avatar when no image is supplied.
    image_url = db.Column(db.Text, nullable=False, default = DEFAULT_IMAGE_URL)
    # One-to-many: deleting a user deletes their posts (delete-orphan).
    posts = db.relationship("Post", backref="user", cascade="all, delete-orphan")
    @classmethod
    def get_all_users(cls):
        """Return every User row."""
        return cls.query.all()
    @classmethod
    def user_exist(cls, first_name, last_name):
        """Return the first User with this exact name pair, or None."""
        return cls.query.filter_by(first_name =first_name, last_name= last_name).first()
    @property
    def full_name(self):
        """Return full name of user."""
        return f"{self.first_name} {self.last_name}"
class Post(db.Model):
    """Blog post authored by a User."""
    __tablename__ = "posts"
    id = db.Column(db.Integer,
                   primary_key=True,
                   autoincrement=True)
    title = db.Column(db.Text,
                      nullable=False)
    content = db.Column(db.Text,
                        nullable=False)
    # Stamped at insert time (callable default, evaluated per row).
    created_at = db.Column(db.DateTime,
                           nullable=False,
                           default=datetime.datetime.now)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
    @classmethod
    def post_exist(cls, post_id):
        """Return the Post with this id, or None."""
        return cls.query.filter_by(id = post_id).first()
    @property
    def friendly_date(self):
        """Return nicely-formatted date."""
        return self.created_at.strftime("%m/%d/%Y, %H:%M:%S")
class Tag(db.Model):
    """Tag that can be attached to many posts (via the posts_tags table)."""
    __tablename__ = 'tags'
    id = db.Column(db.Integer, primary_key = True)
    name = db.Column(db.Text, nullable = False)
    # Many-to-many through posts_tags; backref exposes post.tags too.
    posts = db.relationship('Post', secondary='posts_tags', backref = "tags")
class PostTag(db.Model):
    """Join table linking posts and tags (composite primary key)."""

    __tablename__ = "posts_tags"

    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'), primary_key=True)
    tag_id = db.Column(db.Integer, db.ForeignKey('tags.id'), primary_key=True)
0b36bf9ac7887101be5503a0edce19e1111e5ca0 | Python | Zheta/CodeInPlace-PyGame-Project | /animation.py | UTF-8 | 633 | 3.296875 | 3 | [] | no_license | # Import other modules
from zelda_utilities.constants import *
# Helps establish the current frame for sprite animation/image changing
class Animation:
    """Frame counter driven by pygame's millisecond clock.

    anim_sprite() advances ``frame`` at roughly ANIMATION_RATE steps per
    second and returns the current index for sprite-sheet animation.
    """
    def __init__(self):
        # Animation clock: next tick (ms) at which the frame may advance.
        self.next_frame = pygame.time.get_ticks()
        # Starting frame
        self.frame = 0
        # ~12 frames/sec (1000ms // 12)
        self.frame_time = 1000 // ANIMATION_RATE
    def anim_sprite(self):
        """Advance at most one step per call and return the frame index."""
        if pygame.time.get_ticks() > self.next_frame:
            # Wrap after 24 * ANIMATION_RATE frames so the counter never
            # grows without bound.
            self.frame = (self.frame + 1) % (24 * ANIMATION_RATE) # reset > 20 sec
            self.next_frame += self.frame_time
        return self.frame
| true |
4b454b5a2a85453a7ca72ac24f72628bd0bd00d9 | Python | kilala-mega/practice_everyday | /number-of-ways-where-square-of-number-is-equal-to-product-of.py | UTF-8 | 761 | 3.25 | 3 | [] | no_license | """
time O(n**2) space O(n)
can improve to one pass
"""
class Solution:
    """LeetCode 1577: count triplets where nums1[i]^2 == nums2[j]*nums2[k]
    (j < k), plus the symmetric case with the two arrays swapped.

    O(n*m) time overall, O(n+m) extra space for the pair counters.
    """
    def numTriplets(self, nums1: List[int], nums2: List[int]) -> int:
        def count_pairs(target, nums):
            """Number of index pairs j < k with nums[j]*nums[k] == target."""
            seen = Counter()  # values encountered so far (default count 0)
            pairs = 0
            for v in nums:
                if target % v == 0:
                    pairs += seen[target / v]
                seen[v] += 1
            return pairs

        @lru_cache(None)
        def pairs_in_1(target):
            return count_pairs(target, nums1)

        @lru_cache(None)
        def pairs_in_2(target):
            return count_pairs(target, nums2)

        total = 0
        for a in nums1:
            total += pairs_in_2(a * a)
        for b in nums2:
            total += pairs_in_1(b * b)
        return total
| true |
52c0ec55216b82e4c9529ded1ab1babfd6f93cf5 | Python | esmijak/CS-IT7-Taxis | /python/models/newLinearRegression.py | UTF-8 | 6,801 | 2.640625 | 3 | [] | no_license | import numpy as np
from pyspark.sql import *
from schemas import *
from demand_cache import invDemandCache
from pyspark.ml.feature import VectorAssembler
# BUGFIX: LinearRegression is used below but was never imported
# (the script previously raised NameError when it reached the model).
from pyspark.ml.regression import DecisionTreeRegressor, LinearRegression
from pyspark.ml.evaluation import RegressionEvaluator
import datetime

"""Cluster related constants."""
N_OF_CLUSTERS = 1000  # number of clusters used: all of them

"""Time related constants."""
TIME_SLOTS_WITHIN_DAY = 144  # the day is divided into this many slots
N_DAYS_JAN = 31
N_DAYS_FEB = 28
N_DAYS_MAR = 31
N_DAYS_APR = 30
N_DAYS_MAY = 31
N_DAYS_JUN = 30
FIRST_DAY_DAY_OF_WEEK = 3  # weekday of 2015-01-01 (0 = Monday, ...)
# January-May trains; June tests.
N_DAYS_TRAIN = N_DAYS_JAN + N_DAYS_FEB + N_DAYS_MAR + N_DAYS_APR + N_DAYS_MAY
N_OF_TIME_SLOTS_TRAIN = N_DAYS_TRAIN * TIME_SLOTS_WITHIN_DAY
N_DAYS_TEST = N_DAYS_JUN
N_OF_TIME_SLOTS_TEST = N_DAYS_TEST * TIME_SLOTS_WITHIN_DAY

"""Spark initialization."""
spark = SparkSession.builder.master('spark://csit7-master:7077').getOrCreate()
sqlCtx = SQLContext(spark.sparkContext, spark)
demandTable = loadDataFrame(sqlCtx, Table.FINAL_DATA)
slotsTable = loadDataFrame(sqlCtx, Table.TIME_SLOTS)
slotsTableCol = slotsTable.collect()
slot_count = len(slotsTableCol)
# 180 days (January-June) of seconds spread evenly over all slots.
time_per_slot = (60 * 60 * 24 * 180) / slot_count
start_time = datetime.datetime(2015, 1, 1, 0).timestamp()


def find_slot(timestamp):
    """Return the 0-based time-slot index of *timestamp* (a datetime)."""
    return int((timestamp.timestamp() - start_time) / time_per_slot)


"""Table registration and the global demand dataframe."""
registerTable(sqlCtx, Table.FINAL_DATA)
registerTable(sqlCtx, Table.TIME_SLOTS)
# df holds (time slot id, origin cluster, demand, ...) ordered by slot then origin.
df = spark.sql('SELECT pickup_timeslot_id, origin, amount, time_of_day_code,hour,minute FROM FINAL_DATA ORDER BY 1,2')
df.show(10)

errorsRMSE = []
errorsR2 = []
for curCluster in range(N_OF_CLUSTERS):
    print('current cluster number is: ', curCluster)
    # Demand time series restricted to the current cluster.
    df_for_one_cluster = df[df.origin == curCluster].select('pickup_timeslot_id', 'amount')
    demandListDict = df_for_one_cluster.collect()
    rows_training = []
    rows_testing = []
    demandCount = 0
    TOTAL_SLOTS_FOR_LOOP = N_OF_TIME_SLOTS_TEST + N_OF_TIME_SLOTS_TRAIN
    print('total time slots in loop: ', TOTAL_SLOTS_FOR_LOOP)
    for instance in slotsTableCol:
        slot_nb = find_slot(instance[0])
        # Stop once both sets are filled or the demand rows are exhausted.
        if slot_nb > TOTAL_SLOTS_FOR_LOOP - 1 or demandCount > len(demandListDict) - 1:
            break
        # Calendar features of the current slot.
        weekday = instance[0].weekday()
        day = instance[0].day
        week_nb = instance[0].isocalendar()[1]
        hour = instance[0].hour
        minute = instance[0].minute
        # Demand for this slot; slots with zero demand are absent from
        # the table, so a gap means demand 0.
        demandDict = demandListDict[demandCount].asDict()
        demandSlot = demandDict['pickup_timeslot_id']
        if slot_nb == demandSlot:
            demand = demandDict['cnt']
            demandCount += 1
        elif slot_nb < demandSlot:
            demand = 0
        else:
            print('coucou should not come here: ', slot_nb, demandSlot)
            demand = 0
        # The first N_OF_TIME_SLOTS_TRAIN slots train, the rest test.
        if len(rows_training) < N_OF_TIME_SLOTS_TRAIN:
            rows_training.append((slot_nb, weekday, day, week_nb, hour, minute, demand))
        else:
            rows_testing.append((slot_nb, weekday, day, week_nb, hour, minute, demand))
    print('train rows len: ', len(rows_training), 'test rows len: ', len(rows_testing))
    # Assemble feature-vector / label dataframes.
    columns = ["slot_id", "day_of_week", "day_of_month", "week_nb", "hour", "minute", "demand"]
    df_training = spark.createDataFrame(rows_training, columns)
    df_testing = spark.createDataFrame(rows_testing, columns)
    assembler = VectorAssembler(inputCols=columns[:-1], outputCol='features')
    final_data_training = assembler.transform(df_training).select('features', 'demand')
    final_data_testing = assembler.transform(df_testing).select('features', 'demand')
    final_data_training.describe().show()
    final_data_testing.describe().show()
    # Model and predictions (the earlier DecisionTreeRegressor variant
    # was removed; it lives in the repository history).
    linearR = LinearRegression(labelCol='demand')
    lr_model = linearR.fit(final_data_training)
    predictions = lr_model.transform(final_data_testing)
    # BUGFIX: getMaxDepth()/toDebugString are decision-tree attributes and
    # raise AttributeError on a linear model; report its own parameters.
    print("Linear Regression coefficients = %s, intercept = %g"
          % (lr_model.coefficients, lr_model.intercept))
    # Held-out-month evaluation.
    evaluatorRMSE = RegressionEvaluator(labelCol="demand", predictionCol="prediction", metricName="rmse")
    rmse = evaluatorRMSE.evaluate(predictions)
    errorsRMSE.append(rmse)
    print("Root Mean Squared Error (RMSE) on test data = %g" % rmse)
    evaluatorR2 = RegressionEvaluator(labelCol="demand", predictionCol="prediction", metricName="r2")
    r2 = evaluatorR2.evaluate(predictions)
    errorsR2.append(r2)
    print("R Squared Error (R2) on test data = %g" % r2)

# Persist per-cluster errors; 'with' guarantees the files are closed
# (the original also shadowed the builtin 'file' and never closed on error).
header = ("Training set contains " + str(N_DAYS_TRAIN) + " days i.e. "
          + str(N_OF_TIME_SLOTS_TRAIN) + " time slots \nTest set contains "
          + str(N_DAYS_TEST) + " days i.e. " + str(N_OF_TIME_SLOTS_TEST)
          + " time slots \n")
with open("decision_tree_rmse.txt", "w") as out:
    out.write(header)
    for errorIndex in range(N_OF_CLUSTERS):
        out.write("RMSE for cluster " + str(errorIndex) + " is " + str(errorsRMSE[errorIndex]) + "\n")
with open("decision_tree_r2.txt", "w") as out:
    out.write(header)
    for errorIndex in range(N_OF_CLUSTERS):
        out.write("R2 for cluster " + str(errorIndex) + " is " + str(errorsR2[errorIndex]) + "\n")
9ed4c94e1fc1871ee0ea8d9a06ef96038d657f46 | Python | ksjohnson94/Coding_Dojo | /CodingDojo_Python/myEnvironments/hello_world.py | UTF-8 | 58 | 3.375 | 3 | [] | no_license | for count in range(0,1000)
print "looping -", count
| true |
80e6a1f2f1edb8483628538030717ade4a8a1ea1 | Python | FXIhub/hummingbird | /hummingbird/interface/plotdata.py | UTF-8 | 5,342 | 2.734375 | 3 | [
"BSD-2-Clause"
] | permissive | # --------------------------------------------------------------------------------------
# Copyright 2016, Benedikt J. Daurer, Filipe R.N.C. Maia, Max F. Hantke, Carl Nettelblad
# Hummingbird is distributed under the terms of the Simplified BSD License.
# -------------------------------------------------------------------------
"""Stores the data associated with a given broadcast"""
import numpy
from .ringbuffer import RingBuffer, RingBufferStr
class PlotData(object):
    """Stores the data associated with a given broadcast.

    Holds three parallel ring buffers -- x values, y values and string
    labels -- plus the bookkeeping the GUI needs: history flags, a running
    sum/max reduction and save/restore support.
    """
    def __init__(self, parent, title, maxlen=1000, group=None):
        self._title = title
        self._group = group
        self._y = None  # pylint: disable=invalid-name
        self._x = None  # pylint: disable=invalid-name
        self._l = None  # pylint: disable=invalid-name
        self._num = None  # sample count of the running sum_over() reduction
        self._parent = parent
        self._maxlen = maxlen
        self.restored = False
        self.ishistory = (title[:7] == 'History')
        self.recordhistory = False
        self.clear_histogram = False
        # A per-plot 'history_length' entry in the parent configuration
        # overrides the default buffer capacity.
        if title in parent.conf:
            if('history_length' in parent.conf[title]):
                self._maxlen = parent.conf[title]['history_length']

    def append(self, y, x, l):
        """Append the new data to the ringbuffers"""
        if(self._y is None):
            if(isinstance(y, numpy.ndarray)):
                # Make sure the image ringbuffers don't take more than
                # 200 MBs. The factor of 2 takes into account the fact
                # that the buffer is twice as big as its usable size
                self._maxlen = max(1, min(self._maxlen, 1024*1024*200//(2*y.nbytes)))
            self._y = RingBuffer(self._maxlen)
        if(self._x is None):
            self._x = RingBuffer(self._maxlen)
        if(self._l is None):
            self._l = RingBufferStr(self._maxlen)
        self._y.append(y)
        self._x.append(x)
        self._l.append(l)
        self._num = None  # invalidate any running reduction

    def sum_over(self, y, x, l, op='sum'):
        """Fold ``y`` into a running reduction kept in a 1-slot buffer.

        ``op='sum'`` maintains a running mean, ``op='max'`` an
        element-wise maximum; ``x``/``l`` always track the latest sample.
        """
        if self._y is None or self._num is None:
            self._y = RingBuffer(1)
            self._x = RingBuffer(1)
            self._l = RingBufferStr(1)
            self._x.append(x)
            self._y.append(y.astype('f8'))
            self._l.append(l)
            self._num = 1.
            self._y._data[0] = y
        else:
            self._num += 1.
            if(op == 'sum'):
                # Incremental mean: old * (n-1)/n + new/n
                self._y._data[0] = self._y._data[0] * (self._num-1)/self._num + y/self._num
            elif(op == 'max'):
                self._y._data[0] = numpy.maximum(self._y._data[0], y)
            self._x.append(x)
            self._l.append(l)

    def resize(self, new_maxlen):
        """Change the capacity of the buffers"""
        if(self._y is not None):
            self._y.resize(new_maxlen)
        if(self._x is not None):
            self._x.resize(new_maxlen)
        if(self._l is not None):
            self._l.resize(new_maxlen)
        self._maxlen = new_maxlen

    def clear(self):
        """Clear the buffers"""
        if(self._y is not None):
            self._y.clear()
            self._y = None
        if(self._x is not None):
            self._x.clear()
        if(self._l is not None):
            self._l.clear()
        self.clear_histogram = True

    @property
    def title(self):
        """Returns the plot data title"""
        return self._title

    @property
    def group(self):
        """Returns the plot group"""
        return self._group

    @property
    def y(self):
        """Gives access to the y buffer"""
        return self._y

    @property
    def x(self):
        """Gives access to the x buffer"""
        return self._x

    @property
    def l(self):
        """Gives access to the l buffer"""
        return self._l

    @property
    def maxlen(self):
        """Gives access to maximum size of the buffers"""
        return self._maxlen

    def __len__(self):
        """Returns the number of elements in the buffers"""
        if(self._y is not None):
            return len(self._y)
        else:
            return 0

    @property
    def nbytes(self):
        """Returns the number of bytes taken by the three buffers"""
        if(self._y is not None):
            # Bug fix: the original summed self._y.nbytes twice and never
            # counted the label buffer. getattr guards the case where the
            # string buffer does not expose nbytes.
            return self._x.nbytes + self._y.nbytes + getattr(self._l, 'nbytes', 0)
        return 0

    def save_state(self, save_data=False):
        """Return a serialized representation of the PlotData for saving to disk"""
        pds = {}
        pds['data_source'] = [self._parent.hostname, self._parent.port, self._parent.ssh_tunnel]
        if(save_data):
            pds['x'] = self.x.save_state()
            pds['y'] = self.y.save_state()
            pds['l'] = self.l.save_state()
        pds['title'] = self.title
        pds['group'] = self.group
        pds['maxlen'] = self.maxlen
        pds['recordhistory'] = self.recordhistory
        return pds

    def restore_state(self, state, parent):
        """Restore a previous stored state"""
        # Bug fix: was `self.parent = parent`, which created a stray
        # attribute and left self._parent stale.
        self._parent = parent
        if 'x' in state:
            self._x = RingBuffer.restore_state(state['x'])
            self._y = RingBuffer.restore_state(state['y'])
            self._l = RingBufferStr.restore_state(state['l'])
            self.restored = True
        self._title = state['title']
        self._maxlen = state['maxlen']
        self.recordhistory = state['recordhistory']
| true |
da9f6a41a34c60463be9e2c3d8103d9e0fa63a6c | Python | DineshSuresh60/SSN-Blog | /FlaskBlog/flaskblog/routes.py | UTF-8 | 4,877 | 2.671875 | 3 | [] | no_license | import secrets
from PIL import Image
import os
from flask import render_template,url_for, flash, redirect, request
from flaskblog.forms import RegistrationForm, LoginForm, UpdateAccountForm
from flaskblog import app, db, bcrypt
from flaskblog.models import User,Post
from flask_login import login_user, current_user, logout_user, login_required
posts = [
{
'author' : 'Chirag',
'title' : 'Blog post 1',
'content' : '''Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,
quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse
cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non
proident, sunt in culpa qui officia deserunt mollit anim id est laborum.''',
'date_posted' : 'August 18 2021'
},
{
'author' : 'Joe',
'title' : 'Blog post 2',
'content' : '''Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,
quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse
cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non
proident, sunt in culpa qui officia deserunt mollit anim id est laborum.''',
'date_posted' : 'August 18 2021'
}
]
@app.route('/') #homepage
@app.route('/home') #multiple routes using the same function
def home():
    """Render the home page with the list of posts."""
    return render_template('home.html', posts=posts) #lhs - name of data passed to template
@app.route('/about') #static about page
def about():
    """Render the static About page."""
    return render_template('about.html', title='About')
# NOTE(review): this guard sits mid-module. When the file is executed as a
# script, app.run() blocks here BEFORE register/login/etc. below are ever
# defined, so those routes are only available when the module is imported.
# It should probably be moved to the end of the file.
if __name__ == '__main__':
    app.run(debug=True) #to run via python
@app.route('/register', methods=['GET', 'POST']) #account creation form
def register():
    """Create a new user account.

    Already-authenticated users are bounced to the home page. On a valid
    POST the password is bcrypt-hashed, the user row is committed and a
    one-time success flash is shown.
    """
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = RegistrationForm()
    if form.validate_on_submit():
        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf8')
        user = User(username=form.username.data, email=form.email.data, password=hashed_password)
        db.session.add(user)
        db.session.commit()
        flash(f'Your account has been created! You will now be able to log in :)', 'success') #1 time alert
        return redirect(url_for('home'))
    return render_template('register.html', title='Register', form=form)
@app.route('/login', methods=['GET', 'POST']) #login form
def login():
    """Authenticate a user by email + bcrypt-checked password.

    Honors the ?next= query parameter after a successful login.
    NOTE(review): `next_page` is redirected to unvalidated -- a crafted
    external URL makes this an open redirect; confirm it is relative.
    """
    if current_user.is_authenticated:
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user and bcrypt.check_password_hash(user.password, form.password.data):
            login_user(user, remember=form.remember.data)
            next_page = request.args.get('next')
            return redirect(next_page) if next_page else redirect(url_for('home'))
        else:
            flash('Login Unsuccessful, please check your email and password!', 'danger')
    return render_template('login.html', title='Login', form=form)
@app.route('/logout')
def logout():
    """End the current session and return to the home page."""
    logout_user()
    return redirect(url_for('home'))
def save_picture(form_picture):
    """Store an uploaded profile picture and return its generated filename.

    The file is renamed to a random 16-hex-digit name (original extension
    kept), downscaled to fit 125x125 and written under
    static/Profile_pictures.
    """
    random_hex = secrets.token_hex(8)
    _, f_ext = os.path.splitext(form_picture.filename)
    picture_fn = random_hex + f_ext
    picture_path = os.path.join(app.root_path, 'static/Profile_pictures',picture_fn)
    output_size=(125,125)
    i = Image.open(form_picture)
    # thumbnail() resizes in place, preserving aspect ratio.
    i.thumbnail(output_size)
    i.save(picture_path)
    return picture_fn
@app.route('/account', methods=['GET', 'POST'])
@login_required
def account():
    """Show and update the logged-in user's profile.

    POST: optionally replace the profile picture, then persist the new
    username/email. GET: pre-fill the form with the current values.
    """
    form = UpdateAccountForm()
    if form.validate_on_submit():
        if form.picture.data:
            picture_file = save_picture(form.picture.data)
            current_user.image_file = picture_file
        current_user.username = form.username.data
        current_user.email = form.email.data
        db.session.commit()
        flash('Your accont was successfully updated', 'success')
        # Redirect-after-POST avoids form re-submission on refresh.
        return redirect(url_for('account'))
    elif request.method == 'GET':
        form.username.data = current_user.username
        form.email.data = current_user.email
    image_file = url_for('static', filename='Profile_pictures/' + current_user.image_file)
    return render_template('account.html', title='Account', image_file=image_file, form=form)
e0b77779fa07555ad30450c54d741181bf02f9e2 | Python | Hitesh-Valecha/Face_detection | /faces-train.py | UTF-8 | 2,356 | 2.8125 | 3 | [
"MIT"
] | permissive | import cv2
import os
import numpy as np
from PIL import Image
import pickle
# Train an LBPH face recognizer from images/<person-name>/*.{png,jpg}.
# Directory names become labels; the label->id map is pickled and the
# trained model is written to trainer.yml.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
image_dir = os.path.join(BASE_DIR, "images")
face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
current_id = 0
label_ids = {}
y_labels = [] #numbers related to labels
x_train = [] #numbers of actual pixel values
for root, dirs, files in os.walk(image_dir):
    for file in files:
        if file.endswith("png") or file.endswith("jpg"):
            path = os.path.join(root, file)
            # os.path.dirname(path) = root {you can use root instead}
            label = os.path.basename(os.path.dirname(path)).replace(" ", "-").lower()
            #print(label, path)
            # Assign a fresh integer id the first time a label is seen.
            if not label in label_ids:
                label_ids[label] = current_id
                current_id += 1
            id_ = label_ids[label]
            #print(label_ids)
            #y_labels.append(label) # some number value for labels
            #x_train.append(path) #verify this image, turn into a NUMPY array, turn GRAY
            pil_image = Image.open(path).convert("L") # L converts image to grayscale
            size = (550, 550)
            # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; use
            # Image.LANCZOS on modern Pillow — confirm installed version.
            final_image = pil_image.resize(size, Image.ANTIALIAS)
            image_array = np.array(final_image, "uint8")
            #print(image_array)
            """every image has pixel values,
            first we turn images into grayscale then we turned that grayscale image into a numpy array
            and we use that as a list of numbers that are related to this image
            and with that then we can actually start training it"""
            faces = face_cascade.detectMultiScale(image_array, scaleFactor=1.5, minNeighbors=5)
            # Keep only the face region(s) as training samples.
            for (x,y,w,h) in faces:
                roi = image_array[y:y+h, x:x+w]
                x_train.append(roi)
                y_labels.append(id_)
#print(y_labels)
#print(x_train)
#pickle files can be saved with any extension pkg,picle not pickle as compulsory
with open("labels.pickle", 'wb') as f: #f is file & wb is writing bytes
    pickle.dump(label_ids, f) #dump label_ids to that file
recognizer.train(x_train, np.array(y_labels))
recognizer.save("trainer.yml")
def lcs(xstr, ystr):
    """Return a longest common subsequence of ``xstr`` and ``ystr``.

    Fixes the original memoization, which (a) used a string key
    ``xstr + ',' + ystr`` that can collide when the inputs contain
    commas, (b) treated a cached empty string as a cache miss
    (``dic.get(a, 0) != 0``), and (c) relied on a global ``dic``.
    This version is a self-contained O(len(x)*len(y)) dynamic program.
    """
    m, n = len(xstr), len(ystr)
    # length[i][j] = LCS length of xstr[i:] and ystr[j:]
    length = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m - 1, -1, -1):
        for j in range(n - 1, -1, -1):
            if xstr[i] == ystr[j]:
                length[i][j] = length[i + 1][j + 1] + 1
            else:
                length[i][j] = max(length[i + 1][j], length[i][j + 1])
    # Walk the table to reconstruct one optimal subsequence.
    out = []
    i = j = 0
    while i < m and j < n:
        if xstr[i] == ystr[j]:
            out.append(xstr[i])
            i += 1
            j += 1
        elif length[i + 1][j] >= length[i][j + 1]:
            i += 1
        else:
            j += 1
    return "".join(out)
import sys
# Read FASTA-formatted records from stdin: '>' lines start a new record,
# all other lines are sequence data. The LCS of the first two records is
# printed at the end.
inputs=sys.stdin.readlines()
l=[]
dic={}
s=''
for line in inputs:
    if '>' in line:
        if s!='':
            l.append(s)
            s=''
    else:
        s+=line.strip()
l.append(s)
# Fixed: was the Python 2 `print` statement; the call form works on 2 and 3.
print(lcs(l[0],l[1]))
| true |
51980124bd7bc5b200db86b1aaa51350639f4e73 | Python | CodeForContribute/Algos-DataStructures | /searching&Sorting/merge_sorting.py | UTF-8 | 1,168 | 4.15625 | 4 | [] | no_license | """
Time Complexity in all the cases (worst,average, best ) - O(nlog(n))
Auxiliary Space - O(n)
Algorithmic Paradigm - Divide & Conquer
Sorting in Place : No
Stable: Yes
Application:
1. Can be used in Sorting linked list in O(n(log(n))
as merge sort required space O(1) for merging linked list so efficient.
"""
def MergeSort(arr):
    """Sort ``arr`` in place using recursive merge sort.

    Splits the list in half, sorts each half recursively and merges the
    two sorted halves back into ``arr``. O(n log n) time, O(n) auxiliary
    space; returns None (the input list is mutated).
    """
    if len(arr) <= 1:
        return
    middle = len(arr) // 2
    left_half = arr[:middle]
    right_half = arr[middle:]
    MergeSort(left_half)
    MergeSort(right_half)
    write = 0
    li = ri = 0
    # Merge step: take the strictly smaller head; ties go to the right
    # half (same tie-breaking as the original L[i] < R[j] comparison).
    while li < len(left_half) and ri < len(right_half):
        if left_half[li] < right_half[ri]:
            arr[write] = left_half[li]
            li += 1
        else:
            arr[write] = right_half[ri]
            ri += 1
        write += 1
    # Drain whichever half still has elements.
    while li < len(left_half):
        arr[write] = left_half[li]
        li += 1
        write += 1
    while ri < len(right_half):
        arr[write] = right_half[ri]
        ri += 1
        write += 1
def print_list(arr):
    """Print the elements of ``arr`` on one line, separated by spaces."""
    for item in arr:
        print(item, end=" ")
# Demo entry point: sort a small sample list and show before/after.
if __name__ == '__main__':
    arr = [12, 11, 13, 5, 6, 7]
    print ("Given array is", end="\n")
    print_list(arr)
    MergeSort(arr)
    print("Sorted array is: ", end="\n")
    print_list(arr)
38cbe2db3fa3a39a3c087e837ea0d53ffad0b443 | Python | gmagannaDevelop/ARN.py | /arnstruct/core/structure.py | UTF-8 | 1,689 | 3.671875 | 4 | [
"MIT"
] | permissive | from typing import List, Dict, Union, Set, Optional, NoReturn, Any, Callable, Type
class Structure(object):
    """An RNA secondary structure written in dot-bracket notation.

    Valid characters are '(', ')' and '-'; the instance wraps the raw
    string and offers validation, iteration and motif search.
    """

    # Alphabet accepted by the constructor (name-mangled class private).
    __Character: Set[str] = set("(-)")
    # Pairing table kept for structural operations.
    __complements: Dict[str, str] = {"(": ")", ")": "(", "-": "-"}

    @staticmethod
    def is_valid_structure(struct: str) -> bool:
        """ Determine if input `struct` is a valid ARN structure """
        return set(struct).issubset(Structure.__Character)

    def __init__(self, struct: str):
        """Wrap ``struct``, raising ValueError on any invalid character."""
        if not Structure.is_valid_structure(struct):
            raise ValueError(
                f"Entry sequence contains invalid characters. Valid character is {Structure.__Character}"
            )
        self._struct: str = struct

    @property
    def structure(self):
        """The raw dot-bracket string."""
        return self._struct

    def __iter__(self):
        return iter(self._struct)

    def motif_search_struct(self, other, th):
        """Collect every length-``th`` window of this structure that also
        occurs as a substring of ``other`` (a str or Structure).

        Windows are returned in order of their start position; duplicates
        are kept.
        """
        if isinstance(other, Structure):
            other = other.structure
        elif not isinstance(other, str):
            raise TypeError(
                f"argument other is of type {type(other)}. Please provide a string or Sequence"
            )
        matches = []
        for start in range(len(self._struct)):
            window = self._struct[start:start + th]
            if len(window) == th and window in other:
                matches.append(window)
        return matches
| true |
80878bc65bbdee1eaa9d7582478c90c4c02a4963 | Python | ArjunBhushan/imhungry | /watson.py | UTF-8 | 1,884 | 2.953125 | 3 | [] | no_license | from __future__ import print_function
import json
import re
from watson_developer_cloud import NaturalLanguageUnderstandingV1
from watson_developer_cloud.natural_language_understanding_v1 import Features, EntitiesOptions, SentimentOptions, KeywordsOptions
def getSentient(data):
    """Run IBM Watson NLU entity + sentiment analysis on ``data``.

    Returns the analysis response serialized as a JSON string (and also
    prints it). Requires network access to the Watson service.
    SECURITY NOTE(review): credentials are hard-coded here — they should
    be moved to environment variables / config and rotated.
    """
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username='4cf22873-ff53-4542-a966-0ce007daea71',
        password='VM2UsYDKHxVa')
    response = natural_language_understanding.analyze(
        text=data,
        features=Features(entities=EntitiesOptions(), sentiment=SentimentOptions()))
    print (json.dumps(response,indent=2))
    return (json.dumps(response, indent=2))
def getScore(info):
    """Extract the sentiment score from Watson's pretty-printed JSON.

    NOTE(review): this parses the formatted JSON text by locating the word
    'score' and slicing a fixed 4-character window — it assumes the
    indent=2 layout from getSentient() and a score like `0.12` / `-0.1`;
    json.loads(...) would be robust. Raises AttributeError if no 'score'
    token is found.
    """
    info = getSentient(info)
    a=re.search(r'\b(score)\b', info)
    return float(info[a.end()+3:a.end()+7])
def findFood(score):
    """Map a sentiment score in [-1, 1] to a food suggestion.

    Writes three lines to 'sentientdata.txt' — search radius in metres,
    comma-separated food keywords, and price levels — and prints a
    human-readable suggestion.

    Scores below -1 fall through to the "happy" branch exactly like the
    original (kept for compatibility).
    """
    if -1 <= score < -0.8:
        radius, foods, prices = "1500", "icecream,dessert", "1,2"
        message = "Sounds like you need some Ice Cream"  # distance: close
    elif -0.8 <= score < -0.6:
        radius, foods, prices = "1500", "coffee,cafe", "1,2"
        message = "Let's go to a coffee shop"
    elif -0.6 <= score < 0.4:
        radius, foods, prices = "5000", "burger,hotdog,fastfood", "1,2"
        message = "When in doubt, fast food never hurts"  # distance not relevant
    elif 0.4 <= score < 0.8:
        radius, foods, prices = "5000", "chinese,italian,mexican", "1,2"
        message = "You seem pretty relaxed, how about some takeout"
    else:
        radius, foods, prices = "20000", "sushi,healthy,steak", "2,3,4"
        message = "You seem happy, let's celebrate with some good food"
    # Fix: the original bound the handle to the builtin name `file` and
    # leaked it if a write raised; `with` guarantees closure.
    with open("sentientdata.txt", "w") as out:
        out.write(radius + "\n")
        out.write(foods + "\n")
        out.write(prices + "\n")
    print(message)
a1d89c0ff8f03f0d5813c6cd4878c8efccb300d7 | Python | shahardekel/Bitcoin-mechanism--miner-vs.-user | /functions.py | UTF-8 | 9,815 | 2.890625 | 3 | [] | no_license | ############################
# Insert your imports here
import os
import pandas as pd
import csv
############## filtering data by current time, return as a dataframe #################
def filter_mempool_data(mempool_data, current_time):
    """Return the rows still pending at ``current_time``.

    A transaction is pending if it entered the mempool strictly before
    ``current_time`` and was removed strictly after it.
    """
    entered_before = mempool_data['time'] < current_time
    still_present = mempool_data['removed'] > current_time
    return mempool_data.loc[entered_before & still_present]
############## creating a list of transactions that entered the block #################
def greedy_knapsack(block_size, all_pending_transactions):
    """Greedily fill a block with the highest fee-per-byte transactions.

    Transactions are ranked by fee/size (descending) and added while they
    strictly fit in the remaining capacity; the scan stops early once the
    remaining space is below the smallest transaction size. Returns the
    chosen TXIDs in inclusion order.
    """
    ranked = all_pending_transactions.assign(
        f=all_pending_transactions['fee'] / all_pending_transactions['size']
    ).sort_values('f', ascending=False).drop('f', axis=1)
    smallest = ranked['size'].min()
    chosen = []
    remaining = block_size
    for _, row in ranked.iterrows():
        if remaining < smallest:
            break
        if row['size'] < remaining:
            chosen.append(row['TXID'])
            remaining -= row['size']
    return chosen
def evaluate_block(tx_list, all_pending_transactions):
    """Return the total fee (satoshi) collected by ``tx_list``.

    Mirrors the original exactly: accumulates the matching 'fee' values
    as a 1-element array, so an empty ``tx_list`` still raises TypeError.
    """
    total = 0
    for txid in tx_list:
        total = total + all_pending_transactions.loc[
            all_pending_transactions['TXID'] == txid, 'fee'].values
    return total[0]
# return a dict of tx_id as keys, for each tx_id its VCG price in satoshi]
def VCG_prices(block_size, tx_list, all_pending_transactions):
    """Compute the VCG (Vickrey-Clarke-Groves) price of each included TX.

    For each tx in the block: price = V(block without tx available) -
    (V(block) - fee(tx)), i.e. the externality it imposes on the others.
    NOTE(review): the fee lookup assumes column 0 is TXID and column 1 is
    fee (positional iloc) — confirm against the dataframe layout.
    """
    #takes 2 minutes
    vcg = {}
    V_tx_without_i={}
    V_tx_excluding_i={}
    fees={}
    # Build a TXID -> fee map for the included transactions (O(n*m) scan).
    for id in tx_list:
        for txid in range(all_pending_transactions.shape[0]):
            if id==all_pending_transactions.iloc[txid,0]:
                fees[all_pending_transactions.iloc[txid,0]]=all_pending_transactions.iloc[txid,1]
    revenue = evaluate_block(tx_list, all_pending_transactions)
    for tx in tx_list:
        # Re-run the knapsack with tx removed from the candidate pool.
        apt_i=all_pending_transactions[all_pending_transactions.TXID != tx]
        tx_list_i=greedy_knapsack(block_size,apt_i)
        r_i=evaluate_block(tx_list_i,apt_i)
        V_tx_without_i[tx]=r_i
        V_tx_excluding_i[tx]=revenue-fees[tx]
        vcg[tx]=V_tx_without_i[tx]-V_tx_excluding_i[tx]
    return vcg
def blocks_after_time_1510266000():
    """Removal timestamps of the first ten blocks mined after t=1510266000.

    Precomputed once from the mempool dump (the original derivation is
    the commented-out pandas code in the repository history).
    """
    timestamps = (1510266190, 1510266490, 1510267250, 1510267730,
                  1510267834, 1510268791, 1510269386, 1510269627,
                  1510270136, 1510270686)
    return list(timestamps)
def blocks_by_time_1510266000():
    """Ten hard-coded block timestamps up to (and including) t=1510266500."""
    return [1510261800, 1510262800, 1510263100, 1510264600, 1510264700,
            1510265400, 1510265600, 1510265900, 1510266200, 1510266500]
#PART B#
def load_my_TXs(my_TXs_full_path):
    """Load the user's transactions CSV into a pandas DataFrame."""
    return pd.read_csv(my_TXs_full_path)
class BiddingAgent:
    """Base class holding the shared auction parameters.

    ``time_begin_lin`` / ``time_end_lin`` bound the linear value-decay
    window of a transaction; ``block_size`` is the block capacity in
    bytes. Subclasses implement ``bid``.
    """
    def __init__(self, time_begin_lin, time_end_lin, block_size):
        self.block_size = block_size
        self.time_begin_lin = time_begin_lin
        self.time_end_lin = time_end_lin
class SimpleBiddingAgent(BiddingAgent):
    """Naive agent: bid halfway between min and max value, +20% for
    transactions larger than 1000 bytes. Ignores the mempool state."""
    def bid(self, TX_min_value, TX_max_value, TX_size, current_mempool_data, current_time):
        """Return a flat fee bid derived only from the TX's own values."""
        offer = (TX_min_value + TX_max_value) / 2
        if TX_size > 1000:
            offer *= 1.2
        return offer
class ForwardBiddingAgent(BiddingAgent):
    """Agent that simulates future blocks to pick the cheapest fee-per-byte
    ``z`` maximizing its gross utility (value at confirmation time minus
    the fee z * TX_size)."""
    def bid(self, TX_min_value, TX_max_value, TX_size, current_mempool_data, current_time):
        """Return (best bid, expected confirmation time, expected utility);
        (-1, -1, 0) when no positive-utility bid exists."""
        #takes 21 minutes
        ## IMPLEMENT for Forward_agent part ##
        t_min=self.time_begin_lin
        t_max=self.time_end_lin
        block_size=self.block_size
        fee_per_byte = current_mempool_data['fee'] / current_mempool_data['size']
        cmd_with_fpb = current_mempool_data.assign(f=fee_per_byte)
        # Simulate successive blocks: blocks[i] holds the fee-per-byte of
        # every TX mined in minute-block i, removing them from the pool.
        blocks={}
        i=0
        while cmd_with_fpb.empty==False and i<=t_max/60:
            tx_list=greedy_knapsack(block_size,cmd_with_fpb)
            blocks[i]=list()
            for id in tx_list:
                blocks[i].append(cmd_with_fpb[cmd_with_fpb['TXID'] == id]['f'].values[0])
                cmd_with_fpb= cmd_with_fpb[cmd_with_fpb['TXID'] != id]
            i+=1
        t_z={}
        #calculate t(z): first block whose cheapest included TX pays <= z
        for z in range(5, 1000, 5):
            for i in blocks.keys():
                if min(blocks[i])<=z:
                    t_z[z]=i*60
                    break
        dict_fee={}
        dict_time={}
        dict_gu={}
        for z in t_z.keys():
            # Linear value decay: full value before t_min, 0 after t_max.
            value = 0
            if (t_z[z]<t_min):
                value=TX_max_value
            elif t_z[z]>=t_min and t_z[z]<t_max:
                value=TX_max_value-(((TX_max_value-TX_min_value)/(t_max-t_min))*(t_z[z]-t_min))
            elif t_z[z]>=t_max:
                value=0
            #calculate fee
            if TX_size<block_size:
                fee_z = z * TX_size
            else:
                fee_z=0
            #calculate GU (gross utility)
            GU=value-fee_z
            dict_fee[z]=fee_z
            dict_time[z]=t_z[z]
            dict_gu[z]=GU
        #check for max GU
        max_GU = max(dict_gu, key=dict_gu.get)
        bid_best_z = dict_fee[max_GU]
        time_if_z = dict_time[max_GU]
        utility_if_z = dict_gu[max_GU]
        # Non-positive utility: abstain (sentinel -1 for bid and time).
        if utility_if_z<=0:
            utility_if_z=0
            bid_best_z=-1
            time_if_z=-1
        return bid_best_z, time_if_z, utility_if_z
class CompetitiveBiddingAgent(BiddingAgent):
    """Agent for the competitive round: simulates blocks on a pre-filtered
    mempool window, accounts for free capacity, and falls back to the
    ForwardBiddingAgent's bid when that yields better utility."""
    def bid(self, TX_min_value, TX_max_value, TX_size, current_mempool_data, current_time):
        """Return the competitive fee bid (0 means abstain)."""
        ## IMPLEMENT for competitive part ##
        #print("competitive starting")
        t_min = self.time_begin_lin
        t_max = self.time_end_lin
        block_size = self.block_size
        # Hard-coded window of the competition round (epoch seconds).
        t0 = 1510262000
        t_star = 1510268001
        fee_per_byte = current_mempool_data['fee'] / current_mempool_data['size']
        # Keep TXs entering inside [t0, t_star], plus earlier TXs that were
        # confirmed quickly (< 30 min in the pool).
        mempool_data = current_mempool_data.loc[
            ((current_mempool_data['time'] >= t0) & (current_mempool_data['time'] <= t_star))
            | ((current_mempool_data['time'] <= t0) &
               (current_mempool_data['removed'] - current_mempool_data['time'] < 1800))]
        cmd_with_fpb = mempool_data.assign(f=fee_per_byte).drop('removed', axis=1)
        # Simulate successive blocks, also recording each block's used size.
        blocks = {}
        block_i_size={}
        i = 0
        while cmd_with_fpb.empty == False and i <= t_max / 60:
            tx_list = greedy_knapsack(block_size, cmd_with_fpb)
            blocks[i] = list()
            sum=0
            for id in tx_list:
                sum+=cmd_with_fpb[cmd_with_fpb['TXID'] == id]['size'].values[0]
                blocks[i].append(cmd_with_fpb[cmd_with_fpb['TXID'] == id]['f'].values[0])
                cmd_with_fpb = cmd_with_fpb[cmd_with_fpb['TXID'] != id]
            block_i_size[i]=sum
            i += 1
        t_z = {}
        # calculate t(z): first block where our TX fits in the slack or
        # where paying z per byte would out-bid the cheapest included TX.
        for z in range(5, 1000):
            for i in blocks.keys():
                if TX_size+block_i_size[i]<=block_size:
                    t_z[z]=i*60
                    break
                elif min(blocks[i]) <= z:
                    t_z[z] = i * 60
                    break
        dict_fee = {}
        dict_gu = {}
        for z in t_z.keys():
            # Same linear value-decay model as ForwardBiddingAgent.
            value = 0
            if (t_z[z] < t_min):
                value = TX_max_value
            elif t_z[z] >= t_min and t_z[z] < t_max:
                value = TX_max_value - (((TX_max_value - TX_min_value) / (t_max - t_min)) * (t_z[z] - t_min))
            elif t_z[z] >= t_max:
                value = 0
            # calculate fee
            if TX_size < block_size:
                fee_z = z * TX_size
            else:
                fee_z = 0
            # calculate GU
            GU = value - fee_z
            dict_fee[z] = fee_z
            dict_gu[z] = GU
        # check for max GU
        max_GU = max(dict_gu, key=dict_gu.get)
        fee_comp = dict_fee[max_GU]
        GU_comp = dict_gu[max_GU]
        if GU_comp <= 0:
            GU_comp = 0
            fee_comp = 0
        min_val = TX_min_value
        max_val = TX_max_value
        tx_size = TX_size
        curr_time = current_time
        curr_mempool=current_mempool_data
        # Fall back to the forward agent if it promises better utility.
        forwardAgent = ForwardBiddingAgent(t_min, t_max, block_size)
        fee_forward, t_forward, GU_forward = forwardAgent.bid(min_val, max_val, tx_size, curr_mempool, curr_time)
        if GU_comp < GU_forward:
            fee_comp = fee_forward
        # The forward agent's abstain sentinel (-1) maps to a zero bid.
        if fee_comp == -1:
            fee_comp = 0
        bid_competitive=fee_comp
        return bid_competitive
def write_file_ForwardAgent(tx_num, time_list, bid, utility_list):
    """Dump the forward agent's per-transaction results to
    hw2_ForwardAgent.csv with an Index/Time/Bid/Utility header.

    All four lists are indexed by the length of ``utility_list`` (same
    as the original, so shorter companion lists raise IndexError).
    """
    with open('hw2_ForwardAgent.csv', 'w') as csv_file:
        writer = csv.writer(csv_file, lineterminator='\n')
        writer.writerow(["Index", "Time", "Bid", "Utility"])
        rows = [[tx_num[i], time_list[i], bid[i], utility_list[i]]
                for i in range(len(utility_list))]
        writer.writerows(rows)
def write_file_CompetitiveAgent(tx_num, competitive_bid):
    """Dump the competitive agent's bids to hw2_CompetitiveAgent.csv
    with an Index/Bid header.

    Both lists are indexed by the length of ``competitive_bid`` (same as
    the original, so a shorter ``tx_num`` raises IndexError).
    """
    with open('hw2_CompetitiveAgent.csv', 'w') as csv_file:
        writer = csv.writer(csv_file, lineterminator='\n')
        writer.writerow(["Index", "Bid"])
        rows = [[tx_num[i], competitive_bid[i]]
                for i in range(len(competitive_bid))]
        writer.writerows(rows)
6abe2f9ea7f22602ad661b4a40c18284864f021a | Python | navpreetsingh7/Assignments | /Assignment12.py | UTF-8 | 806 | 3.484375 | 3 | [] | no_license | #A12_Q1
# A12_Q1: division by zero is caught and reported.
try:
    a=3
    if a<4:
        a=a/(a-3)
        print(a)
except ZeroDivisionError:
    print("num cannot be divided by zero")
#A12_Q2: out-of-range list index is caught.
try:
    a = [1,2,3]
    print(a[4])
except IndexError:
    print("limit exceed")
#A12_Q3
#output:"AN EXCEPTION"
#A12_Q4
#OUTPUT:-5
#"a/b resut in 0"
#A12_Q5: demonstrate ImportError, ValueError and IndexError handling.
try:
    import abcff
    print("nnn")
except ImportError:
    print("import error")
try:
    # Note: a non-integer input raises ValueError; an input of 0 would
    # raise an (uncaught) ZeroDivisionError below.
    a=int(input("enter no:"))
    print(a)
    b=10/a
    print(b)
except ValueError:
    print("enter only integer")
try:
    a = [1,2,3]
    print(a[4])
except IndexError:
    print("limit exceed")
#A12_Q6: custom exception raised for under-age input.
class AgeTooSmallError(Exception):
    pass
try:
    age=int(input("enter age"))
    print(age)
    if age<18:
        raise AgeTooSmallError
except AgeTooSmallError:
    print("age is too small")
4c825914b84315e2da065b2eaa09efa14ecaa49c | Python | cjj208/bt-ctpbee-store | /samples/sample.py | UTF-8 | 4,638 | 2.515625 | 3 | [] | no_license | import json
import backtrader as bt
from ctpbeebt import CTPStore
from datetime import datetime, time
# Do not delete the Origin class definition: the ctpbee interface needs it.
class Origin:
    """Adapter exposing the .symbol / .exchange attributes ctpbee expects,
    parsed from a backtrader feed named '<symbol>.<EXCHANGE>'."""
    def __init__(self, data):
        dataname_parts = data._dataname.split('.')
        name_parts = data._name.split('.')
        self.symbol = dataname_parts[0]
        self.exchange = name_parts[1]
# On trading days, live simulated trading is possible 08:45-15:00 and
# 20:45-02:45 (next morning). Chinese futures trade in day/night sessions;
# outside those windows (and on weekends) only non-realtime simulation works.
DAY_START = time(8, 45)     # day session opens at 08:45
DAY_END = time(15, 0)       # day session closes at 15:00
NIGHT_START = time(20, 45)  # night session opens at 20:45
NIGHT_END = time(2, 45)     # night session closes at 02:45 (next day)
# Are we currently inside a trading session?
def is_trading_period():
    """Return True during the day session (08:45-15:00) or the night
    session (20:45-02:45 next day), judged on the local wall clock."""
    now = datetime.now().time()
    in_day_session = DAY_START <= now <= DAY_END
    in_night_session = now >= NIGHT_START or now <= NIGHT_END
    return in_day_session or in_night_session
class SmaCross(bt.Strategy):
    """Skeleton live strategy: prints bar/position/account state each step
    and exposes ctpbee order helpers; actual order placement is stubbed."""
    lines = ('sma',)
    params = dict(
        smaperiod=5,
        store=None,
    )
    def __init__(self):
        # Direct handle to the live ctpbee API exposed by the CTPStore.
        self.beeapi = self.p.store.main_ctpbee_api
        self.buy_order = None
        self.live_data = False
        #self.move_average = bt.ind.MovingAverageSimple(self.data, period=self.params.smaperiod)
    def prenext(self):
        """Called while indicators warm up: just log each feed's last bar."""
        print('in prenext')
        for d in self.datas:
            print(d._name, d.datetime.datetime(0), 'o h l c ', d.open[0], d.high[0], d.low[0], d.close[0], ' vol ', d.volume[0])
    def next(self):
        """Per-bar logic: log bars, positions, trades and account state."""
        print('------------------------------------------ next start')
        for d in self.datas:
            print('d._name', d._name, 'd._dataname', d._dataname, d.datetime.datetime(0), 'o h l c ', d.open[0], d.high[0], d.low[0], d.close[0], ' vol ', d.volume[0])
            pos = self.beeapi.app.center.get_position(d._dataname)
            print('position', pos)
        # All live-trading info (positions, trades, orders, ...) is reachable
        # through the center; see http://docs.ctpbee.com/modules/rec.html
        trades = self.beeapi.app.center.trades
        print('trades', trades)
        account = self.beeapi.app.center.account
        print('account', account)
        if not self.live_data: # still backfilling history: skip order logic
            return
        # Open a long position (example, disabled).
        print('live buy')
        #self.open_long(self.data0.close[0] + 3, 1, self.data0)
        print('---------------------------------------------------')
    def notify_order(self, order):
        """Log every order status change."""
        print('订单状态 %s' % order.getstatusname())
    def notify_data(self, data, status, *args, **kwargs):
        """Track whether the feed has switched to LIVE data."""
        dn = data._name
        dt = datetime.now()
        msg = f'notify_data Data Status: {data._getstatusname(status)}'
        print(dt, dn, msg)
        if data._getstatusname(status) == 'LIVE':
            self.live_data = True
        else:
            self.live_data = False
    # Order-placement helpers wrapping the ctpbee action API.
    def open_long(self, price, size, data):
        self.beeapi.action.buy(price, size, Origin(data))
    def open_short(self, price, size, data):
        self.beeapi.action.short(price, size, Origin(data))
    def close_long(self, price, size, data):
        self.beeapi.action.cover(price, size, Origin(data))
    def close_short(self, price, size, data):
        self.beeapi.action.sell(price, size, Origin(data))
# Main program: wire two minute-bar feeds into cerebro and run live.
if __name__ == '__main__':
    with open('./params.json', 'r') as f:
        ctp_setting = json.load(f)
    cerebro = bt.Cerebro(live=True)
    store = CTPStore(ctp_setting, debug=True)
    cerebro.addstrategy(SmaCross, store=store)
    # Historical backfill comes from akshare, whose finest granularity is
    # 1-minute bars, so the live feeds below also use 1-minute bars.
    # https://www.akshare.xyz/zh_CN/latest/data/futures/futures.html#id106
    # Symbols must carry the exchange code; num_init_backfill is the number
    # of bars to backfill (set to 0 when simulating against the TEST server).
    data0 = store.getdata(dataname='ag2112.SHFE', timeframe=bt.TimeFrame.Minutes,
                          num_init_backfill=100 if is_trading_period() else 0)
    data1 = store.getdata(dataname='rb2201.SHFE', timeframe=bt.TimeFrame.Minutes,
                          num_init_backfill=100 if is_trading_period() else 0)
    cerebro.adddata(data0)
    cerebro.adddata(data1)
    cerebro.run()
| true |
3b3e1ddabc9eb99cd0f70fec8925665ad2e0ee74 | Python | ChanDanchen/weibo | /weibo_analysis_and_visualization/map.py | UTF-8 | 7,226 | 2.515625 | 3 | [
"MIT"
] | permissive | from pyecharts import options as opts
from pyecharts.charts import Geo, Page
from pyecharts.faker import Collector
from pyecharts.globals import ChartType, SymbolType
import numpy as np
import pickle
content_comment = pickle.load(open('Agu.pkl', 'rb'))
provin = '北京市,天津市,上海市,重庆市,河北省,山西省,辽宁省,吉林省,黑龙江省,江苏省,浙江省,安徽省,福建省,江西省,山东省,河南省,湖北省,湖南省,广东省,海南省,四川省,贵州省,云南省,陕西省,甘肃省,青海省,台湾省,内蒙古自治区,广西壮族自治区,西藏自治区,宁夏回族自治区,新疆维吾尔自治区,香港特别行政区,澳门特别行政区,北京,天津,上海,重庆,河北,山西,辽宁,吉林,黑龙江,江苏,浙江,安徽,福建,江西,山东,河南,湖北,湖南,广东,海南,四川,贵州,云南,陕西,甘肃,青海,台湾,内蒙古,广西,西藏,宁夏,新疆,香港,澳门'
provin = provin.split(',')
count = {}
for i in content_comment:
print(i)
for word in i[1]:
if word in provin:
count[word] = count.get(word, 0) + 1
count2 = {}
provin1 = '北京,天津,上海,重庆,河北,山西,辽宁,吉林,黑龙江,江苏,浙江,安徽,福建,江西,山东,河南,湖北,湖南,广东,海南,四川,贵州,云南,陕西,甘肃,青海,台湾,内蒙古,广西,西藏,宁夏,新疆,香港,澳门'
provin2 = '北京市,天津市,上海市,重庆市,河北省,山西省,辽宁省,吉林省,黑龙江省,江苏省,浙江省,安徽省,福建省,江西省,山东省,河南省,湖北省,湖南省,广东省,海南省,四川省,贵州省,云南省,陕西省,甘肃省,青海省,台湾省,内蒙古自治区,广西壮族自治区,西藏自治区,宁夏回族自治区,新疆维吾尔自治区,香港特别行政区,澳门特别行政区'
provin1 = provin1.split(',')
provin2 = provin2.split(',')
# 将上海和上海市,北京和北京市等进行汇总
for i in zip(provin1, provin2):
count2[i[0]] = count.get(i[0], 0) + count.get(i[1], 0)
values = []
for i in count2.values():
values.append(i)
# 将所有城市的热度值做归一化处理,按照热度最大城市为100进行同比例调整
values = np.array(values)
try:
Max = values.max()
Min = values.min()
if Max != Min:
values = ((values - Min) / (Max - Min) * 100).astype(np.int)
else:
pass
except:
pass
values = values.tolist()
provin_list = []
for key, value in zip(provin1, values):
list1 = [key, value]
provin_list.append(list1)
print(provin_list)
# provin_list = []
# for key, value in count2.items():
# list1 = [key, value]
# provin_list.append(list1)
##############################################
# 上面是数据预处理,下面是作图
# Collector gathers the chart-builder functions for the final Page.
C = Collector()
@C.funcs
def geo_base() -> Geo:
    """China map scatter chart of the normalized Weibo city heat."""
    c = (
        Geo()
        .add_schema(maptype="china")
        .add("城市", provin_list)
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(
            visualmap_opts=opts.VisualMapOpts(),
            title_opts=opts.TitleOpts(title="微博城市热度分析"),
        )
    )
    return c
@C.funcs
def geo_visualmap_piecewise() -> Geo:
    """Same heat map as geo_base but with a piecewise (banded) legend."""
    c = (
        Geo()
        .add_schema(maptype="china")
        .add("城市", provin_list)
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(
            visualmap_opts=opts.VisualMapOpts(is_piecewise=True),
            title_opts=opts.TitleOpts(title="微博城市热度分析"),
        )
    )
    return c
@C.funcs
def geo_effectscatter() -> Geo:
    """City heat rendered with animated ripple (effect-scatter) markers."""
    c = (
        Geo()
        .add_schema(maptype="china")
        .add(
            "城市",
            provin_list,
            type_=ChartType.EFFECT_SCATTER,
        )
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(title_opts=opts.TitleOpts(title="微博城市热度分析"))
    )
    return c
@C.funcs
def geo_heatmap() -> Geo:
    """City heat rendered as a continuous heat-map layer."""
    c = (
        Geo()
        .add_schema(maptype="china")
        .add(
            "城市",
            provin_list,
            type_=ChartType.HEATMAP,
        )
        .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
        .set_global_opts(
            visualmap_opts=opts.VisualMapOpts(),
            title_opts=opts.TitleOpts(title="微博城市热度分析"),
        )
    )
    return c
# @C.funcs
# def geo_guangdong() -> Geo:
# c = (
# Geo()
# .add_schema(maptype="广东")
# .add(
# "geo",
# [list(z) for z in zip(Faker.guangdong_city, Faker.values())],
# type_=ChartType.HEATMAP,
# )
# .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
# .set_global_opts(
# visualmap_opts=opts.VisualMapOpts(),
# title_opts=opts.TitleOpts(title="Geo-广东地图"),
# )
# )
# return c
# @C.funcs
# def geo_lines() -> Geo:
# c = (
# Geo()
# .add_schema(maptype="china")
# .add(
# "",
# [("广州", 55), ("北京", 66), ("杭州", 77), ("重庆", 88)],
# type_=ChartType.EFFECT_SCATTER,
# color="white",
# )
# .add(
# "geo",
# [("广州", "上海"), ("广州", "北京"), ("广州", "杭州"), ("广州", "重庆")],
# type_=ChartType.LINES,
# effect_opts=opts.EffectOpts(
# symbol=SymbolType.ARROW, symbol_size=6, color="blue"
# ),
# linestyle_opts=opts.LineStyleOpts(curve=0.2),
# )
# .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
# .set_global_opts(title_opts=opts.TitleOpts(title="Geo-Lines"))
# )
# return c
#
#
# @C.funcs
# def geo_lines_background() -> Geo:
# c = (
# Geo()
# .add_schema(
# maptype="china",
# itemstyle_opts=opts.ItemStyleOpts(color="#323c48", border_color="#111"),
# )
# .add(
# "",
# [("广州", 55), ("北京", 66), ("杭州", 77), ("重庆", 88)],
# type_=ChartType.EFFECT_SCATTER,
# color="white",
# )
# .add(
# "geo",
# [("广州", "上海"), ("广州", "北京"), ("广州", "杭州"), ("广州", "重庆")],
# type_=ChartType.LINES,
# effect_opts=opts.EffectOpts(
# symbol=SymbolType.ARROW, symbol_size=6, color="blue"
# ),
# linestyle_opts=opts.LineStyleOpts(curve=0.2),
# )
# .set_series_opts(label_opts=opts.LabelOpts(is_show=False))
# .set_global_opts(title_opts=opts.TitleOpts(title="Geo-Lines-background"))
# )
# return c
Page().add(*[fn() for fn, _ in C.charts]).render(u'./map.html')
# print([list(z) for z in zip(Faker.provinces, Faker.values())])
# print(Faker.provinces)
# print(type(Faker.provinces))
| true |
86aebb7e4b489542b959442fda86bf74888ef938 | Python | greg-norton/GameShazy | /TestLibrary.py | UTF-8 | 1,335 | 2.640625 | 3 | [] | no_license | import unittest
import pygame
import math
import copy
import os
import library
class TestLibrary(unittest.TestCase):
    """Unit tests for the helper functions in the ``library`` module."""

    def test_load_text(self):
        """load_text must mirror readlines() and propagate file errors."""
        filename = 'resources/event_scrolls/credits.asset'
        with open(filename) as f:
            expected = f.readlines()
        self.assertEqual(library.load_text(filename), expected)
        # A missing file must raise instead of returning a default.
        # (Replaces the original flag/try/finally pattern with assertRaises.)
        with self.assertRaises(FileNotFoundError):
            library.load_text('thisfiledoesnotexist.asset')
        # A non-string path is a programming error and must raise TypeError.
        with self.assertRaises(TypeError):
            library.load_text(None)

    # The remaining helpers are placeholders awaiting implementations.
    def test_load_sound(self):
        pass

    def test_load_background_music(self):
        pass

    def test_load_image(self):
        pass

    def test_draw_text(self):
        pass

    def test_draw_vertical_bar(self):
        pass

    def test_draw_boss_bar(self):
        pass

    def test_draw_player_lives(self):
        pass

    def test_draw_bombs_remaining(self):
        pass
if __name__ == "__main__":
unittest.main(exit=False) | true |
603c2771f62188efd75c1d8471cb09ce8aced09c | Python | julienGautier77/motors | /moteurRSAI.py | UTF-8 | 7,867 | 2.59375 | 3 | [] | no_license | # -*- coding: UTF-8
"""
Pilotage des controleurs RSAI via les ddl PilmotTango.dll et openMD.dll
python 3.X pyQT6
system 64 bit (at least python MSC v.1900 64 bit (Intel))
@author: Gautier julien loa
Created on Tue Jan 4 10:42:10 2018
modified on Tue May 23 15:49:32 2023
"""
#%% Imports
import ctypes
import time
import sys
import logging
try:
from PyQt6.QtCore import QSettings
except:
print('erro pyQT6 import)')
#%% DLL
if sys.maxsize <2**32:
print('you are using a 32 bits version of python use 64 bits or change RSAI dll')
dll_file = 'DLL/PilMotTango.dll'
#modbus_file='DLL/OpenModbus.dll'
try:
#PilMot=ctypes.windll.PilMotTango # Chargement de la dll PilMotTango et OpenMD .dll
PilMot = ctypes.windll.LoadLibrary(dll_file)
#modbus=ctypes.windll.LoadLibrary(modbus_file)
except AttributeError as s:
print('########################################################')
print("Error when loading the dll file : %s" % dll_file)
print("Error : %s" % s)
print("PilMot() is then a dummy class.")
print('########################################################')
class PilMot():
""" dummy class """
def rEtatConnexion(i):
return i
def Start(i, s):
return 0
def rPositionMot(i, j):
return 10.
def wCdeMot(i, j, k, l, m):
return
def wPositionMot(i, j, k):
return
def rEtatMoteur(i, j):
return 0
def Stop():
return 0
#%% ENTREE
# liste adresse IP des modules in class MotorRSAI self.numEsim is the number of the rack in this list (start from 0)
IP = b"10.0.5.10\0 "
IPs_C = ctypes.create_string_buffer(IP, 16) # permet d avoir la liste comme demander dans la dll Here 10.0.5.10 numEsim =0
# for 2 racks
#IP = b"10.0.5.10\0 10.0.5.11\0 "
#IPs_C = ctypes.create_string_buffer(IP, 32) # permet d avoir la liste comme demander dans la dll Here 10.0.5.10 numEsim =0 Here 10.0.5.11 numEsim =1
#conf = QSettings(QSettings.IniFormat, QSettings.UserScope, "configMoteur", "configMoteurRSAI")
confRSAI = QSettings('fichiersConfig/configMoteurRSAI.ini', QSettings.Format.IniFormat)
#%% Functions connections RSAI
def startConnexion():
    """Open the connection to the RSAI racks (no-op if already connected).

    Returns the DLL's Start() result: 1 on success, otherwise 0.
    """
    print("RSAI initialisation ...")
    argout = 0
    argoutetat = PilMot.rEtatConnexion( ctypes.c_int16(0) ) # equipment number
    if argoutetat != 3:
        # State 3 means already connected; otherwise start for 3 racks with the IP list.
        argout = PilMot.Start(ctypes.c_int(3), IPs_C) # equipment count, IP list
    if argout == 1 :
        print('RSAI connection : OK RSAI connected @\n', IP)
    else:
        print('RSAI connexion failed')
    return argout
def stopConnexion():
    """Close every open RSAI connection."""
    print('RSAI connexion stopped ')
    PilMot.Stop() # stop all connections
def testConnection():
    """Report the connection state: 3 = connected OK, 1 = already connected."""
    argout = PilMot.rEtatConnexion(ctypes.c_int16(0)) # equipment number
    if argout == 3:
        print('Test connection OK')
    elif argout == 1:
        print('Already connected at \n', IP)
    else :
        print('Test connexion failed')
    return argout
#%% class MOTORSAI
class MOTORRSAI():
    """Driver for one motor connected to an RSAI rack.

    The motor is identified by its section name (``mot1``) in the
    configuration file ``fichiersConfig/configMoteurRSAI.ini``, which
    supplies the rack index (numESim) and the motor index on that rack.
    All hardware access goes through the PilMot DLL loaded at module level.
    """
    def __init__(self, mot1='',parent=None):
        # mot1: section name of the motor in the .ini configuration file.
        self.moteurname=mot1
        try:
            self.numEsim=ctypes.c_int16(int(confRSAI.value(self.moteurname+'/numESim')))
        except:
            print('configuration file error : motor name or motortype is not correct ')
        try:
            self.numMoteur=ctypes.c_int16(int(confRSAI.value(self.moteurname+'/numMoteur')) )
        except:
            print('configuration file error : motor name or motortype is not correct ')
        # One log file per day, shared by every motor instance.
        date=time.strftime("%Y_%m_%d")
        fileNameLog='motorLog/logMotor_'+date+'.log'
        logging.basicConfig(filename=fileNameLog, encoding='utf-8', level=logging.INFO,format='%(asctime)s %(message)s')

    def stopMotor(self):
        """Immediately stop the motor (DLL command code 8)."""
        regCde = ctypes.c_uint(8)
        PilMot.wCdeMot( self.numEsim , self.numMoteur, regCde, 0, 0)
        print("Stop")

    def move(self, pos, vitesse=10000):
        """Absolute move of the motor to ``pos`` (steps) at speed ``vitesse``."""
        regCde = ctypes.c_uint(2)  # command code 2 = absolute move
        posi = ctypes.c_int(int(pos))
        vit = ctypes.c_int( int(vitesse) )
        print(time.strftime("%A %d %B %Y %H:%M:%S"))
        print(self.moteurname, "position before ", self.position(), "(step)")
        PilMot.wCdeMot(self.numEsim , self.numMoteur, regCde, posi, vit)
        print(self.moteurname, "move to", pos, "(step)")
        tx='motor ' +self.moteurname +' absolute move to ' + str(pos) + ' step ' + ' position is : ' + str(self.position())+'step'
        logging.info(tx)

    def rmove(self, posrelatif, vitesse=1000):
        """Relative move of ``posrelatif`` steps at speed ``vitesse``.

        The DLL only exposes absolute moves, so the target is computed as
        current position + offset before issuing command code 2.
        """
        regCde = ctypes.c_uint(2)
        posActuel = self.position()
        print(time.strftime("%A %d %B %Y"))
        print(self.moteurname,"position before ",posActuel,"(step)")
        pos = int(posActuel+posrelatif)
        posi = ctypes.c_int(pos)
        vit = ctypes.c_int(int(vitesse))
        PilMot.wCdeMot(self.numEsim , self.numMoteur, regCde, posi, vit)
        print(self.moteurname, "relative move of", posrelatif, "(step)")
        tx='motor ' +self.moteurname +' rmove of ' + str(posrelatif) + ' step ' + ' position is : ' + str(self.position())+'step'
        logging.info(tx)

    def setzero(self):
        """Declare the current position as zero (command code 1024 = 2**10)."""
        regCde=ctypes.c_int(1024)
        a=PilMot.wCdeMot(self.numEsim , self.numMoteur,regCde,ctypes.c_int(0),ctypes.c_int(0))
        print (self.moteurname,"zero set",a)
        tx='motor '+ self.moteurname + 'set to : ' + ' '+ str(0)
        logging.info(tx)

    def etatMotor(self):
        """Return a short status string decoded from the motor status word.

        NOTE(review): the decoding was reverse engineered and the original
        author flagged it as incomplete ("a verifier"). Two unreachable
        duplicate branches from the original were removed ('0x2090'/'0x90'
        was tested twice, and '0x2082' was re-tested after already matching
        the FDC- branch); observable behavior is unchanged.
        """
        a=PilMot.rEtatMoteur(self.numEsim , self.numMoteur)
        a=hex(a)
        if a in ('0x2030', '0x30'):
            etat='mvt'        # moving
        elif a in ('0x2012', '0x12', '0x92', '0x2082'):
            etat='FDC-'       # negative limit switch reached
        elif a in ('0x2011', '0x11', '0x91'):
            etat='FDC+'       # positive limit switch reached
        elif a in ('0x2010', '0x10', '0x2090', '0x90'):
            etat='ok'
        elif a in ('0x890', '0x2890'):
            etat='Power off'
        else:
            etat='?'
        return etat

    def position(self):
        """Return the motor's theoretical position, in steps."""
        pos=PilMot.rPositionMot(self.numEsim , self.numMoteur)
        return pos
#%% Open the connection to the RSAI racks as soon as the module is imported
startConnexion()
#%%
if __name__ == "__main__":
    print("test")
    #startConnexion()
| true |
1dfa94dad2d719f523418c02a29f8a39ae79b9dd | Python | DylanMeeus/CompetitiveProgramming3 | /UVa/chapter1/11799.py | UTF-8 | 303 | 3.125 | 3 | [] | no_license | import sys
if __name__ == '__main__':
    # UVa 11799: the buffalo's speed is the fastest runner on each line.
    case = 0
    for line in sys.stdin:
        if case == 0:
            # The first input line holds the number of test cases; skip it.
            case += 1
            continue
        # max(map(int, ...)) replaces the lambda/list round-trip; split()
        # without an argument also tolerates repeated whitespace.
        fastest = max(map(int, line.split()))
        print("Case {}: {}".format(case, fastest))
        case += 1
| true |
684f2492477577ec369174cf3ba015e49fa26111 | Python | taku-y/pymc3 | /pymc3/glm/families.py | UTF-8 | 3,175 | 2.796875 | 3 | [
"AFL-2.1",
"Apache-2.0"
] | permissive | import numbers
from copy import copy
import theano.tensor as tt
from ..model import modelcontext
from .. import distributions as pm_dists
__all__ = ['Normal', 'StudentT', 'Binomial', 'Poisson', 'NegativeBinomial']
# Define link functions
# Hack as assigning a function in the class definition automatically binds
# it as a method.
class Identity():
    """Callable identity link; using an instance avoids method binding."""

    def __call__(self, x):
        # Return the linear predictor unchanged.
        return x

# Canonical link functions used by the Family subclasses below.
identity = Identity()
logit = tt.nnet.sigmoid   # inverse-logit (sigmoid) link
inverse = tt.inv          # reciprocal link
exp = tt.exp              # exponential (log) link
class Family(object):
    """Base class for Family of likelihood distribution and link functions.

    Subclasses declare the likelihood distribution, the name of the
    likelihood parameter fed by the linear predictor (``parent``), default
    ``priors`` for the remaining parameters, and the ``link`` function.
    """
    # NOTE: ``priors`` is a class-level dict shared by all instances;
    # __init__ copies it before mutating so the class default stays intact.
    priors = {}
    link = None

    def __init__(self, **kwargs):
        # Overwrite defaults
        for key, val in kwargs.items():
            if key == 'priors':
                # Copy first so the class-level default dict is not mutated.
                self.priors = copy(self.priors)
                self.priors.update(val)
            else:
                setattr(self, key, val)

    def _get_priors(self, model=None, name=''):
        """Return prior distributions of the likelihood.

        Plain numbers stay fixed; distribution objects become model
        variables named ``<name>_<key>``.

        Returns
        -------
        dict : mapping name -> pymc3 distribution
        """
        if name:
            name = '{}_'.format(name)
        model = modelcontext(model)
        priors = {}
        for key, val in self.priors.items():
            if isinstance(val, numbers.Number):
                priors[key] = val
            else:
                priors[key] = model.Var('{}{}'.format(name, key), val)

        return priors

    def create_likelihood(self, name, y_est, y_data, model=None):
        """Create likelihood distribution of observed data.

        Parameters
        ----------
        y_est : theano.tensor
            Estimate of dependent variable
        y_data : array
            Observed dependent variable
        """
        priors = self._get_priors(model=model, name=name)
        # Wrap y_est in link function
        priors[self.parent] = self.link(y_est)
        if name:
            name = '{}_'.format(name)
        return self.likelihood('{}y'.format(name), observed=y_data, **priors)

    def __repr__(self):
        return """Family {klass}:
    Likelihood   : {likelihood}({parent})
    Priors       : {priors}
    Link function: {link}.""".format(klass=self.__class__, likelihood=self.likelihood.__name__, parent=self.parent, priors=self.priors, link=self.link)
# Robust linear regression: Student-T likelihood with identity link.
class StudentT(Family):
    link = identity
    likelihood = pm_dists.StudentT
    parent = 'mu'
    priors = {'lam': pm_dists.HalfCauchy.dist(beta=10, testval=1.),
              'nu': 1}


# Ordinary linear regression: Normal likelihood with identity link.
class Normal(Family):
    link = identity
    likelihood = pm_dists.Normal
    parent = 'mu'
    priors = {'sd': pm_dists.HalfCauchy.dist(beta=10, testval=1.)}


# Logistic regression: Bernoulli likelihood with logit link.
class Binomial(Family):
    link = logit
    likelihood = pm_dists.Bernoulli
    parent = 'p'


# Poisson regression: log link (applied as exp on the predictor).
class Poisson(Family):
    link = exp
    likelihood = pm_dists.Poisson
    parent = 'mu'
    priors = {'mu': pm_dists.HalfCauchy.dist(beta=10, testval=1.)}


# Over-dispersed count regression: Negative Binomial with log link.
class NegativeBinomial(Family):
    link = exp
    likelihood = pm_dists.NegativeBinomial
    parent = 'mu'
    priors = {'mu': pm_dists.HalfCauchy.dist(beta=10, testval=1.),
              'alpha': pm_dists.HalfCauchy.dist(beta=10, testval=1.)}
| true |
360fcb8f46b2d11b5e3453bdb8a2c5c11837570b | Python | JesseDiGiacomo/PersonalCourseFiles | /CursoPython/mindstorms.py | UTF-8 | 1,555 | 4.03125 | 4 | [] | no_license | import turtle
def draw_mandala():
    # Draw a mandala by tracing a 100x100 square, tilting 10 degrees after
    # each square, until a full 360-degree rotation is completed.
    pen = turtle.Turtle()
    pen.shape("turtle")
    pen.color("green")
    pen.speed(10)
    for _ in range(36):        # 36 squares x 10 degrees = 360 degrees
        for _ in range(4):     # one full square
            pen.forward(100)
            pen.right(90)
        pen.right(10)          # tilt before the next square
def draw_square():
    # Trace a single 100x100 square with a black turtle pen.
    pen = turtle.Turtle()
    pen.shape("turtle")
    pen.color("black")
    pen.speed(10)
    for _ in range(4):
        pen.forward(100)
        pen.right(90)
def draw_circle ():
    # Draw a circle of radius 100 with a blue arrow-shaped pen.
    circle = turtle.Turtle()
    circle.shape ("arrow")
    circle.color ("blue")
    circle.speed (10)
    circle.circle (100)
def draw_triangle():
    # Trace an equilateral triangle (side 100) with a white arrow pen.
    pen = turtle.Turtle()
    pen.shape("arrow")
    pen.color("white")
    pen.speed(10)
    for _ in range(3):
        pen.forward(100)
        pen.right(120)
def main ():
    # Main function: set up the canvas, draw the mandala, and wait for a
    # click on the window to exit.
    window = turtle.Screen ()
    window.bgcolor ("red")
    # Alternative drawings (disabled):
    #draw_square ()
    #draw_circle ()
    #draw_triangle ()
    draw_mandala ()
    window.exitonclick()

main()
| true |
8c52a126c97868edcf89fc1a2e972d075d9a5efb | Python | 7enTropy7/RSA_cryptography | /Decryption_Server.py | UTF-8 | 3,169 | 2.96875 | 3 | [
"MIT"
] | permissive | from __future__ import unicode_literals
import socket
from math import sqrt
import random
from random import randint as rand
import pickle
host = socket.gethostname()
port = 5000
s = socket.socket()
s.bind((host, port))
s.listen(2)
def gcd(a, b):
    # Euclid's algorithm, iterative form: replace (a, b) with (b, a mod b)
    # until the remainder is zero.
    while b != 0:
        a, b = b, a % b
    return a
def mod_inverse(a, m):
    # Brute-force search for the modular inverse of a modulo m;
    # returns -1 when no inverse exists (gcd(a, m) != 1).
    return next((x for x in range(1, m) if (a * x) % m == 1), -1)
def isprime(n):
    """Return True if ``n`` is prime, using trial division up to sqrt(n).

    BUGFIX: the original loop started the trial divisors at 1; since every
    integer is divisible by 1 it reported all n > 2 as composite. Divisors
    now start at 2.
    """
    if n < 2:
        return False
    if n == 2:
        return True
    for i in range(2, int(sqrt(n)) + 1):
        if n % i == 0:
            return False
    return True
#initial two random numbers p,q
p = rand(1, 1000)
q = rand(1, 1000)
def generate_keypair(p, q,keysize):
    """Generate an RSA keypair ((e, n), (d, n)) with an n of ``keysize`` bits.

    NOTE(review): the ``p`` and ``q`` arguments are immediately overwritten
    below (p is even reused as a sieve loop variable); the primes actually
    used are chosen from a locally built sieve — confirm the parameters are
    intentional.
    """
    # keysize is the bit length of n so it must be in range(nMin,nMax+1).
    # << is bitwise operator
    # x << y is same as multiplying x by 2**y
    # i am doing this so that p and q values have similar bit-length.
    # this will generate an n value that's hard to factorize into p and q.
    nMin = 1<<(keysize-1)
    nMax = (1<<keysize) - 1
    primes=[2]
    # we choose two prime numbers in range(start, stop) so that the difference of bit lengths is at most 2.
    start = 1<<(keysize//2-1)
    stop = 1<<(keysize//2+1)
    if start >= stop:
        return []
    # Incremental trial-division sieve over the odd numbers up to `stop`.
    for i in range(3, stop + 1, 2):
        for p in primes:
            if i % p == 0:
                break
        else:
            primes.append(i)
    # Drop primes below `start` so p and q have similar bit lengths.
    while(primes and primes[0] < start):
        del primes[0]
    #choosing p and q from the generated prime numbers.
    while primes:
        p = random.choice(primes)
        primes.remove(p)
        q_values = [q for q in primes if nMin <= p * q <= nMax]
        if q_values:
            q = random.choice(q_values)
            break
    n = p * q
    phi = (p - 1) * (q - 1)
    #generate public key 1<e<phi(n)
    e = random.randrange(1, phi)
    g = gcd(e, phi)
    #as long as gcd(e,phi(n)) is not 1, keep generating e
    while True:
        e = random.randrange(1, phi)
        g = gcd(e, phi)
        #generate private key
        # NOTE(review): mod_inverse returns -1 when gcd(e, phi) != 1; the
        # loop only exits once g == 1, so d is then a valid inverse.
        d = mod_inverse(e, phi)
        if g==1 and e!=d:
            break
    #public key (e,n)
    #private key (d,n)
    return ((e, n), (d, n))
def decrypt(msg_ciphertext, package):
    """Decrypt a list of RSA ciphertext integers with private key (d, n).

    Each block is recovered as m = c^d mod n and cast back to a character;
    the characters are joined into the final plaintext string.
    """
    d, n = package
    recovered = []
    for block in msg_ciphertext:
        recovered.append(chr(pow(block, d, n)))
    return ''.join(recovered)
public, private = generate_keypair(p, q, 8)  # 8-bit modulus keypair
print(host)
conn, address = s.accept()  # block until the client connects
print("Connected to: " + str(address))
# Send the public key (e, n) so the client can encrypt its messages.
conn.send(str(public[0]).encode())
conn.send(str(public[1]).encode())
print("Public Key: ",public)
while True:
    # Each message arrives as a pickled list of ciphertext integers.
    encoded_data = pickle.loads(conn.recv(1024*4))
    for i in range(len(encoded_data)):
        encoded_data[i]=int(encoded_data[i])
    if not encoded_data:
        break
    #print(''.join(map(lambda x: str(x), encoded_data)))
    decoded_data = decrypt(encoded_data, private)
    print("Client : " + str(decoded_data))
conn.close()
| true |
e587f36410c9f3ad8528beb5c6efc7f2a19530f9 | Python | barbocz/MachineLearning | /NeuralNetworks/multi_class_classifier.py | UTF-8 | 1,083 | 2.515625 | 3 | [] | no_license | import numpy as np
import tensorflow as tf
import keras_preprocessing
from keras_preprocessing import image
from keras_preprocessing.image import ImageDataGenerator
import pandas as pd
# https://www.kaggle.com/datamunge/sign-language-mnist
# Load the Sign Language MNIST CSV: column 0 is the label, the remaining
# 784 columns are 28x28 grayscale pixel values.
train_data=pd.read_csv('sign_mnist_test.csv')
print(train_data.shape)
# Split off the label column; reshape the pixel columns into 28x28 images.
train_data_splitted=np.hsplit(train_data,[1])
labels=train_data_splitted[0].to_numpy()
images=train_data_splitted[1].to_numpy().reshape(labels.size,28,28)
print(np.unique(labels).size)
print(labels.shape)
print(images.shape)
# Add a trailing channel axis so the images fit Keras' (h, w, c) layout.
images = np.expand_dims(images, axis=3)
# NOTE(review): labels has shape (n, 1) at this point, so axis=3 exceeds
# its rank and np.expand_dims will raise an AxisError — confirm the
# intended axis.
labels = np.expand_dims(labels, axis=3)
# x = np.arange(40.0).reshape(8, 5)
# print(x)
# sx=np.hsplit(x,[1])
# print(np.unique(sx[0]).size)
# print(sx[1].reshape(8,2,2))
# Augmentation pipeline for training images.
training_datagen = ImageDataGenerator(
    rescale = 1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
trd=training_datagen.flow(
    images,
    labels,
    batch_size=28
)
print('ok')
1d9eb9e8bce4c5aefdacfbd33ca49f8d92f4dd40 | Python | greenfox-zerda-lasers/h-attila | /week-04/day-4/test.py | UTF-8 | 88 | 3.03125 | 3 | [] | no_license | list = [1, 2, [3, 4], 1, [1, [2, 4]]]
# NOTE(review): the variable defined above shadows the builtin `list`, and
# `type(x) == type([])` is non-idiomatic — prefer renaming the variable and
# using isinstance(x, list). Element [1] is the int 2, so nothing prints.
if type(list[1]) == type([]):
    print('hello')
69ac8aef2091cc1762ea5fd8391655f2e7bb02c8 | Python | santigijon/Environmental-Sound-Classification-Using-Convolutional-Neural-Networks | /santiago_data_preprocessing.py | UTF-8 | 13,793 | 3.140625 | 3 | [] | no_license |
######################################################################################################
############ A MODULE WHERE THE NECESSARY FUNCTIONS TO PROCESS THE AUDIO FILES ARE INCLUDED ##########
######################################################################################################
# All rights reserved to: Santiago Álvarez-Buylla Puente
# Last modification: 24/06/2018
import numpy as np
import librosa
import math
def extend_file(input_file, sampling_rate, desired_length_seconds):
    """Loop an audio clip until it reaches the desired duration.

    The clip is appended to itself as many times as needed, then truncated
    to exactly ``sampling_rate * desired_length_seconds`` samples.

    Parameters
    ----------
    input_file : 1-D np.ndarray
        Single-channel (resampled) audio samples.
    sampling_rate : int
        Sampling rate of the file in Hz.
    desired_length_seconds : float
        Target duration in seconds.

    Returns
    -------
    np.ndarray of shape (desired_length_samples,)
    """
    desired_length_samples = int(sampling_rate * desired_length_seconds)
    needed_length = desired_length_samples - len(input_file)
    repeats = math.ceil(needed_length / len(input_file))
    # One original copy plus `repeats` appended copies, then truncate.
    extended_file = np.concatenate([input_file] * (repeats + 1))
    # BUGFIX: the original called extended_file.reshape(n, 1) and discarded
    # the result (np.reshape is not in-place); the dead call was removed and
    # the function returns the 1-D array, matching actual prior behavior.
    return extended_file[0:desired_length_samples]
def voss(nrows, ncols=16, seed = 1):
    """Generate pink noise with the Voss-McCartney algorithm.

    Adapted from https://www.dsprelated.com/showarticle/908.php

    Parameters
    ----------
    nrows : int
        Number of samples to generate.
    ncols : int
        Number of random sources to add.
    seed : unused
        Kept for interface compatibility; seed NumPy's global RNG before
        calling if reproducibility is needed.

    Returns
    -------
    1-D NumPy array of length ``nrows``.
    """
    # BUGFIX: this function uses pandas but the module header never imports
    # it, so calling voss() raised NameError. Import locally to avoid
    # changing the module's top-level dependencies for non-noise code paths.
    import pandas as pd

    array = np.empty((nrows, ncols))
    array.fill(np.nan)
    array[0, :] = np.random.random(ncols)
    array[:, 0] = np.random.random(nrows)

    # the total number of changes is nrows
    n = nrows
    cols = np.random.geometric(0.5, n)
    cols[cols >= ncols] = 0
    rows = np.random.randint(nrows, size=n)
    array[rows, cols] = np.random.random(n)

    # Forward-fill each random source down the rows, then sum the sources.
    # (df.ffill is the non-deprecated equivalent of fillna(method='ffill').)
    df = pd.DataFrame(array).ffill(axis=0)
    total = df.sum(axis=1)
    return total.values
def normalize(input_file, amp = 1):
    """Scale a wave array so its peak magnitude equals ``amp``.

    input_file: wave array
    amp: maximum amplitude (positive or negative) in the result
    returns: scaled wave array
    """
    peak = np.maximum(np.absolute(np.amax(input_file)),
                      np.absolute(np.amin(input_file)))
    return amp * input_file / peak
def add_noise(input_file, db_SNR, index):
    """Mix pink noise into an audio clip at the requested SNR (in dB).

    Depends on :func:`voss` (noise source) and :func:`normalize`. ``index``
    is only used to report which file triggered a silent-input warning.
    Returns the clip with scaled noise added.
    """
    # Zero-mean, normalized pink noise the same length as the signal.
    noise = normalize(voss(len(input_file)))
    noise = noise - np.mean(noise)

    rms_noise = np.sqrt(np.mean(noise ** 2))
    rms_file = np.sqrt(np.mean(input_file ** 2))

    # For numerical stability: avoid taking log10(0) on silent clips.
    if rms_file == 0.0:
        print('problem in file %s' % index)
        rms_file = 10 ** (-8)

    # RMS the noise must have so the signal-to-noise ratio equals db_SNR.
    rms_noise_required = 10 ** (-db_SNR / 10 + math.log10(rms_file))
    noise_scaled = noise * (rms_noise_required / rms_noise)

    return input_file + noise_scaled
def zero_pad(input_file, sampling_rate, desired_length_seconds, noise = False, noise_all = False, db_SNR = 0, index = 0):
    """Pad an audio clip out to the desired duration.

    Parameters
    ----------
    input_file : 1-D np.ndarray
        Resampled single-channel audio samples.
    sampling_rate : int
        Sampling rate of the file.
    desired_length_seconds : float
        Target duration in seconds.
    noise : bool
        If True, mix pink noise into the clip via :func:`add_noise`.
    noise_all : bool
        If True (and ``noise`` is True), fill the padding with pink noise
        scaled to the same SNR instead of silence.
    db_SNR : float
        Signal-to-noise ratio in dB for the added noise.
    index : any
        Identifier forwarded to :func:`add_noise` for error reporting.

    Returns
    -------
    1-D np.ndarray of length sampling_rate * desired_length_seconds.

    BUGFIX: the original noise_all branch unpacked two values from
    add_noise(), which returns a single array, so it always raised
    ValueError. The padding noise is now generated locally at the same SNR.
    """
    desired_length_samples = sampling_rate * desired_length_seconds
    needed_length = int(desired_length_samples - len(input_file))
    if not noise:
        return np.concatenate((input_file, np.zeros(needed_length)), axis=0)

    noised = add_noise(input_file, db_SNR, index)
    if not noise_all:
        # Only the clip itself is noised; the padding stays silent.
        return np.concatenate((noised, np.zeros(needed_length)), axis=0)

    # Fill the padding with fresh zero-mean pink noise scaled so that the
    # SNR relative to the original clip's RMS level is db_SNR.
    pad_noise = normalize(voss(max(needed_length, 1)))
    pad_noise = pad_noise - np.mean(pad_noise)
    rms_noise = np.sqrt(np.mean(pad_noise ** 2))
    rms_file = np.sqrt(np.mean(input_file ** 2))
    if rms_file == 0.0:
        rms_file = 10 ** (-8)   # avoid log10(0) on silent clips
    coeff = 10 ** (-db_SNR / 10 + math.log10(rms_file)) / rms_noise
    pad_noise = pad_noise * coeff
    return np.concatenate((noised, pad_noise[0:needed_length]), axis=0)
def get_spectrogram(x, window_size, hop_length):
    #A function to calculate the power spectrogram of an audio file. The values of the spectrogram are in dB
    #relative to the maximum value of each spectrogram.
    #Inputs:
    #    x - Array containing the single channel input audio file, size = (sampling_rate*duration, number_of_examples)
    #    window_size - the time window size of the STFT frames, in samples
    #    hop length - the step between consecutive windows, in samples
    #Returns:
    #    features - An array of size = (frequency_bins_fft, time_frames, number_of_examples)
    output = []
    for l in range(x.shape[1]): # i.e. number of examples
        # NOTE(review): librosa.core.spectrum._spectrogram is a private
        # librosa API — it may break across librosa versions; consider the
        # public librosa.stft + magnitude**2 instead.
        out,_ = librosa.core.spectrum._spectrogram(x[:,l], n_fft = window_size, hop_length = hop_length, power = 2)
        # dB conversion referenced to each example's own maximum.
        ref = np.amax(out)
        spectrogram_in_db = librosa.power_to_db((out), ref=ref)
        output.append(spectrogram_in_db)
    # Stack to (freq_bins, time_frames, examples).
    features = np.transpose(np.array(output), axes =(1,2,0))
    return features
def get_mel_spectrogram(x, sampling_rate, number_mel_bands, window_size, hop_length):
    """Compute per-example mel spectrograms in dB (ref = per-example max).

    Parameters
    ----------
    x : np.ndarray, shape (duration*sampling_rate, number_of_examples)
        Audio clips, one per column.
    sampling_rate : int
        Sampling rate of the clips.
    number_mel_bands : int
        Number of mel frequency bands to compute.
    window_size : int
        STFT window size in samples (n_fft).
    hop_length : int
        Step between consecutive windows, in samples.

    Returns
    -------
    np.ndarray of shape (number_mel_bands, time_frames, number_of_examples).
    """
    spectrograms = []
    for example in range(x.shape[1]):
        power = librosa.feature.melspectrogram(
            x[:, example], sampling_rate, n_fft=window_size,
            hop_length=hop_length, n_mels=number_mel_bands, power=2)
        # dB conversion referenced to this example's own maximum.
        spectrograms.append(librosa.power_to_db(power, ref=np.amax(power)))
    return np.transpose(np.array(spectrograms), axes=(1, 2, 0))
def order_files(x, y, class_ids):
    """Reorder the examples (columns of x) so they are grouped by class id.

    Parameters
    ----------
    x : np.ndarray, shape (audio_samples, examples)
    y : np.ndarray, shape (examples,) — label id of each column of x
    class_ids : sequence of class ids, in the desired output order

    Returns
    -------
    (ordered_x, ordered_y) with columns grouped class by class; the
    original relative order is preserved inside each class.
    """
    ordered_cols = []
    ordered_labels = []
    for cid in class_ids:
        for col in range(x.shape[1]):
            if y[col] == cid:
                ordered_cols.append(x[:, col])
                ordered_labels.append(y[col])
    return np.transpose(np.array(ordered_cols)), np.array(ordered_labels)
def mix_files(x, y, examples_per_class = 28):
    # A function to mix the files in pairs. The mixing is performed in such a way that
    # for ten files corresponding to ten different classes in one folder, 45 (9+8+7+...+1) combinations are created
    #Inputs:
    #    x - array of size (audio_samples, examples); columns must already be
    #        grouped by class (see order_files)
    #    y - array containing the labels of each example, size = (examples,)
    #    examples_per_class - integer, representing the number of audio examples per class and folder that are to be combined
    #Returns:
    #    array with the mixed audio files, array of size = (audio_samples, 28*45)
    #    y_list - list containing the mixed classes ids, in the way [class_id_1-class_id_2,...]
    x_list = []
    y_list = []
    number_classes = 10
    for index in range(number_classes):       # The three for loops informally explained:
                                              # For each class in the present folder, run through every example of that class
        for i in range(examples_per_class):   # and combine it with one example of each other class (thus the 3rd loop)
            for ii in range(number_classes):
                # NOTE(review): since both sides share the same i, this
                # guard reduces to index >= ii — each unordered class pair
                # is mixed exactly once (example i of class `index` with
                # example i of class `ii`).
                if (index * examples_per_class + i) >= (ii * examples_per_class + i):
                    continue
                x_list.append(x[:, index * examples_per_class + i] + x[:, ii * examples_per_class + i])
                y_list.append(str(y[index * examples_per_class + i]) + '-' + str(y[ii * examples_per_class + i]))
    return np.transpose(np.array(x_list)), y_list
def one_hot_encode_mine(y_mixed):
    """Two-hot encode combined labels of the form 'a-b'.

    Parameters
    ----------
    y_mixed : list of str
        Combined class ids, e.g. ['3-2', '4-8', ...].

    Returns
    -------
    np.ndarray of shape (len(y_mixed), 10) with a 1 at both class indices.
    """
    encoded = np.zeros((len(y_mixed), 10), dtype=int)
    for row, pair in enumerate(y_mixed):
        first, second = pair.split('-')
        encoded[row, int(first)] = 1
        encoded[row, int(second)] = 1
    return encoded
def split_into_clips(input_file, sampling_rate, clip_duration):
    """Split audio into equal-length, non-overlapping clips.

    Clips shorter than ``clip_duration`` at the tail are discarded (a file
    shorter than one clip yields zero clips). Note: not used in the project.

    Returns
    -------
    np.ndarray of shape (samples_per_clip, number_of_clips).
    """
    samples_per_clip = int(sampling_rate * clip_duration)
    number_of_clips = len(input_file) // samples_per_clip
    clips = np.zeros((samples_per_clip, number_of_clips))
    for clip_index in range(number_of_clips):
        start = samples_per_clip * clip_index
        clips[:, clip_index] = input_file[start:start + samples_per_clip]
    return clips
def split_ordered_files(x_ordered, y_ordered, test_percentage = 10, num_classes = 10):
    # Split class-ordered examples into train / validation / test sets,
    # taking the same proportion from every class (stratified split).
    # Inputs:
    #   x_ordered - array (audio_samples, examples), columns grouped by class
    #   y_ordered - array (examples,) of labels matching x_ordered's columns
    #   test_percentage - percent of examples per split for val AND for test
    #   num_classes - number of distinct classes in y_ordered
    # Returns:
    #   x_train, y_train, x_val, y_val, x_test, y_test
    # NOTE(review): x_train/x_val/x_test are stacked per class, giving 3-D
    # arrays of shape (num_classes, audio_samples, examples_per_split) —
    # confirm downstream code expects this layout.
    x_train = []
    y_train = []
    x_val = []
    y_val = []
    x_test = []
    y_test = []
    # test_examples/train_examples are floats here; the per-class counts
    # below are truncated to int before slicing.
    test_examples = x_ordered.shape[1] * test_percentage / 100
    train_examples = x_ordered.shape[1] - 2 * test_examples
    examples_per_class = int(x_ordered.shape[1] / num_classes)
    train_examples_per_class = int(train_examples / num_classes)
    test_examples_per_class = int(test_examples / num_classes)
    for i in range(num_classes):
        # Per class: first the train slice, then validation, then test.
        x_train.append(x_ordered[:, (i * examples_per_class) : (i * examples_per_class) + train_examples_per_class])
        y_train.append(y_ordered[(i * examples_per_class) : (i * examples_per_class) + train_examples_per_class])
        x_val.append(x_ordered[:, (i * examples_per_class) + train_examples_per_class : (i * examples_per_class) + train_examples_per_class + test_examples_per_class])
        y_val.append(y_ordered[(i * examples_per_class) + train_examples_per_class : (i * examples_per_class) + train_examples_per_class + test_examples_per_class])
        x_test.append(x_ordered[:, (i * examples_per_class) + train_examples_per_class + test_examples_per_class : (i * examples_per_class) + train_examples_per_class + 2 * test_examples_per_class])
        y_test.append(y_ordered[(i * examples_per_class) + train_examples_per_class + test_examples_per_class : (i * examples_per_class) + train_examples_per_class + 2 * test_examples_per_class])
    return np.array(x_train), np.array(y_train).flatten(), np.array(x_val), np.array(y_val).flatten(), np.array(x_test), np.array(y_test).flatten()
| true |
f8d85fc2b1f5fcb0e83ab4a5243e6b05f0f2fe01 | Python | theanht1/flask_api_template | /tests/__init__.py | UTF-8 | 1,141 | 2.65625 | 3 | [] | no_license | import json
from base64 import b64encode
class AppTestClient:
def __init__(self, app, username, password):
self.client = app.test_client()
self.auth = "Basic " + b64encode(
(username + ":" + password).encode("utf-8")
).decode("utf-8")
def send(self, url, request, data=None, headers=None):
if not headers:
headers = {}
headers["Authorization"] = self.auth
headers["Content-Type"] = "application/json"
# Convert json data to string
data = json.dumps(data) if data else None
rv = request(url, data=data, headers=headers)
return rv, json.loads(rv.data.decode("utf-8"))
def get(self, url, headers=None):
return self.send(url, self.client.get, headers=headers)
def post(self, url, data=None, headers=None):
return self.send(url, self.client.post, data=data, headers=headers)
def put(self, url, data=None, headers=None):
return self.send(url, self.client.put, data=data, headers=headers)
def delete(self, url, headers=None):
return self.send(url, self.delete, headers=headers)
| true |
a782701546f596259bfeabec85e2e13ecefc1b09 | Python | ShengyuPei/IEEE-- | /IEEE论文PDF自动下载1.0.py | UTF-8 | 6,439 | 2.6875 | 3 | [] | no_license | import time
import requests
from selenium import webdriver
from bs4 import BeautifulSoup
import os
from os import path
def getHTMLText(url):
    """Fetch ``url`` and return the response body decoded as UTF-8.

    Returns '' on any failure (network error or non-2xx status); callers
    treat the empty string as "page unavailable".
    """
    try:
        r = requests.get(url,timeout=30)#
        r.raise_for_status() # raise HTTPError if the status is not 2xx
        # Force UTF-8 regardless of the declared encoding.
        # NOTE(review): 'utf - 8' works because Python normalizes codec
        # names, but the canonical spelling is 'utf-8'.
        r.encoding = 'utf - 8'
        return r.text
    except:
        return ""
# Configure a headless (no-window) Chrome browser for selenium.
def brower_int():
    # Use headless mode so no browser window is shown.
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_argument('--headless') # enable headless mode
    chrome_options.add_argument('--disable-gpu') # without this, element location can be unreliable
    #chrome_options.add_argument(r'--user-data-dir=C:\Users\king\AppData\Local\Google\Chrome\User Data') # point at your own Chrome user-data directory if needed
    return chrome_options
# Redraw an in-place console progress bar.
def proces_bar(scale, i, t):
    # i of scale steps done; t is a (negative) elapsed time, printed as -t.
    done = '*' * i
    todo = '.' * (scale - i)
    percent = (i / scale) * 100
    print("\r{:^3.0f}%[{}->{}]{:.2f}s".format(percent, done, todo, -t), end='')
    time.sleep(0.05)
# Read the list of paper titles to search for from a text file.
def get_paper_names(filename='names.txt'):
    """Return the paper titles, one per line, stripped of newlines.

    ``filename`` defaults to 'names.txt' so existing callers are unchanged;
    it is now a parameter for reuse and testability. On failure the error
    message is printed and None is returned (matching the original
    best-effort behavior).
    """
    try:
        # `with` guarantees the file is closed even if reading fails.
        with open(filename, 'r') as fo:
            return [line.replace('\n', '') for line in fo]
    except Exception:
        print("names文件打开失败,请检查原因")
# Build the IEEE Xplore search-result URL for each paper title.
def get_paper_links(names):
    base = 'https://ieeexplore.ieee.org/search/searchresult.jsp?newsearch=true&queryText='
    return [base + name for name in names]
# selenium: collect article numbers ("data-artnum") from a search-result page
def get_paper_nums(browser,url,num=1):#num: how many article numbers to collect for one paper name
    browser.get(url)
    ele_nums = []
    try:
        for link in browser.find_elements_by_xpath("//*[@data-artnum]"):# //* walks every element; @data-artnum selects those carrying that attribute
            if link.get_attribute('className')=='icon-pdf':# keep only PDF-icon entries to avoid duplicate article numbers
                ele_num = link.get_attribute('data-artnum')
                ele_nums.append(ele_num)
                if len(ele_nums) == num:
                    break
        return ele_nums
    except:
        # NOTE(review): on failure this prints and implicitly returns None,
        # while success returns a list — callers must handle both.
        print("failure")
# Fetch the display title of each article (used as the saved PDF filename).
def get_paper_title(ele_nums):
    titles=[]
    print("获取论文名字开始".center(50 // 2, '-'))
    t = time.process_time()
    i=1
    for ele_num in ele_nums:
        url='https://ieeexplore.ieee.org/document/'+ele_num
        html=getHTMLText(url)
        soup=BeautifulSoup(html,'html.parser')
        # The document page's <title> tag holds the paper title.
        title=soup.title.string
        titles.append(title)
        # NOTE(review): t accumulates negative process time so proces_bar
        # prints -t as elapsed seconds — a fragile timing trick; confirm.
        t -= time.process_time()
        proces_bar(len(ele_nums), i, t)
        i=i+1
    print("\n" + "获取论文名字结束".center(50 // 2, '-'))
    return titles
#爬取下载链接
def get_download_links(ele_nums):
    """Scrape the PDF iframe 'src' from the stamp page of each article number."""
    srcs=[]
    print("获取论文下载链接开始".center(50 // 2, '-'))
    t = time.time()
    i=1
    for ele_num in ele_nums:
        url = 'https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber='+ele_num
        html=getHTMLText(url)
        soup=BeautifulSoup(html,'lxml')#lxml html.parser
        # NOTE(review): retries until an <iframe> appears; if the page never
        # contains one (e.g. paywalled), this loop never terminates.
        while(soup.find_all('iframe') == []):
            html = getHTMLText(url)
            soup = BeautifulSoup(html, 'lxml') # lxml html.parser
        src=soup.iframe.attrs['src']
        srcs.append(src)
        proces_bar(len(ele_nums) , i, t-time.time())
        i = i + 1
    print("\n" + "获取论文下载链接结束".center(50 // 2, '-'))
    return srcs
#下载一个大文件
def DownOneFile(srcUrl, localFile):
    """Stream one file from *srcUrl* to *localFile*, printing live progress."""
    # print('%s\n --->>>\n %s' % (srcUrl, localFile))
    startTime = time.time()
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36','Accept-Encoding': 'gzip, deflate'}
    with requests.get(srcUrl, timeout=80,stream=True,headers=headers) as r:
        # NOTE(review): assumes the server always sends Content-Length;
        # a chunked response without it would raise KeyError here.
        contentLength = int(r.headers['content-length'])
        downSize = 0
        with open(localFile, 'wb') as f:
            # 8192-byte chunks keep memory flat for large PDFs.
            for chunk in r.iter_content(8192):
                if chunk:
                    f.write(chunk)
                downSize += len(chunk)
                line = '\r%d KB/s - %.2f MB, 共 %.2f MB'
                line = line % (downSize / 1024 / (time.time() - startTime), downSize / 1024 / 1024, contentLength / 1024 / 1024)
                print(line,end='')
                if downSize >= contentLength:
                    break
    timeCost = time.time() - startTime
    line = ' 共耗时: %.2f s, 平均速度: %.2f KB/s'
    line = line % (timeCost, downSize / 1024 / timeCost)
    print(line)
#批量下载pdf文件
def DownFiles(srcUrls,titles):
    """Download every PDF in *srcUrls* into a local '论文' folder, named by *titles*."""
    print("执行开始".center(50 // 2, '-')+"\n" )
    # create the download directory for the papers
    fpath = os.getcwd() # current working directory
    fpath = path.join(fpath, '论文')
    if not os.path.isdir('论文'): # create the folder only if it is missing
        os.makedirs(fpath)
    print('总共要下载{}个文件'.format(len(srcUrls)))
    for i in range(len(srcUrls)):
        print('正在下载第{}个文件:{}'.format(i+1,titles[i]))
        # NOTE(review): titles come straight from page <title> text; characters
        # that are illegal in filenames are not sanitized here.
        filename = path.join(fpath, titles[i] + '.pdf')
        DownOneFile(srcUrls[i], filename)
    print("\n" + "执行结束".center(50 // 2, '-'))
if __name__ == '__main__':
    # Pipeline: names.txt -> search URLs -> article numbers (selenium)
    # -> download links + titles (requests) -> PDF downloads.
    names=get_paper_names()
    urls=get_paper_links(names)
    browser = webdriver.Chrome(options=brower_int())
    ele_nums=[]
    none_nums=[]
    print("获取论文编码开始".center(50 // 2, '-'))
    t = time.time()
    proces_bar(len(urls), 0, 0)
    for i in range(len(urls)):# collect up to 3 article numbers per paper name
        ele_num=get_paper_nums(browser, urls[i], 3)
        # NOTE(review): get_paper_nums returns None when scraping fails,
        # which would make both lines below raise — verify the failure path.
        ele_nums=ele_nums+ele_num
        if len(ele_num) == 0:
            none_nums.append(i)
        proces_bar(len(urls),i+1,t-time.time())
    browser.close()
    print("\n" + "获取下载论文编号结束".center(50 // 2, '-'))
    print(ele_nums)
    for num in none_nums:
        print('{}获取不到论文编码'.format(names[num]))
    srcs = get_download_links(ele_nums) # ['7363526']
    titles = get_paper_title(ele_nums)
    DownFiles(srcs, titles)
086990932ecb7842716baa5473d1f47f5646b882 | Python | statick64/python_class | /first_class.py | UTF-8 | 117 | 3.71875 | 4 | [] | no_license | import random
# Pick one family word at random and print its characters, one per line.
word = ["brother", "mother", "sister", "mister", "father"]
chosen = random.choice(word)
for letter in chosen:
    print(letter)
859ca6950b7f77d342a2782a9b40d361c0579369 | Python | emilywright/P.O.D | /PODui/PODViewExperiment.py | UTF-8 | 4,680 | 2.6875 | 3 | [] | no_license | import sys
import os
import csv
from PyQt5 import QtCore, QtGui, QtWidgets
class viewExperiment(QtWidgets.QWidget):
    """Window that lists every saved experiment found in ./currentExperiments."""
    def __init__(self):
        super().__init__()
        # NOTE(review): init_ui takes *args, so the extra `self` argument is
        # simply ignored — likely a leftover from an earlier signature.
        self.init_ui(self)
    def init_ui(self, *args):
        """Build the whole UI and fill the table from the CSV files on disk."""
        self.setWindowTitle("P.O.D.")
        # far down on page, far left on page, width, height
        self.setGeometry(325, 100, 650, 400)
        self.setStyleSheet("background-color:#FFFFFF");
        #98FB98
        # Title label
        self.createNewLabel = QtWidgets.QLabel(self)
        self.createNewLabel.setText('View Experiments')
        self.createNewLabel.move(300, 15)
        self.createFont = QtGui.QFont("Times", 24, QtGui.QFont.Bold)
        self.createNewLabel.setFont(self.createFont)
        self.createNewLabel.setStyleSheet("background-color:#FFFFFF")
        # labels
        # Background label (green band behind the logo)
        self.menuLabel = QtWidgets.QLabel(self)
        self.menuLabel.setGeometry(QtCore.QRect(0, 0, 175, 70))
        self.menuLabel.setStyleSheet("background-color:#98FB98")
        # Image logo, scaled preserving aspect ratio
        self.podLogo = QtWidgets.QLabel(self)
        self.originalpixmap = QtGui.QPixmap('podLogo1.png')
        self.adjustedPixmap = self.originalpixmap.scaled(150, 150, QtCore.Qt.KeepAspectRatio, QtCore.Qt.FastTransformation)
        self.podLogo.setPixmap(self.adjustedPixmap)
        self.podLogo.setStyleSheet("background-color:#98FB98")
        self.podLogo.move(15, 10)
        # Exit button closes the widget
        self.buttonFont = QtGui.QFont("Helvetica", 12)
        self.exitButton = QtWidgets.QPushButton(self)
        self.exitButton.setText('Exit')
        self.exitButton.move(70, 300)
        self.exitButton.resize(100, 40);
        self.exitButton.setStyleSheet("background-color:#FFFFFF")
        self.exitButton.setFont(self.buttonFont)
        self.exitButton.clicked.connect(self.close)
        self.currDir = 0
        # print(os.getcwd() + "/currentExperiments")
        self.directory = os.fsencode(os.getcwd() + "/currentExperiments")
        # print(self.directory)
        self.dirlist = os.listdir(self.directory) # dir is your directory path
        self.number_files = len(self.dirlist)
        # print(self.number_files)
        self.tableWidget = QtWidgets.QTableWidget(self)
        # set row count
        self.tableWidget.setRowCount(self.number_files)
        # set column count
        self.tableWidget.setColumnCount(13)
        self.tableWidget.setHorizontalHeaderLabels(('Experiment Name', 'Start Date', 'Start Time', 'End Date', 'End Time', 'Water Delay', 'Water Duration', 'Light Delay', 'Light Error', 'Temp Delay', 'Temp Error', 'Photo Delay', 'CSV File'))
        self.tableWidget.move(10, 75)
        self.tableWidget.resize(625, 275)
        self.tableWidget.setColumnWidth(0, 150)
        # # simple version for working with CWD
        # print(len([self.name for self.name in os.listdir('.') if os.path.isfile(self.name)]))
        # One CSV file per experiment; each file's rows map onto table columns.
        # NOTE(review): columns 0-4 are never populated here, and rows are
        # whole CSV rows stringified then stripped of [] and quotes.
        for self.file in os.listdir(self.directory):
            self.filename = os.fsdecode(self.file)
            # print(self.filename)
            with open("currentExperiments/" + self.filename, "r") as self.fileInput:
                self.reader = csv.reader(self.fileInput)
                self.fileContents = list(self.reader)
                # print(str(self.fileContents[0]).strip('[]'))
                self.tableWidget.setItem(self.currDir, 5, QtWidgets.QTableWidgetItem(str(self.fileContents[3]).strip("[]").strip("''")))
                self.tableWidget.setItem(self.currDir, 6, QtWidgets.QTableWidgetItem(str(self.fileContents[4]).strip("[]").strip("''")))
                self.tableWidget.setItem(self.currDir, 7, QtWidgets.QTableWidgetItem(str(self.fileContents[5]).strip("[]").strip("''")))
                self.tableWidget.setItem(self.currDir, 8, QtWidgets.QTableWidgetItem(str(self.fileContents[6]).strip("[]").strip("''")))
                self.tableWidget.setItem(self.currDir, 9, QtWidgets.QTableWidgetItem(str(self.fileContents[7]).strip("[]").strip("''")))
                self.tableWidget.setItem(self.currDir, 10, QtWidgets.QTableWidgetItem(str(self.fileContents[8]).strip("[]").strip("''")))
                self.tableWidget.setItem(self.currDir, 11, QtWidgets.QTableWidgetItem(str(self.fileContents[9]).strip("[]").strip("''")))
                # self.tableWidget.setItem(self.currDir, 12, QtWidgets.QTableWidgetItem(str(self.fileContents[11]).strip("[]").strip("''")))
                self.tableWidget.setItem(self.currDir, 12, QtWidgets.QTableWidgetItem(str(self.fileContents[2]).strip("[]").strip("''")))
                self.currDir = self.currDir + 1
        self.show()
| true |
815d09668b21b28fc55e0335abe4a3d807281036 | Python | andrei47w/Fundamentals-of-Programming | /Lab1/pb12.py | UTF-8 | 1,236 | 4.34375 | 4 | [] | no_license | """
Determine the age of a person, in number of days.
Take into account leap years, as well as the date of birth and current date
(year, month, day).
Do not use Python's inbuilt date/time functions.
"""
def days_of_month(m):
    """Return the number of days in month *m* (1-12) of a non-leap year.

    February always reports 28 here; the leap-day correction is applied
    separately by the caller (see nr_days).
    """
    # Bug fix: the 30-day months are April, June, September and NOVEMBER
    # (4, 6, 9, 11). The original listed October (10) instead, giving
    # October 30 days and November 31.
    if m in (4, 6, 9, 11):
        return 30
    if m == 2:
        return 28
    return 31
def is_leap(y):
    """Return True if Gregorian year *y* is a leap year.

    Bug fix: the original only treated multiples of 400 as leap years, so
    ordinary leap years like 2020 and 2024 were misclassified. The full
    Gregorian rule: divisible by 4, except centuries, except multiples of 400.
    """
    return y % 4 == 0 and (y % 100 != 0 or y % 400 == 0)
def nr_days(d, m, y):
    """Return the day-of-year (1-based) for the date d/m/y.

    Sums the lengths of the months before *m*, adds the day of the month,
    and adds one extra day for Feb 29 when *y* is a leap year and the date
    falls after February.
    """
    days = int(d)
    if m > 2 and is_leap(y):
        days += 1
    for month in range(1, m):
        # Bug fix: the original added days_of_month(m) — the length of the
        # *target* month — on every iteration instead of days_of_month(month).
        days += days_of_month(month)
    return days
def age_in_days(d1, m1, y1, d2, m2, y2):
    """Return the number of days between d1/m1/y1 (birth) and d2/m2/y2 (today)."""
    # Count the whole calendar years between the two dates, 366 days for
    # leap years and 365 otherwise.
    whole_years = sum(366 if is_leap(year) else 365 for year in range(y1, y2))
    # Shift by each date's position inside its own year.
    return whole_years - nr_days(d1, m1, y1) + nr_days(d2, m2, y2)
if __name__ == '__main__':
    # NOTE(review): each int(input()) reads its own line, so the user must
    # enter day, month and year on three separate lines despite one prompt.
    print("give birth date: ", end="")
    d1,m1,y1=int(input()), int(input()), int(input())
    print("give current date: ", end="")
    d2,m2,y2=int(input()), int(input()), int(input())
    print("age in number of days is:", age_in_days(d1, m1, y1, d2, m2, y2), end="")
| true |
8ff0e650a5fe3fddf86037f733819e4ec57df6be | Python | prathmachowksey/introductory-ml | /face-detection/face-detection.py | UTF-8 | 968 | 3.078125 | 3 | [] | no_license |
import cv2 as cv
# Read image from your local file system
original_image = cv.imread('./faces2.jpeg')
# Convert color image to grayscale for Viola-Jones
grayscale_image = cv.cvtColor(original_image, cv.COLOR_BGR2GRAY)
#Haar Cascade for face detection
face_cascade = cv.CascadeClassifier('./opencvfiles/haarcascade_frontalface_alt.xml')
detected_faces = face_cascade.detectMultiScale(grayscale_image,scaleFactor=1.3,minNeighbors=5)
# Draw a rectangle around every detection.
for (column, row, width, height) in detected_faces: #x,y,w,h
    cv.rectangle(
        original_image,
        (column, row),#x,y of top left corner
        (column + width, row + height), #x,y of bottom right corner
        (0, 255, 0), #color in BGR (OpenCV channel order) -> green
        2 #line thickness
    )
cv.imshow('Image', original_image)
cv.waitKey(0) #so that image window does not close until a key is pressed
cv.destroyAllWindows()
#This works only for frontal faces. Notice how all the faces in the file 'faces.jpeg' are identified, but not those in the file 'faces2.jpeg'
| true |
48cb400ecfe58f9993a93ed2dfafd3c82b87011a | Python | AchintyaX/Lung_segmentation_detection | /ct_viz.py | UTF-8 | 1,957 | 2.734375 | 3 | [] | no_license | import SimpletITK as sitk
import matplotlib.pyplot as plt
# Bug fix: '%matplotlib inline' is an IPython magic, not Python syntax — it
# made this file unimportable as a plain module. Keep it as a comment; run
# the magic manually inside a notebook session instead.
# %matplotlib inline
import numpy as np
from skimage import measure, feature
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
# Ensure that we have both .mhd and .raw files for the ct_scan in the same directory
class ct_visualization:
    """Load a CT scan (.mhd/.raw pair) and offer 2D slice and 3D mesh plots."""
    def __init__(self, path):
        # NOTE(review): the module imports 'SimpletITK' — a misspelling of
        # SimpleITK — so this class cannot load until that import is fixed.
        self.path = path
        self.data = sitk.ReadImage(self.path)
        self.spacing = self.data.GetSpacing()
        # numpy array view of the voxel data, one 2D slice per first axis entry
        self.scan = sitk.GetArrayFromImage(self.data)
    # plotting all the 2D slices for a ct-scan
    def plot_ct_scan(self, num_column=4, jump=1):
        """Plot every jump-th slice on a num_column-wide grid of subplots."""
        # counting the number of slices for the scan
        num_slices = len(self.scan)
        # ceil division so a partial last row still gets plotted
        num_row = (num_slices//jump + num_column - 1) // num_column
        f, plots = plt.subplots(num_row, num_column, figsize=(num_column*5, num_row*5))
        for i in range(0, num_row*num_column):
            # subplots returns a 1D axes array when there is a single row
            plot = plots[i % num_column] if num_row == 1 else plots[i // num_column, i % num_column]
            plot.axis('off')
            if i < num_slices//jump:
                plot.imshow(self.scan[i*jump], cmap=plt.cm.bone)
    def plot_3d(self, threshold=-400):
        """Render an isosurface mesh of the scan at the given HU threshold."""
        # Position the scan upright,
        # so the head of the patient would be at the top facing the camera
        p = self.scan.transpose(2,1,0)
        # p = p[:,:,::-1]
        # NOTE(review): marching_cubes_classic is deprecated in newer
        # scikit-image releases — confirm the pinned version still has it.
        verts,faces = measure.marching_cubes_classic(p, threshold)
        fig = plt.figure(figsize=(10, 10))
        ax = fig.add_subplot(111, projection='3d')
        # Fancy indexing: `verts[faces]` to generate a collection of triangles
        mesh = Poly3DCollection(verts[faces], alpha=0.1)
        face_color = [0.5, 0.5, 1]
        mesh.set_facecolor(face_color)
        ax.add_collection3d(mesh)
        ax.set_xlim(0, p.shape[0])
        ax.set_ylim(0, p.shape[1])
        ax.set_zlim(0, p.shape[2])
        plt.show()
| true |
ce1fbf6d0843a98bb0f6fd84c3d42a6009fc6450 | Python | ricardomdesa/bufunfa-back-end | /src/utils/authentication_utils.py | UTF-8 | 774 | 3.0625 | 3 | [] | no_license | import binascii
import hashlib
import hmac
import os
def verify_password(stored_password, password):
    """Check *password* against a stored salt+hash string.

    *stored_password* is the string produced by hash_password(): a
    64-character hex salt followed by the hex PBKDF2-HMAC-SHA512 digest.
    Returns True when the password matches.
    """
    ITERATIONS = 100000  # must match hash_password()
    SALT_SIZE = 64       # length of the hex-encoded sha256 salt
    salt = stored_password[:SALT_SIZE]
    expected = stored_password[SALT_SIZE:]
    password_hash = hashlib.pbkdf2_hmac("sha512", password.encode("utf-8"), salt.encode("ascii"), ITERATIONS)
    password_hash = binascii.hexlify(password_hash).decode("ascii")
    # Security fix: constant-time comparison so the check does not leak how
    # many leading characters of the digest matched (timing side channel).
    return hmac.compare_digest(password_hash, expected)
def hash_password(password):
    """Derive a salted PBKDF2-HMAC-SHA512 hash for *password*.

    Returns a single ASCII string: a 64-char hex salt (sha256 of random
    bytes) followed by the 128-char hex digest, suitable for storage and
    later verification with verify_password().
    """
    iterations = 100000
    salt = hashlib.sha256(os.urandom(60)).hexdigest().encode("ascii")
    digest = binascii.hexlify(
        hashlib.pbkdf2_hmac("sha512", password.encode("utf-8"), salt, iterations)
    )
    return (salt + digest).decode("ascii")
| true |
9739fc529f766bf842ac1dd705beb4688cc7a3f3 | Python | maxcool73/PyCB | /ch01/dequeinthreads.py | UTF-8 | 499 | 3.0625 | 3 | [] | no_license | from _collections import deque
from time import sleep
from threading import Thread
que = deque()
def fun1(que: deque):
    """Producer: once a second, append the next progress string ('10%'..'100%')."""
    for percent in range(10, 110, 10):
        sleep(1)
        que.append('{}%'.format(percent))
def fun2(que: deque):
    """Consumer: once a second, pop and print the oldest entry if one exists."""
    for _ in range(10, 110, 10):
        sleep(1)
        # Guard against racing ahead of the producer: only pop when non-empty.
        if que:
            print(que.popleft())
if __name__ == '__main__':
    # Run producer and consumer concurrently over the shared module-level deque.
    p1 = Thread(target=fun1, args=(que,))
    p2 = Thread(target=fun2, args=(que,))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
| true |
75a5d8004fca281d413bff56b8da0319e846e02c | Python | michaeljpitcher/Lung-Network-Model | /Previous/V3_Metapop_edge_weighted/Tests/v3_Tests.py | UTF-8 | 7,313 | 2.5625 | 3 | [] | no_license | import math
import unittest
import numpy as np
from Previous import V3_Metapop_edge_weighted as MWN
class v3BasicTestCase(unittest.TestCase):
    """Unit tests for MetapopulationWeightedNetwork on a small 6-node graph.

    Several tests seed numpy's RNG with magic values whose resulting draws
    (noted in the inline comments) steer a specific branch of the model.
    NOTE(review): assertItemsEqual exists only in Python 2's unittest; on
    Python 3 this suite would need assertCountEqual.
    """
    def setUp(self):
        # Hexagon-ish graph with descending edge weights 10..5 and two
        # initially infected nodes (1 -> 30 bacteria, 5 -> 10 bacteria).
        self.edges = [(0,1), (1,2), (1,3), (1,4), (4,5), (5,0)]
        self.weights = dict()
        count = 10
        for edge in self.edges:
            self.weights[edge] = count
            count -= 1
        self.pos = [(1,1), (2,2), (2,3), (3,3), (3,2), (2,1)]
        self.infections = dict()
        self.infections[1] = 30
        self.infections[5] = 10
        self.ptransmit = 0.5
        self.pgrowth = - 0.1
        self.timelimit = 1
        self.network = MWN.MetapopulationWeightedNetwork(self.edges, self.weights, self.pos, self.infections,
                                                         self.ptransmit, self.pgrowth, self.timelimit)
    def test_initialise(self):
        """Constructor wires up nodes, edges, counts, rates and bookkeeping."""
        self.assertItemsEqual(self.network.nodes(), range(0,6))
        for e1, e2 in self.edges:
            self.assertTrue((e1,e2) in self.network.edges() or (e2,e1) in self.network.edges())
        self.assertEqual(self.network.edge[0][1]['weight'], 10)
        self.assertEqual(self.network.edge[1][2]['weight'], 9)
        self.assertEqual(self.network.edge[1][3]['weight'], 8)
        self.assertEqual(self.network.edge[1][4]['weight'], 7)
        self.assertEqual(self.network.edge[4][5]['weight'], 6)
        self.assertEqual(self.network.edge[5][0]['weight'], 5)
        self.assertSequenceEqual(self.network.positioning, self.pos)
        self.assertItemsEqual(self.network.infected_nodes, self.infections.keys())
        for n in range(0,6):
            if n in self.infections:
                self.assertEqual(self.network.node[n]['count'], self.infections[n])
            else:
                self.assertEqual(self.network.node[n]['count'], 0.0)
        self.assertEqual(self.network.rates['p_transmit'], self.ptransmit)
        self.assertEqual(self.network.rates['p_growth'], self.pgrowth)
        self.assertEqual(self.network.timestep, 0.0)
        self.assertEqual(self.network.time_limit, self.timelimit)
        self.assertItemsEqual(self.network.data.keys(), [0.0])
        self.assertItemsEqual(self.network.data[0.0].keys(), range(0,6))
        for n in range(0, 6):
            if n in self.infections:
                self.assertEqual(self.network.data[0.0][n], self.infections[n])
            else:
                self.assertEqual(self.network.data[0.0][n], 0.0)
        self.assertEqual(self.network.max_count, max(self.infections.values()))
        self.assertEqual(self.network.total_possible_transmission, 0.0)
        self.assertEqual(self.network.total_bacteria, 0.0)
    def test_record_data(self):
        """record_data snapshots the per-node counts under the current timestep."""
        self.network.timestep = 1.0
        self.network.node[3]['count'] = 99
        self.network.record_data()
        self.assertEqual(self.network.data.keys(), [0.0, 1.0])
        for n in range(0,6):
            # at t = 0.0
            if n in self.infections:
                self.assertEqual(self.network.data[0.0][n], self.infections[n])
            else:
                self.assertEqual(self.network.data[0.0][n], 0.0)
            # at t = 1.0
            if n in self.infections:
                self.assertEqual(self.network.data[1.0][n], self.infections[n])
            elif n == 3:
                self.assertEqual(self.network.data[1.0][n], 99)
            else:
                self.assertEqual(self.network.data[1.0][n], 0.0)
    def test_update_node(self):
        """update_node adjusts counts, infected set and the running max."""
        # Positive - already infected
        self.network.update_node(1, 1)
        self.assertEqual(self.network.node[1]['count'], 31)
        self.assertItemsEqual(self.network.infected_nodes, [1, 5])
        self.assertEqual(self.network.max_count, 31)
        # Positive - newly infected
        self.network.update_node(0, 1)
        self.assertEqual(self.network.node[0]['count'], 1)
        self.assertItemsEqual(self.network.infected_nodes, [0, 1, 5])
        self.assertEqual(self.network.max_count, 31)
        # Negative - stays infected
        self.network.update_node(5,-1)
        self.assertEqual(self.network.node[5]['count'], 9)
        self.assertItemsEqual(self.network.infected_nodes, [0, 1, 5])
        self.assertEqual(self.network.max_count, 31)
        # Negative - become susceptible
        self.network.update_node(1, -31)
        self.assertEqual(self.network.node[1]['count'], 0)
        self.assertItemsEqual(self.network.infected_nodes, [0, 5])
        self.assertEqual(self.network.max_count, 9)
    def test_update_node_fails(self):
        """Driving a count below zero must raise an AssertionError."""
        with self.assertRaises(AssertionError) as context:
            self.network.update_node(1, -99)
        self.assertTrue('update_node: Count cannot drop below zero' in context.exception)
    def test_update_totals(self):
        self.network.update_totals()
        # Sum of (Count at infected node * sum of weights of edges from infected node)
        self.assertEqual(self.network.total_possible_transmission, 30 * (4) + 10 * (2))
        # Sum of count at all infected node
        self.assertEqual(self.network.total_bacteria, 30 + 10)
    def test_calculate_dt(self):
        """dt follows the Gillespie formula (1/rate) * ln(1/x) for seeded x."""
        np.random.seed(100) # x = 0.543404941791
        dt = self.network.calculate_dt(100.0)
        self.assertAlmostEqual(dt, (1.0 / 100.0) * math.log(1.0 / 0.543404941791))
    def test_choose_transition(self):
        # Picks a transmit
        self.network.update_totals()
        transitions = self.network.transitions()
        total = transitions[0][0] + transitions[1][0]
        np.random.seed(100) # x = 370.05876536
        function = self.network.choose_transition(total, transitions)
        self.assertEqual(function,0)
        # Picks a growth
        # Alter network - easier to get a growth
        growth_network = MWN.MetapopulationWeightedNetwork(self.edges, self.weights, self.pos, self.infections, 0.001,
                                                           0.5, self.timelimit)
        growth_network.update_totals()
        transitions = growth_network.transitions()
        total = transitions[0][0] + transitions[1][0]
        np.random.seed(100) # x = 17.0357449251
        function = growth_network.choose_transition(total, transitions)
        self.assertEqual(function, 1)
    def test_transmit(self):
        """A transmit event moves one bacterium along a seeded-RNG-chosen edge."""
        self.network.update_totals()
        np.random.seed(100) # r = 97.8128895224, r2=9.46455909319 - should pick edge 1 -> 0
        self.network.transmit()
        self.assertEqual(self.network.node[1]['count'], 29)
        self.assertEqual(self.network.node[0]['count'], 1)
        self.network.update_totals()
        np.random.seed(2135) # r = 154.801986527, r2 = 8.31990523483 - should pick edge 5 -> 4
        self.network.transmit()
        self.assertEqual(self.network.node[5]['count'], 9)
        self.assertEqual(self.network.node[4]['count'], 1)
    def test_growth(self):
        """Negative growth rate removes one bacterium from the chosen node."""
        self.network.update_totals()
        np.random.seed(100) # r = 21.7361976716 - picks node 1
        self.network.growth()
        self.assertEqual(self.network.node[1]['count'], 29)
        self.network.update_totals()
        np.random.seed(65765) # r = 37.9819896608 - picks node 5
        self.network.growth()
        self.assertEqual(self.network.node[5]['count'], 9)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| true |
face6b982d3ce98252a0b934765bc081a70526af | Python | pdeitel/PythonFundamentalsLiveLessons | /examples/ch16/HadoopMapReduce/length_reducer.py | UTF-8 | 1,718 | 3.015625 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/usr/bin/env python3
# length_reducer.py
"""Counts the number of words with each length."""
import sys
from itertools import groupby
from operator import itemgetter
def tokenize_input():
    """Yield each stdin line as a [key, value] pair split on tab."""
    for raw_line in sys.stdin:
        yield raw_line.strip().split('\t')
# produce key-value pairs of word lengths and counts separated by tabs
# (groupby assumes Hadoop has already sorted the mapper output by key)
for word_length, group in groupby(tokenize_input(), itemgetter(0)):
    try:
        total = sum(int(count) for word_length, count in group)
        print(word_length + '\t' + str(total))
    except ValueError:
        pass # ignore word if its count was not an integer
##########################################################################
# (C) Copyright 2019 by Deitel & Associates, Inc. and #
# Pearson Education, Inc. All Rights Reserved. #
# #
# DISCLAIMER: The authors and publisher of this book have used their #
# best efforts in preparing the book. These efforts include the #
# development, research, and testing of the theories and programs #
# to determine their effectiveness. The authors and publisher make #
# no warranty of any kind, expressed or implied, with regard to these #
# programs or to the documentation contained in these books. The authors #
# and publisher shall not be liable in any event for incidental or #
# consequential damages in connection with, or arising out of, the #
# furnishing, performance, or use of these programs. #
##########################################################################
| true |
3bf117cc7d8f594599d4b470681a8e8c5eca849d | Python | huberf/clara-pip | /clara/utils/convo_reader.py | UTF-8 | 2,169 | 2.71875 | 3 | [
"MIT"
] | permissive | import json
def convert_to_json(raw):
    """Parse a 'Q:/R:' conversation script into a list of dicts.

    Input format (per entry): 'Q: q1; q2\nR: reply1; reply2\n' where each
    reply may carry '\\' modifiers ('name=val' or '^state'/'state') and an
    optional '|{json}' suffix (currently only an 'image' key is used).
    Returns [{'starters': [...], 'replies': [{'text', 'weight',
    'modifiers', 'context', ('image')}]}, ...].
    """
    # Setup variable to return at end
    convo = []
    formatted = raw.split('Q: ')
    for a in formatted:
        if len(a) > 0:
            actual_data = a.split('\nR: ')
            # Separate out queries and generate multiple if ; exists
            queries = actual_data[0].split('; ')
            # Strip all newlines
            actual_data[1] = actual_data[1].replace('\n', '')
            replies = []
            for i in actual_data[1].split('; '):
                data = i.split('|')
                refined = data[0].split('\\')
                modifiers = [] # These are things that modify the systems emotions and make changes that would kick off events
                response_states = [] # These merely gives context to the response and enables one to have conversation flow
                try:
                    # NOTE(review): bare except below also swallows int()
                    # ValueErrors mid-loop, leaving partially-built modifiers.
                    mods = refined[1].split('.')
                    for z in mods:
                        parts = z.split('=')
                        if len(parts) == 2:
                            modifiers += [ {'name': parts[0], 'val': int(parts[1])} ]
                        elif len(parts) == 1:
                            starting = False
                            name = parts[0]
                            # '^' prefix marks a state that begins with this reply
                            if parts[0][0] == '^':
                                starting = True
                                name = parts[0][1:]
                            response_states += [ { 'name': name, 'starting': starting} ]
                except:
                    doNothing = True
                to_add = {'text': refined[0], 'weight': 1}
                to_add['modifiers'] = modifiers
                to_add['context'] = response_states
                try:
                    # Optional trailing JSON payload after '|'
                    converted = json.loads(data[1])
                    try:
                        to_add['image'] = converted['image']
                    except:
                        do_nothing = True
                except:
                    do_nothing = True
                replies += [to_add]
            convo += [{
                'starters': queries,
                'replies': replies
            }]
    return convo
| true |
b7e287d58da020412d4bcaab8c334ef89c3ac747 | Python | cferrara212/worksheet1python | /worksheet.py | UTF-8 | 2,544 | 4.0625 | 4 | [] | no_license | import random
# problem 1
day_of_week = 'monday'
print(day_of_week)
day_of_week = 'friday'
print(f"I can't wait for {day_of_week}")
#problem 2 — interactive: reads two answers from stdin
animal_input = input('What is your favorite animal?')
color_input = input('What is your favorite color?')
print(f"I've never seen a {color_input} {animal_input}")
#conditionals
#favorite breakfast is eggs, lunch is ruben, and dinner is chicken pasta
time_of_day = 1100
favorite_meal_for_time = ''
meal_of_day=''
# NOTE(review): time_of_day == 1200 exactly matches neither branch and
# falls through to dinner.
if time_of_day <1200:
    meal_of_day = 'Eggs for breakfast'
elif time_of_day > 1200 and time_of_day <1700:
    meal_of_day = 'Ruben for lunch!'
else:
    meal_of_day = 'chicken pasta is for dinner!'
print(meal_of_day)
#random number — pick a band bucket from a 0..10 draw
random_number= random.randint(0,10)
if random_number < 3:
    print('Beatles')
elif random_number > 2 and random_number <6:
    print('Stones')
elif random_number > 5 and random_number < 9:
    print('Floyd')
else:
    print('Hendrix')
print(random_number)
# for loop iterating a certain number of times
for thing in range(7):
    print('python is cool!')
number=0
for number in range(11):
    print(number)
    number += 1
for word in range(5):
    print('hello'+'\n'+ 'goodbye')
# while loops make sure to end loop somehow
height = 40
while height < 48:
    print('you cant ride this ride')
    height += 1
#magic number!
magic_number = 50
guess = input('guess the magic number')
guess_int = int(guess)
# NOTE(review): a guess of 0 is falsy and exits the loop immediately.
while guess_int:
    if guess_int == magic_number:
        print(f'{magic_number} is the magic number!')
        break
    if guess_int < magic_number:
        guess = input('too low, try again')
        guess_int = int(guess)
    if guess_int > magic_number:
        guess = input('too high, try again')
        guess_int = int(guess)
    if guess_int in range(magic_number-10, magic_number+10):
        print('getting warmer')
# favorite movie void function
def print_movie_name():
    """Print the hard-coded favorite movie title ('kill bill')."""
    favorite = 'kill bill'
    print(favorite)
print_movie_name()  # demo call, runs at import time
#favorite band
def user_favorite_band():
    """Ask the user for their favorite band and return the raw answer."""
    return input('what is your favorite band')
users_band = user_favorite_band()  # prompts on stdin at import time
print(users_band)
# concert_display(users_band)
def concert_display(musical_act):
    """Ask for the user's street and wish for *musical_act* to play there."""
    my_street = input('what street do you live on?')
    message = f'it would be great if {musical_act} played on {my_street}.'
    print(message)
#desktop items — basic list indexing, append and iteration
desktop_items = ['tablet', 'phone', 'computer']
print(desktop_items[1])  # second item: 'phone'
desktop_items.append('infinity gauntlet')
for items in desktop_items:
    print(items)
b3eea6c8710ab898b06501191baff37364077963 | Python | darkfusion90/PyPoem | /PyPoem.py | UTF-8 | 2,368 | 3 | 3 | [
"MIT"
] | permissive | import bs4
from bs4 import BeautifulSoup
import requests
import time
class PyPoem:
    """Search poetryfoundation.org for a poem and save its text to hello.poem.

    NOTE(review): all network scraping happens inside __init__, so simply
    constructing the object prompts on stdin and performs HTTP requests.
    """
    def __init__(self):
        self.poem = ""
        self.poet = ""
        self.title = ""
        name = input('Poem name?\n')
        print('Connecting to server...')
        self.getPoem(name)
        # 50-star banner lines wrap the extracted poem text in the output file.
        beg = '*' * 50 + '\n'
        end = '\n'+'*'*50
        with open('hello.poem', 'w') as file:
            #file.write(self.title)
            file.write(beg+self.poem+end)
        print('Successful. Extracted to file hello.poem')
    def getPoem(self, poem_query):
        """Resolve *poem_query* to a page, scrape title/poet/contents, return contents."""
        poem_url = self.getPoemURL(poem_query)
        poem_html_page = self.fetchPage(poem_url)
        poem_soup = BeautifulSoup(poem_html_page.content, 'html.parser')
        self.title = self.getPoemTitle(poem_soup)
        self.poet = self.getPoetName(poem_soup)
        self.poem = self.getPoemContents(poem_soup)
        return self.poem
    def getPoemTitle(self, poem_soup):
        # NOTE(review): unimplemented stub — always returns None, which is
        # why the title write in __init__ is commented out.
        pass
    def getPoemContents(self, poem_page):
        """Extract and format the poem body from its page soup."""
        poem_div = poem_page.find('div', {'class': ['o-poem', 'isActive']})
        poem_contents = self.formatPoem(poem_div)
        return poem_contents
    def formatPoem(self, poem_div):
        """Join the text of each child div of the poem container, one line per div."""
        poem_contents = ""
        for child in poem_div.children:
            if child.name == 'div':
                for string in child.stripped_strings:
                    poem_contents += string
                poem_contents += "\n"
        print(poem_contents)
        return poem_contents
    def getPoetName(self, page_content):
        """Scrape the poet's name, dropping the leading 'By'."""
        poet_name_div = page_content.find(
            'div', {'class': ['c-feature-sub', 'c-feature-sub_vast']})
        poet_name = poet_name_div.div.span.text.replace('By', '').strip()
        return poet_name
    def getPoemURL(self, poem_title):
        """Run a site search for *poem_title* and return the first result's href."""
        url = self.fetchPage(query=poem_title)
        soup = BeautifulSoup(url.content, 'html.parser')
        poem_url = soup.find('h2', {'class': ['c-hdgSans', 'c-hdgSans_2']})
        return poem_url.a['href']
    def fetchPage(self, url="https://www.poetryfoundation.org/", query=None):
        """GET *url* directly, or the site search page when *query* is given."""
        if (query == None):
            # print("Fetching Page ", url)
            return requests.get(url)
        query = query.replace(' ', '+')
        url = url+"search?query=%s" % query
        # print("Fetching Page ", url)
        return requests.get(url)
load = PyPoem()  # runs the whole prompt/scrape/save flow at import time
| true |
d966e5391c3a53e60bda2fb5aa2beff1bb1e777a | Python | KiraUnderwood/MobileTestAutomation | /Tests/conftest.py | UTF-8 | 2,199 | 2.65625 | 3 | [] | no_license | import pytest
import configparser
from appium import webdriver
from ConfigsForTests.Capability import FillCapabilities
'''
parsing the BaseOptions.ini file to get capabilities
'''
config = configparser.RawConfigParser()
# Identity transform so option names keep their original case
# (configparser would otherwise normalize the keys).
config.optionxform = lambda option: option
config.read('../ConfigsForTests/BaseOptions.ini')
appium_server = config['AppiumServer']['server']
'''
pytest fixtures representing test setup and teardown
test creator should only use driver_for_native or driver_for_web fixtures
'''
@pytest.fixture(scope="function")
def params_for_native():
    """
    Build capabilities from the [NativeApp] config section and verify
    they describe a native app.
    :return: FillCapabilities object for native apps
    :raises Exception: when the capabilities are not for a native app
    """
    options = FillCapabilities(**config['NativeApp'])
    if not options.is_native():
        raise Exception('The capabilities are not for native app!')
    return options
@pytest.fixture(scope="function")
def driver_for_native(params_for_native):
    """
    Pytest fixture to initialize the driver for testing native app
    :param params_for_native: capabilities already checked by the named fixture
    :return: driver (yielded; quit on teardown)
    """
    # NOTE(review): params_for_native is a FillCapabilities instance per its
    # fixture; unless that class subclasses dict, this guard is never true
    # and the fixture yields nothing — confirm against FillCapabilities.
    if isinstance(appium_server, str) and isinstance(params_for_native, dict):
        driver = webdriver.Remote(appium_server, params_for_native)
        yield driver
        driver.quit()
@pytest.fixture(scope="function")
def params_for_web():
    """
    Build capabilities from the [WebApp] config section and verify they
    describe a web app.
    :return: FillCapabilities object for web apps
    :raises Exception: when the capabilities are not for a web app
    """
    options = FillCapabilities(**config['WebApp'])
    if not options.is_web():
        raise Exception('The capabilities are not for web app!')
    return options
@pytest.fixture(scope="function")
def driver_for_web(params_for_web):
    """
    Pytest fixture to initialize the driver for testing web apps
    :param params_for_web: capabilities already checked by the named fixture
    :return: driver (yielded; quit on teardown)
    """
    # NOTE(review): params_for_web is a FillCapabilities instance; confirm it
    # subclasses dict, otherwise this guard silently skips driver creation.
    if isinstance(appium_server, str) and isinstance(params_for_web, dict):
        # Bug fix: previously passed params_for_native — a name not in scope
        # in this fixture — which raised NameError once the guard passed.
        driver = webdriver.Remote(appium_server, params_for_web)
        yield driver
        driver.quit()
| true |
52d950b188f652cae4abf7825daad144f426b465 | Python | france5289/language-model-playground | /lmp/dset/_ch_poem.py | UTF-8 | 3,897 | 2.984375 | 3 | [
"Beerware"
] | permissive | r"""Chinese poetry dataset."""
import os
from io import TextIOWrapper
from typing import ClassVar, List, Optional
from zipfile import ZipFile
import pandas as pd
import lmp.dset.util
import lmp.path
from lmp.dset._base import BaseDset
class ChPoemDset(BaseDset):
    r"""Chinese poem dataset.
    Collection of poems dating way back to ancient Chinese dynasty.
    See https://github.com/Werneror/Poetry for more details on dataset.
    Some poems are preprocessed as follow:
    - Combine scattered files into one (including ``宋``, ``明``, ``清``)
    - Remove empty content (with value ``無正文。``)
    Parameters
    ==========
    ver: str, optional
        Version of the dataset.
        If ``ver is None``, then use default version ``ChPoemDset.df_ver`` of
        the dataset.
        Version must be available.
        Available versions are named after their appearing time, including
        ``元``, ``元末明初``, ``先秦``, ``南北朝``, ``唐``, ``唐末宋初``, ``宋``, ``宋末元初``,
        ``宋末金初``, ``明``, ``明末清初``, ``民國末當代初``, ``清``, ``清末民國初``, ``清末近現代初``,
        ``漢``, ``當代``, ``秦``, ``近現代``, ``近現代末當代初``, ``遼``, ``金``, ``金末元初``,
        ``隋``, ``隋末唐初``, ``魏晉``, ``魏晉末南北朝初``.
        Defaults to ``None``.
    Attributes
    ==========
    df_ver: ClassVar[str]
        Default version is ``唐``.
    dset_name: ClassVar[str]
        Dataset name is ``chinese-poem``.
        Used for command line argument parsing.
    file_name: ClassVar[str]
        Download dataset file name.
        Used only for downloading dataset files.
    lang: ClassVar[str]
        Use Chinese as primary language.
    spls: List[str]
        All samples in the dataset.
    ver: str
        Version of the dataset.
    vers: ClassVar[List[str]]
        All available versions of the dataset.
        Used to check whether specified version ``ver`` is available.
    url: ClassVar[str]
        URL for downloading dataset files.
        Used only for downloading dataset files.
    Raises
    ======
    TypeError
        When ``ver`` is not and instance of ``str``.
    ValueError
        When dataset version ``ver`` is not available.
    See Also
    ========
    lmp.dset.BaseDset
    Examples
    ========
    >>> from lmp.dset import ChPoemDset
    >>> dset = ChPoemDset(ver='唐')
    >>> dset[0][:10]
    風淅淅。夜雨連雲黑。
    """
    df_ver: ClassVar[str] = '唐'
    dset_name: ClassVar[str] = 'chinese-poem'
    file_name: ClassVar[str] = '{}.csv.zip'
    lang: ClassVar[str] = 'zh'
    vers: ClassVar[List[str]] = [
        '元', '元末明初', '先秦', '南北朝', '唐', '唐末宋初', '宋', '宋末元初', '宋末金初', '明',
        '明末清初', '民國末當代初', '清', '清末民國初', '清末近現代初', '漢', '當代', '秦', '近現代',
        '近現代末當代初', '遼', '金', '金末元初', '隋', '隋末唐初', '魏晉', '魏晉末南北朝初',
    ]
    url: ClassVar[str] = ''.join([
        'https://github.com/ProFatXuanAll',
        '/demo-dataset/raw/main/ch-poem',
    ])
    def __init__(self, *, ver: Optional[str] = None):
        # Base class validates `ver` and records it as self.ver.
        super().__init__(ver=ver)
        # e.g. <DATA_PATH>/唐.csv.zip — the version picks the archive.
        file_path = os.path.join(
            lmp.path.DATA_PATH,
            self.__class__.file_name.format(self.ver),
        )
        # Read text file inside chinese poem zip file.
        with ZipFile(os.path.join(file_path), 'r') as input_zipfile:
            with TextIOWrapper(
                input_zipfile.open(f'{self.ver}.csv', 'r'),
                encoding='utf-8',
            ) as input_text_file:
                df = pd.read_csv(input_text_file)
        # Normalized dataset: the '內容' (content) column, stringified and
        # passed through the shared text normalizer.
        spls = df['內容'].apply(str).apply(lmp.dset.util.norm).tolist()
        self.spls = spls
| true |
f85fd47bd80f7b6eda0860889b7169c1bd232def | Python | dtran22/Discard | /Discard_Main/classes/Cards/EnumuratedTerms.py | UTF-8 | 1,648 | 2.796875 | 3 | [] | no_license | import enum
#I realized that it was getting way too easy to misspell some terms.
#so, I began to create enums to eventually replace some of the strings.
class DCategory(enum.Enum):
    """Short for Damage Category: the broad families a damage type can belong to."""
    Kinetic = 0
    Element = 1
    Polarity = 2
    Anything = 3

    def __str__(self):
        # Display the bare member name, e.g. "Kinetic".
        return self.name

    def category(self):
        # A damage category is its own top-level category; look the member
        # up by value instead of walking an if/elif chain.
        return (DCategory.Kinetic, DCategory.Element,
                DCategory.Polarity, DCategory.Anything)[self.value]
class SpellState(enum.Enum):
    """Lifecycle states a spell card moves through."""
    Dormant = 0
    Set = 1
    Active = 2

    def __str__(self):
        # Render as the plain member name (e.g. "Active"), not "SpellState.Active".
        return self.name
class SpellResult(enum.Enum):
    """Return statuses an activated spell card can report."""
    Finished = 0
    Cancelled = 1
    Interrupt = 2

    def __str__(self):
        # Render as the plain member name.
        return self.name
class DTags(enum.Enum):
    """Short for Damage Tags: every concrete damage type a card can deal."""
    Absolute = 0
    Blunt = 1
    Slash = 2
    Gun = 3
    Fire = 4
    Ice = 5
    Wind = 6
    Elec = 7
    Mag = 8
    PSI = 9
    Light = 10
    Dark = 11

    def __str__(self):
        # Display the bare member name, e.g. "Fire".
        return self.name

    def category(self):
        # Map each tag onto its broad DCategory bucket by value range:
        # 0 is the wildcard, 1-3 kinetic, 4-9 elemental, 10-11 polarity.
        if self.value == 0:
            return DCategory.Anything
        if self.value <= 3:
            return DCategory.Kinetic
        if self.value <= 9:
            return DCategory.Element
        return DCategory.Polarity
| true |
e658a32b10d987d7ca4e2dee8d7edfe8e549f97d | Python | Karry/pi3d_demos | /GtkCube.py | UTF-8 | 3,756 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
import demo
import pi3d
import threading
import time
import gtk
import gtk.gdk as gdk
''' For gtk to work with python 3 you need to have installed gi
sudo apt-get install python3-gi
then instead of 'import gtk'
import gi
from gi.repository import Gtk as gtk
from gi.repository import Gdk as gdk
from gi.repository import GdkPixbuf
However I couldn't get the new_from_data() function to work to work
Also, when running on ubuntu laptop the pixel copying often has patterns of
'holes' and there are occasional serious error (seg faults) when closing
the application. Any further help with this would be appreciated!
'''
# Shared window dimensions for both the pi3d display and the gtk image.
W, H = 640, 480
''' This system works by copying the pi3d screen image and 'pasting' it
onto a gtk image.
NB to get the pi3d layer 'behind' the X windows you need to set its
value to < -127
'''
# layer=-128 pushes the pi3d surface behind the X windows (see note above).
DISPLAY = pi3d.Display.create(w=W, h=H, frames_per_second=20, layer=-128)
shader = pi3d.Shader('uv_light')
tex = pi3d.Texture('textures/PATRN.PNG')
cube = pi3d.Cuboid(z=2)
cube.set_draw_details(shader, [tex])
cube.show_flag = True # additional attribute stuck onto Cuboid instance
# last_x/last_y hold the pointer anchor during a drag; None means "no drag
# in progress" (see the mot() callback below).
cube.last_x = None
cube.last_y = None
def rotX(widget, cube):
    # gtk button callback: spin the cube 5 degrees about X and flag the
    # render loop to redraw and re-grab the screen image.
    cube.rotateIncX(5.0)
    cube.show_flag = True
def rotY(widget, cube):
    # gtk button callback: spin the cube 15 degrees about Y and request a redraw.
    cube.rotateIncY(15.0)
    cube.show_flag = True
def rotZ(widget, cube):
    # gtk button callback: spin the cube 20 degrees about Z and request a redraw.
    cube.rotateIncZ(20.0)
    cube.show_flag = True
def close_app(widget, data):
    # Window delete_event handler: tear down the pi3d display and the gtk
    # window, stop the gtk main loop, then exit the process.
    DISPLAY.destroy()
    win.destroy()
    gtk.main_quit()
    quit(0)
def mot(widget, ev, cube):
    # gtk motion-notify callback: while the left button is held, rotate the
    # cube by the pointer delta since the previous event.
    if ev.is_hint:
        x, y, st = ev.window.get_pointer()
    else :
        x, y, st = ev.x, ev.y, ev.state
    if st & gtk.gdk.BUTTON1_MASK :
        if cube.last_x is None:
            # First sample of a new drag: anchor here so the cube does not jump.
            cube.last_x = x
            cube.last_y = y
        # Horizontal drag -> Y rotation, vertical drag -> X rotation.
        cube.rotateIncY(cube.last_x - x)
        cube.rotateIncX(cube.last_y - y)
        cube.last_x = x
        cube.last_y = y
        cube.show_flag = True
    else:
        # Button released: forget the anchor so the next drag starts clean.
        cube.last_x = None
        cube.last_y = None
# Build the gtk UI: a window holding a button column (box2) next to the
# event box that will display the mirrored pi3d frame.
win = gtk.Window()
win.set_title('GTK minimal demo')
win.connect('delete_event', close_app)
box1 = gtk.HBox(False, 0)
win.add(box1)
box2 = gtk.VBox(False, 0)
box1.pack_start(box2, True, True, 1)
image_box = gtk.EventBox()
box1.add(image_box)
img_flag = 0  # (unused)
img_gtk = gtk.Image() # Create gtk.Image() only once
image_box.add(img_gtk) # Add Image in the box, only once
button1 = gtk.Button('Rotate X 5deg')
button1.connect('clicked', rotX, cube)
box2.pack_start(button1, True, True, 0)
button2 = gtk.Button('Rotate Y 15deg')
button2.connect('clicked', rotY, cube)
box2.pack_start(button2, True, True, 0)
button3 = gtk.Button('Rotate Z 20deg')
button3.connect('clicked', rotZ, cube)
box2.pack_start(button3, True, True, 0)
message = gtk.Label('Left click and\ndrag on image or\nclick these buttons')
message.set_justify(gtk.JUSTIFY_CENTER)
box2.pack_start(message, True, True, 5)
# Hook pointer motion on the image so dragging rotates the cube; the hint
# mask throttles events to one per get_pointer() call.
image_box.connect("motion_notify_event", mot, cube)
image_box.set_events(gtk.gdk.EXPOSURE_MASK
                    |gtk.gdk.LEAVE_NOTIFY_MASK
                    |gtk.gdk.BUTTON_PRESS_MASK
                    |gtk.gdk.POINTER_MOTION_MASK
                    |gtk.gdk.POINTER_MOTION_HINT_MASK)
box1.show()
box2.show()
win.show_all()
''' gtk needs to run in its own thread to allow the pi3d drawing to happen
at the same time'''
gdk.threads_init()
t = threading.Thread(target=gtk.main, name='GTK thread')
t.daemon = True
t.start()
# pi3d render loop: redraw only when a callback flagged a change, then
# mirror the framebuffer into the gtk image via a pixbuf.
while DISPLAY.loop_running():
    if cube.show_flag:
        cube.draw()
        img_gtk.set_from_pixbuf(gtk.gdk.pixbuf_new_from_array(
            pi3d.screenshot(), gtk.gdk.COLORSPACE_RGB, 8))
        ''' see note above about python 3
        img_gtk.set_from_pixbuf(GdkPixbuf.Pixbuf.new_from_data(pi3d.screenshot(),
            GdkPixbuf.Colorspace.RGB, True, 8, W, H, W * 4)) '''
        win.show_all()
        cube.show_flag = False
dc91866d0a4f1b94ba76840fe45d5af7b726a8a2 | Python | aorura/machine_learning | /day3/Day3Quiz.py | UTF-8 | 1,601 | 2.734375 | 3 | [] | no_license | import numpy as np
import pandas as pd
'''
data1 = pd.DataFrame({"color":['red','blue','green','yello','black'],"size":['s','m','l','xl','xs'],\
'gender':['m','w','m','w','m'],'part':['shirt','shirt','pant','shirt','pant'],'sale':['yes','no','no','yes','yes']})
data2 = pd.DataFrame({"Farbe":['red','blue','green','yello','black'],"size":['s','m','l','xl','xs'],\
'brand':['tommy','eagle','guess','zara','ck'],'part':['shirt','shirt','pant','shirt','pant'],'soldout':['yes','no','no','yes','yes']})
data3 = pd.merge(data1,data2,left_on='color', right_on='Farbe',how='outer',suffixes=('_left','_right'))
print('\n',data3.drop(['size_left','part_left','size_right','part_right'],axis='columns'))
'''
df1 = pd.DataFrame({
'매출': [10000000, 12000000, 9000000, 6000000, 8000000, 1100000],
'비용': [15000000, 1300000, 1200000, 9000000, 9900000, 9500000]},
index=['1월', '2월', '3월', '4월', '5월', '6월'])
df2 = pd.DataFrame({
'매출': [13000000, 14000000, 17000000, 15400000, 16500000, 16600000],
'비용': [11000000, 10400000, 11000000, 12100000, 9000000, 9500000]},
index=['7월', '8월', '9월', '10월', '11월', '12월'])
df3 = pd.DataFrame([],columns=['매출','비용','이익'],index=['1월', '2월', '3월', '4월', '5월', '6월','7월', '8월', '9월', '10월', '11월', '12월'])
print('\n',df1)
print('\n',df2)
print('\n',df3)
print('\n', pd.concat([df1,df2]))
print('\n', df3.add(pd.concat([df1,df2]), fill_value=0))
df4 = df3.add(pd.concat([df1,df2]), fill_value=0)
#df4.fillna(method=lambda profit )
| true |
18a27bb7b98916820deac873615b671470cdc824 | Python | teddygroves/figure_skating | /src/fake_data_generation.py | UTF-8 | 1,479 | 2.75 | 3 | [
"MIT"
] | permissive | """Function for generating fake data."""
from cmdstanpy import CmdStanModel
import numpy as np
import pandas as pd
from .model_configurations_to_try import SIMPLE_POSTERIOR as TRUE_MODEL_CONFIG
# True values for each variable in your program's `parameters` block. Make sure
# that the dimensions agree with `TRUE_MODEL_FILE`!
TRUE_PARAM_VALUES = {
"mu": 0.1,
"cutpoint_diffs": [0.5, 1.0, 0.6, 0.8, 1.2, 0.5, 0.6, 0.4, 0.9],
}
def generate_fake_measurements(real_data: pd.DataFrame) -> pd.DataFrame:
    """Fake a table of measurements by simulating from the true model.

    You will need to customise this function to make sure it matches the data
    generating process you want to simulate from.

    :param real_data: dataframe of real data to copy
    :return: copy of ``real_data`` with a ``true_ability`` column and the
        ``score`` column replaced by a draw from the true model
    """
    # Shallow copy is fine here: we only add a new top-level key.
    true_param_values = TRUE_PARAM_VALUES.copy()
    # One latent ability per distinct name, drawn from a standard normal.
    true_param_values["ability"] = np.random.normal(
        0, 1, real_data["name"].nunique()
    )
    fake_data = real_data.copy()
    fake_data["score"] = 0
    # pd.factorize gives the unique names in first-appearance order, which
    # must match the order abilities are consumed by the Stan model.
    name_to_ability = dict(
        zip(pd.factorize(fake_data["name"])[1], true_param_values["ability"])
    )
    fake_data["true_ability"] = fake_data["name"].map(name_to_ability)
    # fixed_param=True with a single draw: generate one simulated dataset
    # from the true parameter values rather than sampling a posterior.
    model = CmdStanModel(stan_file=TRUE_MODEL_CONFIG.stan_file)
    stan_input = TRUE_MODEL_CONFIG.stan_input_function(fake_data)
    mcmc = model.sample(
        stan_input, inits=true_param_values, fixed_param=True, iter_sampling=1
    )
    # NOTE(review): the -6 shift presumably recentres the ordinal yrep draws
    # onto the observed score scale — confirm against the Stan model.
    return fake_data.assign(score=mcmc.stan_variable("yrep")[0] - 6)
| true |
1fdccf8bf5e0b88fcf560fbca5fe24a02ae30de3 | Python | kjwilsondev/telio | /dstructures/set_test.py | UTF-8 | 3,091 | 3.390625 | 3 | [] | no_license | #!python
from set import Set
import unittest
class SetTest(unittest.TestCase):
    """Unit tests for the custom hash-backed Set implementation.

    Fixes over the original: ``test_intersection`` previously called
    ``union()`` instead of ``intersection()`` (the intersection path was
    never exercised), and size checks used ``is`` identity comparison on
    ints, which only works by accident of CPython's small-int cache.
    """

    def test_init(self):
        # A fresh set starts empty.
        s = Set()
        assert s.size == 0

    def test_contains(self):
        s = Set(4, ['a', 'b', 'c'])
        assert s.contains('b') is True
        assert s.contains('d') is False
        assert s.contains('c') is True

    def test_add(self):
        # Each add of a new element grows the set by one.
        s = Set(4, ['a', 'b', 'c'])
        assert s.size == 3
        s.add('dont fail me!')
        assert s.size == 4
        s.add('im working hard to write tests!')
        assert s.size == 5
        s.add('ill finish tonight!')
        assert s.size == 6

    def test_remove(self):
        # Each remove of a present element shrinks the set by one.
        s = Set(4, ['a', 'b', 'c'])
        assert s.size == 3
        s.remove('a')
        assert s.size == 2
        s.remove('b')
        assert s.size == 1
        s.remove('c')
        assert s.size == 0

    def test_union(self):
        # Union contains elements from either operand (compute it once).
        s = Set(4, ['yes', 'no', 'maybe'])
        t = Set(4, ['yea', 'na', 'maybe'])
        u = s.union(t)
        assert u.contains('yes') and u.contains('yea')
        s = Set(4, ['bro', 'bruh', 'brother'])
        t = Set(4, ['bud', 'buddy', 'browski'])
        u = s.union(t)
        assert u.contains('bro') and u.contains('browski')
        s = Set(4, ['yea', 'na', 'maybe'])
        t = Set(4, ['yea', 'na', 'maybe'])
        u = s.union(t)
        assert u.contains('yea') and u.contains('na') and u.contains('maybe')

    def test_intersection(self):
        # BUG FIX: this test used to call union() instead of intersection().
        s = Set(4, ['yes', 'no', 'maybe'])
        t = Set(4, ['yea', 'na', 'maybe'])
        assert s.intersection(t).contains('maybe')
        s = Set(4, ['yea', 'no', 'maybe'])
        t = Set(4, ['yea', 'na', 'maybe'])
        i = s.intersection(t)
        assert i.contains('yea') and i.contains('maybe')
        s = Set(4, ['yea', 'na', 'maybe'])
        t = Set(4, ['yea', 'na', 'maybe'])
        i = s.intersection(t)
        assert i.contains('yea') and i.contains('na') and i.contains('maybe')

    def test_difference(self):
        # Difference here behaves symmetrically: it keeps elements that are
        # in exactly one of the two sets.
        s = Set(4, ['yes', 'no', 'maybe'])
        t = Set(4, ['yea', 'na', 'maybe'])
        d = s.difference(t)
        assert d.contains('yes') and d.contains('yea')
        assert d.contains('maybe') is False
        s = Set(4, ['bro', 'bruh', 'brother'])
        t = Set(4, ['bud', 'buddy', 'browski'])
        d = s.difference(t)
        assert d.contains('bro') and d.contains('bud')
        s = Set(4, ['bro', 'bruh', 'brother'])
        t = Set(4, ['bro', 'buddy', 'browski'])
        d = s.difference(t)
        assert d.contains('bruh') and d.contains('browski')
        assert d.contains('bro') is False

    def test_is_subset(self):
        s = Set(4, ['yes', 'no', 'maybe'])
        t = Set(4, ['yea', 'na', 'maybe'])
        assert s.is_subset(t) is False
        s = Set(4, ['yes', 'no', 'maybe'])
        t = Set(4, ['yes', 'no', 'maybe', 'finally'])
        assert s.is_subset(t) is True
        s = Set(4, ['yes', 'no', 'maybe'])
        t = Set(4, ['yes', 'no', 'maybe', 'finally'])
        assert t.is_subset(s) is False
if __name__ == '__main__':
unittest.main() | true |
de6013743df9f3a7ce140684061aee8bf0a00a24 | Python | tungyr/skillbox-study | /Data Science/2. Data Scientist. Аналитика. Начальный уровень/probe.py | UTF-8 | 170 | 3.234375 | 3 | [] | no_license | import random
operators = ['+', '-', '*', '/', '//', '%']
for i in range(21):
print(f'{random.randint(0, 100)} {random.choice(operators)} {random.randint(0, 100)}') | true |
03e9552580f3eeada28267dfe563b8a1b04e4891 | Python | pyatl/jam-sessions | /2012-03/pythonchallenge-level4/mezner.py | UTF-8 | 629 | 3.28125 | 3 | [] | no_license | import urllib
import re
nothing = "44827"
def find_last_nothing(nothing):
    # Follow the pythonchallenge "linkedlist" chain: fetch each page, pull
    # the first run of digits out of it as the next 'nothing' value, and
    # stop (returning the final value) when a page contains no number.
    # The range(1, 400) bound simply guards against an endless chain.
    prefix = "http://www.pythonchallenge.com/pc/def/linkedlist.php?nothing="
    exp = r"[0-9]+"
    for i in range(1, 400):
        url = prefix + nothing
        c = urllib.urlopen(url).read()
        match = re.search(exp, c)
        # NOTE(review): 'match is None' is the idiomatic identity test here.
        if match == None:
            # No digits on the page: end of the chain.
            print "finalized @ " + nothing
            return nothing
        nothing = match.group(0)
        print "following nothing " + nothing
s = find_last_nothing("44827")
print find_last_nothing(str(int(s) / 2))
print "content tells you to next use 63579"
print find_last_nothing("63579")
| true |
3bb9d2e02f09b7467730bd62ad80c61ad738ec85 | Python | neer1304/CS-Reference | /Scripting/Python_Scripting/while.py | UTF-8 | 159 | 3.046875 | 3 | [] | no_license | a=0
b=1
while b < 100 : # all statements inside loop body must start with a tab
print b # no start..end char or symbols are required for this
a=b
b=a+b
| true |
bd043fe964a31dfc7130c6d4255c1cb7b31b8a89 | Python | aperkins7446/csd-310 | /module_10/outland_addRecordsv2.py | UTF-8 | 8,504 | 2.921875 | 3 | [] | no_license | #Group name: Charlie Group
#Members: Jacob Breault, Angela Perkins, Skyler Millburn, William Silknitter III, Cameron Frison
#7/11/2021
#Module 10.3 ADD RECORDS Outland Adventures Script
"""import statements"""
import mysql.connector
from datetime import date, datetime
from mysql.connector import errorcode
"""database config object"""
config = {
"user": "outland_adventures_user",
"password": "Angel@84",
"host": "127.0.0.1",
"database": "outland_adventures",
"raise_on_warnings": True
}
""" try/catch block for handling potential MySQL database errors """
try:
db = mysql.connector.connect(**config) # connect to outland_adventures database.
cursor = db.cursor()
"""Truncate all tables; makes sure we have empty tables before entering data"""
cursor.execute("SET FOREIGN_KEY_CHECKS = 0")
cursor.execute("TRUNCATE inventory")
cursor.execute("TRUNCATE employee_trek_history")
cursor.execute("TRUNCATE trek_history")
cursor.execute("TRUNCATE trek")
cursor.execute("TRUNCATE orders")
cursor.execute("TRUNCATE employees")
cursor.execute("TRUNCATE customers")
cursor.execute("SET FOREIGN_KEY_CHECKS = 1")
"""Executemany values into customers table"""
customerSQL = "INSERT INTO customers(f_name, l_name, address, city, state, zip_code, phone) VALUES (%s, %s, %s, %s, %s, %s, %s)"
customerRecord = [
('John', 'Smith', '123 Apple Street', 'Bigcity', 'Indiana', '12345', '123-456,7890'),
('Lily', 'Smith', '123 Apple Street', 'SmallCity', 'Kentucky', '54321', '098-765-4321'),
('Jake', 'Johnson', '456 Pear Ave', 'MedCity', 'Ohio', '44444', '111-111-1111'),
('Mary', 'Lee', '555 Orange Rd', 'OtherCity', 'Florida', '22222', '222-222-2222'),
('Thomas', 'Jones', '999 West St', 'Sidecity', 'Arizona', '66666', '999-999-9999'),
('Isacc', 'Lee', '555 Orange Rd', 'OtherCity', 'Florida', '22222', '222-222-2222')
]
cursor.executemany(customerSQL, customerRecord)
"""Display all from customers table"""
cursor.execute("SELECT * FROM customers")
customer = cursor.fetchall()
print(" -- DISPLAYING ENTRIES FROM CUSTOMER TABLE -- \n\n")
for customers in customer:
print(customers)
print("\n")
"""Executemany values into employees table"""
print(datetime(1990,1,10))
employeeSQL = "INSERT INTO employees(f_name, l_name, date_of_birth, title, supervisor_id) VALUES (%s, %s, %s, %s, %s)"
employeeRecord = [
('Luke', 'Johnson', datetime(1990, 1, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S'), 'Boss', 1),
('Bryan', 'Smith', datetime(1991, 1, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S'), 'Associate', 2),
('John', 'Jones', datetime(1992, 1, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S'), 'Manager', 3),
('Percy', 'Smith', datetime(1993, 1, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S'), 'Marketing', 4),
('Ash', 'Williams', datetime(1994, 1, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S'), 'Human Resources', 5),
('Terrance', 'Fletcher', datetime(1995, 1, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S'), 'Co-Boss', 6),
]
cursor.executemany(employeeSQL, employeeRecord)
"""Display all from customers table"""
cursor.execute("SELECT * FROM employees")
employee = cursor.fetchall()
print("\n\n -- DISPLAYING ENTRIES FROM EMPLOYEE TABLE -- \n\n")
for employees in employee:
print(employees)
print("\n")
"""Executemany values into trek table"""
trekSQL = "INSERT INTO trek(trek_name, country, requires_visa, required_immunizations) VALUES (%s, %s, %s, %s)"
trekRecord = [
('Trek 1', 'Canada', 1, 1),
('Trek 2', 'U.S.A.', 1, 1),
('Trek 3', 'Mexico', 1, 1),
('Trek 4', 'Quebec', 1, 1),
('Trek 5', 'U.S.A.', 1, 1),
('Trek 6', 'Canada', 1, 1)
]
cursor.executemany(trekSQL, trekRecord)
"""Display all from trek table"""
cursor.execute("SELECT * FROM trek")
trek = cursor.fetchall()
print("\n\n -- DISPLAYING ENTRIES FROM TREK TABLE -- \n\n")
for treks in trek:
print(treks)
print("\n")
"""Executemany values into trek_history table"""
trekHistorySQL = "INSERT INTO trek_history(customer_id, trek_name, trip_cost, trip_date) VALUES (%s, %s, %s, %s)"
trekHistoryRecord = [
(1, 'Trek 1', 500, datetime(2020, 1, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S')),
(2, 'Trek 2', 300, datetime(2020, 2, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S')),
(3, 'Trek 3', 200, datetime(2020, 3, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S')),
(4, 'Trek 4', 175, datetime(2020, 4, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S')),
(5, 'Trek 5', 1000, datetime(2020, 5, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S')),
(6, 'Trek 6', 600, datetime(2020, 6, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S'))
]
cursor.executemany(trekHistorySQL, trekHistoryRecord)
"""Display all from trek_history table"""
cursor.execute("SELECT * FROM trek_history")
trek_history = cursor.fetchall()
print("\n\n -- DISPLAYING ENTRIES FROM TREK HISTORY TABLE -- \n\n")
for trekHis in trek_history:
print(trekHis)
print("\n")
"""Executemany values into inventory table"""
inventorySQL = "INSERT INTO inventory(gear_id, gear_name, for_rent, purchase_date) VALUES (%s, %s, %s, %s)"
inventoryRecord = [
(1, 'Boots', 1, datetime(2013, 1, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S')),
(2, 'Lantern', 1, datetime(2018, 1, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S')),
(3, 'Tent', 1, datetime(2012, 1, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S')),
(4, 'Ligher', 1, datetime(2020, 1, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S')),
(5, 'Hydro Flasks', 1, datetime(2015, 1, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S')),
(6, 'Backpack', 1, datetime(2016, 1, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S'))
]
cursor.executemany(inventorySQL, inventoryRecord)
"""Display all from inventory table"""
cursor.execute("SELECT * FROM inventory")
inventory = cursor.fetchall()
"""Executemany values into orders table"""
orderSQL = "INSERT INTO orders(customer_id, trek_history_id, gear_id, quantity, order_cost, order_date) VALUES (%s, %s, %s, %s, %s, %s)"
orderRecord = [
(1, 1, 1, 10, 50, datetime(2020, 1, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S')),
(2, 2, 2, 20, 100, datetime(2020, 2, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S')),
(3, 3, 3, 3, 10, datetime(2020, 3, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S')),
(4, 4, 4, 7, 110, datetime(2020, 4, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S')),
(5, 5, 5, 11, 100, datetime(2020, 5, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S')),
(6, 6, 6, 14, 200, datetime(2020, 6, 10, 0, 0, 0).strftime('%Y-%m-%d %H:%M:%S'))
]
cursor.executemany(orderSQL, orderRecord)
"""Display all from orders table"""
cursor.execute("SELECT * FROM orders")
order = cursor.fetchall()
print("\n\n -- DISPLAYING ENTRIES FROM ORDERS TABLE -- \n\n")
for orders in order:
print(orders)
print("\n")
print("\n\n -- DISPLAYING ENTRIES FROM INVENTORY TABLE -- \n\n")
for stock in inventory:
print(stock)
print("\n")
"""Executemany values into employee_trek_history table"""
empTrekSQL = "INSERT INTO employee_trek_history(trek_history_id, employee_id, supervisor_id) VALUES (%s, %s, %s)"
empTrekRecord = [
(1, 1, 1),
(2, 2, 2),
(3, 3, 3),
(4, 4, 4),
(5, 5, 5),
(6, 6, 6)
]
cursor.executemany(empTrekSQL, empTrekRecord)
"""Display all from employee_trek_history table"""
cursor.execute("SELECT * FROM employee_trek_history")
empTrek = cursor.fetchall()
print("\n\n -- DISPLAYING ENTRIES FROM EMPLOYEE TREK HISTORY TABLE -- \n\n")
for history in empTrek:
print(history)
print("\n")
db.commit()
except mysql.connector.Error as err:
""" on error code """
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print(" The supplied username or password are invalid")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print(" The specified database does not exist")
else:
print(err)
finally:
""" close the connection to MySQL """
db.close()
| true |
5ca144477b6ec16a9d7a628d007e42b99ad07633 | Python | seeraven/vagrancyCtrl | /vagrancy/cli/parser_cmd_print.py | UTF-8 | 6,131 | 2.765625 | 3 | [
"BSD-3-Clause"
] | permissive | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020 by Clemens Rabe <clemens.rabe@clemensrabe.de>
# All rights reserved.
# This file is part of vagrancyCtrl (https://github.com/seeraven/vagrancyCtrl)
# and is released under the "BSD 3-Clause License". Please see the LICENSE file
# that is included as part of this package.
#
"""Parser of the print command of vagrancyCtrl.
Attributes:
DESCRIPTION (str): The usage description of the subparser for the print command.
"""
# -----------------------------------------------------------------------------
# Module Import
# -----------------------------------------------------------------------------
import argparse
import os
import sys
from ..client import Vagrancy
# -----------------------------------------------------------------------------
# Module Variables
# -----------------------------------------------------------------------------
DESCRIPTION = """
Get a list of all available boxes on the vagrancy server. Without any arguments,
the names of all boxes are printed:
%(prog)s
You can also limit the list by specifiing a pattern:
%(prog)s base/*18.04*
Using the '--provider' option, you can limit the output list to a certain
provider (virtualbox, libvirt), e.g., by listing only virtualbox images:
%(prog)s --provider virtualbox
To get more information about the boxes, such as the provider, versions, etc.,
you can use the '--verbose' option:
%(prog)s --verbose base/*18.04*
To use the information in a script, you can specifiy the '--cvs' option to get
a comma separated list consisting of:
- name of box on vagrancy
- provider
- directory to the box file relative to the vagrancy file store
- latest version
- next version
Return codes:
0 - Communication was successfull.
1 - Communication failed.
"""
# -----------------------------------------------------------------------------
# Exported Functions
# -----------------------------------------------------------------------------
def exec_print_cmd(args):
    """Execute the print command.

    Queries the vagrancy server for boxes matching ``args.box_name`` and
    ``args.provider`` and prints them. With ``--csv`` a machine-readable
    comma-separated list is printed and the process exits; with
    ``--verbose`` a detailed human-readable listing is printed and the
    process exits; otherwise only the box names are printed.

    Args:
        args: The arguments object.
    """
    vagrancy = Vagrancy(args.base_url)
    box_list = vagrancy.get_boxes(args.box_name, args.provider)
    if args.csv:
        # One row per (box, provider): name, provider, relative box path,
        # latest version, next version (empty when undefined).
        for box in box_list:
            for provider in sorted(box.provider_version_map.keys()):
                latest_version = box.provider_latest_versions[provider]
                next_version = box.provider_next_versions[provider]
                if next_version is None:
                    next_version = ''
                print("%s,%s,%s,%s,%s" % (box.box_name, provider,
                                          os.path.join(box.box_name, latest_version, provider),
                                          latest_version, next_version))
        sys.exit(0)
    if args.verbose:
        for box in box_list:
            print("%s:" % box.box_name)
            for provider in sorted(box.provider_version_map.keys()):
                latest_version = box.provider_latest_versions[provider]
                next_version = box.provider_next_versions[provider]
                if next_version is None:
                    # The literal string 'undefined' also ends up inside the
                    # Upload URL below when no next version exists.
                    next_version = 'undefined'
                print(" Provider: %s" % provider)
                # Note: lists ALL versions of the box, across providers.
                print(" Available Versions: %s" % ' '.join(sorted(box.version_provider_map.keys())))
                print(" Latest Version: %s" % latest_version)
                print(" Next Version: %s" % next_version)
                print(" Download URL: %s" % os.path.join(args.base_url,
                                                         box.box_name,
                                                         latest_version,
                                                         provider))
                print(" Upload URL: %s" % os.path.join(args.base_url,
                                                       box.box_name,
                                                       next_version,
                                                       provider))
            print()
        sys.exit(0)
    # Default: just the box names.
    for box in box_list:
        print("%s" % box.box_name)
def get_subparser_print(subparsers):
    """Return the subparser to configure and handle the print command.

    Registers the "print" subcommand with its --provider/--verbose/--csv
    options and optional box-name pattern, and binds ``exec_print_cmd``
    as its handler via ``set_defaults``.

    Args:
        subparsers: The subparsers object of the main argparse.ArgumentParser.

    Returns:
        argparse.ArgumentParser: The new subparser object.
    """
    parser_print = subparsers.add_parser("print",
                                         help = "Print the contents of the vacrancy server.",
                                         description = DESCRIPTION,
                                         formatter_class = argparse.RawTextHelpFormatter)
    parser_print.add_argument("-p", "--provider",
                              action = "store",
                              help = "Only print boxes that have the specified "
                                     "provider, e.g., libvirt or virtualbox.",
                              default = "*")
    parser_print.add_argument("-v", "--verbose",
                              action = "store_true",
                              help = "Be verbose and print all available "
                                     "information about the boxes.",
                              default = False)
    parser_print.add_argument("--csv",
                              action = "store_true",
                              help = "Print a csv list instead of only the names.",
                              default = False)
    # Positional pattern is optional; "*" matches every box.
    parser_print.add_argument("box_name",
                              action = "store",
                              help = "Print only names that match the given "
                                     "pattern. Default: %(default)s",
                              default = "*",
                              nargs = "?")
    parser_print.set_defaults(func = exec_print_cmd)
    return parser_print
# -----------------------------------------------------------------------------
# EOF
# -----------------------------------------------------------------------------
| true |
10dec41db22ca39d82c617c03af169b590e124c5 | Python | CSG3101-AP-Project-5-AR-mHealth/vitals-infer-bot | /main.py | UTF-8 | 1,480 | 2.953125 | 3 | [
"MIT"
] | permissive | import time
import json
from tqdm import tqdm
import matplotlib.pyplot as plt
import utils
# Opening JSON file
f = open('heart_rate.json',"r")
strdata = f.read()
# returns JSON object as
# a dictionary
data = json.loads(strdata)
# Closing file
f.close()
# start time
start_time = time.time()
X, Y= utils.parse_bpm(data)
print(time.time()- start_time, "seconds")
Beacon_list, Beacon_points = utils.find_max_min(X)
Beacon_list1, Beacon_points1 = utils.find_INF_points(X)
print('length of the beacon-data : ', len(Beacon_list))
print('length of the beacon-data-points : ', len(Beacon_points))
print('length of the beacon-data1 : ', len(Beacon_list1))
print('length of the beacon-data-points1 : ', len(Beacon_points1))
# plotting the points
plt.figure(figsize=(15, 5))
# plot mins and maxs
plt.plot(Beacon_points, Beacon_list,color='g',label ='min_max')
# plot inf_v final
plt.plot(Beacon_points1, Beacon_list1,color='b',label ='final INF_V')
# plot original data
plt.plot(Y, X,color='r', label ='Orignal')
# Function add a legend
plt.legend()
# naming the x axis
plt.xlabel('x - axis')
# naming the y axis
plt.ylabel('y - axis')
# giving a title to my graph
plt.title('Graph Analysis')
# function to show the plot
plt.show()
So, Su, Sl, S = utils.find_areas(X,Beacon_list1)
AR = (So-S)/So*100
print('Accuracy Rate : ',AR)
SR = ((len(X)- len(Beacon_list1))/len(X))*100
print('Saving Rate : ',SR) | true |
5e066a0124b537152f4ede9a2eb905a6a37bb31e | Python | sys505moon/python_study | /mnist_step1.py | UTF-8 | 1,182 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 8 21:15:08 2017
@author: moon
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Download/load MNIST with one-hot encoded labels (TF1 tutorial helper).
mnist = input_data.read_data_sets("MNIST_data/", one_hot = True)

# Softmax-regression graph (TensorFlow 1.x graph API): 784-pixel images
# in, 10-way class probabilities out.
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)

# Manual cross-entropy over the softmax output.
# NOTE(review): tf.log(y) can produce NaN when y underflows to 0;
# tf.nn.softmax_cross_entropy_with_logits is the numerically stable form.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices = 1))
train_step = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(cross_entropy)

# Accuracy: fraction of samples whose argmax prediction matches the label.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # 1000 SGD steps on mini-batches of 100; dump W and b every 100 steps.
    for i in range(1000):
        batch_xs, batch_ys = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict = {x:batch_xs, y_:batch_ys})
        if i % 100 == 0:
            print(sess.run(W), sess.run(b))
    # Final accuracy on the held-out test split.
    print(sess.run(accuracy, feed_dict = {x:mnist.test.images, y_:mnist.test.labels}))
406bfd55f4a02e3a6cb87c13ea525a14386beaa1 | Python | nihathalici/Break-The-Ice-With-Python | /Python-Files/Day-21/Question-86.py | UTF-8 | 290 | 4.21875 | 4 | [] | no_license | """
Question 86
Question
By using list comprehension, please write a program to print
the list after removing the value 24 in [12,24,35,24,88,120,155].
Hints
Use list's remove method to delete a value.
"""
li = [12, 24, 35, 24, 88, 120, 155]
li = [ x for x in li if x != 24 ]
print(li)
| true |
1f96d78cf59a93d6681102eb706064f13b1215d2 | Python | ChendongCai/Build-machine-learning-models-from-scratch | /knn | UTF-8 | 1,027 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 27 14:03:59 2017
@author: chendongcai
"""
import numpy as np
import operator
from pandas import DataFrame
def knnclassifier(inX, dataset, label, k):
    """Return the majority label among the k training rows nearest to inX.

    Distances are plain Euclidean distances between inX and every row of
    dataset; on tied vote counts the label whose count was reached first
    wins, matching the stable descending sort of the original.
    """
    # Broadcasting subtracts inX from every row, so no tiling is needed.
    deltas = dataset - inX
    dists = ((deltas ** 2).sum(axis=1)) ** 0.5
    votes = {}
    # Tally the labels of the k closest training rows.
    for neighbor in dists.argsort()[:k]:
        picked = label[neighbor]
        votes[picked] = votes.get(picked, 0) + 1
    # max() keeps the first-inserted key among equal counts.
    return max(votes.items(), key=lambda item: item[1])[0]
#create sample data to test the model
# Four 2-D training points: two labelled 'A' near (1, 1), two 'B' near (0, 0).
group=np.array([[1.0,1.0],[1.0,1.1],[0,0],[0,0.1]])
label=['A','A','B','B']
#define the value of k
k=3
# Three query points to classify.
inputx=np.array([[0,0],[0.5,0.5],[1.5,1.1]])
result=DataFrame(inputx)
r=[]
for i in inputx:
    r.append(knnclassifier(i,group,label,k))
# Attach the predicted class next to each query's coordinates and show it.
result['class']=r
print(result)
0759cbdfccc71bc0dce461677646535572bb62ca | Python | brent549/maps | /town_analysis_orig.py | UTF-8 | 2,169 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env python3
import json
import pprint
import os
def sites(file):
    """Read *file* and return its lines, newline-stripped, skipping comments.

    Lines beginning with '#' are treated as comments and omitted.
    """
    with open(file, "r") as handle:
        return [entry.rstrip() for entry in handle if not entry.startswith("#")]
def files(dir):
    """Return the per-city result file names in *dir*.

    A result file is any '*.json' file except the aggregate ones whose
    names begin with 'all'. Fixes the original's ``os.listldir`` and
    ``beginswith`` typos (both AttributeErrors at call time) and its use
    of the global ``outdir`` instead of the ``dir`` parameter.
    """
    return [name for name in os.listdir(dir)
            if name.endswith(".json") and not name.startswith('all')]
def convert_to_min(s):
    """Convert a Google-Maps-style duration string to whole minutes.

    Accepts any sequence of value/unit pairs, e.g. "49 mins",
    "1 hour 20 mins", or just "1 hour" (the original crashed with an
    IndexError on a bare hour value, and its odd-token warning used
    ``"...".s``, an AttributeError). Units containing 'hour' count as 60
    minutes each; everything else counts as minutes.
    """
    parts = s.split()
    # Durations come as value/unit pairs, so the token count must be even.
    if len(parts) % 2 != 0:
        print("{} isnt even".format(s))
    minutes = 0
    for value, unit in zip(parts[0::2], parts[1::2]):
        if 'hour' in unit:
            minutes += int(value) * 60
        else:
            minutes += int(value)
    return minutes
# For every origin city, load its per-destination travel-time results
# (one JSON object per line) and aggregate them into
# results[origin][destination] = [(minutes, distance, mode), ...].
origins = sites('cities.txt')
outdir = 'json'
results={}
for o in origins:
    print(o)
    # File name mirrors the city name with spaces underscored, commas dropped.
    file="%s/%s.json" % (outdir,o.replace(' ','_').replace(',',''))
    if not os.path.isfile(file):
        continue
    with open(file, "r") as fd:
        data=[]
        for line in fd:
            data.append(json.loads(line.strip()))
    pprint.pprint(data)
    #{origin}->{dest}->time: | distance: | mode:
    #{origin}->{dest}->[(time,distance,mode)]
    for x in data:
        if 'error_message' in x:
            print(x['error_message'])
            continue
        mode=x['transit_mode']
        origin=x['origin_addresses'][0]
        d = x['rows'][0]['elements']
        if origin not in results:
            results[origin] = {}
        # Each element lines up index-for-index with destination_addresses.
        for i in range(len(d)):
            print(d[i])
            dist = d[i]['distance']['text']
            dur = d[i]['duration']['text']
            dest = x['destination_addresses'][i]
            #print("%s -> %s" % (dist,dur))
            t=(convert_to_min(dur),dist,mode)
            if dest not in results[origin]:
                results[origin][dest] = []
            results[origin][dest].append(t)
print(results)
# Report the fastest mode per origin/destination pair.
for orig,locs in results.items():
    for loc,tups in locs.items():
        l = sorted(tups, key = lambda x: x[0])
        shortest = l[0]
        # NOTE(review): for New York the second-fastest entry is taken
        # instead; this raises IndexError if only one mode was recorded.
        if 'New York' in loc:
            shortest = l[1]
        print("{} {} {}".format(orig,loc,shortest))
eb963b7ffb23d59c4be2524097579a6268483deb | Python | arsenypoga/CSC-425-Project-1 | /test/test_informed_solver.py | UTF-8 | 778 | 2.96875 | 3 | [] | no_license | import unittest
from game.state import State
from game.informed_search import InformedSearchSolver
import numpy as np
class TestInformedSolver(unittest.TestCase):
    """End-to-end check that the informed (A*-style) 8-puzzle solver
    actually reaches the goal configuration."""

    def test_state_walk(self):
        # Start two moves away from the solved board (0 marks the blank).
        init_tile = np.array([[1, 2, 3], [0, 4, 6], [7, 5, 8]])
        # init_tile = np.array([[1, 2, 3], [4, 5, 0], [7, 8, 6]])
        init = State(init_tile, 0, 0)
        goal_tile = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 0]])
        goal = State(goal_tile, 0, 0)
        informed_solver = InformedSearchSolver(init, goal)
        # Step the solver until its current state equals the target.
        while not informed_solver.current_state == informed_solver.target_state:
            informed_solver.next_state()
        # print(succ)
        # The final tile layout must match the goal exactly.
        self.assertTrue(
            np.all(goal.tile_seq == informed_solver.current_state.tile_seq))
| true |
fa9d66eaafb92906386a8f87179ba090106c31d9 | Python | wwward/socialvolunteer | /socialvolunteernet/model/test_volunteer.py | UTF-8 | 3,602 | 2.859375 | 3 | [] | no_license |
import logging
class Volunteer(object):
    """In-memory test stub of the volunteer model.

    Every method returns canned data (or just logs the call and returns
    True), so controllers can be exercised without a real database.
    """

    def create_new(self, **kw):
        # Pretend to persist a new volunteer; only logs the supplied fields.
        logging.debug("Created new volunteer %s" % repr(kw))
        return True

    def get_volunteer_by_username(self, username):
        # Canned record; the requested username is echoed back.
        return {"name": "Dirk", "volunteer_id": "987", "username": username}

    # Adds a friend to the current volunteer
    def add_friend(self, volunteer_id, friend_volunteer_id):
        logging.debug("Added friends %s and %s" % (volunteer_id, friend_volunteer_id))
        return True

    def remove_friend(self, volunteer_id, friend_volunteer_id):
        logging.debug("Deleted friends %s and %s" % (volunteer_id, friend_volunteer_id))
        return True

    # Returns a list of friend data (all friend data, not just the id)
    def get_friends(self, volunteer_id):
        return [{"name": "buddy 1", "volunteer_id": "345", "username": "adsfasd"},
                {"name": "buddy 2", "volunteer_id": "222", "username": "adsfasdf"}]

    def get_score(self, volunteer_id):
        # Fixed placeholder score.
        return 666

    # Returns a list of the scores of all of my friends
    def get_friend_score(self, volunteer_id):
        return [{"username": "asdfasd", "score": 324, "name": "winner"},
                {"username": "asdas", "score": 224, "name": "loser"}]

    def get_global_scores(self):
        # Site-wide leaderboard stub.
        return [{"username": "sssss", "score": 324000, "name": "GOD"},
                {"username": "kkkk", "score": 224000, "name": "KITTEH"}]

    # Returns the top activity across the site
    # www3 - what constitutes activity?
    def get_friend_activity(self, volunteer_id):
        return [
            {"username": "Bob", "job_id": "1243", "title": "This is job 1"},
            {"username": "Alice", "job_id": "4", "title": "This is job 2"},
        ] # Not sure what this will look like ATM

    # Returns a list of jobs that I have completed
    def get_completed_jobs(self, volunteer_id):
        return [{"job_id": "2342", "description": "hell if i know", "date": "yesterday"},
                {"job_id": "3", "description": "dunno", "date": "a week ago"}]

    # Returns a list of jobs that are in progress (e.g. I have been checked in but not checked out)
    def get_current_jobs(self, volunteer_id):
        return [{"job_id": "222", "description": "right now", "date": "today", "title": "Job 1"},
                {"job_id": "333", "description": "current", "date": "today", "title": "Job 2"}]
# Returns a list of the jobs that I have signed up for but not started
def get_future_jobs(self, volunteer_id):
return [{"job_id": "999", "description": "future time", "date": "a week from now", "title": "Job 1"},
{"job_id": "777", "description": "laters", "date": "next tuesday", "title": "Job 1"}]
# Get volunteer information based on a volunteer_id
def get_info(self, volunteer_id):
return {"name": "this guy", "volunteer_id": volunteer_id, "location": "valhalla"}
# Edit user details, the modified fields are in the kw dictionary
def edit_volunteer_data(self, **kw):
logging.debug("Edited volunteer %s" % repr(kw))
return True
# Sign up for a new job
def add_job(self, volunteer_id, job_id):
logging.debug("Added job %s to %s" % (job_id, volunteer_id) )
return True
# Delete a job. Note that you cannot delete things that you have been checked in or completed
def delete_job(self, volunteer_id, job_id):
logging.debug("Deleted job %s to %s " % (job_id, volunteer_id))
return True
| true |
5d473fb36e34e2ee7c1c1b2fd9b1520a6eee1728 | Python | mparvezrashid/Automated-Software-Engineering | /hw/4/Abcd.py | UTF-8 | 4,711 | 2.53125 | 3 | [] | no_license | import re,math
from collections import Counter, defaultdict
from table import Tbl
from ZeroR import ZeroR
from NB import NB
class Abcd:
    """Accumulates a per-label confusion matrix over a stream of
    (wanted, predicted) label pairs and prints accuracy / precision /
    recall (pd) / false-alarm (pf) / f / g statistics per label."""

    def __init__(self):
        # Report labels and running sample count.
        self.db = "Data"
        self.num = 0
        self.rx = "rx"
        # Per-label confusion counts: a = true negatives, b = false negatives,
        # c = false positives, d = true positives.  defaultdicts so unseen
        # labels read as 0.
        self.a = defaultdict(lambda: 0)
        self.b = defaultdict(lambda: 0)
        self.c = defaultdict(lambda: 0)
        self.d = defaultdict(lambda: 0)
        # Tracks which labels have been observed so far.
        self.known = defaultdict(lambda: 0)
        # Total correct / incorrect predictions.
        self.yes = 0
        self.no = 0
    def Abcds(self, file, wait, classify):
        """Stream `file` (CSV-ish lines); after the first `wait` rows, score
        `classify.classify(row)` against the row's last column, and train
        `classify` on every row.  Prints the report when the stream ends."""
        linecount = 0
        '''for line in file:
            linecount+=1
            line = re.sub(r'([\n\t\r]|#.*)', '', line.strip())
            if len(line)>0:
                line = line.split(',')
                if wait<linecount:
                    self.Abcd1(line[-1],zr.classify(line))
                zr.train(tbl,linecount,line)
                #tbl.Tbl1(linecount,line)
        self.AbcdReport()
        linecount=0'''
        for line in file:
            linecount += 1
            # Strip newlines/tabs and trailing '#' comments before splitting.
            line = re.sub(r'([\n\t\r]|#.*)', '', line.strip())
            if len(line) > 0:
                line = line.split(',')
                if wait < linecount:
                    #print(line[0])
                    #print(line[-1] + classify.classify(line))
                    #print(classify.classify(line)+line[-1])
                    # Last column is the true class; score the prediction.
                    self.Abcd1(line[-1], classify.classify(line))
                    #self.Abcd1('yes', 'yes')
                classify.train(linecount, line)
                '''print(classify.tbl.cols)
                print(classify.tbl.nums)
                print(classify.tbl.syms)
                print(classify.tbl.goals)
                # tbl.Tbl1(linecount,line)'''
                #print(classify.subtbl)
        self.AbcdReport()
    def Abcd1(self, want, got, x=0):
        """Record one (want, got) prediction pair into the per-label counts.

        NOTE(review): reading self.known[...] on a defaultdict inserts the
        key with value 0, so the membership checks below rely on that
        side effect; also self.a[label] is *assigned* (not incremented)
        when a label is first seen — intent presumed, verify against callers.
        """
        self.num += 1
        if self.known[want] == 0:
            self.known[want] += 1
            # New label: seed its true-negative count with all samples so far.
            self.a[want] = self.yes + self.no
            # print(want,self.a[want])
        '''if self.known[want] == 1:
            self.a[want]= self.yes + self.no
            print(want,self.a[want]) '''
        if self.known[got] == 0:
            self.known[got] += 1
            self.a[got] = self.yes + self.no
            # print(got, self.a[got])
        '''if self.known[got] == 1:
            self.a[got]= self.yes + self.no
            print(got, self.a[got])'''
        if want == got:
            self.yes += 1
        else:
            self.no += 1
        # print(self.known)
        # Update every known label's confusion cell for this pair.
        for x in self.known:
            # print(x)
            if want == x:
                if want == got:
                    self.d[x] += 1
                else:
                    self.b[x] += 1
            else:
                if got == x:
                    self.c[x] += 1
                else:
                    self.a[x] += 1
            # print(x,self.a[x])
    # def AbcdReport(self,x,p,q,r,s,ds,pd,pf pn,prec,g,f,acc,a,b,c,d) {
    def AbcdReport(self):
        """Print a markdown-ish table with one row of statistics per label."""
        p = " %4.2f"
        q = " %4s"
        r = " %5s"
        s = " |"
        ds = "----"
        '''print(r s r s r s r s r s r s r s q s q s q s q s q s q s " class\n",
             "db","rx","num","a","b","c","d","acc","pre","pd","pf","f","g")
        print(r s r s r s r s r s r s r s q s q s q s q s q s q s "-----\n",
             ds,ds,"----",ds,ds,ds,ds,ds,ds,ds,ds,ds,ds)'''
        print(
            "   db  |  rx  |  num  |  a  |  b  |  c  |  d  |  acc  |  pre  |  pd  |  pf  |  f  |  g  | class\n")
        print(
            " ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | -----\n")
        for x in self.known:
            pd = pf = pn = prec = g = f = acc = 0
            a = self.a[x]
            b = self.b[x]
            c = self.c[x]
            d = self.d[x]
            # Guarded ratios so labels with empty cells don't divide by zero.
            if b + d > 0: pd = round(d / (b + d), 2)
            if a + c > 0: pf = round(c / (a + c), 2)
            if a + c > 0: pn = round((b + d) / (a + c), 2)
            if c + d > 0: prec = round(d / (c + d), 2)
            if 1 - pf + pd > 0: g = round(2 * (1 - pf) * pd / (1 - pf + pd), 2)
            if prec + pd > 0: f = round(2 * prec * pd / (prec + pd), 2)
            if self.yes + self.no > 0:
                # print(self.yes, self.yes + self.no)
                acc = round(self.yes / (self.yes + self.no), 2)
                # print(acc)
            print("{:7s}|".format(self.db) + "{:7s}|".format(self.rx) + "{:7d}|".format(self.num) + "{:7d}|".format(
                a) + "{:7d}|".format(b) + "{:7d}|".format(c) + "{:7d}|".format(d) + "{:7f}|".format(
                acc) + "{:7f}|".format(prec) + "{:7f}|".format(pd) + "{:7f}|".format(pf) + "{:7f}|".format(
                f) + "{:7f}|".format(g) + "{:7s}".format(x) + '\n')
| true |
f903fac8fb0725d7f5f893841639b9b53b1ab565 | Python | KTH-EXPECA/CognitiveAssistanceTraces | /lego/HeadTrace1/process_steps.py | UTF-8 | 1,338 | 2.6875 | 3 | [
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env python3
import pathlib
from typing import Tuple
import json
import struct
# Directory containing one sub-directory per step, each holding frame JPEGs.
STEPDIR = './frames'
# Key frame number for each step, indexed by step number - 1.
KEY_FRAMES = [245, 158, 159, 274, 162, 239, 308]


def get_steps() -> Tuple:
    """Return the step sub-directories of STEPDIR, sorted by name."""
    p = pathlib.Path(STEPDIR)
    steps_p = [x for x in p.iterdir() if x.is_dir()]
    return tuple(sorted(steps_p))


def process_step(step_dir: pathlib.Path) -> None:
    """Pack one step's frames into a length-prefixed 'step_<n>.trace' file.

    Layout: a big-endian uint32 length followed by a compact JSON header,
    then each frame as a big-endian uint32 length followed by the raw
    JPEG bytes, in sorted filename order.
    """
    # Directory names are assumed to end in a two-digit step number, e.g. 'step_03'.
    index = int(step_dir.parts[-1][-2:])
    name = 'step_' + str(index)
    # BUG FIX: the prefix test previously used str(x) (the full path), which
    # is vacuously true under 'frames/...' and therefore admitted ANY *.jpeg
    # file in the directory; test the filename itself instead.
    frames = [x for x in step_dir.iterdir()
              if x.name.startswith('frame') and x.name.endswith('.jpeg')]
    frames.sort()
    header = {
        'name': name,
        'index': index,
        'num_frames': len(frames),
        'key_frame': KEY_FRAMES[index - 1]
    }
    header_b = json.dumps(header, separators=(',', ':')).encode('utf-8')
    with open(name + '.trace', 'wb') as f:
        packed_header = struct.pack('>I{len}s'.format(len=len(header_b)),
                                    len(header_b), header_b)
        f.write(packed_header)
        for frame in frames:
            data = frame.read_bytes()
            packed_frame = struct.pack('>I{len}s'.format(len=len(data)),
                                       len(data), data)
            f.write(packed_frame)
if __name__ == '__main__':
    # Pack every step directory found under STEPDIR.
    for step_dir in get_steps():
        process_step(step_dir)
| true |
5b3fbd57d0d913a020f02ac8eba83596f3638d72 | Python | mingxiao/cltc-smap | /sensor_driver.py | UTF-8 | 1,657 | 2.96875 | 3 | [] | no_license | """
smap driver to read via bluetooh from a arduino board and publish
data to smap archiver.
"""
import smap.driver as driver
import smap.util as util
import bluetooth
import re
class sensor_driver(driver.SmapDriver):
    # Python 2 sMAP driver: reads light-sensor values from an Arduino over a
    # Bluetooth RFCOMM socket and publishes them to the '/sensorX' timeseries.
    def parse_reading(self,data):
        """
        @param data - raw string received from the sensor (possibly multi-line)
        Returns the first positive integer found as a float, or -1.0 if no
        reading is present.

        NOTE(review): the regex only matches integers with a non-zero leading
        digit, so a literal '0' or a decimal reading is never matched --
        confirm that the device only emits positive integers.
        """
        pat =re.compile('([1-9][0-9]*)')
        datum = data.split('\n')
        #print datum
        for d in datum:
            m = pat.search(d)
            if m is not None:
                return float(m.group(1))
        return float(-1)
    def setup(self,opts):
        """Read connection options, open the Bluetooth socket and register
        the timeseries.  opts keys: 'bt_addr', 'port', 'Rate'."""
        self.addr = opts.get('bt_addr')
        self.port = int(opts.get('port'))
        # Socket timeout in seconds.
        self.timeout = 4
        #connect to bluetooth
        try:
            self.sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
            self.sock.settimeout(self.timeout)
            self.sock.connect((self.addr,self.port))
        except Exception,e:
            # NOTE(review): connection failures are only printed, not raised,
            # so read() will later fail on a missing self.sock -- confirm
            # this best-effort behaviour is intended.
            print 'Exception: %s' %e
            #raise Exception('%s connection error at port %s'%(self.addr,self.port))
        self.rate = float(opts.get('Rate'))
        self.add_timeseries('/sensorX','LUX',data_type='double',milliseconds=True)
    def start(self):
        # Schedule read() to run every self.rate seconds.
        util.periodicSequentialCall(self.read).start(self.rate)
    def read(self,bytes_to_read=256):
        """Receive one chunk from the socket, parse it and publish the value."""
        reading = self.sock.recv(bytes_to_read)
        #we can read in data, now to parse it to make sure we have a number to read
        #print 'READ',reading
        data = self.parse_reading(reading)
        print 'Parse',data
        self.add('/sensorX',data)
        pass
| true |
c0e8bd1f86c1b2c9bf90cf3ff2f97e152123c2f0 | Python | timothycryals/SmallProjects | /ComputerScience1101/RyalsAssignment3 (2) (1).py | UTF-8 | 2,814 | 4.125 | 4 | [] | no_license | #Name: Chase Ryals
#File: Assignment 3 Turtle in a Box
#Date: September 27, 2015
#Purpose: Write a game where the user tries to get a turtle in a square
import turtle
import random
s = 0  # running score across rounds
cont = 'y'
while cont == 'y':
    # Draw the outer 200x200 box at a random position
    turtle.showturtle()
    turtle.penup()
    x = random.randint(-200, 200)
    y = random.randint(-200, 200)
    turtle.setx(x)
    turtle.sety(y)
    turtle.pendown()
    turtle.forward(200)
    turtle.left(90)
    turtle.forward(200)
    turtle.left(90)
    turtle.forward(200)
    turtle.left(90)
    turtle.forward(200)
    turtle.penup()  # BUG FIX: was `turtle.penup` (missing parentheses -> no-op)
    # Outer box corner coordinates
    x_lowerright = (x + 200)
    y_lowerright = y
    x_upperright = (x + 200)
    y_upperright = (y + 200)
    x_upperleft = x
    y_upperleft = (y + 200)
    # Draw the inner 100x100 box, centred inside the outer one
    aturtle = turtle.Turtle()
    aturtle.showturtle()
    aturtle.penup()
    aturtle.goto(x, y)
    aturtle.left(90)
    aturtle.forward(50)
    aturtle.right(90)
    aturtle.forward(50)
    aturtle.pencolor("red")
    aturtle.pendown()
    aturtle.forward(100)
    aturtle.left(90)
    aturtle.forward(100)
    aturtle.left(90)
    aturtle.forward(100)
    aturtle.left(90)
    aturtle.forward(100)
    aturtle.penup()
    # Inner box corner coordinates
    x2 = x + 50
    y2 = y + 50
    x2_lowerright = (x2 + 100)
    y2_lowerright = y2
    x2_upperright = (x2 + 100)
    y2_upperright = (y2 + 100)
    x2_upperleft = x2
    y2_upperleft = (y2 + 100)
    # Ask the player for a target position and move their turtle there
    userTurtle = turtle.Turtle()
    userTurtle.showturtle()
    userTurtle.penup()
    xcor = turtle.numinput("Player Input", "Please enter the x-coordinate")
    ycor = turtle.numinput("Player Input", "Please enter the y-coordinate")
    userTurtle.goto(xcor, ycor)
    # Scoring: 2 points inside the inner box, 1 point elsewhere inside the
    # outer box, 0 points outside both.
    if x < xcor < x_lowerright:
        if y < ycor < y_upperright:
            if x2 <= xcor <= x2_lowerright:
                # BUG FIX: the vertical inner-box test used y_upperright (the
                # OUTER box top edge) instead of y2_upperright, so positions
                # above the inner box still scored 2 points.
                if y2 <= ycor <= y2_upperright:
                    s = s + 2  # 2 points for landing in the inner box
                    turtle.write("You're in! Score: %s" % s)
                else:
                    s = s + 1  # 1 point for the outer box only
                    turtle.write("Almost there! Score: %s" % s)
            else:
                s = s + 1  # 1 point for the outer box only
                turtle.write("Almost there! Score: %s" % s)
        else:
            turtle.write("We missed you! Score: %s" % s)
    else:
        turtle.write("We missed you! Score: %s" % s)
    cont = turtle.textinput("Next Round", "Continue? (y/n)")
    aturtle.reset()  # erase the inner box
    turtle.reset()  # erase the outer box
    userTurtle.reset()  # erase the user's turtle
turtle.write("GAME OVER! Your score is %s" % s)
eb7764702e725ee38b4e035ccbec1e2f20b36de2 | Python | Vincent105/python | /04_The_Path_of_Python/T-resource_Python_201904/ex/ex17_5.py | UTF-8 | 877 | 3.546875 | 4 | [] | no_license | # ex17_5.py
from PIL import Image, ImageDraw
newImage = Image.new('RGBA', (300, 300), "Yellow") # 建立300*300黃色底的影像
drawObj = ImageDraw.Draw(newImage)
# 繪製點
for x in range(100, 200, 3):
for y in range(100, 200, 3):
drawObj.point([(x,y)], fill='Green')
# 繪製線條, 繪外框線
drawObj.line([(0,0), (299,0), (299,299), (0,299), (0,0)], fill="Black")
# 繪製右上角美工線
for x in range(150, 300, 10):
drawObj.line([(x,0), (300,x-150)], fill="Blue")
# 繪製左下角美工線
for y in range(150, 300, 10):
drawObj.line([(0,y), (y-150,300)], fill="Blue")
# 繪製左上角美工線
for y in range(150, 0, -10):
drawObj.line([(0,y), (150-y,0)], fill="Blue")
# 繪製右下角美工線
for x in range(150, 300, 10):
drawObj.line([(x,300), (300,450-x)], fill="Blue")
print(x)
newImage.save("fig17_5.png")
| true |
43bf568a08c860352b32da40f674da0dc2e8cfec | Python | zhangcj5131/tensorflow_teaching | /p33.3dense.py | UTF-8 | 1,585 | 2.953125 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import math
NUM = 200
HIDDEN_UNITS = 200
LINES = 2


def get_samples(num=NUM):
    """Return `num` tuples (x, sin(x), x**2) with x evenly spaced over [-pi, pi]."""
    step = (2 * math.pi) / (num - 1)
    # Accumulate the x grid first, then derive both target curves from it.
    xs = []
    x = -math.pi
    for _ in range(num):
        xs.append(x)
        x += step
    return [(v, math.sin(v), v ** 2) for v in xs]
# Each argument is a (x-values, y-values) pair to draw on the shared figure.
def plot(*para):
    """Add every (x, y) series in `para` to the current pyplot figure."""
    for x_values, y_values in para:
        plt.plot(x_values, y_values)
def predict(xs, ys, lr = 0.01, epoches = 2000, hidden_units = HIDDEN_UNITS):
    """Fit a one-hidden-layer TF1 network mapping x -> LINES outputs and
    return its predictions on `xs` after training.

    xs: flat array of inputs; ys: targets with LINES columns
    (shapes assumed from the caller below -- TODO confirm).
    lr: Adam learning rate; epoches: number of full-batch iterations.
    """
    x = tf.placeholder(tf.float32, [None], 'x')
    # Hidden layer: reshape the flat input into a column vector first.
    m = tf.layers.dense(tf.reshape(x, [-1, 1]), hidden_units, activation=tf.nn.relu, name = 'dense1')
    y_predict = tf.layers.dense(m, LINES, use_bias=False, name = 'dense2')#200,2
    y = tf.placeholder(tf.float32, [None, LINES], 'y')
    loss = tf.reduce_mean(tf.square(y - y_predict))
    train_op = tf.train.AdamOptimizer(lr).minimize(loss)
    # Report RMSE while the optimizer still minimizes the MSE defined above.
    loss = tf.sqrt(loss)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(epoches):
            feed_dict = {
                x: xs,
                y: ys
            }
            _, lo = sess.run([train_op, loss], feed_dict)
            print('loss = %.8f' % lo)
        return sess.run(y_predict, {x: xs})
if __name__ == '__main__':
    samples = get_samples()
    # Transpose to column-major so each curve is a row.
    columns = np.transpose(samples, [1, 0])
    xs = columns[0]
    # Remaining columns are the targets, shaped (num_samples, LINES).
    ys = np.transpose([columns[i] for i in range(1, LINES + 1)], [1, 0])
    plot((xs, ys))
    y_predict = predict(xs, ys)
    plot((xs, y_predict))
    plt.show()
6ef0235076a07f8c8761dd12ddc2750f3fa70005 | Python | ddanford/csci568 | /project05/similarityMetrics.py | UTF-8 | 5,001 | 3.546875 | 4 | [] | no_license | import random
from math import sqrt
#Find the Euclidean (linear) distance between two data objects
#Returns a number in the range [0,1]
def euclidean(subject, target):
    """Similarity derived from Euclidean distance: 1 / (1 + distance).

    Prints a message and returns None when the inputs are not two
    equal-length lists (preserving the original best-effort behaviour).
    """
    # Parenthesized print() runs on both Python 2 and 3; the original
    # statement form was Python-2-only.
    if type(subject) != type([]) or type(target) != type([]):
        print('Parameters must both be lists of attributes.\n')
        return
    if len(subject) != len(target):
        print('Paramters must have the same number of attributes.\n')
        return
    distance = 0
    for s, t in zip(subject, target):
        distance += (s - t) ** 2
    distance = sqrt(distance)
    return 1 / (1 + distance)
#Find the Simple Matching Coefficient
#This value will always be in the range [0,1]
def smc(subject, target):
    """Simple matching coefficient of two binary vectors:
    (M00 + M11) / total attribute count."""
    # Parenthesized print() runs on both Python 2 and 3.
    if type(subject) != type([]) or type(target) != type([]):
        print('Parameters must both be lists of attributes.\n')
        return
    if len(subject) != len(target):
        print('Paramters must have the same number of attributes.\n')
        return
    # Float counters so the final division is exact on Python 2 as well.
    num00 = 0.0
    num01 = 0.0
    num10 = 0.0
    num11 = 0.0
    for s, t in zip(subject, target):
        s = int(s)
        t = int(t)
        if s == 0 and t == 0:
            num00 += 1
        elif s == 0 and t == 1:
            num01 += 1
        elif s == 1 and t == 0:
            num10 += 1
        elif s == 1 and t == 1:
            num11 += 1
    similarity = (num00 + num11) / len(subject)
    return similarity
#Find the Jaccard Similarity
#This value will always be in the range [0,1]
def jaccard(subject, target):
    """Jaccard coefficient of two binary vectors: M11 / (M01 + M10 + M11).

    NOTE: raises ZeroDivisionError when neither vector contains a 1,
    matching the original behaviour.
    """
    # Parenthesized print() runs on both Python 2 and 3.
    if type(subject) != type([]) or type(target) != type([]):
        print('Parameters must both be lists of attributes.\n')
        return
    if len(subject) != len(target):
        print('Paramters must have the same number of attributes.\n')
        return
    num00 = 0.0
    num01 = 0.0
    num10 = 0.0
    num11 = 0.0
    for s, t in zip(subject, target):
        s = int(s)
        t = int(t)
        if s == 0 and t == 0:
            num00 += 1
        elif s == 0 and t == 1:
            num01 += 1
        elif s == 1 and t == 0:
            num10 += 1
        elif s == 1 and t == 1:
            num11 += 1
    similarity = num11 / (num01 + num10 + num11)
    return similarity
#Standard Deviation function for use in Pearson Correlation
def stdev(list):
    """Sample standard deviation (n - 1 denominator) of a list of numbers."""
    # NOTE: the parameter name shadows the builtin `list`; kept unchanged
    # for interface compatibility with existing callers.
    mean = sum(list) / len(list)
    total = 0
    for value in list:
        total += (value - mean) ** 2
    return sqrt(total / (len(list) - 1.0))

#Find Pearson Correlation Coefficient
#Returns a value in the range [-1,1]
def pearson(subject, target):
    """Pearson correlation coefficient of two equal-length numeric lists."""
    # Parenthesized print() runs on both Python 2 and 3.
    if type(subject) != type([]) or type(target) != type([]):
        print('Parameters must both be lists of attributes.\n')
        return
    if len(subject) != len(target):
        print('Paramters must have the same number of attributes.\n')
        return
    sbar = sum(subject) / len(subject)
    # BUG FIX: tbar previously averaged `subject` instead of `target`
    # (harmless numerically because sum(s - sbar) == 0, but still wrong).
    tbar = sum(target) / len(target)
    sstd = stdev(subject)
    tstd = stdev(target)
    pcc = 0
    for s, t in zip(subject, target):
        pcc += (s - sbar) / sstd * (t - tbar) / tstd
    return pcc / (len(subject) - 1)
#Find the Cosine Similarity
#Returns a value in the range [0,1]
def cosine(subject, target):
    """Cosine similarity of two vectors, rescaled from [-1, 1] into [0, 1]."""
    # Parenthesized print() runs on both Python 2 and 3.
    if type(subject) != type([]) or type(target) != type([]):
        print('Parameters must both be lists of attributes.\n')
        return
    if len(subject) != len(target):
        print('Paramters must have the same number of attributes.\n')
        return
    dot = 0
    mags = 0
    magt = 0
    for s, t in zip(subject, target):
        dot += s * t
        mags += s ** 2
        magt += t ** 2
    similarity = dot / (sqrt(mags) * sqrt(magt))
    return (similarity + 1) / 2
def roundedRandom():
    """Return a random 0 or 1 (a fair coin flip via rounding)."""
    sample = random.random()
    return round(sample)
def test():
    """Smoke-test each metric against hand-computed expected values,
    printing a message for any mismatch."""
    # Parenthesized print() runs on both Python 2 and 3; the original
    # statement form was Python-2-only.
    if round(euclidean([1.0, 3.0, 4.0, 6.0], [8.0, 7.0, 5.0, 10.0]), 5) != 0.09945:
        print('Problem with Euclidean!')
        print(euclidean([1.0, 3.0, 4.0, 6.0], [8.0, 7.0, 5.0, 10.0]))
    if smc([0, 0, 1], [0, 0, 1]) != 1:
        print('Problem with SMC!')
        print(smc([0, 0, 1], [0, 0, 1]))
    if jaccard([0, 1, 1], [0, 1, 0]) != .5:
        print('Problem with Jaccard!')
        print(jaccard([0, 1, 1], [0, 1, 0]))
    if round(pearson([1.0, 3.0, 4.0, 6.0], [8.0, 7.0, 5.0, 10.0]), 5) != 0.30769:
        print('\nProblem with Pearson!')
        print(pearson([1.0, 3.0, 4.0, 6.0], [8.0, 7.0, 5.0, 10.0]))
    if round(cosine([1.0, 3.0, 4.0, 6.0], [8.0, 7.0, 5.0, 10.0]), 5) != 0.94865:
        print('\nProblem with Cosine!')
        print(cosine([1.0, 3.0, 4.0, 6.0], [8.0, 7.0, 5.0, 10.0]))
if __name__ == "__main__":
subject = [roundedRandom(), roundedRandom(), roundedRandom(), roundedRandom(), roundedRandom(), roundedRandom(), roundedRandom(), roundedRandom(), roundedRandom(), roundedRandom()]
target = [roundedRandom(), roundedRandom(), roundedRandom(), roundedRandom(), roundedRandom(), roundedRandom(), roundedRandom(), roundedRandom(), roundedRandom(), roundedRandom()]
test()
print '\nSubject is '
print subject
print '\nTarget is '
print target
print '\nEuclidean distance is ' + str(euclidean(subject, target))
print 'Simple Matching Coefficient is ' + str(smc(subject, target))
print 'Jaccard coefficient is ' + str(jaccard(subject, target))
print 'Pearson Corelation Coefficient is ' + str(pearson(subject, target))
print 'Cosine similarity is ' + str(cosine(subject, target))
| true |
837d4da15ea9d85af5147679fe647911dc72e612 | Python | amirunpri2018/MachineLearning-2 | /Assignment 3/main_part1.py | UTF-8 | 8,043 | 3.15625 | 3 | [
"MIT"
] | permissive | import math
import numpy as np
import matplotlib.pyplot as plt
from getDataset import getDataSet
from sklearn.linear_model import LogisticRegression
# Starting codes
# step 1: generate dataset that includes both positive and negative samples,
# where each sample is described with two features.
# 250 samples in total.
[X, y] = getDataSet() # note that y contains only 1s and 0s
# create figure for all charts to be placed on so can be viewed together
fig = plt.figure()
def func_DisplayData(dataSamplesX, dataSamplesY, chartNum, titleMessage):
idx1 = (dataSamplesY == 0).nonzero() # object indices for the 1st class
idx2 = (dataSamplesY == 1).nonzero()
ax = fig.add_subplot(1, 3, chartNum)
# no more variables are needed
plt.plot(dataSamplesX[idx1, 0], dataSamplesX[idx1, 1], 'r*')
plt.plot(dataSamplesX[idx2, 0], dataSamplesX[idx2, 1], 'b*')
# axis tight
ax.set_xlabel('x_1')
ax.set_ylabel('x_2')
ax.set_title(titleMessage)
# plotting all samples
func_DisplayData(X, y, 1, 'All samples')
# number of training samples
nTrain = 120
######################PLACEHOLDER 1#start#########################
# choosing random elements from the dataset
maxIndex = len(X)
randomTrainingSamples = np.random.choice(maxIndex, nTrain, replace = False)
# initializing the training and testing lists
x_train = []
y_train = []
x_test = []
y_test = []
# getting the values for training and testing lists
for i in range(maxIndex):
if i in randomTrainingSamples:
x_train.append(X[i])
y_train.append(y[i])
else:
x_test.append(X[i])
y_test.append(y[i])
# converting the training and testing lists to numpy arrays
x_train = np.array(x_train)
y_train = np.array(y_train)
x_test = np.array(x_test)
y_test = np.array(y_test)
####################PLACEHOLDER 1#end#########################
# plot the samples you have pickup for training, check to confirm that both negative
# and positive samples are included.
func_DisplayData(x_train, y_train, 2, 'training samples')
func_DisplayData(x_test, y_test, 3, 'testing samples')
# show all charts
plt.show()
# step 2: train logistic regression models
######################PLACEHOLDER2 #start#########################
# using self made model for training
def Sigmoid(x):
g = float(1.0 / float((1.0 + math.exp(-1.0 * x))))
return g
def Prediction(theta, X):
hyp = 0
for i in range(len(theta)):
hyp += X[i]*theta[i]
return Sigmoid(hyp)
def Cost_Function_Derivative(X,Y,theta,j,m,alpha):
sumErrors = 0
for i in range(m):
xi = X[i]
xij = xi[j]
hi = Prediction(theta,X[i])
error = (hi - Y[i])*xij
sumErrors += error
m = len(X)
constant = float(alpha)/float(m)
J = constant * sumErrors
return J
def Cost_Function(X,Y,theta,m):
sumOfErrors = 0
for i in range(m):
xi = X[i]
est_yi = Prediction(theta,xi)
if Y[i] == 1:
error = Y[i] * math.log(est_yi)
elif Y[i] == 0:
error = (1-Y[i]) * math.log(1-est_yi)
sumOfErrors += error
const = -1/m
J = const * sumOfErrors
return J
def Gradient_Descent(X,Y,theta,m,alpha):
new_theta = []
for j in range(len(theta)):
deltaF = Cost_Function_Derivative(X,Y,theta,j,m,alpha)
new_theta_value = theta[j] - deltaF
new_theta.append(new_theta_value)
return new_theta
# initial model parameters
theta = [0,0,0]
# learning rates
alpha = 0.1
# maximal iterations
max_iteration = 2000
# getting the values of x0 for training dataset
train_xValues = np.ones((len(x_train), 3))
train_xValues[:, 1:3] = x_train[:,:]
train_yValues = y_train
arrCost = []
m = len(train_xValues) # number of samples
for x in range(max_iteration):
# call the functions for gradient descent method
new_theta = Gradient_Descent(train_xValues,train_yValues,theta,m,alpha)
theta = new_theta
# calculating the cost function
arrCost.append(Cost_Function(train_xValues,train_yValues,theta,m))
if x % 200 == 0:
print("Cost at iteration",x,":",Cost_Function(train_xValues,train_yValues,theta,m))
# using sklearn class for training
logReg = LogisticRegression()
# call the function fit() to train the class instance
logReg.fit(x_train,y_train)
coeffs = logReg.coef_ # coefficients
intercept = logReg.intercept_ # bias
bHat = np.hstack((np.array([intercept]), coeffs)) # model parameters
######################PLACEHOLDER2 #end #########################
# step 3: Use the model to get class labels of testing samples.
######################PLACEHOLDER3 #start#########################
# predicting the values using self made model
# appending the values of X0 to the testing dataset
test_xValues = np.ones((len(x_test), 3))
test_xValues[:, 1:3] = x_test[:,:]
# getting the values of the ypred
test_yValues = test_xValues.dot(theta)
for i in range(len(test_yValues)):
test_yValues[i] = Sigmoid(test_yValues[i])
test_yValues = (test_yValues >= 0.5).astype(int)
# predicting the values using scikit learn library
test_yValues_scikit = test_xValues.dot(np.transpose(bHat))
for i in range(len(test_yValues_scikit)):
test_yValues_scikit[i] = Sigmoid(test_yValues_scikit[i])
test_yValues_scikit = (test_yValues_scikit >= 0.5).astype(int)
######################PLACEHOLDER 3 #end #########################
# step 4: evaluation
# function for calculating the confusion matrix
def func_calConfusionMatrix(predY, trueY):
# finding the confusion matrix
labels = len(np.unique(trueY))
conf_matr = np.zeros(shape = (labels, labels))
predY = np.transpose(predY)
trueY = np.transpose(trueY)
for i in range(len(trueY)):
for j in range(len(trueY[i])):
conf_matr[trueY[i][j]][predY[i][j]] += 1
# finding the accuracy of the model
sum_of_diag = 0
sum_of_elem = 0
for i in range(len(conf_matr)):
for j in range(len(conf_matr[i])):
if i == j:
sum_of_diag += conf_matr[i][j]
sum_of_elem += conf_matr[i][j]
accuracy = sum_of_diag / sum_of_elem
# finding the precision value of the model
precision = []
for label in range(labels):
column = conf_matr[:, label]
precision.append(conf_matr[label, label] / column.sum())
# finding the recall value of the model
recall = []
for label in range(labels):
row = conf_matr[label, :]
recall.append(conf_matr[label, label] / row.sum())
return conf_matr, accuracy, precision, recall
# evaluating self made model
self_testYDiff = np.abs(test_yValues - y_test)
self_avgErr = np.mean(self_testYDiff)
self_stdErr = np.std(self_testYDiff)
self_score = (len(self_testYDiff) - np.sum(self_testYDiff)) / len(self_testYDiff)
# evaluating scikit model
testYDiff = np.abs(test_yValues_scikit - y_test)
avgErr = np.mean(testYDiff)
stdErr = np.std(testYDiff)
scikit_score = logReg.score(x_test, y_test)
# comparing both the models
print('\nSelf made model average error: {} ({})'.format(self_avgErr, self_stdErr))
print('Scikit learn model average error: {} ({})'.format(avgErr, stdErr))
if self_score > scikit_score:
print("Self Made Model Wins!")
elif self_score < scikit_score:
print("Scikit Model Wins!")
else:
print("Both models perfomed equally well!")
# finding the confusion matrices and respective parameters of both the models
print("\nSelf Made Model:")
self_cm, self_acc, self_pre, self_rec = func_calConfusionMatrix(test_yValues, np.array(y_test, dtype = int))
print("Confusion Matrix:\n {} \nAccuracy = {} \nPrecision = {} \nRecall = {}".format(self_cm, self_acc, self_pre, self_rec))
print("\nScikit Model:")
scikit_cm, scikit_acc, scikit_pre, scikit_rec = func_calConfusionMatrix(test_yValues_scikit, np.array(y_test, dtype = int))
print("Confusion Matrix:\n {} \nAccuracy = {} \nPrecision = {} \nRecall = {}".format(scikit_cm, scikit_acc, scikit_pre, scikit_rec)) | true |
5ea6c29226d20f23590e8ebf51ac066a49c43c81 | Python | agustinmontero/com_datos | /tp_3/tcp_server.py | UTF-8 | 539 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python
import socket
TCP_IP = '192.168.0.4'
TCP_PORT = 13012
BUFFER_SIZE = 512
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
conn, addr = s.accept()
print('Connection address:', addr)
while True:
try:
data = conn.recv(BUFFER_SIZE)
if not data:
break
print("received data:", data)
conn.send(data) # echo
except (KeyboardInterrupt, OSError):
print("Finalizando...")
conn.close()
break
conn.close()
| true |
0755dee7babae1fc722e28f02cef15405be7a546 | Python | juhasch/PhysicalQuantities | /tests/test_fractdict.py | UTF-8 | 1,580 | 3.234375 | 3 | [
"BSD-3-Clause"
] | permissive | from PhysicalQuantities.fractdict import *
def test_getitem():
a = FractionalDict()
a['a'] = 1
assert a['a'] == 1
def test_create():
""" create ndict from dict """
a = {'a': 1}
b = FractionalDict(a)
assert a == b
def test_add():
a = FractionalDict()
a['a'] = 1
b = FractionalDict()
b['a'] = 2
b['b'] = 2
c = {'a': 3, 'b': 2}
assert a+b == c
def test_sub():
a = FractionalDict()
a['a'] = 1
b = FractionalDict()
b['a'] = 2
b['b'] = 2
c = {'a': -1, 'b': -2}
assert a-b == c
def test_mul_1():
a = FractionalDict({'a': 3, 'b': 2})
b = Fraction(2)
c = a*b
assert c['a'] == 3*2
assert c['b'] == 2*2
def test_div():
a = FractionalDict({'a': 3, 'b': 2})
c = Fraction(3)
b = a//c
assert b['a'] == Fraction(3, 3)
assert b['b'] == Fraction(2, 3)
def test_rdiv():
a = FractionalDict({'a': 3, 'b': 2})
b = Fraction(3)/a
assert b['a'] == Fraction(3, 3)
assert b['b'] == Fraction(3, 2)
def test_truediv():
a = FractionalDict({'a': 3, 'b': 2})
b = a/Fraction(3)
assert b['a'] == Fraction(3, 3)
assert b['b'] == Fraction(2, 3)
def test_floordiv():
a = FractionalDict({'a': 3, 'b': 6})
b = a//Fraction(3)
assert b['a'] == 1
assert b['b'] == 2
def test_rfloordiv():
a = FractionalDict({'a': 2, 'b': 3})
b = Fraction(6)//a
assert b['a'] == 3
assert b['b'] == 2
def test_rmul():
a = FractionalDict({'a': 3, 'b': 2})
b = Fraction(2)*a
assert b['a'] == 2*3
assert b['b'] == 2*2
| true |
fe45ca4c3347c979180f2715bfb3e74431ddc2ad | Python | elsayedrashed/pyspark-app | /test/unionDataframeTest.py | UTF-8 | 4,058 | 2.828125 | 3 | [] | no_license | """
Test Union of two dataframes.
"""
from packages.dataFrame import dataFrameUtility as su
from pyspark.sql import SparkSession
from pyspark.sql.functions import (lit,col,concat,split)
import os
"""
* Builds the dataFrame containing the Wake county restaurants
*
* @return A dataFrame
"""
def build_wake_restaurants_dataframe(df):
    """Normalize the Wake County CSV dataframe to the common restaurant
    schema shared with the Durham builder, then log a small preview.

    Returns the transformed dataframe with a synthetic unique `id` column.
    """
    # CONSISTENCY FIX: drop_cols was defined but never used -- the drop()
    # call repeated the same literals.  Use the list, matching the Durham
    # builder's `.drop(*drop_cols)` style.
    drop_cols = ["OBJECTID", "GEOCODESTATUS", "PERMITID"]
    df = df.withColumn("county", lit("Wake")) \
        .withColumnRenamed("HSISID", "datasetId") \
        .withColumnRenamed("NAME", "name") \
        .withColumnRenamed("ADDRESS1", "address1") \
        .withColumnRenamed("ADDRESS2", "address2") \
        .withColumnRenamed("CITY", "city") \
        .withColumnRenamed("STATE", "state") \
        .withColumnRenamed("POSTALCODE", "zip") \
        .withColumnRenamed("PHONENUMBER", "tel") \
        .withColumnRenamed("RESTAURANTOPENDATE", "dateStart") \
        .withColumn("dateEnd", lit(None)) \
        .withColumnRenamed("FACILITYTYPE", "type") \
        .withColumnRenamed("X", "geoX") \
        .withColumnRenamed("Y", "geoY") \
        .drop(*drop_cols)
    # Synthesize a dataset-wide unique id: <state>_<county>_<datasetId>.
    df = df.withColumn("id",
                       concat(col("state"), lit("_"), col("county"), lit("_"), col("datasetId")))
    df.show(5)
    df.printSchema()
    print("We have {} records in wake_restaurants_dataframe.".format(df.count()))
    # I left the following line if you want to play with repartitioning
    # df = df.repartition(4);
    return df
"""
* Builds the dataFrame containing the Durham county restaurants
*
* @return A dataFrame
"""
def build_durham_restaurants_dataframe(df):
    """Normalize the Durham County JSON dataframe to the common restaurant
    schema shared with the Wake builder, then log a small preview.

    Returns the transformed dataframe with a synthetic unique `id` column.
    """
    raw_cols = ["fields", "geometry", "record_timestamp", "recordid"]
    # Flatten the nested `fields` struct into the shared column layout,
    # then discard the raw nested columns.
    df = (df.withColumn("county", lit("Durham"))
          .withColumn("datasetId", col("fields.id"))
          .withColumn("name", col("fields.premise_name"))
          .withColumn("address1", col("fields.premise_address1"))
          .withColumn("address2", col("fields.premise_address2"))
          .withColumn("city", col("fields.premise_city"))
          .withColumn("state", col("fields.premise_state"))
          .withColumn("zip", col("fields.premise_zip"))
          .withColumn("tel", col("fields.premise_phone"))
          .withColumn("dateStart", col("fields.opening_date"))
          .withColumn("dateEnd", col("fields.closing_date"))
          .withColumn("type", split(col("fields.type_description"), " - ").getItem(1))
          .withColumn("geoX", col("fields.geolocation").getItem(0))
          .withColumn("geoY", col("fields.geolocation").getItem(1))
          .drop(*raw_cols))
    # Synthesize a dataset-wide unique id: <state>_<county>_<datasetId>.
    df = df.withColumn("id",
                       concat(col("state"), lit("_"), col("county"), lit("_"), col("datasetId")))
    df.show(5)
    df.printSchema()
    print("We have {} records in durham_restaurants_dataframe.".format(df.count()))
    # I left the following line if you want to play with repartitioning
    # df = df.repartition(4);
    return df
# Resolve the two source data files relative to this script's location.
script_dir = os.path.dirname(__file__)
wake_csv_path = os.path.join(
    script_dir, "../resources/data/sparkInActionData/Restaurants_in_Wake_County_NC.csv")
durham_json_path = os.path.join(
    script_dir, "../resources/data/sparkInActionData/Restaurants_in_Durham_County_NC.json")

# Creates a session on a local master
spark = SparkSession.builder.appName("Union of two dataframes") \
    .master("local[*]").getOrCreate()

wake_raw = spark.read.csv(path=wake_csv_path, header=True, inferSchema=True)
durham_raw = spark.read.json(durham_json_path)

wakeRestaurantsDf = build_wake_restaurants_dataframe(wake_raw)
durhamRestaurantsDf = build_durham_restaurants_dataframe(durham_raw)

# Combine dataframes
df = su.combineDataframes(wakeRestaurantsDf, durhamRestaurantsDf)
df.show(5)
df.printSchema()
print("We have {} records in the combined dataframe.".format(df.count()))
print("Partition count: {}".format(df.rdd.getNumPartitions()))
89c3db6ce751d6ae7fc1c7b8558b37d1482a4199 | Python | journeytorainbow/BOJ | /1697.py | UTF-8 | 684 | 3.328125 | 3 | [] | no_license | import sys
from collections import deque
# start_pos is Su-bin's position; K is the sibling's position.
N, K = map(int, sys.stdin.readline().split())
# Reachable positions range from 0 up to 100,000.
array = [0] * 100001
def bfs(start_pos):
    """Breadth-first search over the number line 0..100000.

    Expands from ``start_pos`` to ``pos - 1``, ``pos + 1`` and
    ``pos * 2`` until the target ``K`` (module global) is reached.
    ``array[pos]`` (module global) stores the step count to reach
    ``pos`` and doubles as the visited marker (0 == unvisited).

    Returns the minimum number of steps needed to reach ``K``.
    """
    # A plain list would also work, but deque pops from the left in O(1).
    need_visit = deque([start_pos])
    while need_visit:
        now_pos = need_visit.popleft()
        if now_pos == K:
            return array[now_pos]
        for next_pos in (now_pos - 1, now_pos + 1, now_pos * 2):
            # Stay on the board and only enqueue unvisited positions.
            if 0 <= next_pos <= 100000 and not array[next_pos]:
                array[next_pos] = array[now_pos] + 1
                need_visit.append(next_pos)
# Answer the query: fewest steps from N to K.
# (Fix: a stray dataset cell "| true" appended to this line made it a
# syntax error.)
print(bfs(N))
e996dd4df36562566d89ac09514dc6fd0d98cb45 | Python | xiaomomo/star_wars | /src/constant.py | UTF-8 | 812 | 2.75 | 3 | [] | no_license | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import pygame
pygame.init() # game initialisation
pygame.mixer.init() # sound mixer initialisation
# Background music for the game
pygame.mixer.music.load('../assets/sound/game_music.wav')
pygame.mixer.music.set_volume(0.2)
# Bullet-firing sound effect
bullet_sound = pygame.mixer.Sound("../assets/sound/bullet.wav")
bullet_sound.set_volume(0.2)
# Sound played when the player's plane is destroyed
game_over_sound = pygame.mixer.Sound("../assets/sound/game_over.wav")
game_over_sound.set_volume(0.2)
# Sound played when an enemy plane is destroyed
enemy1_down_sound = pygame.mixer.Sound("../assets/sound/enemy1_down.wav")
enemy1_down_sound.set_volume(0.2)
# Game-related constants (window size and frame rate)
WIDTH = 360
HEIGHT = 480
FPS = 60
# Colour constants (RGB)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
ENEMY_SIZE = 6
ENEMY_MIN_SIZE = 2
| true |
f3e64113d85db306bbf8e1e7f0e924f75fcbaed8 | Python | dfarache/hackerrank | /serviceLane/serviceLane.py | UTF-8 | 300 | 3.3125 | 3 | [] | no_license |
def calculate_largest_vehicle(freeway_widths=None, bounds=None):
    """Return the widest vehicle that fits through a freeway segment.

    The answer is the minimum lane width over the inclusive index
    range [entry, exit].

    Both parameters are optional to stay backward compatible with the
    original stdin-driven version:
        freeway_widths: sequence of lane widths; defaults to the module
            global ``freeway``.
        bounds: (entry, exit) index pair; defaults to reading a
            whitespace-separated pair from stdin.
    """
    if bounds is None:
        x, y = (int(v) for v in input().split())
    else:
        x, y = bounds
    if freeway_widths is None:
        freeway_widths = freeway  # module-level global built from stdin
    # y is inclusive, hence the +1 on the slice end.
    return min(freeway_widths[x:y + 1])
# --- script entry: read the freeway description, then answer each query ---
[length_of_freeway, number_of_tests] = input().split()
freeway = [int(x) for x in input().split()]
for test in range(int(number_of_tests)):
    # Each call reads its own (entry, exit) pair from stdin.
    print(str(calculate_largest_vehicle()))
| true |
a69b810f38933c8d38885c6783ac8f2bf11f3565 | Python | Zachary-Ronayne/CS320_Individual_Assignment_Tetris_EvolveSim | /GuiController/GameGui.py | UTF-8 | 1,720 | 3.078125 | 3 | [] | no_license | import pygame
from NeuralNet import NeuralNetHandler
# a class that handles the calculations and rendering for a gui that displays the Tetris game and neural net
class GameGui:
    """Pygame window displaying the Tetris game and the neural net.

    Owns the pygame display plus a cached surface onto which the
    neural network is drawn; the cache is refreshed only on demand via
    updateNeuralNetSurface, since redrawing the net is comparatively
    expensive.
    """

    def __init__(self, centralHandler):
        pygame.init()
        pygame.font.init()
        self.centralHandler = centralHandler
        self.gui = pygame.display.set_mode((1800, 1000))
        pygame.display.set_caption("Tetris game and neural net")
        # create the initial surface
        self.neuralNetSurface = pygame.image.load("empty.png").convert()

    def updateNeuralNetSurface(self, neuralNet):
        """Redraw the cached neural-net surface from the net's current state."""
        neuralNet.renderWithPygame(self.neuralNetSurface, self.centralHandler)

    def render(self, tetris):
        """Render the given Tetris game and the cached net surface to the GUI.

        tetris is a Tetris object.
        """
        self.centralHandler.pyGui.fill((255, 255, 255))
        self.centralHandler.pyGui.blit(self.neuralNetSurface, (0, 0))
        tetris.renderWithPygame(self.centralHandler.pyGui, self.centralHandler)
        pygame.display.update()

    def tick(self, tetris, neuralNet):
        """Advance the game one step, letting neuralNet pick a move.

        Use None for neuralNet if no move should be made this tick.
        """
        if neuralNet is not None:
            # only send the central handler for rendering if the sim is
            # not looping
            s = self.centralHandler.simMenu
            if s is not None and (s.testing or s.looping):
                handle = None
            else:
                handle = self.centralHandler
            NeuralNetHandler.makeNeuralNetMove(neuralNet, tetris, handle)
        tetris.nextLine()
| true |
40aebb80c42bcad3c6453411b53d6e2dc662197a | Python | BarryZM/CLUENER | /data-ming.py | UTF-8 | 1,562 | 2.859375 | 3 | [] | no_license | #!/usr/bin/python
# coding:utf8
"""
@author: Cong Yu
@time: 2020-01-09 18:51
"""
import re
import json
def prepare_label():
text = """
地址(address): 544
书名(book): 258
公司(company): 479
游戏(game): 281
政府(government): 262
电影(movie): 307
姓名(name): 710
组织机构(organization): 515
职位(position): 573
景点(scene): 288
"""
a = re.findall(r"((.*?))", text.strip())
print(a)
label2id = {"O": 0}
index = 1
for i in a:
label2id["S_" + i] = index
label2id["B_" + i] = index + 1
label2id["M_" + i] = index + 2
label2id["E_" + i] = index + 3
index += 4
open("label2id.json", "w").write(json.dumps(label2id, ensure_ascii=False, indent=2))
def prepare_len_count(paths=("data/thuctc_train.json", "data/thuctc_valid.json"),
                      out_path="len_count.json"):
    """Histogram the character length of the ``"text"`` field across the
    dataset splits and dump the result to *out_path*.

    Parameters were added (defaulting to the original hard-coded file
    names) so the function is no longer wired to train/valid only.
    Returns the ``{length: count}`` histogram.

    Fixes over the original: file handles are closed via ``with``, and
    the per-split counting loop is no longer duplicated.
    """
    len_count = {}
    for path in paths:
        with open(path, encoding="utf-8") as f:
            for line in f:
                if line.strip():
                    record = json.loads(line.strip())
                    length = len(record["text"])
                    len_count[length] = len_count.get(length, 0) + 1
    print("len_count", json.dumps(len_count, indent=2))
    with open(out_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(len_count, indent=2))
    return len_count
| true |
3e5b149887017009441c13da473b652ac5c4d764 | Python | real-easypy/easypy | /easypy/tables.py | UTF-8 | 4,283 | 2.953125 | 3 | [
"BSD-3-Clause"
] | permissive | from io import StringIO
from easypy.collections import defaultlist
from easypy.colors import colorize, uncolored
from easypy.humanize import compact
class Column():
    """Describes one column of a :class:`Table`.

    Holds presentation options only; the actual layout work happens in
    ``Table.render``.  Options left as ``None`` inherit the table-wide
    defaults when the table is rendered.
    """

    def __init__(self, name, title=None, max_width=None, align=None, header_align=None, padding=None, drop_if_empty=False):
        self.name = name                    # key used to look the value up in each row dict
        self.max_width = max_width          # None -> inherit the table default
        self.align = align                  # 'left' / 'right' / 'center'; None -> table default
        self.header_align = header_align    # alignment of the title cell; None -> table default
        self.padding = padding              # spaces around each cell; None -> table default
        self.overflow = 'ellipsis'          # long cells are shortened with "..."
        self.title = title or name          # header text defaults to the column name
        self.drop_if_empty = drop_if_empty  # hide the column when every row is empty
        self.visible = True
class Table():
    """
    Renders row dicts as an aligned, optionally colorized text table.

    :param List[Column] columns: column descriptors
    :param List[Bunch] data: rows
    """

    HEADER_SEP = "|"
    SEPARATORS = "|"
    BAR = '='
    BAR_SEP = ":"

    def __init__(self, *columns, data=None, max_col_width=None, align='left', header_align='center', padding=1):
        self.data = data or []
        self.columns = []
        self.max_col_width = max_col_width
        self.align = align
        self.header_align = header_align
        self.padding = padding
        for column in columns:
            self.add_column(column)

    # Maps alignment names to str.format() alignment flags.
    _ALIGN_MAP = dict(left='<', right='>', center='^')

    def add_column(self, column: Column):
        self.columns.append(column)

    def add_row(self, **row):
        self.data.append(row)

    def render(self):
        """Return the fully formatted table as a string.

        Works in two passes: first format every cell column-by-column
        (so each column's width can be computed), then join the cells
        row-by-row with the separator characters.
        """
        rendered = defaultlist(list)
        columns = []

        def _get_value(data, value):
            # Missing/None cells render as empty strings.
            ret = data.get(value)
            if ret is None:
                ret = ''
            return ret

        for column in self.columns:
            if not column.visible:
                continue
            rows = [_get_value(data, column.name) for data in self.data]
            if not any(filter(lambda i: i != '', rows)) and column.drop_if_empty:
                continue
            columns.append(column)
            # Per-column options fall back to the table-wide defaults.
            if column.max_width is None:
                column.max_width = self.max_col_width
            if column.align is None:
                column.align = self.align
            if column.header_align is None:
                column.header_align = self.header_align
            if column.padding is None:
                column.padding = self.padding
            raw_data = [column.title] + rows
            colored_data = [colorize(str(data)) for data in raw_data]
            uncolored_data = [uncolored(data) for data in colored_data]
            max_width = column.max_width or max(len(data) for data in uncolored_data)
            for i, data in enumerate(colored_data):
                align = column.header_align if i == 0 else column.align
                # ANSI color codes add invisible characters, so widen the
                # field by the difference to keep visible widths aligned.
                coloring_spacing = len(colored_data[i]) - len(uncolored_data[i])
                spacing = max_width + coloring_spacing
                format_string = "{{data:{align}{spacing}}}".format(align=self._ALIGN_MAP[align], spacing=spacing)
                rendered[i].append(format_string.format(data=data))

        output = StringIO()
        for r_i, row in enumerate(rendered):
            r_parts = []
            sep = self.HEADER_SEP if r_i == 0 else self.SEPARATORS[r_i % len(self.SEPARATORS)]
            for col_i, col in enumerate(row):
                column = columns[col_i]
                padding = column.padding * " "
                if column.max_width and r_i > 0:
                    col = compact(col, column.max_width, suffix_length=column.max_width // 10)
                r_parts.append("{padding}{col}{padding}".format(col=col, padding=padding))
            output.write(sep.join(r_parts))
            output.write("\n")
            if r_i == 0:
                # Underline the header with a bar of the same visible width.
                r_parts = [self.BAR * len(uncolored(part)) for part in r_parts]
                output.write(self.BAR_SEP.join(r_parts))
                output.write("\n")
        output.seek(0)
        return output.read()
class DecoratedTable(Table):
    """Table variant drawn with Unicode box-drawing separators."""
    HEADER_SEP = "│"
    SEPARATORS = "┼│┊┊│"
    BAR = '═'
    BAR_SEP = "╪"
def _test():
    """Ad-hoc smoke test: build a small colorized table and print it."""
    table = Table(Column("first", "GREEN<<First>>"))
    table.add_column(Column("second", align='right'))
    table.add_row(first='1', second='BLUE<<longer>> second MAGENTA<<column>>')
    table.add_row(first='longer first column', second='2')
    print(table.render())
| true |
339c9ab082eb05ac775478b43b5a39fc950ae76b | Python | xinyooo/image-processing | /thinning/thinning.py | UTF-8 | 2,355 | 2.96875 | 3 | [] | no_license | import os
import sys
from PIL import Image
def filter3x3(matrixList):
    """Apply one Zhang-Suen-style thinning test to a 3x3 window.

    matrixList holds nine pixel values (0 = foreground/black,
    255 = background/white) in row-major order; index 4 is the centre.
    If the centre pixel is foreground and satisfies either thinning
    condition, it is deleted (set to 255) IN PLACE.  The (possibly
    modified) list is also returned for convenience.
    """
    # Map 0 -> 1 (foreground) and 255 -> 0 (background).
    checkList = [(element // 255 + 1) % 2 for element in matrixList]
    if checkList[4] != 1:
        # Background centre: nothing to do.
        return matrixList
    # N = number of foreground neighbours (centre excluded).
    N = checkList.count(1) - 1
    # T = number of 0 -> 1 transitions walking the 8-neighbourhood in
    # ring order 0,1,2,5,8,7,6,3 and back to 0 (same pairs the original
    # tested one by one).
    ring = [0, 1, 2, 5, 8, 7, 6, 3, 0]
    T = sum(1 for a, b in zip(ring, ring[1:])
            if checkList[a] == 0 and checkList[b] == 1)
    # Thinning 1
    if 2 <= N <= 6 and T == 1 and checkList[1] * checkList[5] * checkList[7] == 0 \
            and checkList[1] * checkList[3] * checkList[5] == 0:
        matrixList[4] = 255
    # Thinning 2
    if 2 <= N <= 6 and T == 1 and checkList[3] * checkList[5] * checkList[7] == 0 \
            and checkList[1] * checkList[3] * checkList[7] == 0:
        matrixList[4] = 255
    return matrixList
def modifyImage(pixelMap, imgMode, imgSize):
    """Run filter3x3 over every 3x3 window of the image, in place.

    pixelMap: PIL pixel-access object (indexable as ``pixelMap[x, y]``).
    imgMode: image mode string (unused; kept for interface compatibility).
    imgSize: (width, height) pair.
    """
    for i in range(imgSize[0] - 2):
        for j in range(imgSize[1] - 2):
            window = [pixelMap[i, j], pixelMap[i, j + 1], pixelMap[i, j + 2],
                      pixelMap[i + 1, j], pixelMap[i + 1, j + 1], pixelMap[i + 1, j + 2],
                      pixelMap[i + 2, j], pixelMap[i + 2, j + 1], pixelMap[i + 2, j + 2]]
            newWindow = filter3x3(window)
            # Write the (possibly thinned) window back into the image.
            pixelMap[i, j] = newWindow[0]
            pixelMap[i, j + 1] = newWindow[1]
            pixelMap[i, j + 2] = newWindow[2]
            pixelMap[i + 1, j] = newWindow[3]
            pixelMap[i + 1, j + 1] = newWindow[4]
            pixelMap[i + 1, j + 2] = newWindow[5]
            pixelMap[i + 2, j] = newWindow[6]
            pixelMap[i + 2, j + 1] = newWindow[7]
            pixelMap[i + 2, j + 2] = newWindow[8]
if __name__ == '__main__':
    if len(sys.argv) > 1:
        # Output file: "<input-stem>_thinning.jpg".
        filename = sys.argv[1].split('.')[0]
        image = Image.open(sys.argv[1])
        image = image.convert('1')  # binarise to 1-bit black/white
        modifyImage(image.load(), image.mode, image.size)
        image.save(filename + '_thinning.jpg')
    else:
        print('Need one argument for image\'s path')
7314e1133afecf16a34d360a9309c84e2166fe71 | Python | plinx/CodePractices | /Python/Triangle.py | UTF-8 | 2,886 | 3.265625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import sys, random
from math import ceil as ceil
from PyQt4 import QtGui, QtCore
class Triangle(object):
    """Three 2-D points kept sorted by ascending y coordinate."""

    class Point(object):
        """Simple mutable 2-D point."""

        def __init__(self, x, y):
            self.x = x
            self.y = y

    def __init__(self, x1, y1, x2, y2, x3, y3):
        self.p1 = self.Point(x1, y1)
        self.p2 = self.Point(x2, y2)
        self.p3 = self.Point(x3, y3)
        self._sort()

    def _sort(self):
        """Sort p1..p3 in place so that p1.y <= p2.y <= p3.y.

        Fix: removed a leftover Python-2 debug print statement that
        dumped the y coordinates on every construction.
        """
        if self.p3.y < self.p1.y:
            self.p1, self.p3 = self.p3, self.p1
        if self.p2.y < self.p1.y:
            self.p1, self.p2 = self.p2, self.p1
        if self.p3.y < self.p2.y:
            self.p2, self.p3 = self.p3, self.p2
class Window(QtGui.QWidget):
    """Qt widget that rasterises one triangle with two scan-line passes.

    The triangle is split at its middle vertex into a flat-bottom half
    (drawn red) and a flat-top half (drawn blue).
    """

    def __init__(self, width, height):
        self._width = width
        self._height = height
        super(Window, self).__init__()
        self.resize(self._width, self._height)
        self.setWindowTitle('Triangle')
        self.show()

    def paintEvent(self, event):
        self.painter = QtGui.QPainter()
        self.painter.begin(self)
        self.drawTriangle(self.painter)
        self.painter.end()

    def drawTriangle(self, painter):
        """Split a hard-coded triangle at its middle vertex, draw both halves."""
        tri = Triangle(100.0, 300.0, 200.0, 400.0, 300.0, 100.0)
        painter.setBrush(QtGui.QColor(0, 0, 0))
        painter.drawRect(0, 0, self._width, self._height)
        # 'clip' is where the horizontal line through p2 crosses edge p1-p3.
        clip = Triangle.Point(0, 0)
        clip.y = tri.p2.y
        clip.x = tri.p1.x + (clip.y - tri.p1.y) * (tri.p3.x - tri.p1.x) / (tri.p3.y - tri.p1.y)
        print((tri.p2.x, tri.p2.y), (clip.x, clip.y), (tri.p3.x, tri.p3.y))
        self.drawFlatBottomTriangle(painter, tri.p2, clip, tri.p3)
        self.drawFlatTopTriangle(painter, tri.p1, tri.p2, clip)
        painter.setPen(QtGui.QColor(255, 255, 255))
        painter.drawPoint(100, self._height - 100)

    def drawFlatBottomTriangle(self, painter, p1, p2, p3):
        """Scan-fill (red) the half whose bottom edge p1-p2 is horizontal
        and whose apex is p3; clips against the window vertically."""
        if p3.y > self._height:
            top = self._height
        else:
            top = p3.y
        if p1.y < 0:
            bottom = 0
        else:
            bottom = p1.y
        # draw flat bottom triangle
        for y in range(int(bottom), int(top)):
            scan_lx = p1.x + (y - p1.y) * (p3.x - p1.x) / (p3.y - p1.y)
            scan_rx = p2.x + (y - p2.y) * (p3.x - p2.x) / (p3.y - p2.y)
            for x in range(int(scan_lx), int(scan_rx)):
                painter.setPen(QtGui.QColor(255, 0, 0))
                painter.drawPoint(x, self._height - y)

    def drawFlatTopTriangle(self, painter, p1, p2, p3):
        """Scan-fill (blue) the half whose top edge p2-p3 is horizontal
        and whose apex is p1; clips against the window vertically."""
        # Ensure p2 is the left end of the flat edge.
        if p3.x < p2.x:
            p2, p3 = p3, p2
        if p3.y > self._height:
            top = self._height
        else:
            top = p3.y
        if p1.y < 0:
            bottom = 0
        else:
            bottom = p1.y
        # draw flat top triangle
        for y in range(int(bottom), int(top)):
            scan_lx = p1.x + (y - p1.y) * (p2.x - p1.x) / (p2.y - p1.y)
            scan_rx = p1.x + (y - p1.y) * (p3.x - p1.x) / (p3.y - p1.y)
            for x in range(int(scan_lx), int(scan_rx)):
                painter.setPen(QtGui.QColor(0, 0, 255))
                painter.drawPoint(x, self._height - y)
def main():
    """Create the Qt application, show the window and run the event loop."""
    app = QtGui.QApplication(sys.argv)
    win = Window(400, 400)
    sys.exit(app.exec_())
if __name__ == '__main__':
    main()
c6ac818ae6ecb8b02b740e2d4d6e3400c2562238 | Python | roconthebeatz/python_201907 | /day_02/set_04.py | UTF-8 | 1,079 | 4.1875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# Deleting data held inside a set
# - uses the remove and pop methods
numbers = set([1,2,3,4,5,6,7,8,9])
# 1. Using the remove method
# - used to delete one specific element
# - it only deletes (it does not return the removed value)
numbers.remove(5)
print(numbers)
r_value = numbers.remove(8)
print(numbers)
# Because remove does not return the removed element,
# r_value receives None here.
print(f"r_value -> {r_value}")
# A value that is not stored in the set cannot be removed
# (an error is raised — this line is the tutorial's intentional demo
# and stops the script with a KeyError).
numbers.remove(0)
# 2. Deleting data with the pop method
# - used to delete the front-most element
# - removes the front-most element of the set and
#   returns the removed value
r_value = numbers.pop()
print(numbers)
print(f"r_value -> {r_value}")
# 3. Using the clear method
# - deletes every element inside the set
numbers.clear()
print(numbers)
| true |
2ab89b4cff7fdcc63ebe10dbfcfe918b348a9e9f | Python | sdwfrost/apples | /apples/Core.py | UTF-8 | 10,157 | 2.609375 | 3 | [
"MIT"
] | permissive |
class Core:
def __init__(self, tree):
self.tree = tree
def init(self):
self.tree_S_values()
self.tree_R_values()
def dp(self, obs_dist):
self.observed_S_values(obs_dist)
self.observed_R_values()
def dp_frag(self, obs_dist):
self.all_S_values(obs_dist)
self.all_R_values()
def observed_S_values(self, obs_dist):
for node in self.tree.traverse_postorder():
if node.is_leaf():
node.SDd = 0
node.Sd_D = 0
node.Sd_D2 = 0
node.Sd2_D = 0
node.Sd2_D2 = 0
node.SD = obs_dist[node.label]
node.SD2 = node.SD * node.SD
node.S1_D = 1.0 / node.SD
node.S1_D2 = 1.0 / (node.SD * node.SD)
else:
node.SDd, node.Sd_D, node.Sd_D2, node.Sd2_D, node.Sd2_D2, node.SD2, node.SD, node.S1_D, node.S1_D2 = 9 * [
0]
for child in node.children:
node.SDd += child.edge_length * child.SD + child.SDd
node.Sd_D += child.edge_length * child.S1_D + child.Sd_D
node.Sd_D2 += child.edge_length * child.S1_D2 + child.Sd_D2
node.Sd2_D += child.S1_D * child.edge_length * child.edge_length + child.Sd2_D + 2 * child.edge_length * child.Sd_D
node.Sd2_D2 += child.S1_D2 * child.edge_length * child.edge_length + child.Sd2_D2 + 2 * child.edge_length * child.Sd_D2
node.SD2 += child.SD2
node.SD += child.SD
node.S1_D += child.S1_D
node.S1_D2 += child.S1_D2
def observed_R_values(self):
for node in self.tree.traverse_preorder():
if node == self.tree.root:
continue
node.RDd, node.Rd_D, node.Rd_D2, node.Rd2_D, node.Rd2_D2, node.RD2, node.RD, node.R1_D, node.R1_D2 = 9 * [0]
for sibling in node.parent.children:
if sibling != node:
node.RDd += sibling.SD * sibling.edge_length + sibling.SDd
node.Rd_D += sibling.edge_length * sibling.S1_D + sibling.Sd_D
node.Rd_D2 += sibling.edge_length * sibling.S1_D2 + sibling.Sd_D2
node.Rd2_D += sibling.S1_D * sibling.edge_length * sibling.edge_length + sibling.Sd2_D + 2 * sibling.edge_length * sibling.Sd_D
node.Rd2_D2 += sibling.S1_D2 * sibling.edge_length * sibling.edge_length + sibling.Sd2_D2 + 2 * sibling.edge_length * sibling.Sd_D2
node.RD2 += sibling.SD2
node.RD += sibling.SD
node.R1_D += sibling.S1_D
node.R1_D2 += sibling.S1_D2
if node.parent != self.tree.root:
node.RDd += node.parent.RD * node.parent.edge_length + node.parent.RDd
node.Rd_D += node.parent.edge_length * node.parent.R1_D + node.parent.Rd_D
node.Rd_D2 += node.parent.edge_length * node.parent.R1_D2 + node.parent.Rd_D2
node.Rd2_D += node.parent.R1_D * node.parent.edge_length * node.parent.edge_length + node.parent.Rd2_D + 2 * node.parent.edge_length * node.parent.Rd_D
node.Rd2_D2 += node.parent.R1_D2 * node.parent.edge_length * node.parent.edge_length + node.parent.Rd2_D2 + 2 * node.parent.edge_length * node.parent.Rd_D2
node.RD2 += node.parent.RD2
node.RD += node.parent.RD
node.R1_D += node.parent.R1_D
node.R1_D2 += node.parent.R1_D2
def tree_S_values(self):
for node in self.tree.traverse_postorder():
if node.is_leaf():
node.S = 1
node.Sd = 0
node.Sd2 = 0
else:
node.S, node.Sd, node.Sd2 = 3 * [0]
for child in node.children:
node.S += child.S
node.Sd += child.S * child.edge_length + child.Sd
node.Sd2 += child.S * child.edge_length * child.edge_length + child.Sd2 + 2 * child.edge_length * child.Sd
def tree_R_values(self):
for node in self.tree.traverse_preorder():
if node == self.tree.root:
continue
node.R, node.Rd, node.Rd2 = 3 * [0]
for sibling in node.parent.children:
if sibling != node:
node.R += sibling.S
node.Rd += sibling.S * sibling.edge_length + sibling.Sd
node.Rd2 += sibling.S * sibling.edge_length * sibling.edge_length + sibling.Sd2 + \
2 * sibling.edge_length * sibling.Sd
if node.parent != self.tree.root:
node.R += node.parent.R
node.Rd += node.parent.R * node.parent.edge_length + node.parent.Rd
node.Rd2 += node.parent.R * node.parent.edge_length * node.parent.edge_length + node.parent.Rd2 + \
2 * node.parent.edge_length * node.parent.Rd
def validate_edges(self, obs_dist):
for node in self.tree.traverse_postorder():
if node.is_leaf():
if obs_dist[node.label] == -1.0:
node.valid = False
else:
node.valid = True
else:
if sum([i.valid for i in node.children]) == 0:
node.valid = False
else:
node.valid = True
for node in self.tree.traverse_preorder():
if node == self.tree.root:
node.valid = False
elif sum([i.valid for i in filter(lambda x: x != node, node.parent.children)]) == 0 \
and not node.parent.valid:
node.valid = False
def all_S_values(self, obs_dist):
for node in filter(lambda x: x.valid, self.tree.traverse_postorder()):
if node.is_leaf():
node.S = 1
node.Sd = 0
node.Sd2 = 0
node.SDd = 0
node.Sd_D = 0
node.Sd_D2 = 0
node.Sd2_D = 0
node.Sd2_D2 = 0
node.SD = obs_dist[node.label]
node.SD2 = node.SD * node.SD
node.S1_D = 1.0 / node.SD
node.S1_D2 = 1.0 / (node.SD * node.SD)
else:
node.SDd, node.Sd_D, node.Sd_D2, node.Sd2_D, node.Sd2_D2, node.SD2, node.SD, node.S1_D, node.S1_D2 = 9 * [
0]
node.S, node.Sd, node.Sd2 = 3 * [0]
for child in filter(lambda x: x.valid, node.children):
node.S += child.S
node.Sd += child.S * child.edge_length + child.Sd
node.Sd2 += child.S * child.edge_length * child.edge_length + child.Sd2 + 2 * child.edge_length * child.Sd
node.SDd += child.edge_length * child.SD + child.SDd
node.Sd_D += child.edge_length * child.S1_D + child.Sd_D
node.Sd_D2 += child.edge_length * child.S1_D2 + child.Sd_D2
node.Sd2_D += child.S1_D * child.edge_length * child.edge_length + child.Sd2_D + 2 * child.edge_length * child.Sd_D
node.Sd2_D2 += child.S1_D2 * child.edge_length * child.edge_length + child.Sd2_D2 + 2 * child.edge_length * child.Sd_D2
node.SD2 += child.SD2
node.SD += child.SD
node.S1_D += child.S1_D
node.S1_D2 += child.S1_D2
def all_R_values(self):
for node in filter(lambda x: x.valid, self.tree.traverse_preorder()):
node.RDd, node.Rd_D, node.Rd_D2, node.Rd2_D, node.Rd2_D2, node.RD2, node.RD, node.R1_D, node.R1_D2 = 9 * [0]
node.R, node.Rd, node.Rd2 = 3 * [0]
for sibling in filter(lambda x: x.valid and x != node, node.parent.children):
node.R += sibling.S
node.Rd += sibling.S * sibling.edge_length + sibling.Sd
node.Rd2 += sibling.S * sibling.edge_length * sibling.edge_length + sibling.Sd2 + \
2 * sibling.edge_length * sibling.Sd
node.RDd += sibling.SD * sibling.edge_length + sibling.SDd
node.Rd_D += sibling.edge_length * sibling.S1_D + sibling.Sd_D
node.Rd_D2 += sibling.edge_length * sibling.S1_D2 + sibling.Sd_D2
node.Rd2_D += sibling.S1_D * sibling.edge_length * sibling.edge_length + sibling.Sd2_D + 2 * sibling.edge_length * sibling.Sd_D
node.Rd2_D2 += sibling.S1_D2 * sibling.edge_length * sibling.edge_length + sibling.Sd2_D2 + 2 * sibling.edge_length * sibling.Sd_D2
node.RD2 += sibling.SD2
node.RD += sibling.SD
node.R1_D += sibling.S1_D
node.R1_D2 += sibling.S1_D2
if node.parent != self.tree.root and node.parent.valid:
node.R += node.parent.R
node.Rd += node.parent.R * node.parent.edge_length + node.parent.Rd
node.Rd2 += node.parent.R * node.parent.edge_length * node.parent.edge_length + node.parent.Rd2 + \
2 * node.parent.edge_length * node.parent.Rd
node.RDd += node.parent.RD * node.parent.edge_length + node.parent.RDd
node.Rd_D += node.parent.edge_length * node.parent.R1_D + node.parent.Rd_D
node.Rd_D2 += node.parent.edge_length * node.parent.R1_D2 + node.parent.Rd_D2
node.Rd2_D += node.parent.R1_D * node.parent.edge_length * node.parent.edge_length + node.parent.Rd2_D + 2 * node.parent.edge_length * node.parent.Rd_D
node.Rd2_D2 += node.parent.R1_D2 * node.parent.edge_length * node.parent.edge_length + node.parent.Rd2_D2 + 2 * node.parent.edge_length * node.parent.Rd_D2
node.RD2 += node.parent.RD2
node.RD += node.parent.RD
node.R1_D += node.parent.R1_D
node.R1_D2 += node.parent.R1_D2
| true |
bc7436ef41b39ae87e1423d4b9384e35fce8be2b | Python | daihei-u/Melting | /sosu/sosu5t2.py | UTF-8 | 245 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def print_primes(limit=51):
    """Print (and return) the primes below *limit* by trial division.

    2 is printed unconditionally; every odd candidate is then tested
    against the odd divisors below it (sufficient, because an odd
    number has no even divisors).  Default limit preserves the original
    script's range (primes up to 49).
    """
    primes = [2]
    print(2)
    for candidate in range(3, limit, 2):
        is_prime = True
        for divisor in range(3, candidate, 2):
            if candidate % divisor == 0:
                is_prime = False
                break
        if is_prime:
            print(candidate)
            primes.append(candidate)
    return primes


# Preserve the original script behaviour: print 2 and the odd primes < 51.
print_primes()
| true |
889164cb918936bf75e48e3b077fa91974899446 | Python | DocentS/Python | /Homework_11/HW_11.py | UTF-8 | 4,513 | 4 | 4 | [] | no_license | # Задача 2
# Task 2
# Given three sets:
set1 = {1, 2, 3, 4}
set2 = {2, 3, 5, 6}
set3 = {3, 4, 6, 7}
# In a single statement (one line) compute the intersection of these sets
print('Tasks 2:', set.intersection(set1, set2, set3))
# Task 3
# Given three sets:
set1 = {1, 2, 3, 4}
set2 = {2, 3, 5, 6}
set3 = {3, 4, 6, 7}
# In a single statement compute the difference of these sets
set_ = set.difference(set1, set2, set3)
print('Tasks 3:', set.difference(set1, set2, set3))
# Task 4
# Given three sets:
set1 = {1, 2, 3, 4}
set2 = {2, 3, 5, 6}
set3 = {3, 4, 6, 7}
# In a single statement compute the union of these sets
print('Tasks 4:', set.union(set1, set2, set3))
# Task 5
# Add the elements of a list to the given set
sampleSet = {"Yellow", "Orange", "Black"}
sampleList = ["Blue", "Green", "Red"]
for item in sampleList:
    sampleSet.add(item)
print('Tasks 5:', sampleSet)
# Task 6
# Return a new set of the items common to the two given sets
set1 = {10, 20, 30, 40, 50}
set2 = {30, 40, 50, 60, 70}
set_ = set1 & set2
print('Tasks 6:', set_)
# Task 7
# Return a new set with all elements from both sets, duplicates removed
set1 = {10, 20, 30, 40, 50}
set2 = {30, 40, 50, 60, 70}
set_ = set1 | set2
print('Tasks 7:', set_)
# Task 8
# Given two sets, update the first set to keep only the elements that
# exist in the first set but not in the second.
set1 = {10, 20, 30}
set2 = {20, 40, 50}
set1.difference_update(set2)
print('Tasks 8:', set1)
# Task 9
# Remove the elements 10, 20, 30 from the following set
set1 = {10, 20, 30, 40, 50}
set1.difference_update({10, 20, 30})
print('Tasks 9:', set1)
# Task 11
# Check whether the two sets share any elements.
# If yes, display the common elements.
set1 = {10, 20, 30, 40, 50}
set2 = {60, 70, 80, 90, 10}
if len(set1 & set2) > 0:
    # print('Tasks 11:', set1 & set2)
    print('Tasks 11:', set1.intersection(set2))
# Task 12
# Update set 1 by adding the elements of set 2
set1 = {10, 20, 30, 40, 50}
set2 = {60, 70, 80, 90, 10}
set1.update(set2)
print('Tasks 12:', set1)
# Task 13
# Remove from set1 the elements that are not common to set1 and set2
set1 = {10, 20, 30, 40, 50}
set2 = {30, 40, 50, 60, 70}
set1.intersection_update(set2)
print('Tasks 13:', set1)
# Task 14
# Using everything learned about all the data types, refactor the code of
# the nested-list task from lecture 6, shrinking it to a minimum number
# of lines.
# -->> Task 6
# Return a new set of the items common to the two given sets
set1 = {10, 20, 30, 40, 50}
set2 = {30, 40, 50, 60, 70}
# Fix: the original did `set_ = {}` (which builds a dict, not a set) and
# then called `set_.update()` with no arguments (a no-op), so it printed
# an empty dict instead of the intersection.
set_ = {x for x in set1 if x in set2}
print('Tasks 14_6:', set_)
# -->> Task 7
# Return a new set with all elements from both sets, duplicates removed
set1 = {10, 20, 30, 40, 50}
set2 = {30, 40, 50, 60, 70}
set_ = set1.copy()
set_.update([x for x in set2 if x not in set1])
print('Tasks 14_7:', set_)
# -->> Task 11
# Check whether the two sets share any elements.
# If yes, display the common elements.
set1 = {10, 20, 30, 40, 50}
set2 = {60, 70, 80, 90, 10}
if len(set1 & set2) > 0:
    print('Tasks 14_11:', [x for x in set1 if x in set2])
# -->> Task 12
# Update set 1 by adding the elements of set 2
set1 = {10, 20, 30, 40, 50}
set2 = {60, 70, 80, 90, 10}
set1.update([x for x in set2 if x not in set1])
print('Tasks 14_12:', set1)
| true |
cac4e7813c83661f1648aebd0dde48e3aead50f6 | Python | clusterking/clusterking | /clusterking/scan/__init__.py | UTF-8 | 987 | 2.65625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
""" This module generates a parameter dependent distributions for a selection
of sample points (points in parameter space), called ``spoints`` throughout
the code.
Two classes are defined:
* :class:`~clusterking.scan.Scanner`: A general class, set up with a function
(specified in :meth:`~clusterking.scan.Scanner.set_dfunction`) that depends on
points in parameter space and a set of sample points in this parameter space
(specified via one of the ``set_spoints_...`` methods). The function is then
run for every sample point and the results are written to a
:class:`~clusterking.data.Data`-like object.
* :class:`~clusterking.scan.WilsonScanner`: This is a subclass of
:class:`~clusterking.scan.Scanner` that takes a wilson coefficient in the form
of a :class:`wilson.Wilson` object as first argument.
"""
from clusterking.scan.scanner import Scanner, ScannerResult
from clusterking.scan.wilsonscanner import WilsonScanner, WilsonScannerResult
| true |
046ba8d243210deaba81d2c03cb4851f1dff6b36 | Python | dimitarpg13/PythonFOrDSPFeb2021 | /Python_Shared/workspace/tools/mytools.py | UTF-8 | 4,508 | 3.3125 | 3 | [] | no_license | """
Custom Module with common tools
Copyright (C) 2018-2020 C. Daniel Boschen
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. The name of the author may not be used to endorse or promote
products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import itertools as it
# demonstrating importing a module global variable
x = 5
def disp(my_list, ncol=4, width=20):
    """
    Display list in column format; each successive item in the list is
    placed in the next column (increments by column first, then row).

    Dan Boschen 11/25/2019

    Parameters:
    -----------
    my_list: List

    ncol: integer, optional
        number of columns, default = 4

    width: integer, optional
        column spacing, default = 20

    Returns:
    --------
    None
    """

    def _abrev(name, width):
        # Truncate long entries so each cell fits in its column.
        if len(str(name)) > width - 4:
            return str(name)[:width - 4] + "..."
        else:
            return str(name)

    # ensure string and shorten all items to column width
    my_new_list = [_abrev(i, width) for i in my_list]
    # create a format string for ncol columns spaced by width;
    # result for width = 20 and ncol = 4 is
    # "{:<20}{:<20}{:<20}{:<20}"
    template = "".join(("{:<" + str(width) + "}") * ncol)
    # print using the template; zip_longest pads the last row with "".
    for columns in it.zip_longest(*[iter(my_new_list)] * ncol, fillvalue=""):
        print(template.format(*columns))
def printmd(stringWithMarkdown):
    """
    Render a string containing Markdown as rich output of a code cell.
    """
    # Imported lazily so the module stays importable outside IPython.
    from IPython.display import Markdown, display
    display(Markdown(stringWithMarkdown))
# recursive memory profiler from
# https://code.tutsplus.com/tutorials/understand-how-much-memory-your-python-objects-use--cms-25609
from collections.abc import Mapping, Container # updated to collections.abc CDB 10/19/2020
from sys import getsizeof
def deep_getsizeof(o, ids):
    """Find the memory footprint of a Python object.

    This is a recursive function that drills down a Python object graph
    like a dictionary holding nested dictionaries with lists of lists
    and tuples and sets.

    The sys.getsizeof function does a shallow size only: it counts each
    object inside a container as a pointer regardless of how big it
    really is.

    :param o: the object
    :param ids: set of id()s already visited (pass ``set()`` at the top
        call); shared objects are counted once
    :return: total size in bytes

    ex:
    x = '1234567'
    deep_getsizeof(x, set())
    """
    d = deep_getsizeof
    if id(o) in ids:
        return 0
    r = getsizeof(o)
    ids.add(id(o))
    # Strings are containers of characters but must not be recursed into.
    if isinstance(o, str):
        return r
    if isinstance(o, Mapping):
        # Fix: the original called the Python-2-only o.iteritems(), which
        # raises AttributeError for every dict under Python 3.
        return r + sum(d(k, ids) + d(v, ids) for k, v in o.items())
    if isinstance(o, Container):
        return r + sum(d(x, ids) for x in o)
    return r
def get_attributes(obj):
    # Returns all attributes (excluding methods) of an object and their
    # values as a dictionary.
    # Dan Boschen 10/19/2020
    """Return the non-dunder, non-callable attributes of *obj* as a dict."""
    # De Morgan's Theorem: not (a or b) == not a and not b
    return {i: getattr(obj, i) for i in dir(obj) if not
            (i.startswith("__") or callable(getattr(obj, i)))}
def get_methods(obj):
    # Returns all methods of an object as a list
    # Dan Boschen 10/19/2020
    """Return the names of the non-dunder callable attributes of *obj*."""
    return [i for i in dir(obj) if not
            i.startswith("__") and callable(getattr(obj, i))]
| true |
51b7b604d47914efccb9829e53d73c0d2a1c0e53 | Python | cy69855522/pytorch_geometric | /torch_geometric/transforms/fixed_points.py | UTF-8 | 1,353 | 2.75 | 3 | [
"MIT"
] | permissive |
from __future__ import division
import math
import torch
import numpy as np
class FixedPoints(object):
    r"""Samples a fixed number of :obj:`num` points and features from a point
    cloud.

    Args:
        num (int): The number of points to sample.
        replace (bool, optional): If set to :obj:`False`, samples fixed
            points without replacement. In case :obj:`num` is greater than
            the number of points, duplicated points are kept to a
            minimum. (default: :obj:`True`)
    """

    def __init__(self, num, replace=True):
        self.num = num
        self.replace = replace

    def __call__(self, data):
        num_nodes = data.num_nodes
        if self.replace:
            # With replacement: the same point may be picked twice.
            choice = np.random.choice(num_nodes, self.num, replace=True)
        else:
            # Without replacement: concatenate as many random
            # permutations as needed (so duplicates are minimal when
            # num > num_nodes), then truncate to num indices.
            choice = torch.cat([
                torch.randperm(num_nodes)
                for _ in range(math.ceil(self.num / num_nodes))
            ], dim=0)[:self.num]
        for key, item in data:
            if 'edge' in key:
                # Edge attributes index nodes; re-indexing them here
                # would corrupt the graph, so they are left untouched.
                continue
            if torch.is_tensor(item) and item.size(0) == num_nodes:
                data[key] = item[choice]
        return data

    def __repr__(self):
        return '{}({}, replace={})'.format(self.__class__.__name__, self.num,
                                           self.replace)
| true |
c244000336a3ba29a5257c5a87d727d25dd26a58 | Python | marvinruder/ssd | /SRC/my_eval.py | UTF-8 | 9,918 | 2.640625 | 3 | [
"MIT"
] | permissive | from torchvision import transforms
from utils import *
from PIL import Image, ImageDraw, ImageFont
from datasets import GTSDBDataset
from tqdm import tqdm
from pprint import PrettyPrinter
# Run inference on the GPU when one is available, otherwise on the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Pretty-printer for the final per-class precision/recall report.
pp = PrettyPrinter()

# The 43 traffic-sign class names, in label order.
voc_labels = (
    'speed limit (20)', 'speed limit (30)', 'speed limit (50)',
    'speed limit (60)', 'speed limit (70)', 'speed limit (80)',
    'restriction ends 80', 'speed limit (100)', 'speed limit (120)',
    'no overtaking', 'no overtaking (trucks)',
    'priority at next intersection', 'priority road', 'give way', 'stop',
    'no traffic both ways', 'no trucks', 'no entry', 'danger', 'bend left',
    'bend right', 'bend', 'uneven road', 'slippery road', 'road narrows',
    'construction', 'traffic signal', 'pedestrian crossing',
    'school crossing', 'cycles crossing', 'snow', 'animals',
    'restriction ends', 'go right', 'go left', 'go straight',
    'go right or straight', 'go left or straight', 'keep right',
    'keep left', 'roundabout', 'restriction ends (overtaking)',
    'restriction ends (overtaking (trucks))')

# Class name -> 1-based integer label; 0 is reserved for the background class.
label_map = {name: index for index, name in enumerate(voc_labels, start=1)}
label_map['background'] = 0
# Integer label -> class name (inverse mapping).
rev_label_map = {index: name for name, index in label_map.items()}
# Load model checkpoint
# NOTE(review): torch.load unpickles the *entire* model object, so this only
# works alongside the code base that defined the model class. Path is
# Windows-style and relative to the SRC directory.
checkpoint = '..\\RESOURCES\\trained.pth.tar'
checkpoint = torch.load(checkpoint)  # rebinds `checkpoint` from path string to the loaded dict
start_epoch = checkpoint['epoch'] + 1
print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
model = checkpoint['model']
model = model.to(device)
model.eval()  # switch to evaluation (inference) mode
# Transforms applied to each input image before the forward pass.
# 600x600 matches the "SSD600" input size mentioned in my_evaluate's docstring.
resize = transforms.Resize((600,600))
to_tensor = transforms.ToTensor()
# Channel means/stds - the standard ImageNet normalization constants.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
# Global detection counters, updated in place by my_evaluate():
# `results` holds per-class counts, `all_results` the aggregate counts.
all_results = {}
results = {}
for label in voc_labels:
    results[label] = {}
    results[label]['true_positive'] = 0
    results[label]['false_positive'] = 0
    results[label]['false_negative'] = 0
all_results['true_positive'] = 0
all_results['false_positive'] = 0
all_results['false_negative'] = 0
def box_match(box_1, label_1, box_2, label_2, max_overlap=0.3):
    """
    Check whether two boxes belong together: their Jaccard (IoU) overlap must
    exceed `max_overlap` and both must carry the same class label.

    :param box_1: the first box
    :param label_1: the label of the first box
    :param box_2: the second box
    :param label_2: the label of the second box
    :param max_overlap: IoU threshold above which the boxes count as overlapping
    :return: truthy iff the boxes overlap sufficiently and share a label
    """
    # unsqueeze(0) adds the batch dimension that find_jaccard_overlap expects.
    overlap = find_jaccard_overlap(box_1.unsqueeze(0), box_2.unsqueeze(0))
    # Keep the overlap comparison first so short-circuit semantics (and the
    # returned object) match the original expression exactly.
    return overlap > max_overlap and label_1 == label_2
def my_evaluate(original_image, img_id, annotations, min_score=0.45, max_overlap=0.3, top_k=200, annotate_image=True):
    """
    Detect objects in an image with a trained SSD600, and visualize the results.

    Side effects: increments the module-level `results` (per-class) and
    `all_results` (aggregate) true/false positive/negative counters, and marks
    every matched ground truth by setting annotations['labels'][j] = -1, i.e.
    `annotations` is mutated in place. When `annotate_image` is True, an
    annotated copy of the image is saved to ..\\RESULTS\\<img_id>.png.

    :param original_image: image, a PIL Image
    :param img_id: the identifier of the image, used as file name
    :param annotations: ground truth information on the traffic signs in the image
    :param min_score: minimum threshold for a detected box to be considered a match for a certain class
    :param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via Non-Maximum Suppression (NMS)
    :param top_k: if there are a lot of resulting detection across all classes, keep only the top 'k'
    :param annotate_image: boolean variable indicating whether annotated images shall be written to a file
    """
    # Transform: resize to the network input size, convert to tensor, normalize
    image = normalize(to_tensor(resize(original_image)))
    # Move to default device
    image = image.to(device)
    # Forward prop.
    predicted_locs, predicted_scores = model(image.unsqueeze(0))
    # Detect objects in SSD output
    det_boxes, det_labels, det_scores = model.detect_objects(predicted_locs, predicted_scores, min_score=min_score,
                                                             max_overlap=max_overlap, top_k=top_k)
    # Move detections to the CPU
    det_boxes = det_boxes[0].to('cpu')
    # Transform to original image dimensions (boxes are scaled by image
    # width/height here, so they are presumably in fractional coordinates)
    original_dims = torch.FloatTensor(
        [original_image.width, original_image.height, original_image.width, original_image.height]).unsqueeze(0)
    det_boxes = det_boxes * original_dims
    # Decode class integer labels
    det_labels = [rev_label_map[l] for l in det_labels[0].to('cpu').tolist()]
    det_scores = det_scores[0].to('cpu').tolist()
    # Annotate - drawing happens directly on the original PIL image object
    annotated_image = original_image
    # A sole 'background' label means the detector found nothing in the image.
    if det_labels != ['background']:
        for i in range(det_boxes.size(0)):
            # Create an image showing the detected traffic signs, if requested
            if annotate_image:
                draw = ImageDraw.Draw(annotated_image)
                font = ImageFont.truetype("..\\RESOURCES\\Helvetica.ttf", 16)
                # Boxes
                box_location = det_boxes[i].tolist()
                draw.rectangle(xy=box_location, outline='#ff0000')
                draw.rectangle(xy=[l + 1. for l in box_location], outline='#ff0000') # a second rectangle at an offset of 1 pixel to increase line thickness
                # Text: class name and confidence score on a red background
                text = det_labels[i].upper() + ' ' + str(det_scores[i])
                text_size = font.getsize(text)
                text_location = [box_location[0] + 2., box_location[1] - text_size[1]]
                textbox_location = [box_location[0], box_location[1] - text_size[1], box_location[0] + text_size[0] + 4.,
                                    box_location[1]]
                draw.rectangle(xy=textbox_location, fill='#ff0000')
                draw.text(xy=text_location, text=text, fill='white', font=font)
                del draw
            # For every detection, see whether it matches a ground truth
            match = False
            for j in range(len(annotations['boxes'])):
                if annotations['labels'][j] == -1: # this is being set when a ground truth already matched
                    continue
                match = box_match(det_boxes[i],
                                  det_labels[i],
                                  torch.Tensor(annotations['boxes'][j]),
                                  rev_label_map[annotations['labels'][j]])
                if match:
                    # Consume this ground truth so it cannot match twice.
                    annotations['labels'][j] = -1
                    break
            if match: # true positive if the detection is correct and matched a ground truth
                all_results['true_positive'] += 1
                results[det_labels[i]]['true_positive'] += 1
            else: # false positive if the detection did not match a ground truth
                all_results['false_positive'] += 1
                results[det_labels[i]]['false_positive'] += 1
    if annotate_image:
        annotated_image.save('..\\RESULTS\\' + img_id + '.png')
    # After all detections were checked, let us see whether the detector missed something
    for label in annotations['labels']:
        if label == -1: # This is set after a detection matched this ground truth
            continue
        # false negative if we reach this line, since the ground truth object was not found
        results[rev_label_map[label]]['false_negative'] += 1
        all_results['false_negative'] += 1
if __name__ == '__main__':
    # Root folder of the data set (Windows-style relative path).
    path = '..\\DATASET\\'
    # Find IDs of images in the test data
    with open(os.path.join(path, 'test.txt')) as f:
        ids = f.read().splitlines()
    # Evaluate and annotate: each call updates the global counters and writes
    # an annotated image to ..\RESULTS\<id>.png.
    for img_id in tqdm(ids):
        annotations = parse_annotation(path + 'Annotations\\' + img_id + '.xml')
        original_image = Image.open(path + img_id + '.ppm', mode='r')
        original_image = original_image.convert('RGB')
        my_evaluate(original_image, img_id, annotations, annotate_image=True)
    # Calculate precision and recall
    precision = {}
    recall = {}
    # NOTE(review): these two lines raise ZeroDivisionError when the test set
    # yields no detections (or contains no ground-truth objects) at all.
    precision['ALL'] = all_results['true_positive'] / (all_results['true_positive'] + all_results['false_positive'])
    recall['ALL'] = all_results['true_positive'] / (all_results['true_positive'] + all_results['false_negative'])
    for label in voc_labels:
        # Precision
        if results[label]['true_positive'] + results[label]['false_positive'] > 0: # check for detections
            precision[label] = results[label]['true_positive'] / (results[label]['true_positive'] + results[label]['false_positive'])
        else:
            precision[label] = 'No detections'
            # Distinguish "missed everything" from "class absent from test set".
            if results[label]['false_negative'] == 0:
                precision[label] = 'No detections, but also no signs in test set'
        # Recall
        if results[label]['true_positive'] + results[label]['false_negative'] > 0: # check for ground truth objects
            recall[label] = results[label]['true_positive'] / (results[label]['true_positive'] + results[label]['false_negative'])
        else:
            recall[label] = 'No signs in test set'
    # Print results
    print('PRECISION')
    pp.pprint(precision)
    print()
    print('RECALL')
    pp.pprint(recall)
| true |
8fc87fa7ad49d3c292042d391bbf77daa8f6b537 | Python | anishpai/Project-Euler | /P9.py | UTF-8 | 235 | 3.796875 | 4 | [] | no_license | #PROJECT EULER
# P9: Product of the Pythagorean triplet a, b, c with a + b + c = 1000.


def pythagorean_triplet_product(total=1000):
    """Return (a * b * c, (a, b, c)) for the Pythagorean triplet with
    a < b < c and a + b + c == total, or None if no such triplet exists.

    The `total` parameter generalizes the original hard-coded 1000.
    Enforcing a < b fixes the original brute force, which reported the
    same triplet twice (once as (a, b) and once as (b, a)).
    """
    # a is the smallest leg, so it can be at most total // 3.
    for a in range(1, total // 3 + 1):
        # b starts above a so each triplet is found exactly once; it stays
        # below (total - a) / 2 so that c = total - a - b remains > b.
        for b in range(a + 1, (total - a) // 2 + 1):
            c = total - a - b
            if a * a + b * b == c * c:
                return a * b * c, (a, b, c)
    return None


if __name__ == '__main__':
    product, triplet = pythagorean_triplet_product(1000)
    print(product)
    print(*triplet)
| true |
ffae8d320c34517ad5a6772a0dbc39fc66da0e8d | Python | slimmilan/ds | /TestPython/HelloWorld.py | UTF-8 | 367 | 3.34375 | 3 | [] | no_license | print("Hello \"World\" 'Double'")
# Demonstrations of Python string literals, escaping, concatenation and indexing.
# Single-quoted string: embedded single quotes must be escaped; double quotes need not be.
print('Hello "World" \'Single\'')
# Raw string (r"..."): backslashes are kept literally, no escape processing.
print(r"/root/find is also D:\user\Administrator\find")
# Triple-quoted literal: may span several lines; the line breaks are preserved.
print('''this is the
way we print our output
early in the morning''')
# Three ways to join the words: explicit +, adjacent literals, print's comma
# (the comma inserts a separating space).
print('hello' + " world")
print('hello' " world")
print('hello', "world")
h = 'hello'
w = " world"
print(h + w)  # + also concatenates string *variables*
#print(h w) #doesn't work like this - adjacent *names* (unlike literals) are a syntax error
print(h[1])  # strings are indexable: prints 'e'
print(w[-1]) | true |
6b19b63d249326f568927731d68b95caf2c668e8 | Python | DerekK01/Python-Data-Handling | /Part2.py | UTF-8 | 3,121 | 3.4375 | 3 | [] | no_license |
import matplotlib.pyplot as plt
import numpy as np
import sys
import pandas as pd
# Part 2: read the per-device usage table from myhouse.csv and produce three
# charts: total power per device, hours of use per device, and power per hour.
#
# File layout: line 1 is "residents:<n>"; every following line is
# "<device>:<rated power>,<h0>,...,<h23>" (24 hourly usage values).

# Input file - `with` guarantees the handle is closed even if reading fails
# (the original left the file open until the very end of the script).
with open('myhouse.csv') as myhouse_file:
    myhouse_string = myhouse_file.read()

# Separation: split the residents header off from the device table.
device_data = myhouse_string.split("\n", 1)[1]
residents = myhouse_string.split("\n", 1)[0].split(":")[1]
print(device_data)
print(residents)

amount_of_devices = device_data.count('\n') + 1
print(amount_of_devices)

# Flatten the table into tokens and reshape:
# 26 columns = device name, rated power, 24 hourly usage values.
device_list = device_data.replace(':', ',').replace('\n', ',').split(',')
device_array = np.array(device_list).reshape(amount_of_devices, 26)
print(device_array)

# Per-device aggregates. (Two further accumulator lists built by the original
# loop - add_array and add_array2 - were never read afterwards; removed.)
add_device = []      # device names
add_totalpower = []  # rated power * hours in use (Watt * Hr)
add_hour = []        # hours in use per day
for i in range(amount_of_devices):
    total_device_usage = 0
    for x in range(24):
        total_device_usage += float(device_array[i, x + 2])
    add_device.append(device_array[i, 0])
    add_totalpower.append(float(device_array[i, 1]) * total_device_usage)
    add_hour.append(total_device_usage)

device_power_df = pd.DataFrame({'Device' : add_device, 'Total_Power' : add_totalpower})
sorted_device_power_df = device_power_df.sort_values(by=['Total_Power'])
print(sorted_device_power_df)

# Plotting the first graph: total power per device, sorted ascending.
# BUG FIX: the original did `plt.style.use = 'default'`, which *overwrites*
# the style-selection function with a string instead of calling it; the call
# is now made (before plotting, where a style selection takes effect).
plt.style.use('default')
sorted_device_power_df.plot.barh(x='Device', y='Total_Power', title='Device Power Usage In A Day').set(xlabel='Total Power used (Watt * Hr)', ylabel='Device')
plt.tight_layout()
plt.legend(loc="right", fancybox=True, shadow=True)
plt.savefig("Device power usage in a day.png")
plt.show()

# Plotting second graph: hours of use per device.
plt.xlabel('Device')
plt.ylabel('Hour used')
plt.title('Times used for each device')
plt.bar(add_device, add_hour)
plt.xticks(rotation=80)
plt.tight_layout()
plt.savefig("Times used for each device.png")
plt.show()

# Sum vertically: total power drawn by all devices during each hour of the day.
time_array = ["12:00AM", "1:00AM", "2:00AM", "3:00AM", "4:00AM", "5:00AM",
              "6:00AM", "7:00AM", "8:00AM", "9:00AM", "10:00AM", "11:00AM",
              "12:00PM", "1:00PM", "2:00PM", "3:00PM", "4:00PM", "5:00PM",
              "6:00PM", "7:00PM", "8:00PM", "9:00PM", "10:00PM", "11:00PM"]
power_per_hour_array = []
for i in range(24):
    total_usage_per_hour = 0
    for x in range(amount_of_devices):
        total_usage_per_hour += float(device_array[x, 1]) * float(device_array[x, i + 2])
    power_per_hour_array.append(total_usage_per_hour)

# Plot the third graph: power per hour as a line chart.
print(power_per_hour_array)
plt.xlabel('Hour')
plt.ylabel('Power Usage')
plt.title('Power used per hour')
plt.plot(time_array, power_per_hour_array)
plt.xticks(rotation=90)
plt.tight_layout()
plt.savefig("power used per hour.png")
plt.show()
| true |
2c5e46073f032314571af5a62ab6e45af7ea168e | Python | urdaraluca/Wine-recognition---Classification | /linearClassification.py | UTF-8 | 1,610 | 2.90625 | 3 | [] | no_license | import numpy as np
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns # visualization
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
# Load the raw UCI wine CSV (no header row: column 0 is the class label,
# followed by 13 chemical features).
data = pd.read_csv('wine.data', header=None)
# data = pd.read_csv('wine.names')
# Assign human-readable column names (class label first, then the features).
data.columns = [ 'name'
,'alcohol'
,'malicAcid'
,'ash'
,'ashalcalinity'
,'magnesium'
,'totalPhenols'
,'flavanoids'
,'nonFlavanoidPhenols'
,'proanthocyanins'
,'colorIntensity'
,'hue'
,'od280_od315'
,'proline'
]
# NOTE(review): `classes` is defined but never used below.
classes=['Wine 1','Wine 2','Wine 3']
print('\n')
#pandas - print first three instances
print(data.head(3)) #check out the data
print('\n')
#pandas - print statistical data
print(data.describe())
# Reload the same data through scikit-learn to obtain numeric feature/target arrays.
wine = datasets.load_wine()
x = wine.data
y = wine.target
# choose 2 features: columns 6 and 7 of the feature matrix
# (presumably flavanoids / nonflavanoid phenols in sklearn's order - TODO confirm)
X=x[:,6:8]
# 70/30 stratified split so class proportions are preserved in both sets.
x_train, x_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=1,stratify=y)
# Linear-kernel support vector classifier on the two chosen features.
classification=svm.SVC(kernel='linear')
classification.fit(x_train,y_train)
linear = classification.predict(x_test)
print(linear)
print(confusion_matrix(y_test,linear))
target=wine.target_names
print(classification_report(y_test,linear,target_names=target))
| true |