blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
f0a263b091ed6090b372fcf82e7a700a8247a29d | Python | MGSE97/NAVY | /Hopfield/HopfieldTest.py | UTF-8 | 2,061 | 2.71875 | 3 | [] | no_license | import numpy as np
from Hopfield.Hopfield import HopfieldNet
from Hopfield.HopfieldPatterns import patterns5x5, destroy, prepare_data, convert_data, Pattern, Row
# Prepare network and save patterns
# A 5x5 Hopfield network memorises every reference pattern up front.
net = HopfieldNet(5, 5)
for p in patterns5x5:
    net.save(prepare_data(p.data))
# Sync repair, stopped by halucination
# Loop forever recovering corrupted patterns until the network returns
# something that matches none of the stored patterns (a "hallucination").
print("Synchronous repair")
#for p in np.random.choice(patterns5x5, 4):
halucination = False
i = 0
g_success = 0.0
while not halucination:
    # get random pattern
    pattern = np.random.choice(patterns5x5)
    # destroy N parts
    destroyed = destroy(pattern, 5)
    # fix pattern (synchronous update mode is the recover() default here)
    fixed = Pattern(convert_data(net.recover(prepare_data(destroyed.data))))
    # compute difference to original (1 where cells match, 0 where they differ)
    result = (fixed.data == pattern.data).astype(int)
    # save success rate (fraction of matching cells; 1.0 means perfect recovery)
    success = np.average(result)
    if success == 1:
        g_success += 1
    # detect halucination: recovered pattern equals none of the stored ones
    halucination = not np.any([np.all(fixed.data == s.data) for s in patterns5x5])
    # print results
    print(i, halucination, success,
          "\nDest Fixed Origo Diff",
          '\n' + str(Row([destroyed, fixed, pattern, Pattern(result)])))
    i += 1
# Async repair, stopped by runtime
# Same experiment with asynchronous updates, capped at 100 iterations.
print("Asynchronous repair")
halucination = False
j = 0
g_success2 = 0.0
while not halucination and j < 100:
    pattern = np.random.choice(patterns5x5)
    destroyed = destroy(pattern, 5)
    # Second argument False presumably selects asynchronous recovery — TODO confirm
    fixed = Pattern(convert_data(net.recover(prepare_data(destroyed.data), False)))
    result = (fixed.data == pattern.data).astype(int)
    success = np.average(result)
    if success == 1:
        g_success2 += 1
    halucination = not np.any([np.all(fixed.data == s.data) for s in patterns5x5])
    print(j, halucination, success,
          "\nDest Fixed Origo Diff",
          '\n' + str(Row([destroyed, fixed, pattern, Pattern(result)])))
    j += 1
# Both loops always run at least one iteration, so i and j are >= 1 here.
print("Halucination Sync: {}".format(i))
print("Average success Sync: {:.2%}".format(g_success/i))
print("Average success Async: {:.2%}".format(g_success2/j))
#print("Halucinations Sync: {}, Async: {}".format(i, j))
| true |
7f84e05e4d6502bbe7483f5d1ea238f21c12975d | Python | choonkiattay/Tensorflow_Image_Classification | /lenet5.py | UTF-8 | 2,176 | 2.796875 | 3 | [] | no_license | import os
import numpy as np
import tensorflow as tf
from tensorflow import keras as tfk
def dataset_prep():
    """Load MNIST and return (train_data, train_label, test_data, test_label).

    Images come back shaped (n, 28, 28, 1) with raw 0-255 pixel values;
    labels are int32 numpy arrays.
    """
    (data_train, label_train), (data_test, label_test) = tf.keras.datasets.mnist.load_data()
    # Add the trailing channel axis expected by Conv2D.
    train_data = data_train.reshape(data_train.shape[0], 28, 28, 1)
    test_data = data_test.reshape(data_test.shape[0], 28, 28, 1)
    train_label = np.asarray(label_train, dtype=np.int32)
    test_label = np.asarray(label_test, dtype=np.int32)
    return train_data, train_label, test_data, test_label
def lenet5():
    """Build and compile a LeNet-5-style CNN for 28x28x1 inputs and 10 classes."""
    model = tfk.models.Sequential()
    model.add(tfk.layers.Conv2D(filters=6, kernel_size=(5, 5), activation=tf.nn.relu, input_shape=(28, 28, 1)))
    model.add(tfk.layers.MaxPool2D(pool_size=(2, 2), strides=2))
    model.add(tfk.layers.Conv2D(filters=16, kernel_size=(5, 5), activation=tf.nn.relu))
    model.add(tfk.layers.MaxPool2D(pool_size=(2, 2), strides=2))
    model.add(tfk.layers.Flatten())
    model.add(tfk.layers.Dense(120, activation=tf.nn.relu))
    model.add(tfk.layers.Dense(84, activation=tf.nn.relu))
    model.add(tfk.layers.Dense(10, activation=tf.nn.softmax))
    # Sparse losses/metrics because labels are integer class ids, not one-hot.
    model.compile(optimizer=tfk.optimizers.Adadelta(),
                  loss=tfk.losses.sparse_categorical_crossentropy,
                  metrics=[tfk.metrics.sparse_categorical_accuracy])
    return model
if __name__ == '__main__':
    # Create checkpoint callback: keep weights only, and only the best epoch.
    checkpoint_path = "/home/Tensorflow_Image_Classification/training_1/lenet5.ckpt"
    checkpoint_dir = os.path.dirname(checkpoint_path)
    cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, save_weights_only=True, verbose=1,
                                                     save_best_only=True)
    train_data, train_label, test_data, test_label = dataset_prep()
    model = lenet5()
    model.summary()
    print("Network Training Start")
    # NOTE(review): pixels are fed in as raw 0-255 values (normalisation is
    # commented out in dataset_prep) — confirm that is intended.
    model.fit(train_data, train_label, epochs=10, validation_data=(test_data, test_label), callbacks=[cp_callback])
    print("Network Training Finish\n")
    print("Network Evaluation")
    model.evaluate(test_data, test_label)
| true |
401d4c86cfdb58221feb62398b421d585c0a000b | Python | bnajafi/Scientific_Python_Assignments_POLIMI_EETBS | /Assignment 8 Pandas C Deadline Nov 28th 2017/Assignment8_Riccardi/Assignment8_Riccardi.py | UTF-8 | 1,326 | 2.59375 | 3 | [] | no_license | #assignment 8 - Riccardi
import os
import numpy as np
import pandas as pd
import scipy as sp
# NOTE(review): in Python 3 this literal is a SyntaxError — "\U" begins a
# unicode escape; the path should be a raw string (r"C:\Users\..."). Left as-is.
os.chdir("C:\Users\Luca\Desktop\Luca\PoliMinchia\Primo anno\Energy and environmental technologies\Assignments\RLFmethod")
# Window definitions; the first CSV column becomes the index.
DF_Windows = pd.read_csv("windows.csv",sep=";",index_col= 0)
# Accumulator for total irradiance per window, filled in the loop below.
TotalIrr_Values=[]
def pxi_finder(latitude, direction):
    """Return the total irradiance (beam + diffuse) for a window facing
    `direction`, linearly interpolated at `latitude` from the two lookup
    tables BeamIrradiance.csv / DiffuseIrradiance.csv in the working dir.

    Fixes: `sp.interp` was only a re-exported alias of NumPy's interp and was
    removed from SciPy; `Index.get_values()` was removed from pandas. Call
    `np.interp` directly and convert columns with `np.asarray` instead —
    behaviour is unchanged on the old library versions too.
    """
    DF_Beam = pd.read_csv("BeamIrradiance.csv", sep=";", index_col=0)
    DF_Diffuse = pd.read_csv("DiffuseIrradiance.csv", sep=";", index_col=0)
    # Column headers are latitude values stored as text -> cast for interpolation.
    beam_latitudes = np.asarray(DF_Beam.columns).astype(np.int32, copy=False)
    ED = np.interp(latitude, beam_latitudes, DF_Beam.loc[direction])
    diffuse_latitudes = np.asarray(DF_Diffuse.columns).astype(np.int32, copy=False)
    Ed = np.interp(latitude, diffuse_latitudes, DF_Diffuse.loc[direction])
    TotalIrradiance = ED + Ed
    return TotalIrradiance
Latitude= 45
# Interpolate the total irradiance for each window's facing direction.
for direction in DF_Windows["Direction"].tolist():
    TotalIrr=pxi_finder(Latitude,direction)
    TotalIrr_Values=np.append(TotalIrr_Values,TotalIrr)
# PXI = total irradiance scaled by each window's transmission factor Tx.
PXI_Values=TotalIrr_Values*DF_Windows["Tx"]
DF_Windows["PXI"]=PXI_Values
DF_Windows.to_csv("windows_completed_withPXI.csv",sep=";")
| true |
834827e085e80fb369ebe1307ad3051515d95d3d | Python | MaximeGoyette/projecteuler | /020.py | UTF-8 | 73 | 2.640625 | 3 | [] | no_license | print sum([int(x) for x in str(reduce(lambda a, b: a*b, range(1, 101)))]) | true |
f3d59b10eef8833f7f942c1ab863bd15751554e5 | Python | vladimir-popov-dev/Lesson_5 | /divisor_master.py | UTF-8 | 3,967 | 4.25 | 4 | [
"MIT"
] | permissive | from math import sqrt
# 1) проверка числа на простоту (простые числа - это те числа у которых делители единица и они сам
def isprime(x):
    """Return True if x is prime, False otherwise (prints and returns None on
    non-numeric input, matching the original error contract).

    Bug fixes:
    - trial division now runs up to int(sqrt(x)) *inclusive*; the old cap of
      int(sqrt(x)) exclusive missed squares of primes (isprime(121) was True);
    - 0 and negative numbers are no longer reported as prime (the old empty
      range made any x < 2 fall through to True except the special-cased 1).
    """
    try:
        x = int(x)
        if x < 2:
            return False
        for i in range(2, int(sqrt(x)) + 1):
            if x % i == 0:
                return False
        return True
    except ValueError:
        print('Ошибка входных данных.Это не число.')
# 2) выводит список всех делителей числа;
def all_divisors(x):
    """Return every divisor of x in ascending order (empty list for x < 1).

    Improvement: divisors are collected in complementary pairs up to sqrt(x),
    turning the scan from O(n) into O(sqrt(n)); the returned list is
    identical to the old full range scan.
    """
    try:
        if x < 1:
            # The original range(1, x + 1) was empty here and yielded [].
            return []
        small, large = [], []
        for i in range(1, int(sqrt(x)) + 1):
            if x % i == 0:
                small.append(i)
                partner = x // i
                if partner != i:
                    large.append(partner)
        # Small divisors ascend; their partners descend, so reverse them.
        return small + large[::-1]
    except ValueError:
        print('Ошибка входных данных.Это не число.')
# 3) выводит самый большой простой делитель числа.
def superior_prime_divisor(x):
    """Return the largest prime divisor of x (x itself when x is prime).

    Returns None when no candidate in (1, x + 1] divides x — e.g. for x <= 1.
    """
    try:
        candidate = x + 1
        # Walk downwards; the first prime divisor found is the largest one.
        while candidate > 1:
            if x % candidate == 0 and isprime(candidate):
                return candidate
            candidate -= 1
    except ValueError:
        print('Ошибка входных данных.Это не число.')
# функция выводит каноническое разложение числа на простые множители;
def decomposition(x):
    """Return the canonical prime factorisation of x as a list, with each
    prime repeated per its multiplicity (e.g. 12 -> [2, 2, 3]).

    Bug fixes: the old code capped trial division at sqrt of the *original*
    value for large inputs and never appended a remaining prime cofactor, so
    factors above the cap were silently lost; it also drifted into float
    arithmetic via `x /= i`. Standard integer trial division fixes both.
    """
    try:
        x = int(x)
        factors = []
        divisor = 2
        while divisor * divisor <= x:
            while x % divisor == 0:
                factors.append(divisor)
                x //= divisor
            divisor += 1
        if x > 1:
            # Whatever remains above sqrt of the reduced value is prime.
            factors.append(x)
        return factors
    except ValueError:
        print('Ошибка входных данных.Это не число.')
# функция выводит самый большой делитель (не обязательно простой) числа (кроме самого числа).
def superior_divisor(x):
    """Return the largest divisor of x excluding x itself.

    Note: raises IndexError for x == 1 (only one divisor exists), matching
    the original behaviour.
    """
    try:
        divisors = sorted(all_divisors(x))
        # The last entry is x itself; the one before it is what we want.
        return divisors[-2]
    except ValueError:
        print('Ошибка входных данных.Это не число.')
if __name__ == '__main__':
    # Module is meant to be imported; no demo code here.
    pass
    # for tests.
b5491f49477e786de48476f2351d4df9a628c303 | Python | matSciMalcolm/citrine | /complete/magpie_loader_4.py | UTF-8 | 6,928 | 3.21875 | 3 | [] | no_license |
## magpie_loader v4.0 ##
# Version Notes: (December 28th, 2017) This version of the loader normalizaes the data by using scikit-learn's MinMax Scaler found in the preprocessing module.
# This program loads, normalizes, and stadardizes data stored as .csv and exports tuples for use in a neural network.
# This file is a modified version of Michael A. Nielsen's "data_loader" used in "Neural Networks and Deep Learning", Determination Press, 2015.
# The origional file can be found at https://github.com/mnielsen/neural-networks-and-deep-learning.git
# The modified verisions developed by Malcolm Davidson can be found at https://github.com/davidsonnanosolutions/citrine.git
##
#### Libraries
# Standard libraries
import random
import ast
# Third-party libraries
import numpy as np
import pandas as pd
from sklearn import preprocessing
# Global Declerations - store parmeters about the begin and end indexes for usable data within each dataframe
# NOTE(review): `global` at module scope is a no-op; these names are ordinary
# module globals either way.
global training_data_start
global training_data_end
global results_data_position
global test_data_start
global test_data_end
# Column slice boundaries used when converting the dataframes to numpy arrays.
training_data_start, training_data_end, results_data_position, test_data_start, test_data_end = [2,-1,-1, 2, 0]
## load_data() function ##
# This function loads traing and test data based on the passed file path.
# The data is normalized and a subset of the training data is set aside as
# validation data. The function returns three (x,y) tuples composed of numpy
# arrays of data (x) and stability vectors (v).
##
def load_data(filePath):
    """Load, normalize and split the data; return (input, validation, test)
    tuples of (features, stability-vector) numpy arrays.

    NOTE(review): uses `xrange`, so this module targets Python 2.
    """
    # File I/O for importing and formatting training data and creating validation data
    with open(filePath, 'rb') as f:
        # Load the training-data.csv file into a (2572,99) pandas dataframe 'raw_df'.
        # Store a sub set of the "raw_df" as "data_df" which omits the stabity vector and the element names.
        raw_df = pd.read_csv(f)
        data_df = raw_df.iloc[:,2:-1]
        # Create a temporary datafram "temp_df" to hold normalized version of "data_df".
        # Normalization is accomplished using normalize from scikit-learn's prepreocessing library.
        # "temp_df" has the same column headers as the origional "data_df"
        temp_df = normalize(data_df)
        temp_df.columns = data_df.columns
        # "norm_df" holds the complete normalized dataframe. This means
        # it has the normalized data stored in "temp_df", the element names origionally
        # in "raw_df", and the stability vector.
        norm_df = raw_df.iloc[:,0:2]
        norm_df = norm_df.join(temp_df)
        norm_df = norm_df.join(raw_df.iloc[:,-1])
        # If necessary, the normalized dataframe can be exported to a file for quality control.
        #norm_df.to_csv("/home/spike/citrine/normalized_training_data.csv")
        # Randomly choose 10% of the rows in a non-contiguous fashion to be set aside as validation data.
        # "valIndex" holds n random rows from "norm_df", where n = 0.1*#rows in "norm_df".
        # "validation_df" is the dataframe form of valIndex, sorted and re-indexed.
        valIndex = random.sample(xrange(len(norm_df)),int(round(0.1*len(norm_df))))
        validation_df = pd.DataFrame(norm_df.ix[i,:] for i in valIndex).sort_index(ascending=True)
        validation_df.reset_index()
        # "input_df" is the datafram containg training data to be passed to the network. It is built by
        # removing validation data from the data to become the "training data" and re-index "norm_df" contiguously.
        # The element labels are also removed producing a [1,96] row vector.
        input_df = norm_df.drop(valIndex)
        input_df.reset_index()
        # "input_data" is a (x,y) tuple where x is a float32 np array of training data and y are the associated stability vectors.
        input_data = (input_df.iloc[:,training_data_start:training_data_end].values.astype(np.float32),input_df.iloc[:,results_data_position].values)
        # "validation_data" is a (x,y) tuple where x is a float32 np array of validation data and y are the associated stability vectors.
        validation_data = (validation_df.iloc[:,training_data_start:training_data_end].values.astype(np.float32),validation_df.iloc[:,results_data_position].values)
    # File I/O for importing and formatting test data.
    # NOTE(review): the test path is hard-coded while the training path is a
    # parameter — confirm this asymmetry is intended.
    with open('/home/spike/citrine/test_data.csv', 'rb') as f:
        # Load the test-data.csv file into a (750,99) pandas dataframe 'test_df'.
        # Store a sub set of the "test_df" as "data_df" which omits the stabity vector and the element names.
        test_df = pd.read_csv(f)
        data_df = test_df.iloc[:,2:]
        # See above for descriptions of "test_df" and "norm_df".
        temp_df = normalize(data_df)
        temp_df.columns = data_df.columns
        norm_df = test_df.iloc[:,0:2]
        norm_df = norm_df.join(temp_df)
        # "test_data" is a (x,y) tuple where x is a float32 numpy array of validation data and y are the associated stability vectors.
        test_data = norm_df.iloc[:,test_data_start:].values.astype(np.float32)
    return(input_data, validation_data, test_data)
## load_data_wrapper() function ##
# This function returns (x,y) tuples of data as list of (96,1) numpy column vectors (x) and a list of stability vectors as
# (11,1) column vectors (y) built from .csv files located at the paased file path.
##
def load_data_wrapper(filePath):
    """Return (training, validation, test) data shaped for the network:
    inputs as (96, 1) column vectors, labels as (11, 1) stability vectors."""
    tr_d, va_d, te_d = load_data(filePath)

    def as_columns(rows):
        # Each flat sample becomes a (96, 1) column vector.
        return [np.reshape(row, (96, 1)) for row in rows]

    training_data = zip(as_columns(tr_d[0]), [vectorize(y) for y in tr_d[1]])
    validation_data = zip(as_columns(va_d[0]), [vectorize(y) for y in va_d[1]])
    test_data = as_columns(te_d)
    return (training_data, validation_data, test_data)
## vectorize() function##
# This returns the string version of the stability vector as a (11,1) column vector.
##
def vectorize(d):
    """Parse the string form of a stability vector and return it as an
    (11, 1) numpy column vector.

    Fix: the element loop used `xrange`, which does not exist on Python 3;
    `enumerate` is equivalent on both Python 2 and 3.
    """
    # ast.literal_eval safely interprets the CSV's string-encoded list.
    d = ast.literal_eval(d)
    e = np.zeros((11, 1))
    for index, value in enumerate(d):
        e[index] = value
    return(e)
## normalize() function ##
# Applies the sci-kit learn MinMaxScaler to the passed pandas dataframe
# and returns a normalized pandas dataframe.
##
def normalize(raw_df):
    """Min-max scale every column of raw_df into [0, 1] and return the result
    as a new DataFrame (integer column labels; original headers are dropped)."""
    scaler = preprocessing.MinMaxScaler()
    return pd.DataFrame(scaler.fit_transform(raw_df))
| true |
54d23544c476974ae6efbd186c6f8f0c4379bec7 | Python | gabriellaec/desoft-analise-exercicios | /backup/user_350/ch88_2019_11_27_00_28_07_751460.py | UTF-8 | 247 | 3.015625 | 3 | [] | no_license | class Retangulo:
def __init__(self,p1,p2):
self.p1 = p1
self.p2 = p2
def calcula_perimetro(self):
return (p1.x - p2.x)*2 + (p1.y-p2.y)*2
def calcula_area(self):
return (p1.x - p2.x)*(p1.y-p2.y)
| true |
51f0f8b059655f7a61360c19d67e49c464f84582 | Python | Axess-cheng/openCVLearning | /DetectCircle.py | UTF-8 | 1,665 | 3.15625 | 3 | [] | no_license |
"""
FYP: Image Recognition: translate pictures of directed graphs to textual representations --Patrick Totzke
Detecting Circles in Images using OpenCV and Hough Circles
https://www.pyimagesearch.com/2014/07/21/detecting-circles-images-using-opencv-hough-circles/
"""
# import the necessary packages
# parsing command line arguments
import numpy as np
import argparse
import cv2
# construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--image", required = True, help = "Path to the image")
# args = vars(ap.parse_args())
# load the image, clone it for output, and then convert it to grayscale
# houghCircle nned 8-bit, single channel image
# image = cv2.imread(args["image"])
image = cv2.imread("images/8circles.png")
output = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# cv2.cv.CV_HOUGH_GRADIENT -> cv2.HOUGH_GRADIENT
# the CV_ prefix has been removed with opencv3, and all constants are in the cv2 submodule now
# https://answers.opencv.org/question/177506/cv2-has-no-attribute-cv_hough_gradient/
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.2, 40)
if circles is not None:
circles = np.round(circles[0, :]).astype("int")
# loop over the (x, y) coordinates and radius of the circles
for (x, y, r) in circles:
# draw the circle in the output image, then draw a rectangle
# corresponding to the center of the circle
cv2.circle(output, (x, y), r, (0, 255, 0), 4)
cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
# show the output image
cv2.imshow("output", np.hstack([image, output]))
cv2.waitKey(0) | true |
c35102b07d5a0836222529d54b9ab9be9d37472b | Python | hyunkyeng/TIL | /algorithm/day2/숫자카드.py | UTF-8 | 449 | 3.21875 | 3 | [] | no_license | import sys
# Redirect stdin so input() reads from the bundled test file.
sys.stdin = open("숫자카드_input.txt")
T = int(input())
for test_case in range(T):
    N = int(input())
    # Each digit character of the card string becomes one int.
    L = list(map(int, input()))
    c = [0] * 10
    max_number = c[0]
    index_number = 0
    # Count occurrences of each digit 0-9.
    for i in range(len(L)):
        c[L[i]] += 1
    # Find the most frequent digit; `<=` resolves ties toward the larger digit.
    for j in range(10):
        if max_number <= c[j]:
            max_number = c[j]
            index_number = j
    # NOTE(review): leftover debug print of the raw digit counts.
    print(c)
    print(f'#{test_case+1} {index_number} {max_number}')
| true |
0247591399bc03f90299e992b87b4a235d494248 | Python | Yggdrasill-Moe/Helheim | /超级机器人大战Z2/synopsis_dump.py | UTF-8 | 714 | 2.609375 | 3 | [] | no_license | # -*- coding:utf-8 -*-
# Exports the synopsis text (dumps plot-summary strings from a binary file).
# by Darkness-TX 2023.04.15
import struct
import os
import sys
import io
def byte2int(byte):
    """Decode two bytes as a native-order unsigned short."""
    (value,) = struct.unpack('H', byte)
    return value
def FormatString(string, count):
    """Render one dump entry: a '○id○text' source line paired with an
    identical '●id●text' translation line, separated by a blank line."""
    return "○{0:08d}○{1}\n●{0:08d}●{1}\n\n".format(count, string)
def main():
    """Dump every record of the binary given as argv[1] to '<argv[1]>.txt'."""
    # Open the source binary and measure its total size.
    src = open(sys.argv[1],'rb')
    src.seek(0,os.SEEK_END)
    filesize = src.tell()
    src.seek(0,os.SEEK_SET)
    # Output file is UTF-16 text next to the input file.
    dst = open(sys.argv[1]+'.txt','w',encoding='utf16')
    # Record layout: <uint16 id><uint16 size><size bytes of CP932 text>.
    while src.tell() < filesize:
        num = byte2int(src.read(2))
        size = byte2int(src.read(2))
        # Strip NUL padding, decode Shift-JIS (code page 932), and encode
        # embedded newlines as the '↙' marker used by the dump format.
        string = src.read(size).replace(b'\x00',b'').decode('932').replace('\n','↙')
        dst.write(FormatString(string,num))

if '__main__' == __name__:
    main()
812158a3be8153fb0a28462f222841dfa606e3a3 | Python | a-chil/Covid-stat-bot | /Covid_stat_bot.py | UTF-8 | 1,698 | 3.203125 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
import smtplib
world_URL = 'https://www.worldometers.info/coronavirus/'
Canada_URL = 'https://www.worldometers.info/coronavirus/country/canada/' #Can add any country of choice
class track:
    """Scrape the total case and death counters from a worldometers
    coronavirus page given by URL."""

    def __init__(self, URL):
        soup = BeautifulSoup(requests.get(URL).content, 'html.parser')
        # The page renders its headline numbers as successive
        # 'maincounter-number' divs; the value lives in a child <span>.
        counters = soup.find_all('div', {'class': 'maincounter-number'})
        # First counter is total cases, second is total deaths.
        self.total_count = counters[0].findChildren('span')[0].text
        self.total_deaths = counters[1].findChildren('span')[0].text
# Scrape worldwide and Canadian totals once, at import time.
World = track(world_URL)
Canada = track(Canada_URL)
#Mailing function
def mail():
    """Compose and send the COVID summary e-mail through Gmail's SMTP relay."""
    server = smtplib.SMTP('smtp.gmail.com', 587) #For gmail
    server.ehlo()
    server.starttls()
    server.ehlo()
    server.login('', '') #The user login information will be required
    subject = 'World and Canada COVID-19 Cases'
    # NOTE(review): the backslash continuations keep this a single string
    # literal; indenting the continuation lines would inject spaces into the
    # message body, so they must stay flush-left.
    body = 'As of today Worldwide: \
\nTotal Cases: ' + World.total_count + '\
\nTotal Deaths: ' + World.total_deaths + '\
\n\nAs of today in Canada: \
\nTotal Cases: ' + Canada.total_count + '\
\nTotal Deaths: ' + Canada.total_deaths + '\
\n\nFor more statistics visit: https://www.worldometers.info/coronavirus/'
    message = f"Subject: {subject}\n\n{body}"
    server.sendmail(
        '', #Email sender
        '',
        message #Email reciever (Can be the same as the sender - it will just be a mail to the same person)
    )
    print("Email Sent!")
    server.quit()
| true |
ab68a0dbb89f43a388d93577a7aa51f9ce6e2cd1 | Python | SP8888/metod_sbora_informacii | /HW_1_open_api.py | UTF-8 | 649 | 2.734375 | 3 | [] | no_license | import requests
import json
#"2. Изучить список открытых API. Найти среди них любое, требующее авторизацию (любого типа). " \
#"Выполнить запросы к нему, пройдя авторизацию. Ответ сервера записать в файл."
url = 'https://api.vk.com'
user_id = 7582181
method = 'friends.getOnline'
access_token_my = 'ff8717fc85a825f6f742874ea1b89fc54f08e69816b94244600036ea8e214cb612d74e8bcde848c6507c5'
response = requests.get(f'{url}/method/{method}?v=5.52&user_ids={user_id}&access_token={access_token_my}')
print(response.text) | true |
e682313a0d930574334fb0cec58ce9ab37ac79fa | Python | popovicaco/MinecraftDiscordBot | /cogs/mbti.py | UTF-8 | 5,080 | 2.6875 | 3 | [] | no_license | import discord
import json
from discord.ext import commands
import os
class mbtiCog(commands.Cog):
    """Discord cog running a 32-question Myers-Briggs (MBTI) personality test
    driven by reaction emoji (Likert scale 1-5, ❌ cancels).

    Improvements over the original: the question-embed builder was defined
    twice (once inside `mbti`, once inside `on_reaction_add`) and is now the
    single `_question_embed` helper; the questions file handle was leaked by
    `json.load(open(...))` and is now closed via a context manager; the
    scoring logic is extracted into `_personality_type`. Public behaviour and
    command/listener interfaces are unchanged.
    """

    # Reaction emoji mapped to Likert scores (Agree ... Disagree).
    _SCORES = {'1️⃣': 5, '2️⃣': 2, '3️⃣': 0, '4️⃣': -2, '5️⃣': -5}

    def __init__(self, bot):
        self.bot = bot
        # head/tail of the split cwd re-joined with '/' reproduces the cwd path.
        questions_path = f"{os.path.split(os.getcwd())[0]}/{os.path.split(os.getcwd())[1]}" + "/valuableinfo/mbtiquestions.json"
        with open(questions_path) as questions_file:
            self.questions = json.load(questions_file)
        # (message_id, user_id) -> list of emoji answers for an in-progress test.
        self.answerdict = {}

    def _question_embed(self, n):
        """Build the embed for question n (0-based); shown as '(n+1/32)'."""
        embed = discord.Embed(title="MBTI Test", description=self.questions[n] + ' (' + str(n+1) + '/32)', color=0xf6d0d0)
        embed.set_author(name="Personality Test", icon_url="https://i.pinimg.com/originals/d8/a4/4f/d8a44fba200d17685c8520faf223e36a.gif")
        embed.set_footer(text="1️⃣ - Agree 2️⃣ - Somewhat Agree 3️⃣ - Neither 4️⃣ - Somewhat Disagree 5️⃣ - Disagree ")
        return embed

    def _personality_type(self, answerlist):
        """Tally the recorded answers into the four MBTI axes and return the
        four-letter type ('X' marks an exactly balanced axis).

        Questions alternate polarity: even indices add their score to the
        first pole (E/S/T/J), odd indices subtract toward the second
        (I/N/F/P). Indices 0-7 score E/I, 8-15 S/N, 16-23 T/F, 24+ J/P.
        """
        eiscore = snscore = tfscore = jpscore = 0
        for i, answer in enumerate(answerlist):
            contribution = self._SCORES[answer] if i % 2 == 0 else -self._SCORES[answer]
            if i <= 7:
                eiscore += contribution
            elif i <= 15:
                snscore += contribution
            elif i <= 23:
                tfscore += contribution
            else:
                jpscore += contribution
        ptype = ""
        ptype += "E" if eiscore > 0 else "I" if eiscore < 0 else "X"
        ptype += "S" if snscore > 0 else "N" if snscore < 0 else "X"
        ptype += "T" if tfscore > 0 else "F" if tfscore < 0 else "X"
        ptype += "J" if jpscore > 0 else "P" if jpscore < 0 else "X"
        return ptype

    @commands.command()
    async def mbti(self, ctx):
        '''Myers-Briggs Personality Type Test '''
        message = await ctx.send(embed=self._question_embed(0))
        for emoji in ['1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '❌']:
            await message.add_reaction(emoji)
        # Start a fresh answer list keyed by this test message and its author.
        self.answerdict[(message.id, ctx.message.author.id)] = []

    @commands.Cog.listener()
    async def on_reaction_add(self, reaction, user):
        key = (reaction.message.id, user.id)
        # Ignore reactions that don't belong to an in-progress test by its owner
        # (this also filters out the bot's own seed reactions).
        if key not in self.answerdict:
            return
        if reaction.emoji in ('1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣'):
            answerlist = self.answerdict[key]
            # NOTE(review): with 32 questions this finishes after 30 recorded
            # answers (len+1 < 31) — preserved as-is; confirm intended count.
            if len(answerlist) + 1 < 31:
                await reaction.message.edit(embed=self._question_embed(len(answerlist) + 1))
                self.answerdict[key].append(reaction.emoji)
                await reaction.message.remove_reaction(reaction.emoji, user)
            else:
                ptype = self._personality_type(answerlist)
                await reaction.message.channel.send(f"{user.mention}'s MBTI Type is: {ptype}")
                del self.answerdict[key]
                await reaction.message.delete()
        elif reaction.emoji == '❌':
            # Cancel: forget the session and remove the test message.
            del self.answerdict[key]
            await reaction.message.delete()
def setup(bot):
    # discord.py extension entry point: register the cog when loaded.
    bot.add_cog(mbtiCog(bot))
4b2e52c07ce48e4cd80fc3f83a7ded36942ced87 | Python | ianharris/linear-regression-gradient-descent-numpy | /linear_regression.py | UTF-8 | 2,311 | 3.46875 | 3 | [] | no_license | import numpy as np
import pandas as pd
class Data():
def __init__(self, fpath):
# read in the file
data = pd.read_csv(fpath)
# assign the features
self.features = data[['X1', 'X2']].as_matrix()
# get the number of samples
self.numSamples = np.shape(self.features)[0]
# assign the labels
self.labels = data['y'].as_matrix()
self.labels = np.reshape(self.labels, (self.numSamples, 1))
def hypothesis(theta, features):
    """Linear model prediction: bias (theta[0]) plus features · theta[1:].

    theta is (3, 1) with the bias first; features is (n, 2); returns (n, 1).
    """
    return np.add(theta[0, :], np.matmul(features, theta[1:, :]))
def cost(theta, features, labels):
    """Half the sum of squared errors of the hypothesis against labels."""
    residual = np.subtract(hypothesis(theta, features), labels)
    return 0.5 * np.sum(np.power(residual, 2.0))
def gradient_descent(theta, features, labels, numSamples):
    """Perform one batch gradient-descent update and return the new theta.

    theta is (3, 1) with the bias first; features is (numSamples, 2);
    labels is (numSamples, 1).
    """
    # Learning rate 1e-2 folded together with the 2/m factor of the gradient.
    step = 2 * np.divide(np.power(10.0, -2), np.shape(features)[0])
    error = np.subtract(hypothesis(theta, features), labels)

    def feature_column(k):
        # k-th feature as an (numSamples, 1) column for elementwise products.
        return np.reshape(features[:, k], (numSamples, 1))

    updated = np.matrix([[0.0], [0.0], [0.0]])
    updated[0, :] = theta[0, :] - step * np.sum(error)
    updated[1, :] = theta[1, :] - step * np.sum(np.multiply(error, feature_column(0)))
    updated[2, :] = theta[2, :] - step * np.sum(np.multiply(error, feature_column(1)))
    return updated
def main():
    """Train the linear model on the plane dataset and dump predictions."""
    # read in the data
    data = Data('data/training-plane.csv')
    # initialise theta uniformly in [-1, 1)
    theta = np.subtract(np.multiply(np.random.rand(3, 1), 2), 1)
    # run iterations, logging the cost every 100 steps
    for i in range(2000):
        if i % 100 == 0:
            print('The cost at iteration {} is {}'.format(i, cost(theta, data.features, data.labels)))
        theta = gradient_descent(theta, data.features, data.labels, data.numSamples)
    # calculate a prediction
    y_pred = hypothesis(theta, data.features)
    print('Writing output')
    with open('output.txt', 'w') as f:
        f.write('X1,X2,y,y_pred\n')
        # NOTE(review): np.asscalar is deprecated in newer NumPy (use .item()).
        for i in range(np.shape(data.features)[0]):
            f.write('{},{},{},{}\n'.format(data.features[i,0], data.features[i,1], np.asscalar(data.labels[i]), np.asscalar(y_pred[i])))
    print(np.transpose(theta))

if __name__ == '__main__':
    main()
| true |
93ce8ecfeffef83372b35a1c9680a63d655ef783 | Python | ynikitenko/lena | /lena/core/meta.py | UTF-8 | 1,306 | 3.1875 | 3 | [
"Apache-2.0"
] | permissive | # functions to deal with sequences
import lena.core
from . import lena_sequence
def alter_sequence(seq):
    """Flatten seq, then let elements that provide an `alter_sequence` hook
    rewrite the sequence; return the original object when nothing changed."""
    orig_seq = seq
    seq = flatten(seq)
    changed = False
    if not isinstance(seq, lena_sequence.LenaSequence):
        # an element
        el = seq
        if hasattr(el, "alter_sequence") and callable(el.alter_sequence):
            return el.alter_sequence(el)
        else:
            return orig_seq
    # Iterate from the tail so earlier indices stay valid for the hooks.
    for ind in reversed(range(len(seq))):
        el = seq[ind]
        if hasattr(el, "alter_sequence") and callable(el.alter_sequence):
            new_seq = el.alter_sequence(seq)
            # NOTE(review): `changed` is reset on every hook call, so only the
            # last element's outcome decides the return value — confirm intended.
            if new_seq == seq:
                changed = False
            else:
                changed = True
                # NOTE(review): the recursive result is bound to new_seq but
                # never merged back; `seq` (not new_seq) is returned below.
                new_seq = alter_sequence(new_seq)
    if not changed:
        return orig_seq
    return seq
def flatten(seq):
    """Recursively flatten nested LenaSequences one level of wrapping at a
    time; return seq itself (unchanged) when it is already flat or is a
    single element."""
    if not isinstance(seq, (lena_sequence.LenaSequence, tuple)):
        # seq is a single element, nothing to flatten
        return seq
    items = []
    nested_found = False
    for item in seq:
        if isinstance(item, lena_sequence.LenaSequence):
            items.extend(flatten(item))
            nested_found = True
        else:
            items.append(item)
    # Preserve the original object when no nesting was encountered.
    return items if nested_found else seq
| true |
e083a8d066aa5deed9472590723e6d518511dbdb | Python | petedaws/game-of-life | /test_event.py | UTF-8 | 1,875 | 2.875 | 3 | [] | no_license | import socket
import time
import event
import struct
# NOTE: this module uses Python 2 print statements throughout.
class TimerTest(event.Event):
    # Event subclass used to exercise timer callbacks in the demo below.
    def __init__(self,hellostring='helloworld'):
        event.Event.__init__(self)
        self.hellostring = hellostring
    def hello(self):
        # Periodic callback: prints "<name>: <unix time>".
        print '%s: %f' % (self.hellostring,time.time())
    def stop(self,timer_name):
        # Stop a timer by setting its interval to 0.
        print 'stopping timer: %s' % timer_name
        event.modify_timer(timer_name,interval=0)
    def modify_timer(self,timer_name,new_interval):
        # Reschedule a timer with a new interval (seconds).
        print 'modify timer: %s' % timer_name
        event.modify_timer(timer_name,new_interval)
class SocketTest(event.Event):
    # UDP multicast receiver that translates text commands into event emissions.
    def __init__(self,port=10000):
        event.Event.__init__(self)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(('', port))
        # Join the 224.1.1.1 multicast group on all interfaces.
        mreq = struct.pack("4sl", socket.inet_aton('224.1.1.1'), socket.INADDR_ANY)
        self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    def receive(self):
        # Protocol: 'execute' -> emit 'test'; 'stop_<timer>' -> emit 'stop';
        # 'modifytimer_<timer>_<interval>' -> emit 'modifytimer'.
        message = self.sock.recv(1024)
        if message == 'execute':
            self.emit('test',message)
        if message.split('_')[0] == 'stop':
            self.emit('stop',message.split('_')[1])
        if message.split('_')[0] == 'modifytimer':
            self.emit('modifytimer',message.split('_')[1],float(message.split('_')[2]))
    def send(self,message=''):
        # Demo sink connected to the 'test' signal; just logs the payload.
        print 'socket send: %s' % message
def main():
    # Wire two timers and two sockets together: sock1's commands control the
    # timers and forward 'execute' payloads to sock2's send().
    timer1 = TimerTest('timer1')
    timer2 = TimerTest('timer2')
    sock1 = SocketTest(10001)
    sock2 = SocketTest(10002)
    sock1.connect('test',sock2.send)
    sock1.connect('stop',timer1.stop)
    sock1.connect('stop',timer2.stop)
    sock1.connect('modifytimer',timer1.modify_timer)
    sock1.connect('modifytimer',timer2.modify_timer)
    # timer1 fires every second, timer2 every three seconds.
    event.add_timer(1,timer1.hello,name='timer1')
    event.add_timer(3,timer2.hello,name='timer2')
    event.add_io_watcher(sock1.sock,sock1.receive)
    event.add_io_watcher(sock2.sock,sock2.receive)
    # Blocks forever dispatching timers and socket reads.
    event.mainloop()
main()
| true |
1854cb797cc12dd3e5145e14213c840eb4cff8fa | Python | mateusfugita/GestaoFinanceira | /Lp/Perfil_Usuarios.py | UTF-8 | 3,677 | 2.703125 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
from sklearn.cluster import KMeans
import mysql.connector
import Metodos as metodos
import DAO as dao
# Reset the log file, then keep its lines in memory and rewrite at the end.
log = open('log.txt', 'w')
log.close()
log = open('log.txt', 'r')
conteudo = log.readlines()
log.close()
conteudo.append('Processo iniciado \n')
try:
    # Fetch transactions via the DAO layer; columns come back positionally.
    resultSelect = dao.BuscaDados()
    conteudo.append('Dados obtidos com sucesso \n')
    resultSelect = resultSelect.rename(columns={0: 'Data', 1: 'Valor', 2:'Id_Usuario', 3:'Id_Perfil'})
    print(resultSelect)
    ListaDatas = list(resultSelect['Data'])
    ListaUsuarios = list(resultSelect['Id_Usuario'])
    print(ListaDatas)
    try:
        # Rebuild the frame so 'Data' becomes the last column and the user id
        # is excluded from the clustering features.
        print(ListaDatas)
        resultSelect = resultSelect.drop('Data', axis=1)
        resultSelect = resultSelect.drop('Id_Usuario', axis=1)
        resultSelect['Data'] = ListaDatas
        x = np.array(resultSelect)
        print(resultSelect)
        print(x)
        conteudo.append('Treino preparado \n')
    # NOTE(review): bare except hides the real failure; catch Exception and log it.
    except:
        conteudo.append('Falha ao preparar os dados \n')
    try:
        # Three behavioural clusters; fixed random_state for reproducibility.
        kmeans = KMeans(n_clusters=3, random_state=0)
        kmeans.fit(x)
        conteudo.append('Treino executado com sucesso \n')
        print(kmeans.labels_)
        print(type(kmeans.labels_))
    except:
        conteudo.append('Erro ao realizar o treino \n')
    # Map numeric cluster ids to profile names.
    lista = list(kmeans.labels_)
    lis = []
    for i in lista:
        if i == 0:
            lis.append("Conservador")
        if i == 1:
            lis.append("Moderado")
        if i == 2:
            lis.append("Consumista")
    resultSelect["Categoria"] = lis
    resultSelect['Id_Usuario'] = ListaUsuarios
    print(resultSelect)
    Categoria = []
    ListaUsuarios = metodos.Remove_repetidos(ListaUsuarios)
    # Per user, count rows in each category and keep the dominant profile.
    for i in metodos.Remove_repetidos(ListaUsuarios):
        TabelaConservador = resultSelect.loc[
            (resultSelect["Categoria"] == "Conservador") & (resultSelect["Id_Usuario"] == i), ["Categoria", "Id_Usuario"]]
        TabelaModerado = resultSelect.loc[
            (resultSelect["Categoria"] == "Moderado") & (resultSelect["Id_Usuario"] == i), ["Categoria", "Id_Usuario"]]
        TabelaAgressivo = resultSelect.loc[
            (resultSelect["Categoria"] == "Consumista") & (resultSelect["Id_Usuario"] == i), ["Categoria",
                                                                                              "Id_Usuario"]]
        aux = [TabelaConservador.Categoria.count(), TabelaModerado.Categoria.count(), TabelaAgressivo.Categoria.count()]
        print(aux)
        # Profile ids persisted to the DB: 1=Conservador, 2=Moderado, 3=Consumista.
        if (max(aux) == aux[0]):
            print('Usuario:', i, 'é Conservador')
            Categoria.append(1)
        if (max(aux) == aux[1]):
            print('Usuario:', i, 'é Moderado')
            Categoria.append(2)
        if (max(aux) == aux[2]):
            print('Usuario:', i, 'é Consumista')
            Categoria.append(3)
    print(ListaUsuarios)
    print(Categoria)
    # NOTE(review): ties append more than one category per user, which would
    # misalign this zip — confirm ties cannot occur.
    Usuarios_Categoria = list(zip(ListaUsuarios, Categoria))
    print(Usuarios_Categoria[0][0])
    print(Usuarios_Categoria[0][1])
    n = 0
    try:
        # Persist each (user, profile) pair through the DAO.
        for i in Usuarios_Categoria:
            dao.AtualizaTabela(Usuarios_Categoria[n][0], Usuarios_Categoria[n][1])
            n += 1
        conteudo.append('Update realizado com sucesso \n')
    except:
        conteudo.append('Erro no update \n')
except:
    conteudo.append('Erro ao buscar os dados \n')
finally:
    conteudo.append('Processo finalizado \n')
    log = open('log.txt', 'w')
    log.writelines(conteudo)  # write the collected log lines back to disk
    log.close()
| true |
0d1f57978032298a3ad814f5587002ee5050a0f0 | Python | kozistr/ML-Study | /CS20si/examples/04_word2vec_visualize.py | UTF-8 | 8,064 | 2.765625 | 3 | [
"MIT"
] | permissive | """ word2vec skip-gram model with NCE loss and
code to visualize the embeddings on TensorBoard
CS 20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Chip Huyen (chiphuyen@cs.stanford.edu)
Lecture 04
"""
import os
import numpy as np
from tensorflow.contrib.tensorboard.plugins import projector
import tensorflow as tf
import utils
import word2vec_utils
# Model hyper-parameters
VOCAB_SIZE = 50000
BATCH_SIZE = 128
EMBED_SIZE = 128 # dimension of the word embedding vectors
SKIP_WINDOW = 1 # the context window
NUM_SAMPLED = 64 # number of negative examples to sample
LEARNING_RATE = 1.0
NUM_TRAIN_STEPS = 100000
VISUAL_FLD = 'visualization'
SKIP_STEP = 5000
# Parameters for downloading data
DOWNLOAD_URL = 'http://mattmahoney.net/dc/text8.zip'
EXPECTED_BYTES = 31344016
NUM_VISUALIZE = 3000 # number of tokens to visualize
class SkipGramModel:
""" Build the graph for word2vec model """
def __init__(self, dataset, vocab_size, embed_size, batch_size, num_sampled, learning_rate):
self.vocab_size = vocab_size
self.embed_size = embed_size
self.batch_size = batch_size
self.num_sampled = num_sampled
self.lr = learning_rate
self.global_step = tf.get_variable('global_step', initializer=tf.constant(0), trainable=False)
self.skip_step = SKIP_STEP
self.dataset = dataset
def _import_data(self):
""" Step 1: import data
"""
with tf.name_scope('data'):
self.iterator = self.dataset.make_initializable_iterator()
self.center_words, self.target_words = self.iterator.get_next()
def _create_embedding(self):
""" Step 2 + 3: define weights and embedding lookup.
In word2vec, it's actually the weights that we care about
"""
with tf.name_scope('embed'):
self.embed_matrix = tf.get_variable('embed_matrix',
shape=[self.vocab_size, self.embed_size],
initializer=tf.random_uniform_initializer())
self.embed = tf.nn.embedding_lookup(self.embed_matrix, self.center_words, name='embedding')
def _create_loss(self):
""" Step 4: define the loss function """
with tf.name_scope('loss'):
# construct variables for NCE loss
nce_weight = tf.get_variable('nce_weight',
shape=[self.vocab_size, self.embed_size],
initializer=tf.truncated_normal_initializer(
stddev=1.0 / (self.embed_size ** 0.5)))
nce_bias = tf.get_variable('nce_bias', initializer=tf.zeros([VOCAB_SIZE]))
# define loss function to be NCE loss function
self.loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weight,
biases=nce_bias,
labels=self.target_words,
inputs=self.embed,
num_sampled=self.num_sampled,
num_classes=self.vocab_size), name='loss')
def _create_optimizer(self):
""" Step 5: define optimizer """
self.optimizer = tf.train.GradientDescentOptimizer(self.lr).minimize(self.loss,
global_step=self.global_step)
def _create_summaries(self):
with tf.name_scope('summaries'):
tf.summary.scalar('loss', self.loss)
tf.summary.histogram('histogram loss', self.loss)
# because you have several summaries, we should merge them all
# into one op to make it easier to manage
self.summary_op = tf.summary.merge_all()
def build_graph(self):
""" Build the graph for our model """
self._import_data()
self._create_embedding()
self._create_loss()
self._create_optimizer()
self._create_summaries()
def train(self, num_train_steps):
saver = tf.train.Saver() # defaults to saving all variables - in this case embed_matrix, nce_weight, nce_bias
initial_step = 0
utils.safe_mkdir('checkpoints')
with tf.Session() as sess:
sess.run(self.iterator.initializer)
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/checkpoint'))
# if that checkpoint exists, restore from checkpoint
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
total_loss = 0.0 # we use this to calculate late average loss in the last SKIP_STEP steps
writer = tf.summary.FileWriter('graphs/word2vec/lr' + str(self.lr), sess.graph)
initial_step = self.global_step.eval()
for index in range(initial_step, initial_step + num_train_steps):
try:
loss_batch, _, summary = sess.run([self.loss, self.optimizer, self.summary_op])
writer.add_summary(summary, global_step=index)
total_loss += loss_batch
if (index + 1) % self.skip_step == 0:
print('Average loss at step {}: {:5.1f}'.format(index, total_loss / self.skip_step))
total_loss = 0.0
saver.save(sess, 'checkpoints/skip-gram', index)
except tf.errors.OutOfRangeError:
sess.run(self.iterator.initializer)
writer.close()
def visualize(self, visual_fld, num_visualize):
""" run "'tensorboard --logdir='visualization'" to see the embeddings """
# create the list of num_variable most common words to visualize
word2vec_utils.most_common_words(visual_fld, num_visualize)
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/checkpoint'))
# if that checkpoint exists, restore from checkpoint
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
final_embed_matrix = sess.run(self.embed_matrix)
# you have to store embeddings in a new variable
embedding_var = tf.Variable(final_embed_matrix[:num_visualize], name='embedding')
sess.run(embedding_var.initializer)
config = projector.ProjectorConfig()
summary_writer = tf.summary.FileWriter(visual_fld)
# add embedding to the config file
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
# link this tensor to its metadata file, in this case the first NUM_VISUALIZE words of vocab
embedding.metadata_path = 'vocab_' + str(num_visualize) + '.tsv'
# saves a configuration file that TensorBoard will read during startup.
projector.visualize_embeddings(summary_writer, config)
saver_embed = tf.train.Saver([embedding_var])
saver_embed.save(sess, os.path.join(visual_fld, 'model.ckpt'), 1)
def gen():
yield from word2vec_utils.batch_gen(DOWNLOAD_URL, EXPECTED_BYTES, VOCAB_SIZE,
BATCH_SIZE, SKIP_WINDOW, VISUAL_FLD)
def main():
dataset = tf.data.Dataset.from_generator(gen,
(tf.int32, tf.int32),
(tf.TensorShape([BATCH_SIZE]), tf.TensorShape([BATCH_SIZE, 1])))
model = SkipGramModel(dataset, VOCAB_SIZE, EMBED_SIZE, BATCH_SIZE, NUM_SAMPLED, LEARNING_RATE)
model.build_graph()
model.train(NUM_TRAIN_STEPS)
model.visualize(VISUAL_FLD, NUM_VISUALIZE)
if __name__ == '__main__':
main()
| true |
dc060f6e91896c411724aa2f5e4f81a286a336b9 | Python | Rohini05/Dynamic-Expression-based-Result-from-dictionaries | /test.py | UTF-8 | 600 | 3.734375 | 4 | [] | no_license | def search(arr, x):
y = list(x.split(" "))
print(y)
z=[]
for key, value in arr.items():
if key in y:
if len(y) == 1 or "or" in y :
print(value)
break;
else:
if "and" in y :
z.append(value)
string =''.join([str(elem) for elem in z])
print("hgh",string)
return -1
arr = {'A':'Hello', 'B': 'World', 'C': 'Buddy'}
x=input("Enter the expression:")
search(arr, x)
# print(x, "is present at index",
# search(arr, x)) | true |
9838c5a58e4f182ff18bfe3dacc31a597bdb15e9 | Python | FuGuishan/leetcode | /question150.py | UTF-8 | 989 | 2.8125 | 3 | [] | no_license | import math
class Solution(object):
#def isNumber(self,str):
def evalRPN(self, tokens):
stack=[]
op1=0
op2=0
for i in xrange(len(tokens)):
if tokens[i]=='+' or tokens[i]=='-' or tokens[i]=='*' or tokens[i]=='/':
op2=int(stack.pop())
op1=int(stack.pop())
if tokens[i]=='+':
stack.append(str(op1+op2))
elif tokens[i]=='-':
stack.append(str(op1-op2))
elif tokens[i]=='*':
stack.append(str(op1*op2))
else:
if op1*op2<0 and op1%op2!=0:
stack.append(str(int(op1/op2)+1))
else:
stack.append(str(int(op1/op2)))
print(str(int(op1/op2)))
else:
stack.append(tokens[i])
return int(stack.pop())
| true |
08681fa7656f3d4b8b384ef1c2b51d3b62bc804b | Python | microsoftgraph/msgraph-sdk-python | /msgraph/generated/models/meeting_participant_info.py | UTF-8 | 3,276 | 2.515625 | 3 | [
"MIT"
] | permissive | from __future__ import annotations
from dataclasses import dataclass, field
from kiota_abstractions.serialization import AdditionalDataHolder, Parsable, ParseNode, SerializationWriter
from kiota_abstractions.store import BackedModel, BackingStore, BackingStoreFactorySingleton
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from .identity_set import IdentitySet
from .online_meeting_role import OnlineMeetingRole
@dataclass
class MeetingParticipantInfo(AdditionalDataHolder, BackedModel, Parsable):
# Stores model information.
backing_store: BackingStore = field(default_factory=BackingStoreFactorySingleton(backing_store_factory=None).backing_store_factory.create_backing_store, repr=False)
# Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
additional_data: Dict[str, Any] = field(default_factory=dict)
# Identity information of the participant.
identity: Optional[IdentitySet] = None
# The OdataType property
odata_type: Optional[str] = None
# Specifies the participant's role in the meeting.
role: Optional[OnlineMeetingRole] = None
# User principal name of the participant.
upn: Optional[str] = None
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> MeetingParticipantInfo:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parse_node: The parse node to use to read the discriminator value and create the object
Returns: MeetingParticipantInfo
"""
if not parse_node:
raise TypeError("parse_node cannot be null.")
return MeetingParticipantInfo()
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
from .identity_set import IdentitySet
from .online_meeting_role import OnlineMeetingRole
from .identity_set import IdentitySet
from .online_meeting_role import OnlineMeetingRole
fields: Dict[str, Callable[[Any], None]] = {
"identity": lambda n : setattr(self, 'identity', n.get_object_value(IdentitySet)),
"@odata.type": lambda n : setattr(self, 'odata_type', n.get_str_value()),
"role": lambda n : setattr(self, 'role', n.get_enum_value(OnlineMeetingRole)),
"upn": lambda n : setattr(self, 'upn', n.get_str_value()),
}
return fields
def serialize(self,writer: SerializationWriter) -> None:
"""
Serializes information the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if not writer:
raise TypeError("writer cannot be null.")
writer.write_object_value("identity", self.identity)
writer.write_str_value("@odata.type", self.odata_type)
writer.write_enum_value("role", self.role)
writer.write_str_value("upn", self.upn)
writer.write_additional_data_value(self.additional_data)
| true |
818b9cbc4d9a0c95ba2a947e9167f525d15ba6b4 | Python | etmwb/ml | /python/RF/RandomForest.py | UTF-8 | 1,157 | 3.375 | 3 | [] | no_license | import numpy as np
class RandomForest():
def __init__(self, n_trees, n_features, n_samples, depth=10, min_leaf=5):
r"""
Args:
n_trees (int): number of trees.
n_features (int): number of features used for each tree.
n_samples (int): number of training data used for each tree.
depth (int): depth of each tree.
min_leaf (int): minimum number of training data required to cause further split.
"""
self._n_trees, self._n_features, self._n_samples, self._depth, self._min_leaf = \
n_trees, n_features, n_samples, depth, min_leaf
def train(self, X, y):
np.random.seed(12)
self.X, self.y = X, y
self.trees = [self._create_tree() for i in range(self._n_trees)]
def _create_tree(self):
indices = np.random.permutation(len(self.y))[:self._n_samples]
feature_indices = np.random.permutation(self.X.shape[1])[:self._n_features]
return DecisionTree(self._n_features, feature_indices, idxs=np.array(range(self._n_samples)),
depth=self._depth, min_leaf=self._min_leaf).train(self.X[indices], self.y[indices])
def forward(self, X):
return np.mean([tree.forward(x) for tree in self.trees], axis=0)
| true |
ca4ccd8f60e4c467d8e84092d1d525623c5bf4ad | Python | code-afrique/bootcamps | /2019/GuessTheMovie.py | UTF-8 | 1,374 | 4.46875 | 4 | [] | no_license | import random
import string
# Guess the Movie Game
# This game simply allows you to guess a movie randomly picked
# from a list that have been compiled. You guess one character at a time
# to fill in spaces provided hinting the number of characters in the movie
# You only have 10 trails to win this game
# Enjoy and I hope you have fun
#This function loads the movie list into our python program
def movie_logic():
"""
the syntax below allows you to import the movie
txt file into our python program.
"""
#opens the movie file
file = open("movies.txt", "r")
#create an array and save the text file in an array
movie_array = []
#this loop was created to repeat the action within the loop till
#the loop satisfies its condition. Notice the use of range(start, end, step)
for i in range(0, 25, 1):
#reads the movie list
movies_file = file.readline()
movie_list = movies_file.split()
#adds the list to the movie_list_array
movie_array.append(movie_list)
return movie_array
MOVIE_STYLE = movie_logic()
print(MOVIE_STYLE)
def random_movie(movie_style):
"""
This function select a random movie from the list of
movies and then returns it
"""
random_movie = random.choice(movie_style)
print(random_movie)
return random_movie
random_movie(MOVIE_STYLE)
| true |
757ee3de42d67c367fdf14d0dabca7e0237a7492 | Python | smorzhov/adventofcode2017 | /03_spiral_memory.py | UTF-8 | 2,340 | 3.1875 | 3 | [] | no_license | import numpy as np
def find_closest(number, max_spiral_num):
"""Part 2"""
def count(a, i, j):
def try_get_value(i, j):
if len(a) > abs(i) and len(a[i]) > abs(j):
return a[i][j]
else:
return 0
return (try_get_value(i + 1, j) + try_get_value(i + 1, j + 1) +
try_get_value(i, j + 1) + try_get_value(i - 1, j + 1) +
try_get_value(i - 1, j) + try_get_value(i - 1, j - 1) +
try_get_value(i, j - 1) + try_get_value(i + 1, j - 1))
# lol should be enough
size = 2 * max_spiral_num + 1
zeros = np.zeros((size, size), dtype=np.int)
zeros[0][0] = 1
spiral_num = 0
j = 0
# No ckeck for index out of range
while True:
spiral_num += 1
i = spiral_num
side = (2 * spiral_num + 1)
for j in range(j, j + side - 1, 1):
zeros[spiral_num][j] = count(zeros, spiral_num, j)
if zeros[spiral_num][j] > number:
return zeros[spiral_num][j]
for i in range(i - 1, i - side, -1):
zeros[i][j] = count(zeros, i, j)
if zeros[i][j] > number:
return zeros[i][j]
for j in range(j - 1, j - side, -1):
zeros[i][j] = count(zeros, i, j)
if zeros[i][j] > number:
return zeros[i][j]
for i in range(i + 1, i + side, 1):
zeros[i][j] = count(zeros, i, j)
if zeros[i][j] > number:
return zeros[i][j]
def find_distance(number):
"""Part 1"""
if number == 1:
return 0
spiral_num = 0
while True:
spiral_num += 1
side = (2 * spiral_num + 1)
maximum = side * side
minimum = maximum - (4 * (side - 1)) + 1
if minimum <= number <= maximum:
anchor = minimum + (side - 1) / 2 - 1
for i in range(0, 4):
anchor += i * side - i
if anchor - (
(side - 1) / 2 - 1) <= number <= anchor + (side - 1) / 2:
return spiral_num + abs(anchor - number), spiral_num
def main():
distance, spiral_num = find_distance(265149)
print('Part 1: ' + str(distance))
print('Part 2: ' + str(find_closest(265149, spiral_num)))
if __name__ == '__main__':
main() | true |
f6b8eb98dd4ada191ad9d49570f5b2fdcd495c31 | Python | ElizabethKon/Lesson1 | /Lesson1 e2.py | UTF-8 | 494 | 3.6875 | 4 | [] | no_license | #Задача2
seconds = int(input('Введите число секунд '))
hour = seconds // 3600
minute = (seconds-hour*3600) // 60
second = seconds % 60
print(hour,'час',minute,'мин',second,'сек') #мне так проще смотреть ответ
print(f"{hour}:{minute}:{second}") #очевидная попытка попасть в формат
print("%02d:%02d:%02d" % (hour, minute, second)) #подсмотренное на форумах решение
| true |
775f1dce0e86c34589a56df14e7cda48515069f6 | Python | ameyaphatak88/Project_Facial_Expression | /Recognizer.py | UTF-8 | 685 | 2.546875 | 3 | [] | no_license | import cv2
from model import FacialExpressionModel
import numpy as np
font = cv2.FONT_HERSHEY_SIMPLEX
facec = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
model = FacialExpressionModel("model.json", "model_weights.h5")
def predict_and_rectangle(fr):
gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
faces = facec.detectMultiScale(gray_fr, 1.3, 5)
for(x, y, w, h) in faces:
fc = gray_fr[y:y+h, x:x+w]
roi = cv2.resize(fc, (48, 48))
pred = model.predict_emotion(roi[np.newaxis, :, :, np.newaxis])
cv2.putText(fr, pred, (x, y), font, 1, (255, 255, 0), 2)
cv2.rectangle(fr,(x,y),(x+w,y+h),(255,0,0),2)
return fr | true |
d658f02b009ab0569c32862767423c692325b6bf | Python | Manuelpv17/flask_workshop | /controllers/students_controller.py | UTF-8 | 1,566 | 2.65625 | 3 | [] | no_license | from flask import render_template, Blueprint, request, jsonify
from models.student_model import students
controller = Blueprint("controller", __name__)
@controller.route("/")
def hello_world():
print(request.method)
print(request.url)
print(request.headers)
print(request.json)
# myResponse = Response(status=220, response="Hi",
# headers={"myheader": "hello"})
return "<h1>Hello {}</h1>".format(students[0]["name"]), 220, {"myheader": "hello"}
@ controller.route("/students")
def students_templete():
return render_template("students.html", students=students)
@ controller.route("/number/<string:id>")
def num(id):
return id
@ controller.route("/api/students", methods=["GET", "POST"], strict_slashes=False)
def students_constroller():
if request.method == "GET":
return jsonify(students)
elif request.method == "POST":
students.append(request.json)
return jsonify(students)
@ controller.route("/api/students/<int:id>", methods=["GET", "DELETE", "PUT"])
def student_controller(id):
if request.method == "GET":
for student in students:
if student["id"] == id:
return student
elif request.method == "DELETE":
for i in range(len(students)):
if students[i]["id"] == id:
students.pop(i)
return jsonify(students)
elif request.method == "PUT":
for i in range(len(students)):
if students[i]["id"] == id:
students[i] = request.json
return jsonify(students)
| true |
f3b2704d24cdc7188fa634d526a5a6a71a270ba8 | Python | djlad/patientrecordproject | /models/doctormodel.py | UTF-8 | 4,151 | 2.546875 | 3 | [] | no_license | import pymysql
from models.dbconnect import Dbconnect
from models.queries import queries
class DoctorModel(object):
def __init__(self):
pass
def add_doctor(self, doctorID, name, specialty, location):
'''
method to add doctor index tot database
'''
connection = Dbconnect.get_connection()
try:
with connection.cursor() as cursor:
# create a new user index
sql = queries["Add Doctor"]
cursor.execute(sql, (doctorID, name, specialty, location))
# connection is not autocommit by default. So you must commit to save
# your changes.
connection.commit()
finally:
connection.close()
return "add doctor success"
def change_doctor_info(self, name, specialty, location, doctorID):
'''
method to modify existing doctor index
'''
connection = Dbconnect.get_connection()
try:
with connection.cursor() as cursor:
# create a new user index
sql = queries["Change Doctor"]
cursor.execute(sql, (name, specialty, location, doctorID))
# connection is not autocommit by default. So you must commit to save
# your changes.
connection.commit()
finally:
connection.close()
return "change doctor success"
def get_doctor_info_by_name(self, name):
'''
method to get doctor information by name
'''
connection = Dbconnect.get_connection()
try:
with connection.cursor() as cursor:
# get all prescriptions assigned to a patient
sql = queries["Get Doctor Info by Name"]
cursor.execute(sql, (name))
# get result of prescriptions query
result = cursor.fetchall()
finally:
connection.close()
return result
def get_doctor_info_by_specialization(self, specialization):
'''
method to get doctor information by specialization
'''
connection = Dbconnect.get_connection()
try:
with connection.cursor() as cursor:
# get all prescriptions assigned to a patient
sql = queries["Get Doctor Info by Specialization"]
cursor.execute(sql, (specialization))
# get result of prescriptions query
result = cursor.fetchall()
finally:
connection.close()
return result
def get_doctor_info_list(self, limit=1000, offset=0):
'''
method to get a list of doctorInfo from offset to limit
'''
connection = Dbconnect.get_connection()
try:
with connection.cursor() as cursor:
# get all patients within defined limit and offset
sql = queries["Get Doctor Info List"]
cursor.execute(sql, (limit, offset))
result = cursor.fetchall()
finally:
connection.close()
return result
def get_doctor_by_id(self, doctorID):
'''
method to get doctor information by doctorID
'''
connection = Dbconnect.get_connection()
try:
with connection.cursor() as cursor:
# get all patients within defined limit and offset
sql = queries["Get Doctor by ID"]
cursor.execute(sql, (doctorID))
result = cursor.fetchone()
finally:
connection.close()
return result
def remove_doctor(self, doctorID):
'''
remove doctor from database
'''
connection = Dbconnect.get_connection()
try:
with connection.cursor() as cursor:
# get all prescriptions assigned to a patient
sql = queries["Remove Doctor"]
cursor.execute(sql, (doctorID))
connection.commit()
finally:
connection.close()
return "remove doctor success" | true |
e95eed4774ac1fae68e24d001f5d3437261df387 | Python | mattroy/DataMiningProject | /scripts/split_data.py | UTF-8 | 2,895 | 3.140625 | 3 | [] | no_license | #split_data.py
#
#The purpose of this script is to split the training data into a test and validation set.
#It will remove, articles with less than 10 references
#It also creates a ground truth file for the validation set.
#
#Data Mining Project
#CS6220 Fall 2014
#Team ELSAMAT
import sys
import os
import ConfigParser
import random
config = ConfigParser.ConfigParser()
config.read(sys.argv[1])
validationSize = config.get('split_data', 'validation_size')
trainingData = config.get('split_data', 'training_location')
outputLocation = config.get('split_data', 'output_location')
trainingLocation = config.get('split_data', 'training_output_location')
validationLocation = config.get('split_data', 'validation_location')
gtLocation = config.get('split_data', 'gt_location')
print "----------------------------------------------------------------------------"
print "- Load config from: ", sys.argv[1]
print "- Training data will be split into ", validationSize, "% validation and ", 100 - int(validationSize), "% training"
print "- Trainingg data is at ", trainingData
print "- Output is at ", outputLocation
print "----------------------------------------------------------------------------"
if not os.path.exists(outputLocation):
os.makedirs(outputLocation)
validationFile = open(os.path.join(outputLocation,validationLocation) , "w")
trainingFile = open(os.path.join(outputLocation, trainingLocation), "w")
gtFile = open(os.path.join(outputLocation, gtLocation), "w")
gtFile.write("Id,References\n")
sample = ""
validationGTLine = ""
gtCount = 0
badDataCount = 0
validationCount = 0
trainingCount = 0
sampleYear = 0
with open(trainingData) as f:
for line in f:
if line.startswith("#index"):
if sampleYear == 2012:
if(random.randint(1,100) < int(validationSize)):
validationFile.write(sample)
validationCount += 1
gtFile.write(validationGTLine + "\n")
else:
trainingFile.write(sample)
trainingCount += 1
gtCount = 0
sample = line
validationGTLine = line[7:-1] + ","
elif line.startswith("#%"):
if gtCount < 10:
validationGTLine += " " + line[3:-1]
gtCount += 1
sample += line
elif line.startswith("#t"):
sampleYear = line[3:].strip()
sample += line
else:
sample += line
print "- Removed ", badDataCount, " lines."
print "- Created ", validationCount, " validation samples"
print "- Created ", trainingCount, " training samples"
print "- At ", float(validationCount)/float(trainingCount) * 100, "% of validation files"
print "----------------------------------------------------------------------------" | true |
d44b958bba0a56e388b43d8f70089acf53be855e | Python | bluddy/pytorch_datasets | /pytorch_datasets/datasets/wcvp.py | UTF-8 | 701 | 2.59375 | 3 | [] | no_license | import glob
from tqdm import tqdm
import numpy as np
from pytorch_datasets.dataset import DataSet
class WCVP(DataSet):
def __init__(self, root):
self.dataset = []
for annotation_file in tqdm(glob.glob(root + "*/*.txt"),
ncols=100,
desc="Creating WCVP dataset"
):
xyz = open(annotation_file, "r").readlines()[3].split()
self.dataset.append({
'image_file': annotation_file.replace('txt', 'jpg'),
'orientation': np.arctan2(float(xyz[0]), float(xyz[1])) * 180. / np.pi,
'source': 'WCVP',
})
| true |
aebabad12ab9a4084ff4a0c7f5c460442688fa33 | Python | erikstein/django-markupfield | /markupfield/tests/tests.py | UTF-8 | 3,957 | 2.859375 | 3 | [] | no_license | r"""
>>> from django.core import serializers
>>> from markupfield.fields import MarkupField, Markup
>>> from markupfield.widgets import MarkupTextarea, AdminMarkupTextareaWidget
>>> from markupfield.tests.models import Post, Article, CustomArticle
# Create a few example posts
>>> mp = Post(title='example markdown post', body='**markdown**', body_markup_type='markdown')
>>> mp.save()
>>> rp = Post(title='example restructuredtext post', body='*ReST*', body_markup_type='ReST')
>>> rp.save()
## Basics ##
# Make sure verbose_name works
>>> mp._meta.get_field('body').verbose_name
'body of post'
# The body attribute is an instance of Markup
>>> mp.body.raw, mp.body.rendered, mp.body.markup_type
('**markdown**', u'<p><strong>markdown</strong></p>', 'markdown')
# Calling unicode on the Markup object gives the rendered string
>>> unicode(rp.body.rendered)
u'<p><em>ReST</em></p>\n'
# The data loads back from the database correctly and 'post' has the right type.
>>> p1 = Post.objects.get(pk=mp.pk)
>>> isinstance(p1.body, Markup)
True
>>> unicode(p1.body)
u'<p><strong>markdown</strong></p>'
## Assignment ##
# assignment directly to body
>>> rp.body = '**ReST**'
>>> rp.save()
>>> unicode(rp.body)
u'<p><strong>ReST</strong></p>\n'
# assignment to body.raw
>>> rp.body = '*ReST*'
>>> rp.save()
>>> unicode(rp.body)
u'<p><em>ReST</em></p>\n'
# assignment to rendered
>>> rp.body.rendered = 'this should fail'
Traceback (most recent call last):
...
AttributeError: can't set attribute
# assignment to body.type
>>> rp.body.markup_type = 'markdown'
>>> rp.save()
>>> rp.body.markup_type, unicode(rp.body)
('markdown', u'<p><em>ReST</em></p>')
## Serialization ##
# serialize to json
>>> stream = serializers.serialize('json', Post.objects.all())
>>> stream
'[{"pk": 1, "model": "tests.post", "fields": {"body": "**markdown**", "_body_rendered": "<p><strong>markdown</strong></p>", "body_markup_type": "markdown", "title": "example markdown post"}}, {"pk": 2, "model": "tests.post", "fields": {"body": "*ReST*", "_body_rendered": "<p><em>ReST</em></p>", "body_markup_type": "markdown", "title": "example restructuredtext post"}}]'
# deserialization
>>> obj = list(serializers.deserialize("json", stream))[0]
>>> obj.object == mp
True
## forms and formfields
# ensure that MarkupTextarea widget is used
>>> isinstance(MarkupField().formfield().widget, MarkupTextarea)
True
# ensure that MarkupTextarea shows the correct text
>>> from django.forms.models import modelform_factory
>>> ArticleForm = modelform_factory(Article)
>>> af = ArticleForm()
# ensure that a field with markup_type set does not have a field (non-editable)
>>> af.fields.keys()
['normal_field', 'default_field', 'normal_field_markup_type', 'markdown_field', 'default_field_markup_type']
# make sure that a markup_type field shows the correct choices
>>> af.fields['normal_field_markup_type'].choices
[('markdown', 'markdown'), ('ReST', 'ReST')]
# test default_markup_type
>>> af.fields['normal_field_markup_type'].initial is None
True
>>> af.fields['default_field_markup_type'].initial
u'markdown'
# test correct fields are used in ModelAdmin
# borrows from regressiontests/admin_widgets/tests.py
>>> from django.contrib import admin
>>> ma = admin.ModelAdmin(Post, admin.site)
>>> isinstance(ma.formfield_for_dbfield(Post._meta.get_field('body')).widget, AdminMarkupTextareaWidget)
True
## Custom Markup Classes ##
>>> complex_rest = "Title of the article\n====================\n\nA paragraph with an *emphasized text*.\n\n"
>>> a = CustomArticle(text=complex_rest)
>>> a.save()
>>> type(a.text)
<class 'markupfield.tests.markup.RestructuredtextMarkup'>
>>> a.text.rendered
u'<div class="section" id="title-of-the-article">\n<h2>Title of the article</h2>\n<p>A paragraph with an <em>emphasized text</em>.</p>\n</div>\n'
>>> a.text.plaintext()
u'Title of the article\n\nA paragraph with an emphasized text.'
>>> a.text.title()
u'Title of the article'
"""
| true |
076b8cef5b0d08552f04b9908d80a75ec6c62d7c | Python | yashraj9892/Network_manager | /Supportfiles/Login.py | UTF-8 | 1,702 | 2.640625 | 3 | [] | no_license | import urllib.request as req
import subprocess
import ssl
import urllib.parse as ub
ssl._create_default_https_context = ssl._create_unverified_context
def send_request(request_type, BASE_URL,arg):
if(request_type == 'login'):
params = ub.urlencode({'mode': 191, 'username': arg[0], 'password': arg[1]}).encode('utf-8')
elif(request_type == 'logout'):
params = ub.urlencode({'mode': 193, 'username': arg[0]}).encode('utf-8')
try:
response = req.urlopen(BASE_URL, params,timeout=2)
return response.read()
except Exception as e:
pass
def logout(username,url):
arg=[]
arg.append(username)
data = send_request('logout',url,arg)
return data
def main(value,username,password,url):
login_check= False
arg =[]
arg.append(username)
arg.append(password)
try:
if "Login" in value:
data = send_request("login",url,arg)
## print(data)
if None is data:
return "Request Failed, Please try again later"
elif b"maximum" in data:
return "MAXIMUM LIMIT"
elif b"exceeded" in data:
return "Data Exceeded"
elif b"could not" in data:
return "Incorrect username or password"
elif b"logged in" in data:
return "Logged in"
else:
return "Request Failed, Please try again later"
else:
data = logout(username,url)
return "Logged out Successfully!!"
except KeyboardInterrupt or Exception as e:
logout(username,url)
return "Logged out Successfully!!"
| true |
3ccb9ba2ebc33bb314a1dfa49baa5cbc2d597c35 | Python | xuldor/moos-ivp-reed | /src/Python/MOOSPlot_Box.py | UTF-8 | 6,919 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import time
import pymoos
from sys import argv
from atexit import register as reg
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
class MOOS_comms(object):
    """Thin wrapper around a pymoos client for this plotting app.

    Responsibilities:
      1. Initialize the comms link (time warp, connect callback, server run).
      2. Buffer incoming mail (NAV_X/NAV_Y/CYCLE_INDEX doubles and
         NEW_POINTS/NEW_POLYS strings) into plain Python lists that the
         plotting code drains.
    """

    def __init__(self):
        # Underlying pymoos client plus one buffer list per subscribed variable.
        self.comms = pymoos.comms()
        self.X = []         # NAV_X samples (doubles)
        self.Y = []         # NAV_Y samples (doubles)
        self.Index = []     # CYCLE_INDEX samples (doubles)
        self.New_Pnt = []   # NEW_POINTS messages (strings)
        self.New_Poly = []  # NEW_POLYS messages (strings)

    def Register_Vars(self):
        """Subscribe to every MOOSDB variable this app plots.

        Used as the on-connect callback, so registration happens (and is
        re-done) whenever the client connects. Returns True to signal
        success to pymoos.
        """
        for var_name in ('NAV_X', 'NAV_Y', 'NEW_POINTS', 'NEW_POLYS',
                         'CYCLE_INDEX'):
            # Second argument 0 => no minimum interval between updates.
            self.comms.register(var_name, 0)
        return True

    def Set_time_warp(self, timewarp):
        """Apply a MOOS time warp and matching comms scale factor.

        A warp of 1 (real time) is left untouched; only warps > 1 change
        the global pymoos settings.
        """
        if timewarp > 1:
            pymoos.set_moos_timewarp(timewarp)
            # Scale factor keeps comms pacing consistent with the warp.
            self.comms.set_comms_control_timewarp_scale_factor(0.04 * timewarp)

    def Initialize(self):
        """Configure the client and connect to the local MOOSDB.

        Sets a (no-op) time warp of 1, installs Register_Vars as the
        on-connect callback, and starts the client against
        localhost:9000 under the name 'Plot_WPT'.
        """
        self.Set_time_warp(1)
        self.comms.set_on_connect_callback(self.Register_Vars)
        self.comms.run('localhost', 9000, 'Plot_WPT')

    def Get_mail(self):
        """Drain pending MOOS mail into the per-variable buffer lists."""
        # Dispatch tables: variable name -> destination buffer.
        double_sinks = {'NAV_X': self.X,
                        'NAV_Y': self.Y,
                        'CYCLE_INDEX': self.Index}
        string_sinks = {'NEW_POLYS': self.New_Poly,
                        'NEW_POINTS': self.New_Pnt}
        for msg in self.comms.fetch():
            if msg.is_double():
                sink = double_sinks.get(msg.name())
                if sink is not None:
                    sink.append(msg.double())
            else:
                sink = string_sinks.get(msg.name())
                if sink is not None:
                    sink.append(msg.string())
class plot_MOOS(object):
    """ Live plotter: subscribes to MOOS navigation/geometry variables and
        draws the vehicle track against a fixed waypoint path.
        Plot handle registry (self.plots): 0 = waypoint path, 1 = vehicle
        track, 2 = most recent point/polygon overlay.
    """
    def __init__(self, wptX, wptY):
        self.needsInit = True    # True until the first position fix arrives
        self.plots = {}          # Line2D handles, keyed as described above
        # Open a communication link to MOOS
        self.init_MOOS()
        # NOTE(review): data_items is populated here but never read elsewhere
        # in this class — it appears to be vestigial.
        self.data_items= []
        self.data_items.append([])
        self.data_items.append([])
        self.data_items.append([])
        self.data_items.append([])
        self.data_items.append([])
        self.start = time.time()
        self.wptX = wptX         # waypoint path x-coordinates
        self.wptY = wptY         # waypoint path y-coordinates
    def init_MOOS(self):
        """ Initializes the communication link to MOOS.
            Outputs:
                self.comms - Communication link to MOOS
                MOOS - Object to access the MOOSDB
        """
        self.MOOS = MOOS_comms()
        self.MOOS.Initialize()
        # Need this time to connect to MOOS
        time.sleep(.25)
        self.comms = self.MOOS.comms
    def updatePlot(self, pl, x, y, clear=False):
        """ Append (x, y) to the Line2D handle `pl`, or — when clear is
            True — save the figure to Polygons.pdf and empty the line.
        """
        if clear:
            plt.savefig('Polygons.pdf')
            pl.set_xdata([])
            pl.set_ydata([])
        else:
            pl.set_xdata(np.append(pl.get_xdata(), x))
            pl.set_ydata(np.append(pl.get_ydata(), y))
    def run(self):
        """ Poll MOOS forever, redrawing the track as mail arrives.
            Exits (after saving the figure) on KeyboardInterrupt.
        """
        while True:
            try:
                self.MOOS.Get_mail()
                # A cycle with no NAV_X/NAV_Y mail leaves x/y untouched.
                if len(self.MOOS.X) == 0 and len(self.MOOS.Y) == 0:
                    gotData=False
                else:
                    gotData=True
                if not self.needsInit and len(self.MOOS.New_Pnt)!=0:
                    # x=$(XPOS),y=$(YPOS),$(t_lvl),$(label)
                    parse = self.MOOS.New_Pnt[-1].split(',')
                    if len(parse) == 4:
                        # NOTE(review): ptX/ptY stay strings here — matplotlib
                        # coerces them, but an explicit float() would be safer.
                        ptX = parse[0].split('=')[1]
                        ptY = parse[1].split('=')[1]
                        self.plots[2], = plt.plot(ptX, ptY, 'm*', markersize =20)
                    self.MOOS.New_Pnt = []
                if not self.needsInit and len(self.MOOS.New_Poly)!=0:
                    # Hard-coded demo polygon (circle about (0, 200)); the
                    # received NEW_POLYS payload itself is not parsed.
                    poly = np.reshape(np.array([0, 196, 0.772542, 196.196, 1.46946, 196.764, 2.02254, 197.649, 2.37764, 198.764, 2.5, 200, 2.37764, 201.236, 2.02254, 202.351, 1.46946, 203.236, 0.772542, 203.804, 2.44929e-16, 204, -0.772542, 203.804, -1.46946, 203.236, -2.02254, 202.351, -2.37764, 201.236, -2.5, 200, -2.37764, 198.764, -2.02254, 197.649, -1.46946, 196.764, -0.772542, 196.196, 0, 196]),[21,2]).T
                    self.plots[2], = plt.plot(poly[0,:], poly[1,:], 'm', markersize =10)
                # Keep only the most recent position fix, then clear the mailbox.
                if len(self.MOOS.X) != 0:
                    x=(self.MOOS.X[-1])
                    self.MOOS.X = []
                if len(self.MOOS.Y)!= 0:
                    y=(self.MOOS.Y[-1])
                    self.MOOS.Y = []
                if len(self.MOOS.Index)!= 0:
                    # clear_plot=True
                    if not self.needsInit:
                        # self.updatePlot(self.plots[1],0,0,)
                        self.MOOS.Index = []
                if self.needsInit:
                    # First fix: create the waypoint-path and track lines.
                    if gotData:
                        self.plots[0], = plt.plot(self.wptX, self.wptY, '--', linewidth=4)
                        self.plots[1], = plt.plot(x,y, 'r', linewidth=1.5)
                        self.needsInit = False
                else:
                    if gotData:
                        self.updatePlot(self.plots[1], x,y)
                        plt.pause(0.1)
                        fig = plt.gca()
                        fig.set_title('Simple Waypoint Mission')
                        fig.set_xlim([min(self.wptX)-10, max(self.wptX)+10])
                        fig.set_ylim([min(self.wptY)-20, max(self.wptY)+20])
            except KeyboardInterrupt:
                # Save the figure and clear the track before exiting.
                self.updatePlot(self.plots[1],0,0,True)
                exit(1)
if __name__ == '__main__':
    p = plot_MOOS([-75, 75, 75, -75, -75], [210, 210, 200, 200, 210])
    p.run()
    # BUG FIX: atexit.register was previously handed the *result* of calling
    # updatePlot (None) instead of a callable. Pass the function and its
    # arguments separately so the final save/clear actually runs at exit.
    reg(p.updatePlot, p.plots[1], 0, 0, True)
    # if len(argv)>1:
    #     tide = tide_prediction(argv[1])
    # else:
    #     tide = tide_prediction()
    # tide.run()
9864a25ebcd407b5dd6398a1cdef48d3148d15a6 | Python | wwlaoxi/mri_liver_seg | /src/generate_heatmap.py | UTF-8 | 888 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env python
# import libraries
import numpy as np
from scipy.stats import multivariate_normal
def generate_heatmap(xyz_location, xyz_limits, sigma):
    """
    Build a 3-D Gaussian heatmap, rescaled to the [0, 1] range.

    INPUTS:
        xyz_location:
            tuple of x, y, and z coordinates for centering the heatmap
        xyz_limits:
            tuple of x, y, and z grid dimensions for the map
        sigma:
            variance used on every axis (isotropic covariance)
    """
    # Every (x, y, z) grid index, arranged as an array of shape (X, Y, Z, 3).
    grid = np.moveaxis(np.indices(xyz_limits), 0, -1)
    # Isotropic Gaussian density centred on the requested location.
    density = multivariate_normal(mean=xyz_location, cov=sigma * np.eye(3)).pdf(grid)
    # Rescale so the minimum maps to 0 and the peak to 1.
    lo = density.min()
    hi = density.max()
    return (density - lo) / (hi - lo)
| true |
3c0a511273bb48c7a4372f058a671881d4dfb667 | Python | a-gon/problem_solutions | /trees/sum_even_grandparent.py | UTF-8 | 502 | 2.984375 | 3 | [] | no_license | from TreeNode import TreeNode
class Solution:
    """Accumulates the sum of node values whose grandparent value is even."""
    def __init__(self):
        # Running total updated during the traversal.
        self.result = 0
    def sumEvenGrandparent(self, root: TreeNode) -> int:
        """Return the sum of values of nodes with an even-valued grandparent."""
        if not root:
            return 0
        self.dfs(root, None, None)
        return self.result
    def dfs(self, root, p, gp):
        """Pre-order walk carrying the parent (p) and grandparent (gp)."""
        if root is None:
            return
        if gp is not None and gp.val % 2 == 0:
            self.result += root.val
        self.dfs(root.left, root, p)
        self.dfs(root.right, root, p)
bb04a93abdd4d612ca9164542f9ff8b277a66be6 | Python | ruby-kim/Math-AI | /4-RPI smart_MB.py | UTF-8 | 3,783 | 3.109375 | 3 | [] | no_license | import speech_recognition as sr
from random import *
import pyaudio
import time
from timeit import default_timer as timer
import os
# def make_mp3_file(mp3_file): # make .mp3 file
# mp3_file = mp3_file + '.wav'
# return str(mp3_file)
def make_words_speech(words, mp3_file): # use translator api query to get voice
    """Download `words` as speech from Google Translate TTS, play it, delete it.
    NOTE(review): `words` and `mp3_file` are interpolated unquoted into a
    shell command run via os.system — shell metacharacters would be executed.
    Callers in this script pass fixed strings only; treat as injection-prone.
    """
    # mp3_file = make_mp3_file(mp3_file)
    mp3_file = mp3_file + '.wav'
    mp3_file = "./" + mp3_file
    label = words
    sentence = str('wget -q -U Mozilla -O ' + mp3_file + ' "http://translate.google.com/translate_tts?ie=UTF-8&total=1&idx=0&textlen=32&client=tw-ob&q=' + label + '.&tl=en"')
    # Fetch the synthesized audio, play through the local jack, then clean up.
    os.system('sudo ' + sentence)
    os.system('sudo omxplayer -o local ' + mp3_file)
    os.remove(mp3_file)
def voice():
    """Listen on the microphone and return the spoken number as an int.
    Common misrecognitions of digit words are mapped back to numbers via
    `comm_words`; unrecognised input prompts the user and retries
    (recursively).
    """
    r = sr.Recognizer()
    with sr.Microphone(device_index=1,sample_rate=16000,chunk_size=512) as source: # change the device_index according to index
        r.pause_threshold = 0.7
        start = timer()
        audio = r.listen(source,timeout=None)
        # Bump the energy threshold after each capture to adapt to noise.
        r.energy_threshold += 400
        print(r.energy_threshold)
        end = timer()
        print(end - start)
    try:
        # Words Google's recogniser commonly returns for spoken digits.
        comm_words = {"own": "1", "on": "1", "won": "1", "to": "2", "too": "2","tree":"3", "for": "4", "fur": "4", "phor": "4",
                      "Ford":"4","fall":"4","fight":"5","pipe":"5","pip":"5","sex": "6", "zex": "6","zeban":"7",
                      "salmon":"7","ate":"8","eat":"8","night": "9","nyne":'9',"ben":"10","then":"10"}
        # Accept any literal integer in [-50, 49] as-is.
        valid = [str(i) for i in range(-50, 50)]
        user = r.recognize_google(audio,language="en-US")
        user = str(user).lower()
        print(user)
        if user in comm_words.keys():
            num = comm_words[user]
            return int(num)
        elif user in valid:
            return int(user)
        else:
            # print("please Say again ")
            make_words_speech("please Say again ", mp3_file='')
            return voice()
    except sr.UnknownValueError :
        # print("please Say again")
        make_words_speech("please Say again ", mp3_file='')
        return voice()
def math():
    """Ask one random arithmetic question aloud and grade the spoken answer.

    Draws two operands in [1, 5] and one of three operations, announces the
    question via make_words_speech(), reads the answer via voice(), and
    reports right/wrong both on screen and aloud.
    """
    num1 = randint(1, 5)
    num2 = randint(1, 5)
    symbol = randint(1, 3)
    # Map the drawn symbol to its printed sign, its spoken word and the
    # operation used to compute the expected result. This replaces three
    # near-identical copy/paste branches.
    operations = {
        1: ("+", "plus", lambda a, b: a + b),
        2: ("-", "minus", lambda a, b: a - b),
        3: ("*", "multiply", lambda a, b: a * b),
    }
    sign, word, op = operations[symbol]
    print("What is " + str(num1) + sign + str(num2) + "?")
    make_words_speech("What is " + str(num1) + word + str(num2) + "?", mp3_file='')
    question = op(num1, num2)
    answer = voice()
    if int(question) == answer:
        print("its Right Answer")
        make_words_speech("its Right Answer", mp3_file='')
    else:
        print("its wrong Answer")
        make_words_speech("its wrong Answer", mp3_file='')
# Time a five-question quiz session end-to-end.
s=timer()
for i in range(5):
    math()
e=timer()
print(e-s)
| true |
5ae9d72426b850ecadf1f74d446822aceebcd46e | Python | P-H-Pancholi/Python_practice_programs | /Hackerrank/GridChallenge1.py | UTF-8 | 515 | 3.328125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 16 16:45:57 2019
@author: Phalin Pancholi
"""
def isSorted(arr):
    """True when `arr` is already in non-decreasing order."""
    return all(a <= b for a, b in zip(arr, arr[1:]))
def gridChallenge(arr,n):
    """Return "YES" when every column of the flattened n-row grid is in
    non-decreasing order, else "NO"."""
    # Column i of the row-major flattened grid is the slice arr[i::n].
    columns_sorted = all(arr[i::n] == sorted(arr[i::n]) for i in range(n))
    return "YES" if columns_sorted else "NO"
# Number of test cases to process.
i = int(input())
while i > 0:
    n = int(input())
    arr = []
    # Read n rows; sorting each row's characters guarantees row order,
    # so only the columns remain to be checked.
    for j in range(n):
        temp_list = list(input())
        temp_list.sort()
        arr = arr + temp_list
    print(gridChallenge(arr,n))
    i -= 1
| true |
19492b3cd8635c22b17de9ccefd1df1c1463493e | Python | RodaRolla/1 | /Scripts/btask.py | UTF-8 | 939 | 3.5 | 4 | [] | no_license | m=['вася','петя','женя','коля','саша','жора']
# Female names; some also appear in the male list `m` defined above.
f=['надя','маша','саша','света','женя','настя','гертруда']
# Combined list of every name (duplicates preserved).
l=m+f
def nomatch(list):
    """Remove duplicates from `list` in place, keeping each value's last
    occurrence, and return the same list object."""
    # BUG FIX: iterate over a snapshot. Mutating a list while iterating it
    # directly can skip elements because items shift left after remove().
    for i in tuple(list):
        while list.count(i) > 1:
            list.remove(i)
    return list
def Two1():
    """Return the female names that also appear in the male list.
    Reads the module-level lists `f` and `m`."""
    return [name for name in f if name in m]
def Two2():
    """Print the male/female name lists with the shared names removed."""
    m2=['вася','петя','женя','коля','саша','жора']
    f2=['надя','маша','саша','света','женя','настя','гертруда']
    # Compute the shared names once instead of calling Two1() per iteration.
    shared = Two1()
    for i in set(m + f):
        if i in shared:
            m2.remove(i)
            f2.remove(i)
    # BUG FIX: previously printed the untouched globals m and f, discarding
    # the filtered m2/f2 lists this function had just built.
    return print ('не повторяющиеся мужские: \n',m2),print ('не повторяющиеся женские: \n',f2)
# Report: shared names, filtered lists, then the deduplicated combined list.
print('Повторяются: \n',Two1())
Two2()
# NOTE: nomatch mutates `l` in place while printing the result.
print ('Список все имен: \n',nomatch(l))
print(m,f)
2604bf74da24fe16e8704d862fee21f958e1cd90 | Python | bosscha/GASS | /GA_Subarray_Optimization/GA_Subarray_Selection/CASA_Sim/plotRMSbriggs.py | UTF-8 | 1,038 | 2.859375 | 3 | [
"MIT"
] | permissive | import pickle
import pylab as pl
f = open('c40-1.rms.brigss.pickle')
datac401 = pickle.load(f)
f.close()
f = open('c40-2.rms.brigss.pickle')
datac402 = pickle.load(f)
f.close()
f = open('c40-3.rms.brigss.pickle')
datac403 = pickle.load(f)
f.close()
f = open('c40-9.rms.brigss.pickle')
datac409 = pickle.load(f)
f.close()
x1 = datac401[0]
y1 = datac401[1]
print("Factor C40-1: %f"%(y1[62]/y1[-1]))
x2 = datac402[0]
y2 = datac402[1]
print("Factor C40-2: %f"%(y2[62]/y2[-1]))
x3 = datac403[0]
y3 = datac403[1]
print("Factor C40-3: %f"%(y3[62]/y3[-1]))
x9 = datac409[0]
y9 = datac409[1]
print("Factor C40-9: %f"%(y9[62]/y9[-1]))
# print("Factor RMS : %f \n"%(yy[62]/yy[-1]))
fig = pl.figure()
ax = fig.add_subplot(111)
ax.plot(x1, y1 /y1[-1],'k-')
ax.plot(x2, y2 /y2[-1],'g-')
ax.plot(x3, y3 /y3[-1],'r-')
ax.plot(x9, y9 /y9[-1],'c-')
#ax.set_xlim((-90.,40.))
#ax.set_ylim((0., 70.))
leg = ax.legend(('C40-1','C40-2','C40-3','C40-9'), 'upper right')
xx0 =[-2,2]
yy0 = [1.,1.]
ax.plot(xx0,yy0,'y--')
pl.show()
| true |
945dbe745342b7d98b0a61e33bd41cfee97c8a09 | Python | glennjw/Clinic-Appointment-Chatbot | /scripts/have_nurse.py | UTF-8 | 1,139 | 3 | 3 | [] | no_license | #!/usr/bin/env python
import sys
import pymysql.cursors
from datetime import datetime
# Persistent connection to the clinic database. The host/user/password/db
# values here are placeholders that must be filled in before deployment.
connection = pymysql.connect(host='IP',
                             user='clinicuser',
                             password='pwd',
                             db='DB_name')
cursor = connection.cursor()
def search_nurse(target):
    """Look up nurses by name and return a human-readable summary string.

    `target` may be a single token (matched against first OR last name) or a
    multi-word name (first token vs FirstName, last token vs LastName).
    Returns 'No <target> found in database.' when there is no match.
    """
    # SECURITY FIX: the query previously interpolated `target` directly into
    # the SQL string with %-formatting (SQL-injection-prone). Use pymysql's
    # parameter binding, which escapes the values.
    if len(target.split()) == 1:
        sql = "SELECT FirstName, LastName FROM nurses WHERE FirstName = %s OR LastName = %s"
        cursor.execute(sql, (target, target))
    else:
        sql = "SELECT FirstName, LastName FROM nurses WHERE FirstName = %s AND LastName = %s"
        cursor.execute(sql, (target.split()[0], target.split()[-1]))
    sql_result = cursor.fetchall()
    if len(sql_result) == 0:
        return ('No %s found in database.' % target)
    # Join every (first, last) pair into one comma-separated listing.
    sql_result_list = ['%s %s' % (row[0], row[1]) for row in sql_result]
    result = ', '.join(sql_result_list)
    return ('Yes, we have %d : %s' % (len(sql_result), result))
# CLI entry point: argv[1] is the (quoted) name to look up.
result = search_nurse(sys.argv[1])
print(result)
| true |
7bb7e7be7a0c2304c6d50c13791349aed43cedc4 | Python | MaggieYear/PyProject | /ifpack/testif.py | UTF-8 | 178 | 3.0625 | 3 | [] | no_license | number = '464981323'
mynumber = input('please input your number:')
if mynumber == number :
print('I have money ,buy house,buy car')
else:
print('I loss it ,continue .') | true |
026f55547af78e65358975a901d909fceec1af41 | Python | leobouts/Skyline_top_k_queries | /b_top_k.py | UTF-8 | 1,584 | 3.203125 | 3 | [
"MIT"
] | permissive | from generators import *
import heapq
def generate_top_join_b(number_of_valid_lines):
    """Yield joined (neg_sum, (r1, r2)) pairs, highest score-sum first.

    Hash-join: r2 rows are bucketed by age, r1 rows probe the buckets, and
    every matching pair is pushed on a heap keyed by the negated sum of the
    two score columns so the largest sums pop first.
    """
    # create the generators
    generator_for_r1 = generate_next_r1(number_of_valid_lines)
    generator_for_r2 = generate_next_r2(number_of_valid_lines)
    # Build phase: hash table mapping age -> list of r2 tuples. Iterating
    # the generator directly replaces the manual next()/StopIteration loop.
    r2_list = {}
    for r2_tup in generator_for_r2:
        age = r2_tup[1].replace(' ', '')
        r2_list.setdefault(age, []).append(r2_tup)
    # Probe phase: join every r1 row against its matching r2 bucket.
    q = []
    for r1_tup in generator_for_r1:
        age = r1_tup[1].replace(' ', '')
        # skip ages that never occur in the r2 input
        if age not in r2_list:
            continue
        for tup in r2_list[age]:
            f_sum = float(r1_tup[25]) + float(tup[25])
            # negate so the largest sum sits on top of the min-heap
            heapq.heappush(q, (-f_sum, (r1_tup, tup)))
    # BUG FIX: `while True` made heappop raise IndexError out of the
    # generator once the heap was drained; `while q` ends it cleanly
    # with StopIteration instead.
    while q:
        yield heapq.heappop(q)
| true |
0c0e6bb480efba443684f6cdd3ec024376197757 | Python | AvinEzio/Vaccination-App | /UserLogin/Login System/user_login.py | UTF-8 | 837 | 3.109375 | 3 | [] | no_license | import json
from os import access, read
def login():
    """Load stored users from user.json.
    NOTE(review): the loop only rebinds the locals `username`/`password`
    (ending on the last record) and nothing is returned, so this function
    currently has no observable effect — the caller re-prompts and checks
    hard-coded credentials itself. The loop variable `access` also shadows
    os.access imported at the top of the file.
    """
    with open("user.json","r") as x:
        user = json.load(x)
        for access in user:
            username = access["name"]
            password = access["phone number"]
def entry():
    """NOTE(review): dead/unfinished code. It iterates the raw file lines
    (not parsed JSON) and binds `username`/`password` to the literal lists
    ["name"] / ["phone number"], ignoring `access` entirely. Nothing calls
    this function's result; it needs a rewrite before use.
    """
    with open("user.json","r") as x:
        for access in x:
            username = ["name"]
            password = ["phone number"]
def menu():
    """Print the three-option main menu."""
    print("\n".join(("1.Login", "2.Sign Up", "3.Quit")))
while True:
    menu()
    user_input = input("Enter A Number To Proceed: ")
    if user_input == "1":
        login()
        # BUG FIX: the original condition
        #   input(...) and input(...) == "Alex" and 1234
        # compared only the *password* prompt against "Alex" and used the
        # literal 1234 as an always-true operand. Capture both answers and
        # compare each against its own expected credential.
        username = input("Enter Username: ")
        password = input("Enter Password; ")
        if username == "Alex" and password == "1234":
            print("Login Successful")
        else:
            print("login fail")
        # Leave the loop after one attempt, successful or not (previously
        # a successful login re-displayed the menu forever).
        break
    elif user_input in ("2", "3"):
        # BUG FIX: `user_input == "2" or "3"` was always truthy because the
        # bare string "3" is true; a membership test matches the intent.
        break
#*******************************************
fb3a3005db6f46b3f0fbeff26289959c2957c0d3 | Python | xcorter/purpleserver | /api/serializers.py | UTF-8 | 888 | 2.546875 | 3 | [] | no_license | from rest_framework import serializers
from api.models import Mark
class MarkSerializer(serializers.Serializer):
    """Serializes Mark objects: exposes the primary key as `key` plus the
    required `message` and `coordinate` fields."""
    key = serializers.CharField(read_only=True, source='id')
    message = serializers.CharField(required=True, allow_blank=False)
    coordinate = serializers.CharField(required=True)

    def create(self, validated_data):
        """Create and return a new `Mark` instance from validated input."""
        return Mark.objects.create(
            message=validated_data.get('message', None),
            coordinate=validated_data.get('coordinate', None),
        )

    def update(self, instance, validated_data):
        """
        Update and return an existing `Mark` instance, given the validated data.
        """
        # Keep the current value of any field absent from the payload.
        for attr in ('message', 'coordinate'):
            setattr(instance, attr, validated_data.get(attr, getattr(instance, attr)))
        instance.save()
        return instance
| true |
d1414dab28655f11d4e5069ca8ae3936f8403086 | Python | rafaelperazzo/programacao-web | /moodledata/vpl_data/59/usersdata/149/47168/submittedfiles/testes.py | UTF-8 | 171 | 3.28125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
n=int(input('digite o valor de n:'))
j=0
soma=0
# Work with |n| so a negative input produces the same number of terms.
if n<0:
    n=n*(-1)
# Accumulate soma = sum over i = 1..n of i / (n - i + 1); the denominator
# shrinks from n down to 1 as j counts up alongside i.
for i in range(1,n+1,1):
    s=i/(n-j)
    soma=soma+s
    j=j+1
print(soma)
01ba12ea7883b4c64728573f19b114605263351e | Python | haddocking/pdb-tools | /setup.py | UTF-8 | 4,190 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
from os import listdir, path
# io.open is needed for projects that support Python 2.7
# It ensures open() defaults to text mode with universal newlines,
# and accepts an argument to specify the text encoding
# Python 3 only projects can skip this import
from io import open
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# Collect names of bin/*py scripts
# e.g. 'pdb_intersect=bin.pdb_intersect:main',
binfiles = listdir(path.join(here, 'pdbtools'))
# Each pdbtools/<name>.py becomes a console-script entry point named
# <name>, wired to that module's main() function.
bin_py = [f[:-3] + '=pdbtools.' + f[:-3] + ':main' for f in binfiles
          if f.endswith('.py')]
setup(
    name='pdb-tools', # Required
    version='2.5.0', # Required
    description='A swiss army knife for PDB files.', # Optional
    long_description=long_description, # Optional
    long_description_content_type='text/markdown', # Optional (see note above)
    url='http://bonvinlab.org/pdb-tools', # Optional
    author='Joao Rodrigues', # Optional
    author_email='j.p.g.l.m.rodrigues@gmail.com', # Optional
    license='Apache Software License, version 2',
    classifiers=[ # Optional
        # How mature is this project? Common values are
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        'Development Status :: 5 - Production/Stable',
        #'Development Status :: 4 - Beta',
        # Indicate who your project is intended for
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Chemistry',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        # Pick your license as you wish
        'License :: OSI Approved :: Apache Software License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    keywords='bioinformatics protein structural-biology pdb', # Optional
    # You can just specify package directories manually here if your project is
    # simple. Or you can use find_packages().
    #
    # Alternatively, if you just want to distribute a single Python file, use
    # the `py_modules` argument instead as follows, which will expect a file
    # called `my_module.py` to exist:
    #
    #   py_modules=["my_module"],
    #
    packages=find_packages(), # Required
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # `pip` to create the appropriate form of executable for the target
    # platform.
    #
    # For example, the following would provide a command called `sample` which
    # executes the function `main` from this package when invoked:
    # bin_py is computed above from the pdbtools/ directory contents.
    entry_points={ # Optional
        'console_scripts': bin_py,
    },
    # scripts=[path.join('bin', f) for f in listdir(path.join(here, 'bin'))],
    # List additional URLs that are relevant to your project as a dict.
    #
    # This field corresponds to the "Project-URL" metadata fields:
    # https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
    #
    # Examples listed include a pattern for specifying where the package tracks
    # issues, where the source is hosted, where to say thanks to the package
    # maintainers, and where to support the project financially. The key is
    # what's used to render the link text on PyPI.
    project_urls={ # Optional
        'Bug Reports': 'https://github.com/haddocking/pdb-tools/issues',
        'Source': 'https://github.com/haddocking/pdb-tools',
    },
    # Test Suite (run via `python setup.py test`)
    test_suite='tests.test_all',
)
33c522ff3fe3f38d1d2223f59415130aa3bfa586 | Python | AdilsonVarela/Project_Big_Data-BA | /Test SAT.py | UTF-8 | 11,403 | 3.265625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# # Caso de estudio Test SAT
# In[2]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import scale
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# # Estudio del test SAT
# Vamos a analizar un estudio que recogió datos de los resultados del test SAT (un test estándar
# que se utiliza ampliamente para admisiones en USA) por estados, combinados con otros datos.
# El test SAT se utiliza para medir el rendimiento de los estudiantes y, compararlo con el gasto en
# educación. La idea es la de tratar de entender si hay una relación entre el gasto y los resultados.
# En este caso, el estudio es de un sólo año y se compara entre los diferentes estados de Estados
# Unidos.
# Es importante resaltar que el tema del impacto del gasto en los resultados es un tema recurrente
# en política educativa, y genera debates sobre la verdadera incidencia del gasto. Hay quien
# sostiene que no es el gasto lo importante, sino cómo se gasta el dinero, mientras que otras
# posiciones tienden a intentar igualar el gasto en favor de la igualdad de oportunidades.
# # variables:
# state: Nombre de los estados;
# expend: Gasto actual por alumno, en promedio de asistencia diaria en escuelas públicas, primarias y secundarias, 1994-95 (en miles de dólares);
# ratio: Promedio del ratio alumno/maestro en escuelas de primaria y secundaria públicas, otoño 1994;
# salary: Salario anual promedio de maestros en escuelas públicas de primaria y secundaria entre 194-95, en miles de dólares; frac: Porcentaje de estudiantes elegibles que realizaron el SAT, 1994-95 verbal: Promedio en calificación verbal de la
# puntuación SAT, 1994-95;
# math: Promedio en matemáticas de la puntuación SAT, 1994-95;
# sat: Promedio total de la puntuación SAT, 1994-95;
#
# # Se pide:
# Analizar gráficamente si los scores del test tienen una escala similar y los componentes
# tienen relación;
# Ordenar los datos y obtener los estados que más (y menos) gastan y los que mejores (y
# peores) resultados obtienen;
# Analizar de manera intuitiva con gráficos si hay relación entre las variables que indican
# resultados (las del test SAT) y las variables que indican gasto;
# Utilizar correlaciones y un modelo lineal básico como exploración numérica de ese tipo
# de relación gasto/resultados. ¿Cuál es la conclusión aparente?
# In[3]:
df_sat =pd.read_table("sat.dat.txt",delim_whitespace=True)
df =pd.DataFrame(df_sat)
# NOTE(review): a bare expression has no effect outside a notebook cell —
# this line only displayed the frame when run interactively.
df
df.columns=['State','expend','ratio','salary','frac','verbal','math','sat']
# # Información de Datos
# In[4]:
df.info()
# # Verificar valor missing
# In[18]:
df.isna().sum()
# In[19]:
df.isnull().sum()
# In[4]:
# Normalización de la Datos
#df_norm=df[df.columns[[1,2,3,4,5,6,7]]]
#df_norm =df_norm.apply(minmax_scale)
#df_norm
# # Sumarización de las datos
# In[98]:
df.describe()
# # Analizar gráficamente si los scores del test tienen una escala similar e relación entre variables.
# In[ ]:
sns.distplot(df_norm['math'],kde=True)
# In[30]:
max_verbal=df['verbal'].max()
print("Max verbal=",max_verbal)
media_verbal=round(df['verbal'].mean(),)
print("Media Verbal=",media_verbal)
min_verbal=df['verbal'].min()
print("Min verbal=",min_verbal)
# In[29]:
max_math=df['math'].max()
print("Max math=",max_math)
media_math=round(df['math'].mean(),)
print("Media math=",media_math)
min_math=df['math'].min()
print("Min math=",min_math)
# In[30]:
max_sat=df['sat'].max()
print("Max sat=",max_sat)
media_sat=round(df['sat'].mean(),)
print("Media sat=",media_sat)
min_sat=df['sat'].min()
print("Min sat=",min_sat)
# # Análise Gráfica
# In[110]:
plot_hist=sns.distplot(df['verbal'],kde=True)
plot_hist.set(xlabel='Pontuación Verbal')
# In[108]:
plot_hist=sns.distplot(df['math'],kde=True)
plot_hist.set(xlabel='Pontuación Math')
# In[107]:
plot_hist=sns.distplot(df['sat'],kde=True)
plot_hist.set(xlabel='Pontuación Sat')
# In[92]:
verbal= df['verbal']
math=df['math']
sat=df['sat']
box_plot_data=[verbal,math,sat]
plt.boxplot(box_plot_data,patch_artist=True,labels=['Verbal','Math','Sat'])
plt.title("Relación entre Verbal Math y Sat variables")
plt.show()
# In[115]:
sns.scatterplot(x='verbal',y='math',data=df)
# In[116]:
sns.scatterplot(x='verbal',y='sat',data=df)
# In[117]:
sns.scatterplot(x='math',y='sat',data=df)
# #### Discusión:
# Tenga en cuenta que a través del histograma las variables verbal y math tienen la misma escala, mientras que esto no se nota para la variable sat.
# Hicimos un diagrama de caja para analizar mejor las 3 variables, notamos que los valores mínimo, promedio y máximo están más cerca entre las variables verbal y math que sat. También se debe tener en cuenta que el puntaje promedio de Sat en matemática es más alto que el puntaje verbal promedio de las notas pero por debajo del puntaje promedio total de Sat.
# La pantalla del histograma muestra que los valores están más concentrados o más cerca del valor mínimo para ambas 3 variables y también puede reforzar la visualización a través de un diagrama de caja que indica claramente que la línea mediana está más cerca de los valores mínimos. También descubrí que los datos no tienen una distribución normal, por lo que se deben elegir pruebas no paramétricas para futuros análisis.
#
# A través de gráficos de puntos, notamos que existe una relación muy fuerte entre el puntaje promedio de Sat en matemáticas con el puntaje promedio total Sat en relación con el puntaje promedio de Sat en matemáticas con el promedio verbal de puntajes y el promedio verbal de puntajes con el puntaje promedio total de Sat. A medida que el puntaje promedio total de sat aumenta, el puntaje promedio de Sat en matemáticas también crece significativamente.
# # Análisis de Estados con más y menos gasto por alumno
# In[170]:
df2=df.iloc[:,[0,1]]
df3=df2.sort_values(['State','expend']).sort_values("expend", ascending=False)
index =[x for x in range(1,len(df3)+1)]
df3['index']=index
df3.set_index('index',inplace = True)
df3
# In[161]:
df4=df.sort_values(['State','expend']).sort_values("expend", ascending=False)
# In[165]:
plt.figure(figsize=(15,5))
sns.set(style="whitegrid")
sns.set_color_codes("pastel")
g = sns.barplot(x="State", y="expend", data=df4, palette='Set1')
g.axes.set_title('Estados con más y menos gasto por alumno', fontsize=18,color="black",alpha=2)
g.set_xlabel("Satate", size = 12,color="black")
g.set_ylabel("Expend", size = 12,color="black")
sns.despine(left=True, bottom=True)
g.set_xticklabels(g.get_xticklabels(),rotation=65,horizontalalignment='right')
plt.show()
# ### Discusión:
#
# Observamos a través de la tabla y el análisis gráfico que los tres estados con más gasto actual por estudiante son New Jersey ,New Yourk y Alaska, mientras que el estado con el gasto más pequeño es Utah con un promedio de $ 3656.
# # Análisis de Estados con más y menos gasto por maestro
# In[174]:
df5=df.iloc[:,[0,3]]
df5=df5.sort_values(['State','salary']).sort_values("salary", ascending=False)
index =[x for x in range(1,len(df5)+1)]
df5['index']=index
df5.set_index('index',inplace = True)
df5
# In[176]:
df6=df.sort_values(['State','salary']).sort_values("salary", ascending=False)
# In[178]:
plt.figure(figsize=(15,5))
sns.set(style="whitegrid")
sns.set_color_codes("pastel")
g = sns.barplot(x="State", y="salary", data=df4, color='blue')
g.axes.set_title('Estados con más y menos gasto por maestro', fontsize=18,color="black",alpha=2)
g.set_xlabel("Estado", size = 12,color="black")
g.set_ylabel("Salario", size = 12,color="black")
sns.despine(left=True, bottom=True)
g.set_xticklabels(g.get_xticklabels(),rotation=65,horizontalalignment='right')
plt.show()
# #### Discusión:
# Observamos que a través del análisis tabular y gráfico, los mismos estados que tuvieron el mayor gasto en alonos son los que también tuvieron con los maestros, y South Dakota es el estado que gasta menos en los maestros con un promedio de $ 25,994.
# # Análisis de rendimiento estatal con SAT más alto y más bajo
# In[186]:
df6=df.iloc[:,[0,7]]
df6=df6.sort_values(['State','sat']).sort_values("sat", ascending=False)
index =[x for x in range(1,len(df5)+1)]
df6['index']=index
df6.set_index('index',inplace = True)
df6
# In[194]:
df7=df.sort_values(['State','sat']).sort_values("sat", ascending=False)
plt.figure(figsize=(15,5))
sns.set(style="whitegrid")
sns.set_color_codes("pastel")
g = sns.barplot(x="State", y="sat", data=df7,palette="BuGn_d")
g.axes.set_title('Rendimiento estatal con SAT más alto y más bajo', fontsize=18,color="black",alpha=2)
g.set_xlabel("Estado", size = 12,color="black")
g.set_ylabel("SAT", size = 12,color="black")
sns.despine(left=True, bottom=True)
g.set_xticklabels(g.get_xticklabels(),rotation=65,horizontalalignment='right')
plt.show()
# #### Discusión:
# Por increíble que parezca, notamos a través de la tabla y el análisis gráfico que aquí el escenario se ha revertido. Los estados que tenían más gastos en maestros y estudiantes tenían puntajes más bajos en sat, mientras que estados como Utah, south Dakota, North Dakota, que tenían menos gastos tanto en estudiantes como en maestros, tenían mejores puntajes por sat.
# In[260]:
df[['expend', 'sat']].plot(figsize=(15, 4), title='Relación entre gasto del alumno y puntaje Sat ', grid=True)
# In[259]:
df[['salary', 'sat']].plot(figsize=(15, 4), title='Relación entre salario y puntaje Sat ', grid=True)
# #### Discusión:
# Notamos a partir de estos dos gráficos que ambas líneas están muy aisladas entre sí, lo que justifica una relación débil entre las 3 variables. Pero calculemos la correlación entre ellos y las gráficas de la relación entre ellos para sacar una mejor conclusión.
# # Análisis de correlación
# In[253]:
round(df['expend'].corr(df['sat']),3)
# In[252]:
round(df['salary'].corr(df['sat']),3)
# In[5]:
round(df['salary'].corr(df['math']),3)
# In[6]:
round(df['expend'].corr(df['math']),3)
# In[7]:
round(df['expend'].corr(df['verbal']),3)
# In[8]:
round(df['salary'].corr(df['verbal']),3)
# In[11]:
lm = sns.lmplot('expend', 'sat', data=df, aspect=2.5, order=3)
lm.ax.set_xlabel('Expend')
# In[257]:
lm = sns.lmplot('salary', 'sat', data=df, aspect=2.5, order=3)
lm.ax.set_xlabel('Salary')
# In[12]:
lm = sns.lmplot('expend', 'math', data=df, aspect=2.5, order=3)
lm.ax.set_xlabel('Expend')
# In[13]:
lm = sns.lmplot('expend', 'verbal', data=df, aspect=2.5, order=3)
lm.ax.set_xlabel('Expend')
# #### Discusión:
# La correlación entre el gasto (salario del maestro, gasto promedio por alumno) y el puntaje Sat, verbal, matematica mostró una correlación negativa. Reforzamos la conclusión basada en el análisis gráfico que muestra claramente que cuanto mayor es el gasto, menor es el puntaje de Sat.
# ##### ------------------------------------------------------------------------ Fim------------------------------------------------------------------------------------------------------
# ###### Por: Adilson Varela
# ###### Ps: la escritura fue en portugués y traducida al español con google translate
# In[ ]:
| true |
cddf8eae03d3346db03b58b8eca5dbe48aef7974 | Python | hannuxx/experiments | /vortex.py | UTF-8 | 436 | 2.625 | 3 | [] | no_license | #!/usr/bin/python3
import socket
import struct
# OverTheWire "vortex" level 0: the server sends four unsigned 32-bit
# integers; reply with their sum as a single unsigned 32-bit integer.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("vortex.labs.overthewire.org", 5842))
ix = []
total = 0
for i in range(4):
    chunk = s.recv(4)
    unp = struct.unpack('I', chunk)
    ix.append(unp[0])
    total = total + unp[0]
    print('chunk: ', chunk, ' -- struct.unpack: ', unp)
print('Sum: ', total, ' -- ix: ', ix)
# BUG FIX: four uint32 values can sum past 2**32 - 1, which made
# struct.pack('I', ...) raise struct.error; mask down to 32 bits first.
# (Also renamed `sum` -> `total` so the builtin is no longer shadowed.)
s.send(struct.pack('I', total & 0xFFFFFFFF))
chunk = s.recv(256)
print(chunk)
| true |
7762c923727b9536c086cc3780d0fa46d264f5c6 | Python | MandyMeindersma/BFEX | /web/bfex/components/key_generation/key_generation_approach.py | UTF-8 | 592 | 2.703125 | 3 | [] | no_license | from abc import ABC
import json
class KeyGenerationApproach(ABC):
    """Key Generation Approach interface for subclasses to inherit from.
    NOTE(review): the two methods below are plain `pass` stubs rather than
    @abstractmethod, so incomplete subclasses still instantiate silently
    and calls to unimplemented methods return None.
    """
    def __init__(self, approach_id, description):
        """
        :approach_id The assigned id for the algorithm
        :description Brief description on the approach
        """
        self.approach_id = approach_id
        self.description = description
    def generate_keywords(self,text):
        """Keyword generation algorithm implementation (override in subclass)."""
        pass
    def get_id(self):
        """Return the approach id (override in subclass)."""
        pass
2dfbba78a64838b7b4b979c591976c6f378cea11 | Python | tesserata/PyVK | /objects/_attr_types.py | UTF-8 | 2,839 | 2.609375 | 3 | [] | no_license | '''
The file describes custom datatypes for VK object fields declaration. Each type
is defined by a function testing whether its argument matches the datatype. The
function has to be named `test_<type_alias>`, had a single argument, and return
values interpretable as Boolean.
'''
## Common types.
def test_intp(x):
'''
Positive integers.
'''
return type(x) is int and x > 0
def test_intpz(x):
'''
Positive integers and zero.
'''
return type(x) is int and x >= 0
def test_flag(x):
'''
Binary flag: {0, 1}.
'''
return type(x) is int and 0 <= x <= 1
## Group
def test_int_0_1_2(x):
'''
Integers {0, 1, 2}.
'''
return type(x) is int and 0 <= x <= 2
def test_int_1_2_3(x):
'''
Integers {1, 2, 3}.
'''
return type(x) is int and 1 <= x <= 3
def test_group_deactivated(x):
return x == 'deleted' or x == 'banned'
def test_group_type(x):
return x == 'group' or x == 'page' or x == 'event'
## User
def test_user_deactivated(x):
return x == 'deactivated' or x == 'banned'
def test_hidden(x):
return x == 1
def test_relation_int(x):
return x== 0 or x == 1 or x == 2 or x == 3 or x == 4 or x == 5 or x == 6 or x == 7
def test_int_1_2(x):
return x == 1 or x == 2
def test_int_0_1_2_3(x):
return x == 0 or x == 1 or x == 2 or x == 3
def test_occupation_type(x):
return x == 'work' or x == 'school' or x == 'unversity'
def test_political(x):
return x == 1 or x == 2 or x == 3 or x == 4 or x == 5 or x == 6 or x == 7 or x == 8 or x == 9
def test_people_main(x):
return x == 1 or x == 2 or x == 3 or x == 4 or x == 5 or x == 6
def test_life_main(x):
return x == 1 or x == 2 or x == 3 or x == 4 or x == 5 or x == 6 or x == 7 or x == 8
def test_view(x):
return x == 1 or x == 2 or x == 3 or x == 4 or x == 5
## Message
def test_push_settings(x):
return x == 'sound' or x == 'disabled_until'
def test_action_str(x):
return x == 'chat_photo_update' or x == 'chat_photo_remove' or x == 'chat_create' or x == 'chat_title_update' or x == 'chat_invite_user' or x == 'chat_kick_user'
def test_int1(x):
return x == 1
## Attachments
def test_attach_type(x):
return (x == 'photo' or x == 'video' or x == 'audio' or x == 'doc' or x == 'wall' or x == 'wall_reply' or x == 'sticker'
or x == 'posted_photo' or x == 'graffiti' or x == 'link' or x == 'note' or x == 'app' or x == 'poll' or x == 'page'
or x == 'album' or x == 'photos_list')
## Privacy
def test_privacy_str(x):
return x == 'all' or x == 'friends' or x == 'friends_of_friends'
def test_privacy_int(x):
return type(x) is int and x != 0
def test_privacy_list(x):
try:
a = int(x[4:])
except ValueError:
return False
else:
return x[:4] == 'list'
| true |
cc9662c81100f7bca8d6cdffcbd85ff9e1e8f69b | Python | perrystallings/newbot | /app/bot/conversations/create_project/material_menu.py | UTF-8 | 3,760 | 2.84375 | 3 | [] | no_license | def respond(sender_id, message_text, attachment_type, attachment_url, postback, quick_reply, context):
from bot.models import Material
from bot.lib.maker import get_maker_id
from .utilities import format_supply_carousel, send_materials
"""Takes in ``sender_id``, ``message_text``= add or select, ``quick_reply`` = add or select
``context``= project id and updates project and sends a reponse.
:param str sender_id: The unique id created by facebook and the current facebook's sender's ID
:param str quick_reply: an automatic reply
:param str message_text: Any text written in the chat interface
:param dict context: attributes sent between conversations
:param str attachment_type: dentifies attachment type i.e photo (optional, defaults to None)
:param str attachment_url: The location of the attachment (optional, defaults to None)
:param str postback: a reponse sent from the user clicking a button (optional, defaults to None)
:returns: ``reponse``a dict with the next message to move the conversation
``new_context`` context project id, and ``coverstation`` dict containing
the next stage and task for the the bot
"""
action = 'add'
if (quick_reply and 'select' in quick_reply.lower()) or (message_text and 'select' in message_text.lower()):
action = 'select'
conversation = dict(name='create_project', stage='{0}_material'.format(action))
new_context = context
if action == 'select':
maker_id = get_maker_id(sender_id=sender_id)
materials = Material.objects.filter(maker_id=maker_id)
if materials.count() <= 2:
response = dict(attachment=format_supply_carousel(
supply_query_set=materials))
elif materials.count() > 2:
response = send_materials(sender_id=sender_id)
else:
response = dict(message_text='Take a photo of your material')
return response, new_context, conversation
def validate(sender_id, message_text, attachment_type, postback, quick_reply):
"""Boolean takes in ``message_text``= add or select or ``postback`` == add or select
and determines if the message type and text is valid.
:param str sender_id: The unique id created by facebook and the current facebook's sender's ID
:param str quick_reply: an automatic reply
:param str message_text: Any text written in the chat interface (optional, defaults to None)
:param str attachment_type: Identifies attachment type i.e photo (optional, defaults to None)
:param str postback: a reponse sent from the user clicking a button (optional, defaults to None)
:returns: Booleen and a dict with message text and quick replies if the message is not valid """
if quick_reply and quick_reply.lower() in ['add_material', 'select_material']:
return True, dict()
elif message_text and (
'add' in message_text.lower() or 'select' in message_text.lower()) and 'material' in message_text.lower():
return True, dict()
else:
return False, dict(message_text="I'm sorry, did you want to add a new material or select an existing one?",
quick_replies=[
{
"content_type": "text",
"title": "New Material",
"payload": "ADD_MATERIAL",
},
{
"content_type": "text",
"title": "Select Material",
"payload": "SELECT_MATERIAL",
},
]
)
| true |
f8edc562327c500a9aada9c8462b75f4522a4070 | Python | qingqing01/droidlet | /craftassist/sentry/pull_logs.py | UTF-8 | 1,788 | 2.6875 | 3 | [
"MIT"
] | permissive | import argparse
import os
import requests
def pull_logs(org, project, status, keyword):
print("Pulling logs...")
auth_token = os.getenv("SENTRY_AUTH_TOKEN")
url = "https://sentry.io/api/0/projects/{}/{}/issues/".format(org, project)
params = {"query": "{} {}".format(status, keyword)}
headers = {"Authorization": "Bearer {}".format(auth_token)}
result = []
response = requests.get(url, params=params, headers=headers)
result.extend(response.json())
link = response.headers.get("Link")
while 'rel="next"; results="true"' in link:
print("Pulling logs...")
start = link.find(", <") + 3
end = link.find('>; rel="next"; results="true";')
next_link = link[start:end]
response = requests.get(next_link, headers=headers)
result.extend(response.json())
link = response.headers.get("Link")
return result
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"--status",
default="is:unresolved",
help="status of issues, can be ['is:unresolved', 'is:resolved', 'is:ignored', 'is:assigned', 'is:unassigned']",
)
parser.add_argument("--org", default="craftassist", help="sentry organization slug")
parser.add_argument("--project", default="craftassist", help="sentry project slug")
parser.add_argument("--keyword", default="", help="search query keyword")
parser.add_argument("--save_to", default="", help="search result save path")
args = parser.parse_args()
result = pull_logs(args.org, args.project, args.status, args.keyword)
with open(args.save_to, "w") as f:
for e in result:
f.write(str(e))
f.write("\n")
| true |
3b9ff5c0e59badb92bfbd91b09b4fd7b215530e8 | Python | maxisses/family-dashboard | /imageprocessing/test.py | UTF-8 | 274 | 2.71875 | 3 | [] | no_license | # link prediction to label
f=open("tf-model/labels.txt", "r")
contents=f.readlines()
f.close()
final_labels = []
for i in range(len(contents)):
split_line = contents[i].split(" ")
final_labels.append(split_line)
print(type(final_labels))
print(final_labels[0][1])
| true |
2e6e71f9f5bbc21561b74584840c0e3245a205cb | Python | Neelesh1121/Representation-Learning-for-Information-Extraction | /utils/vocabulary.py | UTF-8 | 1,086 | 3.28125 | 3 | [
"Apache-2.0"
] | permissive | from collections import Counter
import warnings
from utils import str_utils
class VocabularyBuilder():
"""Vocabulary builder class to generate vocabulary."""
def __init__(self, max_size = 512):
self._words_counter = Counter()
self.max_size = max_size
self._vocabulary = { '<PAD>':0, '<NUMBER>':1, '<RARE>':2 }
self.built = False
def add(self, word):
if not str_utils.is_number(word):
self._words_counter.update([word.lower()])
def build(self):
for word, count in self._words_counter.most_common(self.max_size):
self._vocabulary[word] = len(self._vocabulary)
print(f"Vocabulary of size {len(self._vocabulary)} built!")
self.built = True
return self._vocabulary
def get_vocab(self):
if not self.built:
warnings.warn(
"The vocabulary is not built. Use VocabularyBuilder.build(). Returning default vocabulary.", Warning)
return self._vocabulary
else:
return self._vocabulary
| true |
a2e3abdaf449da84d06d32cdd0fae24666114f99 | Python | StephenDeSalvo/CombinatorialProbability | /combinatorics/CombinatorialStructure.py | UTF-8 | 522 | 3.265625 | 3 | [] | no_license |
class CombinatorialStructure:
def __init__(self, object_reference):
self.object_reference = object_reference
def initialize(self, combinatorial_object):
self.combinatorial_object = combinatorial_object
def __iter__(self):
return self
def __next__(self, **kwargs):
self.combinatorial_object, flag = self.object_reference.next_object(self.combinatorial_object, **kwargs)
if flag:
raise StopIteration()
return self.combinatorial_object
| true |
9f63e79a8329ef3eea91a21a0c3a29e1bae57c62 | Python | no2key/lets-encrypt-preview | /letsencrypt/acme/jose_test.py | UTF-8 | 3,773 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | """Tests for letsencrypt.acme.jose."""
import pkg_resources
import unittest
import Crypto.PublicKey.RSA
RSA256_KEY = Crypto.PublicKey.RSA.importKey(pkg_resources.resource_string(
'letsencrypt.client.tests', 'testdata/rsa256_key.pem'))
RSA512_KEY = Crypto.PublicKey.RSA.importKey(pkg_resources.resource_string(
'letsencrypt.client.tests', 'testdata/rsa512_key.pem'))
class JWKTest(unittest.TestCase):
"""Tests fro letsencrypt.acme.jose.JWK."""
def setUp(self):
from letsencrypt.acme.jose import JWK
self.jwk256 = JWK(key=RSA256_KEY.publickey())
self.jwk256json = {
'kty': 'RSA',
'e': 'AQAB',
'n': 'rHVztFHtH92ucFJD_N_HW9AsdRsUuHUBBBDlHwNlRd3fp5'
'80rv2-6QWE30cWgdmJS86ObRz6lUTor4R0T-3C5Q',
}
self.jwk512 = JWK(key=RSA512_KEY.publickey())
self.jwk512json = {
'kty': 'RSA',
'e': 'AQAB',
'n': '9LYRcVE3Nr-qleecEcX8JwVDnjeG1X7ucsCasuuZM0e09c'
'mYuUzxIkMjO_9x4AVcvXXRXPEV-LzWWkfkTlzRMw',
}
def test_equals(self):
self.assertEqual(self.jwk256, self.jwk256)
self.assertEqual(self.jwk512, self.jwk512)
def test_not_equals(self):
self.assertNotEqual(self.jwk256, self.jwk512)
self.assertNotEqual(self.jwk512, self.jwk256)
def test_to_json(self):
self.assertEqual(self.jwk256.to_json(), self.jwk256json)
self.assertEqual(self.jwk512.to_json(), self.jwk512json)
def test_from_json(self):
from letsencrypt.acme.jose import JWK
self.assertEqual(self.jwk256, JWK.from_json(self.jwk256json))
# TODO: fix schemata to allow RSA512
#self.assertEqual(self.jwk512, JWK.from_json(self.jwk512json))
# https://en.wikipedia.org/wiki/Base64#Examples
B64_PADDING_EXAMPLES = {
'any carnal pleasure.': ('YW55IGNhcm5hbCBwbGVhc3VyZS4', '='),
'any carnal pleasure': ('YW55IGNhcm5hbCBwbGVhc3VyZQ', '=='),
'any carnal pleasur': ('YW55IGNhcm5hbCBwbGVhc3Vy', ''),
'any carnal pleasu': ('YW55IGNhcm5hbCBwbGVhc3U', '='),
'any carnal pleas': ('YW55IGNhcm5hbCBwbGVhcw', '=='),
}
B64_URL_UNSAFE_EXAMPLES = {
chr(251) + chr(239): '--8',
chr(255) * 2: '__8',
}
class B64EncodeTest(unittest.TestCase):
"""Tests for letsencrypt.acme.jose.b64encode."""
@classmethod
def _call(cls, data):
from letsencrypt.acme.jose import b64encode
return b64encode(data)
def test_unsafe_url(self):
for text, b64 in B64_URL_UNSAFE_EXAMPLES.iteritems():
self.assertEqual(self._call(text), b64)
def test_different_paddings(self):
for text, (b64, _) in B64_PADDING_EXAMPLES.iteritems():
self.assertEqual(self._call(text), b64)
def test_unicode_fails_with_type_error(self):
self.assertRaises(TypeError, self._call, u'some unicode')
class B64DecodeTest(unittest.TestCase):
"""Tests for letsencrypt.acme.jose.b64decode."""
@classmethod
def _call(cls, data):
from letsencrypt.acme.jose import b64decode
return b64decode(data)
def test_unsafe_url(self):
for text, b64 in B64_URL_UNSAFE_EXAMPLES.iteritems():
self.assertEqual(self._call(b64), text)
def test_input_without_padding(self):
for text, (b64, _) in B64_PADDING_EXAMPLES.iteritems():
self.assertEqual(self._call(b64), text)
def test_input_with_padding(self):
for text, (b64, pad) in B64_PADDING_EXAMPLES.iteritems():
self.assertEqual(self._call(b64 + pad), text)
def test_unicode_with_ascii(self):
self.assertEqual(self._call(u'YQ'), 'a')
def test_non_ascii_unicode_fails(self):
self.assertRaises(ValueError, self._call, u'\u0105')
def test_type_error_no_unicode_or_str(self):
self.assertRaises(TypeError, self._call, object())
if __name__ == '__main__':
unittest.main()
| true |
725ca4d0c8838ecd02005c6d6eb69becc2b12d14 | Python | fishbigger/Python | /LibrarySystemV1.py | UTF-8 | 5,576 | 2.703125 | 3 | [] | no_license | from tkinter import *
def Return():
titleEntry.grid_forget()
authorEntry.grid_forget()
barcodeEntry.grid_forget()
addBookButton.grid_forget()
titleLabel.grid_forget()
authorLabel.grid_forget()
barcodeLabel.grid_forget()
takeOutButton2.grid_forget()
quitButton.grid_forget()
yearLabel.destroy()
yearEntry.grid_forget()
takeOutLabel.grid_forget()
takeOutButton3.grid_forget()
searchButton.grid_forget()
barcodeEntry2.grid_forget()
printData.grid_forget()
newBookButton.grid(row = 0)
takeOutButton.grid(row = 1)
getDataButton.grid(row = 2)
def search():
searchButton.grid_forget()
barcodeEntry.grid_forget()
barcodeLabel.grid_forget()
quitButton.grid(row = 0)
barcode = barcodeEntry.get()
if barcode in library:
if library[barcode]['in']:
takeOutLabel.grid(row = 2, column = 0)
takeOutButton2.grid(row = 0, column = 2)
else:
Return()
def finalyTakeOut():
bookBarcode = barcodeEntry.get()
personBarcode = barcodeEntry2.get()
library[bookBarcode]['in'] = False
library[bookBarcode]['who_got_it'] = personBarcode
library[bookBarcode]['year'] = yearEntry.get()
setData()
def takeOut():
newBookButton.grid_forget()
takeOutButton.grid_forget()
getDataButton.grid_forget()
#takeOutLabel.grid_forget()
quitButton.grid(row = 1)
searchButton.grid(row = 1, column = 1)
barcodeEntry.grid(row = 0, column = 1)
barcodeLabel.grid(row = 0, column = 0)
def takeOutBook():
takeOutButton2.grid_forget()
takeOutLabel.grid_forget()
barcodeEntry2.grid(row = 1, column = 1)
barcodeLabel.grid(row = 1, column = 0)
yearEntry.grid(row = 2, column = 1)
yearLabel.grid(row = 2, column = 0)
takeOutButton3.grid(row = 0, column = 1)
def setData():
Return()
with open('library.txt', 'w'): pass
f = open('Library.txt', 'r+')
f.write( str(library) )
f.close()
def addBook(Author, Title):
book = {}
book['Author'] = Author
book['Title'] = Title
book['in'] = True
book['who_got_it'] = None
book['year'] = None
book['name'] = None
return book
def newBookEntry():
newBookButton.grid_forget()
takeOutButton.grid_forget()
getDataButton.grid_forget()
titleLabel.grid(row = 0)
authorLabel.grid(row = 1)
barcodeLabel.grid(row = 2)
quitButton.grid(row = 3)
titleEntry.grid(row = 0, column = 1)
authorEntry.grid(row = 1, column = 1)
barcodeEntry.grid(row = 2, column = 1)
addBookButton.grid(row = 3, column = 1)
def getData():
f = open('Library.txt', 'r+')
file = f.read()
library = eval(file)
return library
def addBook():
library[barcodeEntry.get()] = addBook(authorEntry.get(), titleEntry.get())
setData()
def askData():
newBookButton.grid_forget()
takeOutButton.grid_forget()
getDataButton.grid_forget()
yearLabel.grid(column = 0)
yearEntry.grid(column = 1, row = 0)
printButton.grid(column = 1, row = 1)
def Print():
global toPrint
yearLabel.grid_forget()
yearEntry.grid_forget()
printButton.grid_forget()
stringList = []
for barcode in library:
if library[barcode][year] == yearEntry.get():
string = library[barcode][who_got_it] + '-'
string += library[barcode][name] + '-'
string += library[barcode][Title]
master = Tk()
master.title('librarySystem')
#master.config(bg = '#006400')
global library
library = getData()
global newBookButton
global takeOutButton
global getDataButton
newBookButton = Button(master, text = 'NewBook', command = newBookEntry)
takeOutButton = Button(master, text = 'TakeOut', command = takeOut)
getDataButton = Button(master, text = 'GetData', command = askData)
newBookButton.grid(row = 0)
takeOutButton.grid(row = 1)
getDataButton.grid(row = 2)
global titleEntry
global authorEntry
global barcodeEntry
global barcodeEntry2
global takeOutEntry
global yearEntry
global addBookButton
global searchButton
global quitButton
global takeOutButton2
global takeOutButton3
global printButton
global titleLabel
global authorLabel
global barcodeLabel
global takeOutLabel
global yearLabel
titleEntry = Entry(master)
authorEntry = Entry(master)
barcodeEntry = Entry(master)
barcodeEntry2 = Entry(master)
yearEntry = Entry(master)
titleLabel = Label(master, text = 'Title: ')
authorLabel = Label(master, text = 'Author: ')
barcodeLabel = Label(master, text = 'Barcode: ')
takeOutLabel = Label(master, text = 'Would you like to take it out?')
yearLabel = Label(master, text = 'Year: ')
addBookButton = Button(master, text = 'AddBook', command = addBook)
searchButton = Button(master, text = 'Search', command = search)
quitButton = Button(master, text = 'Quit', command = Return)
takeOutButton2 = Button(master, text = 'takeOut?', command = takeOutBook)
takeOutButton3 = Button(master, text = 'takeOut?', command = finalyTakeOut)
printButton = Button(master, text = 'printData?', command = Print)
#while True:
# f = open('Library.txt', 'r+')
# file = f.read()
# library = eval(file)
#
# barcode = input('Barcode: ')
# if barcode in library:
# print('Title: {0}'.format(library[barcode]['Title']))
# print('Author: {0}'.format(library[barcode]['Author']))
# else:
# print('Book not in library!')
#
# with open('library.txt', 'w'): pass
# f = open('Library.txt', 'r+')
# f.write( str(library) )
# f.close()
| true |
f836f9c3095603773dd02d35307711ad088632e3 | Python | nuaa/bid_test | /ocpx/pid_simulator.py | UTF-8 | 1,404 | 2.84375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# @Version : 1.0
# @Time : 2019-10-15
# @Author : bading
# @File : pid_simulator.py
kp = 0.78
ki = 0.05
kd = 0.35
pid_err_hist = [0, 0, 0]
last_pid = 21371
def pid_core(set_val, act_val):
pid_err_hist[0] = set_val - act_val
print("curr pid err hist: {} ...".format(pid_err_hist))
inc = kp * pid_err_hist[0] - ki * pid_err_hist[1] + kd * pid_err_hist[2]
pid_err_hist[2] = pid_err_hist[1]
pid_err_hist[1] = pid_err_hist[0]
return inc
def get_new_price(consume, click):
act_val = consume / 1000000.0 / (click + 0.0001) * 10000
pid_inc = pid_core(10000, act_val)
if abs(pid_inc) >= 30000:
pid_inc = pid_inc/abs(pid_inc)*30000
curr_pid = last_pid + pid_inc
print("curr cpc: {}, last pid: {}, curr pid inc: {}, curr pid: {} ...".format(
act_val, last_pid, pid_inc, curr_pid))
return curr_pid
if __name__ == "__main__":
from utils.io_util import read_file
# rt_hist = [(7834500, 1084, 6), (16241410, 1649, 8), (24330900, 2234, 16)]
rt_hist = read_file("../data/pid_test", [int, int, int], False)
counter = 0
for tmp_rt in rt_hist:
counter += 1
if counter == 20:
# break
last_pid = 20
print("=======================================")
last_pid = get_new_price(tmp_rt[2], tmp_rt[1])
# print(last_pid)
| true |
4ae4a81ea72caeb2cc803c4d9ee6be8ee8dd1311 | Python | Tabish-99/Face-Recognition | /KNN face train.py | UTF-8 | 611 | 2.59375 | 3 | [] | no_license | from sklearn.neighbors import KNeighborsClassifier
import face_recognition as fr
import os
import pickle
import json
fp = './face_data.json'
K = 3
model = KNeighborsClassifier(n_neighbors=K,algorithm='ball_tree',leaf_size=10)
X = []
Y = []
with open(fp,'r') as f:
lod = json.load(f)
for fd in lod:
name = fd['name']
print(f'Found face encodings for {name}')
for enc in fd['encodings']:
X.append(enc)
Y.append(name)
print('Fitting Model:')
model.fit(X,Y) #TODO: mini batch training or smthin
with open('knn_face_clf','wb') as f:
pickle.dump(model,f)
print('Done!')
| true |
469244163b9e2364b4b723a40110d910375b5eef | Python | augusthacks/dpa-validation-station | /HSV_Python/graphing.py | UTF-8 | 1,879 | 2.609375 | 3 | [] | no_license | from matplotlib import pyplot as plot
import numpy
import pandas
filename200 = "Plots/KNOWN_200_2020-03-26_22-02-41.csv"
filename500 = "Plots/KNOWN_500_2020-03-26_22-03-18.csv"
filename1000 = "Plots/KNOWN_1000_2020-03-26_22-04-46.csv"
filename2500 = "Plots/KNOWN_2500_2020-03-26_22-08-09.csv"
filename5000 = "Plots/KNOWN_5000_2020-03-26_22-16-08.csv"
filename40000 = "Plots/KNOWN_40000_10000_2020-03-27_02-43-39.csv"
filename5Masked = "Plots/KNOWN_MASKED_5000_90000_2020-03-27_01-40-31.csv"
fiveMasked = pandas.read_csv(filename5Masked).to_numpy().astype('float')
traces200 = pandas.read_csv(filename200).to_numpy().astype('float')
traces500 = pandas.read_csv(filename500).to_numpy().astype('float')
traces1000 = pandas.read_csv(filename1000).to_numpy().astype('float')
traces2500 = pandas.read_csv(filename2500).to_numpy().astype('float')
traces5000 = pandas.read_csv(filename5000).to_numpy().astype('float')
traces40000 = pandas.read_csv(filename40000).to_numpy().astype('float')
maxIndex = numpy.where(traces40000 == numpy.amax(traces40000))[0][0]
xFit = numpy.linspace(200, 40000, 38000)
xScatter = 200, 500, 1000, 2500, 5000, 40000
y = traces200[maxIndex], traces500[maxIndex], traces1000[maxIndex], traces2500[maxIndex], traces5000[maxIndex], \
traces40000[maxIndex]
eq = numpy.poly1d(numpy.squeeze(numpy.polyfit(xScatter, y, 1)))
fourPos = numpy.zeros(90000)
fourPos += 4.5
fourNeg = numpy.zeros(90000)
fourNeg -= 4.5
plot.figure(1)
plot.title('T-Test: 5k UnMasked vs 5k Masked')
plot.ylabel("Similarity")
plot.xlabel("Time (Row of Trace)")
# plot.scatter(xScatter, y, color='green')
# plot.plot(xFit, eq(xFit), '--', color= 'orange')
# plot.plot(traces200)
# plot.plot(traces500)
# plot.plot(traces1000)
# plot.plot(traces2500)
plot.plot(traces5000)
# plot.plot(traces40000)
plot.plot(fiveMasked)
plot.plot(fourPos, 'k--')
plot.plot(fourNeg, 'k--')
plot.show()
| true |
1c50ae5414e183d8b6c92379517f9cf4a3e4089f | Python | Maquiwo/automatic_composition | /pyxmch.py | UTF-8 | 487 | 3.078125 | 3 | [] | no_license | class PyxelMusicChanger:
def change(tune):
parts = []
for par in tune:
part = ""
for ton in par:
tone = PyxelMusicChanger.tlanslate(ton)
part = part + tone
parts.append(part)
return parts
def tlanslate(tone):
if tone["name"] == "R":
return "R"
pyxelTone = tone["name"] + str(tone["octave"])
pyxelTone.replace('♭', '-')
return pyxelTone
| true |
9282cceff549478b1392672ccf914cb4b2add634 | Python | sayZeeel/DSAproject | /trash/hierarch.py | UTF-8 | 2,197 | 2.78125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import StandardScaler
import scipy.cluster.hierarchy as sch
from sklearn.cluster import AgglomerativeClustering
from sklearn import metrics
# import sys
# np.set_printoptions(threshold=sys.maxsize)
data = pd.read_csv('HTRU2/HTRU_2.csv',sep=',',header=0)
# print data.loc[0,:]
y = data["class"]
# print data.shape
r,c = data.shape
# print r,c
X = np.zeros((r,c-1))
cols = data.columns.tolist()
# print cols
#['m_ip', 'sd_ip', 'ek_ip', 'sk_ip', 'm_dmsnr', 'sd_dmsnr', 'ek_dmsnr', 'sk_dmsnr', 'class']
X[:,0] = data[cols[0]]
X[:,1] = data[cols[1]]
X[:,2] = data[cols[2]]
X[:,3] = data[cols[3]]
X[:,4] = data[cols[4]]
X[:,5] = data[cols[5]]
X[:,6] = data[cols[6]]
X[:,7] = data[cols[7]]
X = StandardScaler().fit_transform(X)
dendrogrm = sch.dendrogram(sch.linkage(X, method = 'ward'))
plt.title('Dendrogram')
plt.xlabel('Pulsars')
plt.ylabel('Euclidean distance')
plt.show()
affinities = ["euclidean", "l1", "l2", "manhattan", "cosine"]
linkages = ["ward", "complete", "average", "single"]
hc = AgglomerativeClustering(n_clusters = 2, affinity = affinities[0], linkage = linkages[1])
y_hc = hc.fit_predict(X)
# print type(y_hc)
# print "done"
# print y_hc
# y_hc[y_hc==1] = 2
# y_hc[y_hc==0] = 1
# y_hc[y_hc==2] = 0
# print len(y_hc[y_hc==0])
# print len(y_hc[y_hc==1])
y_vals = np.array(y.tolist())
corr_preds_tot = np.sum(y_hc==y_vals)
# print corr_preds_tot
print "Total number of data points: ",len(y_hc)
print "Total number of correct predictions: ",corr_preds_tot #in how many places both arrays have same elements
print "Overall Accuracy = ",corr_preds_tot*100.0/len(y_hc)
count = np.zeros((2),dtype=int)
for i in range(len(y_hc)):
if y_vals[i]==0 and y_vals[i]==y_hc[i]:
count[0] = count[0]+1
if y_vals[i]==1 and y_vals[i]==y_hc[i]:
count[1] = count[1]+1
# print count
print "Correctly predicted 0s: ",count[0]," out of 16,259"
print "Accuracy of 0s prediction: ", count[0]*100.0/16259
print "Correctly predicted 1s: ",count[1]," out of 1,639"
print "Accuracy of 1s prediction: ", count[1]*100.0/1639
print "\nMetrics are:",metrics.homogeneity_completeness_v_measure(y_vals,y_hc) | true |
4e62e271d064de92ec5b5c325a1b9080f95b09a7 | Python | Tigge/advent-of-code-2020 | /day21.py | UTF-8 | 1,656 | 2.921875 | 3 | [] | no_license | import functools
import operator
def parse(f):
lines = f.read().strip().split("\n")
for line in lines:
[ingredients, allergens] = line[:-1].split(" (contains ")
yield set(ingredients.split(" ")), set(allergens.split(", "))
with open("day21.txt", "r", encoding="utf-8") as f:
stuff = list(parse(f))
all_allergens = functools.reduce(
operator.or_, [safe_ingredient[1] for safe_ingredient in stuff]
)
all_ingredients = functools.reduce(
operator.or_, [safe_ingredient[0] for safe_ingredient in stuff]
)
possible = dict()
for allergen in all_allergens:
possible[allergen] = set(all_ingredients)
for ingredients, allergens in stuff:
if allergen in allergens:
possible[allergen] &= ingredients
allergen_ingredients = functools.reduce(operator.or_, possible.values())
safe_ingrediens = all_ingredients - allergen_ingredients
p1 = 0
for ingredients, _ in stuff:
for safe_ingredient in safe_ingrediens:
if safe_ingredient in ingredients:
p1 += 1
p1 = len(all_ingredients) - len(allergen_ingredients)
print(f"Part 1: {p1}")
p2 = ",".join(sorted(list(allergen_ingredients)))
for i in range(len(possible)):
for allergen, ingredients in possible.items():
if len(ingredients) == 1:
for allergen2, ingredients2 in possible.items():
if allergen != allergen2:
possible[allergen2] -= ingredients
p2 = ",".join([list(possible[k])[0] for k in sorted(possible.keys())])
print(f"Part 2: {p2}")
| true |
8131d22d701eac296ac0183579b4bbb6ef5ac685 | Python | Rafaelbs15/first_stepspython | /cap03_criarcalculadora.py | UTF-8 | 838 | 4.6875 | 5 | [] | no_license | print('*'*15, 'Python Calculadora','*'*15)
print('Selecione o número da equação desejada: ')
def add(x, y):
return x + y
def subtract(x, y):
return x - y
def multiply(x, y):
return x * y
def divide(x, y):
return x / y
calc = ['1 - Soma', '2 - Subtração', '3 - Multiplicação', '4 - Divisão']
for i in calc:
print(i)
escolha = input(' Digite sua opção (1/2/3/4): ')
num1 = int(input(' Digite o primeiro número: '))
num2 = int(input(' Digite o segundo número: '))
if escolha == '1':
print(num1, "+", num2, "=", add(num1, num2))
elif escolha == '2':
print(num1, "-", num2, "=", subtract(num1, num2))
elif escolha == '3':
print(num1, "X", num2, "=", multiply(num1, num2))
elif escolha == '4':
print(num1, "/", num2, "=", divide(num1, num2))
| true |
26565069cf167df6a7d65689efdcabeccc431ef3 | Python | dack-c/osp_repo2 | /py_lab2/myprog_pkg.py | UTF-8 | 338 | 2.90625 | 3 | [] | no_license | #!/usr/bin/python3
from my_pkg.bin_conversion import *
from my_pkg.union_inter import *
while True:
menu = int(input('Select menu: 1) conversion 2) union/intersection 3) exit ? '))
if menu == 1:
converse()
elif menu == 2:
calculate()
elif menu == 3:
print("exit the program...")
break
| true |
68605fa67d3a4fa423ca5515536b5f67fc1de4c2 | Python | MarHakopian/Intro-to-Python-HTI-3-Group-2-Marine-Hakobyan | /Homework_8/decorators.py | UTF-8 | 510 | 3.484375 | 3 | [] | no_license | import time
def warn_slow(func):
def inner(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
duration = end - start
if duration > 2:
print(f"execution of {func.__name__} with {(*args, *kwargs.values())} arguments took more than 2 seconds")
return result
return inner
@warn_slow
def func_slow(x, y):
time.sleep(3)
@warn_slow
def func_fast(x, y):
print(x, y)
func_slow(1, 2)
func_fast(1, 2) | true |
2562ec519bd9ab49538ce078eca3d67060f96820 | Python | annttu/Renkisrv | /libs/exceptions.py | UTF-8 | 1,505 | 2.84375 | 3 | [
"MIT"
] | permissive | # encoding: utf-8
class ConfigException(Exception):
def __init__(self, message):
self.msg = message
def __str__(self):
return self.msg
def __unicode__(self):
return unicode(self.__str__())
class ConfigTypeError(Exception):
def __init__(self, name, value, type):
self.name = name
self.value = value
self.type = type
def __str__(self):
return "Config setting %s value '%s' does not match type %s!" % (
self.name, self.value, self.type)
def __unicode__(self):
print unicode(self.__str__())
class ConfigValueError(Exception):
def __init__(self, name, value, values):
self.name = name
self.value = value
self.values = values
def __str__(self):
return "Config setting %s value '%s' does not match any allowed values %s!" % (
self.name, self.value, self.values)
def __unicode__(self):
print unicode(self.__str__())
class ConfigError(Exception):
def __init__(self, value, dependency=None):
self.value = value
self.dependency = dependency
def __str__(self):
if self.dependency:
return "Config setting %s is mandatory if module %s is set!" % (
self.value, self.dependency)
return "Config setting %s is mandatory!" % self.value
def __unicode__(self):
print unicode(self.__str__())
| true |
0413b1b71cd22987099272fa92a688b4c6e48950 | Python | marciorasf/python-playground | /leet_code/17_letter_combinations/test_solution.py | UTF-8 | 763 | 3.25 | 3 | [] | no_license | import unittest
from solution import letter_combinations
class TestSolution(unittest.TestCase):
def test_case_1(self):
digits = ""
expected_result = []
result = letter_combinations(digits)
self.assertEqual(result, expected_result)
def test_case_2(self):
digits = "2"
expected_result = ["a", "b", "c"]
result = letter_combinations(digits)
self.assertEqual(result, expected_result)
def test_case_3(self):
digits = "23"
expected_result = ["ad", "ae", "af",
"bd", "be", "bf", "cd", "ce", "cf"]
result = letter_combinations(digits)
self.assertEqual(result, expected_result)
if __name__ == '__main__':
unittest.main()
| true |
e983334c36d803112f6a9b36d84b0a891f588224 | Python | Kelly901/IPC2_Proyecto1_201900716 | /IPC2_Proyecto1_201900716/Nodo.py | UTF-8 | 350 | 2.6875 | 3 | [] | no_license | class Nodo:
#nombre= nombre de cada matriz
#n= la fila de matriz
#m= columna de la matriz
#x= la posicion en x que tendra el numero
#y= la posicion en y que tendra el numero
def __init__(self,x,y,numero):
self.x=x
self.y=y
self.numero=numero
self.enlace=None
| true |
1a80849968c09ffbff130840356edbe40215e2e9 | Python | wilsonmar/python-samples | /pandas-indexing.py | UTF-8 | 4,353 | 3.5 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# pandas-indexing.py in https://github.com/bomonike/fullest-stack/blob/main/python/caiq-yaml-gen/pandas-indexing.py
# by Wilson Mar - v0.3
# based on https://sparkbyexamples.com/pandas/iterate-over-rows-in-pandas-dataframe/
# (which has an analysis of the efficiency of each method as the dataset gets larger)
# https://www.geeksforgeeks.org/different-ways-to-iterate-over-rows-in-pandas-dataframe/
import pandas as pd
Technologys = ({
'Courses':["Spark","Spark","PySpark","Hadoop","Python","Pandas","Oracle","Java"],
'Fee' :[10000,20000,25000,26000,22000,24000,21000,22000],
'Duration':['15day','30day', '40days' ,'35days', '40days', '60days', '50days', '55days']
})
df = pd.DataFrame(Technologys)
# https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html
print("\r\n*** print(df) ")
print(df)
print("***\r\n")
print("\r\n*** Using DataFrame.iterrows() - most inefficient? ")
row = next(df.iterrows())[1]
print("Data For First Row :")
print(row)
print("***\r\n")
print("\r\n*** for index, row in df.iterrows()")
# https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.iterrows.html
for index, row in df.iterrows():
print (index,row["Fee"], row["Courses"])
print("***\r\n")
print("\r\n*** for index, columns in data.iterrows() ")
# From https://statisticsglobe.com/loop-through-index-pandas-dataframe-python
data = pd.DataFrame({'x1':['a', 'b', 'c', 'd'], # Create pandas DataFrame
'x2':['w', 'x', 'y', 'z']})
print(data)
for i, row in data.iterrows(): # Initialize for loop
print('Index', i, '- Column x1:', row['x1'], '- Column x2:', row['x2'])
# Index 0 - Column x1: a - Column x2: w
# Index 1 - Column x1: b - Column x2: x
# Index 2 - Column x1: c - Column x2: y
# Index 3 - Column x1: d - Column x2: z
print("***\r\n")
print("\r\n*** next(df.itertuples() -- as namedtuples ")
# iterate over DataFrame rows
row = next(df.itertuples(index = True, name='Tuition'))
print("Data For First Row :")
print(row)
print("***\r\n")
# https://www.w3resource.com/pandas/dataframe/dataframe-itertuples.php
print("\r\n*** for row in df3.itertuples(name='Animal') ")
df3 = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
index=['fox', 'eagle'])
for row in df3.itertuples(name='Animal'):
print(row)
# Animal(Index='fox', num_legs=4, num_wings=0)
# Animal(Index='eagle', num_legs=2, num_wings=2)
print("***\r\n")
print("\r\n*** for row in df.itertuples(index = True) ")
# https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.itertuples.html
for row in df.itertuples(index = True):
print (getattr(row,'Index'),getattr(row, "Fee"), getattr(row, "Courses"))
print("***\r\n")
print("\r\n*** df.apply ")
print(df.apply(lambda row: str(row["Fee"]) + " " + str(row["Courses"]), axis = 1))
print("***\r\n")
print("\r\n*** for idx in df.index ")
for idx in df.index:
print(idx, df['Fee'][idx], df['Courses'][idx])
print("***\r\n")
print("\r\n*** for i in range(len(df)) : df.loc ")
for i in range(len(df)) :
print(df.loc[i, "Fee"], df.loc[i, "Courses"])
print("***\r\n")
print("\r\n*** for i in range(len(df)) : df.iloc ")
for i in range(len(df)) :
print(df.iloc[i, 0], df.iloc[i, 2])
print("***\r\n")
print("\r\n*** for label, content in df.items() ")
for label, content in df.items():
print(f'label: {label}')
print(f'content: {content}', sep='\n')
print("***\r\n")
# And from https://stackoverflow.com/questions/36864690/iterate-through-a-dataframe-by-index
# staticData.apply((lambda x: (x.name, x['exchange'])), axis=1)
# https://stackabuse.com/how-to-iterate-over-rows-in-a-pandas-dataframe/
# https://pandas.pydata.org/docs/reference/api/pandas.Series.iteritems.html
# https://www.geeksforgeeks.org/different-ways-to-iterate-over-rows-in-pandas-dataframe/
###########
# What I really need: lookup values based on an indexed key column, then
# iterate through several values matching that key found.
# https://www.geeksforgeeks.org/indexing-and-selecting-data-with-pandas/
# Bug fixed: the original print call was missing its closing ')',
# which made the whole file a SyntaxError.
print("*** using .loc reading nba-2 ")
# making data frame from csv file, indexed by player name
data = pd.read_csv("nba-2.csv", index_col="Name")
# retrieving rows by label with .loc
first = data.loc["Avery Bradley"]
second = data.loc["R.J. Hunter"]
print(first, "\n\n\n", second)
print("***\r\n")
| true |
144b5e64a15176ab8e46405445b1a5e4ae437937 | Python | Rellikiox/advent-of-code | /2017/day5.py | UTF-8 | 1,198 | 4.125 | 4 | [] | no_license |
def plus_one(index_increment):
    """Part-one jump rule: every visited offset grows by one."""
    return 1 + index_increment
def plus_one_minus_one(index_increment):
    """Part-two jump rule: offsets of three or more shrink by one,
    smaller offsets grow by one."""
    if index_increment >= 3:
        return index_increment - 1
    return index_increment + 1
def calculate_jumps(increment_fn=None, input_path='day5.input'):
    """Count how many jumps it takes to escape the offset list.

    The puzzle input (one integer offset per line) is read from
    *input_path* -- parameterised with the original hard-coded file
    name as default, so the function can be exercised on any file.
    Starting at index 0 we repeatedly read the offset at the current
    index, rewrite that slot with increment_fn(offset) (defaults to
    plus_one), move by the original offset, and count one step. The
    step count is returned once the index leaves the list.

    Bug fixed: the original detected escape via IndexError, but a jump
    to a negative index silently *wrapped around* to the end of the
    list (Python negative indexing) instead of escaping; the explicit
    bounds check below handles both ends correctly.
    """
    if increment_fn is None:
        increment_fn = plus_one
    with open(input_path) as input_f:
        jump_list = [int(line.strip()) for line in input_f]
    index = 0
    jumps_made = 0
    while 0 <= index < len(jump_list):
        offset = jump_list[index]
        jump_list[index] = increment_fn(offset)
        index += offset
        jumps_made += 1
    return jumps_made
if __name__ == '__main__':
    # Part one: every offset increases by one after each use.
    # (print() calls replace the Python-2-only print statements so the
    # script also runs under Python 3.)
    print(calculate_jumps(plus_one))
    # Part two: offsets of three or more decrease by one instead.
    print(calculate_jumps(plus_one_minus_one))
| true |
baa5a9446b7436c5f281c8c7381ffcf78179e73b | Python | miso-belica/playground | /python/slots.py | UTF-8 | 907 | 2.984375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
class Class1(object):
    """Slots demo: instances may only ever have attributes a, b and c."""
    __slots__ = ("a", "b", "c")
class Class2(object):
    """Empty __slots__ and no inherited __dict__: the assignment to
    self._a in __init__ raises AttributeError, so instantiating this
    class always fails (that is the point of the demo)."""
    __slots__ = ()
    def __init__(self):
        self._a = ["a", "b", "c"]
class Slotless(object):
    """Ordinary class without __slots__; instances get a __dict__."""
    pass
class Derived(Slotless):
    """__slots__ here does NOT restrict attributes: the slot-less base
    class still contributes a per-instance __dict__, so assigning
    self._a succeeds."""
    __slots__ = ()
    def __init__(self):
        self._a = ["a", "b", "c"]
if __name__ == "__main__":
    # Attributes listed in __slots__ can be assigned normally.
    o1 = Class1()
    o1.a, o1.b, o1.c = 1, 1, 1
    # ...but any attribute outside the slots must be rejected.
    try:
        o1._d = 1
        raise ValueError("Attribute '_d' should raise exception")
    except AttributeError:
        pass
    # Empty __slots__ with no base __dict__ forbids all instance attrs.
    try:
        o2 = Class2()
        raise ValueError("Attribute '_a' should raise exception")
    except AttributeError:
        pass
    # A slot-less base class re-introduces __dict__, so this must work.
    try:
        o3 = Derived()
    except AttributeError:
        raise ValueError("Attribute '_a' shouldn't raise exception")
| true |
613712856263ef66e7b247f2ab0b668b9a8aa17c | Python | Wellsjian/20180826 | /xiaojian/first_phase/day06/code01.py | UTF-8 | 324 | 3.71875 | 4 | [] | no_license | '''
元组 tuple: 一系列变量组成的不可变序列
'''
# Creating tuples
# ----->> the empty tuple: via a literal and via the constructor
s = ()
s = tuple()
# ----->> a tuple built from existing values
# lists are mutable: they pre-allocate spare capacity
# tuples are read-only: they allocate exactly what is needed
s = tuple([1,2,3,4,5])
s1 = (2,3,4)
print(s+s1)
print(s*3)
| true |
66485415e57e738c4ae94b331a45c3e296d0671b | Python | jgemels/alien_invasion | /ship.py | UTF-8 | 1,148 | 3.875 | 4 | [] | no_license | import pygame
class Ship():
    """The player's ship: its image, position and movement flags."""
    def __init__(self, screen):
        """Load the ship image and place it centred at the bottom of *screen*."""
        self.screen = screen
        # Load the ship image and grab its bounding rectangles.
        self.image = pygame.image.load('images/ship.bmp')
        self.rect = self.image.get_rect()
        self.screen_rect = screen.get_rect()
        # Every new ship starts centred at the bottom edge of the screen.
        self.rect.centerx = self.screen_rect.centerx
        self.rect.bottom = self.screen_rect.bottom
        # Movement flags, toggled by the game's keyboard handling.
        self.moving_right = False
        self.moving_left = False
    def update(self):
        """
        Shift the ship one pixel per call in whichever direction(s)
        are currently flagged.
        """
        if self.moving_right:
            self.rect.centerx += 1
        if self.moving_left:
            self.rect.centerx -= 1
    def blitme(self):
        """Draw the ship at its current position."""
        self.screen.blit(self.image, self.rect)
| true |
def googleSearch(query):
    """Google *query* and map each Twitter screen name found among the
    results to the (0-based) position of the result it came from.

    Only profile URLs (https://twitter.com/<name>?lang=en, excluding
    /status/ links and hashtag pages) are kept; names are lower-cased.
    """
    from googlesearch import search
    prefix = "https://twitter.com/"
    suffix = "?lang=en"
    hits = {}
    for rank, url in enumerate(search(query, tld="com", num=100, lang="en")):
        print(url)
        is_profile = (url.startswith(prefix) and url.endswith(suffix)
                      and "/status/" not in url)
        if is_profile:
            name = url[len(prefix):len(url) - len(suffix)]
            if "hashtag/" not in name:
                hits[name.lower()] = rank
    return hits
def generateCSVFileWithUserInfo(collection, potentialUserToHitNumber, outputDir):
    """Dump selected profile fields for every user document in
    *collection* to <outputDir><collection name>.csv, prefixing each
    row with the user's web-search hit position looked up in
    *potentialUserToHitNumber* (keyed by lower-cased screen name)."""
    tweetCursor = collection.find({}, no_cursor_timeout=True)
    fieldsOfInterest = ['screenName', 'followers_count', 'location', 'name', 'created_at', 'description']
    # Header row first; data rows follow in cursor order.
    rows = [['WebHitNumber']+fieldsOfInterest]
    for userInfo in tweetCursor:
        row = [potentialUserToHitNumber[userInfo['screenName'].lower()]]
        for field in fieldsOfInterest:
            row.append(userInfo[field])
        print(row)
        rows.append(row)
    writeRowsToCSV(rows, outputDir+collection.name+".csv")
def formCommunities(screenNames, outputDir, minFriend, maxFriend, maxFollower, portInput):
    """Build a candidate community from the influencers' followers.

    Loads each influencer's pickled follower-id list from *outputDir*,
    keeps the ids that follow at least two of the influencers, then
    filters those by the friend/follower thresholds using the profile
    documents stored in each influencer's "followerInfo" Mongo
    collection. Returns the surviving follower ids as a set of strings.
    """
    screenNameToFollowers = {}
    for screenName in screenNames:
        userInfoPath = outputDir+str(screenName.lower())+".pickle"
        import pickle
        with open(userInfoPath, "rb") as fp:
            followers = pickle.load(fp)
        # Normalise every id to str so the set operations below compare
        # uniformly regardless of how the ids were pickled.
        followersSTR = set([])
        for follower in followers:
            try:
                followersSTR.add(str(follower))
            except:
                # NOTE(review): bare except silently drops the follower;
                # str() should rarely fail -- narrowing this would be safer.
                print("Error loading follower")
        followers = followersSTR
        screenNameToFollowers[screenName] = followers
    #iterate over possible pairs (i < j avoids duplicates and self-pairs)
    followersOfInterest = set([])
    for i in range(0,len(screenNames),1):
        for j in range(0,len(screenNames),1):
            if i < j:
                mutualFollowers = screenNameToFollowers[screenNames[i]].intersection(screenNameToFollowers[screenNames[j]])
                followersOfInterest = followersOfInterest.union(mutualFollowers)
    print(str(len(followersOfInterest)) + " followers of interest by iterating over every pair of influencers")
    from MongoDBInterface import getMongoClient
    client = getMongoClient(portInput)
    finalCommunity = set([])
    for db_name in screenNames:
        db = client[db_name]
        collection = db["followerInfo"]
        tweetCursor = collection.find({}, no_cursor_timeout=True)
        fieldsOfInterest = ['followers_count', 'friends_count']
        for userInfo in tweetCursor:
            if str(userInfo['id_str']) in followersOfInterest:
                if (userInfo['followers_count'] <= maxFollower and
                    userInfo['friends_count'] <= maxFriend and
                    userInfo['friends_count'] >= minFriend):
                    finalCommunity.add(str(userInfo['id_str']))
    print(str(len(finalCommunity)) + " final community after applying thresholds")
    return finalCommunity
def writeRowsToCSV(rows, fileToWriteToCSV):
    """Write *rows* (a list of row lists) to *fileToWriteToCSV* as CSV.

    Does nothing when *rows* is empty. Fixes in this revision: the
    file is opened with newline='' as the csv module requires
    (otherwise extra blank lines appear on Windows), and the redundant
    fp.close() inside the `with` block was removed.
    """
    import csv
    if rows:
        with open(fileToWriteToCSV, "w", newline="") as fp:
            csv.writer(fp, delimiter=',').writerows(rows)
        print("Written " + str(len(rows)) + " rows to: " + fileToWriteToCSV)
def loadFriends(collectionToRead):
    """Tally, across every stored community member, how many of them
    follow each account; returns a plain dict {friend_id: count}."""
    follow_counts = {}
    cursor = collectionToRead.find({}, no_cursor_timeout=True)
    for member in cursor:
        for friend in member["friends"]:
            follow_counts[friend] = follow_counts.get(friend, 0) + 1
    return follow_counts
def getTopNCommunityFollowsInCSV(db_name, collectionName, portInput, usersCommunityFollows, N):
    """Write the N accounts most-followed by the community to a CSV.

    *usersCommunityFollows* maps account id -> number of community
    members following it; the top N ids are resolved to profile rows
    via the Mongo collection and written to
    <outputDir><db_name>TopNMostFrequentlyFollowed.csv.

    NOTE(review): relies on the module-level global `outputDir`
    (assigned in the __main__ block), so calling this from an importing
    module fails with NameError; `nameToRow[name]` below also raises
    KeyError when a top-N id has no document in the collection --
    confirm both are intended.
    """
    from operator import itemgetter
    # Highest follow-count first; res keeps (id, count) pairs.
    res = sorted(usersCommunityFollows.items(), key = itemgetter(1), reverse = True)[:N]
    topN = []
    for pair in res:
        topN.append(pair[0])
    print(topN)
    print(res)
    from MongoDBInterface import getMongoClient
    client = getMongoClient(portInput)
    db = client[db_name]
    collectionToRead = db[collectionName]
    tweetCursor = collectionToRead.find({}, no_cursor_timeout=True)
    fieldsOfInterest = ['screenName', 'followers_count', 'location', 'name', 'created_at', 'description']
    nameToRow = {}
    topNSet = set(topN)
    for userInfo in tweetCursor:
        if str(userInfo["id_str"]) in topNSet:
            row = [usersCommunityFollows[str(userInfo["id_str"])]]
            for field in fieldsOfInterest:
                row.append(userInfo[field])
            nameToRow[str(userInfo["id_str"])] = row
    # Emit rows in ranking order, not cursor order.
    rows = [['FollowsByCommunity']+fieldsOfInterest]
    for name in topN:
        rows.append(nameToRow[name])
    writeRowsToCSV(rows, outputDir+db_name+"TopNMostFrequentlyFollowed.csv")
if __name__ == '__main__':
pass
outputDir = "programOutput/"
import os
if not os.path.isdir(outputDir):
os.mkdir(outputDir)
followersDir = "collectFollowers/"
import os
if not os.path.isdir(followersDir):
os.mkdir(followersDir)
from TwitterAPI import getAPI
twitterAPI1 = getAPI()
port = 27020
step0 = False
if step0:
print("applying google search")
db_name = "TempInfluencersFromGoogleSearch"
from MongoDBInterface import getMongoClient
client = getMongoClient(port)
from CollectUserInfo import mainProcessScreenNames
db = client[db_name]
queries1 = ["Minsk Belarus Twitter", "Moscow Russia Twitter", "Moskva Russia Twitter"]
queries2 = ["Buffalo NY Twitter", "Syracuse NY Twitter"]
queries = queries1+queries2
import time
for query in queries:
potentialInfluencerToWebHit = googleSearch(query)
print(potentialInfluencerToWebHit)
collectionName = query.replace(" ", "")
collectionToWrite = db[collectionName]
collectionToWrite.drop()
mainProcessScreenNames(twitterAPI1, db_name, collectionName, potentialInfluencerToWebHit.keys(), port, True)
generateCSVFileWithUserInfo(collectionToWrite, potentialInfluencerToWebHit, outputDir)
time.sleep(120) #wait 2 minutes before queries to google so that a bot is not suspected
influencer1 = ['moscowgov', 'MID_RF', 'mfa_russia']
influencer2 = ['franakviacorka', 'BelarusMID', 'BelarusFeed', 'Tsihanouskaya']
influencer3 = ['SyracuseUNews', 'Syracuse1848', 'AndrewDonovan', 'BenWalsh44', 'SyracusePolice', 'Cuse_Tennis']
influencer4 = ['WKBW', 'SPECNewsBuffalo', 'NWSBUFFALO', 'BPDAlerts']
step1 = False #collect followers
if step1:
influencers = influencer1+influencer2+influencer3+influencer4
'''collect influencer's followers and profile information of each follower'''
from MainDBSetup import setupDBUsingSingleUser
maxFollowerToCollect = 500000
for influencerScreenName in influencers:
setupDBUsingSingleUser(twitterAPI1, influencerScreenName, maxFollowerToCollect, followersDir, port)
step2 = False #form communities
communities = {}
communities["ComMoscowRussiaTwitter"] = influencer1
communities["ComMinskBelarusTwitter"] = influencer2
communities["ComSyracuseNYTwitter"] = influencer3
communities["ComBuffaloNYTwitter"] = influencer4
if step2:
minFriend = 10
maxFriend = 25
maxFollower = 500
for communityName in communities:
print("working on community for: " + str(communityName))
followersMeetingThreshold = formCommunities(communities[communityName], followersDir, minFriend, maxFriend, maxFollower, port)
from CollectFriends import mainProcessFriends
mainProcessFriends(twitterAPI1, communityName, "friendsOfCommunity", followersMeetingThreshold, port)
from TwitterAPI import getAPI2
twitterAPI1 = getAPI2()
step3 = False
if step3: #collect info on those users followed by community
for communityName in communities:
db_name = communityName
collectionName = "friendsOfCommunity"
from MongoDBInterface import getMongoClient
client = getMongoClient(port)
db = client[db_name]
collectionNeedBePerformed = True
if "communityOverWhichFriendInfoCollected" in db.list_collection_names():
collectionNeedBePerformed = False
if collectionNeedBePerformed:
collectionToRead = db[collectionName]
usersCommunityFollows = loadFriends(collectionToRead)
from CollectUserInfo import mainProcessIDs
friends = list(usersCommunityFollows.keys())
print(list(friends))
print(str(len(friends)) + " friends loaded")
collectionNameToWrite = "friendInfo"
collectionNameToWrite2 = "communityOverWhichFriendInfoCollected"
mainProcessIDs(twitterAPI1, db_name, collectionNameToWrite, friends, collectionNameToWrite2, port, True)
step4 = True
if step4: #identify top ranked using frequency and TF-IDF
communityToCommunityFollows = {}
for communityName in communities:
db_name = communityName
collectionName = "friendsOfCommunity"
from MongoDBInterface import getMongoClient
client = getMongoClient(port)
db = client[db_name]
collectionToRead = db[collectionName]
usersCommunityFollows = loadFriends(collectionToRead)
communityToCommunityFollows[communityName] = usersCommunityFollows
topRankedUsingFrequency = 50
getTopNCommunityFollowsInCSV(db_name, "friendInfo", port, usersCommunityFollows, topRankedUsingFrequency)
from TFIDF import generateTFIDFRanking
minFollowsByCommunity = 10
dictionary, influencerToDictCount, tfidf, indexTFIDF, lsi_model, indexLSI, communityLabels, communityVectorsLSI, communityVectorsTFIDF = generateTFIDFRanking(communityToCommunityFollows, minFollowsByCommunity)
topNInfluencersPerCommunity = 500
from TFIDF import writeTopNVectors
writeTopNVectors(communityToCommunityFollows, dictionary, communityVectorsTFIDF, topNInfluencersPerCommunity, port, outputDir) | true |
d629ba6a34b73bbed199cd0671da05f8ccfb026f | Python | MikeDombo/NewsScraper | /Article_Scrape_And_Parse/API/NewsAPI.py | UTF-8 | 2,489 | 2.859375 | 3 | [] | no_license | from urllib2 import urlopen
import json
class NewsAPI(object):
    """Thin Python 2 client for the (v1) newsapi.org REST API.

    Uses urllib2.urlopen and dict.iteritems, so this class is
    Python-2-only as written.
    """
    def __init__(self, key):
        """Remember the API key and the fixed v1 endpoint URLs."""
        self.APIKEY = key
        self.articles_endpoint = "https://newsapi.org/v1/articles"
        self.sources_endpoint = "https://newsapi.org/v1/sources"
    def get_articles(self, source, sort_by=None):
        """Fetch the article listing for *source*; when given, *sort_by*
        must be top, latest or popular. Returns the decoded JSON."""
        query_parameters = {}
        if sort_by is not None:
            if self.__verify_sortby(sort_by):
                query_parameters["sortBy"] = sort_by
            else:
                raise ValueError("sortBy must be one of: top, latest, or popular")
        query_parameters["source"] = source
        r = urlopen(self.__build_request_url(self.articles_endpoint, query_parameters))
        return json.loads(r.read())
    def get_articles_url_list(self, source, sort_by=None):
        """Return just the article URLs for *source*; raises
        RuntimeError with the API's message on a non-ok status."""
        article_list = []
        r = self.get_articles(source, sort_by)
        if r["status"] == "ok":
            for ar in r["articles"]:
                article_list.append(ar["url"])
        else:
            raise RuntimeError(r["message"])
        return article_list
    def get_sources(self, language=None, category=None, country=None):
        """Fetch the available sources, optionally filtered by language,
        category and country (each value validated before the request).

        NOTE(review): the category error message says "sports" while the
        accepted list contains "sport" -- one of the two looks wrong.
        """
        query_parameters = {}
        if language is not None:
            if self.__verify_languages(language):
                query_parameters["language"] = language
            else:
                raise ValueError("Language must be one of: en, de, or fr")
        if category is not None:
            if self.__verify_category(category):
                query_parameters["category"] = category
            else:
                raise ValueError("Category must be one of: business, entertainment, gaming, general, music, science-and-nature, sports, or technology")
        if country is not None:
            if self.__verify_country(country):
                query_parameters["country"] = country
            else:
                raise ValueError("Country must be one of: au, de, gb, in, it, or us")
        r = urlopen(self.__build_request_url(self.sources_endpoint, query_parameters))
        return json.loads(r.read())
    def __build_request_url(self, url, parameters):
        """Append *parameters* (plus the API key) to *url* as a query
        string.

        NOTE(review): values are concatenated without URL-encoding and
        the caller's dict is mutated (apiKey is inserted) -- confirm
        both are acceptable before reusing this elsewhere.
        """
        parameters["apiKey"] = self.APIKEY
        qp = url+"?"
        for key, value in parameters.iteritems():
            qp += key+"="+value+"&"
        return qp[:-1]
    # The following helpers are case-insensitive membership checks for
    # the closed value sets accepted by the v1 API.
    @staticmethod
    def __verify_sortby(s):
        sort_order = ["top", "latest", "popular"]
        return s.lower() in sort_order
    @staticmethod
    def __verify_languages(l):
        languages = ["en", "de", "fr"]
        return l.lower() in languages
    @staticmethod
    def __verify_category(c):
        categories = ["business", "entertainment", "gaming", "general", "music", "science-and-nature", "sport", "technology"]
        return c.lower() in categories
    @staticmethod
    def __verify_country(c):
        countries = ["au", "de", "gb", "in", "it", "us"]
        return c.lower() in countries
| true |
647e44b666b759c485a878fb35fffe3e31b27b83 | Python | zchq88/mylearning | /设计模式/行为类模式/责任链模式.py | UTF-8 | 2,272 | 3.53125 | 4 | [
"MIT"
] | permissive | # 使多个对象都有机会处理请求,从而避免了请求的发送者和接受者之间的耦合关系。
# 将这些对象连成一条链,并沿着这条链传递该请求,直到有对象处理它为止。
class request:
    """Carrier for a leave request passed along the handler chain."""

    def __init__(self):
        # Per-instance storage. The original used a single class-level
        # dict (`_info = {}`), the classic shared-mutable-state pitfall:
        # any in-place mutation would have been visible to every
        # instance. An empty dict is still the pre-setRequest default.
        self._info = {}

    def setRequest(self, info):
        """Store the request payload, e.g. {'Type': ..., 'number': ...}."""
        self._info = info

    def getRequest(self):
        """Return the stored request payload."""
        return self._info
class manager:
    """One link of the approval chain: approves requests within its own
    day limit and forwards anything larger to the next handler."""
    _next = None
    _name = None
    _num = None

    def __init__(self, name, num):
        self._name = name
        self._num = num

    def setNext(self, next):
        """Attach the next (higher-level) handler in the chain."""
        self._next = next

    def __canhandle(self, request):
        """Return True when the requested days fit this manager's limit."""
        info = request.getRequest()
        if info["number"] <= self._num:
            return True
        print(self._name + "不可以处理" + info["Type"] + str(info["number"]) + "天" + "并移交下一级")
        return False

    def handle(self, request):
        """Approve, delegate, or print a final refusal at chain's end."""
        if self.__canhandle(request):
            if self.echo(request):
                return
        elif self._next is not None:
            self._next.handle(request)
        else:
            print("不能请假,处理结束")

    def echo(self, request):
        """Announce approval of the request; always returns True."""
        info = request.getRequest()
        print(self._name + "可以处理" + info["Type"] + str(info["number"]) + "天" + "并批准")
        return True
if __name__ == "__main__":
    # Build the chain: department manager (3 days) -> deputy manager
    # (7 days) -> general manager (10 days).
    a = manager("部门经理", 3)
    b = manager("副经理", 7)
    c = manager("总经理", 10)
    a.setNext(b)
    b.setNext(c)
    # c.setNext(a)  (left commented out: it would make the chain cyclic)
    req = request()
    # Requests of increasing length exercise every level of the chain;
    # the last one (11 days) exceeds everyone and is refused.
    req.setRequest({"Type": "A请假", "number": 2})
    a.handle(req)
    print("______________________")
    req.setRequest({"Type": "B请假", "number": 5})
    a.handle(req)
    print("______________________")
    req.setRequest({"Type": "C请假", "number": 8})
    a.handle(req)
    print("______________________")
    req.setRequest({"Type": "D请假", "number": 11})
    a.handle(req)
    print("______________________")
# Point 1: requests and handlers are decoupled -- a request need not know
#          who handles it, and a handler need not see the whole request.
# Point 2: performance -- a long chain makes the cascaded calls costly
#          and harder to debug.
# Point 3: constraining setNext can prevent accidental chain shapes
#          (such as cycles) that hurt the system.
3dd7dab7074194ad32e0cd95e07f045327e157cb | Python | mustafayaylali/Python | /03)class constructor.py | UTF-8 | 1,166 | 3.671875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 23 16:04:32 2018
@author: ME99689
"""
# %%
class Calisan:
    """Employee record with a class-wide raise rate and an instance counter."""

    zam_orani = 1.8   # raise rate applied by zam_yap()
    counter = 0       # number of Calisan objects created so far

    def __init__(self, isim, soyisim, maas):
        self.isim = isim
        self.soyisim = soyisim
        self.maas = maas
        self.email = isim + soyisim + "@hotmail.com"
        Calisan.counter += 1

    def giveNameSurname(self):
        """Return 'first last' as a single string."""
        return "{} {}".format(self.isim, self.soyisim)

    def zam_yap(self):
        """Apply the class-wide raise rate to this employee's salary."""
        self.maas += self.maas * self.zam_orani
# Demo: build four employees and report the highest-paid one.
# (Dead commented-out experiments removed; the hand-rolled maximum
# search loop is replaced by the builtin max() with a key function --
# the printed output is unchanged.)
calisan1=Calisan("Abdülkadir","Ömür",4100)
calisan2=Calisan("Burak","Yilmaz",2200)
calisan3=Calisan("Gustova","Colman",1200)
calisan4=Calisan("Adrian","Mierjezevski",2500)
liste=[calisan1,calisan2,calisan3,calisan4]
en_zengin = max(liste, key=lambda calisan: calisan.maas)
print("En yüksek maas=",en_zengin.maas, "ve adı:",en_zengin.giveNameSurname())
#%%
| true |
b009f9663f045df8a6f6e88d788ee95ad762a859 | Python | wildandahru/bismillah | /client.py | UTF-8 | 630 | 2.9375 | 3 | [] | no_license | import socket, os, sys
# Ask for the server address; the port is fixed to match the server script.
ip = input('Masukkan Alamat IP yg dituju: ')
port = 9999
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    client.connect((ip, port))
except OSError:
    # Narrowed from a bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit); all socket errors are OSError
    # subclasses in Python 3, so connection failures are still caught.
    print('Ip tidak bisa terhubung')
    sys.exit(0)
#Fungsi pada klien ---------------------------------------------------------
def buat_file(dir, client):
    """Receive text chunks from *client* and print them until the peer
    closes the connection or a chunk containing "stop" arrives.

    Fixes two bugs in the original: it compared the str "stop" against
    a bytes object (TypeError on Python 3), and it never called recv()
    again inside the loop, so a first chunk without "stop" spun forever
    reprinting the same data.

    The *dir* parameter is kept for interface compatibility; it is not
    used yet (presumably the intended save directory -- TODO confirm).
    """
    while True:
        nama = client.recv(1024)
        if not nama:
            break
        teks = nama.decode()
        print(teks)
        if "stop" in teks:
            break
#Akhir Fungsi ----------------------------------------------------------
# Print the server's greeting/acknowledgement message.
acc = client.recv(1024)
print((acc).decode())
# Ask where received data should be stored (note: `dir` shadows the
# builtin of the same name) and hand control to the receive loop.
dir = input("Masukkan direktory penyimapanan : ")
buat_file(dir, client)
| true |
06b9b78331ebd345877a6ed5362bb17b55825017 | Python | Mrkumar98/Python-Projects-for-Beginners | /Small_Python_Problems/move_zero.py | UTF-8 | 1,000 | 4.09375 | 4 | [
"MIT"
def move_zero(lst):
    """
    Move every non-zero number to the front of *lst* and every zero to
    the end, preserving the relative order of the non-zero values. The
    list is modified in place and nothing is returned.

    For example:
    - After calling move_zero([0,1,0,2,0,3,0,4]), the list is [1,2,3,4,0,0,0,0]
    - After calling move_zero([1,2,3,4,5,6,7,8]), the list is unchanged
    - After calling move_zero([]), the list stays []
    """
    # Partition with two comprehensions, then write the reordered
    # values back over the original list's slots.
    keepers = [value for value in lst if value != 0]
    zeros = [value for value in lst if value == 0]
    lst[:] = keepers + zeros
| true |
8385917f836f9fd5eaaa8beb676c9f1a9f240f55 | Python | BtbN/scgi | /scgi/scgi_server.py | UTF-8 | 12,349 | 3.015625 | 3 | [
"Apache-2.0",
"DOC",
"MIT"
] | permissive | #!/usr/bin/env python
"""
A pre-forking SCGI server that uses file descriptor passing to off-load
requests to child worker processes.
"""
import sys
import socket
import os
import select
import errno
import fcntl
import signal
from scgi import passfd
# netstring utility functions
def ns_read_size(input):
    """Read a netstring length prefix ("<digits>:") from *input* and
    return the announced payload size (Python 2: returned as a long)."""
    size = ""
    while 1:
        c = input.read(1)
        if c == ':':
            # ':' terminates the length field.
            break
        elif not c:
            # EOF before the colon: the stream was truncated.
            raise IOError, 'short netstring read'
        size = size + c
    return long(size)
def ns_reads(input):
    """Read one complete netstring from *input* and return its payload.

    Loops because file.read(size) may legally return fewer than *size*
    bytes; raises IOError on a truncated stream or when the trailing
    ',' terminator is missing.
    """
    size = ns_read_size(input)
    data = ""
    while size > 0:
        s = input.read(size)
        if not s:
            raise IOError, 'short netstring read'
        data = data + s
        size -= len(s)
    if input.read(1) != ',':
        raise IOError, 'missing netstring terminator'
    return data
def read_env(input):
    """Decode the SCGI header netstring into a CGI environment dict.

    The payload is NUL-separated alternating names and values with a
    trailing NUL, hence the final (empty) item is dropped before
    pairing the rest up.
    """
    headers = ns_reads(input)
    items = headers.split("\0")
    items = items[:-1]
    assert len(items) % 2 == 0, "malformed headers"
    env = {}
    for i in range(0, len(items), 2):
        env[items[i]] = items[i+1]
    return env
class Child:
    """Parent-side record of one worker process: its pid and the UNIX
    socket fd used to read readiness bytes and pass request fds."""
    def __init__(self, pid, fd):
        self.pid = pid
        self.fd = fd
        self.closed = 0
    def close(self):
        """Close the pipe to the worker (idempotent)."""
        if not self.closed:
            os.close(self.fd)
            self.closed = 1
class SCGIHandler:
    """Runs in a forked worker: announces readiness to the parent over
    a UNIX socket, receives request sockets via file-descriptor
    passing, and serves them one at a time."""
    # Subclasses should override the handle_connection method.
    def __init__(self, parent_fd):
        # fd of the UNIX socket shared with the parent process.
        self.parent_fd = parent_fd
    def serve(self):
        """Worker main loop: signal readiness, wait for a request fd
        from the parent, and handle that connection; exits when the
        pipe to the parent breaks (parent gone)."""
        while 1:
            try:
                os.write(self.parent_fd, "1") # indicates that child is ready
                fd = passfd.recvfd(self.parent_fd)
            except (IOError, OSError):
                # parent probably exited (EPIPE comes thru as OSError)
                raise SystemExit
            conn = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
            # Make sure the socket is blocking. Apparently, on FreeBSD the
            # socket is non-blocking. I think that's an OS bug but I don't
            # have the resources to track it down.
            conn.setblocking(1)
            os.close(fd)
            self.handle_connection(conn)
    def read_env(self, input):
        """Parse the SCGI request headers (overridable hook)."""
        return read_env(input)
    def handle_connection(self, conn):
        """Handle an incoming request. This used to be the function to
        override in your own handler class, and doing so will still work.
        It will be easier (and therefore probably safer) to override
        produce() or produce_cgilike() instead.
        """
        input = conn.makefile("r")
        output = conn.makefile("w")
        env = self.read_env(input)
        bodysize = int(env.get('CONTENT_LENGTH', 0))
        try:
            self.produce(env, bodysize, input, output)
        finally:
            output.close()
            input.close()
            conn.close()
    def produce(self, env, bodysize, input, output):
        """This is the function you normally override to run your
        application. It is called once for every incoming request that
        this process is expected to handle.
        Parameters:
        env - a dict mapping CGI parameter names to their values.
        bodysize - an integer giving the length of the request body, in
        bytes (or zero if there is none).
        input - a file allowing you to read the request body, if any,
        over a socket. The body is exactly bodysize bytes long; don't
        try to read more than bodysize bytes. This parameter is taken
        from the CONTENT_LENGTH CGI parameter.
        output - a file allowing you to write your page over a socket
        back to the client. Before writing the page's contents, you
        must write an http header, e.g. "Content-Type: text/plain\\r\\n"
        The default implementation of this function sets up a CGI-like
        environment, calls produce_cgilike(), and then restores the
        original environment for the next request. It is probably
        faster and cleaner to override produce(), but produce_cgilike()
        may be more convenient.
        """
        # Preserve current system environment
        stdin = sys.stdin
        stdout = sys.stdout
        environ = os.environ
        # Set up CGI-like environment for produce_cgilike()
        sys.stdin = input
        sys.stdout = output
        os.environ = env
        # Call CGI-like version of produce() function
        try:
            self.produce_cgilike(env, bodysize)
        finally:
            # Restore original environment no matter what happens
            sys.stdin = stdin
            sys.stdout = stdout
            os.environ = environ
    def produce_cgilike(self, env, bodysize):
        """A CGI-like version of produce. Override this function instead
        of produce() if you want a CGI-like environment: CGI parameters
        are added to your environment variables, the request body can be
        read on standard input, and the resulting page is written to
        standard output.
        The CGI parameters are also passed as env, and the size of the
        request body in bytes is passed as bodysize (or zero if there is
        no body).
        Default implementation is to produce a text page listing the
        request's CGI parameters, which can be useful for debugging.
        """
        sys.stdout.write("Content-Type: text/plain\r\n\r\n")
        for k, v in env.items():
            print "%s: %r" % (k, v)
class SCGIServer:
DEFAULT_PORT = 4000
def __init__(self, handler_class=SCGIHandler, host="", port=DEFAULT_PORT,
max_children=5):
self.handler_class = handler_class
self.host = host
self.port = port
self.max_children = max_children
self.children = []
self.spawn_child()
self.restart = 0
#
# Deal with a hangup signal. All we can really do here is
# note that it happened.
#
def hup_signal(self, signum, frame):
self.restart = 1
def spawn_child(self, conn=None):
parent_fd, child_fd = passfd.socketpair(socket.AF_UNIX,
socket.SOCK_STREAM)
# make child fd non-blocking
flags = fcntl.fcntl(child_fd, fcntl.F_GETFL, 0)
fcntl.fcntl(child_fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
pid = os.fork()
if pid == 0:
if conn:
conn.close() # in the midst of handling a request, close
# the connection in the child
os.close(child_fd)
self.handler_class(parent_fd).serve()
sys.exit(0)
else:
os.close(parent_fd)
self.children.append(Child(pid, child_fd))
def get_child(self, pid):
for child in self.children:
if child.pid == pid:
return child
return None
def reap_children(self):
while self.children:
(pid, status) = os.waitpid(-1, os.WNOHANG)
if pid <= 0:
break
child = self.get_child(pid)
child.close()
self.children.remove(child)
def do_stop(self):
# Close connections to the children, which will cause them to exit
# after finishing what they are doing.
for child in self.children:
child.close()
def do_restart(self):
self.do_stop()
self.restart = 0
def delegate_request(self, conn):
"""Pass a request fd to a child process to handle. This method
blocks if all the children are busy and we have reached the
max_children limit."""
# There lots of subtleties here. First, we can't use the write
# status of the pipes to the child since select will return true
# if the buffer is not filled. Instead, each child writes one
# byte of data when it is ready for a request. The normal case
# is that a child is ready for a request. We want that case to
# be fast. Also, we want to pass requests to the same child if
# possible. Finally, we need to gracefully handle children
# dying at any time.
# If no children are ready and we haven't reached max_children
# then we want another child to be started without delay.
timeout = 0
while 1:
fds = [child.fd for child in self.children if not child.closed]
try:
r, w, e = select.select(fds, [], [], timeout)
except select.error, e:
if e[0] == errno.EINTR: # got a signal, try again
continue
raise
if r:
# One or more children look like they are ready. Sort
# the file descriptions so that we keep preferring the
# same child.
child = None
for child in self.children:
if not child.closed and child.fd in r:
break
if child is None:
continue # no child found, should not get here
# Try to read the single byte written by the child.
# This can fail if the child died or the pipe really
# wasn't ready (select returns a hint only). The fd has
# been made non-blocking by spawn_child. If this fails
# we fall through to the "reap_children" logic and will
# retry the select call.
try:
ready_byte = os.read(child.fd, 1)
if not ready_byte:
raise IOError # child died?
assert ready_byte == "1", repr(ready_byte)
except socket.error, exc:
if exc[0] == errno.EWOULDBLOCK:
pass # select was wrong
else:
raise
except (OSError, IOError):
pass # child died?
else:
# The byte was read okay, now we need to pass the fd
# of the request to the child. This can also fail
# if the child died. Again, if this fails we fall
# through to the "reap_children" logic and will
# retry the select call.
try:
passfd.sendfd(child.fd, conn.fileno())
except IOError, exc:
if exc.errno == errno.EPIPE:
pass # broken pipe, child died?
else:
raise
else:
# fd was apparently passed okay to the child.
# The child could die before completing the
# request but that's not our problem anymore.
return
# didn't find any child, check if any died
self.reap_children()
# start more children if we haven't met max_children limit
if len(self.children) < self.max_children:
self.spawn_child(conn)
# Start blocking inside select. We might have reached
# max_children limit and they are all busy.
timeout = 2
    def get_listening_socket(self):
        """Create and return the TCP listening socket bound to
        (self.host, self.port).

        SO_REUSEADDR is set so the server can be restarted immediately
        without waiting for the old socket to leave TIME_WAIT.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((self.host, self.port))
        return s
    def serve_on_socket(self, s):
        """Accept connections on *s* forever, handing each one to a child.

        SIGHUP requests a restart (handled via self.restart/do_restart).
        EINTR from accept() is expected when a signal interrupts the call
        and is silently retried; any other socket error is re-raised.
        """
        self.socket = s
        self.socket.listen(40)
        signal.signal(signal.SIGHUP, self.hup_signal)
        while 1:
            try:
                conn, addr = self.socket.accept()
                self.delegate_request(conn)
                # Parent closes its copy; the fd was passed to a child.
                conn.close()
            except socket.error, e:
                if e[0] != errno.EINTR:
                    raise # something weird
            # hup_signal sets self.restart; perform the restart outside
            # the signal handler, between accepts.
            if self.restart:
                self.do_restart()
    def serve(self):
        """Convenience entry point: create the socket and serve forever."""
        self.serve_on_socket(self.get_listening_socket())
def main():
    """Entry point: accept an optional port number as the sole CLI argument,
    otherwise fall back to SCGIServer.DEFAULT_PORT."""
    port = int(sys.argv[1]) if len(sys.argv) == 2 else SCGIServer.DEFAULT_PORT
    SCGIServer(port=port).serve()
# Start the server only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| true |
c42d9bbb48c853c9d06ff1a2bc6cc59ac4d13a00 | Python | devMooon/pythonProject | /영화티켓_관리자모드.py | UTF-8 | 1,424 | 3.40625 | 3 | [] | no_license | movieName = "Harry Potter"
# Booking state for the current screening (hard-coded sample data).
theaterNum = 2
seatNum = ["F8", "F9"]
adultNum = 1        # adult tickets sold in this booking
studentNum = 0      # student tickets sold in this booking
childNum = 1        # child tickets sold in this booking
peopleNum = 2       # people added by this booking
existingPeopleNum = 174   # audience count before this booking
price = 12000       # revenue from the latest booking (KRW)
print("=================================")
print()
# Prompt (Korean): "Enter admin mode for this movie? Typing 'no' exits
# the program. [yes/no]"
print("해당 영화 관리자 모드로 들어가시겠습니까?")
print("<no입력 시 프로그램이 종료됩니다.?[yes/no]")
YorN = input(">>")
if YorN.lower() == "yes":
    # Admin summary screen: extra attendance and income for the movie.
    print(" =================================")
    print(" [%s] 관리자 창" % (movieName))
    print(" -------------------------------")
    print(" [%s] 추가 인원 및 수입" % (movieName))
    print()
    # Revenue per ticket type (adult 10000 / student 8000 / child 5000 KRW).
    print(" 제 %d 상영관 %s" % (theaterNum, " ".join(seatNum)))
    print()
    print(" 어른[%d] : %d" % (adultNum, adultNum * 10000))
    print(" 청소년[%d] : %d" % (studentNum, studentNum * 8000))
    print(" 어린이[%d] : %d" % (childNum, childNum * 5000))
    print()
    print(" 추가 인원 : %d" % (peopleNum))
    print(" 총인원 : %d" % (existingPeopleNum + peopleNum))
    print(" 최근 수입 : %d" % (price))
    print(" -------------------------------")
    print(" =================================")
else:
    print("==================================================") # 50 '=' chars
    print(" 프로그램을 종료합니다. ")
| true |
3862c12b164dc52df57b8898a76f98a32f59ae60 | Python | mihalea/bitwarden-pyro | /bitwarden_pyro/view/rofi.py | UTF-8 | 4,313 | 2.671875 | 3 | [
"MIT"
] | permissive | import subprocess as sp
from subprocess import CalledProcessError
from collections import namedtuple
from bitwarden_pyro.util.logger import ProjectLogger
Keybind = namedtuple("Kebind", "key event message show")


class Rofi:
    """Start and retrieve results from Rofi windows."""

    def __init__(self, args, enter_event, hide_mesg):
        """Remember the rofi configuration for later launches.

        args        -- raw CLI argument list; element 0 (the program name)
                       is dropped and the rest is forwarded to rofi
        enter_event -- event name reported when Enter selects an item
        hide_mesg   -- when True, hide the keybind help message bar
        """
        self._logger = ProjectLogger().get_logger()
        self._keybinds = {}
        self._args = args[1:]
        self._enter_event = enter_event
        self._hide_mesg = hide_mesg

        # rofi maps -kb-custom-N to exit code N+9, so the first allocated
        # custom keybind uses exit code 10.
        self._keybinds_code = 10

        # Fixed: previously this tested len(args), which still counts the
        # dropped program name, so a one-element list logged an empty
        # argument list.  Test the forwarded arguments instead.
        if len(self._args) > 0:
            self._logger.debug("Setting rofi arguments: %s", self._args)

    def __extend_command(self, command):
        """Append extra CLI args, the help message bar and keybind flags
        to *command*, returning the extended list."""
        if len(self._args) > 0:
            command.extend(self._args)

        if not self._hide_mesg:
            mesg = []
            for keybind in self._keybinds.values():
                if keybind.message is not None and keybind.show:
                    mesg.append(f"<b>{keybind.key}</b>: {keybind.message}")

            if len(mesg) > 0:
                command.extend([
                    "-mesg", ", ".join(mesg)
                ])

        # -kb-custom-1 corresponds to exit code 10, hence "code - 9".
        for code, keybind in self._keybinds.items():
            command.extend([
                f"-kb-custom-{code - 9}",
                keybind.key
            ])

        return command

    def add_keybind(self, key, event, message, show):
        """Create a keybind object and store it in memory.

        Raises KeybindException once all 18 custom slots (codes 10-27)
        are taken.
        """
        if self._keybinds_code == 28:
            raise KeybindException(
                "The maximum number of keybinds has been reached"
            )

        self._keybinds[self._keybinds_code] = Keybind(
            key, event, message, show
        )
        self._keybinds_code += 1

    def get_password(self):
        """Launch a window requesting a password.

        Returns the entered password, or None when the prompt is closed.
        """
        try:
            self._logger.info("Launching rofi password prompt")
            cmd = [
                "rofi", "-dmenu", "-p", "Master Password",
                "-password", "-lines", "0"
            ]

            if len(self._args) > 0:
                cmd.extend(self._args)

            proc = sp.run(cmd, check=True, capture_output=True)
            return proc.stdout.decode("utf-8").strip()
        except CalledProcessError:
            # Non-zero exit means the prompt was dismissed, not an error.
            self._logger.info("Password prompt has been closed")
            return None

    def show_error(self, message):
        """Launch a window showing an error message."""
        try:
            self._logger.info("Showing Rofi error")
            cmd = ["rofi", "-e", f"ERROR! {message}"]

            if len(self._args) > 0:
                cmd.extend(self._args)

            sp.run(cmd, capture_output=True, check=True)
        except CalledProcessError:
            raise RofiException("Rofi failed to display error message")

    def show_items(self, items, prompt='Bitwarden'):
        """Show a list of items and return the (selected item, action) pair.

        Both elements are None when the window is dismissed or an
        unknown exit code is received.
        """
        try:
            self._logger.info("Launching rofi login select")
            echo_cmd = ["echo", items]
            rofi_cmd = self.__extend_command([
                "rofi", "-dmenu", "-p", prompt, "-i", "-no-custom"
            ])

            echo_proc = sp.Popen(echo_cmd, stdout=sp.PIPE)
            rofi_proc = sp.run(
                rofi_cmd, stdin=echo_proc.stdout, stdout=sp.PIPE, check=False
            )

            return_code = rofi_proc.returncode
            selected = rofi_proc.stdout.decode("utf-8").strip()

            # Clean exit (window dismissed)
            if return_code == 1:
                return None, None
            # Selected item by enter
            if return_code == 0:
                return selected, self._enter_event
            # Selected item using custom keybind
            if return_code in self._keybinds:
                return selected, self._keybinds.get(return_code).event

            self._logger.warning(
                "Unknown return code has been received: %s", return_code
            )
            return None, None
        except CalledProcessError:
            self._logger.info("Login select has been closed")
            # Fixed: previously returned a bare None here while every other
            # path returns a 2-tuple, crashing callers that unpack the
            # result as "item, event".
            return None, None
class RofiException(Exception):
    """Base class for exceptions thrown by Rofi"""


class KeybindException(Exception):
    """Raised when no more custom keybinds can be registered (rofi's
    -kb-custom slots are exhausted)."""
| true |
2bad5b61bbe474967dc4b3d585ca6d6bf1e4ea7b | Python | ggroshansii/ComputerScience | /StripTranscript.py | UTF-8 | 413 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python3
# Normalise a transcript file: strip the trailing newline/whitespace from
# every line, rewrite the file as a single space-separated stream, then
# echo the result.  File handles are now closed deterministically via
# context managers (the original left all three handles open).
TRANSCRIPT_PATH = "/home/gh0st/Documents/transcript.txt"

# Read every line and drop its trailing whitespace (including '\n').
with open(TRANSCRIPT_PATH, "r") as infile:
    strippedSentences = [line.rstrip() for line in infile.readlines()]

# Overwrite the file, writing each sentence followed by a single space.
with open(TRANSCRIPT_PATH, "w") as outfile:
    for sentence in strippedSentences:
        outfile.write(sentence + " ")

# Echo the rewritten file to stdout.
with open(TRANSCRIPT_PATH, "r") as result:
    for line in result:
        print(line)
9ef70af3efc6c124b289ea6ebe76a8b21f4d9cff | Python | weinkym/src_miao | /python/read_excel/ydw_list.py | UTF-8 | 382 | 2.703125 | 3 | [] | no_license |
# Exploratory parser for an '@'-separated log dump.
# NOTE(review): the file handle is never closed — consider a 'with' block.
file_object = open('/Users/miaozw/Documents/TEMP/13616511205.txt', 'r')
line_string = file_object.readline()
print(type(line_string))
# NOTE(review): cpu_string_list / cpu_found / line are never used below —
# presumably leftovers from an earlier version; confirm before removing.
cpu_string_list = []
cpu_found = False
line = ''
while line_string:
    # print(type(line_string))
    # NOTE(review): readline() is called again before the previous line is
    # split, so the very first line of the file is never parsed — confirm
    # whether skipping it (e.g. a header) is intentional.
    line_string = file_object.readline()
    data_list = line_string.split('@')
    # Skip records with fewer than 9 '@'-separated fields.
    if len(data_list) < 9:
        continue
    print(data_list)
| true |
848a010fcd3b99a6a2d72aaf5f4d9aada2a6181d | Python | Wings-Ji/Handover_simu | /plot.py | UTF-8 | 656 | 2.734375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 18/6/26 下午 9:51
# @Author : Ji
# @File : plot.py
# @Software: PyCharm
import matplotlib.pyplot as plt
name_list = ['relative ESA ', 'increase rate of average delay ']
traditional = [0.796, 0.402]
proposed = [1, 0.116]
x = list(range(len(traditional)))
total_width, n = 0.6, 2
width = total_width / n
plt.bar(x, traditional, width=width,edgecolor = 'b', linewidth = 1,label='tradition', fc='y')
for i in range(len(x)):
x[i] = x[i] + width
plt.bar(x, proposed, width=width, label='proposed',edgecolor = 'b', linewidth = 1, tick_label=name_list, fc='r')
plt.legend()
plt.show() | true |
7f1b6ced914acbd5b492dd12697bb0d932c201b9 | Python | zhuaa/CS744_UW-Madison | /assignment1/code/part3/task2/small_data/part3_t2_small.py | UTF-8 | 1,295 | 3.03125 | 3 | [] | no_license | import sys
import re
from pyspark.sql import SparkSession
from operator import add
def computeContribs(urls, rank):
    """Yield each URL in *urls* paired with an equal share of *rank*.

    A page's rank is divided evenly across all of its outgoing links.
    """
    count = len(urls)
    for neighbour in urls:
        yield (neighbour, rank / count)
def parseNeighbors(urls):
    """Split a whitespace-separated edge string into a (src, dst) tuple."""
    tokens = re.split(r'\s+', urls)
    return tokens[0], tokens[1]
# Build (or reuse) a SparkSession sized for this assignment's cluster.
spark = SparkSession\
    .builder\
    .appName("part3")\
    .config("spark.driver.memory", "8g")\
    .config("spark.executor.memory", "8g")\
    .config("spark.executor.cores", "5")\
    .config("spark.task.cpus", "1")\
    .getOrCreate()
# argv[1] = number of partitions, argv[2] = input edge-list file.
txt = spark.read.text(sys.argv[2]).rdd.map(lambda r: r[0])
# Drop comment lines (those starting with '#').
rows = txt.filter(lambda line: line[0]!="#")
# (src, iterable-of-dst) adjacency lists, deduplicated, partitioned into
# int(argv[1]) partitions — the same partitioner is applied to 'ranks'.
links = rows.map(lambda urls: parseNeighbors(urls)).distinct().groupByKey().partitionBy(int(sys.argv[1]))
# Every page starts with rank 1.0.
ranks = links.map(lambda url_neighbors: (url_neighbors[0], 1.0)).partitionBy(int(sys.argv[1]))
# 10 PageRank iterations with damping factor 0.85.
for ite in range(10):
    contributions = links.join(ranks).flatMap(
        lambda url_urls_rank: computeContribs(url_urls_rank[1][0], url_urls_rank[1][1]))
    ranks = contributions.reduceByKey(add).mapValues(lambda rank: rank * 0.85 + 0.15)
# Print only the first 5 collected (link, rank) pairs.
ite=0
for (link,rank) in ranks.collect():
    print("%s has rank: %s" %(link,rank))
    ite+=1
    if ite==5:
        break
| true |
54db3215f48248b438947fadf34aeeaa4dcd449f | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_120/400.py | UTF-8 | 502 | 2.953125 | 3 | [] | no_license | #!/usr/bin/python
# google code jam - c.durr - 2013
# Bullseye
# ad-hoc
def enough(n, r, t):
    """Return True when drawing n rings costs no more than t units of paint.

    Identical to the original predicate 2*n*r + n + 2*(n-1)*n <= t, with
    the common factor n pulled out of the first two terms.
    """
    return n * (2 * r + 1) + 2 * n * (n - 1) <= t

def solve(r, t):
    """Largest n for which enough(n, r, t) holds (0 if none).

    Doubles an upper bound until the predicate fails, then binary-searches
    [upper/2, upper) for the first failing value and returns one less.
    """
    hi = 1
    while enough(hi, r, t):
        hi *= 2
    lo = hi // 2
    while lo < hi:
        mid = (lo + hi) // 2
        if enough(mid, r, t):
            lo = mid + 1
        else:
            hi = mid
    return lo - 1
# Code Jam I/O loop (Python 2): first line is the number of test cases,
# each following line holds "r t" for one case.
T = int(raw_input())
for test in range(T):
    r,t = [int(i) for i in raw_input().split()]
    print 'Case #%d: %d' % (test+1, solve(r,t))
| true |
cc2e383d8c48a853ce88ce8e052d7339868453fe | Python | st33ze/pythonIntroToCS | /numbers/numbers.py | UTF-8 | 1,469 | 4.125 | 4 | [] | no_license | # Module with functions for extracting special numbers.
def get_naturals(init_list, sort=False):
    '''
    Convert each convertible element of init_list to int and keep the
    values greater than 1.  Elements that cannot be converted (e.g.
    non-numeric strings, lists) are silently skipped.
    Pass sort=True to get the result in ascending order.
    '''
    naturals = []
    for item in init_list:
        try:
            candidate = int(item)
        except:
            continue
        if candidate > 1:
            naturals.append(candidate)
    if sort:
        naturals.sort()
    return naturals
def get_primes(upper_range):
    '''
    Return the list of all prime numbers in [2, upper_range), using the
    sieve of Eratosthenes.

    The previous implementation deleted composites from a list one at a
    time (O(n^2) trial division, despite claiming to be a sieve); this
    boolean-array sieve produces the identical result in O(n log log n).
    '''
    if upper_range <= 2:
        return []
    is_candidate = [True] * upper_range
    is_candidate[0] = is_candidate[1] = False
    # Marking can stop at sqrt(upper_range): any composite below the
    # limit has a factor no larger than its square root.
    limit = int(upper_range ** 0.5)
    for n in range(2, limit + 1):
        if is_candidate[n]:
            # Start at n*n; smaller multiples were marked by smaller primes.
            for multiple in range(n * n, upper_range, n):
                is_candidate[multiple] = False
    return [n for n in range(2, upper_range) if is_candidate[n]]
def is_prime(num):
    '''Return True if num is a prime number, else False.

    Fixes the original, whose trial-division loop range(2, num) never ran
    for num < 3, so 0, 1 and all negative numbers were reported as prime.
    Trial division now stops at sqrt(num), which gives the same answer
    for every num >= 2 but far faster.
    '''
    if num < 2:
        return False
    divisor = 2
    while divisor * divisor <= num:
        if num % divisor == 0:
            return False
        divisor += 1
    return True
def _test():
    '''Ad-hoc manual checks; the sample calls are intentionally left
    commented out so importing this module has no side effects.'''
    # naturals_test = ['abc', '2', 7, 39, 40.5, -5, '-3', list(range(10)), 0, 's', '4']
    # print(get_naturals(naturals_test,sort=True))
    # print(get_primes(10000))
    # print(is_prime(1006533))
    pass

if __name__ == "__main__":
    _test()
d7839778203ed83a123086ff22bf2b17ca1bd2e7 | Python | mrgold92/python | /07-Consumiendo-APIRESTfulll/apirest-get.py | UTF-8 | 1,476 | 3.625 | 4 | [] | no_license | # requests es el objeto que representa la
# conexión http
# - Las funciones de requests genera un objeto
# respuesta desde donde podemos acceder a los
# datos de respuesta
# - text contiene el texto de la respuesta
# - json() retorna el objeto deserializado de una
# respuesta JSON
import requests
# Función GET
# URL de la petición por el método get
r = requests.get('http://api.open-notify.org/iss-now.json')
# Imprimimos el tipo de r -> <class 'requests.models.Response'>
print(type(r))
# Imprimimos el código de estado.
# Ejemplo status_code: 200
# reason para ver el estado en texto -> ok, not found...
print("Código de estado: ", r.status_code)
print("Estado: ", r.reason)
if r.status_code == 200:
# Imprimimos las cabeceras
print("Cabeceras: ", r.headers)
# De las cabeceras, imprimimos el content-type
# Ej de content-type -> application/json
print("Content-type: ", r.headers['content-type'])
if r.headers['content-type'] == 'application/json':
# r.json() Devuelve el contenido de una respuesta en formato json.
# De esa respuesta, obtenemos la distinta información
data = r.json()
print('Latitud: ', data['iss_position']['latitude'])
print('Longitud: ', data['iss_position']['longitude'])
print('Timestamp: ', data['timestamp'])
print('Mensaje: ', data['message'])
else:
print("Contenido: ", r.text)
print("Contenido: ", r.content)
| true |
4020e8008fbe2ce562a7440fb8dd2cbb6b3f5e68 | Python | phoenixcoder/PiCountDown | /countdown.py | UTF-8 | 4,870 | 3.4375 | 3 | [] | no_license | #!/usr/bin/python
import math, datetime, time, threading
import Adafruit_CharLCD as LCD
# List of (event name, date string) tuples shown by the countdown display.
events = [('Sample Event', '01/01/3000')]
# strptime format used to parse every event date above.
timeFormat = '%m/%d/%Y'
def calculateAndPrintMessage(tevent, lcd, eventName, eventDatetime, timeFormat):
    '''Calculates and prints the number of days and hours from the current date
    to the date of the event, refreshing the LCD roughly once a minute
    until *tevent* is set.

    ::param tevent:: Event object used to signal and stop the current
                     process externally.
    ::param lcd:: LCD object for displaying information.
    ::param eventName:: Name of the event.
    ::param eventDatetime:: Date and time of the event.
    ::param timeFormat:: Format of the time stamp listed with the event.
    '''
    while not tevent.isSet():
        print("Event: " + eventName)
        lcd.clear()
        nowDatetime = datetime.datetime.today()
        futureDatetime = datetime.datetime.strptime(eventDatetime, timeFormat)
        delta = futureDatetime - nowDatetime
        days = delta.days
        # NOTE(review): true division here yields fractional hours in
        # Python 3 (e.g. "2.5 hours" on the display) — presumably integer
        # division (//) was intended; confirm before changing.
        hours = delta.seconds / 3600
        message = eventName + " in\n"
        countdownStatement = ''
        # Singular/plural wording for days and hours.
        # NOTE(review): "days is 1" relies on CPython small-int identity;
        # "days == 1" would be the safe comparison — confirm.
        if days is 1:
            countdownStatement = countdownStatement + "{0} day "
        else:
            countdownStatement = countdownStatement + "{0} days "
        if hours is 1:
            countdownStatement = countdownStatement + "{1} hour"
        else:
            countdownStatement = countdownStatement + "{1} hours"
        countdownStatement = countdownStatement.format(days, hours)
        message = message + countdownStatement
        lcd.message(message)
        # Scroll when either display line is wider than the LCD.
        if lcd._cols < len(eventName) or lcd._cols < len(countdownStatement):
            maxLen = max(len(eventName), len(countdownStatement))
            scrollMessage(lcd, lcd._cols, maxLen)
        # Sleep up to 60s, waking early if the stop event is set.
        tevent.wait(60)
def scrollMessage(lcd, lcd_columns, length):
    '''Scrolls the current message based on the difference between number of
    columns and length of message.

    ::param lcd:: LCD object for scrolling information.
    ::param lcd_columns:: The number of columns on the screen.
    ::param length:: Length of the message.
    '''
    # One step per character that does not fit on screen.
    scrollRange = range(length-lcd_columns)
    # Shift left one column at a time so the hidden tail becomes visible.
    for i in scrollRange:
        time.sleep(0.5)
        lcd.move_left()
    time.sleep(1)
    # Then scroll back right to the original position.
    for i in scrollRange:
        time.sleep(0.5)
        lcd.move_right()
    time.sleep(1)
def moveToNextEvent(index, eventSignalThread, lcd):
    '''Build (but do not start) a worker thread for the event at *index*.

    ::param index:: Position of the event in the global ``events`` list.
    ::param eventSignalThread:: Event object used to stop the worker
                                externally.
    ::param lcd:: LCD object the worker will draw on.
    ::returns:: An unstarted Thread running calculateAndPrintMessage.
    '''
    name, date = events[index]
    worker_args = (eventSignalThread, lcd, name, date, timeFormat)
    return threading.Thread(target=calculateAndPrintMessage, args=worker_args)
def startProgram(events):
    '''Starts the countdown program and entry-point for program.

    Polls the LCD plate buttons once a second: UP/DOWN cycle through the
    event list, Ctrl-C exits cleanly.

    ::param events:: List of events to be displayed.
    '''
    index = 0
    currentFuture = events[index]
    # NOTE(review): 'preposition' is never used — confirm it is leftover.
    preposition = " in\n"
    lcd = LCD.Adafruit_CharLCDPlate()
    lcd_columns = lcd._cols
    eventSignalThread = threading.Event()
    currentEventThread = threading.Thread(target=calculateAndPrintMessage, args=(eventSignalThread, lcd, currentFuture[0], currentFuture[1], timeFormat))
    try:
        while True:
            # Button presses swap in a new (unstarted) worker thread.
            if lcd.is_pressed(LCD.UP):
                index = (index + 1) % len(events)
                eventSignalThread.set()
                # Waits until the current thread reads the signal to stop.
                currentEventThread.join()
                currentEventThread = moveToNextEvent(index, eventSignalThread, lcd)
            elif lcd.is_pressed(LCD.DOWN):
                index = (index - 1) % len(events)
                eventSignalThread.set()
                # Waits until the current thread reads the signal to stop.
                currentEventThread.join()
                currentEventThread = moveToNextEvent(index, eventSignalThread, lcd)
            # A freshly created (or initial) thread is not alive yet:
            # clear the stop flag and start it.
            if currentEventThread is not None and not currentEventThread.isAlive():
                eventSignalThread.clear()
                currentEventThread.start()
            time.sleep(1)
    except (KeyboardInterrupt):
        if currentEventThread is not None and currentEventThread.isAlive():
            eventSignalThread.set()
            # Waits until the current thread reads the signal to stop.
            currentEventThread.join()
        print("Count Down Ended. Goodbye!")

if __name__ == '__main__':
    startProgram(events)
| true |
f4f88251c817bf15c93803c02cc01285a178d173 | Python | oliviermartin-lam/pupilSimulator | /_utils/interpolateSupport.py | UTF-8 | 2,315 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 16:32:19 2021
@author: omartin
"""
import numpy as np
import scipy.interpolate as interp
import scipy.ndimage as scnd
def interpolateSupport(image,nRes,kind='spline'):
    """Resample a 2D (possibly complex) array onto an nRes-sized grid.

    image : 2D ndarray, real or complex; real and imaginary parts are
            interpolated separately.
    nRes  : scalar (square output) or (mx, my) pair of output sizes.
    kind  : 'nearest' for zero-order zoom, 'spline' for
            RectBivariateSpline, anything else is forwarded to interp2d.
    """
    # Define angular frequencies vectors
    nx,ny = image.shape
    if np.isscalar(nRes):
        mx = my = nRes
    else:
        mx = nRes[0]
        my = nRes[1]
    if kind == 'nearest':
        # NOTE(review): a single zoom factor min(mx/nx, my/ny) is applied
        # to both axes, so a non-square target keeps the source aspect
        # ratio and may not come out exactly (mx, my) — confirm intended.
        tmpReal = scnd.zoom(np.real(image),min([mx/nx,my/ny]),order=0)
        if np.any(np.iscomplex(image)):
            tmpImag = scnd.zoom(np.imag(image),min([mx/nx,my/ny]),order=0)
            return tmpReal + complex(0,1)*tmpImag
        else:
            return tmpReal
    else:
        # Initial frequencies grid, normalised to [-1, 1): even sizes get
        # an asymmetric grid, odd sizes a symmetric one.
        if nx%2 == 0:
            uinit = np.linspace(-nx/2,nx/2-1,nx)*2/nx
        else:
            uinit = np.linspace(-np.floor(nx/2),np.floor(nx/2),nx)*2/nx
        if ny%2 == 0:
            vinit = np.linspace(-ny/2,ny/2-1,ny)*2/ny
        else:
            vinit = np.linspace(-np.floor(ny/2),np.floor(ny/2),ny)*2/ny
        # Interpolated frequencies grid (same convention as above)
        if mx%2 == 0:
            unew = np.linspace(-mx/2,mx/2-1,mx)*2/mx
        else:
            unew = np.linspace(-np.floor(mx/2),np.floor(mx/2),mx)*2/mx
        if my%2 == 0:
            vnew = np.linspace(-my/2,my/2-1,my)*2/my
        else:
            vnew = np.linspace(-np.floor(my/2),np.floor(my/2),my)*2/my
        # Interpolation
        if kind == 'spline':
            # Surprinsingly v and u vectors must be shifted when using
            # RectBivariateSpline. See:https://github.com/scipy/scipy/issues/3164
            tmpReal = interp.fitpack2.RectBivariateSpline(vinit, uinit, np.real(image))
            tmpImag = interp.fitpack2.RectBivariateSpline(vinit, uinit, np.imag(image))
        else:
            tmpReal = interp.interp2d(uinit, vinit, np.real(image),kind=kind)
            tmpImag = interp.interp2d(uinit, vinit, np.imag(image),kind=kind)
        if np.any(np.iscomplex(image)):
            return tmpReal(unew,vnew) + complex(0,1)*tmpImag(unew,vnew)
        else:
            return tmpReal(unew,vnew)
07b684737783928ec04daf403e3c98bc3750d1bb | Python | Pandinosaurus/Danesfield | /danesfield/geon_fitting/tensorflow/ellipses.py | UTF-8 | 5,638 | 3.703125 | 4 | [
"Apache-2.0"
] | permissive | import numpy
"""Demonstration of least-squares fitting of ellipses
__author__ = "Ben Hammel, Nick Sullivan-Molina"
__credits__ = ["Ben Hammel", "Nick Sullivan-Molina"]
__maintainer__ = "Ben Hammel"
__email__ = "bdhammel@gmail.com"
__status__ = "Development"
Requirements
------------
Python 2.X or 3.X
numpy
matplotlib
References
----------
(*) Halir, R., Flusser, J.: 'Numerically Stable Direct Least Squares
Fitting of Ellipses'
(**) http://mathworld.wolfram.com/Ellipse.html
(***) White, A. McHale, B. 'Faraday rotation data analysis with least-squares
elliptical fitting'
"""
class LSqEllipse:
    """Direct least-squares ellipse fit using Halir & Flusser's numerically
    stable formulation (reference (*) in the module docstring).

    Call fit(data) first; the fitted geometry is then available through
    the center / width / height / phi properties or parameters().
    """

    def fit(self, data):
        """Lest Squares fitting algorithm

        Theory taken from (*)
        Solving equation Sa=lCa. with a = |a b c d f g> and a1 = |a b c>
        a2 = |d f g>

        Args
        ----
        data (list:list:float): list of two lists containing the x and y data of the
            ellipse. of the form [[x1, x2, ..., xi],[y1, y2, ..., yi]]

        Returns
        ------
        coef (list): list of the coefficients describing an ellipse
           [a,b,c,d,f,g] corresponding to ax**2+2bxy+cy**2+2dx+2fy+g
        """
        x, y = numpy.asarray(data, dtype=float)

        # Quadratic part of design matrix [eqn. 15] from (*)
        D1 = numpy.mat(numpy.vstack([x ** 2, x * y, y ** 2])).T
        # Linear part of design matrix [eqn. 16] from (*)
        D2 = numpy.mat(numpy.vstack([x, y, numpy.ones(len(x))])).T

        # forming scatter matrix [eqn. 17] from (*)
        S1 = D1.T * D1
        S2 = D1.T * D2
        S3 = D2.T * D2

        # Constraint matrix [eqn. 18]
        C1 = numpy.mat('0. 0. 2.; 0. -1. 0.; 2. 0. 0.')

        # Reduced scatter matrix [eqn. 29]
        M = C1.I * (S1 - S2 * S3.I * S2.T)

        # M*|a b c >=l|a b c >. Find eigenvalues and eigenvectors from this equation [eqn. 28]
        eval, evec = numpy.linalg.eig(M)

        # eigenvector must meet constraint 4ac - b^2 to be valid.
        cond = 4 * numpy.multiply(evec[0, :], evec[2, :]) - numpy.power(evec[1, :], 2)
        a1 = evec[:, numpy.nonzero(cond.A > 0)[1]]

        # |d f g> = -S3^(-1)*S2^(T)*|a b c> [eqn. 24]
        a2 = -S3.I * S2.T * a1

        # eigenvectors |a b c d f g>
        self.coef = numpy.vstack([a1, a2])
        self._save_parameters()

    def _save_parameters(self):
        """finds the important parameters of the fitted ellipse

        Theory taken form http://mathworld.wolfram

        Args
        -----
        coef (list): list of the coefficients describing an ellipse
           [a,b,c,d,f,g] corresponding to ax**2+2bxy+cy**2+2dx+2fy+g

        Returns
        _______
        center (List): of the form [x0, y0]
        width (float): major axis
        height (float): minor axis
        phi (float): rotation of major axis form the x-axis in radians
        """

        # eigenvectors are the coefficients of an ellipse in general form
        # a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 [eqn. 15) from (**) or (***)
        a = self.coef[0, 0]
        b = self.coef[1, 0] / 2.
        c = self.coef[2, 0]
        d = self.coef[3, 0] / 2.
        f = self.coef[4, 0] / 2.
        g = self.coef[5, 0]

        # finding center of ellipse [eqn.19 and 20] from (**)
        x0 = (c * d - b * f) / (b ** 2. - a * c)
        y0 = (a * f - b * d) / (b ** 2. - a * c)

        # Find the semi-axes lengths [eqn. 21 and 22] from (**)
        numerator = 2 * (a * f * f + c * d * d + g * b * b - 2 * b * d * f - a * c * g)
        denominator1 = (b * b - a * c) * ((c - a) * numpy.sqrt(1 + 4 * b * b / ((a - c) * (a - c))) - (c + a))
        denominator2 = (b * b - a * c) * ((a - c) * numpy.sqrt(1 + 4 * b * b / ((a - c) * (a - c))) - (c + a))
        width = numpy.sqrt(numerator / denominator1)
        height = numpy.sqrt(numerator / denominator2)

        # angle of counterclockwise rotation of major-axis of ellipse to x-axis [eqn. 23] from (**)
        # or [eqn. 26] from (***).
        phi = .5 * numpy.arctan((2. * b) / (a - c))

        self._center = [x0, y0]
        self._width = width
        self._height = height
        self._phi = phi

    @property
    def center(self):
        # [x0, y0] center of the fitted ellipse
        return self._center

    @property
    def width(self):
        # semimajor axis length
        return self._width

    @property
    def height(self):
        # semiminor axis length
        return self._height

    @property
    def phi(self):
        """angle of counterclockwise rotation of major-axis of ellipse to x-axis
        [eqn. 23] from (**)
        """
        return self._phi

    def parameters(self):
        # Convenience accessor returning all four fitted parameters.
        return self.center, self.width, self.height, self.phi
def make_test_ellipse(center=[1, 1], width=1, height=.6, phi=3.14 / 5):
    """Generate Elliptical data with noise

    Args
    ----
    center (list:float): (<x_location>, <y_location>)
    width (float): semimajor axis. Horizontal dimension of the ellipse (**)
    height (float): semiminor axis. Vertical dimension of the ellipse (**)
    phi (float:radians): tilt of the ellipse, the angle the semimajor axis
        makes with the x-axis

    Returns
    -------
    data (list:list:float): list of two lists containing the x and y data of the
        ellipse. of the form [[x1, x2, ..., xi],[y1, y2, ..., yi]]
    """
    t = numpy.linspace(0, 2 * numpy.pi, 1000)
    x_noise, y_noise = numpy.random.rand(2, len(t))

    # Pre-compute the trig terms of the parametric ellipse equation.
    cos_t, sin_t = numpy.cos(t), numpy.sin(t)
    cos_phi, sin_phi = numpy.cos(phi), numpy.sin(phi)

    ellipse_x = center[0] + width * cos_t * cos_phi - height * sin_t * sin_phi + x_noise / 2.
    ellipse_y = center[1] + width * cos_t * sin_phi + height * sin_t * cos_phi + y_noise / 2.

    return [ellipse_x, ellipse_y]
| true |
4ce9265db18356b67cf95df8c9502cdb2a7f99d6 | Python | NeilNjae/szyfrow | /tests/test_amsco.py | UTF-8 | 4,303 | 2.734375 | 3 | [
"MIT"
] | permissive | import pytest
import string
from szyfrow.amsco import *
from szyfrow.support.utilities import *
from szyfrow.support.language_models import transpositions_of
def test_positions():
    # The AMSCO grid for a 5-column key on a 26-char text, with the
    # alternating 1/2-character fill pattern: each AmscoSlice records
    # (read order index, start offset, end offset) into the plaintext.
    grid = amsco_positions(string.ascii_lowercase, 'freddy', fillpattern=(1, 2))
    assert grid == [[AmscoSlice(index=3, start=4, end=6),
    AmscoSlice(index=2, start=3, end=4),
    AmscoSlice(index=0, start=0, end=1),
    AmscoSlice(index=1, start=1, end=3),
    AmscoSlice(index=4, start=6, end=7)],
   [AmscoSlice(index=8, start=12, end=13),
    AmscoSlice(index=7, start=10, end=12),
    AmscoSlice(index=5, start=7, end=9),
    AmscoSlice(index=6, start=9, end=10),
    AmscoSlice(index=9, start=13, end=15)],
   [AmscoSlice(index=13, start=19, end=21),
    AmscoSlice(index=12, start=18, end=19),
    AmscoSlice(index=10, start=15, end=16),
    AmscoSlice(index=11, start=16, end=18),
    AmscoSlice(index=14, start=21, end=22)],
   [AmscoSlice(index=18, start=27, end=28),
    AmscoSlice(index=17, start=25, end=27),
    AmscoSlice(index=15, start=22, end=24),
    AmscoSlice(index=16, start=24, end=25),
    AmscoSlice(index=19, start=28, end=30)]]

def test_encipher_message():
    # Known-answer tests covering keys, fill patterns and fill styles.
    ciphertext = amsco_encipher('hellothere', 'abc', fillpattern=(1, 2))
    assert ciphertext == 'hoteelhler'

    ciphertext = amsco_encipher('hellothere', 'abc', fillpattern=(2, 1))
    assert ciphertext == 'hetelhelor'

    ciphertext = amsco_encipher('hellothere', 'acb', fillpattern=(1, 2))
    assert ciphertext == 'hotelerelh'

    ciphertext = amsco_encipher('hellothere', 'acb', fillpattern=(2, 1))
    assert ciphertext == 'hetelorlhe'

    ciphertext = amsco_encipher('hereissometexttoencipher', 'encode')
    assert ciphertext == 'etecstthhomoerereenisxip'

    ciphertext = amsco_encipher('hereissometexttoencipher', 'cipher', fillpattern=(1, 2))
    assert ciphertext == 'hetcsoeisterereipexthomn'

    ciphertext = amsco_encipher('hereissometexttoencipher', 'cipher', fillpattern=(1, 2), fillstyle=AmscoFillStyle.continuous)
    assert ciphertext == 'hecsoisttererteipexhomen'

    ciphertext = amsco_encipher('hereissometexttoencipher', 'cipher', fillpattern=(2, 1))
    assert ciphertext == 'heecisoosttrrtepeixhemen'

    ciphertext = amsco_encipher('hereissometexttoencipher', 'cipher', fillpattern=(1, 3, 2))
    assert ciphertext == 'hxtomephescieretoeisnter'

    ciphertext = amsco_encipher('hereissometexttoencipher', 'cipher', fillpattern=(1, 3, 2), fillstyle=AmscoFillStyle.continuous)
    assert ciphertext == 'hxomeiphscerettoisenteer'

def test_decipher_message():
    # Round-trip property: decipher(encipher(x)) == x for every
    # combination of key, fill pattern and fill style.
    plaintext = 'hereissometexttoencipher'
    for key in ['bayes', 'samplekey']:
        for fillpattern in [(1, 2), (2, 1)]:
            for fillstyle in AmscoFillStyle:
                enciphered = amsco_encipher(plaintext, key,
                    fillpattern=fillpattern, fillstyle=fillstyle)
                deciphered = amsco_decipher(enciphered, key,
                    fillpattern=fillpattern, fillstyle=fillstyle)
                assert deciphered == plaintext

def test_amsco_break():
    # End-to-end attack: encipher a long sanitised passage, then check
    # amsco_break recovers the key transposition, fill pattern, fill
    # style and the bigram score of the plaintext.
    plaintext = sanitise('''It is a truth universally acknowledged, that a single man in
     possession of a good fortune, must be in want of a wife. However
     little known the feelings or views of such a man may be on his
     first entering a neighbourhood, this truth is so well fixed in
     the minds of the surrounding families, that he is considered the
     rightful property of some one or other of their daughters.''')
    expected_key = 'encipher'
    expected_fillpattern = (1, 2)
    expected_fillsytle = AmscoFillStyle.continuous
    expected_score = Pbigrams(plaintext)
    ciphertext = amsco_encipher(plaintext, expected_key,
        fillpattern=expected_fillpattern, fillstyle=expected_fillsytle)

    # Candidate keys are grouped by their transposition signature.
    used_translist = collections.defaultdict(list)
    for word in 'encipher fourteen keyword'.split():
        used_translist[transpositions_of(word)] += [word]

    (key, fillpattern, fillstyle), score = amsco_break(ciphertext,
            translist=used_translist)
    assert key == transpositions_of(expected_key)
    assert fillpattern == expected_fillpattern
    assert fillstyle == expected_fillsytle
    assert score == pytest.approx(expected_score)
| true |
0d055e903def6ef9e845b8f28e8299bfc78b9343 | Python | codeybear/weatherwise | /schedule/models/Schedule.py | UTF-8 | 5,971 | 2.921875 | 3 | [] | no_license | """All functionality related to schedules"""
from schedule.models import Common
class Schedule:
    """Mutable record mirroring a row of the ``schedule`` table.

    Keyword arguments are copied verbatim onto the instance, so it can be
    built straight from a DB-API row dict: ``Schedule(**row)``.
    """

    def __init__(self, **entries):
        # Defaults are set per instance.  They used to be class
        # attributes, which made the mutable ``WorkingDays`` list shared
        # by every Schedule object: appending on one instance leaked
        # into all the others.  Initialising here fixes that while
        # keeping the same attribute names and default values.
        self.Id = 0
        self.Name = ""
        self.StartDate = ""
        self.StartDateDisplay = ""
        self.StatusTypeId = 0
        self.StatusDate = ""
        self.StatusDateDisplay = ""
        self.WorkingDay0 = False
        self.WorkingDay1 = False
        self.WorkingDay2 = False
        self.WorkingDay3 = False
        self.WorkingDay4 = False
        self.WorkingDay5 = False
        self.WorkingDay6 = False
        # Represents the above working days information in an array for
        # convenience (populated by ScheduleService).
        self.WorkingDays = []
        self.__dict__.update(entries)
class StatusType:
    """Simple record for one row of the ``status_type`` lookup table."""

    # Default (empty) values for instances built without data.
    Id = ""
    Name = ""

    def __init__(self, **entries):
        # Copy the supplied column values straight onto the instance.
        self.__dict__.update(entries)
class ScheduleService:
    """CRUD operations for Schedule rows (pymysql-style connections from
    Common.getconnection; every method opens and closes its own connection)."""

    @classmethod
    def GetById(cls, uid):
        """Load one schedule by primary key, with display dates and the
        WorkingDays convenience list filled in."""
        connection = Common.getconnection()
        try:
            with connection.cursor() as cursor:
                sql = "SELECT * FROM schedule WHERE Id=%s"
                # NOTE(review): (str(uid)) is not a tuple — the driver
                # accepts a bare scalar here, but (str(uid),) would be
                # clearer; confirm before changing.
                cursor.execute(sql, (str(uid)))
                result = cursor.fetchone()
                schedule = None if result is None else Schedule(**result)
                # NOTE(review): if no row matched, schedule is None and the
                # calls below raise AttributeError — confirm callers only
                # pass existing ids.
                schedule = cls.__GetWorkingDays(schedule)
                schedule.StartDateDisplay = schedule.StartDate.strftime("%d/%m/%Y")
                if schedule.StatusDate is not None:
                    schedule.StatusDateDisplay = schedule.StatusDate.strftime("%d/%m/%Y")
                return schedule
        finally:
            connection.close()

    @classmethod
    def GetAll(cls):
        """Return every schedule (joined with its status name), ordered by name."""
        connection = Common.getconnection()
        try:
            with connection.cursor() as cursor:
                sql = """SELECT schedule.*, status_type.Name AS StatusName FROM schedule
                         INNER JOIN status_type ON status_type.Id = schedule.StatusTypeId
                         ORDER BY schedule.Name"""
                cursor.execute(sql)
                results = cursor.fetchmany(cursor.rowcount)
                scheduleList = [Schedule(**result) for result in results]
                return scheduleList
        finally:
            connection.close()

    @classmethod
    def Add(cls, schedule):
        """Insert a new schedule row (working days defaulted via
        CheckWorkingDays when none are set)."""
        connection = Common.getconnection()
        schedule = ScheduleService.CheckWorkingDays(schedule)
        try:
            with connection.cursor() as cursor:
                sql = """INSERT INTO `schedule` (`Name`, `StartDate`, `WorkingDay0`, `WorkingDay1`, `WorkingDay2`,
                         `WorkingDay3`, `WorkingDay4`, `WorkingDay5`, `WorkingDay6`, `StatusTypeId`, `StatusDate`)
                         VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) """
                cursor.execute(sql, (schedule.Name, schedule.StartDate, schedule.WorkingDay0, schedule.WorkingDay1,
                                     schedule.WorkingDay2, schedule.WorkingDay3, schedule.WorkingDay4,
                                     schedule.WorkingDay5, schedule.WorkingDay6, schedule.StatusTypeId,
                                     schedule.StatusDate))

            connection.commit()
        finally:
            connection.close()

    @classmethod
    def Update(cls, schedule):
        """Persist changes to an existing schedule row (matched by Id)."""
        connection = Common.getconnection()
        schedule = ScheduleService.CheckWorkingDays(schedule)
        try:
            with connection.cursor() as cursor:
                sql = """UPDATE `schedule` SET `Name` = %s, `StartDate` = %s, `WorkingDay0` = %s, `WorkingDay1` = %s,
                         `WorkingDay2` = %s, `WorkingDay3` = %s, `WorkingDay4` = %s, `WorkingDay5` = %s,
                         `WorkingDay6` = %s,  `StatusTypeId` = %s, `StatusDate` = %s WHERE Id = %s """
                cursor.execute(sql, (schedule.Name, schedule.StartDate, schedule.WorkingDay0, schedule.WorkingDay1,
                                     schedule.WorkingDay2, schedule.WorkingDay3, schedule.WorkingDay4,
                                     schedule.WorkingDay5, schedule.WorkingDay6, schedule.StatusTypeId,
                                     schedule.StatusDate, schedule.Id))

            connection.commit()
        finally:
            connection.close()

    @classmethod
    def Delete(cls, schedule_id):
        """Delete the schedule row with the given primary key."""
        connection = Common.getconnection()
        try:
            with connection.cursor() as cursor:
                sql = "DELETE FROM schedule WHERE Id = %s"
                cursor.execute(sql, (schedule_id))
            connection.commit()
        finally:
            connection.close()

    @classmethod
    def __GetWorkingDays(cls, schedule):
        """Mirror the seven WorkingDayN flags into the WorkingDays list."""
        schedule.WorkingDays = [schedule.WorkingDay0,
                                schedule.WorkingDay1,
                                schedule.WorkingDay2,
                                schedule.WorkingDay3,
                                schedule.WorkingDay4,
                                schedule.WorkingDay5,
                                schedule.WorkingDay6]

        return schedule

    @classmethod
    def CheckWorkingDays(cls, schedule):
        """If no working day is selected, default to Monday-Friday
        (WorkingDay0..WorkingDay4)."""
        if not schedule.WorkingDay0 and not schedule.WorkingDay1 and not schedule.WorkingDay2 \
                and not schedule.WorkingDay3 and not schedule.WorkingDay4 and not schedule.WorkingDay5 \
                and not schedule.WorkingDay6:
            schedule.WorkingDay0 = True
            schedule.WorkingDay1 = True
            schedule.WorkingDay2 = True
            schedule.WorkingDay3 = True
            schedule.WorkingDay4 = True

        return schedule

    @classmethod
    def GetStatusTypes(cls):
        """Return every row of the status_type lookup table as StatusType objects."""
        connection = Common.getconnection()
        try:
            with connection.cursor() as cursor:
                sql = "SELECT Id, Name FROM status_type"
                cursor.execute(sql)
                results = cursor.fetchmany(cursor.rowcount)
                # Convert list of dicts to list of classes
                statusTypeList = [StatusType(**result) for result in results]
                return statusTypeList
        finally:
            connection.close()
| true |
c7b7315e4f4d15bb7f7dca5e27c8ca0400bd8349 | Python | NLPDev/OCR | /ocr.py | UTF-8 | 2,984 | 2.625 | 3 | [] | no_license | import pytesseract
import cv2
img=cv2.imread(r'text.jpg')
# Get text in the image via Tesseract OCR.
text=pytesseract.image_to_string(img)
# The original guard "(2>3) or (2>1)" is always True, so the text was
# printed twice unconditionally; a single unconditional print is the
# intended behavior.
print(text)
# Convert string into hexadecimal
# hex_text = text.encode("hex")
# print(hex_text)
# import PyPDF2
# import csv
# import sys
# import glob
# import errno
# import json
#
#
# def isnumeric(s):
# return all(c in "0123456789." for c in s) and any(c in "0123456789" for c in s)
#
# path = '*.pdf'
# files = glob.glob(path)
#
#
# for name in files:
# read_pdf = PyPDF2.PdfFileReader(name)
# number_of_pages = read_pdf.getNumPages()
# page = read_pdf.getPage(3)
# page_content = page.extractText()
#
# aa=page_content.split('\n')
#
# data=[]
#
# dic={
# "Title": "PITCH PERPECT",
# "Title_id": "FM33106746",
# "Genre": "Film",
# "Actor": "ANNA KENDRICK/SKYLAR ASTIN",
# "Production Co": "BROWNSTONE PRODUCTIONS (III)",
# "Director": "JASON MOORE",
# "Screenwriter":"",
# "Country of Origin": "UNITED STATES OF AMERICA",
# "Production year": "2012",
# "Production Duration": "112 00",
# "Music Duration": "87.21"
# }
#
# st_point=-1
# j = 0
#
# for cur in aa:
# data.append(cur)
# j=j+1
#
# if cur=="Owner(s) of Copyright ":
# st_point=j
#
# flag=0
# read_num=0
# num=0
# tot=0
# ins=[]
# ins.append([])
# trac=[]
#
# for k in range(st_point, j):
# if flag==1:
# flag=0
# if len(data[k])>1 and data[k]!="CA":
# str=data[k]
# str1=str[0:1]
# ins[tot].append(str1)
# str1=str[1:len(str)]
# ins.append([])
# if read_num==1:
# read_num=0
# trac.append(tot)
# ins[tot].append(str1)
# else:
# ins[tot+1].append(str1)
# print(str1)
# tot=tot+1
#
# if isnumeric(str1):
# read_num=1
#
# else:
#
# if data[k].isdigit() and len(data)>4:
# flag=1
# ins[tot].append(data[k])
# if isnumeric(data[k]) and flag!=1 and read_num==0:
# read_num=1
# if len(data[k])>6:
# str=data[k]
# ll = str.find(".")
# str1=str[0:ll+3]
#
# ins[tot].append(str1)
# str1=str[ll+3:len(str)]
# ins[tot].append(str1)
#
#
#
#
# print(len(trac))
# for ll in trac:
# print(ll)
#
#
#
# #
# # d = {}
# # aa="Namdfse"
# # d[aa] = "Luke"
# # d["Country"] = "Canada"
# # d["city"]=["asdf","afd"]
# # tt={"ti":"fad", "duf":"dsf"}
# # d["trac"]=tt
# #
# # res=json.dumps(d, indent=4)
# #
# # print(res)
# # f= open("guru99.txt","w+")
# # f.write(res)
| true |
eee9cc5e144eee40ad90e9152dc9ec0759972423 | Python | Adlizm/NavigationSys | /main.py | UTF-8 | 4,521 | 2.609375 | 3 | [] | no_license | import sys
import OpenGL
from OpenGL.GLU import *
from OpenGL.GLUT import *
from OpenGL.GL import *
import pywavefront
from pywavefront import visualization
import numpy as np
# Scene model and camera state shared by the GLUT callbacks below.
# NOTE(review): the .obj filename ends with a trailing space -- confirm the
# file on disk is really named that way.
home = pywavefront.Wavefront ( 'Trabalho2CG.obj ' )
# Window size in pixels.
wid,hei = 480,480
# Camera position in world space.
posX,posY,posZ = 0,2,10
# Point the camera looks at.
focusX,focusY,focusZ = 0,2,0
# Centre of the window; the pointer is warped back here after every move.
mouseX,mouseY = int(wid/2),int(hei/2)
# Spherical view-direction angles: theta = azimuth, phi = elevation.
theta,phi = np.pi,0
# Radius of the sphere on which the focus point is placed.
distaceOfFocus = 10
# Forward/backward (W/S) step direction, length 0.1.
WdirectX,WdirectZ = 0,0.1
# Strafe (A/D) step direction, length 0.1.
DdirectZ,DdirectX = 0,0.1
# Ordinals of the movement keys currently held down.
Keyspressed = []
def main():
    """Initialise GLUT and the OpenGL state, set up the camera, run the loop.

    Note: the calls below are order-dependent GL/GLUT state setup.
    """
    glutInit(sys.argv)
    # Double-buffered RGB window with a depth buffer.
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
    glutInitWindowSize(wid,hei)
    glutCreateWindow('Tabalho 2 Computacao Grafica')
    glClearColor(.8,.8,1.,1.)
    glShadeModel(GL_SMOOTH)
    glEnable(GL_CULL_FACE)
    glEnable(GL_DEPTH_TEST)
    # Disabled lighting setup kept for reference.
    '''
    glEnable(GL_LIGHTING)
    lightZeroPosition = [2.,10.,2.,1.]
    lightZeroColor = [4.0,4.0,4.0,4.0] #green tinged
    glLightfv(GL_LIGHT0, GL_POSITION, lightZeroPosition)
    glLightfv(GL_LIGHT0, GL_DIFFUSE, lightZeroColor)
    glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, 0.1)
    glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0.02)
    glEnable(GL_LIGHT0)
    '''
    glutSetCursor(GLUT_CURSOR_NONE) # hide the mouse cursor
    # Register the GLUT callbacks defined below.
    glutDisplayFunc(display)
    glutKeyboardFunc(keydown)
    glutKeyboardUpFunc(keyup)
    glutPassiveMotionFunc(mouseMove)
    glMatrixMode(GL_PROJECTION)
    gluPerspective(40.,1.,1.,40.)
    glMatrixMode(GL_MODELVIEW)
    gluLookAt(posX,posY,posZ,focusX,focusY,focusZ,0,1,0)
    # Warp the pointer to the window centre so the first mouseMove delta is 0.
    glutWarpPointer(mouseX,mouseY)
    glPushMatrix()
    glutMainLoop()
def display():
    """GLUT display callback: apply pending camera motion, then draw the scene."""
    movimentOfCamera()
    glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
    glPushMatrix()
    # Render the loaded wavefront model. (The previous unused local
    # `color` has been removed.)
    visualization.draw(home)
    glPopMatrix()
    # Double-buffered: present the finished frame.
    glutSwapBuffers()
def mouseMove(x,y):
    """GLUT passive-motion callback: rotate the view with the mouse.

    The pointer delta from the window centre is mapped to angle
    increments, the elevation is clamped to +/- pi/2, the focus point
    and movement directions are recomputed, and the pointer is warped
    back to the centre.
    """
    global wid,hei
    global posX,posY,posZ
    global focusX,focusY,focusZ
    global mouseX,mouseY
    global phi,theta,distaceOfFocus
    global WdirectX,WdirectZ,DdirectX,DdirectZ
    # Pointer delta relative to the window centre.
    dx = mouseX-x
    dy = mouseY-y
    # Map a full window width/height of motion to pi radians.
    rotX = map(dx,0,wid,0,np.pi)
    rotY = map(dy,0,hei,0,np.pi)
    theta += rotX
    phi += rotY
    # Clamp elevation so the camera cannot flip over the poles.
    if phi > np.pi/2:
        phi = np.pi/2;
    elif phi < -np.pi/2:
        phi = -np.pi/2
    # NOTE(review): the focus point is computed on a sphere centred at the
    # ORIGIN, not at the camera position (posX/Y/Z is not added) -- the
    # look-at target therefore ignores camera translation; confirm this is
    # intentional.
    focusX = np.cos(phi)*np.sin(theta)*distaceOfFocus
    focusY = np.sin(phi)*distaceOfFocus
    focusZ = np.cos(phi)*np.cos(theta)*distaceOfFocus
    WdirectX = (focusX - posX)/100 # to force the direction vector length to 0.1
    # NOTE(review): /100 yields length 0.1 only while distaceOfFocus == 10
    # and the camera sits at the origin -- TODO confirm.
    WdirectZ = (focusZ - posZ)/100
    # Strafe direction: forward direction rotated by -90 degrees.
    DdirectX = np.sin(theta - np.pi/2)*np.cos(phi)*0.1
    DdirectZ = np.cos(theta - np.pi/2)*np.cos(phi)*0.1
    # Re-centre the pointer so the next event yields a fresh delta.
    glutWarpPointer(mouseX,mouseY)
    glLoadIdentity()
    gluLookAt(posX,posY,posZ,focusX,focusY,focusZ,0,1,0)
    glutPostRedisplay()
def keyup(key,x,y):
    """GLUT keyboard-up callback: stop tracking *key* so motion ceases.

    Removes the first occurrence of the key's ordinal from Keyspressed;
    keys that were never tracked (e.g. ESC) are ignored.
    """
    global Keyspressed
    try:
        # list.remove drops the first matching entry -- same effect as the
        # original index-scan-and-pop loop, without manual bookkeeping.
        Keyspressed.remove(ord(key))
    except ValueError:
        pass  # key was not being tracked; nothing to do
def keydown(key,x,y):
    """GLUT keyboard-down callback.

    Movement keys (W, S, D, A, Z, space) are recorded in Keyspressed at
    most once each; ESC leaves the GLUT main loop and ends the program.
    """
    global Keyspressed
    keyPress = ord(key)
    # Ordinals of w, s, d, a, z and the space bar.
    movement_keys = (119, 115, 100, 97, 122, 32)
    if keyPress in movement_keys:
        # Register the key only if it is not already being tracked.
        if keyPress not in Keyspressed:
            Keyspressed.append(keyPress)
    elif keyPress == 27:
        # ESC: quit the program.
        glutLeaveMainLoop()
def map(Value,Min,Max,Newmin,Newmax):
    """Linearly rescale Value from the range [Min, Max] to [Newmin, Newmax].

    NOTE: this intentionally keeps the original name and therefore shadows
    the builtin map() within this module.
    """
    scale = (Newmax - Newmin) / (Max - Min)
    return (Value - Min) * scale + Newmin
def movimentOfCamera():
    """Apply one movement step for every key currently held down.

    W/S move along the forward direction, A/D strafe, Z/space move the
    camera down/up; the focus point is translated together with the
    camera, then the modelview matrix is rebuilt and a redraw requested.
    """
    global Keyspressed
    global posX,posY,posZ,focusX,focusY,focusZ
    global WdirectX,WdirectZ,DdirectX,DdirectZ
    # Per-key displacement (dx, dy, dz), rebuilt each call because the
    # direction vectors are updated by mouseMove.
    deltas = {
        119: (WdirectX, 0.0, WdirectZ),    # W: forward
        115: (-WdirectX, 0.0, -WdirectZ),  # S: backward
        100: (DdirectX, 0.0, DdirectZ),    # D: strafe right
        97:  (-DdirectX, 0.0, -DdirectZ),  # A: strafe left
        122: (0.0, -0.1, 0.0),             # Z: move down
        32:  (0.0, 0.1, 0.0),              # space: move up
    }
    for keyPress in Keyspressed:
        if keyPress in deltas:
            dx, dy, dz = deltas[keyPress]
            # Move camera and focus together so the view direction is kept.
            posX += dx
            posY += dy
            posZ += dz
            focusX += dx
            focusY += dy
            focusZ += dz
    glLoadIdentity()
    gluLookAt(posX,posY,posZ,focusX,focusY,focusZ,0,1,0)
    glutPostRedisplay()
# Script entry point.
if __name__ == '__main__': main()
| true |
a9fc284f8445d6c5231860105cc91a0da9fe2a5f | Python | ArtyomKozyrev8/tkinter_training | /2404tr.py | UTF-8 | 722 | 2.796875 | 3 | [] | no_license | from tkinter import *
from tkinter import ttk

# Demo window: a red frame holding a 2x2 grid of buttons, plus a yellow
# labelled frame underneath.
app = Tk()
app.geometry("400x400")
subframe = Frame(app, width=300, bg="red", height=300, relief=SUNKEN, bd=5)
#subframe.grid(column=0, row=0, padx=100)
subframe.pack(fill=X)
# Create the widgets first, then place them: Button(...).grid(...) returns
# None (grid() has no return value), so the chained form left button1..4
# bound to None instead of the Button widgets.
button1 = Button(subframe, text="NNNNN", width=20)
button1.grid(column=0, row=0, pady=10, padx=10)
button2 = Button(subframe, text="YYYYYYYY", width=20)
button2.grid(column=1, row=0)
button3 = Button(subframe, text="12231233", width=20)
button3.grid(column=0, row=1, pady=10)
button4 = Button(subframe, text="LOL!!", width=20)
button4.grid(column=1, row=1)
labelsubframe = LabelFrame(app, text="Fuuuu", bg="yellow", padx=100, pady=100)
label = Label(labelsubframe, text="FU!!!")
label.pack()
labelsubframe.pack()
app.mainloop()
| true |
7f83df6e8e4003e1cc7c32a0ec21aca302bfcb43 | Python | mensahd/bark | /modules/runtime/commons/roadgraph_generator.py | UTF-8 | 4,601 | 2.671875 | 3 | [
"MIT"
] | permissive | # Copyright (c) 2019 fortiss GmbH
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
from bark.world.map import *
class RoadgraphGenerator:
    def __init__(self, roadgraph):
        """Store the roadgraph object that generate() will populate."""
        self.roadgraph = roadgraph
def get_lane_id_by_pos(self, lane_section, pos):
for lane_id, lane in lane_section.get_lanes().items():
if lane.lane_position == pos:
return lane_id
return None
    def generate(self, road_map):
        """Populate the roadgraph from an OpenDRIVE-style *road_map*.

        Four passes: (1) add every lane as a vertex, (2) wire
        successor/predecessor edges along consecutive roads, (3) wire
        inner/outer neighbor edges within each lane section, (4) wire
        successor edges through junction connections.
        """
        # -- pass 1: one roadgraph vertex per lane ------------------------
        # add vertices
        for road_id, road in road_map.get_roads().items():
            # there could be mult. lane_sections
            for _, lane_section in enumerate(road.lane_sections):
                for lane_id, lane in lane_section.get_lanes().items():
                    self.roadgraph.add_lane(road_id, lane)
        # -- pass 2: successor/predecessor edges between roads ------------
        # add successors,predecessors
        for road_id, road in road_map.get_roads().items():
            successor_road_id = road.link.successor.id # this is the position!!!!!! (-4, 4)
            predecessor_road_id = road.link.predecessor.id # this is the position!!!!!! (-4, 4)
            # NOTE(review): ids > 1000 are skipped -- presumably these are
            # junction ids rather than road ids; confirm the convention.
            if successor_road_id > 1000:
                continue
            successor_road = road_map.get_roads()[successor_road_id]
            try:
                predecessor_road = road_map.get_roads()[predecessor_road_id]
                # from last element in lane sections
                predecessor_lane_section = predecessor_road.lane_sections[-1]
            except:
                # NOTE(review): bare except also hides unrelated errors;
                # consider catching KeyError explicitly.
                print("Road has no predeseccor road.")
            successor_lane_section = successor_road.lane_sections[0]
            # TODO (@hart): there could be mult. lane_sections
            for _, lane_section in enumerate(road.lane_sections):
                for lane_id, lane in lane_section.get_lanes().items():
                    # add successor edge
                    successor_lane_position = lane.link.successor.id
                    successor_lane_id = self.get_lane_id_by_pos(successor_lane_section, successor_lane_position)
                    if successor_lane_id is not None:
                        self.roadgraph.add_successor(lane_id, successor_lane_id)
                    # does not always have predecessor
                    try:
                        predecessor_lane_position = lane.link.predecessor.id
                        # search for predecessor_lane_position in previos lane section
                        predecessor_lane_id = self.get_lane_id_by_pos(predecessor_lane_section, predecessor_lane_position)
                        # if found add; convert predecessor to successor
                        if predecessor_lane_id is not None:
                            self.roadgraph.add_successor(predecessor_lane_id, lane_id)
                    except:
                        print("Road has no predeseccor road.")
        # -- pass 3: neighbor edges inside each lane section --------------
        # add neighbor edges
        for road_id, road in road_map.get_roads().items():
            for _, lane_section in enumerate(road.lane_sections):
                for lane_id, lane in lane_section.get_lanes().items():
                    # NOTE(review): `is not 0` is an identity comparison and
                    # only works because CPython caches small ints -- this
                    # should be `!= 0`.
                    if lane.lane_position is not 0:
                        # lane one position closer to the reference line
                        inner_lane_pos = lane.lane_position - 1 if lane.lane_position > 0 else lane.lane_position + 1
                        inner_lane_id = self.get_lane_id_by_pos(lane_section, inner_lane_pos)
                        if inner_lane_id is not None:
                            self.roadgraph.add_inner_neighbor(inner_lane_id, lane_id)
                            self.roadgraph.add_outer_neighbor(inner_lane_id, lane_id)
        # -- pass 4: successor edges across junction connections ----------
        # map.junctions
        for _, junction in road_map.get_junctions().items():
            for _, connection in junction.get_connections().items():
                incoming_road = road_map.get_roads()[connection.incoming_road]
                connecting_road = road_map.get_roads()[connection.connecting_road]
                pre_lane_section = incoming_road.lane_sections[0]
                successor_lane_section = connecting_road.lane_sections[0]
                for lane_link in connection.lane_links:
                    #print(lane_link.from_id, lane_link.to_id) # TODO (@hart): this is actually the lane pos
                    # add successor edge
                    pre_lane_id = self.get_lane_id_by_pos(pre_lane_section, lane_link.from_id)
                    successor_lane_id = self.get_lane_id_by_pos(successor_lane_section, lane_link.to_id)
                    if successor_lane_id is not None and pre_lane_id is not None:
                        self.roadgraph.add_successor(pre_lane_id, successor_lane_id)
12f5cb00406ac7983495d04f6d4723c06c43046f | Python | msullivancm/CursoEmVideoPython | /Mundo1-ExerciciosFundamentos/ex004.py | UTF-8 | 717 | 3.9375 | 4 | [] | no_license | algo = input('Digite algo: ')
print('O tipo de {} é {}'.format(algo, type(algo)))
print('{} é número? {}'.format(algo, algo.isalnum()))
print('{} é alpha? {}'.format(algo, algo.isalpha()))
print('{} é alpha? {}'.format(algo, algo.isalphanum()))
print('{} é ascii? {}'.format(algo, algo.isascii()))
print('{} é decimal? {}'.format(algo, algo.isdecimal()))
print('{} é digito? {}'.format(algo, algo.isdigit()))
print('{} é identificador? {}'.format(algo, algo.isidentifier()))
print('{} é minúsculo? {}'.format(algo, algo.islower()))
print('{} é imprimível? {}'.format(algo, algo.isprintable()))
print('{} é espaço? {}'.format(algo, algo.isspace()))
print('{} é title? {}'.format(algo, algo.istitle()))
| true |