blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d8308d06443759d2e409e194a4dd1c54523819ce | Python | prussiap/pru_vide | /pru_vide_python/pru_vide.py | UTF-8 | 1,295 | 3.0625 | 3 | [] | no_license | from Adafruit_MCP230xx import Adafruit_MCP230XX
import sys
import time
if __name__ == '__main__':
    # Sous-vide controller demo: toggles an SSR (pin 7) and a second output
    # (pin 3) once per second via an MCP23017 I2C port expander, while
    # printing the state of two pull-up buttons.  Python 2 syntax (print
    # statements); requires the Adafruit_MCP230xx library and I2C hardware.
    # ***************************************************
    # Set num_gpios to 8 for MCP23008 or 16 for MCP23017!
    # ***************************************************
    #mcp = Adafruit_MCP230XX(address = 0x20, num_gpios = 8) # MCP23008
    mcp = Adafruit_MCP230XX(address = 0x20, num_gpios = 16) # MCP23017
    # Set pins 0, 1 and 2 to output (you can set pins 0..15 this way)
    # mcp.config(4, mcp.OUTPUT) #LED if present
    mcp.config(7, mcp.OUTPUT) #SSR sous vide (solid-state relay driving the heater)
    mcp.config(3, mcp.OUTPUT)
    # Set pin 3 to input with the pullup resistor enabled
    mcp.pullup(2, 1) #Bottom button
    mcp.pullup(1, 1) #Second to bottom button
    # Read input pin and display the results
    # print "Pin 3 = %d" % (mcp.input(6) >> 3)
    # Python speed test on output 0 toggling at max speed
    print "Starting blinky on pin 0 (CTRL+C to quit)"
    while (True):
        # Show button states each cycle (1 = not pressed, pull-ups active).
        print "Pin 5 = %d" % (mcp.input(2))
        print "Pin 6 = %d" % (mcp.input(1))
        # mcp.output(4, 1)  # Pin 0 High for led
        mcp.output(3, 1)
        mcp.output(7, 1)
        time.sleep(1);
        # mcp.output(4, 0)  # Pin 0 Low
        mcp.output(3, 0)
        mcp.output(7, 0)
        time.sleep(1);
| true |
92985c8b11ee14fa78a72b5e9d72ad27e18f86bb | Python | Olivewind-Sejong/pythonSimpleChatBot | /test_morphs.py | UTF-8 | 400 | 2.84375 | 3 | [] | no_license | '''
Created on 2018. 1. 28.
@author: hillk
'''
import sys
from konlpy.tag import Twitter
# Interactive REPL: read Korean sentences from stdin and print their
# morpheme segmentation using konlpy's Twitter tagger.  Exit with Ctrl+C.
twitter = Twitter()
print('한글 문장을 입력하세요.')
try:
    while True:
        sys.stdout.write('>> ')
        sys.stdout.flush()
        text = sys.stdin.readline().strip()
        if text:
            # morphs() splits the sentence into a list of morphemes.
            answer = twitter.morphs(text)
            print(answer)
except KeyboardInterrupt:
    # Ctrl+C ends the session quietly.
    pass
| true |
3ba0c7f663d5da7b3a75e3bce3567fe837e3a53b | Python | modsim/junn | /junn/networks/mixins/prediction.py | UTF-8 | 1,270 | 2.5625 | 3 | [
"DOC",
"BSD-2-Clause"
] | permissive | """Prediction signatures mixin."""
import tensorflow as tf
class PredictionSignatureMixin:
    """Prediction signatures mixin for NeuralNetwork subclasses.

    Expects the host class to provide ``self.prediction_fn`` taking a float32
    image tensor with a trailing channel axis and returning a prediction of
    the same spatial shape (assumed from usage below — TODO confirm).
    """

    def get_signatures(self):  # noqa: D102
        # Build a dict of tf.function serving signatures around prediction_fn.
        predict = self.prediction_fn

        signatures = {}

        @tf.function(input_signature=[tf.TensorSpec([None, None], dtype=tf.float32)])
        def predict_no_channel(image):
            # Add a channel axis for the model, then drop it again so the
            # caller gets a plain 2-D result.
            return predict(image[..., tf.newaxis])[:, :, 0]

        signatures['predict_no_channel'] = predict_no_channel

        @tf.function(input_signature=[tf.TensorSpec([], dtype=tf.string)])
        def predict_png(png_data):
            # Decode PNG bytes, predict, and re-encode the result as PNG.
            image = tf.io.decode_png(png_data)
            image = tf.cast(image, tf.float32)
            prediction = predict(image)
            # either pre multiply, or post multiply
            # one yields a gray scale image, the other basically only a binary b/w image
            prediction = prediction * 255
            prediction = tf.cast(prediction, tf.uint8)
            prediction = tf.image.encode_png(prediction, compression=0)
            return dict(
                predict_png_bytes=prediction
            )  # important _bytes suffix for TF Serving

        signatures['predict_png'] = predict_png

        return signatures
| true |
37d57ef0a166f606515219e3806a1723940f70ad | Python | JHernandezElena/DataAcquisition-Transformation | /Practica 6 - Bad format csv.py | UTF-8 | 296 | 2.828125 | 3 | [] | no_license | import csv
def load_csv(path):
    """Read a semicolon-delimited CSV file and return its rows as a list of lists."""
    with open(path, "r") as handle:
        return list(csv.reader(handle, delimiter=";"))
# Load the badly formatted (semicolon-delimited) sample file and show it.
load_path = "data_sets/bad_format.csv"
matrix = load_csv(load_path)
print(matrix)
# TODO: the mean of the values still needs to be computed here.
a3a928e3ee58572d459c76fd8d028d6d45bd7972 | Python | Tirklee/python3-demo | /T2_11 Python 交换变量 3.py | UTF-8 | 253 | 4.0625 | 4 | [] | no_license | # 不使用临时变量:
# -*- coding: UTF-8 -*-
# Swap two variables without using a temporary variable (arithmetic trick).
# Read the two values from the user.
x = int(input('输入 x 值: '))
y = int(input('输入 y 值: '))

# x holds the sum; subtracting recovers each original value in turn.
x = x + y
y = x - y
x = x - y

print('交换后 x 的值为: {}'.format(x))
print('交换后 y 的值为: {}'.format(y))
32f4b773b651f3414cd540c941a76cdf893f8fba | Python | apluscs/Fun-Problems | /anagrams/solveSLOW.py | UTF-8 | 2,426 | 3.296875 | 3 | [] | no_license | import time #SLOW AF, 146 sec for 'barbarabush'
biglist=[]
def isPrefix(prefix, dict):  # 'dict' shadows the builtin; name kept for existing callers
    """Return True if any word in the *sorted* word list `dict` starts with `prefix`.

    Uses binary search, so `dict` must be sorted ascending.
    """
    st = 0
    end = len(dict) - 1
    while st <= end:  # binary search
        # '//' keeps the index an int; plain '/' yields a float on Python 3
        # and made dict[mid] raise TypeError.
        mid = (st + end) // 2
        curr = dict[mid]
        if len(prefix) <= len(curr) and curr[:len(prefix)] == prefix:
            return True
        elif prefix < curr:
            end = mid - 1
        else:
            st = mid + 1
    return False
def findAna(commit,prefix,suffix,dict):
    """Recursively enumerate multi-word anagrams.

    commit  -- words already accepted (list of str)
    prefix  -- letters of the current candidate word built so far
    suffix  -- letters still unplaced
    dict    -- sorted word list (searched via isPrefix / membership)

    Completed anagrams are appended to the module-global `biglist`.
    NOTE: `commit not in biglist` is an O(len(biglist)) dedupe scan,
    which dominates runtime on large inputs.
    """
    global biglist
    # print(prefix+" "+suffix)
    if len(prefix)==0 and len(suffix)==0:
        # All letters consumed: commit is a complete anagram.
        # print(commit)
        if commit not in biglist:
            biglist.append(commit[:])
        return
    if not isPrefix(prefix,dict):
        # No dictionary word starts with this prefix — prune the branch.
        return
    if prefix in dict: #prefix is an actual word, try adding it to commit and set prefix to empty string
        # print("Found!")
        findAna(commit+[prefix],"",suffix,dict)
    for i in range(len(suffix)): #try moving each char in suffix to prefix
        findAna(commit,prefix+suffix[i],suffix[:i]+suffix[i+1:],dict)
def read_process_data():
    """Load 'dictionary.txt' (one word per line, in the working directory)
    and return it as a lowercased list of words."""
    with open('dictionary.txt') as f:
        content = ' '.join(f.readlines()).replace('\n','').replace('\r','').lower()
    return content.split()
def filter(content, key):
    """Return the words from `content` spellable with the letters of `key`.

    A word qualifies when it needs no more copies of any letter than `key`
    provides.  Generalized from the original a-z-only frequency arrays to
    arbitrary characters via collections.Counter.

    NOTE: this module-level function shadows the builtin `filter`; the name
    is kept because main() calls it.
    """
    from collections import Counter  # local import keeps the module surface unchanged

    key_counts = Counter(key)
    fitting_words = []
    for word in content:
        word_counts = Counter(word)
        # Word fits iff every required letter is available in sufficient quantity.
        if all(word_counts[letter] <= key_counts[letter] for letter in word_counts):
            fitting_words.append(word)
    return fitting_words
def main():
    """Read a word, find all multi-word anagrams from dictionary.txt, print them.

    Python 2 only (raw_input / print statements).  Results accumulate in the
    module-global `biglist` via findAna().
    """
    startTime=time.time()
    # Normalize the key: lowercase, strip spaces, sort its letters.
    key =''.join(sorted(raw_input("Enter a word: ").lower().replace(' ','')))
    print(key)
    dict=read_process_data() #array
    dict=filter(dict,key) #only words that can fit into key, so much faster!
    # print(dict)
    i=0
    N=len(key)
    while(i!=N): #start only with unique letters in key as prefix, no point in doing 'a' and 'a' twice
        print(key[i])
        findAna([],key[i],key[:i]+key[i+1:],dict)
        # Skip over runs of identical letters to avoid duplicate work.
        while(i!=N-1 and key[i+1]==key[i]):
            i+=1
        i+=1
    print(biglist) #answer
    endTime=time.time()
    print("Time elapsed: "+str(endTime-startTime))
main()
| true |
d2cda29d27ccbda172b84d4e1c778f91b977bb1f | Python | GuilhermeMaciel75/Exercicios_GeekUniversity_python | /Seção7/Exercícios_Parte1/39.1.py | UTF-8 | 434 | 3.859375 | 4 | [] | no_license | n = int(input("Digite o número de linhas do Triângulo de Pascal: "))
def pascal_triangle(rows):
    """Build Pascal's triangle as a list of `rows` rows (each a list of ints)."""
    triangle = []
    for x in range(rows):
        line = []
        for y in range(x + 1):
            if y == 0 or y == x:
                line.append(1)  # both edges of every row are 1
            else:
                # Interior cell = sum of the two cells above it.
                line.append(triangle[x - 1][y - 1] + triangle[x - 1][y])
        triangle.append(line)
    return triangle


if __name__ == '__main__':
    # Guarding the interactive part lets the builder above be imported and
    # tested without blocking on input().
    n = int(input("Digite o número de linhas do Triângulo de Pascal: "))
    triangulo = pascal_triangle(n)
    for z in range(len(triangulo)):
        print(*triangulo[z])
f1d4083b83c66457c8500a1f06345b03ce0c61fa | Python | Spielmaschine-2310/OddEvenCombi | /evenoddcombine.py | UTF-8 | 1,848 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#(c) Stefan Murawski 2014 (s.murawski@batronix.com)
from intelhex import IntelHex
import sys
import os
def main():
    """Interleave two Intel-HEX files byte-by-byte (even file, odd file)
    into output.hex next to this script.

    Usage: evenoddcombine.py <even-file> <odd-file>
    NOTE(review): the guard only checks len(sys.argv) > 1 but sys.argv[2]
    is also read — a single argument would raise IndexError; confirm intent.
    """
    if len(sys.argv) > 1:
        currentworkdir = os.path.abspath(os.path.dirname(sys.argv[0]))
        evenfilepath = str(os.path.abspath(sys.argv[1]))
        oddfilepath = str(os.path.abspath(sys.argv[2]))
        newfilepath = os.path.join(currentworkdir , "output.hex")
        tempfileeven = open(evenfilepath, "r")
        tempfileodd = open(oddfilepath, "r")
        # Load both inputs; the file extension selects the intelhex format.
        evenfile = IntelHex()
        evenfile.loadfile(tempfileeven,evenfilepath.split(".")[-1])
        #evenfile = IntelHex(evenfilepath)
        oddfile = IntelHex()
        oddfile.loadfile(tempfileodd,oddfilepath.split(".")[-1])
        #oddfile = IntelHex(oddfilepath)
        evendict = evenfile.todict()
        odddict = oddfile.todict()
        newdict = {}
        newindex = 0
        # Iterate up to the larger of the two end addresses.
        if evenfile.maxaddr() >= oddfile.maxaddr():
            maxaddr = evenfile.maxaddr()
        else:
            maxaddr = oddfile.maxaddr()
        #for i in range(len(evendict)):
        for i in range(0,maxaddr+1): # possibly start at 0 rather than inputfile.minaddr()
            try:
                newdict[newindex] = evendict[i]
            except KeyError: # empty addresses are sometimes skipped when saving
                #newdicteven[newindex] = 0xFF
                pass
            newindex+=1
            try:
                newdict[newindex] = odddict[i]
            except KeyError: # empty addresses are sometimes skipped when saving
                #newdicteven[newindex] = 0xFF
                pass
            newindex+=1
        # Write the merged dictionary out as Intel HEX.
        newhex = IntelHex(newdict)
        output = open(newfilepath, 'w')
        newhex.write_hex_file(output)
        output.close()

if __name__ == "__main__":
    main()
911320a18964daadfa8885d2f214e37793f1623e | Python | Clement-Valot/ADSA-Among-us | /Step2 Find Impostors by sight/Player.py | UTF-8 | 3,826 | 3.625 | 4 | [] | no_license | #This class is very useful to store the following information on Players:
# -his ID differentiating him of others
# -if he is alive or dead
# -if he is innocent or not
# -His Impostorness coefficient at each round
# -His overall Impostorness coefficient being the sum of all elements of the above matrix
class Player:
    """One Among-Us player: identity plus mutable per-game state."""

    def __init__(self, ID):
        self.ID = ID                            # unique player identifier (index into lists/matrices)
        self.alive=True                         # False once the player has been killed
        self.innocent=False                     # True once proven innocent
        self.round_coeff=[0,0,0,0,0,0,0,0,0,0]  # per-round impostorness coefficients (10 rounds)
        self.imp_coeff=0                        # overall impostorness = sum of round_coeff
# Resolve a Player object from its numeric ID (lists and matrices index by ID).
def Get_Player_From_ID(players, ID):
    """Return the player in `players` whose ID equals `ID`, or None if absent."""
    return next((candidate for candidate in players if candidate.ID == ID), None)
#Method that returns the list of players seen by the player_who_saw.
#players is the list of all players (dead or alive, crewmate or impostors)
#player_who_saw is the player who we want to know who he saw
#matrix is the adjacency matrix that is going to tell us who saw who on a particular round.
def Get_list_seen(players, player_who_saw, matrix):
    """Return IDs of alive, not-yet-innocent players that player_who_saw saw.

    NOTE: hard-codes a 10-player game (range(10) must match the matrix size).
    """
    suspects=[]
    for player_ID in range(10):
        if matrix[player_who_saw.ID][player_ID]==1:  # 1 = "saw" in the adjacency matrix
            player=Get_Player_From_ID(players, player_ID)
            if player.alive==True and player.innocent==False:
                suspects.append(player_ID)
    return suspects
#Method that returns the list of players seen by the impostors.
#We can't use the above function because it returns the alive and
#not innocent player while we want impostors to be able to kill innocent
#players as well.
def Get_list_seen_impostor(players, player_who_saw, matrix):
    """Return IDs of *alive* players seen by player_who_saw (impostor view).

    Unlike Get_list_seen, innocence is ignored: impostors may kill anyone
    alive.  Also hard-codes a 10-player game.
    """
    suspects=[]
    for player_ID in range(10):
        if matrix[player_who_saw.ID][player_ID]==1:
            player=Get_Player_From_ID(players, player_ID)
            if player.alive==True:
                suspects.append(player_ID)
    return suspects
#Method useful for Impostors to know who they saw in a round to know who they can kill.
def Get_Players_Killable(players, Impostors, matrix):
    """Return the deduplicated union of IDs seen by all impostors this round."""
    killable_players=[]
    for impostor_ID in Impostors:
        impostor=Get_Player_From_ID(players, impostor_ID)
        killable_players+=Get_list_seen_impostor(players, impostor, matrix)
    return list(set(killable_players)) #To remove duplicates (order not preserved)
# List the IDs of living players that `player_who_sees` could observe.
def Get_Players_Alive(players, player_who_sees, Impostors):
    """Return IDs of living players visible to `player_who_sees`.

    A player never sees himself.  While two impostors remain in the game,
    impostor IDs are filtered out of everyone's list (impostors cannot see
    each other in that situation).
    """
    both_impostors_alive = len(Impostors) == 2
    visible = []
    for candidate in players:
        # Skip the observer himself and the dead.
        if candidate == player_who_sees or not candidate.alive:
            continue
        # While both impostors live, impostor IDs are hidden.
        if both_impostors_alive and candidate.ID in Impostors:
            continue
        visible.append(candidate.ID)
    return visible
# Pick the player with the highest accumulated impostorness coefficient.
def Get_Best_Suspect(players):
    """Return the player whose imp_coeff is highest (and strictly > 0), else None.

    Used only in critical situations: this kill decision is probabilistic,
    unlike every other (certainty-based) method.  Ties keep the earliest
    player in the list.
    """
    leader = max(players, key=lambda candidate: candidate.imp_coeff, default=None)
    if leader is not None and leader.imp_coeff > 0:
        return leader
    return None
| true |
5ea2bffca4731511ef87895f3a155a99972de673 | Python | maxwell-geospatial/slopefailure_prob_models | /Python/py_predict_grid.py | UTF-8 | 3,422 | 2.609375 | 3 | [] | no_license | #==========================DATA PREP========================================
#Import needed libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import rasterio as rio
import pyspatialml as pml
get_ipython().run_line_magic('matplotlib', 'inline')
#Import specific modules, functions, or methods
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import pickle
#===========================Save and Load Model===================================
# Deserialize the scikit-learn classifier trained elsewhere.
#Path to model
pkl_filename = "py_model.pkl"
#Load model
with open(pkl_filename, 'rb') as file:
    pickle_model = pickle.load(file)
#==========================PREDICT TO GRID STACK======================================
#Read in raster grid
#We had best luck with TIF file
#Change band names to match variables
r_preds = pml.Raster("SET YOUR PATH/stack2.tif")
print(r_preds.names)
# Rename each band from the generic 'stack2_N' to the predictor name the
# model was trained with (band order must match the training stack).
r_preds.rename({'stack2_1':"slp",
                 'stack2_2':"sp21",
                 'stack2_3':"sp11",
                 'stack2_4': "sp7",
                 'stack2_5':"rph21",
                 'stack2_6':"rph11",
                 'stack2_7':"rph7",
                 'stack2_8':"diss21",
                 'stack2_9':"diss11",
                 'stack2_10':"diss7",
                 'stack2_11':"slpmn21",
                 'stack2_12':"slpmn11",
                 'stack2_13':"slpmn7",
                 'stack2_14':"sei",
                 'stack2_15':"hli",
                 'stack2_16':"asp_lin",
                 'stack2_17':"sar",
                 'stack2_18':"ssr21",
                 'stack2_19':"ssr11",
                 'stack2_20':"ssr7",
                 'stack2_21':"crossc21",
                 'stack2_22':"crossc11",
                 'stack2_23':"crossc7",
                 'stack2_24':"planc21",
                 'stack2_25': "planc11",
                 'stack2_26':"planc7",
                 'stack2_27':"proc21",
                 'stack2_28':"proc11",
                 'stack2_29':"proc7",
                 'stack2_30':"longc21",
                 'stack2_31':"longc11",
                 'stack2_32':"longc7",
                 'stack2_33':"us_dist",
                 'stack2_34':"state_dist",
                 'stack2_35':"local_dist",
                 'stack2_36':"strm_dist",
                 'stack2_37':"strm_cost",
                 'stack2_38':"us_cost",
                 'stack2_39':"state_cost",
                 'stack2_40':"local_cost",
                 'stack2_41':"steve",
                 'stack2_42':"dspm",
                 'stack2_43':"drain"}
                )
print(r_preds.names)
#Predict class probabilities for each raster cell
result = r_preds.predict_proba(estimator=pickle_model)
#Write result to file
result.write("SET YOUR PATH/slp_pred.tif")
#Read in and plot resulting prediction
#First band is predicted probability for not slope failure, second band is for slope failure
m_result = rio.open("SET YOUR PATH/slp_pred.tif")
m_result_arr = m_result.read(2)
plt.rcParams['figure.figsize'] = [10, 8]
plt.imshow(m_result_arr, cmap="YlOrRd", vmin=0, vmax=1)
| true |
8bf1b318995c56191adac92be67f05592ffedd7d | Python | Aura-Zlata/Instructiunea-WHILE | /WHILE1.gyp | UTF-8 | 335 | 3.875 | 4 | [] | no_license | """
Se introduc succesiv numere nenule până la introducerea numărului 0.
Să se afişeze suma tuturor numerelor introduse.
Exemplu: Date de intrare 3 5 4 2 0 Date de ieşire 14.
"""
# Read integers until 0 is entered, then print their sum.
# int() replaces the original eval() on raw user input: eval executes
# arbitrary expressions (a security hole) while the documented contract
# here is plain integers.
n = int(input('Introduceti un numar:'))
s = 0
while n != 0:
    s += n
    n = int(input('Introduceti un numar:'))
print('Suma numerelor este:', s)
| true |
1b5e7d8e5fd6f20829914fef358151b9b2c734c4 | Python | edtechhub/gtasks | /tasks/tests/test_process.py | UTF-8 | 3,422 | 2.6875 | 3 | [] | no_license | import unittest
from unittest.mock import Mock, patch
from tasks.process import _non_interactive, _filter_tasks
class TestNonInteractive(unittest.TestCase):
    """Tests for tasks.process._non_interactive's piping behaviour."""

    def actionator(self):
        # Fresh mock per test; only its presence matters here.
        return Mock()

    def piper(self):
        return Mock()

    @patch("tasks.process._print_single_task", new=Mock())
    @patch("tasks.process._serialize_task")
    def test_one_pipe_call_when_not_piping_serparately(self, serializer):
        actionator = self.actionator()
        piper = self.piper()
        tasks = [1, 2, 3, 4, 5]
        _non_interactive(tasks, actionator, piper, pipe_separately=False)
        # All tasks are joined into a single pipe() call.
        assert len(piper.pipe.mock_calls) == 1

    @patch("tasks.process._print_single_task", new=Mock())
    @patch("tasks.process._serialize_task")
    def test_multiple_pipe_calls_when_piping_serparately(self, serializer):
        actionator = self.actionator()
        piper = self.piper()
        tasks = [1, 2, 3, 4, 5]
        _non_interactive(tasks, actionator, piper, pipe_separately=True)
        # One pipe() call per task.
        assert len(piper.pipe.mock_calls) == 5


class TestFilterTasks(unittest.TestCase):
    """Shared fixtures for _filter_tasks tests."""

    def task(self):
        # Minimal task stub: title/notes searched by the filter, no children.
        t = Mock()
        t.title = "test"
        t.notes = "test notes"
        t.sub_tasks = []
        return t

    def test_strategy_not_found(self):
        with self.assertRaises(ValueError):
            _filter_tasks([], match='a', strategy='unknown')


class TestFilterTasksDefaultStrategy(TestFilterTasks):
    """Default strategy: a task (and each child) must match on its own."""

    def test_filter_empty_list(self):
        tasks = _filter_tasks([], match="a")
        assert tasks == []

    def test_single_task_getting_filtered_out(self):
        t = self.task()
        tasks = _filter_tasks([t], match="a")
        assert tasks == []

    def test_single_task_getting_filtered_in(self):
        t = self.task()
        t.title = 'a'
        tasks = _filter_tasks([t], match="a")
        assert tasks == [t]

    def test_children_getting_filtered_out(self):
        # Parent matches, children don't: children are dropped.
        t = self.task()
        t.title = 'a'
        t.sub_tasks = [self.task(), self.task()]
        tasks = _filter_tasks([t], match="a")
        assert tasks == [t]
        assert tasks[0].sub_tasks == []

    def test_single_child_filtered_in(self):
        # Only the matching child survives; notes count as match material.
        t = self.task()
        t.title = 'a'
        c = self.task()
        c.notes = 'contains an a'
        t.sub_tasks = [c, self.task()]
        tasks = _filter_tasks([t], match="a")
        assert tasks == [t]
        assert tasks[0].sub_tasks == [c]


class TestFilterTasksParentMatchStrategy(TestFilterTasks):
    """'parent-match' strategy: a matching parent keeps all its children."""

    def test_children_are_kept_if_only_parent_matches(self):
        t = self.task()
        t.title = 'a'
        c = self.task()
        c.title = 'b'
        t.sub_tasks = [c, c]
        tasks = _filter_tasks([t], match="a", strategy="parent-match")
        assert tasks == [t]
        assert tasks[0].sub_tasks == [c, c]

    def test_children_are_kept_if_they_match_too(self):
        t = self.task()
        t.title = 'a'
        c = self.task()
        c.title = 'a'
        t.sub_tasks = [c, c]
        tasks = _filter_tasks([t], match="a", strategy="parent-match")
        assert tasks == [t]
        assert tasks[0].sub_tasks == [c, c]

    def test_child_matches_but_not_parent(self):
        # A matching child does not rescue a non-matching parent.
        t = self.task()
        t.title = 'a'
        c = self.task()
        c.title = 'b'
        t.sub_tasks = [c, c]
        tasks = _filter_tasks([t], match="b", strategy="parent-match")
        assert tasks == []
| true |
7889de254558cd5950096ed1b74819afbe14f8aa | Python | SHLo/algorithms | /median-maintenance/utils.py | UTF-8 | 1,733 | 3.484375 | 3 | [] | no_license | import sys
def parse(file_name):
    """Read one integer per line from `file_name` and return them as a list."""
    with open(file_name) as lines:
        return [int(line) for line in lines]
class Heap():
    """Array-backed binary heap ordered by a caller-supplied selector.

    `select(a, b)` must return whichever argument should sit closer to the
    root — e.g. pass the builtin `min` for a min-heap or `max` for a max-heap.
    """

    def __init__(self, select):
        self.nums = []        # heap storage (array representation)
        self.select = select  # two-argument ordering function

    def __len__(self):
        return len(self.nums)

    def root(self):
        """Return the root element without removing it, or None when empty."""
        _root = None
        if len(self.nums):
            _root = self.nums[0]
        return _root

    def add(self, num):
        """Insert `num` and restore the heap invariant bottom-up."""
        nums = self.nums
        nums.append(num)
        self._up_heapify(len(nums) - 1)

    def _up_heapify(self, pos):
        # Bubble the element at `pos` up while it outranks its parent.
        nums = self.nums
        select = self.select
        if pos == 0:
            return
        # '//' keeps the index an int; true division ('/') broke this on Python 3.
        parent = (pos - 1) // 2
        if select(nums[pos], nums[parent]) != nums[parent]:
            self._swap(pos, parent)
            self._up_heapify(parent)

    def _swap(self, pos_1, pos_2):
        nums = self.nums
        nums[pos_1], nums[pos_2] = nums[pos_2], nums[pos_1]

    def extract_root(self):
        """Remove and return the root element (raises IndexError when empty)."""
        nums = self.nums
        root = nums[0]
        last = nums.pop()
        # When the root was the only element, popping already emptied the
        # heap; re-assigning nums[0] would raise IndexError (previous bug).
        if nums:
            nums[0] = last
            self._down_heapify(0)
        return root

    def _down_heapify(self, pos):
        # Sink the element at `pos` down while a child outranks it.
        nums = self.nums
        select = self.select
        left = pos * 2 + 1
        right = pos * 2 + 2
        if pos >= len(nums) // 2:
            return  # leaf position: nothing below to compare against
        elif left == len(nums) - 1:
            candidate = left  # only a left child exists
        else:
            # Compare the two children; the better one competes with `pos`.
            if select(nums[left], nums[right]) == nums[left]:
                candidate = left
            else:
                candidate = right
        if select(nums[candidate], nums[pos]) != nums[pos]:
            self._swap(candidate, pos)
            self._down_heapify(candidate)
| true |
d2347c7287e356092e4889fa1f21f09d0516d700 | Python | ripiuk/sound-recording-studio-expert | /experts/audio_interface_expert.py | UTF-8 | 1,030 | 3.09375 | 3 | [] | no_license | from experts import base
class AudioInterface(base.Expert):
    """Console-driven expert system recommending an audio interface.

    Inherits the inference machinery (questions, handle_answer, outcomes,
    get_result) from base.Expert; this class only supplies the data file
    name and a console front-end.
    """

    data_file_name = 'audio_interface'

    def run_in_console(self):
        """Ask every question on stdin and print the recommended device.

        Answers map to evidence weights: 1->0, 2->1, 4->3, 5->4; '3'
        (don't know) skips the question; anything else aborts the loop.
        """
        for q_num, question in enumerate(self.questions):
            answer = input(f'{q_num}. {question} (1: no, 2: probably no, 3: do not know, 4: probably, 5: yes) -> ')
            if answer == '1':
                self.handle_answer(q_num, 0)
            elif answer == '2':
                self.handle_answer(q_num, 1)
            elif answer == '3':
                continue
            elif answer == '4':
                self.handle_answer(q_num, 3)
            elif answer == '5':
                self.handle_answer(q_num, 4)
            else:
                break
        # Show every outcome's posterior before naming the winner.
        for el in self.outcomes:
            print(el['producer'], el['priori_probability'])
        result = self.get_result()
        print(f"\nResult:\nProducer: {result.get('producer')}\nModel: {result.get('model')}\n")


if __name__ == "__main__":
    sound_card = AudioInterface()
    sound_card.run_in_console()
| true |
7f8d12599508ad15d54cef69ac476df424bed366 | Python | ddieruf/engine-simulator | /device/listener.py | UTF-8 | 1,626 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python3
import requests
import util
import sys
from pprint import pprint
# Handle to the background pubsub polling thread (None until start()).
_thread = None

def start(pubsub):
    """Subscribe messageHandler to the configured channel and start polling.

    `pubsub` is expected to be a redis-py PubSub-like object (it provides
    subscribe() and run_in_thread()) — TODO confirm against the caller.
    """
    global _thread
    print("Starting listener")
    sampleRate = util.getSampleRate()#in seconds
    pubsub.subscribe(**{util.getSubscriptionName(): messageHandler})
    _thread = pubsub.run_in_thread(sleep_time=sampleRate)
    print("Listener started")
    return

def stop():
    """Stop the background polling thread if one is running."""
    print("Stopping listener")
    global _thread
    if _thread is not None:
        _thread.stop()
    print("Listener stopped")
    return
def messageHandler(message):
    """Forward one pubsub message's payload to the REST /sensordata endpoint.

    `message` is a pubsub message dict; its "data" field is posted as the
    JSON request body.  Errors are logged, never raised.
    """
    #print(message["data"])
    #POST to server
    headers = {'content-type': 'application/json'}
    try:
        r = requests.post(util.getServiceEndpoint() + "/sensordata", headers=headers, data=message["data"])
    except Exception as error:
        print("Error posting data to server")
        pprint(vars(error))
        # `r` was never assigned here — the old code fell through and
        # dereferenced r.request, raising NameError on every request failure.
        return
    if r.status_code != 200:  # '!=', not 'is not': identity vs an int literal is unreliable
        print("Data was not posted to endpoint")
        print(vars(r.request))
        pprint(vars(r))
    return
def test_listener():
    """Smoke-test the REST endpoint: GET /sensordata must answer 200.

    Raises Exception when the request fails or the status is not 200.
    """
    #check endpoint
    print("Starting listener test")
    try:
        print(" Sending GET to " + util.getServiceEndpoint() + "/sensordata")
        r = requests.get(util.getServiceEndpoint() + "/sensordata")
    except Exception as error:
        pprint(vars(error))
        raise Exception("Error testing listener endpoint")
    # '!=', not 'is not': comparing an int with 'is' relies on CPython's
    # small-int cache and triggers a SyntaxWarning on Python 3.8+.
    if r.status_code != 200:
        print(vars(r.request))
        pprint(vars(r))
        raise Exception("Endpoint did not pass healthcheck")
    print(" 200 Passed")
    print("End listener test")
    return
7353f3a0fa8222c53a6698f099839171369cb4e1 | Python | jirifilip/pyIDS | /pyids/model_selection/grid_search.py | UTF-8 | 1,500 | 2.6875 | 3 | [
"MIT"
] | permissive | import numpy as np
import itertools
from typing import Tuple, Dict
from .param_space_optimizer import ParameterSpaceOptimizer
class GridSearch(ParameterSpaceOptimizer):
    """Exhaustive search over the cartesian product of parameter values."""

    def __init__(
        self,
        func,
        func_args_spaces: Dict[str, Tuple[int, int]],
        max_iterations=500
    ):
        self.func = func                      # objective: dict of params -> score
        self.func_args_spaces = func_args_spaces
        self.max_iterations = max_iterations
        self.procedure_data = []              # kept for API compatibility (unused here)

        # Cartesian product over the per-argument value spaces, in dict order.
        self.params_array_generator = itertools.product(
            *self.func_args_spaces.values()
        )

    def fit(self):
        """Evaluate combinations (capped by max_iterations) and return the best.

        Sets self.best_params to {'params': <tuple>, 'score': <float>} for the
        highest-scoring combination, and returns it.
        """
        self.score_params_dict = {}
        arg_names = list(self.func_args_spaces.keys())

        for iteration, combination in enumerate(self.params_array_generator):
            score = self.func(dict(zip(arg_names, combination)))
            # NOTE: equal scores overwrite each other, as in the original design.
            self.score_params_dict[score] = dict(params=combination, score=score)
            if iteration >= self.max_iterations:
                break

        maximum_score = max(self.score_params_dict.keys())
        self.best_params = self.score_params_dict[maximum_score]
        return self.best_params
f96824e8537fe63f8ca868e0fbfc4a74086c9b96 | Python | RodoDenDr0n/UCU_labs | /Lab 3/sum_7.py | UTF-8 | 89 | 3.25 | 3 | [] | no_license | n = int(input())
result = 0
# Sum every multiple of 7 from 0 up to and including n (read above).
for i in range(0, n+1, 7):
    result += i
print(result)
| true |
4a136a92620c2669315040d019277dc04de67786 | Python | infomotin/Neural-Networks-from-Scratch-in-Python | /src/pandas/exele_reader.py | UTF-8 | 683 | 2.796875 | 3 | [] | no_license | import pandas as pd
# Exploratory pandas scratchpad: loads a CSV, leaves a trail of commented-out
# experiments, then writes the frame back out.
df = pd.read_csv('./src/pandas/data.csv')
# print(df.shape)
# row, col = df.shape
# print(row, ' ', col)
# print(df[5:80])
# print(df.columns)
# print(df["EMP_NAME"])
# print(df.EMP_NAME)
# print(df.head())
# print(df[['CARD_NO', 'EMP_NAME', 'DESIG_NAME', 'GROSS_SAL', 'BANK_AMT']])
# print((to_int(df['BANK_AMT']).mean()))
# print(df.describe())
# print(int(df[df["GROSS_SAL"]]) > 30000)
# print(df[df["GROSS_SAL"].str.replace(',', '')] > 30000)
# print(df.loc[:8])
# print(df.iloc[:8])
# df.set_index("Test_Set")
# print(df.index)
print(df.head())
# Round-trip the frame to a new CSV (adds an unnamed index column).
df.to_csv('./src/pandas/wr.csv')
# df.set_index("name", inplace=True)
print(df.head())
# NOTE(review): pd.Series() with no data/dtype emits a DeprecationWarning on
# recent pandas; the result is unused — confirm whether this line is needed.
df1 = pd.Series()
6f0ae9ea062edeea73881d685c2474110e79407f | Python | sshukla31/leetcode | /binary_search_tree_iterator.py | UTF-8 | 2,025 | 4 | 4 | [] | no_license | '''
Implement an iterator over a binary search tree (BST). Your iterator will be initialized with the root node of a BST.
Calling next() will return the next smallest number in the BST.
Note: next() and hasNext() should run in average O(1) time and uses O(h) memory, where h is the height of the tree.
'''
from commons.binary_tree import Node
class BSTIterator(object):
    """In-order iterator over a BST: next() yields values in ascending order.

    Keeps only the current left spine on an explicit stack, so memory is
    O(h) and each node is pushed/popped exactly once (amortised O(1)).
    """

    def __init__(self, root):
        """
        :type root: TreeNode
        """
        self.stack = []
        self.add_nodes(root)  # seed with the leftmost path from the root

    def hasNext(self):
        """
        :rtype: bool
        """
        return bool(self.stack)

    def next(self):
        """
        :rtype: int
        """
        if not self.stack:
            return None
        node = self.stack.pop()
        # The in-order successor lives down the right child's left spine.
        self.add_nodes(node.right)
        return node.data

    def add_nodes(self, root):
        # Push `root` and every node along its left spine.
        while root:
            self.stack.append(root)
            root = root.left
class BSTIterator2(object):
    """BST iterator that pre-computes the whole traversal up front (O(n) memory)."""

    def __init__(self, root):
        """
        :type root: TreeNode
        """
        self.stack = []
        self.add_nodes(root)  # pre-compute every node

    def hasNext(self):
        """
        :rtype: bool
        """
        return True if self.stack else False

    def next(self):
        """
        :rtype: int
        """
        if self.stack:
            return self.stack.pop().data
        else:
            return None

    def add_nodes(self, root):
        # Reverse in-order (right, node, left): pop() takes from the list
        # tail, so appending largest-first makes next() yield ascending
        # values.  The previous plain in-order append made next() return
        # the *largest* remaining value — the opposite of the contract.
        if root is not None:
            self.add_nodes(root.right)
            self.stack.append(root)
            self.add_nodes(root.left)
if __name__=='__main__':
    # Build the sample BST:
    #         4
    #       /   \
    #      2     6
    #     / \   / \
    #    1   3 5   7
    root = Node(4);
    root.left = Node(2);
    root.right = Node(6);
    root.left.left = Node(1);
    root.left.right = Node(3);
    root.right.left = Node(5);
    root.right.right = Node(7);
    # bti = BSTIterator(root)
    bti = BSTIterator2(root)
    # Drain the iterator, printing one value per line (Python 2 print).
    while bti.hasNext():
        print bti.next()
| true |
14482e601fd9f4d05c788d7496b3aaff07915807 | Python | K-Jay9/daily_expense_tracker | /app.py | UTF-8 | 5,764 | 2.765625 | 3 | [] | no_license | from tkinter import Tk, BOTTOM, LEFT, RIGHT, CENTER, TOP, BOTH, font, Label, Entry, Listbox, END, StringVar, PhotoImage
from tkinter.ttk import Frame, Style, Button
from json import load, dump
# Importing the utility functions
from utility import get_time
# Importing the constants
from utility import NAME, DAILY, WEEKLY, MONTHLY, GEO, myfont, menu_font, money, tran, number_font, stat_font, total, theme, blue, green, red
# Code Constants and variables — widget handles filled in by the UI builders.
mylist = None   # Listbox showing the transaction history
tot = None      # Label showing the current cash balance

'''
The backend of the application
'''
def get():
    """Record the amount/note typed by the user as a new transaction.

    Appends a timestamped record to transactions.json, updates the stored
    cash total (and the global `money`), then refreshes the transaction
    Listbox and the balance label.
    """
    # get the entered amount and set the input field back to its placeholder
    n = amnt.get()
    amnt.set('Enter Amount')
    nt = note.get()
    note.set('Enter Note')
    t = get_time()
    # create a new dictionary of the entered amount with a timestamp as key
    record = { f"{t}" : [f"{n}", f"{nt}"]}
    # Append to transactions.json file
    with open('./transactions.json', 'r+') as f:
        data = load(f)
        data['Records'].append(record)
        # Get the new cash after the transaction
        data['Cash'] = str(int(data['Cash']) + int(n))
        # set the money variable to the new cash
        global money
        money = data['Cash']
        # move the cursor to the beginning of the file
        f.seek(0)
        # update the data in the file with the new details
        dump(data, f)
        # truncate() discards any stale tail: without it, writing content
        # shorter than the previous file (e.g. after a negative amount
        # shrinks 'Cash') leaves trailing bytes that corrupt the JSON.
        f.truncate()
    #The decorated string
    string = f"{t} {int(n)} {nt}"
    # insert the transaction into the UI and show the new cash
    mylist.insert(END, string)
    tot.config(text=str(money))
# Getting the data from the Json file
def get_data():
    """Load transactions.json; return the records list and cache the balance.

    Side effect: sets the module-global `money` to the stored 'Cash' value.
    """
    with open('./transactions.json') as f:
        data = load(f)
        # Place the acquired data into variables
        dic = data['Records']
        global money
        money = data['Cash']
        return dic
'''
This is the 'Frontend' / UI of the application.
'''
# Load all the widgets and window features of the app
def initUI(root):
    """Assemble the whole window: title, geometry, menu, body, list, entry bar."""
    # Adding the title to the window
    root.title(NAME)
    # Initilise the Geometry
    root.geometry(GEO)
    # Get the data from the json file
    d = get_data()
    # The menu bar frame
    menu_bar(root)
    body(root)
    scroll(root, d)
    # The bottom frame where transaction is added
    bt_frame(root)

# The menu of the app
def menu_bar(root):
    """Build the top row of Daily/Weekly/Monthly menu buttons (no commands yet)."""
    # The menu bar frame
    top_frame = Frame(root)
    top_frame.pack(fill=BOTH)
    # The Daily menu button
    Button(top_frame, text=DAILY, style="C.TButton").pack(side=LEFT, fill=BOTH, expand=True)
    # The Weekly menu button
    Button(top_frame, text=WEEKLY, style="C.TButton").pack(side=LEFT, fill=BOTH, expand=True)
    # The Mothly menu button
    Button(top_frame, text=MONTHLY, style="C.TButton").pack(side=LEFT, fill=BOTH, expand=True)

# The layout of the body
def body(root):
    """Build the balance row: 'Transactions' label, currency, and amount.

    Side effect: stores the amount Label in the global `tot` so get() can
    refresh it later.
    """
    top_frame = Frame(root)
    top_frame.pack(fill=BOTH)
    # The Balance section
    # The amount number label
    global tot
    tot = Label(top_frame,text=money, padx=20,pady=20, fg=green, bg='white', font =number_font)
    tot.pack(side=RIGHT)
    # The currency label
    Label(top_frame, text='Ksh',padx=20,pady=20, fg='white', bg=theme, font=myfont).pack(side=RIGHT)
    # The transactions label
    Label(top_frame, text='Transactions', bg=theme, fg=blue, font=myfont, pady=10).pack(side=LEFT, padx=10)

# The transactions view where our Json is displayed
def scroll(root, data):
    """Fill the middle of the window with a Listbox of past transactions.

    `data` is the records list from transactions.json.  Side effect: stores
    the Listbox in the global `mylist` so get() can append to it.
    """
    # The middle section full of transactions
    bod = Frame(root)
    bod.pack(fill=BOTH, expand=True)
    # The listbox for the transactions
    # set the global variable to be equal to the Listbox for global manipulation
    global mylist
    mylist = Listbox(bod, font=tran, fg='maroon',bg='white')
    # Add the transactions from the json to the listbox
    for i in data:
        for a,b in i.items():
            # Each record maps timestamp -> [amount, note].
            string = f"{str(a)} {int(b[0])} {b[1]}"
            mylist.insert(END, string)
    mylist.pack(fill=BOTH,padx=20,pady=10, expand=True)

# The Entry frame at the bottom that adds to the transactions
def bt_frame(root):
    """Build the bottom row: amount entry, note entry, and the Add button."""
    f = Frame(root, padding='0.3i')
    f.pack(side=BOTTOM, fill=BOTH)
    # The amount input field (bound to the `amnt` StringVar)
    g = Entry(f, font=number_font, textvariable=amnt, justify=CENTER)
    g.pack(side=LEFT, padx = 3, ipady=6)
    # The note input field (bound to the `note` StringVar)
    e = Entry(f, font=number_font, textvariable=note, justify=CENTER)
    e.pack(side=LEFT, padx = 3, ipady=6)
    Button(f, text='Add', style="C.TButton", command=get).pack(fill=BOTH)
# The styling of the buttons and the theme
def styling(root):
    """Load the awdark ttk theme and configure button styles/hover states."""
    # tell tcl where to find the awthemes packages
    root.tk.eval("""
set base_theme_dir awthemes/

package ifneeded awthemes 10.2.1 \
    [list source [file join $base_theme_dir awthemes.tcl]]
package ifneeded colorutils 4.8 \
    [list source [file join $base_theme_dir colorutils.tcl]]
package ifneeded awdark 7.11 \
    [list source [file join $base_theme_dir awdark.tcl]]
""")
    # remove maximize/mininize button (fixed-size window)
    root.resizable(0,0)
    # load the awdark and awlight themes
    root.tk.call("package", "require", 'awdark')
    # Initialising Style and loading in a dark theme
    style = Style()
    style.theme_use('awdark')
    #configure buttons
    style.configure("TButton", padding=6, relief="flat", font=menu_font)
    # button active/hover tweaks
    style.map("C.TButton",
        foreground=[('pressed', 'black'), ('active', 'black')],
        background=[('pressed', '!disabled', 'gray'), ('active', 'gray')]
    )
# Initialising the window and the two input StringVars (placeholder text).
window = Tk()
amnt = StringVar(window, value='Enter Amount')
note = StringVar(window, value='Enter Note')

# Apply the theme, build the UI, then enter the Tk event loop.
styling(window)
initUI(window)
window.mainloop()
| true |
5bf0385002cc0b7cd209e002a42fb72a26fb39d9 | Python | Scottars/nis_website | /dataservice/zmq/zmq_pub_xsub_xpub_example/Publisher.py | UTF-8 | 2,841 | 2.6875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# Publisher.py
import csv
import time
import argparse
from dataservice.zmq.zmq_pub_xsub_xpub_example.utl import get_broker,get_publisher,get_subscriber
import zmq
def get_publisher(address, port):
    """Return a ZeroMQ PUB socket connected to tcp://address:port."""
    pub = zmq.Context().socket(zmq.PUB)
    pub.connect('tcp://%s:%s' % (address, port))
    return pub
def get_subscriber(address, port, topics):
    """Return a SUB socket connected to tcp://address:port.

    `topics` may be a single topic string or a list of topic strings;
    each one is registered as a subscription filter.
    """
    sub = zmq.Context().socket(zmq.SUB)
    sub.connect('tcp://%s:%s' % (address, port))
    if isinstance(topics, str):
        sub.subscribe(topics)
    elif isinstance(topics, list):
        for topic in topics:
            sub.subscribe(topic)
    return sub
def get_broker(xsub_port, xpub_port):
    """Create and bind the broker's XSUB/XPUB socket pair.

    Returns (xsub_socket, xpub_socket); the caller is expected to
    forward traffic between them (e.g. with zmq.proxy).
    """
    ctx = zmq.Context()
    xsub = ctx.socket(zmq.XSUB)
    xsub.bind('tcp://*:%s' % xsub_port)
    # A leading 0x01 byte on an XSUB socket means "subscribe to everything".
    xsub.send(b'\x01')
    xpub = ctx.socket(zmq.XPUB)
    xpub.bind('tcp://*:%s' % xpub_port)
    # Pass every (un)subscription message through, not only the first one.
    xpub.setsockopt(zmq.XPUB_VERBOSE, 1)
    return xsub, xpub
class Publisher(object):
    """Reads rows from a CSV file and publishes them through the broker's XSUB side."""
    def __init__(self, topic, broker_address, broker_port, data, rate):
        '''
        :param topic: the topic associated with messages
        :param broker_address: broker public IP
        :param broker_port: XSub port number
        :param data: csv file path
        :param rate: publishing rate, unit is second
        '''
        self.topic = topic
        self.pub_socket = get_publisher(broker_address, broker_port)
        self.data = data
        self.rate = rate
    def publish_data(self):
        """Publish every CSV row, topic-prefixed and comma-joined, one per rate interval."""
        with open(self.data, newline='') as csv_file:
            reader = csv.reader(csv_file, delimiter=',')
            for row in reader:
                # Prepend the topic so subscribers can filter by prefix match.
                row.insert(0, self.topic)
                record = ','.join(row)
                self.pub_socket.send_string(record)
                print('[Publisher] Published message: %s' % record)
                # Throttle to the requested publishing rate.
                time.sleep(self.rate)
if __name__ == '__main__':
    # CLI entry point: all five arguments are required for a useful run.
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--topic', type=str, help='Topic')
    parser.add_argument('-a', '--address', type=str, help='Broker public IP address')
    parser.add_argument('-p', '--port', type=str, help='Broker XSub port number')
    parser.add_argument('-f', '--file', type=str, help='Data file path')
    parser.add_argument('-r', '--rate', type=int, help='Publishing rate in second')
    args = parser.parse_args()
    # Publish the whole file once, then exit.
    pub = Publisher(args.topic, args.address, args.port, args.file, args.rate)
    pub.publish_data()
| true |
05dfc148672e73136d3c8440fecde409450177ba | Python | VirajParab/stock | /backend/stock/workers/get_cash_flow_statement.py | UTF-8 | 2,772 | 2.578125 | 3 | [
"MIT"
] | permissive | import logging
import pandas as pd
from stock.models import CashFlow
from stock.models import MyStock
from yahooquery import Ticker
logger = logging.getLogger("stock")
M = 10 ** 6
B = 10 ** 9
class MyCashFlowStatement:
    """Pulls quarterly cash-flow data from yahooquery and upserts CashFlow model rows."""
    def __init__(self, symbol):
        # Raises MyStock.DoesNotExist if the symbol has not been created yet.
        self.stock = MyStock.objects.get(symbol=symbol)
    def get(self):
        """Fetch the quarterly cash-flow statement and persist one row per quarter."""
        s = Ticker(self.stock.symbol, timeout=15)
        # all numbers convert to million
        df = s.cash_flow(frequency="q")
        # NOTE(review): this membership test presumably catches yahooquery's
        # error payloads (string/dict instead of a DataFrame) — confirm the
        # failure return type against the yahooquery version in use.
        if "unavailable" in df or "error" in df:
            logger.error("{}: {}".format(self.stock.symbol, df))
            return
        # DB doesn't like NaN
        df = df.where(pd.notnull(df), 0)
        # Model field name -> yahooquery column name.
        mapping = {
            "beginning_cash": "BeginningCashPosition",
            "ending_cash": "EndCashPosition",
            "free_cash_flow": "FreeCashFlow",
            "investing_cash_flow": "InvestingCashFlow",
            "net_income": "NetIncome",
            "operating_cash_flow": "OperatingCashFlow",
            "da": "DepreciationAndAmortization",
            "capex": "CapitalExpenditure",
            "from_continuing_financing_activity": "CashFlowFromContinuingFinancingActivities",
            "change_in_working_capital": "ChangeInWorkingCapital",
            "stock_based_compensation": "StockBasedCompensation",
            "change_in_cash_supplemental_as_reported": "ChangeInCashSupplementalAsReported",
            "sale_of_investment": "SaleOfInvestment",
            "purchase_of_investment": "PurchaseOfInvestment",
            "common_stock_issuance": "CommonStockIssuance",
            "repurchase_of_capital_stock": "RepurchaseOfCapitalStock",
            "change_in_inventory": "ChangeInInventory",
            "dividend_paid": "CashDividendsPaid",
            "change_in_account_payable": "ChangeInAccountPayable",
            "change_in_account_receivable": "ChangesInAccountReceivables",
            "purchase_of_business": "PurchaseOfBusiness",
            "net_other_financing_charges": "NetOtherFinancingCharges",
            "net_other_investing_changes": "NetOtherInvestingChanges",
        }
        # enumerate data frame: one CashFlow row per reported quarter.
        for row in df.itertuples(index=False):
            i, created = CashFlow.objects.get_or_create(
                stock=self.stock, on=row.asOfDate.date()
            )
            for key, val in mapping.items():
                try:
                    tmp = float(getattr(row, val))
                except AttributeError:
                    # Column absent for this quarter -> store 0.
                    tmp = 0
                # if tmp is a large number, it's unlikely a rate,
                # eg. tax rate, thus convert it to B.
                if abs(tmp) > M:
                    tmp = tmp / B
                # set value
                setattr(i, key, tmp)
            i.save()
| true |
a1f03272de902a93e817c58b216ff05424dc8d22 | Python | andresguaita/Codigos-de-prueba | /FP_tabla_multiplicar_con_funciones.py | UTF-8 | 682 | 4.53125 | 5 | [] | no_license | #Elaborar una función que muestre la tabla de multiplicar del valor que
#le enviemos como parámetro. Definir un segundo parámetro llamado termino
#que por defecto almacene el valor 10. Se deben mostrar tantos
#términos de la tabla de multiplicar como lo indica el segundo parámetro.
#Llamar a la función desde el bloque principal de nuestro
#programa con argumentos nombrados.
def tabla(numero, terminos=10):
    """Print `terminos` multiples of `numero` (starting at 0) on one line.

    Each value is followed by a comma; a newline terminates the line.
    """
    valores = [n * numero for n in range(terminos)]
    for valor in valores:
        print(valor, ",", sep="", end="")
    print()
# main block: demo the default, positional and keyword-argument call forms
print("Tabla del 3")
tabla(3)
print("Tabla del 3 con 5 terminos")
tabla(3,5)
print("Tabla del 3 con 20 terminos")
# named arguments may be passed in any order
tabla(terminos=20,numero=3)
| true |
aa53853c0c0c91e4d5bf9d61c4b202551c4f34ad | Python | aikermerry/tensenflow | /MNIST/forward.py | UTF-8 | 696 | 2.765625 | 3 | [] | no_license | ##定义前向传播过程
import tensorflow as tf
INPUT_NODE = 784
OUT_NODE = 10
LAYER1_NODE = 500
def forward(x,regularizer):
    """Two-layer fully connected forward pass for MNIST (TF1 graph mode).

    x: input batch placeholder/tensor; regularizer: L2 rate or None.
    Returns (logits, w1, w2).
    """
    w1 = get_weight([INPUT_NODE,LAYER1_NODE],regularizer)
    b1 =get_bias([LAYER1_NODE])
    y1 = tf.nn.relu(tf.matmul(x,w1)+b1)
    w2 = get_weight([LAYER1_NODE,OUT_NODE],regularizer)
    b2 = get_bias([OUT_NODE])
    # NOTE(review): raw logits are returned — presumably softmax is applied
    # by the loss function; confirm in the training script.
    y = tf.matmul(y1,w2)+b2
    return y ,w1, w2
def get_weight(shape,regularizer):
    """Create a truncated-normal weight Variable; if `regularizer` is set,
    add its L2 penalty to the 'losses' collection."""
    w = tf.Variable(tf.truncated_normal(shape,stddev = 0.1))
    if regularizer !=None: tf.add_to_collection("losses",tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w
def get_bias(shape):
    """Create a zero-initialised bias Variable of the given shape."""
    b = tf.Variable(tf.zeros(shape))
    return b
| true |
5a06707727e8022b7d5f39dbf99e357387f3ce96 | Python | misja-alma/aoc2020 | /day6.py | UTF-8 | 758 | 3.34375 | 3 | [] | no_license | def group_to_chars(group):
chars = set(group)
chars.discard('\n')
return chars
def group_to_shared_chars(group):
    """Return the questions answered by *every* member of the group."""
    # Empty member lines would yield empty sets, so drop them before mapping.
    member_sets = [group_to_chars(m) for m in group.split("\n") if len(m) > 0]
    return set.intersection(*member_sets)
if __name__ == '__main__':
    # Groups are separated by blank lines in the puzzle input.
    inputs = open('input_day6.txt', 'r')
    groups = inputs.read().split("\n\n")
    inputs.close()
    # Part 1: total of questions answered by *anyone* in each group.
    chars_per_group = map(group_to_chars, groups)
    len_per_group = map(len, chars_per_group)
    print("Part 1: {}".format(sum(list(len_per_group))))
    # Part 2: total of questions answered by *everyone* in each group.
    shared_chars_per_group = map(group_to_shared_chars, groups)
    len_per_group = map(len, shared_chars_per_group)
    print("Part 2: {}".format(sum(list(len_per_group))))
| true |
310ec75302ba04f17390e1d658de895e7ee03ea6 | Python | Rishabhh/LeetCode-Solutions | /Dynamic_programming/63_Unique_path_II.py | UTF-8 | 782 | 3 | 3 | [] | no_license | # O(M * N) run-time. O(N) space-complexity.
class Solution:
def uniquePathsWithObstacles(self, obstacleGrid):
"""
:type obstacleGrid: List[List[int]]
:rtype: int
"""
if not obstacleGrid or len(obstacleGrid) == 0 or obstacleGrid[0][0] == 1:
return 0
m, n = len(obstacleGrid), len(obstacleGrid[0])
prev_row, cur_row = None, [0] * n
cur_row[0] = 1
for i in range(m):
for j in range(n):
if obstacleGrid[i][j] == 0:
if prev_row:
cur_row[j] += prev_row[j]
if j > 0:
cur_row[j] += cur_row[j-1]
prev_row = cur_row
cur_row = [0] * n
return prev_row[-1]
| true |
45e1caa232c5a56d2aa20f0cc723014e860249b7 | Python | tallowen/whereisowen | /data_collector.py | UTF-8 | 2,523 | 2.65625 | 3 | [] | no_license | import json
import os
import calendar
from datetime import datetime
import requests
from requests_oauthlib import OAuth1
output_filename = os.environ.get('WHEREIS_OUTPUT_FILE', 'public/stories.json')
account_id = os.environ['ACCOUNT_ID']
consumer_key = os.environ['CONSUMER_KEY']
consumer_secret = os.environ['CONSUMER_SECRET']
access_token = os.environ['ACCESS_TOKEN']
access_token_secret = os.environ['ACCESS_TOKEN_SECRET']
twitter_oauth = OAuth1(consumer_key, consumer_secret,
access_token, access_token_secret)
twitter_api = 'https://api.twitter.com/1.1/%s'
def get_timestamp(twitter_timestamp):
    """Convert Twitter's created_at format (e.g. "Tue Apr 26 08:57:55 +0000 2011")
    into a unix timestamp (seconds since the epoch, UTC)."""
    parsed = datetime.strptime(twitter_timestamp, "%a %b %d %H:%M:%S +0000 %Y")
    return calendar.timegm(parsed.utctimetuple())
def twitter_get(end_point):
    """GET a Twitter API v1.1 endpoint; returns decoded JSON.

    With the OFFLINE environment variable set, canned fixture data is
    returned instead of hitting the network.
    NOTE(review): this module uses Python 2 print statements.
    """
    if os.environ.get('OFFLINE', False):
        print 'Using fake data' + '\n' * 2
        with open('twitter_test_data.json') as f:
            import json
            return json.loads(f.read())
    path = twitter_api % end_point
    return requests.get(path, auth=twitter_oauth).json()
def filter_text(tweet):
    """
    Return the tweet text prepared for display.

    Shortened t.co links are swapped for their display form and photo
    media links are removed entirely.
    """
    text = tweet['text']
    entities = tweet['entities']
    for url_entity in entities.get('urls', []):
        text = text.replace(url_entity['url'], url_entity['display_url'])
    for media_entity in entities.get('media', []):
        if media_entity['type'] == 'photo':
            text = text.replace(media_entity['url'], '')
    return text
def get_stories():
    """Build the story list for the map page: one dict per timeline tweet."""
    stories = []
    for tweet in twitter_get('statuses/user_timeline.json?user_id=%s' % account_id):
        # Geotag, if the tweet carries one (GeoJSON coordinate pair).
        coordinates = None
        if tweet['coordinates']:
            coordinates = tweet['coordinates']['coordinates']
        # First attached photo URL, if any.
        picture = None
        if 'media' in tweet['entities']:
            try:
                picture = tweet['entities']['media'][0]['media_url_https']
            except KeyError:
                picture = None
        stories.append({
            'coordinates': coordinates,
            'text': filter_text(tweet),
            'image': picture,
            'id': tweet['id'],
            'time': get_timestamp(tweet['created_at'])
        })
    return stories
if __name__ == '__main__':
    # Fetch the latest stories and dump them where the static site reads them.
    # NOTE(review): Python 2 print statement below.
    data = get_stories()
    with open(output_filename, 'w') as f:
        f.write(json.dumps(data))
    print data
| true |
d4fb1e66751c5b924d93b79ad95f0f41a85fcc97 | Python | aptend/antlr4-learn | /CSV/tableListener.py | UTF-8 | 1,194 | 3 | 3 | [] | no_license | """
Visit a CSV parsed tree, and output a list of maps:
[
{Details=Mid Bonus, Month=June, Amount="$2,000"},
{Details=, Month=January, Amount=\"""zippo""\"},
{Details=Total Bonuses, Month="", Amount="$5,000"}
]
"""
from CSVListener import CSVListener
from CSVParser import CSVParser
class TableListener(CSVListener):
    """Parse-tree listener that builds self.table: one {header: value} dict per data row."""
    def __init__(self):
        self.hdrs = []              # column names captured from the hdr rule
        self.curr_row_fields = []   # fields of the row currently being walked
        self.table = []             # accumulated row dicts
    def result(self):
        # The list of row dicts built during the walk.
        return self.table
    def exitText(self, ctx: CSVParser.TextContext):
        self.curr_row_fields.append(ctx.TEXT().getText())
    def exitString(self, ctx: CSVParser.StringContext):
        # Quoted fields are kept verbatim, quotes included.
        self.curr_row_fields.append(ctx.STRING().getText())
    def exitNone(self, ctx: CSVParser.NoneContext):
        # An empty field between two commas.
        self.curr_row_fields.append(None)
    def exitHdr(self, ctx: CSVParser.HdrContext):
        # Copy, not alias: curr_row_fields is reset on every enterRow.
        self.hdrs = self.curr_row_fields[:]
    def enterRow(self, ctx: CSVParser.RowContext):
        self.curr_row_fields = []
    def exitRow(self, ctx: CSVParser.RowContext):
        # The header line is itself a row; skip it when its parent rule is hdr.
        if ctx.parentCtx.getRuleIndex() == CSVParser.RULE_hdr:
            return
        self.table.append(dict(zip(self.hdrs, self.curr_row_fields)))
| true |
decd279d7e817c8b06280c833c9ce97d9aa27533 | Python | TurtleLabs/MScA-Robotics-Capstone | /scav-hunt/scavbot.py | UTF-8 | 8,576 | 2.53125 | 3 | [
"MIT"
] | permissive | import os
import time
from datetime import datetime, date
from easygopigo3 import EasyGoPiGo3
import picamera
import glob
from scaveye import ObjectClassificationModel, ConeClassificationModel,take_picture, record_video
from coneutils import detect
class ScavBot:
    """GoPiGo3 scavenger-hunt robot: finds coloured cones, drives to them,
    orbits them, and classifies/logs the object hidden behind each cone.

    Fix in this revision: ``center_cone_with_tfmodel`` returned lowercase
    ``false`` on a full-circle miss, which raised NameError at runtime;
    it now returns ``False`` like its colour-threshold sibling.
    """

    def __init__(self, image_model_dir, cone_model_dir, image_dir, cone_image_dir, params, boundaries, log_dir='logs'):
        """Wire up the robot hardware, the two TFLite models and the log file.

        params: dict of speeds/distances (h_spd, m_spd, l_spd, cone_dist, radius).
        boundaries: per-colour HSV bounds used by the colour-threshold detector.
        """
        self.gpg = EasyGoPiGo3()
        self.dist_sensor = self.gpg.init_distance_sensor(port="AD1")
        self.servo = self.gpg.init_servo("SERVO1")
        self.servo.rotate_servo(100)
        self.params = params
        self.boundaries = boundaries
        self.image_dir = image_dir
        # Image Model: classifies the object found behind a cone.
        self.image_model = ObjectClassificationModel(
            model_dir = image_model_dir,
            image_dir = image_dir,
            min_conf_threshold=0.3,
            use_TPU=True)
        # Cone Detection Model: locates cones in still pictures.
        self.cone_detection_model = ConeClassificationModel(
            model_dir = cone_model_dir,
            image_dir = cone_image_dir,
            graph_name='cone_detect.tflite',
            min_conf_threshold=0.3,
            use_TPU=True)
        # Log File: one file per day.
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        self.log_path = os.path.join(log_dir, 'log_'+ str(date.today())+'.txt')

    def log(self, txt):
        """Append one line of text to today's log file."""
        with open(self.log_path, 'a') as f:
            f.write(txt)
            f.write('\n')

    def find_cone(self, color):
        """Colour-threshold detector: horizontal cone position (0..1) or False."""
        bounds = self.boundaries[color]
        return detect.findCone(bounds)

    def center_cone(self, color):
        """Rotate in place until the colour-threshold cone is horizontally centred.

        Returns True when centred; False after scanning a full circle without
        a detection.
        """
        print('Finding {} cone'.format(color))
        centered = False
        current_degree = 0
        while not centered:
            time.sleep(.5)
            cone_x = self.find_cone(color)
            if cone_x is False:
                if current_degree > 360:
                    return False
                self.gpg.turn_degrees(-20)
                current_degree += -20
            # NOTE(review): when cone_x is False the elif below also fires
            # (False < .4), adding an extra -10 turn — confirm intent.
            if cone_x > .6:
                self.gpg.turn_degrees(10)
            elif cone_x < .4:
                self.gpg.turn_degrees(-10)
            else:
                centered = True
        print('Found {} cone!'.format(color))
        return True

    def find_cone_new(self, color,cones):
        """TF-model detector: cone x-position for colour index `color`, or False."""
        return detect.findcone_mod(color,cones)

    def center_cone_with_tfmodel(self, color):
        """Rotate until the TFLite-detected cone is horizontally centred.

        Takes a picture each iteration, classifies it, and moves the picture
        to a backup folder. Returns True when centred; False after a full
        circle without a detection.
        """
        print('Finding {} cone'.format(color))
        # Colour name -> class index in the cone-detection model.
        color_dict = {'blue':0,
                      'green':1,
                      'orange':2,
                      'purple':3,
                      'red':4,
                      'yellow':5}
        conecolor_index = color_dict[color]
        centered = False
        current_degree = 0
        cone_image_path = '/home/pi/Pictures/Cones/'+color+'/'
        backup_image_path = '/home/pi/Pictures/Cones/backup/'+color+'/'
        while not centered:
            time.sleep(.5)
            take_picture(cone_image_path)
            cones = self.cone_detection_model.classify(cone_image_path)
            cone_x = self.find_cone_new(conecolor_index,cones)
            print('Cone is at : ',cone_x)
            if cone_x is False:
                if current_degree > 360:
                    # FIX: was lowercase `false`, which raised NameError.
                    return False
                self.gpg.turn_degrees(-20)
                current_degree += -20
            if cone_x > .65:
                self.gpg.turn_degrees(10)
            elif cone_x <.35:
                self.gpg.turn_degrees(-10)
            else:
                centered = True
            # Archive the picture taken this iteration so the capture folder
            # only ever holds the most recent shot.
            files = glob.glob(cone_image_path+'*')
            filename = os.path.basename(files[0])
            print('destination:{}'.format(backup_image_path+filename))
            os.rename(files[0], backup_image_path+filename)
        print('Found {} cone!'.format(color))
        return True

    def drive_to_cone(self, color):
        """Centre the cone, drive up to it at high speed, then back off slowly
        until the configured orbit radius is restored."""
        self.center_cone_with_tfmodel(color)
        print('Driving to {} cone'.format(color))
        # Drive to cone at full bore
        self.gpg.set_speed(self.params['h_spd'])
        ob_dist = self.dist_sensor.read_mm()
        t0 = time.time()
        while ob_dist >= self.params['cone_dist'] or ob_dist ==0:#sometimes distance sensor gives 0mm reading erroneously
            self.gpg.forward()
            ob_dist = self.dist_sensor.read_mm()
            print('Distance to cone:',ob_dist)
            # Every three seconds, recenter the cone
            if time.time() - t0 > 3:
                self.gpg.set_speed(self.params['m_spd'])
                self.gpg.stop()
                print('Recentering')
                self.center_cone_with_tfmodel(color)
                t0 = time.time()
        self.gpg.stop()
        print("Distance Sensor Reading: {} mm ".format(ob_dist))
        # Back away to the exact distance at a slower speed
        self.gpg.set_speed(self.params['l_spd'])
        while ob_dist < self.params['radius']:
            self.gpg.backward()
            ob_dist = self.dist_sensor.read_mm()
        self.gpg.stop()
        print("MADE IT!")

    def circum_navigate(self, color):
        """Orbit the cone at a colour-specific radius; non-red cones get a
        mid-orbit stop to film and classify whatever hides behind them."""
        # Set the speed to medium speed
        self.gpg.set_speed(self.params['m_spd'])
        # rotate gpg 90 degrees to prep for the orbit
        self.gpg.turn_degrees(-90)
        if color == 'red':
            radius = 40
            print('orbiting red cone at {}'.format(radius))
            print("I will now cicle the cone at {} mm ".format(radius))
            self.gpg.set_speed(self.params['h_spd']) #high speed for red cone
            self.gpg.orbit(300, radius)
        elif color == 'green':
            radius = 50
            print("I will now cicle the cone at {} mm ".format(radius))
            self.orbit_and_take_picture(150, radius, color, turn_90=True)
            self.orbit_and_take_picture(100, radius, color)
        elif color == 'yellow':
            radius = 60
            print("I will now cicle the cone at {} mm ".format(radius))
            self.orbit_and_take_picture(150, radius, color, turn_90=True)
            self.orbit_and_take_picture(100, radius, color)
        elif color =='purple':
            radius = 70
            print("I will now cicle the cone at {} mm ".format(radius))
            self.orbit_and_take_picture(150, radius, color, turn_90=True)
            self.orbit_and_take_picture(100, radius, color)

    def orbit_and_take_picture(self, degrees, radius, color, turn_90=False):
        """Orbit `degrees` around the cone; with turn_90, stop to face outward,
        record a short video and classify the object behind the cone."""
        self.gpg.orbit(degrees, radius)
        picture_path = os.path.join(self.image_dir, color)
        #TODO: make this path a default argument
        video_path = '/home/pi/Videos/'
        if not os.path.exists(picture_path):
            os.makedirs(picture_path)
        if not os.path.exists(video_path):
            os.makedirs(video_path)
        if turn_90:
            self.gpg.turn_degrees(90)
            drive_cm = 10
            self.gpg.drive_cm(-drive_cm)
            record_video(video_path,cone_color=color,duration=3)
            cone_object = self.image_model.classify_video(video_path+color)
            print('!!!!!!!!!!!!Behind {} cone I found {}'.format(color,cone_object))
            self.gpg.drive_cm(drive_cm)
            self.gpg.turn_degrees(-90)
        else:
            #TODO: What if turn_90 is false?
            pass

    def classify_and_log(self, color):
        """Classify pictures taken for `color` and append the result to the log."""
        image_dir = os.path.join(self.image_model.image_dir, color)
        classes, probs, objects = self.image_model.classify(image_dir)
        txt = ','.join([str(datetime.now()), color, str(objects)])
        self.log(txt)
        print('Logged: ', txt)
        return txt

    def main(self, color):
        """Run the full routine for one cone colour."""
        self.center_cone(color)
        self.drive_to_cone(color)
        self.circum_navigate(color)
        self.classify_and_log(color)
if __name__ == '__main__':
import config
from coneutils import calibrate
boundaries_dict = calibrate.load_boundaries('coneutils/boundaries.json')
bot = ScavBot(
image_model_dir='Sample_TFLite_model',
image_dir='/home/pi/Pictures/scav_hunt',
params=config.params,
boundaries = boundaries_dict
)
| true |
7441eb91b0d43c3c4f637f490b8fb4d3a52363a7 | Python | wbddxyz/python_sutff | /ex27_heights_tb (1).py | UTF-8 | 2,206 | 4.4375 | 4 | [] | no_license | '''
ex28_heights_tb.py
Tom Bain
03/11/20
J27C76 Software Design and Development
Problem: Find maximum, minimum and calculate an average.
Example of output:
Alice is the tallest at 194 cm
Jamie is the shortest at 145 cm
The average height is 174 cm
'''
# code from ex25
def get_index_maximum_value(data):
    '''
    Return the index of the largest element in data.

    Assumes at least one element, no duplicates and no sorting.
    '''
    best = 0
    for idx in range(1, len(data)):
        if data[idx] > data[best]:
            best = idx
    return best
# code from ex26
def get_index_minimum_value(data):
    '''
    Return the index of the smallest element in data.

    Assumes at least one element, no duplicates and no sorting.
    '''
    best = 0
    for idx in range(1, len(data)):
        if data[idx] < data[best]:
            best = idx
    return best
# code from ex22
def get_average(data):
    '''
    Return the mean of the elements in data.

    Assumes at least one element.
    '''
    return sum(data) / len(data)
def main():
    """Report the tallest person, the shortest person and the average height."""
    names = ['Alice', 'Giovanni', 'Henry', 'Jamie', 'Karen',
             'Lloyd', 'Michelle', 'Nicola', 'Roger', 'Marita']
    heights = [194, 181, 161, 145, 175, 183, 178, 177, 165, 185]
    # Tallest person
    tallest = get_index_maximum_value(heights)
    print(f'{names[tallest]} is the tallest at {heights[tallest]} cm')
    # Shortest person
    shortest = get_index_minimum_value(heights)
    print(f'{names[shortest]} is the shortest at {heights[shortest]} cm')
    # Average height, rounded to whole centimetres for display
    print(f'The average height is {get_average(heights):.0f} cm')
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| true |
f73e0217992d2d6a89ccec840236f761e3ad7ee5 | Python | hiephm/Splendor-Bot | /Convenient Solver Stuff/MakeMove_Full.py | UTF-8 | 5,454 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 9 17:37:47 2019
@author: huber.288
"""
from InputVector import *
import numpy as np
import pickle
from NeuralNet import *
import torch
from copy import deepcopy
import os
# Locate the pickled lookup tables relative to this module.
# NOTE(review): Windows-style '\\' separators and unclosed open() handles —
# consider os.path.join and a with-block if this ever runs elsewhere.
cur_path = os.path.dirname(__file__)
new_path1 = os.path.relpath('..\\Game Data\\GemCombos.p', cur_path)
Gs=pickle.load(open( new_path1, "rb" ))
MType=pickle.load(open( cur_path + '\\MType_Full.p', "rb" ))
def MakeMove(Game,playern,NN,Levels):
    """Choose and apply one move for player `playern` using network NN.

    Falls back to a gem exchange when no ranked move is legal.
    `Levels` is accepted for signature parity with the tree-search variant
    but is not used here.
    """
    Player = Game.player[playern]
    IV=InputVector_Full(Game,102)
    Out=NN(torch.FloatTensor(IV))
    Out=np.array(Out)
    GMoves=RankMoves(Out,Game,Player,playern)
    if GMoves:
        MakeMove_GMove(Game,playern,GMoves[0])
    else:
        # Out[1:7] are presumably the net's per-gem-colour values; pick the
        # best legal gem swap under those values.
        values = Out[1:7]
        gems=ExchangeGems(Game,playern,values)['Gems']
        Game.TakeGems(playern,gems)
def ExchangeGems(Game,playern,values):
    """Find the most valuable legal gem pickup for the player.

    Evaluates every gem combination in Gs, greedily dropping the least
    valuable held gem until the 10-gem hand limit is met.  Returns
    {'Gems': combo-to-take (6-vector), 'Value': its dot-product value}.
    """
    BestVal = 0
    BestCombo = np.array([0,0,0,0,0,0])
    gems = Game.player[playern].gems
    for combo in Gs:
        # Clip the combo to what the bank actually holds.
        legal_combo = np.minimum(deepcopy(combo),Game.gems)
        if max(legal_combo) > 1 and Game.gems[np.argmax(legal_combo)] < 4: #Don't bother checking 2 of same kind if not legal
            continue
        c = legal_combo + gems
        # Up to 4 attempts: drop the least valuable gem until <= 10 held.
        for _ in range(4):
            val = np.dot(c,values) #value of taking all gems
            num = sum(c)
            if num <= 10:
                if val > BestVal:
                    BestVal = val
                    BestCombo = c - gems
                break
            # Find the held gem with the lowest value (values are < 2).
            LeastVal = 2
            LeastInd = 0
            for ind, n in enumerate(c):
                if n>0 and values[ind]<LeastVal:
                    LeastVal = values[ind]
                    LeastInd = ind
            change = np.zeros(6)
            change[LeastInd] += 1
            c -= change.astype(int)
    Answer = {}
    Answer['Gems']=BestCombo
    Answer['Value']=BestVal
    return Answer
def MakeMove_GMove(Game, playern, GMove):
    """Apply one ranked move tuple to the Game and return the (mutated) Game.

    GMove layout: [move_type, card_coords, gems]; an empty GMove is a no-op.
    Move types: 1 take gems, 2-4 buy a card from deck 0-2, 5 reserve a card.
    """
    if not GMove:
        return Game
    move_type = GMove[0]
    if move_type == 1:
        Game.TakeGems(playern, GMove[2])
        return Game
    if 2 <= move_type <= 4:
        Game.BuyCard(playern, GMove[1][0], GMove[1][1], GMove[2])
        return Game
    if move_type == 5:
        Game.ReserveCard(playern, GMove[1][0], GMove[1][1], GMove[2])
        return Game
def RankMoves(Out,Game,Player,playern): #Ranks top 10 moves based on "Out" values. For each move, entry 0 is move type, entry 1 is the card, entry 2 is the gems
    """Rank the net's move outputs and keep only the legal ones.

    Out[7:] holds one probability per candidate move; the index offsets
    (i-12, i-8, i-4, i) map each output slot to its gem combo or card slot
    via the pickled MType table.  Returns GMove lists understood by
    MakeMove_GMove, best-first.
    NOTE(review): reserve moves (MType 5) are handled by MakeMove_GMove but
    never generated here — confirm whether that is intentional.
    """
    Probs=Out[7:]
    BestInds=np.argsort(Probs)[::-1]
    GMoves=[]
    for i in BestInds:
        A=[]
        A.append(MType[i])
        if MType[i]==1: #Take Gems
            gems=Gs[i-12]
            if Game.CheckGems(playern,gems):
                A.append([])
                A.append(gems)
                GMoves.append(A)
        if MType[i]==2 and i-8<len(Game.cards[0]): #Buy Card
            deckn=0
            cardn=i-8
            gems=np.maximum(Game.cards[deckn][cardn].cost - Player.bonuses, np.zeros(5)) #TODO: Allow purchase with gold
            gems = list(gems)
            gems.append(0)
            gems=np.array(gems).astype(int)
            if Game.CheckBuy(playern,deckn,cardn,gems):
                A.append([deckn,cardn])
                A.append(gems)
                GMoves.append(A)
        if MType[i]==3 and i-4<len(Game.cards[1]): #Buy Card
            deckn=1
            cardn=i-4
            gems=np.maximum(Game.cards[deckn][cardn].cost - Player.bonuses, np.zeros(5))
            gems = list(gems)
            gems.append(0)
            gems=np.array(gems).astype(int)
            if Game.CheckBuy(playern,deckn,cardn,gems):
                A.append([deckn,cardn])
                A.append(gems)
                GMoves.append(A)
        if MType[i]==4 and i<len(Game.cards[2]): #Buy Card
            deckn=2
            cardn=i
            gems=np.maximum(Game.cards[deckn][cardn].cost - Player.bonuses, np.zeros(5))
            gems = list(gems)
            gems.append(0)
            gems=np.array(gems).astype(int)
            if Game.CheckBuy(playern,deckn,cardn,gems):
                A.append([deckn,cardn])
                A.append(gems)
                GMoves.append(A)
    return GMoves
def MakeMove_TreeSearch(Game,playern,NN,Levels,TopMoves):
    """Depth-limited lookahead over the top-ranked moves.

    Returns (BestScore, BestMove, BestTurnsToWin); a move that reaches
    15 VPs scores TurnsToWin=1.  Levels=-1 degenerates to the plain
    one-step policy (no search).
    """
    #Levels determines the number of moves to look ahead (-1 means none)
    Player=Game.player[playern]
    IV=InputVector_Full(Game,102)
    Out=NN(torch.FloatTensor(IV))
    Out=np.array(Out)
    GMoves=RankMoves(Out,Game,Player,playern)
    if Levels==-1: #A way to generalize to non-tree searched models
        if GMoves:
            return 0,GMoves[0],np.inf
        else:
            return 0,[],np.inf
    BestScore=-1
    BestTurnsToWin=np.inf
    BestMove=[]
    for i in range(int(np.min([TopMoves,len(GMoves)]))):
        # Simulate the candidate move on a copy of the game state.
        G=deepcopy(Game)
        G=MakeMove_GMove(G,playern,GMoves[i])
        score=G.player[playern].VPs
        if score>=15:
            TurnsToWin=1
        else:
            TurnsToWin=np.inf
        # Recurse unless the depth budget is spent or a 1-turn win exists.
        if Levels!=0 and BestTurnsToWin>1 and score<15:
            score,Move,TurnsToWin=MakeMove_TreeSearch(G,playern,NN,Levels-1,TopMoves)
            TurnsToWin+=1
        # Prefer fewer turns-to-win; break ties on raw score when no win found.
        if TurnsToWin<BestTurnsToWin or (BestTurnsToWin == np.inf and score>BestScore):
            BestScore=score
            BestMove=GMoves[i]
            BestTurnsToWin=TurnsToWin
    return BestScore,BestMove,BestTurnsToWin
| true |
be6230417004b890dea0a9b36855f89b1ac53f40 | Python | gjwei/leetcode-python | /easy/Implement strStr().py | UTF-8 | 331 | 3.59375 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
created by gjwei on 4/25/17
"""
class Solution(object):
    def strStr(self, haystack, needle):
        """
        Return the index of the first occurrence of needle in haystack,
        or -1 if needle is not part of haystack.  An empty needle matches
        at index 0.

        :type haystack: str
        :type needle: str
        :rtype: int
        """
        # Fix: str.index raises ValueError when the needle is absent;
        # str.find returns -1, matching the strStr contract.
        return haystack.find(needle)
# Smoke test (Python 2 print syntax).
# NOTE(review): 'g' does not occur in 'abvcs', so with the str.index-based
# implementation this line raises ValueError instead of printing -1.
s = Solution()
print s.strStr('abvcs', 'g')
2dd3c8b60afcce4e99c0f612d346240ccb1364b0 | Python | sanjkm/ProjectEuler | /p23/abundant_sum.py | UTF-8 | 969 | 3.109375 | 3 | [] | no_license | # abundant_sum.py
# Sum all numbers that cannot be expressed as the sum of
# 2 abundant numbers
import sys
sys.path.insert(0, '/home/osboxes/ProjEuler/Utilities')
from factors import sum_all_proper_divisors
def gen_abundant_sums(n, abundant_list, abundant_sum_list, max_num):
    """Append n + a to abundant_sum_list for every abundant a with n + a <= max_num."""
    abundant_sum_list.extend(
        n + abundant for abundant in abundant_list if n + abundant <= max_num
    )
# Every integer above 28123 is expressible as a sum of two abundant numbers.
# NOTE(review): this script uses Python 2 print statements, and the
# membership test on abundant_sum_list (a list) is O(n) per lookup —
# keeping the set would be far faster.
max_num = 28123
abundant_list = []
abundant_sum_list = []
# Collect abundant numbers and, incrementally, all pairwise sums <= max_num.
for i in range(1, max_num):
    if sum_all_proper_divisors(i) > i: # abundant criterion
        abundant_list.append(i)
        gen_abundant_sums (i, abundant_list, abundant_sum_list, max_num)
# Deduplicate the sums.
abundant_sum_set = set (abundant_sum_list)
abundant_sum_list = list (abundant_sum_set)
# Answer: total of all numbers NOT expressible as a sum of two abundants.
print sum(range(max_num+1)) - sum (abundant_sum_list)
non_sum_list = []
for i in range(1, max_num+1):
    if i not in abundant_sum_list:
#        print i
        non_sum_list.append(i)
print non_sum_list[20:40]
| true |
5a898d8a25179e60a3f865316765e3ac3e580f92 | Python | travisgoodspeed/goodwatch | /bin/batterylife.py | UTF-8 | 1,241 | 2.9375 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/usr/bin/python3
## This is a quick little python script that takes the output of
## energytrace-util, averages the current consumption for all samples
## after ten seconds, and then predicts battery life from a 100mAH
## CR2016 battery.
## Usage: et 60 | python3 batterylife.py
import sys;
capacity = 100  # battery capacity of a CR2016 coin cell, in mAH

ampsum = 0.0   # sum of current samples (amps) taken after warm-up
ampcount = 0   # number of samples included in ampsum
lastamp = 0    # most recent current sample (amps)

# Ugly shotgun parser: each energytrace line is "<seconds> <amps> ...";
# '#' lines are comments.  Blank lines are skipped too (the original
# indexed line[0] and crashed on an empty line).
for line in sys.stdin:
    if not line.strip() or line[0] == '#':
        continue
    words = line.split()
    time = float(words[0])
    lastamp = amps = float(words[1])
    # We only count after the first 20 seconds, as booting takes 5 to 10 seconds.
    if time > 20.0:
        ampcount = ampcount + 1
        ampsum = ampsum + amps

if ampcount > 0:
    microamp = lastamp * 1000000.0
    print("%f µA final consumption" % microamp)

    ampavg = ampsum / (ampcount * 1.0)
    milliamp = ampavg * 1000.0
    microamp = ampavg * 1000000.0
    print("%f µA average consumption" % microamp)
    # Fix: use the declared battery capacity instead of a hard-coded 100.
    hours = capacity / milliamp
    days = hours / 24.0
    years = days / 365.2425
    months = years * 12.0
    print("%f months of CR2016 battery life." % months)
    if months < 1:
        print("%f hours of CR2016 battery life." % hours)
| true |
e6a62901efcac5a024d1db6458541ae1a2434279 | Python | Alex-Linhares/co.py.cat | /copycat/temperature.py | UTF-8 | 8,465 | 3.953125 | 4 | [
"MIT"
] | permissive | import math
class Temperature(object):
    """Tracks the temperature of a copycat run.

    The temperature starts clamped: value() reports 100.0 regardless of
    measurements until tryUnclamp() is called at or after `clampTime`.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Restore the initial clamped, maximum-temperature state."""
        self.actual_value = 100.0
        self.last_unclamped_value = 100.0
        self.clamped = True
        self.clampTime = 30

    def update(self, value):
        """Record a new measurement; while clamped, actual_value stays 100.0."""
        self.last_unclamped_value = value
        self.actual_value = 100.0 if self.clamped else value

    def clampUntil(self, when):
        """Force the temperature to read 100.0 until time `when`.

        actual_value is deliberately left untouched until update() runs.
        """
        self.clamped = True
        self.clampTime = when

    def tryUnclamp(self, currentTime):
        """Release the clamp once currentTime reaches the clamp deadline."""
        if self.clamped and currentTime >= self.clampTime:
            self.clamped = False

    def value(self):
        """Current effective temperature: 100.0 while clamped."""
        return 100.0 if self.clamped else self.actual_value

    def getAdjustedValue(self, value):
        """Raise `value` to a temperature-dependent exponent.

        Hot (value() near 100) gives an exponent near 0.5; cold gives a
        larger exponent, sharpening the adjustment.
        """
        exponent = ((100.0 - self.value()) / 30.0) + 0.5
        return value ** exponent

    def getAdjustedProbability(self, value):
        """Temperature-adjust a decision probability.

        0, 0.5 and any probability at zero temperature pass through
        unchanged; values below 0.5 are handled by symmetry.  For the
        rest, the result is the entropy term -f*log2(f) of a coldness-
        scaled probability f.  The formula is empirically tuned — the
        original source carries a long experiment log comparing variants
        on the abc->abd analogy problems.
        """
        if value == 0 or value == 0.5 or self.value() == 0:
            return value
        if value < 0.5:
            return 1.0 - self.getAdjustedProbability(1.0 - value)
        coldness = 100.0 - self.value()
        scale = (10 - math.sqrt(coldness)) / 100
        f = (scale + 1) * value
        return -f * math.log2(f)
| true |
966415584d01e3a3f2c2552cf777e1095c5ac049 | Python | hosseinfani/learning_nn | /2013MikolovWV/src/dal/dal_mikolov_bi_sg.py | UTF-8 | 4,042 | 2.53125 | 3 | [] | no_license | import torch
from torch.utils.data import Dataset, DataLoader
from nltk import ngrams
from torch import optim
import scipy
from scipy import sparse
from datetime import datetime
import numpy as np
import sys
sys.path.extend(['../src'])
import params
class PrepareData(Dataset):
    """Torch Dataset over a scipy sparse design matrix X and sparse label column y."""
    def __init__(self, X, y):
        # X: sparse (n_samples, vocab) one-hot rows; y: sparse (n_samples, 1) target indices
        self.X = X
        self.y = y
    def __len__(self):
        # number of training rows
        return self.X.shape[0]
    def __getitem__(self, idx):#default of torch: float32, default of np: float64 (double)
        # densify the single sparse row on access; label is returned as a 1x1 float tensor
        return torch.as_tensor(self.X[idx].toarray()).float(), torch.as_tensor(self.y[idx].toarray()).view(1,1).float()
def load_data(rawdata, sample=None):
    """Build (or load cached) skip-gram bigram training data from a text corpus.

    Tries to load a previously saved sparse matrix; on any failure it rebuilds
    the data from `rawdata` (one title per line), writes the vocabulary to
    ./../data/vocabs.txt, and caches the matrix as an .npz file.

    Returns a PrepareData dataset whose X is a one-hot context word and whose
    last column is the index of the following word.
    """
    try:
        # cache hit: reuse the previously materialized sparse matrix
        data = scipy.sparse.load_npz('./../data/mikolov_bi_sg_data.npz')
    except:
        # NOTE(review): bare except also hides real errors (bad file, memory) — consider FileNotFoundError
        titles = []
        with open(rawdata, encoding='utf-8') as f:
            lines = f.readlines()
            for line in lines:
                words = line.split()
                #vocabs = vocabs.union(set(words))
                titles.append(words)
        stream_tokens = [token for title in titles for token in title]
        # dict.fromkeys preserves first-seen order while de-duplicating
        vocabs = list(dict.fromkeys(stream_tokens).keys())
        with open('./../data/vocabs.txt', 'w', encoding='utf-8') as f:
            for token in vocabs:
                f.write(f'{token}\n')
        params.wv['w'] = 2
        bigrams = [ngram for ngram in ngrams(stream_tokens, params.wv['w'])]#pair of (i, j)
        #future: n-grams to bigram pairs
        #n2bi_grams = [ngram for title in titles for ngram in ngrams(title, params.wv['w'])]
        # create batches
        params.wv['v'] = len(vocabs)
        training_size = len(bigrams)
        if sample:
            training_size = sample
        #
        #Buy more RAM!!
        #data = torch.empty(training_size, 1 * params.wv['v'] + 1)#1-hot vector + label (index of next word)
        #Sparse Matrix and bucketing
        # rows are staged in a dense buffer of params.wv['b'] rows, then flushed to the sparse matrix
        data = sparse.lil_matrix((training_size, 1 * params.wv['v'] + 1))
        data_ = np.zeros((params.wv['b'], 1 * params.wv['v'] + 1))
        j = -1
        for i, grams in enumerate(bigrams):
            if i >= training_size: break
            X = np.zeros((1, len(vocabs)))
            X[0, vocabs.index(grams[0])] = 1
            y_index = vocabs.index(grams[1])
            y = np.asarray([y_index]).reshape((1,1))
            X_y = np.hstack([X, y])
            try:
                j += 1
                data_[j] = X_y
            except:
                # buffer full (IndexError): flush the completed bucket into the sparse matrix
                s = int(((i / params.wv['b']) - 1) * params.wv['b'])
                e = int(s + params.wv['b'])
                data[s: e] = data_
                j = 0
                data_[j] = X_y
            if (i % params.wv['b'] == 0):
                print(f'Loading {i}/{len(bigrams)} instances!{datetime.now()}')
        if j > -1:
            # flush the final, partially filled bucket
            data[-j-1:] = data_[0:j+1]
        scipy.sparse.save_npz('./../data/mikolov_bi_sg_data.npz', data.tocsr())
        #Sparse Matrix => Slow!
        # for i, grams in enumerate(bigrams):
        #     if i >= training_size: break
        #     # input
        #     X = sparse.csr_matrix((1, len(vocabs)))
        #     X[0, vocabs.index(grams[0])] = 1
        #
        #     # label: in our case, we have |V|-classifier and the class# is the index of word in the vocab
        #     y_index = vocabs.index(grams[1])
        #
        #     y = sparse.csr_matrix((1,1))
        #     y[0, 0] = y_index
        #     X_y = sparse.hstack([X, y])
        #
        #     data[i] = X_y
        #     if (i%1000 == 0):
        #         print(f'Loading {i}/{len(bigrams)} instances!')
    # don't have enough memory => bring it to batches
    # if params.lm['g']:
    #     data.cuda()
    return PrepareData(X=data[:, :-1], y=data[:, -1])
def load_vocabs():
    """Read the cached vocabulary file and return it as a list of stripped tokens."""
    with open('./../data/vocabs.txt', 'r', encoding='utf-8') as f:
        vocabs = f.readlines()
    return [word.strip() for word in vocabs]
# Build the dataset from the preprocessed corpus, then report the vocabulary size.
if __name__ == "__main__":
    load_data(rawdata='./../../2003BengioNLM/data/news_title_preprocess.txt', sample=None)
    print(len(load_vocabs()))
e491afc4c58dcba5553c1fe46a9a793d27016daf | Python | opcod/test | /选择结构.py | UTF-8 | 806 | 3.75 | 4 | [] | no_license | def op():
try:
age = int(input("您几岁了,请输入一个整数数字:"))
if 0 < age <= 9:
pr = "你还是个小孩儿"
print(pr)
elif 9 < age <= 20:
pr = "前途无量啊年轻人"
print(pr)
elif 20 < age <= 30:
pr = "年轻人好好工作"
print(pr)
elif age > 30 and age <= 100:
pr = "您的年龄有点大额"
print(pr)
elif age > 100 and age <= 120:
pr = "长命的人啊"
print(pr)
else:
print("你不是地球的吧")
except:
print("您输入的不是数字,请重新输入!")
return(op())
op()
# TODO: how to make it keep looping while the input is not a number?
# Completed on 2020-03-06 18:40
2f94723ba08fae46322d36d31e732688e10276a7 | Python | LLCampos/tacoAtacoOOGroup3 | /ServicesList.py | UTF-8 | 5,273 | 3.296875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# 2015-2016 Complementos de Programacao
# Grupo 3
# 43134 Luís Filipe Leal Campos
# 48392 Mariana Vieira De Almeida Nave
from UserList import UserList
from Service import Service
from TimeTT import Time
from FileUtil import *
class ServicesList(UserList):
    """A collection of Services. The behaviour of this collection is similar to the one of the list type"""
    # Index of element with driver's name in a line of a services file
    INDEXDriverName = 0
    # Index of element with vehicle's plate in a line of a services file
    INDEXVehiclePlate = 1
    # Index of element with client's name in a line of a services file
    INDEXClientName = 2
    # Index of departure hour in a line of a services file
    INDEXDepartureHour = 3
    # Index of arrival hour in a line of a services file
    INDEXArrivalHour = 4
    # Index of circuit id in a line of a services file
    INDEXCircuitId = 5
    # Index of circuit kms in a line of a services file
    INDEXCircuitKms = 6
    # Index of driver's status in a line of a services file
    INDEXDriverStatus = 7
    # Index of driver's accumulated time in a line of a services file
    INDEXAccumulatedTime = 8
    # Index of element with car's autonomy in kms in a line of a services file
    INDEXINDEXVehicAutonomy = 9
    # Index of element with accumulated kms in a line of a services file
    INDEXAccumulatedKms = 10
    def __init__(self, file_name=None):
        """Creates a ServicesList composed by Services objects,
        from a file with a list of services.
        Requires: If given, file_name is str with the name of a .txt file containing
        a list of services organized as in the examples provided in
        the general specification (omitted here for the sake of readability).
        Ensures:
        if file_name is given:
            a ServiceList, composed by objects of class Service that correspond to the services listed
            in file with name file_name. In this ServiceList, drivers terminating their services earlier
            have priority over the ones terminating later; lexicographic order of drivers's names
            decides eventual ties in each case above.
        if file_name is none:
            a empty ServiceList.
        """
        # creates empty ServicesList
        UserList.__init__(self)
        # if file_name is given, self is populated with Services corresponding to the
        # services on the file file_name
        if file_name is not None:
            inFile = FileUtil(file_name)
            for line in inFile.getContent():
                # each line is a comma-separated record; fields are picked by the INDEX* constants
                servData = line.rstrip().split(", ")
                servDriver = servData[ServicesList.INDEXDriverName]
                servPlate = servData[ServicesList.INDEXVehiclePlate]
                servClient = servData[ServicesList.INDEXClientName]
                servDeparTime = Time(servData[ServicesList.INDEXDepartureHour])
                servArrivalTime = Time(servData[ServicesList.INDEXArrivalHour])
                servCircuit = servData[ServicesList.INDEXCircuitId]
                servCircuitKms = servData[ServicesList.INDEXCircuitKms]
                servDriverStatus = servData[ServicesList.INDEXDriverStatus]
                newService = Service(servDriver, servPlate, servClient, servDeparTime, servArrivalTime, \
                                     servCircuit, servCircuitKms, servDriverStatus)
                self.append(newService)
    def emptyServices(self, drivers, vehicles):
        """Creates an accessory ServicesList to be used in the first working period,
        after attribution of vehicles to the available drivers.
        Requires: drivers and vehicles are collections of drivers and vehicles, respectively.
        Ensures: A ServicesList regarding the working period prior to the first of the day (ie 0709).
        This will be useful if one considers the first working period of the day (0911),
        where vehicles are not attributed to drivers and no service List is available.
        Thus, vehicles, lexicographic sorted by plate, are attributed to drivers
        according to their entry hour (and name, in case of tie). All the service-related information is
        set as a "no service" (_no_client_, _no_circuit_, service kms = 0), Arrival and Departure
        hours are set as the Driver's entry hour and being ready to work,
        drivers' status is standby, of course!
        """
        # sort drivers for the 1st period (0911) according to Drivers' EntryHour
        # and in case of tie, Drivers' name
        d = sorted(drivers.values())
        v = sorted(vehicles.keys())
        j = 0
        # pair each driver (sorted by entry hour/name) with the next plate in lexicographic order
        for i in d:
            driverName = i.getDriverName()
            vehiclePlate = v[j]
            serv = Service(driverName, vehiclePlate, "", i.getDriverEntryHour(), i.getDriverEntryHour(), "", "", "")
            serv.noService()
            self.append(serv)
            j += 1
    def __eq__(self, other_ServicesList):
        # NOTE(review): equality is not implemented — this returns None, which also
        # disables UserList's element-wise comparison; confirm whether this is intended.
        pass
    def __str__(self):
        """String representation of the ServiceList. Returns the driver's names."""
        output = ""
        for service in self:
            output += service.getServiceDriver() + '\n'
        # returns output without the last newline char
        return output.strip()
| true |
0bac691335f7403e363d3fd20ff6e93ff8796df8 | Python | ljshLLW/homework | /h3/TestMaxSubArray.py | UTF-8 | 314 | 2.59375 | 3 | [] | no_license | import unittest
from MaxSubarray import MaxSubArray
class TestMaxSubArrayFunc(unittest.TestCase):
    """Unit tests for the MaxSubArray maximum-subarray-sum function."""
    def test_Empty(self):
        # an empty input yields a maximum subarray sum of 0
        self.assertEqual(0,MaxSubArray([]))
    def test_AB_BH(self):
        # the best subarray here is the single leading element 7
        self.assertEqual(7,MaxSubArray([7,-4,-6]))
if __name__ == '__main__':
    unittest.main(verbosity=2)
| true |
771ac4c81d3db59d1fa964ddf482921a6a5a7597 | Python | anumike/dragon | /lesson.py | UTF-8 | 1,484 | 3.8125 | 4 | [] | no_license | import random
# Tiny text adventure: the player picks a door; doors 1/2 end immediately,
# door 3 adds a dragon sub-choice, door 4 adds a cat number-guessing game.
print("you in dark room misty castle.")
print("you have 3 door.plase choose one.")
playerChoice = input("choose1,2,3 or 4...")
# Door 1: instant win
if playerChoice == "1":
    print("you find room with golden.you a rich man!")
    print("you win!")
# Door 2: instant loss
elif playerChoice == "2":
    print("door open and angry trol hit you he's weapon.")
    print("sorry, but you game over.")
# Door 3: dragon encounter with a second choice
elif playerChoice == "3":
    print("you went to room and find a slippy dragon.")
    print("you can do:")
    print("1) roberry dragon golden")
    print("2) walk to sun")
    dragonChoice = input("chose 1 or 2")
    if dragonChoice == "1":
        print("sorry,but dragon get up and eat you and you yummy.")
        print("sorry,but you game over.")
    elif dragonChoice == "2":
        print("you run of misty castel")
        print("you win!")
    else:
        print("sorry but you don't write 1 or 2!")
# Door 4: guess the cat's random number in [1, 10]
elif playerChoice == "4":
    print("you went to room with cat")
    print("you should says number(1,10) that he expect")
    number = int(input("your number is..."))
    cat_number = random.randint(1, 10)
    if number == cat_number:
        print("cat says: it's true number")
        print("you are free")
        print("you win!")
    else:
        print("cat says:it's don't true number, my number is: %s" % cat_number)
        print("fro now you don'tsee sun")
        print("sorry,but you game over")
# Anything else: invalid menu choice
else:
    print("sorry,but you don't write 1,2,3 or 4")
    print("restart game and play")
c10bb7c8d891b2ddb0818697a9f5eb351d42c539 | Python | HLTCHKUST/Xpersona | /crosslingual/tools/lowercase_and_remove_accent.py | UTF-8 | 1,250 | 3.078125 | 3 | [
"MIT"
] | permissive | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import unicodedata
import six
def convert_to_unicode(text):
    """
    Converts `text` to Unicode (if it's not already), assuming UTF-8 input.

    Bytes are decoded with errors="ignore"; text is returned unchanged;
    any other type raises TypeError.
    """
    # six_ensure_text is copied from https://github.com/benjaminp/six
    def six_ensure_text(s, encoding='utf-8', errors='strict'):
        if isinstance(s, six.binary_type):
            return s.decode(encoding, errors)
        elif isinstance(s, six.text_type):
            return s
        else:
            raise TypeError("not expecting type '%s'" % type(s))
    return six_ensure_text(text, encoding="utf-8", errors="ignore")
def run_strip_accents(text):
    """
    Strips accents from a piece of text.

    The text is decomposed with Unicode NFD normalization and every
    combining mark (category "Mn") is dropped.
    """
    decomposed = unicodedata.normalize("NFD", text)
    return "".join(
        ch for ch in decomposed if unicodedata.category(ch) != "Mn"
    )
# Stream stdin line by line: lowercase each line and strip its accents.
for line in sys.stdin:
    line = convert_to_unicode(line.rstrip().lower())
    line = run_strip_accents(line)
    print(u'%s' % line.lower())
| true |
6c5eb99d1d9a0f6ac882439fcfc2fd7bf43b9273 | Python | jessekelly881/gitty | /gitter.py | UTF-8 | 1,546 | 3 | 3 | [] | no_license | #!/usr/bin/env python
import numpy as np
import os
# Random number generation. Used for num of commits per day
def cleanRandNum(n):
    """Round *n* to the nearest whole number, clamping anything below 1 to 0."""
    return 0 if n < 1 else int(round(n, 0))
def genRand():
    """Draw a daily commit count from N(mean=8, sd=10), sanitized by cleanRandNum."""
    mu, sigma = 8, 10
    s = np.random.normal(mu, sigma)
    return cleanRandNum(s)
# Commit dummy file
def commitDummyFile(year, month, day, num):
    """Stage and commit the dummy file for the given date with back-dated git metadata."""
    fname = "%s/%s/%s/%s" % (year, month, day, num)
    GIT_COMMITTER_DATE = "%s-%s-%s 12:00:00" % (year, month, day)
    GIT_AUTHOR_DATE = "%s-%s-%s 12:00:00" % (year, month, day)
    GIT_MESSAGE = "%s-%s-%s-%s" % (year, month, day, num)
    GIT_COMMIT_DATE = "%s-%s-%s 12:00:00" % (year, month, day)
    # exporting the date variables makes git record the commit on the chosen day
    command = "export GIT_COMMITTER_DATE='%s'; export GIT_AUTHOR_DATE='%s'; git add '%s' -f; git commit -m '%s' --date '%s'"% (
        GIT_COMMITTER_DATE,
        GIT_AUTHOR_DATE,
        fname,
        GIT_MESSAGE,
        GIT_COMMIT_DATE
        )
    if not os.system(command) == 0:
        # Check for failure and wait
        # NOTE(review): failures are silently swallowed here — confirm this is intended
        return
# Create dummy file
def createDummyFile(year, month, day, num):
    """Write a small placeholder file at <year>/<month>/<day>/<num>."""
    target = "%s/%s/%s/%s" % (year, month, day, num)
    os.makedirs(os.path.dirname(target), exist_ok=True)
    with open(target, "w") as handle:
        handle.write("DUMMY TEXT")
    return
# Loop through days and create commits
# For every day of 2015-2016, create and commit a random number of dummy files.
# NOTE(review): day runs 1..31 for every month, so invalid dates (e.g. Feb 30)
# still produce files/commits under those paths.
for year in [2015, 2016]:
    for month in range(1,13):
        for day in range(1,32):
            for n in range(0, genRand()):
                createDummyFile(year, month, day, n)
                commitDummyFile(year, month, day, n)
| true |
affdb215a4fd6e340561572f427769133f2f6dac | Python | guocheng45/Projects | /GTExercises/Test1/TkintsO.py | UTF-8 | 1,827 | 3.203125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from Tkinter import *
class Application():
    """Simple Tkinter login-style form: title label, username/password fields, submit button."""
    def __init__(self,Mframe):
        # Mframe: the parent Tk window the widgets are placed into
        self.Mframe = Mframe
        self.main_widget()
        self.other_widget()
    def main_widget(self):
        """Create the title label at the top of the window."""
        self.top_lb = Label(self.Mframe,fg = "blue",bg = "green",text = "这里是你想要输入的标题",font = ('Tempus Sans ITC', 15))
        self.top_lb.grid(row = 0,column = 0,padx = 15,pady = 2) # layout: row, column, x-padding and y-padding around the widget
    def other_widget(self):
        """Create the framed username/password entries and the submit button."""
        self.lb_frame = LabelFrame(self.Mframe,height=200,width=300)
        self.lb_frame.grid(row=1,column=0,padx=15,pady=2)
        self.lb1 = Label(self.lb_frame,text="用户名:")
        self.lb1.grid(row=0,column=0,padx=15,pady=2)
        # input field (username)
        self.str_ent1 = StringVar()
        self.enty1 = Entry(self.lb_frame,textvariable = self.str_ent1)
        self.enty1.grid(row=0,column=1,padx=15,pady=2)
        self.lb2 = Label(self.lb_frame,text="密   码:")
        self.lb2.grid(row=1,column=0,padx=15,pady=2)
        # input field (password, masked with *)
        self.str_ent2 = StringVar()
        self.enty2 = Entry(self.lb_frame,textvariable = self.str_ent2,show="*")
        self.enty2.grid(row=1,column=1,padx=15,pady=2)
        # submit button
        self.bt1 = Button(text = "提交",width=10) #, command=self.runtime()  # height is the number of text rows; width is the button width
        self.bt1.grid(row=3,column = 0,padx=15,pady=2)  # sticky: "w" aligns left, "e" aligns right (west = left, east = right)
    # def show_info(self):
    #     self.lsbox = labe
    #
    # def runtime(self):
    #     print (self.str_ent1.get())
    #     print (self.str_ent2.get())
# Build the root window, attach the Application widgets, and start the event loop.
MainFrame = Tk()
MainFrame.title("what do you want to say?")
Application(MainFrame)
MainFrame.mainloop()
| true |
631e23b71c9d4076c8a16e02f6022cde5f3e383b | Python | TearKnow/python | /demo/liaoxuefeng/06_4_decorator.py | UTF-8 | 158 | 2.859375 | 3 | [] | no_license |
def log(func):
    """Decorator that prints a fixed notice before delegating to *func*."""
    def inner(*args, **kwargs):
        print('class function')
        return func(*args, **kwargs)
    return inner
@log
def now():
    # prints a hard-coded date; the log decorator above prints its notice first
    print('2018-08-22')
now()
| true |
60192ccc0b0e061b985e79e2b3b3564e710edb1a | Python | hamcheeseburger/coding | /chapter3/3_4.py | UTF-8 | 919 | 3.65625 | 4 | [] | no_license | # p.99 1이 될 때까지
# N, K = map(int, input().split())
#
# count = 0
# # exit once N is 1 or less
# while N > 1:
#     # divide first whenever N is divisible by K
#     if N % K == 0:
#         N = N // K
#     # otherwise subtract 1
#     else:
#         N -= 1
#
#     count += 1
#
# print(count)
# The version below is the more efficient solution
N, K = map(int, input().split())
count = 0
while True:
    # Instead of subtracting 1 on every loop iteration,
    # jump straight down to the largest number divisible by K.
    # Example test case: N = 70, K = 13 —
    # not five loop passes of -1 to reach a multiple of 13,
    # but a single pass that lands on the multiple of 13 directly.
    target = (N // K) * K
    count += (N - target)
    N = target
    # no further division is possible
    if N < K:
        break
    # the actual division step
    count += 1
    N = N // K
# subtract down from the final N to 1
count += (N - 1)
print(count)
e0b497a943fc40a038fea2181be24eb7d87200c9 | Python | Leelow/upper-body-clustering | /lib/infoFile.py | UTF-8 | 983 | 2.953125 | 3 | [
"MIT"
] | permissive | from os.path import isfile, dirname, relpath
from lib.file import File
from _cluster import Cluster
class InfoFile:
    """Wraps an on-disk info file and builds a Cluster from its records."""
    def __init__(self, path):
        # fail fast on a missing file so later reads cannot surprise the caller
        if isfile(path) is False:
            raise ValueError('The file "' + path + '" does not exist.')
        self.path = path
        self.directory = relpath(dirname(path), './')
    # Get a cluster
    def get_cluster(self):
        """Parse every line of the file into a File and collect them in a Cluster (id '0')."""
        cluster = Cluster(self.directory, '0')
        # Open file
        with open(self.path, 'r') as lines:
            # Read lines
            for line in lines:
                # Extract values
                # assumes: name, three ints, then floats, space-separated;
                # the trailing field is dropped by values[4:-1] — TODO confirm format
                values = line.split(' ')
                cluster.add_file(File(self.directory,
                                      values[0],
                                      int(values[1]),
                                      int(values[2]),
                                      int(values[3]),
                                      map(float, values[4:-1])))
        return cluster
| true |
9aa1f79d9f775457a289f64178e54c0dc56eaec0 | Python | ggrabuskie/ros | /catkin_ws/src/rftest/scripts/talker.py | UTF-8 | 5,109 | 2.53125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
## Simple talker demo that published std_msgs/Strings messages
## to the 'chatter' topic
import rospy
import serial
import struct
from time import sleep, time
from std_msgs.msg import String
from rftest.msg import Mobility
ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=None)
ser.setDTR(True) #if the extra pins on the ttl usb are connected to m0 & m1 on the ebyte module
ser.setRTS(True) #then these two lines will send low logic to both which puts the module in transmit mode 0
#take serial device, data to send
#return length of data sent OR -1 on error
def putRF(rf_uart, data): #arguments to make function more self-contained and function-like
    """Frame *data* as b's' + payload + b'f' and write it to the RF serial link."""
    rf_uart.setDTR(True) #if the extra pins on the ttl usb are connected to m0 & m1 on the ebyte module
    rf_uart.setRTS(True) #then these two lines will send low logic to both which puts the module in transmit mode 0
    print("put at")
    print(time())
    rf_uart.write(b's'+data+b'f') #start byte + payload + stop byte
    rf_uart.flush() #waits until all data is written
    print("wrote")
    print(len(data))
    return len(data)
#take serial device, size of expected data in bytes
#return data received, OR -1 on error
def getRF(rf_uart, size_of_payload): #added argument to make it more function-like
    """Scan the RF serial stream for a b's'-framed payload of fixed size.

    Returns the payload bytes on success, -1 when the stop byte is wrong.
    NOTE(review): with timeout=None this loop blocks forever until a start
    byte arrives; the trailing `return 0` is unreachable as written.
    """
    rf_uart.setDTR(True) #if the extra pins on the ttl usb are connected to m0 & m1 on the ebyte module
    rf_uart.setRTS(True) #then these two lines will send low logic to both which puts the module in transmit mode 0
    print("get at")
    print(time())
    print(rf_uart.in_waiting)
    while True:
        n = rf_uart.read(1) #read bytes one at a time
        #if not n:
        #    print("nothing in n")
        #    return 0
        if n == b's': #throw away bytes until start byte is encountered
            data = rf_uart.read(size_of_payload) #read fixed number of bytes
            n = rf_uart.read(1) #the following byte should be the stop byte
            if n == b'f':
                print('success')
                print(data)
                return data
            else: #if that last byte wasn't the stop byte then something is out of sync
                print("failure")
                return -1
        #else:
        #    continue #if first byte doesn't match keep tossing bytes til they match
    return 0
def talker():
    """Receive 9-byte telecommand frames over the RF serial link and republish
    each one as a Mobility message on the 'telecommand' topic.

    After every valid frame, the current UNIX time is packed into 4 bytes and
    sent back over the RF link as a timestamp/acknowledgement.
    """
    pub = rospy.Publisher('telecommand', Mobility, queue_size=1000)
    rospy.init_node('receive', anonymous=True)
    rate = rospy.Rate(1000000) # 1mhz
    msg = Mobility()
    while not rospy.is_shutdown():
        data = getRF(ser, 9)
        print("read")
        print(data)
        if((data != -1) and (data != 0)):
            print("received")
            # one signed byte per field, in the order the sender packs them
            mobility_string = struct.unpack('9b', data)
            print('unpacked')
            print(mobility_string)
            print(time())
            msg.left = mobility_string[0]
            msg.right = mobility_string[1]
            msg.j1 = mobility_string[2]
            msg.j2 = mobility_string[3]
            msg.j3 = mobility_string[4]
            msg.j4 = mobility_string[5]
            msg.j51 = mobility_string[6]
            msg.j52 = mobility_string[7]
            msg.mode = mobility_string[8]
            rospy.loginfo(msg)
            pub.publish(msg)
            # BUG FIX: struct format 'I' requires an int, but time() returns a
            # float, which raises struct.error.  Truncate to whole seconds.
            timestamp = int(time())
            packed = struct.pack('1I', timestamp)
            putRF(ser, packed)
if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        # normal shutdown path when ROS interrupts the node
        pass
| true |
e74802c919a18491feca9343e4fa5b323b6ff06c | Python | jmontara/become | /Data Structures/Queues/queue_start.py | UTF-8 | 268 | 4.0625 | 4 | [] | no_license | # try out the python queue functions
from collections import deque

# BUG FIX: the original mixed Python 2 print statements ("print queue") with
# Python 3 print() calls, which is a SyntaxError on Python 3.  All prints are
# now function calls, which behave identically on both interpreters for a
# single argument.

# create a new empty queue
queue = deque()

# add some elements
queue.append(1)
queue.append(2)
queue.append(3)
print(queue)

# pop an item from the left (the original comment incorrectly said "right")
x = queue.popleft()
print(x)
print(queue)
| true |
9521c3a33b27eb39e3460b19a1e6a0d52c244e69 | Python | benallard/quarto-gym | /gym_quarto/wrapper.py | UTF-8 | 1,871 | 2.765625 | 3 | [] | no_license | import random
from gym import Wrapper
from gym.envs.registration import register
class OnePlayerWrapper(Wrapper):
    """ We emulate the second player so that each step is seen from the same player
    """
    def __init__(self, env, other_player):
        # other_player: an agent with reset/predict/seed, played automatically between steps
        super(OnePlayerWrapper, self).__init__(env)
        self.other_player = other_player
    def reset(self):
        """Reset the env and, on a coin flip, let the emulated opponent move first."""
        obs = self.env.reset()
        self.other_player.reset(self.game)
        self.other_first = random.choice([True, False])
        if self.other_first:
            # Make the first step now
            action, _ = self.other_player.predict(obs)
            obs, _, done, _ = self.env.step(action)
        return obs
    def step(self, action):
        """Apply the agent's action, then the opponent's; report rewards from the agent's view."""
        obs, self_rew, done, info = self.env.step(action)
        #self.render()
        if done:
            if info['invalid']:
                # We just disqualified ourself
                info['winner'] = 'Env'
            else:
                info['winner'] = 'Agent'
            return obs, self_rew, done, info
        # Let other play
        action, _state = self.other_player.predict(obs)
        obs, rew, done, info = self.env.step(action)
        if done:
            if info['invalid']:
                # Other player made a bad move, don't reward the Agent
                reward = 0
                info['winner'] = 'Agent'
            elif info['draw']:
                # Same reward for both
                reward = rew
                info['winner'] = 'Draw'
            else:
                # If the second won the game, give negative reward to the agent
                reward = -rew
                info['winner'] = 'Env'
        else:
            reward = self_rew
            info['winner'] = None
        return obs, reward, done, info
    def seed(self, seed):
        # forward the seed to the emulated opponent so games are reproducible
        self.other_player.seed(seed)
        return [seed]
| true |
48fb38e499faab6591ade27c109ef28625476623 | Python | Alkhithr/Mary | /think-python/chapter13_data_structures/word_frequency_analysis.py | UTF-8 | 1,216 | 3.6875 | 4 | [] | no_license | # Write a program that
# reads a file,
# breaks each line into words,
# strips whitespace and punctuation from the words,
# and converts them to lowercase.
import string
import json
# Count word frequencies in the book text, then print the words grouped by the
# twenty highest frequencies as pretty JSON.
fin = open('pg55650.txt')
word_count = dict()
for line in fin:
    # turn every punctuation and whitespace character into a common separator
    for p in string.punctuation:
        line = line.replace(p, '|')
    for w in string.whitespace:
        line = line.replace(w, '|')
    words = line.split('|')
    for word in words:
        # NOTE: single-character words ("a", "I") are intentionally skipped here
        if len(word) > 1:
            word = word.lower()
            if word_count.get(word) is None:
                word_count[word] = 1
            else:
                word_count[word] += 1
# get top 20 distinct frequencies
frequency_ordinal = []
for word in word_count:
    if frequency_ordinal.count(word_count[word]) == 0:
        frequency_ordinal.append(word_count[word])
frequency_ordinal.sort(reverse=True)
word_count_top_20 = dict()
for frequency in frequency_ordinal[0:20]:
    for word in word_count:
        if word_count[word] == frequency:
            # BUG FIX: the original tested word_count_top_20.get(word), which is
            # always None (keys are frequencies, not words), so every frequency
            # bucket was overwritten and kept only the last matching word.
            if word_count_top_20.get(frequency) is None:
                word_count_top_20[frequency] = [word]
            else:
                word_count_top_20[frequency] += [word]
print(json.dumps(word_count_top_20, indent=2))
| true |
3cb1b8121073a0b5bba382c99cf4873a58d9c39e | Python | Indiana3/python_exercises | /wb_chapter5/exercise130.py | UTF-8 | 1,633 | 4.25 | 4 | [] | no_license | ##
# Read, tokenize and mark the unary operators
# in a mathematical expression
#
from exercise129 import tokenGenerator
## Identify unary operators "+" and "-" in a list of tokens
def unaryIdentifier(t):
    """Return a copy of token list *t* with unary "+"/"-" rewritten as "u+"/"u-".

    A "+" or "-" is unary when it is the first token, or when the token that
    precedes it in the ORIGINAL list is an operator or an opening bracket.

    @param t a list of tokens
    @return a new list where unary operators have been replaced
            by "u+" and "u-" respectively
    """
    # Tokens after which a "+"/"-" must be unary rather than binary.
    unary_preceders = {"(", "[", "{", "+", "-", "*", "/", "**"}
    marked = []
    for i, token in enumerate(t):
        if token in ("+", "-") and (i == 0 or t[i - 1] in unary_preceders):
            marked.append("u" + token)
        else:
            marked.append(token)
    return marked
# Read a string from user, tokenize it and
# mark the unary operators
def main():
    """Read an expression, tokenize it, and print the tokens with unary +/- marked."""
    exp = input("Please enter a valid mathematical expression: ")
    # Display the tokens list
    tokens = tokenGenerator(exp)
    # print(tokens)
    # Display the tokens list with unary operators marked (if any)
    print(unaryIdentifier(tokens))
# Call the main function
if __name__ == "__main__":
    main()
| true |
68a5e6cf27723c406448e9b159ad84b76011d927 | Python | strickvl/exercism | /python/hamming/hamming.py | UTF-8 | 296 | 3.6875 | 4 | [] | no_license | def distance(strand_a: str, strand_b: str) -> int:
    # Hamming distance is only defined for strands of equal length.
    if len(strand_a) != len(strand_b):
        raise ValueError("the two strings must be of equal length")
    # One entry per position where the strands differ; the sum is the distance.
    inconsistent_elements = [1 for val in range(len(strand_a)) if strand_a[val] != strand_b[val]]
    return sum(inconsistent_elements)
| true |
2da8e99e99bd67ab96741031ca9e3d77afc081f1 | Python | Lazysisphus/Zen-Thought-on-LeetCode | /2020名企高频面试题/数组/0054螺旋矩阵.py | UTF-8 | 1,037 | 3.078125 | 3 | [] | no_license | class Solution:
def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
def a_circle(start):
x_end = col - start - 1
y_end = row - start - 1
for j in range(start, x_end + 1):
res.append(matrix[start][j])
if y_end > start:
for i in range(start + 1, y_end + 1):
res.append(matrix[i][x_end])
if y_end > start and x_end > start:
for j in range(x_end - 1, start - 1, -1):
res.append(matrix[y_end][j])
if y_end - 1 > start and x_end > start:
for i in range(y_end - 1, start, -1):
res.append(matrix[i][start])
if not matrix or not matrix[0]:
return []
row = len(matrix)
col = len(matrix[0])
start = 0
res = []
while start * 2 < row and start * 2 < col:
a_circle(start)
start += 1
return res | true |
f61a0f2264d831a84c61433afe29c16e9aee7417 | Python | goshulina/docx2txt | /app.py | UTF-8 | 5,077 | 2.640625 | 3 | [] | no_license | from docx2txt.docx2txt import *
def numbering(num, lvl, NUM):
    '''
    Determine a list's numbering format and the starting element the
    numbering begins from (numFmt, start, lvlText) for the given numId/level,
    looked up in the parsed word/numbering.xml tree NUM.
    Returns (None, None, None) when no matching definition is found.
    '''
    word_namespace = '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}'
    wnum = word_namespace + 'num'
    wabstractNumId = word_namespace + 'abstractNumId'
    wabstractNum = word_namespace + 'abstractNum'
    value = word_namespace + 'val'
    numId = word_namespace + 'numId'
    wlvl = word_namespace + 'lvl'
    ilvl = word_namespace + 'ilvl'
    wstart = word_namespace + 'start'
    wnumFmt = word_namespace + 'numFmt'
    wlvlText = word_namespace + 'lvlText'
    abstractNumId = 'None'
    numer = None
    # first hop: numId -> abstractNumId
    for num1 in NUM.getiterator(wnum):
        if num1.get(numId) == num:
            abstractNumId = num1.find(wabstractNumId).get(value)
            break
    # second hop: abstractNum -> the lvl entry matching the requested level
    for abstract in NUM.getiterator(wabstractNum):
        if abstract.get(wabstractNumId) == abstractNumId:
            for tagwlvl in abstract.getiterator(wlvl):
                if tagwlvl.get(ilvl) == lvl:
                    numer = (tagwlvl.find(wnumFmt).get(value), tagwlvl.find(wstart).get(value),
                             tagwlvl.find(wlvlText).get(value))
                    break
            break
    if not numer:
        numer = (None, None, None)
    return numer
def process(docx, img_dir=None):
    """Extract body text from a .docx file via xml2text.

    Header/footer and image extraction are currently disabled (commented out);
    img_dir is accepted for interface compatibility but unused.
    """
    text = u''
    # unzip the docx in memory
    # zipf = zipfile.ZipFile(docx)
    # filelist = zipf.namelist()
    # get header text
    # there can be 3 header files in the zip
    # header_xmls = 'word/header[0-9]*.xml'
    # for fname in filelist:
    #     if re.match(header_xmls, fname):
    #         text += xml2text(zipf.read(fname))
    # get main text
    # doc_xml = 'word/document.xml'
    # text += xml2text(zipf.read(doc_xml))
    text += xml2text(docx)
    # get footer text
    # there can be 3 footer files in the zip
    # footer_xmls = 'word/footer[0-9]*.xml'
    # for fname in filelist:
    #     if re.match(footer_xmls, fname):
    #         text += xml2text(zipf.read(fname))
    # if img_dir is not None:
    #     # extract images
    #     for fname in filelist:
    #         _, extension = os.path.splitext(fname)
    #         if extension in [".jpg", ".jpeg", ".png", ".bmp"]:
    #             dst_fname = os.path.join(img_dir, os.path.basename(fname))
    #             with open(dst_fname, "w") as dst_f:
    #                 dst_f.write(zipf.read(fname))
    #
    # zipf.close()
    # return text.strip()
    return text
def xml2text(path):
    """Extract the document text from a .docx at *path*, rendering list markers.

    Reads word/document.xml (and word/numbering.xml when present) from the zip
    and walks the XML, emitting bullets, decimal numbering, and lower-letter
    numbering prefixes, plus tabs and line breaks.
    """
    zipf = zipfile.ZipFile(path)
    number_xml = 'word/numbering.xml'
    doc_xml = 'word/document.xml'
    try:
        document_numbers = zipf.read(number_xml)
        number_xml = ET.XML(document_numbers)
        nmbr = 'not none'
    except Exception:
        # document has no numbering part: list handling is skipped below
        nmbr = None
    document_text = zipf.read(doc_xml)
    zipf.close()
    text = u''
    root = ET.fromstring(document_text)
    j = 0
    lowletter = 1
    previous_list = 1
    lowerRoman = 1
    for n, child in enumerate(root.iter()):
        if child.tag == qn('w:numPr'):
            lvl = child.find(qn('w:ilvl')).get(qn('w:val'))
            num = child.find(qn('w:numId')).get(qn('w:val'))
            if nmbr != None:
                nmbr = numbering(num, lvl, number_xml)
            current_list = int(num)
            # print(num, lvl, nmbr, previous_list, current_list, j)
            if nmbr[0] == 'bullet' or nmbr[0] == 'none':
                text += "\t" * int(lvl) + str(nmbr[2]) + " "
                # pass
            elif nmbr[0] == 'decimal':
                # Detect the first element of a new list
                if previous_list < current_list:
                    j = 1
                    previous_list = current_list
                    text += "\t" * int(lvl) \
                            + str(j) + ". "
                    lowletter = 1
                elif previous_list == current_list and int(lvl) == 0:
                    j += + 1
                    text += "\t" * int(lvl) \
                            + str(j) + ". "
            elif nmbr[0] == 'lowerLetter':
                # letters restart from nmbr[1] (the list's start value), offset by lowletter
                if previous_list == current_list and int(lvl) != 0:
                    text += "\t" * int(lvl) + chr(int(nmbr[1]) + lowletter - 2 + ord('a')) + ". "
                    lowletter += 1
            elif nmbr[0] is None:
                pass
            else:
                pass
        elif child.tag == qn('w:t'):
            t_text = child.text
            text += t_text if t_text is not None else ''
        elif child.tag == qn('w:tab'):
            text += '\t'
        elif child.tag in (qn('w:br'), qn('w:cr')):
            text += '\n'
        elif child.tag == qn('w:p'):
            text += '\n'
    text = ''.join(text)
    return text
# NOTE(review): docx_path is never defined in this module, so importing/running
# the file as-is raises NameError — define docx_path (or guard this call) first.
xml2text(docx_path)
# TODO rewrite in module, add footer and header
| true |
08a4bc46d1dfba420def3c7ebc362298fc40bb3d | Python | tnakaicode/jburkardt-python | /matrix_exp/kms.py | UTF-8 | 12,498 | 3.421875 | 3 | [] | no_license | #! /usr/bin/env python
#
def kms ( alpha, m, n ):

#*****************************************************************************80
#
## KMS returns the KMS matrix, A(I,J) = ALPHA ^ abs ( I - J ).
#
#  Example:
#
#    ALPHA = 2, N = 5
#
#     1  2  4  8 16
#     2  1  2  4  8
#     4  2  1  2  4
#     8  4  2  1  2
#    16  8  4  2  1
#
#  Properties:
#
#    A is Toeplitz, symmetric, persymmetric and centrosymmetric.
#
#    A has an L*D*L' factorization; det ( A ) = ( 1 - ALPHA^2 )^(N-1).
#
#    The inverse of A is tridiagonal.
#
#    A is positive definite if and only if 0 < abs ( ALPHA ) < 1.
#
#    The family of matrices is nested as a function of N.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Author:
#
#    John Burkardt
#
#  Reference:
#
#    William Trench,
#    Numerical solution of the eigenvalue problem for Hermitian
#    Toeplitz matrices,
#    SIAM Journal on Matrix Analysis and Applications,
#    Volume 10, Number 2, April 1989, pages 135-146.
#
#  Parameters:
#
#    Input, real ALPHA, the scalar that defines A.
#    A typical value is 0.5.
#
#    Input, integer M, N, the number of rows and columns of A.
#
#    Output, real A(M,N), the matrix.
#
  import numpy as np
#
#  Vectorized form of the original double loop: build the exponent table
#  |i-j| once and raise ALPHA to it elementwise.  Python/numpy evaluate
#  0.0 ** 0 as 1.0, so the diagonal is 1 even when ALPHA is zero, which
#  reproduces the original's explicit special case.
#
  e = np.abs ( np.subtract.outer ( np.arange ( m ), np.arange ( n ) ) )
  a = float ( alpha ) ** e

  return a
def kms_determinant(alpha, n):

#*****************************************************************************80
#
## KMS_DETERMINANT computes the determinant of the KMS matrix.
#
#  The determinant has the closed form ( 1 - ALPHA^2 ) ^ ( N - 1 ),
#  which equals 1 for the 1 x 1 case.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Parameters:
#
#    Input, real ALPHA, the parameter.
#
#    Input, integer N, the order of the matrix.
#
#    Output, real VALUE, the determinant.
#
    # The original imported numpy here but never used it; removed.
    if n == 1:
        value = 1.0
    else:
        value = (1.0 - alpha * alpha) ** (n - 1)

    return value
def kms_determinant_test():

#*****************************************************************************80
#
## KMS_DETERMINANT_TEST exercises KMS_DETERMINANT on a random 5x5 matrix.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
    import platform
    from kms import kms
    from r8_uniform_ab import r8_uniform_ab
    from r8mat_print import r8mat_print

    print('')
    print('KMS_DETERMINANT_TEST')
    print(' Python version: %s' % (platform.python_version()))
    print(' KMS_DETERMINANT computes the KMS determinant.')

    # Draw a random parameter in [0, 1] from the standard seeded generator.
    m = 5
    n = m
    seed = 123456789
    alpha, seed = r8_uniform_ab(0.0, 1.0, seed)

    a = kms(alpha, m, n)
    r8mat_print(m, n, a, ' KMS matrix:')

    value = kms_determinant(alpha, n)
    print('')
    print(' Value = %g' % (value))

    # Terminate.
    print('')
    print('KMS_DETERMINANT_TEST')
    print(' Normal end of execution.')
    return
def kms_eigen_right(alpha, n):

#*****************************************************************************80
#
## KMS_EIGEN_RIGHT returns the right eigenvectors of the KMS matrix.
#
#  Column J is built from the angle T(J) produced by KMS_EIGENVALUES_THETA:
#
#    A(I,J) = sin( (I+1) * T(J) ) - ALPHA * sin( I * T(J) )
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Reference:
#
#    William Trench,
#    Spectral decomposition of Kac-Murdock-Szego matrices,
#    Unpublished technical report.
#
#  Parameters:
#
#    Input, real ALPHA, the parameter.
#    Eigenvalue computations require 0 <= ALPHA <= 1.
#
#    Input, integer N, the order of A.
#
#    Output, real A(N,N), the right eigenvector matrix.
#
    import numpy as np

    t = kms_eigenvalues_theta(alpha, n)

    # rows holds 1..n as a column vector, so rows * t broadcasts to the
    # full (n, n) grid of (i+1) * t[j] products.
    rows = np.arange(1, n + 1).reshape(n, 1)
    return np.sin(rows * t) - alpha * np.sin((rows - 1) * t)
def kms_eigenvalues(alpha, n):

#*****************************************************************************80
#
## KMS_EIGENVALUES returns the eigenvalues of the KMS matrix.
#
#  Each eigenvalue comes from an angle THETA(I) found by
#  KMS_EIGENVALUES_THETA:
#
#    LAM(I) = (1 - ALPHA^2) / ( 1 - 2 ALPHA cos(THETA(I)) + ALPHA^2 )
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Reference:
#
#    William Trench,
#    Spectral decomposition of Kac-Murdock-Szego matrices,
#    Unpublished technical document.
#
#  Parameters:
#
#    Input, real ALPHA, the scalar that defines A.
#    Eigenvalue computations require 0 <= ALPHA <= 1.
#
#    Input, integer N, the order of the matrix.
#
#    Output, real LAM(N), the eigenvalues.
#
    import numpy as np

    theta = kms_eigenvalues_theta(alpha, n)

    # Vectorized over the whole theta array at once.
    numerator = (1.0 + alpha) * (1.0 - alpha)
    denominator = 1.0 - 2.0 * alpha * np.cos(theta) + alpha * alpha
    return numerator / denominator
def kms_eigenvalues_theta ( alpha, n ):

#*****************************************************************************80
#
## KMS_EIGENVALUES_THETA returns data needed to compute KMS eigenvalues.
#
#  Discussion:
#
#    For each I, the I-th root of the characteristic function
#    KMS_EIGENVALUES_THETA_F is bracketed in the subinterval
#    ( I*pi/(N+1), (I+1)*pi/(N+1) ) and then refined by bisection.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    10 April 2015
#
#  Author:
#
#    John Burkardt
#
#  Reference:
#
#    William Trench,
#    Spectral decomposition of Kac-Murdock-Szego matrices,
#    Unpublished technical document.
#
#  Parameters:
#
#    Input, real ALPHA, the scalar that defines A.
#    Eigenvalue computations require 0 <= ALPHA <= 1.
#
#    Input, integer N, the order of the matrix.
#
#    Output, real T(N), the angles associated with
#    the eigenvalues.
#
  import numpy as np

  # Cap on bisection iterations per root.
  step_max = 100

  t = np.zeros ( n )

  for i in range ( 0, n ):
#
#  Avoid confusion in first subinterval, where f(0) = 0.
#
    if ( i == 0 ):
      xa = 0.0001
    else:
      xa = float ( i ) * np.pi / float ( n + 1 )
    fxa = kms_eigenvalues_theta_f ( alpha, n, xa )
    xb = float ( i + 1 ) * np.pi / float ( n + 1 )
    fxb = kms_eigenvalues_theta_f ( alpha, n, xb )
#
#  Orient the bracket so that f(xa) <= 0 <= f(xb); bisection below
#  relies on this sign convention when it shrinks the interval.
#
    if ( 0.0 < fxa ):
      temp = xa
      xa = xb
      xb = temp
      temp = fxa
      fxa = fxb
      fxb = temp
#
#  Plain bisection: halve the bracket until either the residual or the
#  interval width drops below 1.0e-7.
#
    for step in range ( 0, step_max ):
      xc = 0.5 * ( xa + xb )
      fxc = kms_eigenvalues_theta_f ( alpha, n, xc )
#
#  Return if residual is small.
#
      if ( abs ( fxc ) <= 0.0000001 ):
        break
#
#  Return if interval is small.
#
      if ( abs ( xb - xa ) <= 0.0000001 ):
        break

      if ( fxc < 0.0 ):
        xa = xc
        fxa = fxc
      else:
        xb = xc
        fxb = fxc

    # Last midpoint examined is taken as the root for subinterval i.
    t[i] = xc

  return t
def kms_eigenvalues_theta_f(alpha, n, t):

#*****************************************************************************80
#
## KMS_EIGENVALUES_THETA_F evaluates a function for KMS eigenvalues.
#
#  The roots of
#
#    f(t) = sin((N+1) t) - 2 ALPHA sin(N t) + ALPHA^2 sin((N-1) t)
#
#  supply the angles used by KMS_EIGENVALUES and KMS_EIGEN_RIGHT.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Reference:
#
#    William Trench,
#    Spectral decomposition of Kac-Murdock-Szego matrices,
#    Unpublished technical document.
#
#  Parameters:
#
#    Input, real ALPHA, the scalar that defines A.
#    Eigenvalue computations require 0 <= ALPHA <= 1.
#
#    Input, integer N, the order of the matrix.
#
#    Input, real T, an angle associated with the eigenvalue.
#
#    Output, real VALUE, the function value.
#
    import numpy as np

    nr = float(n)
    terms = (
        np.sin((nr + 1.0) * t),
        -2.0 * alpha * np.sin(nr * t),
        alpha * alpha * np.sin((nr - 1.0) * t),
    )
    return terms[0] + terms[1] + terms[2]
def kms_inverse(alpha, n):

#*****************************************************************************80
#
## KMS_INVERSE returns the inverse of the KMS matrix.
#
#  The inverse is symmetric and tridiagonal.  With D = ALPHA^2 - 1:
#
#    corner diagonal entries (1,1) and (N,N):  -1 / D
#    interior diagonal entries:                -(ALPHA^2 + 1) / D
#    sub- and super-diagonal entries:           ALPHA / D
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Reference:
#
#    William Trench,
#    Numerical solution of the eigenvalue problem for Hermitian
#    Toeplitz matrices,
#    SIAM Journal on Matrix Analysis and Applications,
#    Volume 10, Number 2, April 1989, pages 135-146.
#
#  Parameters:
#
#    Input, real ALPHA, the scalar that defines A.
#
#    Input, integer N, the order of A.
#
#    Output, real A(N,N), the matrix.
#
    import numpy as np

    denom = alpha * alpha - 1.0
    a = np.zeros((n, n))

    # Interior diagonal first, then overwrite the two corners.
    for k in range(n):
        a[k, k] = -(alpha * alpha + 1.0) / denom
    a[0, 0] = -1.0 / denom
    a[n - 1, n - 1] = -1.0 / denom

    # Symmetric off-diagonals.
    for k in range(n - 1):
        a[k, k + 1] = alpha / denom
        a[k + 1, k] = alpha / denom

    return a
def kms_plu(alpha, n):

#*****************************************************************************80
#
## KMS_PLU returns the PLU factors of the KMS matrix, so P * L * U = A.
#
#  P is the identity.  L is unit lower triangular with
#  L(I,J) = ALPHA^(I-J) for J <= I.  U is upper triangular with first
#  row ALPHA^J and, below that, U(I,J) = ALPHA^(J-I) * (1 - ALPHA^2).
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Reference:
#
#    William Trench,
#    Numerical solution of the eigenvalue problem for Hermitian
#    Toeplitz matrices,
#    SIAM Journal on Matrix Analysis and Applications,
#    Volume 10, Number 2, April 1989, pages 135-146.
#
#  Parameters:
#
#    Input, real ALPHA, the scalar that defines A.
#    A typical value is 0.5.
#
#    Input, integer N, the order of the matrix.
#
#    Output, real P(N,N), L(N,N), U(N,N), the PLU factors.
#
    import numpy as np

    # Powers of alpha computed by the same running product the original
    # recurrence used, so results match bit for bit.
    powers = np.empty(n)
    powers[0] = 1.0
    for k in range(1, n):
        powers[k] = alpha * powers[k - 1]

    p = np.eye(n)

    l = np.zeros((n, n))
    for i in range(n):
        for j in range(i + 1):
            l[i, j] = powers[i - j]

    u = np.zeros((n, n))
    scale = 1.0 - alpha * alpha
    for j in range(n):
        u[0, j] = powers[j]
        for i in range(1, j + 1):
            u[i, j] = powers[j - i] * scale

    return p, l, u
def kms_test():

#*****************************************************************************80
#
## KMS_TEST builds and prints a random 5x5 KMS matrix.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
    import platform
    from r8_uniform_ab import r8_uniform_ab
    from r8mat_print import r8mat_print

    print('')
    print('KMS_TEST')
    print(' Python version: %s' % (platform.python_version()))
    print(' KMS computes the KMS matrix.')

    # Random parameter in [0, 1] from the standard seeded generator.
    m = 5
    n = 5
    seed = 123456789
    alpha, seed = r8_uniform_ab(0.0, 1.0, seed)

    a = kms(alpha, m, n)
    r8mat_print(m, n, a, ' KMS matrix:')

    # Terminate.
    print('')
    print('KMS_TEST')
    print(' Normal end of execution.')
    return
if __name__ == '__main__':
    # Run the demonstration, bracketed by timestamps.
    from timestamp import timestamp
    timestamp()
    kms_test()
    timestamp()
| true |
c6ec3e6fb9ccdddcebaddf458e834d054e96233f | Python | mwhite/JSONAlchemy | /tests/test_jsonalchemy.py | UTF-8 | 18,743 | 2.53125 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import pytest
import random
import json
import re
import copy
import datetime
import dateutil.parser
from functools import partial
from decimal import Decimal
from sqlalchemy import *
from sqlalchemy import exc
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects import postgresql
from jsonalchemy import (CreateJSONView as _CreateJSONView,
InvalidJSONSchemaError, JSONSchemaConflict)
# All tests create views with replace=True by default; the raw DDL element
# is still reachable as _CreateJSONView for the no-replace test below.
CreateJSONView = partial(_CreateJSONView, replace=True)
def parse_date(string):
    """Parse an ISO-ish timestamp; 16-char stamps carry no zone, so add 'Z'."""
    if len(string) == 16:
        string += 'Z'
    return dateutil.parser.parse(string)
# JSON-schema property fixtures covering every supported column type.
# Each entry is the schema fragment handed to CreateJSONView; "enum"
# doubles as the pool of sample values the models fixture inserts, and
# the non-standard "_python_type" key records the Python type the view
# column is expected to come back as (object where no direct mapping
# exists, e.g. geopoint).
SCHEMAS = {
    'string': {
        "title": "A String",
        "type": "string",
        "enum": ["unicorns", "penguins", "pythons"],
        "_python_type": str
    },
    'decimal': {
        "title": "A Decimal",
        "type": "string",
        "format": "decimal",
        "enum": list(map(Decimal, ["1.0", "2.5", "10.99234234"])),
        "_python_type": Decimal
    },
    'float': {
        "title": "A Float",
        "type": "number",
        "enum": [1.0, 2.5, 10.99],
        "_python_type": float
    },
    'integer': {
        "title": "An Integer",
        "type": "integer",
        "enum": [1, 2, 3, 4],
        "_python_type": int
    },
    'boolean': {
        "title": "A Boolean",
        "type": "boolean",
        "enum": [True, False],
        "_python_type": bool
    },
    'datetime': {
        "title": "A Datetime",
        "type": "string",
        "format": "date-time",
        "enum": list(map(parse_date, [
            "2007-04-05T14:31Z",
            "2005-03-02T12:30-02:00",
            "2005-04-05T17:45Z"
        ])),
        "_python_type": datetime.datetime
    },
    'datetime-no-tz': {
        "title": "A Datetime with no timezone",
        "type": "string",
        "format": "date-time",
        "enum": list(map(parse_date, [
            "2007-04-05T14:31",
            "2005-03-02T12:30",
            "2005-04-05T17:45"
        ])),
        "_python_type": datetime.datetime
    },
    'date': {
        "title": "A Date",
        "type": "string",
        "format": "date",
        "enum": list(map(lambda s: parse_date(s).date(), [
            "2007-04-05",
            "2005-03-02",
            "2005-04-05"
        ])),
        "_python_type": datetime.date
    },
    # 'time' support is not exercised yet; kept for reference.
    #'time': {
        #"title": "A Time",
        #"type": "string",
        #"format": "time",
        #"enum": ["14:31Z", "12:30-02:00", "17:45"]
    #},
    'geopoint': {
        "title": "A Geopoint",
        "type": "string",
        "format": "geopoint",
        "enum": [
            "-71.1043443253471, 42.3150676015829",
            "-72.1043443253471, 43.3150676015829",
            "-70.1043443253471, 44.3150676015829",
        ],
        "_python_type": object # doesn't map back to a python type
    }
}
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that serializes Decimals and dates/datetimes as strings."""

    def default(self, obj):
        # Dates and datetimes become ISO-8601 strings; Decimals keep their
        # exact textual representation.  Everything else defers to the base
        # class (which raises TypeError for unsupported types).
        if isinstance(obj, (datetime.datetime, datetime.date)):
            return obj.isoformat()
        if isinstance(obj, Decimal):
            return str(obj)
        return super(JSONEncoder, self).default(obj)
@pytest.fixture(scope="module")
def engine(request):
    """Module-scoped SQLAlchemy engine for the local test database."""
    eng = create_engine(
        'postgresql://postgres:postgres@localhost/jsonalchemy_test',
        # echo=True
    )
    # Dispose the connection pool once the module's tests finish.
    request.addfinalizer(eng.dispose)
    return eng
@pytest.fixture(scope="module")
def models(engine):
    # Declarative models: a multi-tenant "forms" table whose payload lives
    # in a JSONB column, plus lookup tables for tenant and form type.
    Base = declarative_base()

    class Tenant(Base):
        __tablename__ = 'tenants'
        id = Column(Integer, primary_key=True)
        name = Column(String(200))

    class FormType(Base):
        __tablename__ = 'form_types'
        id = Column(Integer, primary_key=True)
        name = Column(String(200))

    class Form(Base):
        __tablename__ = 'forms'
        id = Column(Integer, primary_key=True)
        tenant_id = Column(Integer, ForeignKey('tenants.id'), index=True)
        tenant = relationship(Tenant, backref='forms')
        type_id = Column(Integer, ForeignKey('form_types.id'), index=True)
        type = relationship(FormType, backref='forms')
        data = Column(postgresql.JSONB)

    # Plain namespace object so tests can write models.Form etc.
    class foo(object):
        pass
    models = foo()
    models.Tenant = Tenant
    models.FormType = FormType
    models.Form = Form

    # Rebuild the schema from scratch; PostGIS is needed for the geopoint
    # tests.
    engine.execute("CREATE EXTENSION IF NOT EXISTS postgis")
    engine.execute("DROP TABLE IF EXISTS forms CASCADE")
    Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)

    engine.execute("INSERT INTO tenants (name) VALUES ('mike'), ('bob')")
    engine.execute("INSERT INTO form_types (name) VALUES ('type 1'), ('type 2'), "
        "('type 3')")

    # test data set 1: all types, no nulls, no nesting
    data = []
    for i in range(10):
        data.append(dict((k, random.choice(v['enum'])) for k, v in
            SCHEMAS.items()))

    # ('foo' below is the comprehension variable, not the namespace class;
    # in Python 3 it does not leak out of the comprehension.)
    engine.execute("INSERT INTO forms (tenant_id, type_id, data) VALUES " +
        ",".join(["(1, 1, '%s')" % json.dumps(
            foo, cls=JSONEncoder)
            for foo in data]))

    # test data set 2: nesting, some missing, and some nulls!
    json_data = []
    for i in range(10000):
        data = {}
        if random.choice([True, False]):
            data['foo'] = {
                'bar': random.choice([1, 2, 5, None])
            }
        if random.choice([True, False]):
            data['eggs'] = {
                'spam': random.choice(["asdf", "hjkl", "baz"])
            }
        json_data.append(data)

    engine.execute("INSERT INTO forms (tenant_id, type_id, data) VALUES " +
        ",".join(["(1, 2, '%s')" % json.dumps(data, cls=JSONEncoder)
            for data in json_data]))

    # test data set 3: array of objects
    json_data = []
    for i in range(100):
        data = {
            'id': i,
            'array': [
                {
                    'baz': random.choice(range(4)),
                    'quux': random.choice([True, False])
                },
                {
                    'baz': random.choice(range(4)),
                    'quux': random.choice([True, False])
                }
            ]
        }
        json_data.append(data)

    engine.execute("INSERT INTO forms (tenant_id, type_id, data) VALUES " +
        ",".join(["(1, 3, '%s')" % json.dumps(data, cls=JSONEncoder)
            for data in json_data]))

    return models
@pytest.fixture(scope="module")
def session(engine):
    """A single Session bound to the module-scoped engine."""
    Session = sessionmaker(bind=engine)
    return Session()
def test_basic_types(session, models):
    """Every schema type maps to the expected Python type in a flat view."""
    query = session.query(models.Form)\
        .filter(models.Form.tenant_id == 1, models.Form.type_id == 1)
    session.execute(CreateJSONView('foo', query, models.Form.data, {
        'type': 'object',
        'properties': SCHEMAS
    }))

    rows = list(session.execute('SELECT * from foo'))
    assert rows
    # One column per schema property, plus the two passthrough columns.
    assert len(rows[0]) == len(SCHEMAS) + 2

    for row in rows:
        for key, value in row.items():
            if key in ('forms_id', 'forms_data'):
                continue
            prop = key.split('.')[1]
            expected_type = SCHEMAS[prop]['_python_type']
            assert isinstance(value, expected_type)
            if expected_type != object:
                assert value in SCHEMAS[prop]['enum']
def test_date_part_columns_are_created(session, models):
    # extract_date_parts should add one numeric column per requested part
    # ("data.datetime_year" etc.) alongside the datetime column itself.
    q = session.query(models.Form)\
        .filter(models.Form.tenant_id == 1, models.Form.type_id == 1)
    create_view = CreateJSONView('foo', q, models.Form.data, {
        'type': 'object',
        'properties': {
            'datetime': SCHEMAS['datetime']
        }
    }, extract_date_parts=['year', 'month', 'day'])
    session.execute(create_view)

    #result = list(session.execute(
        #'SELECT "data.datetime_year", "data.datetime_month", '
        #'"data.datetime_day" FROM foo'))
    result = list(session.execute("""SELECT
        "data.datetime_year", "data.datetime_month", "data.datetime_day"
        FROM foo"""))

    # Every stored sample datetime (see SCHEMAS['datetime']) falls in 2005
    # or 2007, months March/April, days 2 or 5; the parts come back as
    # floats from Postgres' date_part.
    assert all(
        r["data.datetime_year"] in map(float, [2005, 2007]) and
        r["data.datetime_month"] in map(float, [3, 4]) and
        r["data.datetime_day"] in map(float, [2, 5]) for r in result)
def test_array_of_objects(session, models):
    # An array-of-objects property should produce a child view
    # ('foobar_array') joinable to the parent view on parent_id, backed by
    # a materialized view when use_materialized_view=True.
    q = session.query(models.Form)\
        .filter(models.Form.tenant_id == 1, models.Form.type_id == 3)
    session.execute('set search_path to "$user", public')
    create_view = CreateJSONView('foobar', q, models.Form.data, {
        'type': 'object',
        'id_property': 'id',
        'properties': {
            'id': {
                'type': 'integer'
            },
            'array': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': {
                        'baz': {
                            'type': 'integer'
                        },
                        'quux': {
                            'type': 'boolean'
                        }
                    }
                }
            }
        }
    }, use_materialized_view=True)
    session.execute(create_view)
    session.commit()

    # Both regular views and the backing materialized view must exist.
    assert list(session.execute("""SELECT definition FROM pg_views where
        viewname='foobar_array'"""))[0][0]
    assert list(session.execute("""SELECT definition FROM pg_views where
        viewname='foobar'"""))[0][0]
    assert list(session.execute("""SELECT definition FROM pg_matviews where
        matviewname='forms_array_json'"""))[0][0]

    results = list(session.execute("""
    SELECT foobar."data.id", foobar_array."array.baz",
        foobar_array."array.quux"
    FROM foobar JOIN foobar_array ON foobar."data.id" = foobar_array.parent_id
    """))

    # 100 parent rows x 2 array elements each (see the models fixture).
    assert len(results) == 100 * 2
    for result in results:
        assert result[0] in range(100)
        assert result[1] in range(5)
        assert result[2] in [True, False]
def test_nested_data_with_nulls(session, models):
    """
    Tests nested data, and both types of nulls:
    - where there was no value for that property in the JSON
    - where the value for that property was null
    """
    # Two nested single-property objects; the type-2 fixture rows randomly
    # omit either object and sometimes store an explicit null for 'bar'.
    schema = {
        'type': 'object',
        'properties': {
            'foo': {
                'type': 'object',
                'properties': {
                    'bar': {
                        'type': 'integer'
                    }
                }
            },
            'eggs': {
                'type': 'object',
                'properties': {
                    'spam': {
                        'type': 'string'
                    }
                }
            }
        }
    }
    q = session.query(models.Form)\
        .filter(models.Form.tenant_id == 1, models.Form.type_id == 2)
    create_view = CreateJSONView('foo2', q, models.Form.data, schema)
    session.execute(create_view)

    result = list(session.execute('SELECT * from foo2'))
    assert len(result)
    # Two schema columns plus the two passthrough columns.
    assert len(result[0]) == 2 + 2

    # Each column must show both NULLs and correctly typed values, and
    # never anything else.
    assert any(r['data.foo.bar'] is None for r in result)
    assert any(isinstance(r['data.foo.bar'], int) for r in result)
    assert any(r['data.eggs.spam'] is None for r in result)
    assert any(isinstance(r['data.eggs.spam'], str) for r in result)
    assert all(r['data.foo.bar'] is None or \
        isinstance(r['data.foo.bar'], int) for r in result)
    assert all(r['data.eggs.spam'] is None or \
        isinstance(r['data.eggs.spam'], str) for r in result)
def _test_quantifiers(schema, models, session):
    """Build a view from *schema* and check both columns come back typed.

    Shared body for the allOf / anyOf / oneOf tests; the view name is
    derived from the schema's identity so each caller gets its own view.
    """
    query = session.query(models.Form)\
        .filter(models.Form.tenant_id == 1,
            models.Form.type_id == 1)
    view_name = 'foo_%s' % (id(schema))
    session.execute(CreateJSONView(view_name, query, models.Form.data, schema))

    rows = list(session.execute("SELECT * from %s" % view_name))
    assert rows
    for row in rows:
        assert len(row) == 2 + 2
        assert isinstance(row['data.string'], str)
        assert isinstance(row['data.decimal'], Decimal)
# Two single-property sub-schemas combined by the allOf / oneOf / anyOf
# tests below; together they should yield one string and one decimal column.
QUANTIFIER_SCHEMAS = [
    {
        "properties": {
            "string": SCHEMAS['string']
        }
    },
    {
        "properties": {
            "decimal": SCHEMAS['decimal']
        }
    }
]
def test_jsonschema_all_of(session, models):
    # "allOf" sub-schemas should be merged into a single set of columns.
    schema = {"type": "object", "allOf": QUANTIFIER_SCHEMAS}
    _test_quantifiers(schema, models, session)
def test_jsonschema_one_of(session, models):
    # "oneOf" sub-schemas should be merged into a single set of columns.
    schema = {"type": "object", "oneOf": QUANTIFIER_SCHEMAS}
    _test_quantifiers(schema, models, session)
def test_jsonschema_any_of(session, models):
    # "anyOf" sub-schemas should be merged into a single set of columns.
    schema = {"type": "object", "anyOf": QUANTIFIER_SCHEMAS}
    _test_quantifiers(schema, models, session)
def test_conflicting_schema_properties(session, models):
    """Two sub-schemas typing the same property differently must be rejected."""
    schema = {
        "type": "object",
        "oneOf": [
            {"properties": {"boolean": SCHEMAS['boolean']}},
            {"properties": {"boolean": SCHEMAS['string']}},
        ],
    }
    # Both construction and execution stay inside the raises block; the
    # conflict may be detected at either point.
    with pytest.raises(JSONSchemaConflict):
        session.execute(CreateJSONView(None, None, None, schema))
def test_invalid_schema_property_types(session, models):
    """Property schemas with unusable types must raise InvalidJSONSchemaError.

    The original repeated the same try/execute pattern three times; the
    variants are now data-driven.
    """
    invalid_property_schemas = [
        {"type": "any"},                   # unsupported type name
        {"type": ["string", "integer"]},   # union types are unsupported
        {},                                # no type at all
    ]
    for prop_schema in invalid_property_schemas:
        schema = {
            "type": "object",
            "properties": {
                "foo": prop_schema
            }
        }
        with pytest.raises(InvalidJSONSchemaError):
            create_view = CreateJSONView(None, None, None, schema)
            session.execute(create_view)
def test_create_json_view_returns_table_columns(session, models):
    # After execution the DDL element should expose per-column metadata
    # (path / enum / title) usable for downstream schema generation.
    q = session.query(models.Form)\
        .filter(models.Form.tenant_id == 1, models.Form.type_id == 1)
    create_view = CreateJSONView('foo', q, models.Form.data, {
        'type': 'object',
        'properties': SCHEMAS
    })
    session.execute(create_view)

    # I'm lazy.
    columns = create_view.columns
    paths = [c.path for c in columns]
    enums = [c.enum for c in columns]
    titles = [c.title for c in columns]

    assert len(columns) == len(SCHEMAS) # + 1. Leaving out primary key
                                        # because it's not useful for BI
                                        # schema generation, which is what
                                        # this test is aimed at
    # Paths, enums and titles must each be unique across columns; enum
    # lists are converted to tuples so they can be put in a set.
    assert len(paths) == len(set(paths))
    assert len(enums) == len(set(
        [tuple(e) if isinstance(e, list) else e for e in enums]))
    assert len(titles) == len(set(titles))
def test_can_change_type_of_column_in_existing_view(session, models):
    """Re-creating a view with a changed column type succeeds (replace=True)."""
    query = session.query(models.Form)\
        .filter(models.Form.tenant_id == 1, models.Form.type_id == 1)
    session.execute(CreateJSONView('foo', query, models.Form.data, {
        'type': 'object',
        'properties': SCHEMAS
    }))

    # Same view name, but the 'string' property is retyped to integer.
    altered = copy.deepcopy(SCHEMAS)
    altered['string']['type'] = 'integer'
    session.execute(CreateJSONView('foo', query, models.Form.data, {
        'type': 'object',
        'properties': altered
    }))
def test_cant_replace_view_without_using_replace(session, models):
    """Without replace=True, re-creating an existing view is a SQL error."""
    query = session.query(models.Form)\
        .filter(models.Form.tenant_id == 1, models.Form.type_id == 1)
    view_schema = {
        'type': 'object',
        'properties': SCHEMAS
    }
    session.execute(CreateJSONView('foo', query, models.Form.data,
                                   view_schema))

    with pytest.raises(exc.ProgrammingError):
        try:
            # _CreateJSONView is the raw element without replace=True.
            session.execute(_CreateJSONView('foo', query, models.Form.data,
                                            view_schema))
        finally:
            # Leave the session usable for subsequent tests.
            session.rollback()
@pytest.mark.xfail
def test_partial_index_creation(session, models):
    # This will fail (no index-only scans, just index scans) until this
    # Postgres issue is fixed:
    # http://postgresql.1045698.n5.nabble.com/No-Index-Only-Scan-on-Partial-Index-td5773024.html
    q = session.query(models.Form)\
        .filter(models.Form.tenant_id == 1, models.Form.type_id == 1)
    create_view = CreateJSONView('foo', q, models.Form.data, {
        'type': 'object',
        'properties': SCHEMAS
    })
    session.execute(create_view)

    indexes = []
    for k, v in SCHEMAS.items():
        # EXPLAIN each single-column select and scrape the name of the
        # index the planner used from the first plan line.
        result = list(session.execute(
            'EXPLAIN ANALYZE SELECT "%s" from foo' % ("data." + k)))
        matchobj = re.search('Index Only Scan using (.+?) on', result[0][0])
        indexes.append(matchobj.group(1))

    # A distinct partial index should back every JSON property column.
    assert len(indexes) == len(set(indexes))
def test_old_partial_index_deletion(session, models):
    # TODO: unimplemented placeholder -- should assert that indexes
    # belonging to a replaced view definition are dropped.
    pass
def test_disabling_index_creation(session, models):
    # TODO: unimplemented placeholder -- should assert that no indexes are
    # created when index creation is disabled.
    pass
def test_postgis_json(session, models):
    # "geopoint"-formatted string properties should become PostGIS geometry
    # columns usable in spatial predicates such as ST_Within.
    schema = {
        'type': 'object',
        'properties': {
            'geopoint': {
                "title": "A Geopoint",
                "type": "string",
                "format": "geopoint",
            }
        }
    }
    q = session.query(models.Form)\
        .filter(models.Form.tenant_id == 1,
            models.Form.type_id == 1)
    view_name = 'foo_%s' % (id(schema))
    create_view = CreateJSONView(view_name, q, models.Form.data, schema)
    session.execute(create_view)

    # check that the postgis function casting and index is working by getting
    # count of points within a bounding box that does not include all points
    within_count = list(session.execute("""
    SELECT COUNT(*) FROM %s
    WHERE ST_Within(ST_SetSRID("data.geopoint", 4326),
        ST_GeometryFromText(
            'POLYGON((-72 42, -72 43, -71 43, -71 42, -72 42))', 4326))
    """ % view_name))[0][0]
    all_count = list(session.execute("SELECT COUNT(*) FROM %s" % view_name))[0][0]

    # Some, but not all, of the seeded points fall inside the box.
    assert 0 < within_count < all_count
| true |
bd3ab04d1aaa6bac16dc8b8963e0db235dc47e06 | Python | TinFe/sorting_algos | /Merge Sort.py | UTF-8 | 2,968 | 4.09375 | 4 | [] | no_license |
#convert input list into a list of lists of 2 numbers (with one list of one number if input contains an odd number of items) to be sorted later.
def initial_divide(lst):
#set an index variable to 0
z = 0
#create empty output list
out = []
#traverse the input list
while z < len(lst) - 1:
#append each item of input as a new array in the output
out.append([lst[z]])
z += 1
return out
#collates two sorted lists into one sorted list
def collate(lst0, lst1):
    """Merge two sorted lists into one sorted list."""
    merged = []
    i = j = 0
    # Standard two-pointer merge: take the smaller head each step.
    while i < len(lst0) and j < len(lst1):
        if lst0[i] <= lst1[j]:
            merged.append(lst0[i])
            i += 1
        else:
            merged.append(lst1[j])
            j += 1
    # One of the two lists is exhausted; append the remainder of the other.
    merged.extend(lst0[i:])
    merged.extend(lst1[j:])
    return merged
#takes a list of any number of sorted lists, collates them all together and returns one sorted list.
def final_col(lst):
    """
    Repeatedly merge a list of sorted runs pairwise until one run remains,
    and return that fully merged (sorted) list.

    Fixes the original infinite loop: with zero or one run the old code
    never entered its merge loop yet never hit its ``len(out) == 1`` exit,
    spinning forever.
    """
    if not lst:
        return []
    runs = lst
    while len(runs) > 1:
        # An odd run count folds the first two runs together first so the
        # rest can be paired evenly.
        if len(runs) % 2 != 0:
            runs[0] = collate(runs[0], runs[1])
            del runs[1]
        merged_pass = []
        for k in range(0, len(runs), 2):
            merged_pass.append(collate(runs[k], runs[k + 1]))
        runs = merged_pass
    return runs[0]
lst = initial_divide(lst)
lst = final_col(lst)
return lst
| true |
d828f624e33f71758b68d8fab5f8db14c4cece01 | Python | mramachandrappa/AWS-Workspace-Reboot | /workspace_maintenance.py | UTF-8 | 4,743 | 2.859375 | 3 | [] | no_license | import boto3
from httplib2 import Http
import json
import time
class Workspaces:
    """Thin wrapper around the AWS WorkSpaces API, authenticated via STS.

    Improvements over the original:
    - get_workspace pairs each workspace ID with its state directly instead
      of building two parallel lists, zipping them, and substring-matching
      "UNHEALTHY" against both tuple members.
    - reboot_workspace replaces the obscure ``while i in range(0, 5)``
      counter (which actually ran 4 polls) with an explicit ``range(4)``.
    """

    def __init__(self):
        # Assume the maintenance role and build a WorkSpaces client from
        # the temporary credentials it returns.
        sts_client = boto3.client('sts')
        assumed_role_object = sts_client.assume_role(
            RoleArn="<ROLE_ARN>",
            RoleSessionName="AssumeRoleSession1"
        )
        credentials = assumed_role_object['Credentials']
        self.connect = boto3.client(
            'workspaces',
            aws_access_key_id=credentials['AccessKeyId'],
            aws_secret_access_key=credentials['SecretAccessKey'],
            aws_session_token=credentials['SessionToken'],
        )
        self.paginator = self.connect.get_paginator('describe_workspaces')

    def get_workspace(self, directory_id):
        """Return the IDs of all workspaces in *directory_id* whose state
        is UNHEALTHY."""
        unhealthy_workspaces = []
        page_response = self.paginator.paginate(
            DirectoryId=directory_id,
            PaginationConfig={
                'MaxItems': 300,
                'PageSize': 25
            }
        )
        for response in page_response:
            for workspace in response["Workspaces"]:
                if workspace.get("State") == "UNHEALTHY":
                    unhealthy_workspaces.append(workspace["WorkspaceId"])
        return unhealthy_workspaces

    def reboot_workspace(self, unhealthy_workspaces):
        """Reboot every workspace in *unhealthy_workspaces*, then poll
        (up to 4 times, 30 s apart) until all of them are AVAILABLE.

        Returns True if every workspace came back AVAILABLE, else False.
        """
        for workspace_id in unhealthy_workspaces:
            self.connect.reboot_workspaces(RebootWorkspaceRequests=[
                {
                    'WorkspaceId': workspace_id
                },
            ])
        # Give the reboots a head start before the first status check.
        time.sleep(30)

        for _attempt in range(4):
            states = []
            for response in self.paginator.paginate(
                    WorkspaceIds=unhealthy_workspaces):
                for workspace in response["Workspaces"]:
                    state = workspace.get("State")
                    print("status", state)
                    states.append(state)
            if states.count("AVAILABLE") == len(unhealthy_workspaces):
                return True
            time.sleep(30)
        return False

    def athens_bot(self, message, url):
        """POST *message* as a simple text card to the Hangouts Chat
        webhook at *url* and return the httplib2 response."""
        bot_message = {
            "text": message
        }
        message_headers = {'Content-Type': 'application/json; charset=UTF-8'}
        http_obj = Http()
        response = http_obj.request(
            uri=url,
            method='POST',
            headers=message_headers,
            body=json.dumps(bot_message),
        )
        return response
def main():
    """Find UNHEALTHY workspaces, reboot them, and report status to chat."""
    workspaces = Workspaces()
    directory_id = "<DIRECTORY-ID>"
    unhealthy = workspaces.get_workspace(directory_id)
    if not unhealthy:
        print("No UNHEALTHY Workspaces found!")
        return

    ids = str(unhealthy)
    url = "<HANGOUT_WEBHOOK_URL>"
    header = "*Athens Workspace Notification:*\n\n"
    # Chat messages use Hangouts markdown; text is identical to the
    # original byte for byte.
    message = (header
               + "```Status : UNHEALTHY\n"
               + "WorkspaceIDs : " + ids + "```\n"
               + "*_Rebooting Workspaces..._*\n")
    success_message = (header
                       + "*_UNHEALTHY Workspaces are Successfully Rebooted!_*"
                       + "```Status : AVAILABLE\n"
                       + "WorkspaceIDs : " + ids + "```\n")
    failure_message = (header
                       + "*_Some of UNHEALTHY workspaces are not coming to AVAILABLE state. Please check!_*"
                       + "```WorkspaceIDs : " + ids + "```\n")

    workspaces.athens_bot(message, url)
    if workspaces.reboot_workspace(unhealthy) is True:
        workspaces.athens_bot(success_message, url)
    else:
        workspaces.athens_bot(failure_message, url)
if __name__ == "__main__":
main()
| true |
8f98f98d23c036c50b097ffe60c4c5248665ad78 | Python | ahchin1996/Deep_Learning_Project | /ANN/Week_prediction.py | UTF-8 | 6,003 | 2.890625 | 3 | [] | no_license | import numpy as np
import pandas as pd
import sklearn
import matplotlib.pyplot as mat
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import roc_auc_score
from sklearn import preprocessing
from keras import layers, optimizers, models
from sklearn.preprocessing import LabelEncoder
# Load the raw census data and clean special characters out of it.
# (Original comments were in Chinese; translated to English.)
train_data =pd.read_csv('D:/Python/ML_report1/adult.data',sep=" ", header=None)
train_data = train_data.replace({'\$': '', ',': '','\.':'','<=50K':'1','>50K':'0'}, regex=True) # strip '$', ',' and '.' and binarize the income label
train_data.columns = ['age','workclass','fnlwgt','education','education-num','marital-status','occupation','relationship','race','sex','capital-gain','capital-loss','hours-per-week','native-country','label']
test_data =pd.read_csv('D:/Python/ML_report1/adult.test',sep=" ", header=None)
test_data = test_data.replace({'\$': '', ',': '','<=50K.':'1','>50K.':'0'}, regex=True) # strip '$' and ',' and binarize the income label
test_data.columns = ['age','workclass','fnlwgt','education','education-num','marital-status','occupation','relationship','race','sex','capital-gain','capital-loss','hours-per-week','native-country','label']
# Concatenate train and test so preprocessing is applied uniformly.
data_1 = train_data
data_1=data_1.append(test_data)
#data_1
# Fill missing values: '?' becomes NaN, numeric columns take the column
# mean, and any remaining NaNs take the column mode.
data_1.replace('?', np.nan, inplace=True)
data_1=data_1.fillna(data_1.mean())
data_1 = data_1.apply(lambda x:x.fillna(x.value_counts().index[0]))
#data_1
# Cast the numeric columns (and the binarized label) to int.
data_1[['age','fnlwgt','education-num','capital-gain','capital-loss','hours-per-week','label'] ]=data_1[['age','fnlwgt','education-num','capital-gain','capital-loss','hours-per-week','label']].astype(str).astype(int)
# Split out the categorical columns (for one-hot encoding) and the target
# ('hours-per-week').
data_cat = data_1[['workclass','education','marital-status','occupation','relationship','race','sex','native-country']]
data_hours = data_1[['hours-per-week']]
# One-hot encode the categorical columns.
data_cat = pd.get_dummies(data_cat)
#data_cat
#newdata = data_1.drop(['hours-per-week','workclass','education','marital-status','occupation','relationship','race','sex','native-country'],axis=1).join(data_cat,how='left')
#newdata
# Merge the remaining numeric columns with the one-hot encoded ones.
newdata1 = data_1.drop(['workclass','education','marital-status','occupation','relationship','race','sex','native-country'],axis=1)
newdata_merge = pd.concat([newdata1,data_cat],axis=1).reindex(data_1.index)
#newdata_merge
# Min-max normalize every column to [0, 1].
min_max_scaler = preprocessing.MinMaxScaler()
np_scaled = min_max_scaler.fit_transform(newdata_merge)
data_norm= pd.DataFrame(np_scaled, columns = newdata_merge.columns)
data_norm.head()
#test_data=newdata_merge.iloc[32561:]
#test_data
#train_data=newdata_merge.iloc[:32561]
#train_data
test_data1=data_norm.iloc[32561:] # rows from 32561 on are the original test set
test_data1
train_data1=data_norm.iloc[:32561] # first 32561 rows are the original training set
train_data1
train_true=data_hours.iloc[:32561] # training-set target (hours-per-week)
#train_true
test_true=data_hours.iloc[32561:] # test-set target (hours-per-week)
#test_true
from keras import layers, optimizers, models
from sklearn.preprocessing import LabelEncoder
# NOTE(review): 'label' (income) remains among the features after dropping
# only 'hours-per-week' -- confirm that is intended.
X_train1 = train_data1.drop('hours-per-week', axis=1)
y_train1 = train_true
X_test1 = test_data1.drop('hours-per-week', axis=1)
y_test1 = test_true
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers.core import Dense , Dropout
from keras import regularizers
from keras.callbacks import EarlyStopping
#dense指最普通的全連接層型態
#dropout減少節點 通常設0.2~0.5 最高不會超過0.5
#2層(最後一層ouput不算)
early_stopping = EarlyStopping(monitor='val_loss', patience=200,restore_best_weights=True)
model = models.Sequential()
model.add(layers.Dense(16, input_shape=(X_train1.shape[1],), activation="relu"))
model.add(layers.Dropout(0.30)) #d砍上一層的節點
model.add(layers.Dense(8, activation="relu"))
model.add(layers.Dropout(0.30))
model.add(layers.Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train1, y_train1, validation_split=0.2, epochs=1000000, batch_size=128,callbacks=[early_stopping])
loss, accuracy = model.evaluate(X_test1, y_test1)
print("Test Acc : " + str(accuracy))
print("Test Loss : " + str(loss))
y_pred1 = model.predict(X_test1)
print('MSE為:',mean_squared_error(y_test1,y_pred1))
#print('MSE為(直接计算):',np.mean((y_test-y_pred)**2))
print('RMSE為:',np.sqrt(mean_squared_error(y_test1,y_pred1)))
# from sklearn.metrics import mean_squared_error
# from keras.models import Sequential
# from keras.layers.core import Dense , Dropout
# from keras import regularizers
# from keras.callbacks import EarlyStopping
#
# #dense指最普通的全連接層型態
# #dropout減少節點 通常設0.2~0.5 最高不會超過0.5
# #2層(最後一層ouput不算)
#
#
# early_stopping = EarlyStopping(monitor='val_loss', patience=200,restore_best_weights=True)
# model = models.Sequential()
# model.add(layers.Dense(64, input_shape=(X_train1.shape[1],), activation="relu"))
# model.add(layers.Dropout(0.30)) #d砍上一層的節點
# model.add(layers.Dense(32, activation="relu"))
# model.add(layers.Dropout(0.30))
# model.add(layers.Dense(16, activation="relu"))
# model.add(layers.Dropout(0.30))
# model.add(layers.Dense(8, activation="relu"))
# model.add(layers.Dropout(0.30))
# model.add(layers.Dense(1))
#
# model.compile(loss='mean_squared_error', optimizer='rmsprop', metrics=['accuracy'])
#
# history = model.fit(X_train1, y_train1, validation_split=0.2, epochs=1000000, batch_size=128,callbacks=[early_stopping])
#
# loss, accuracy = model.evaluate(X_test1, y_test1)
# print("Test Acc : " + str(accuracy))
# print("Test Loss : " + str(loss))
# y_pred = model.predict(X_test1)
# print('MSE為:',mean_squared_error(y_test1,y_pred))
# #print('MSE為(直接计算):',np.mean((y_test-y_pred)**2))
# print('RMSE為:',np.sqrt(mean_squared_error(y_test1,y_pred))) | true |
c0db9dc4d966b6e58f993909d90f94e569af416d | Python | 7thCode/streamlit | /st_base.py | UTF-8 | 1,002 | 3.3125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import streamlit as st
def main():
# タイトル
st.title('Application title.')
# ヘッダ
st.header('Header...')
# 純粋なテキスト
st.text('HOGE text')
# サブレベルヘッダ
st.subheader('Sub header')
# マークダウンテキスト
st.markdown('**Markdown is available **')
# LaTeX テキスト
st.latex(r'\bar{X} = \frac{1}{N} \sum_{n=1}^{N} x_i')
# コードスニペット
st.code('print(\'Hello, World!\')')
# エラーメッセージ
st.error('Error message1')
# 警告メッセージ
st.warning('Warning message')
# 情報メッセージ
st.info('Information message')
# 成功メッセージ
st.success('Success message')
# 例外の出力
st.exception(Exception('Oops!'))
# 辞書の出力
d = {
'foo': 'bar',
'users': [
'alice',
'bob',
],
}
st.json(d)
if __name__ == '__main__':
main() | true |
4de0eb944e81d943f43d62682fa12c10359b8a05 | Python | lczm/faceface | /model.py | UTF-8 | 6,851 | 2.71875 | 3 | [] | no_license | import keras
import os
import numpy as np
import tensorflow as tf
import face_recognition
from multiprocessing import Pool, cpu_count
from pprint import pprint
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
# from keras.layers import Conv1D, Activation, MaxPool1D, Flatten, Dense, Dropout
from keras.layers import Activation, Dense, Dropout
from keras.models import Sequential
from keras.utils import to_categorical
'''
This file will attach each of the image from the folder that the
'clean.py' file has completed
Attaching that to its individual label and returning a model to an interface
that can be used
'''
def extract_substring(dir_str):
'''
This function will return the substring
after the last \ in a directory string.
'''
# print('dir_str', dir_str)
index_list = []
for letter in dir_str:
# windows
# if letter == str('\\'):
# linux
# if letter == str('/'):
# index_list.append(dir_str.index(letter))
# updated version
for i in range(len(dir_str)):
if dir_str[i] == '/':
index_list.append(i)
filtered = dir_str[index_list[len(index_list)-1]:]
# pop out the first letter
filtered = filtered[1:]
return filtered
# Encodes the image at the given path
# Returns the encoding for the image
def encode_image(path):
print("encoding : ", path, flush=True)
# open the iamge and encode it, afterwards
# attaching it to the image_to_label_map
load_image = face_recognition.load_image_file(path)
encoding = face_recognition.face_encodings(load_image)
# print(len(encoding), flush=True)
if len(encoding) != 1:
return -1
return encoding
def load_data(directory):
'''
directory here refers to the directory where the images can be found for training
'''
# getting the folder_list and the labels as length of folder list
folder_list = os.listdir(directory)
total_labels = len(folder_list)
'''
label_map = {
1 : 'label',
2 : 'label',
'label' : 1,
'label' : 2,
...
}
'''
label_map = {}
# for i, label in enumerate(folder_list, start=1):
# label_map[i] = label
for i, label in enumerate(folder_list, start=0):
label_map[label] = i
# label_map[i] = label
encoding_sequence = []
label_sequence = []
# compile dataset image paths
image_paths = []
for root, dirs, files in os.walk(os.path.join(directory)):
for file in files:
image_paths.append(os.path.join(root, file))
# Extract label for image at path
# label = label_map[extract_substring(root)]
# label_sequence.append(label)
label_sequence.append(extract_substring(root))
proc_pool = Pool(cpu_count())
encoding_sequence = proc_pool.map(encode_image, image_paths)
clean_encoding = []
clean_label = []
# print('test')
# for i in range(len(encoding_sequence)):
# print(encoding_sequence[i])
# clean the list of encodings, labels again
# at this stage, the len(encoding_sequence) and len(label_sequence) are the same
if len(encoding_sequence) == len(label_sequence):
for i in range(len(encoding_sequence)):
# if len(encoding_sequence[i]) == 1:
if encoding_sequence[i] == -1:
pass
else:
# [0] because face_encodings returns a list and we only take the first one
clean_encoding.append(encoding_sequence[i][0])
clean_label.append(label_sequence[i])
else:
print('something went wrong')
return 1
# convert the encoding and label sequences to a numpy array before splittin
# them up and passing them into the fit function
int_clean_label = []
for label in clean_label:
int_clean_label.append(label_map[label])
clean_encoding = np.array(clean_encoding)
# one hot encode the labels
# clean_label = to_categorical(np.array(clean_label))
int_clean_label = to_categorical(np.array(int_clean_label))
print('length of keys', len(label_map.keys()))
# print(len(set(list(int_clean_label.tolist()))))
# print(set(int_clean_label.tolist()))
# for i in range(len(clean_label)):
# print('label', clean_label[i].shape)
# x_train, x_test, y_train, y_test = train_test_split(clean_encoding, clean_label, test_size=0.2)
x_train, x_test, y_train, y_test = train_test_split(clean_encoding, int_clean_label, test_size=0.2)
# print('x_train', len(x_train.shape))
# print('x_test', len(x_test.shape))
# print('y_train', len(y_train.shape))
# print('y_test', len(y_test.shape))
# print out the shapes of the arrays if needed
return (x_train, y_train), (x_test, y_test), total_labels
# return 0
def generate_dense_mode():
(x_train, y_train), (x_test, y_test), num_labels= load_data('./test')
print('number of labels', num_labels)
# print(x_train.shape)
# print(len(y_train))
# print(y_train.shape)
# count the number of labels in y_train
# print('length', len(y_train.unique))
pprint(y_train)
'''
Just add a bunch of normal dense layers without convolution to work
'''
model = Sequential()
# starting layer
model.add(Dense(128,activation='relu', input_shape=(128,)))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
# ending layer
model.add(Dense(num_labels, activation='softmax'))
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
# model.fit(x_train, y_train, batch_size=2, epochs=30)
# no batch size
# model.fit(x_train, y_train, batch_size=15,epochs=100, validation_data=(x_test, y_test))
model.fit(x_train, y_train, batch_size=5,epochs=150, validation_data=(x_test, y_test))
model.save('model.h5')
# use np.argmax to inverse the prediction
return model
if __name__ == "__main__":
print(generate_dense_mode())
# print(load_data('./test'))
| true |
5e755d3a6dd1c2b65408c9821261c8b3a23c4b88 | Python | Alfred-Dunhill/turnt-octo-sansa | /the_fight.py | UTF-8 | 4,936 | 3.84375 | 4 | [] | no_license | # made by Alexander Vasilkov, IKT-13
# inspired by the "Fight Club" movie
# Adults only. No one under 17 admitted
# Street Fighting v.1.0-RC1
# greeting, rules and some info about the game
print('Hello! Welcome to the game called \'Street Fighting\' v.1.0-RC1')
print('It is about to help you to decide who will go for the beer: you or your partner')
print('\nHere are some rules:\n* Only two fighters\n* Do not hit below the belt\n* Do not bite each other\n* No shirts, no shoes\n* Fight will go on as long as it have to\n* If you lose, you have to go for a beer')
# call the needed functions
from random import randint
from time import sleep
from os import system
import platform
# defining the array of names; we're going to use 2
player_names = []
# we must do something if player didn't enter the name (it's blank)
names_if_forgot = ['Tyler Durden', 'Thomas', 'Storm', 'Ranger', 'Vladimir Vladimirovich', 'Edward', 'Earthshaker', 'Assassin', 'Zeus', 'Vovka', 'Marko', 'Roar']
# the array of strikes made
strikes_made = [0, 0]
#defining the list that holds our players's health
player_health = [100, 100]
# making the player to read the rules
raw_input('\nPress enter when you are ready (do not break the rules!)...')
# starting phrase
print('\nSo, let\'s make some preparations before')
sleep(0.5) # pausing the script to make the wow-effect
# time to ask for the names
player_names.append(raw_input('\nEnter the name of the first fighter:\n'))
if player_names[0] == '':
player_names[0] = names_if_forgot[randint(0, 11)]
print 'Oh, we did not catch the first name, that\'s why we made it to sound like\n' + player_names[0]
else:
print('\nGood Job!')
sleep(0.5) # pause. pause. pause...
player_names.append(raw_input('\nNow it\'s time to enter the name of the second fighter:\n'))
if player_names[1] == '':
tmp_name = names_if_forgot[randint(0, 11)]
while tmp_name == player_names[0]:
tmp_name = names_if_forgot[randint(0, 11)]
player_names[1] = tmp_name
print 'Oops, the second name is missing, so we made it to sound like\n' + player_names[1]
sleep(3)
print('\nNow, we are fully prepared! The battle is about to start.\nYou can stretch your fists while waiting.')
sleep(4)
# clearing the output window
if platform.system() == 'Windows':
system('cls')
else:
system('clear')
# countdown
def countdown():
sleep(1)
print('3')
sleep(1)
print('2')
sleep(1)
print('1')
sleep(1)
print('The fight starts!\n***')
# returns random damage
def damage():
return randint(10, 25)
# returns random amount of health restored (while drinking herbal tea)
def healing():
return randint(10, 20)
def print_health():
if player_health[0] <= 0:
print player_names[0] + '\'s current hp is ' + 'zero or less. He is gonna lose the fight'
else:
if round(player_health[0]) == player_health[0]:
print player_names[0] + '\'s current hp is ' + str(int(player_health[0]))
else:
print player_names[0] + '\'s current hp is ' + str(player_health[0])
if player_health[1] <= 0:
print player_names[1] + '\'s current hp is ' + 'zero or less. He is gonna lose the fight'
else:
if round(player_health[1]) == player_health[1]:
print player_names[1] + '\'s current hp is ' + str(int(player_health[1]))
else:
print player_names[1] + '\'s current hp is ' + str(player_health[1])
# how the fighters think
def artificial_intelligence(myself, enemy):
hit = damage()
# maybe it's time for some Red Bull? To make the hit stronger.
if player_health[enemy] > 0 and randint(0, 4) == 2:
hit *= 1.5
print player_names[myself] + ' just drunk Red Bull and his hit damage for now is multiplied by 1.5!'
# the hit
player_health[enemy] -= hit
print player_names[myself] + ' caused ' + str(hit) + ' damage to ' + player_names[enemy]
strikes_made[myself] += 1
# time for healing?
if player_health[myself] <= 80 and player_health[enemy] > 0 and randint(0, 2) == 1:
hp_healed = healing()
player_health[myself] += hp_healed
print player_names[myself] + ' just drunk some herbal tea and restored ' + str(hp_healed) + ' health!'
print_health()
# we remember about the fight, so here it is
def the_fight():
# while somebody can fight, them must do it (yeah, too violently)
while player_health[0] > 0 and player_health[1] > 0:
sleep(1) # some pause for intrigue
# it's time to determine who will strike
striker = randint(0, 1)
if striker:
artificial_intelligence(1, 0)
else:
artificial_intelligence(0, 1)
print '***'
# who's the winner? and who will go for a beer? ;)
if player_health[0] > 0:
winner = 0
else:
winner = 1
print '\nAfter the great battle, we\'ve got a winner! And his name is ' + player_names[winner] + '.\nHe beated the opponent with ' + str(strikes_made[winner]) + ' strikes.'
print player_names[abs(winner - 1)] + ', don\'t forget to bring some beer to your opponent.\nRules are rules ;)'
# the main part
countdown()
the_fight()
# just not to quit too early
raw_input('\nPress enter to exit...')
| true |
c13df7bfa5eeea7082b44d6e0d73cc625e6a56fa | Python | cerealturk/Python-Final-Project | /Janek Portion/Scripts/xray.py | UTF-8 | 1,429 | 2.75 | 3 | [] | no_license | import json
import numpy as py
import os
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from math import log
def part(data):
i = 0
types = {}
ptypes = []
for point in data:
ptype = str(point[4])
if not ptype in types:
types[ptype] = {}
types[ptype]["values"] = []
types[ptype]["timestamp"] = []
ptypes.append(ptype)
i += 1
types[ptype]["values"].append(log(float(point[1]), 10))
types[ptype]["timestamp"].append(float(point[0]))
return (types, ptypes)
def plot(types, ptypes):
timestamp_0 = types[ptypes[0]]["timestamp"]
timestamp_1 = types[ptypes[1]]["timestamp"]
timestamp_2 = types[ptypes[2]]["timestamp"]
values_0 = types[ptypes[0]]["values"]
values_1 = types[ptypes[1]]["values"]
values_2 = types[ptypes[2]]["values"]
xlower = min(timestamp_0)
xupper = max(timestamp_0)
ylower = min(values_0)
yupper = max(values_0)
ax = plt.gca()
ax.set_title("X-ray Observations for SN1987A")
ax.set_ylabel("Log Flux (ergs s^-1 cm^-1)")
ax.set_xlabel("Time (Gregorian)")
plt.plot(timestamp_0, values_0, 'r.',
timestamp_1, values_1, 'b.',
timestamp_2, values_2, 'g.', markersize=1)
red_patch = mpatches.Patch(color='red', label=ptypes[0] + " eV")
blue_patch = mpatches.Patch(color='blue', label=ptypes[1] + " eV")
green_patch = mpatches.Patch(color='green', label=ptypes[2] + " eV")
plt.legend(handles=[red_patch, blue_patch,green_patch])
plt.show() | true |
85191e1d027bc34ea5e0523ea8acd15b09a4bcf3 | Python | hiroshees/dstudy | /chapter06/forms.py | UTF-8 | 4,201 | 2.625 | 3 | [] | no_license | from django import forms
from .models import Friend
from .models import ClassRoom
from .models import Student
from .models import Author
from .models import Category
from .models import Book
from .models import MonsterType
from .models import Monster
class MyModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return obj.name
class FriendForm (forms.ModelForm):
GENDER_CHOICES = (
("", "選択してください"),
(1, "男"),
(2, "女"),
)
class Meta:
model = Friend
fields = ['name','mail','gender','age','birthday']
name = forms.CharField(label = "名前",
widget=forms.TextInput(attrs={"class" : "form-control"}))
mail = forms.EmailField(label = "メール",
widget=forms.TextInput(attrs={"class" : "form-control"}))
gender = forms.ChoiceField(label="性別",
choices = GENDER_CHOICES,
widget=forms.Select(attrs={"class" : "form-control"}))
age = forms.IntegerField(label = "年齢",
widget=forms.NumberInput(attrs={"class" : "form-control"}))
birthday = forms.DateField(label = "誕生日",
widget=forms.TextInput(attrs={"class" : "form-control", "type":"date"}))
class ClassRoomForm (forms.ModelForm):
class Meta:
model = ClassRoom
fields = ['name']
name = forms.CharField(label = "名前",
widget=forms.TextInput(attrs={"class" : "form-control"}))
class StudentForm (forms.ModelForm):
class Meta:
model = Student
fields = ['name', 'age', 'class_room']
name = forms.CharField(
label = "名前",
widget=forms.TextInput(attrs={"class" : "form-control"}))
age = forms.CharField(
label = "年齢",
widget=forms.NumberInput(attrs={"class" : "form-control"}))
class_room = MyModelChoiceField(
label="クラス",
queryset=ClassRoom.objects.all(),
widget=forms.Select(attrs={"class" : "form-control"}),
empty_label=None)
class AuthorForm (forms.ModelForm):
class Meta:
model = Author
fields = ['name']
name = forms.CharField(label = "名前",
widget=forms.TextInput(attrs={"class" : "form-control"}))
class CategoryForm (forms.ModelForm):
class Meta:
model = Category
fields = ['name']
name = forms.CharField(label = "名前",
widget=forms.TextInput(attrs={"class" : "form-control"}))
class BookForm (forms.ModelForm):
class Meta:
model = Book
fields = ['title', 'price', 'author', 'category',]
title = forms.CharField(
label = "タイトル",
widget=forms.TextInput(attrs={"class" : "form-control"}))
price = forms.CharField(
label = "金額",
widget=forms.NumberInput(attrs={"class" : "form-control"}))
author = MyModelChoiceField(
label="著者",
queryset=Author.objects.all(),
widget=forms.Select(attrs={"class" : "form-control"}),
empty_label="選択")
category = MyModelChoiceField(
label="カテゴリ",
queryset=Category.objects.all(),
widget=forms.Select(attrs={"class" : "form-control"}),
empty_label="選択")
class MonsterTypeForm (forms.ModelForm):
class Meta:
model = MonsterType
fields = ['name']
name = forms.CharField(label = "名前",
widget=forms.TextInput(attrs={"class" : "form-control"}))
class MonsterForm (forms.ModelForm):
class Meta:
model = Monster
fields = ['name', 'hp', 'level', 'monster_type',]
name = forms.CharField(
label = "名前",
widget=forms.TextInput(attrs={"class" : "form-control"}))
hp = forms.IntegerField(
label = "HP",
widget=forms.NumberInput(attrs={"class" : "form-control"}))
level = forms.IntegerField(
label = "レベル",
widget=forms.NumberInput(attrs={"class" : "form-control"}))
monster_type = MyModelChoiceField(
label="モンスター種類",
queryset=MonsterType.objects.all(),
widget=forms.Select(attrs={"class" : "form-control"}),
empty_label="選択")
| true |
d24ea9dafb0858498c359b85d91dd1164d9a0b55 | Python | bjrichards/dodger | /game.py | UTF-8 | 4,953 | 3.203125 | 3 | [] | no_license | import pygame
import random
import sys
# @desc Container that holds everything else of the program
#
# @param none
# @return void
def main():
pygame.init()
game_continue = 1
score = [0]
screen_width = 700
screen_height = 400
screen = pygame.display.set_mode([screen_width,screen_height])
screen.fill((0, 0, 0))
while game_continue is 1:
TitleScreen(screen, screen_width, screen_height)
PlayGame(screen, screen_width, screen_height, score)
game_continue = GameOver(screen, screen_width, screen_height, score)
return
def PlayGame(screen, screen_width, screen_height, score):
myfont = pygame.font.SysFont("monospace", 25)
screen.fill((0,0,0))
pygame.display.update()
proj = []
p = Player()
score[0] = 0
i = 0
j = 0
score_counter = 0
game_continue = True
while game_continue:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
keys = pygame.key.get_pressed()
screen.fill((0, 0, 0))
p.Update(screen, keys, screen_height, screen_width)
if(random.randrange(0, 100) > 90 - score[0] * 0.03):
proj.append(Projectile(screen_width, screen_height, (score[0] * 0.04 + 5)))
if len(proj) is not 0:
for projectile in proj:
if projectile.x + projectile.w < 0:
proj.remove(projectile)
else:
projectile.Update(screen)
if projectile.x < p.x + p.w and projectile.x + projectile.w > p.x and projectile.y < p.y + p.h and projectile.h + projectile.y > p.y:
game_continue = False
scoretext = myfont.render("Score = "+str(score[0]), 1, (255,255,255))
screen.blit(scoretext, (5, 10))
pygame.display.update()
if score_counter == 0:
score[0] += 1
score_counter = 10
else:
score_counter -= 1
return
def GameOver(screen, screen_width, screen_height, score):
gameover = pygame.font.SysFont("monospace", 40)
endscore = pygame.font.SysFont("monospace", 30)
decision = pygame.font.SysFont("monospace", 25)
i = 40
while(True):
screen.fill((0, 0, 0))
gameOverText = gameover.render("GAME OVER", 1, (255,255,255))
screen.blit(gameOverText, (screen_width/2 - 80, screen_height/2 - 50))
scoretext = endscore.render("Score = "+str(score[0]), 1, (255,255,255))
screen.blit(scoretext, (screen_width/2-45, screen_height/2 - 15))
if i <= 40:
decisionText = decision.render("> Press 'e' to exit()", 1, (255,255,255))
screen.blit(decisionText, (10, screen_height/2+10))
decisionText = decision.render("> Press spacebar to play again()", 1, (255,255,255))
screen.blit(decisionText, (10, screen_height/2+30))
if i == 0:
i = 80
else:
i -= 1
elif i > 40:
i -= 1
pygame.display.update()
keys = pygame.key.get_pressed()
if keys[pygame.K_SPACE]:
return 1
elif keys[pygame.K_e]:
return 0
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
keys = pygame.key.get_pressed()
def TitleScreen(screen, screen_width, screen_height):
proj = []
screen.fill((0,0,0))
Title = pygame.font.SysFont("monospace", 80)
Intr = pygame.font.SysFont("monospace", 40)
pygame.display.update()
while(True):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
keys = pygame.key.get_pressed()
if keys[pygame.K_SPACE]:
break
screen.fill((0, 0, 0))
if(random.randrange(0, 100) > 90):
proj.append(Projectile(screen_width, screen_height, 5))
if len(proj) is not 0:
for projectile in proj:
if projectile.x + projectile.w < 0:
proj.remove(projectile)
else:
projectile.Update(screen)
TitleText = Title.render("Dodger", 1, (255,255,255))
InstrText = Intr.render("Press Space to Play()", 1, (255,255,255))
screen.blit(TitleText, (screen_width/2 - 100, screen_height/4))
screen.blit(InstrText, (screen_width/2 - 150, screen_height/2))
pygame.display.update()
class Player:
def __init__(self):
self.x = 100
self.y = 100
self.w = 25
self.h = 25
def Update(self, screen, keys, screen_height, screen_width):
self.UpdatePos(keys, screen_height, screen_width)
self.ShowPlayer(screen)
def ShowPlayer(self, screen):
pygame.draw.rect(screen, (255, 0, 0), (self.x, self.y, self.w, self.h), 3)
def UpdatePos(self, keys, screen_height, screen_width):
if keys[pygame.K_UP]:
if self.y > 0:
self.y -= 6
elif keys[pygame.K_DOWN]:
if (self.y + self.h) < screen_height:
self.y += 6
if keys[pygame.K_LEFT]:
if self.x > 0:
self.x -= 6
elif keys[pygame.K_RIGHT]:
if (self.x + self.w) < screen_width:
self.x += 6
class Projectile:
def __init__(self, width, height, v):
self.x = width - 1
self.y = random.randrange(0, height - 30)
self.h = 30
self.w = 30
self.v = v
def Update(self, screen):
self.UpdatePos()
self.ShowProjectile(screen)
def ShowProjectile(self, screen):
pygame.draw.rect(screen, (10, 10, 255), (self.x, self.y, self.w, self.h), 0)
def UpdatePos(self):
self.x -= self.v
main()
| true |
fc0223ac2768ab9d768b5b8d2dff0a9a1dd1559f | Python | zuik/stuff | /ytpl/yutility.py | UTF-8 | 1,991 | 2.578125 | 3 | [
"MIT"
] | permissive | import requests
key = "AIzaSyAK76Z9ryYzpjnzDbAMkqAXggcRLkzQ09Y"
def yt_search(query, page_id=None):
if not page_id:
r = requests.get("https://www.googleapis.com/youtube/v3/search",params={
"q": query,
"key": key,
"part":"snippet"})
return r.json()
else:
r = requests.get("https://www.googleapis.com/youtube/v3/search",params={
"q": query,
"key": key,
"part":"snippet",
"pageToken": page_id})
return r.json()
def playlist_data(playlist_id, page_id=None):
if not page_id:
r = requests.get("https://content.googleapis.com/youtube/v3/playlistItems",params={
"playlistId":playlist_id,
"key": key,
"part":"snippet",
"maxResults": "50"})
return r
else:
r = requests.get("https://content.googleapis.com/youtube/v3/playlistItems",params={
"playlistId":playlist_id,
"key": key,
"part":"snippet",
"maxResults": "50",
"pageToken": page_id})
return r
def all_playlist_data(playlist_id):
first_page = playlist_data(playlist_id)
if not first_page:
print("There is no data")
else:
first_page = first_page.json()
try:
next_token = first_page['nextPageToken']
except KeyError as e:
last_page = True
return first_page
else:
last_page = False
pages = []
print(pages)
pages.append(first_page)
while(not last_page):
try:
r = playlist_data(playlist_id, next_token)
if not r:
print("There is no data")
return pages
else:
pages.append(r)
next_token = r.json()['nextPageToken']
except KeyError as e:
return pages | true |
da95d482bb613fb794d03057baf4da904ae437fd | Python | HyunupKang/StepJaeKJoon | /0729_2_5-14_스타트링크.py | UTF-8 | 932 | 3.34375 | 3 | [] | no_license | #0729_2_5-14_스타트링크
'''
1. Up, Down 탐색 행렬 만들기
2. 탐색대로 각 층 방문 후 방문 처리
3. 목표 층에 도착했으면 return
4. 목표 층에 방문 못하고 스택이 비면 use the stairs
'''
from collections import deque
F, S, G, U, D = map(int, input().split()) # 총 F층, 강호가 있는 층 S, 스타트링크 위치 G, 올라가기 U, 내려가기 D
vistied = [0]*(F+1)
GoAndDown = [U, -D]
def bfs(st):
q = deque()
q.append(st)
vistied[st] = 1
cnt = 0
while q:
xq = q.popleft()
if xq == G:
return vistied[xq]-1
for i in range(2):
nq = xq + GoAndDown[i]
if 0 < nq <= F and vistied[nq] == 0:
vistied[nq] = vistied[xq] + 1
q.append(nq)
cnt += 1
return -1
result = bfs(S)
if result == -1:
print("use the stairs")
else:
print(result) | true |
a345f37a82a321e7e515c1a624a3bb8ea86ac14b | Python | MrZhangzhg/nsd_2018 | /nsd1810/python2/day02/func1.py | UTF-8 | 300 | 3.5625 | 4 | [] | no_license | def func1(*args): # *号表示后面的名字是个元组
print(args)
def func2(**kwargs): # **号表示后面的名字是字典
print(kwargs)
if __name__ == '__main__':
func1()
func1('bob')
func1('bob', 'tom', 'kenji', 'natasha')
func2()
func2(name='bob', age=20)
| true |
c341f48605e46d4fe7eb056203a59fe075ad6034 | Python | Lyueyeu/python_debug | /demo.py | UTF-8 | 395 | 2.65625 | 3 | [] | no_license | # coding:UTF-8
from debugger import debugger
def a1(c):
a2()
def a2():
a3()
def a3():
a4()
def a4():
a5()
def a5():
a6()
def a6():
a = 5
a7()
def a7():
b = 100
c = 0
return b / c
def p(x):
print x
if __name__ == '__main__':
debugger.set_throw_error(False)
# debugger.set_output_func(p)
debugger.run_func(a1, {"c": 123})
| true |
e9eb4fa1ae94ba32002a0e602e659fdbbc1ea58d | Python | watsonjj/aoc2020 | /aoc2020/d05_binary_boarding/methods.py | UTF-8 | 1,122 | 3.5625 | 4 | [
"MIT"
] | permissive | from math import floor, ceil
class BoardingPass:
def __init__(self, code):
min_row = 0
max_row = 127
min_column = 0
max_column = 7
for char in code:
mid_row = min_row + (max_row - min_row)/2
mid_column = min_column + (max_column - min_column)/2
if char == "F":
max_row = floor(mid_row)
elif char == "B":
min_row = ceil(mid_row)
elif char == "L":
max_column = floor(mid_column)
elif char == "R":
min_column = ceil(mid_column)
else:
raise ValueError(f"Unknown char: {char}")
if min_row != max_row:
raise ValueError(f"Incomplete row selection: {min_row} - {max_row}")
if min_column != max_column:
raise ValueError(f"Incomplete column selection: {min_column} - {max_column}")
self.row = min_row
self.column = min_column
@property
def id(self):
return self.row * 8 + self.column
def __gt__(self, other):
return self.id > other.id
| true |
eeea76db3466d7140277b3078b461040f9c4f1d6 | Python | megawubs/SpotSearch | /code/optionlist.py | UTF-8 | 484 | 2.875 | 3 | [] | no_license | from Tkinter import *
from pprint import pprint
class optionlist:
def __init__(self, searchResult):
self.result = searchResult
def callback(self, event):
pprint(event)
def makeList(self):
root = Tk()
ListBox = Listbox(root)
i=1
for option in self.result:
ListBox.insert(i,option['name'])
i+1
ListBox.bind("<Return>", self.callback)
ListBox.pack()
root.mainloop()
| true |
33a6bd81a156aa635a75c856440a763559cf4ebf | Python | ociepkam/transrel | /classes/trial.py | UTF-8 | 17,344 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
from enum import Enum
import string
__author__ = 'ociepkam'

# Available stimulus families.  Each key maps to its own name, so the value
# can be passed around and compared directly as the sample-type identifier.
SampleTypes = {key: key for key in (
    'letters',
    'figures',
    'NamesHeightRelations',
    'NamesAgeRelations',
    'symbols',
    'numbers',
    'greek_letters',
)}
# General relation symbols used by the non-name stimulus types.
Relations = dict(major='>', minor='<')
# Age relations types for name samples.
class NamesAgeRelations(Enum):
    """Polish phrases expressing an age relation between two names.

    Member naming: ``major``/``minor`` is the relation direction
    (older/younger); the ``_M``/``_F`` suffix selects masculine/feminine
    grammatical agreement.  Values keep their surrounding spaces --
    presumably so a phrase can be concatenated directly between two
    names (confirm with the task-building code).
    """
    major_M = ' starszy niż '   # "older than", masculine form
    major_F = ' starsza niż '   # "older than", feminine form
    minor_M = ' młodszy niż '   # "younger than", masculine form
    minor_F = ' młodsza niż '   # "younger than", feminine form
# Height relations types for name samples.
class NamesHeightRelations(Enum):
    """Polish phrases expressing a height relation between two names.

    Member naming: ``major``/``minor`` is the relation direction
    (taller/shorter); the ``_M``/``_F`` suffix selects masculine/feminine
    grammatical agreement.  Values keep their surrounding spaces --
    presumably so a phrase can be concatenated directly between two
    names (confirm with the task-building code).
    """
    major_M = ' wyższy niż '    # "taller than", masculine form
    major_F = ' wyższa niż '    # "taller than", feminine form
    minor_M = ' niższy niż '    # "shorter than", masculine form
    minor_F = ' niższa niż '    # "shorter than", feminine form
# Lookup from sample-type name to the matching relation Enum class.
names_types = dict(
    NamesAgeRelations=NamesAgeRelations,
    NamesHeightRelations=NamesHeightRelations,
)
# Pool of first names used by the name-based relation tasks.  Each entry
# records the grammatical sex needed to pick the right relation phrase.
# First ten entries are male names, last ten female.
_NAME_POOL = (
    ('Tomek', 'M'), ('Lech', 'M'), ('Jan', 'M'), ('Roch', 'M'),
    ('Piotr', 'M'), ('Adam', 'M'), ('Filip', 'M'), ('Igor', 'M'),
    ('Jacek', 'M'), ('Wit', 'M'),
    ('Ewa', 'F'), ('Anna', 'F'), ('Iga', 'F'), ('Magda', 'F'),
    ('Ada', 'F'), ('Ola', 'F'), ('Łucja', 'F'), ('Maja', 'F'),
    ('Klara', 'F'), ('Ida', 'F'),
)
names = tuple({'name': name, 'sex': sex} for name, sex in _NAME_POOL)
# List of stimulus for letters samples.  Only consonant letters (vowels and
# Y removed).  sorted() makes the element order deterministic across runs;
# the previous list(set(...)) order depended on string-hash randomization
# (PYTHONHASHSEED), so the index -> letter mapping could differ between
# runs, hurting reproducibility of generated trials.
letters = sorted(set(string.ascii_uppercase) - set("AEIOUY"))
# Shape names available for the "figures" stimulus type.
figures = tuple(
    'square triangle circle trapeze diamond ellipse rectangle hexagon'.split()
)
# Stimulus pools for the remaining sample types.  symbols and numbers are
# sorted so their element order is deterministic across runs; the previous
# list(set(...)) order depended on hash randomization (PYTHONHASHSEED),
# which made the index -> stimulus mapping vary between runs.
symbols = sorted(set("~!@#%&*?|"))
numbers = sorted(set("0123456789"))
# Uppercase Greek letters Alpha..Omega.  U+03A2 (the reserved, unassigned
# "capital final sigma" slot) was dropped: it is not a real character and
# cannot be displayed as a stimulus.
greek_letters = [u'\u0391', u'\u0392', u'\u0393', u'\u0394', u'\u0395', u'\u0396', u'\u0397', u'\u0398', u'\u0399',
                 u'\u039A', u'\u039B', u'\u039C', u'\u039D', u'\u039E', u'\u039F', u'\u03A0', u'\u03A1',
                 u'\u03A3', u'\u03A4', u'\u03A5', u'\u03A6', u'\u03A7', u'\u03A8', u'\u03A9']
class Trial:
def __init__(self, sample_type, n, nr, stim_time, resp_time, feedb, feedb_time, wait, exp, bin, trial_type):
"""
:param sample_type: kind of stimulus. All possibilities in SampleTypes class.
:param n: number of relations in trial. n+1 number od elements in relation chain
:param nr: Trial index. Different for each Trial.
:param stim_time: how long participant can see each relations.
:param resp_time: time between trials
:param feedb:
0 - doesn't show result for this Trial.
1 - show result for this Trial.
2 - doesn't show result for this Trial but show percent result at the end of test.
:param wait: break time after Trial. 0 - wait until participant doesn't press button.
:param exp:
(exp == 1) => Experiment Trial.
(exp == 0) => Test Trail.
:param bin:
0 - generate four answers
1 - generate two answers
:param trial_type:
1 - ask about relation
2 - ask about relation with changed "<" symbol
3 - ask about relation with distance 1
4 - ask about relation with distance 2
:return:
"""
self.sample_type = sample_type
self.n = n
self.nr = nr
self.stim_time = stim_time
self.resp_time = resp_time
self.feedb = feedb
self.feedb_time = feedb_time
self.wait = wait
self.exp = int(exp)
self.relations_list = None
self.task = None
self.answer = None
self.type = 'trial'
self.bin = bin
self.trial_type = trial_type
def create_sample_letters(self):
"""
From n+1 random letters generate chain of pair of relations.
There are two types of relations "<" and ">"
:return: Chain of pair of relations.
"""
relations_list = []
chain_of_letters = []
if self.sample_type == "letters":
stimulus_nr = random.sample(range(len(letters)), self.n + 1)
for idx in stimulus_nr:
chain_of_letters.append(letters[idx])
elif self.sample_type == "numbers":
stimulus_nr = random.sample(range(len(numbers)), self.n + 1)
for idx in stimulus_nr:
chain_of_letters.append(numbers[idx])
elif self.sample_type == "greek_letters":
stimulus_nr = random.sample(range(len(greek_letters)), self.n + 1)
for idx in stimulus_nr:
chain_of_letters.append(greek_letters[idx])
else:
stimulus_nr = random.sample(range(len(figures)), self.n + 1)
for idx in stimulus_nr:
chain_of_letters.append(symbols[idx])
for idx in range(0, self.n):
stimulus_type = random.choice([Relations['major'], Relations['minor']])
stimulus_1 = chain_of_letters[idx]
stimulus_2 = chain_of_letters[idx + 1]
if stimulus_type == Relations['minor']:
relation = stimulus_1 + stimulus_type + stimulus_2
relations_list.append(relation)
else:
relation = stimulus_2 + stimulus_type + stimulus_1
relations_list.append(relation)
# Generate task and answer
def good_bad_relation(relations_list, relations_chain):
if self.trial_type == 1:
first_task = random.choice(relations_list)
if first_task[1] == Relations['minor']:
second_task = first_task[0] + Relations['major'] + first_task[2]
else:
second_task = first_task[0] + Relations['minor'] + first_task[2]
return [first_task, second_task]
elif self.trial_type == 2:
second_task = random.choice(relations_list)[::-1]
if second_task[1] == Relations['minor']:
first_task = second_task[0] + Relations['major'] + second_task[2]
else:
first_task = second_task[0] + Relations['minor'] + second_task[2]
return [first_task, second_task]
elif self.trial_type == 3:
first = random.randint(0, self.n - 2)
second = first + 2
else:
first = random.randint(0, self.n - 3)
second = first + 3
if random.randint(0, 1):
first_task = relations_chain[first] + Relations['minor'] + relations_chain[second]
second_task = relations_chain[first] + Relations['major'] + relations_chain[second]
else:
first_task = relations_chain[second] + Relations['major'] + relations_chain[first]
second_task = relations_chain[second] + Relations['minor'] + relations_chain[first]
return [first_task, second_task]
# Creating task and answer
if self.bin:
task = good_bad_relation(relations_list, chain_of_letters)
else:
task = [good_bad_relation(relations_list, chain_of_letters)[0]]
while len(task) < 4:
new_task = good_bad_relation(relations_list, chain_of_letters)[1]
if new_task not in task:
task.append(new_task)
answer = task[0]
random.shuffle(task)
return relations_list, task, answer
def create_sample_names(self, sample_type):
"""
From n+1 random letters generate chain of pair of relations.
There are two types of relations "<" and ">"
:param sample_type: decide with of two NamesAgeRelations or NamesHeightRelations we need to generate
:return: Chain of pair of relations.
"""
stimulus_nr = random.sample(range(0, 8), self.n + 1)
relations_list = []
chain_of_names = []
for idx in stimulus_nr:
chain_of_names.append(names[idx])
for idx in range(0, self.n):
stimulus_type = random.choice([Relations['major'], Relations['minor']])
stimulus_1 = chain_of_names[idx]
stimulus_2 = chain_of_names[idx + 1]
if stimulus_type == Relations['minor']:
if stimulus_1['sex'] == 'F':
stimulus_type = sample_type.minor_F
else:
stimulus_type = sample_type.minor_M
relation = stimulus_1['name'] + stimulus_type + stimulus_2['name']
relations_list.append(relation)
else:
if stimulus_2['sex'] == 'F':
stimulus_type = sample_type.major_F
else:
stimulus_type = sample_type.major_M
relation = stimulus_2['name'] + stimulus_type + stimulus_1['name']
relations_list.append(relation)
task, answer = self.create_task(chain_of_names)
return relations_list, task, answer
def create_sample_figures(self):
"""
From n+1 random figures generate chain of pair of relations.
:return: Chain of pair of relations.
"""
stimulus_nr = random.sample(range(0, 8), self.n + 1)
chain_of_figures = []
for idx in stimulus_nr:
chain_of_figures.append(figures[idx])
relations_list = []
for idx in range(0, self.n):
stimulus_1 = chain_of_figures[idx]
stimulus_2 = chain_of_figures[idx + 1]
relations_list.append([stimulus_1, stimulus_2])
# Creating task and answer
def good_bad_relation(relations_chain, relations_list):
if self.trial_type == 1 or self.trial_type == 2:
rel = random.choice(relations_list)
first_task = rel
second_task = rel[::-1]
return [first_task, second_task]
elif self.trial_type == 3:
first = random.randint(0, self.n - 2)
second = first + 2
else:
first = random.randint(0, self.n - 3)
second = first + 3
first_task = [relations_chain[first], relations_chain[second]]
second_task = [relations_chain[second], relations_chain[first]]
return [first_task, second_task]
# Creating task and answer
if self.bin:
task = good_bad_relation(chain_of_figures, relations_list)
else:
task = [good_bad_relation(chain_of_figures, relations_list)[0]]
while len(task) < 4:
new_task = good_bad_relation(chain_of_figures, relations_list)[1]
if (new_task not in task) or (self.trial_type >= self.n):
task.append(new_task)
answer = task[0]
random.shuffle(task)
return relations_list, task, answer
def create_sample(self):
"""
Allow to choose task type.
:return: Chain of pair of relations.
"""
if self.sample_type == "letters" or self.sample_type == "symbols" or self.sample_type == 'numbers' or \
self.sample_type == 'greek_letters':
relations_list, task, answer = self.create_sample_letters()
elif self.sample_type == "NamesHeightRelations":
relations_list, task, answer = self.create_sample_names(names_types["NamesHeightRelations"])
elif self.sample_type == "NamesAgeRelations":
relations_list, task, answer = self.create_sample_names(names_types["NamesAgeRelations"])
else:
relations_list, task, answer = self.create_sample_figures()
self.shuffle_sample(relations_list)
self.relations_list = relations_list
self.task = task
self.answer = answer
def shuffle_sample(self, relations_list):
"""
:param relations_list: List of all relations in trial. Generated by create_sample.
:return: Shuffled list of relations in order with will see participant.
Firs relation is random. Each next must contain one of the parameters with was show before.
"""
# choosing first relation
first_stimulus = random.randint(0, self.n - 1)
relations_shuffled_list = [relations_list[first_stimulus]]
next_elem = first_stimulus + 1
previous_elem = first_stimulus - 1
# As long as exist relations before or after chose relations find new one before or after already choose.
while next_elem <= self.n and previous_elem >= -1:
# No not chose elements at the end of relations chain
if next_elem == self.n:
relations_shuffled_list.append(relations_list[previous_elem])
previous_elem -= 1
# No not chose elements at the beginning of relations chain
elif previous_elem == -1:
relations_shuffled_list.append(relations_list[next_elem])
next_elem += 1
# Choose element before or after created chain
else:
if random.choice(['next', 'previous']) == next_elem:
relations_shuffled_list.append(relations_list[next_elem])
next_elem += 1
else:
relations_shuffled_list.append(relations_list[previous_elem])
previous_elem -= 1
return relations_shuffled_list
def create_task(self, relations_chain):
"""
:param relations_chain: chain of all samples in trial
:return: Task and answer for trial
"""
def good_bad_relation():
if self.trial_type == 1 or self.relations_list == 2:
first = random.randint(0, self.n - 1)
second = first + 1
elif self.trial_type == 3:
first = random.randint(0, self.n - 2)
second = first + 2
else:
first = random.randint(0, self.n - 3)
second = first + 3
if self.sample_type == SampleTypes['figures']:
first_task = [relations_chain[first], relations_chain[second]]
second_task = [relations_chain[second], relations_chain[first]]
elif self.sample_type == SampleTypes['letters'] or self.sample_type == SampleTypes['symbols']:
first_task = relations_chain[first] + Relations['minor'] + relations_chain[second]
second_task = relations_chain[second] + Relations['minor'] + relations_chain[first]
else:
if self.sample_type == SampleTypes['NamesAgeRelations']:
if relations_chain[first]['sex'] == 'M':
first_relation = NamesAgeRelations.minor_M
else:
first_relation = NamesAgeRelations.minor_F
if relations_chain[second]['sex'] == 'M':
second_relation = NamesAgeRelations.minor_M
else:
second_relation = NamesAgeRelations.minor_F
else:
if relations_chain[first]['sex'] == 'M':
first_relation = NamesHeightRelations.minor_M
else:
first_relation = NamesHeightRelations.minor_F
if relations_chain[second]['sex'] == 'M':
second_relation = NamesHeightRelations.minor_M
else:
second_relation = NamesHeightRelations.minor_F
first_task = relations_chain[first]['name'] + first_relation + relations_chain[second]['name']
second_task = relations_chain[second]['name'] + second_relation + relations_chain[first]['name']
return [first_task, second_task]
# Creating task and answer
if self.bin:
task = good_bad_relation()
else:
task = [good_bad_relation()[0]]
while len(task) < 4:
new_task = good_bad_relation()[1]
if new_task not in task:
task.append(new_task)
answer = task[0]
random.shuffle(task)
return task, answer
def prepare_general(self):
trial = {
"TYPE": self.type,
'SAMPLE_TYPE': self.sample_type,
'N': self.n,
'NR': self.nr,
'STIMTIME': self.stim_time,
'RESPTIME': self.resp_time,
'FEEDB': self.feedb,
'FEEDB_TIME': self.feedb_time,
'WAIT': self.wait,
'EXP': self.exp,
'BIN': self.bin,
'TRIAL_TYPE': self.trial_type
}
return trial
def prepare_concrete(self):
trial = self.prepare_general()
trial['RELATIONS_LIST'] = self.relations_list
trial['TASK'] = self.task
trial['ANSWER'] = self.answer
return trial
| true |
56d75d1d37aaad57ea3fed27ec2b6e8954f3bdf3 | Python | SaeSimcheon/employee_or_farm | /Chapter7/9. 미로의 최단거리 통로/SH_cht7_9.py | UTF-8 | 1,319 | 2.734375 | 3 | [] | no_license | import sys
# Redirect stdin so the input() calls below read the maze from input.txt.
sys.stdin = open("input.txt", "r")
from collections import deque

# Read the 7x7 grid (0 = open, 1 = wall) and surround it with a wall
# border so neighbor checks can never index outside the board.
seq = [[1, *list(map(int, input().split())), 1] for _ in range(7)]
seq.insert(0, [1] * 9)
seq.append([1] * 9)

# Breadth-first search for the shortest path from (1, 1) to (7, 7).
# Queue entries are [row, col, distance-from-start].
start = [1, 1, 0]
que = deque()
que.append(start)
while True:
    if len(que) == 0:
        # The reachable region was exhausted without hitting the goal:
        # there is no path.
        print(-1)
        break
    a = que.popleft()
    if a[0:2] == [1, 1]:
        # Mark the start cell visited the first time it is popped.
        seq[1][1] = 1
    if a[0:2] == [7, 7]:
        print(a[2])
        break
    else:
        # Enqueue every unvisited open neighbor (down, right, up, left)
        # and mark it visited immediately so it is enqueued only once.
        # This loop replaces four copy-pasted if/else blocks.
        for dr, dc in ((1, 0), (0, 1), (-1, 0), (0, -1)):
            nr, nc = a[0] + dr, a[1] + dc
            if seq[nr][nc] != 1:
                seq[nr][nc] = 1
                que.append([nr, nc, a[2] + 1])
| true |
3b8c26364af9cbf15f2e27dd3812080766b34483 | Python | EHOTIK911/Inform-EGE | /27/27.2/27.2.py | UTF-8 | 2,190 | 3.734375 | 4 | [] | no_license | """
Имеется набор данных, состоящий из пар положительных целых чисел. Необходимо выбрать из каждой пары ровно одно число так, чтобы
сумма всех выбранных чисел не делилась на 3 и при этом была максимально возможной. Гарантируется, что искомую сумму получить можно.
Программа должна напечатать одно число — максимально возможную сумму, соответствующую условиям задачи.
Входные данные.
Даны два входных файла (файл A и файл B), каждый из которых содержит в первой строке количество пар N(1 ≤ N ≤ 100000). Каждая из
следующих N строк содержит два натуральных числа, не превышающих 10 000.
27A-1-1.txt 27B-1-1.txt
Пример организации исходных данных во входном файле:
6
1 3
5 12
6 9
5 4
3 3
1 1
Для указанных входных данных значением искомой суммы должно быть число 32.
В ответе укажите два числа: сначала значение искомой суммы для файла А, затем для файла B.
Предупреждение: для обработки файла B не следует использовать переборный алгоритм, вычисляющий сумму для всех возможных
вариантов, поскольку написанная по такому алгоритму программа будет выполняться слишком долго.
"""
f = open("27B-1-1.txt")
n = int(f.readline())
sum = 0
min_diff = 1000001
for i in range(n):
a, b = map(int,f.readline().split())
sum += max(a,b)
if abs(a-b) < min_diff and abs(a-b) % 3 !=0:
min_diff = abs(a-b)
if sum % 3 == 0:
print(sum - min_diff)
else:
print(sum)
| true |
27c0040d6903a4c72db24660b26e4c8478086832 | Python | TBisig/Python | /ScoresandGrades.py | UTF-8 | 848 | 4.125 | 4 | [] | no_license | # Scores and Grades
# Score: 87; Your grade is B
# Score: 67; Your grade is D
# Score: 95; Your grade is A
# Score: 100; Your grade is A
# Score: 75; Your grade is C
# Score: 90; Your grade is A
# Score: 89; Your grade is B
# Score: 72; Your grade is C
# Score: 60; Your grade is D
# Score: 98; Your grade is A
# End of the program. Bye!
scores = [95, 10, 83, 95, 55, 74, 65]
def grades(scores):
for int in scores:
if int >= 90:
print "Score: {}; Your grade is A".format(int)
elif int >= 80:
print "Score: {}; Your grade is B".format(int)
elif int >= 70:
print "Score: {}; Your grade is C".format(int)
elif int >= 60:
print "Score: {}; Your grade is D".format(int)
elif int < 60:
print "Score: {}; Your grade is F".format(int)
grades(scores) | true |
9e2aaeafe5e79e8dd95fd3fc5f347892941b52e2 | Python | curtainwang/osweb | /osweb.py | UTF-8 | 7,472 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python
#coding=utf-8
import sqlite3
import random
import web
import datetime
from jinja2 import Environment, PackageLoader
# URL routing for web.py: (path regex, handler class name) pairs.
urls = (
    # '/',"INDEX",
    '/',"TEST",
    '/test/(.*)','TEST',
)
app = web.application(urls,globals())
# Turn off web.py's debug mode (auto reload / verbose error pages).
web.config.debug = False
# Data-access layer: SQLite-backed question bank and score board.
class OS:
    # Load the question file into the database.
    def savetodb(self):
        """Create the schema if missing and import question/answer pairs.

        Each line of os.txt is "<problem>\t<answer>"; a problem may occur
        on several lines, one per accepted answer.
        """
        conn = sqlite3.connect("os.db")
        cur = conn.cursor()
        sql_create = '''create table if not exists os(
            id integer primary key autoincrement,
            problem text,
            answer text
            )'''
        cur.execute(sql_create)
        cur.execute('''create table if not exists rank(
            id integer primary key autoincrement,
            score integer
            )''')
        conn.commit()
        problem_set = open("os.txt").readlines()
        # Parameterized insert: quotes in the data can no longer break the
        # statement (the original interpolated the text straight into SQL).
        sql_insert = "insert into os(problem,answer) values(?,?)"
        for problem in problem_set:
            temp = problem.split("\t")
            # temp[1][:-1] drops the trailing newline of the answer field.
            cur.execute(sql_insert, (temp[0], temp[1][:-1]))
        conn.commit()
        cur.close()
        conn.close()
    # Draw n distinct random questions from the bank.
    def get_problem(self,n):
        """Return a list of n distinct randomly chosen problems."""
        conn = sqlite3.connect("os.db")
        cur = conn.cursor()
        problem = cur.execute("select distinct problem from os")
        problem_list = []
        for row in problem:
            problem_list.append(row[0])
        total = len(problem_list)
        ret = []
        # Keep sampling until n distinct problems have been collected.
        # NOTE(review): loops forever if n exceeds the number of distinct
        # problems in the bank -- callers must keep n <= total.
        while n>0:
            temp = random.randint(0,total-1)
            if problem_list[temp] not in ret:
                ret.append(problem_list[temp])
                n -= 1
        cur.close()
        conn.close()
        return ret
    # Fetch the reference answers for one question.
    def get_answer(self,problem):
        """Return (uppercased answers, answer count) for the given problem."""
        conn = sqlite3.connect("os.db")
        cur = conn.cursor()
        # Parameterized lookup instead of string interpolation.
        answer = cur.execute("select upper(answer) from os where problem=?", (problem,))
        ans_ret=[]
        for row in answer:
            ans_ret.append(row[0])
        cur.close()
        conn.close()
        return ans_ret,len(ans_ret)
    # Persist one score so it can be ranked later.
    def save_score(self,sc):
        """Insert the score into the rank table."""
        conn = sqlite3.connect("os.db")
        cur = conn.cursor()
        cur.execute("insert into rank(score) values(?)", (sc,))
        conn.commit()
        cur.close()
        conn.close()
    # Compute the 1-based rank of a score.
    def get_rank(self,sc):
        """Return 1 + the number of stored scores strictly greater than sc."""
        conn = sqlite3.connect("os.db")
        cur = conn.cursor()
        ret = cur.execute("select count(*) from rank where score > ?", (sc,))
        r = ret.fetchone()  # count(*) always yields exactly one row
        cur.close()
        conn.close()
        return r[0]+1
# Quiz page handler.
class TEST():
    # NOTE(review): all quiz state lives in class attributes, so every
    # request (and every user) shares one session -- presumably a
    # single-user tool; confirm before deploying for multiple users.
    start_time=0 # when the current quiz page was served
    total_time=0 # total seconds allowed for the quiz
    os = OS()
    problem = [] # questions of the current quiz
    total_blank = 0# number of answer blanks across all questions
    accept = 0 # number of correctly filled blanks
    session=0 # flag meant to guard against resubmission without refresh
    level=1.0 # difficulty level
    time = { # seconds allowed per blank, keyed by difficulty level
        '1.0':6,
        '2.0':5,
        '3.0':4,
    }
    # GET: serve a fresh quiz page.
    def GET(self,args = None):
        """Pick 6 random questions and render the quiz form (os.html)."""
        try:
            TEST.start_time=datetime.datetime.now() # start the clock
            input = web.input() # query-string parameters
            try:
                TEST.level=input['level']
            except:
                TEST.level="2.0" # default difficulty when none was given
            TEST.session=0
            TEST.problem = self.os.get_problem(6) # draw 6 random questions
            std_ans = [] # reference answers
            TEST.total_blank=0
            for i in TEST.problem: # count blanks to size the time budget
                std_ans,num= self.os.get_answer(i)
                TEST.total_blank += num
            env = Environment(loader=PackageLoader('osweb', './')) # template engine
            template = env.get_template('os.html')
            a = map(chr,range(66,71))  # column ids 'B'..'F'
            num = range(0,6)
            # print "total",TEST.total_blank
            TEST.total_time=TEST.total_blank*TEST.time[TEST.level]
            return template.render(time=TEST.total_time,problem=TEST.problem,td=a,num=num)
        except:
            return '<h1>System Error<br/><br/><a href="/">back</a></h1>'
    # POST: grade the submitted answers and show the result page.
    def POST(self,name=None):
        """Grade the submitted form, persist the score, render rank.html."""
        # try:
        end_time=datetime.datetime.now()
        # print TEST.start_time,end_time
        try:
            use=end_time-TEST.start_time
        except:
            return '<h1>Please click button at the end of rank page<br/><br/><a href="/">back</a></h1>'
        # print use.seconds,TEST.total_time
        if use.seconds-10 > TEST.total_time: # reject cheating by disabling JS to freeze the timer
            return "<h1>Please NOT Forbid JS!</h1>"
        input = web.input()
        error_pro=[]
        error_user=[]
        right_ans=[]
        for row in range(0,6):# one iteration per question
            # if TEST.session==1: # guard against resubmitting after going "back" without a refresh
            #     return "<h1>请刷新页面后再开始做题</h1>"
            # if input["A%s"%row] not in TEST.problem:# reject questions not part of this quiz (forged submissions)
            #     return '<h1>Please refresh before test<br/><br/><a href="/test/">back</a></h1>'
            std_ans,num= self.os.get_answer(input["A%s"%row])
            user_ans = []
            user_all_ans = []
            tag=0
            for line in map(chr,range(66,71)):# answer fields 'B'..'F' for this question
                a = "%s%s"%(line,row)
                input[a]=input[a].upper().replace(" ","") # strip spaces and uppercase the user's answer
                # print input[a]
                user_all_ans.append(input[a])
                # print input[a]
                # A blank counts as correct when it matches a reference
                # answer, is non-empty, and was not already given.
                if (input[a] in std_ans) and (input[a] not in user_ans) and input[a]!='':
                    user_ans.append(input[a])
                    self.accept += 1
                    tag+=1
            # Collect questions that were answered incompletely/incorrectly.
            if tag<num and input["A%s"%row] not in error_pro:
                error_user.append(user_all_ans)
                error_pro.append('%s'%input["A%s"%row])
                right_ans.append(std_ans)
        print "total2",self.total_blank
        score = int(self.accept*100/TEST.total_blank)# percentage score
        print "score=",score
        self.os.save_score(score) # persist for ranking
        TEST.session=1
        env = Environment(loader=PackageLoader('osweb', './'))# result page
        template = env.get_template('rank.html')
        num = range(len(error_pro))
        return template.render(num = num,err_ans=error_user,score=score,rank=self.os.get_rank(score),problem=error_pro,answer=right_ans)
        # except:
        #     return '<h1>System Error<br/><br/><a href="/">back</a></h1>'
# Landing page handler.
class INDEX():
    def GET(self):
        """Render the static index.html template."""
        try:
            engine = Environment(loader=PackageLoader('osweb', './'))
            page = engine.get_template('index.html')
            return page.render()
        except:
            return '<h1>System Error<br/><br/><a href="/">back</a></h1>'
if __name__=="__main__":
    # Started directly: run web.py's built-in development server.
    app.run()
    # test = OS()
    # test.get_problem(6)
    # test.get_answer("OS设计目标")
# WSGI callable for deployment behind an external web server.
application = app.wsgifunc()
| true |
4cfa4f2b39aa616096cb4376b08b6f318d5da9c1 | Python | gorenNoa/RoboAdvisor | /server/api/stocks_prices_api.py | UTF-8 | 1,837 | 2.515625 | 3 | [
"MIT"
] | permissive | from flask import Blueprint, request, make_response, jsonify
from flask.views import MethodView
from app.configurations import Config
from app.extensions import db
from models.stock_price import StockPrice
from datetime import datetime
class StocksPricesApi(MethodView):
    """REST endpoints for creating and querying stock prices."""

    # add new stock price
    def post(self):
        """Insert a stock price from form fields.

        Expects form fields: ticker, date_time (MM-DD-YYYY), price,
        asset_type. Returns 200 on success, 400 with the error text
        otherwise.
        """
        try:
            date_time_str = request.form['date_time']
            date_time_obj = datetime.strptime(date_time_str, '%m-%d-%Y')
            new_stock_price = StockPrice(ticker=request.form['ticker'], date_time=date_time_obj, price=request.form['price'],
                                         asset_type=request.form['asset_type'])
            db.session.add(new_stock_price)
            db.session.commit()
            response = make_response(jsonify(message="Stock price successfully added to database"), 200)
        except Exception as e:
            # Roll back so a failed add/commit does not leave the shared
            # session in an unusable state for later requests.
            db.session.rollback()
            response = make_response(jsonify(message=str(e)), 400)
        return response

    # get stock price by ticker
    def get(self):
        """Return the first stock price row matching the ?ticker= query arg."""
        try:
            # Renamed from the misleading "stock_price_by_email": the
            # filter is on ticker, not email.
            stock_price_by_ticker = db.session.query(StockPrice).filter_by(ticker=request.args.get('ticker')).first()
            if stock_price_by_ticker is None:
                response = make_response(jsonify(message='Invalid ticker'), 400)
            else:
                response = make_response(jsonify(stock_price_by_ticker.as_dict()), 200)
        except Exception as e:
            response = make_response(jsonify(message=str(e)), 400)
        return response
# Blueprint wiring: both endpoints live under <API_PREFIX>/stocks_prices
# and share one MethodView (POST -> post(), GET -> get()).
api = Blueprint('stocks_prices_api', __name__, url_prefix=Config.API_PREFIX + '/stocks_prices')
stocks_prices = StocksPricesApi.as_view('api_stocks_prices')
api.add_url_rule('/add_stock_price/', methods=['POST'], view_func=stocks_prices)
api.add_url_rule('/get_stock_price_by_ticker/', methods=['GET'], view_func=stocks_prices)
| true |
ef0580c61874380a38e7e94f99651a429035445a | Python | Ramirezzz1/Python-102 | /even_number.py | UTF-8 | 51 | 2.546875 | 3 | [] | no_license | even_number=list(range(2,11,2))
# Display the list of even numbers (2 through 10) built above.
print(even_number)
| true |
9af84c65e46b5d9d2a5199241a4342ed1d358923 | Python | m-milena/pearson_task | /Data_analysis/neural_network_data.py | UTF-8 | 1,255 | 2.84375 | 3 | [] | no_license | import mysql.connector
from database_config import db_connection_config
import database_connect
import database_disconnect
import csv
# database connect
db_connection = database_connect.database_connect(db_connection_config)
db_cursor = db_connection.cursor()

# Join each graduate with their score-board row.
sql_select = "SELECT year, gpa, maths_exam, art_exam, language_exam, social_activity, essay_score, interview_score, score, graduated FROM sql_students.graduates INNER JOIN sql_students.score_board ON graduates.id=score_board.id"

with open('../Neural_network/neural_network_data.csv', 'w') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows([['year', 'gpa', 'maths exam', 'art exam', 'language exam', 'social activity', 'essay score', 'interview score', 'score', 'graduated']])
    db_cursor.execute(sql_select)
    db_record = db_cursor.fetchall()
    for row in db_record:
        # Encode the textual graduated flag as 0/1 for the network.
        if str(row[9]) == 'TRUE':
            binary_truefalse = 1
        elif str(row[9]) == 'FALSE':
            binary_truefalse = 0
        else:
            # Unknown flag value: report it and stop the export.
            print('ERROR: ' + str(row[9]) + ' is wrong type')
            break
        writer.writerow([row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], binary_truefalse])
    # The "with" block closes the file; the explicit csvfile.close() the
    # original performed here was redundant, and the unused "count"
    # variable was removed.

# database disconnect
# NOTE(review): a fresh cursor is passed here instead of db_cursor --
# presumably intentional, but worth confirming against database_disconnect.
database_disconnect.database_disconnect(db_connection, db_connection.cursor())
| true |
20490095b4dd57e6abbf998f8b2b28e180413cac | Python | usc-isi-i2/etk | /etk/timeseries/annotation/excel_processor.py | UTF-8 | 1,459 | 2.734375 | 3 | [
"MIT"
] | permissive | import json
import logging
import argparse
# Root logger setup: show INFO and above as "logger-name - LEVEL - message".
logging.basicConfig(format='%(name)s - %(levelname)s - %(message)s', level=logging.INFO)
def process(filename):
    """Annotate every sheet of the given Excel workbook.

    Returns a list with one annotation dict per sheet that produced output.
    NOTE(review): input_processor and sheet_processor are used here but not
    imported in this module -- presumably provided elsewhere; confirm.
    """
    final_json = []
    step = 1
    # "with" guarantees the trace file is closed; the original leaked the
    # handle returned by open().
    with open("output.txt", mode='w') as out_file:
        for sheet, sheet_name, merged_cells in input_processor.process_excel(filename):
            logging.info("Processing sheet: %s", sheet_name)
            ps = sheet_processor.parsed_sheet(sheet_name, step, sheet, merged_cells)
            step += 1
            ps.find_tables(out_file)
            tmp = ps.get_output()
            if tmp is None:  # identity test instead of "== None"
                continue
            final_json.append(tmp)
            # Reuse tmp rather than recomputing get_output() for the log.
            logging.debug(json.dumps(tmp, sort_keys=True, indent=2, separators=(',', ': ')))
    return final_json
def main():
    """Command-line entry point: annotate a workbook and write the JSON."""
    parser = argparse.ArgumentParser()
    parser.add_argument("infile", help='File to annotate.')
    parser.add_argument("--outfile", help='File to write the annotation to.', default='')
    args = parser.parse_args()
    infile = args.infile.replace("\\", "")
    outfile = args.outfile.replace("\\", "")
    if outfile == "":
        # No explicit output path: derive one from the input file name.
        outfile = infile + "_annotation.json"
    annotation = process(infile)
    with open(outfile, mode='w') as out:
        json.dump(annotation, out, sort_keys=True, indent=2, separators=(',', ': '))
    logging.info("Output written to " + outfile)

if __name__ == "__main__":
    main()
| true |
3a20f5497f92c37de8a13394f705521d3e18dfe1 | Python | bfazzani/find-a-home | /walkScore.py | UTF-8 | 618 | 2.65625 | 3 | [] | no_license | import requests
import urllib.parse as url
# NOTE(review): a live Walk Score API key is hard-coded in source control;
# consider loading it from an environment variable instead.
apiKey = "b5093bcc1c8c8c8a5f263745f82a5d39"
# Base endpoint; query parameters are appended by getTransitScores().
baseURL = "http://api.walkscore.com/score?format=json&"
def getTransitScores(address, lat, lng):
    """Query the Walk Score API and return (walk, bike, transit) scores.

    The transit score falls back to 0 when the API response carries no
    "transit" entry.
    """
    requestString = baseURL + url.urlencode({"address": address, "lat": lat,
                                             "lon": lng, "transit": 1, "bike": 1,
                                             "wsapikey": apiKey})
    r = requests.get(requestString)
    x = r.json()
    walk = x["walkscore"]
    bike = x["bike"]["score"]
    # Bug fix: the original used try/finally with a return inside the
    # finally block, and a return in finally unconditionally overrides the
    # try block's return -- so the transit score was ALWAYS reported as 0.
    # Handle the missing key explicitly instead.
    try:
        return (walk, bike, x["transit"]["score"])
    except KeyError:
        return (walk, bike, 0)
| true |
d106530811b9f9584b35953b58cfc9d82e9bf3af | Python | c42-arun/coding-challenges-python | /src/merge_sorted_lists/solution.py | UTF-8 | 1,888 | 4.03125 | 4 | [] | no_license | import unittest
def merge_lists(my_list, alices_list):
    """Merge two individually sorted lists into one sorted list.

    Classic two-pointer merge; on ties the element from alices_list is
    taken first, matching the original implementation.
    """
    merged = []
    mine = 0
    alice = 0
    # Walk both lists in lockstep, always appending the smaller head.
    while mine < len(my_list) and alice < len(alices_list):
        if my_list[mine] < alices_list[alice]:
            merged.append(my_list[mine])
            mine += 1
        else:
            merged.append(alices_list[alice])
            alice += 1
    # At most one of these tails is non-empty once a list is exhausted.
    merged.extend(my_list[mine:])
    merged.extend(alices_list[alice:])
    return merged
if __name__ == "__main__":
    pass

# Quick demo of merge_lists. NOTE(review): these statements run at module
# level, so they also execute (and print) whenever this file is imported.
l1 = [2, 4, 6]
l2 = [1, 3, 7]
result = merge_lists(l1, l2)
print(result)
class Test(unittest.TestCase):
def test_both_lists_are_empty(self):
actual = merge_lists([], [])
expected = []
self.assertEqual(actual, expected)
def test_first_list_is_empty(self):
actual = merge_lists([], [1, 2, 3])
expected = [1, 2, 3]
self.assertEqual(actual, expected)
def test_second_list_is_empty(self):
actual = merge_lists([5, 6, 7], [])
expected = [5, 6, 7]
self.assertEqual(actual, expected)
def test_both_lists_have_some_numbers(self):
actual = merge_lists([2, 4, 6], [1, 3, 7])
expected = [1, 2, 3, 4, 6, 7]
self.assertEqual(actual, expected)
def test_lists_are_different_lengths(self):
actual = merge_lists([2, 4, 6, 8], [1, 7])
expected = [1, 2, 4, 6, 7, 8]
self.assertEqual(actual, expected)
unittest.main(verbosity=2) | true |
2729615d64723399716efc5534d5fcffa0641ba1 | Python | x75/rchw | /ga-hw/plot-stats.py | UTF-8 | 1,174 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
# plot pyevolve statistics file
import re  # bug fix: re.sub is used below but "re" was never imported

import numpy as np
import pylab as pl

# intrinsic / hw
# datafile = "ga-hw/data/ga2-hw-run2-stats-len016-popsz010-numgen030-mutrate0.02.csv"
# simulation
# datafile = "ga-hw/data/ga2-sim-run3-stats-len016-popsz010-numgen020-mutrate0.02.csv"
# simulation
datafile = "ga-hw/data/ga2-sim-run2-stats-len128-popsz020-numgen300-mutrate0.01.csv"
rec = np.genfromtxt(datafile, delimiter=";")
# pl.plot(rec[:,1:10])
# Column legend from the pyevolve stats file:
# 03: rawMin  04: rawAvg  05: fitMin  08: fitAvg  09: fitMax  10: rawMax
# these indices vs. values are weird
data_indices = [2, 7, 9]
pl.plot(rec[:, data_indices])
# raw fit only
# pl.plot(rec[:,[4,7,8]])
# raw only
# pl.plot(rec[:,[2,3,9]])
pl.legend(("Min", "Avg", "Max"))
pl.show()

# save data for pgf plotting
for data_index in data_indices:
    if data_index == 2:
        data_name = "min"
    elif data_index == 7:
        data_name = "avg"
    elif data_index == 9:
        data_name = "max"
    t = np.arange(len(rec))
    # Raw string for the regex so "\." is an explicit escaped dot.
    pgfplotfile = re.sub(r"\.csv$", "-%d-%s.dat" % (data_index, data_name), datafile)
    # print pgfplotfile
    np.savetxt(pgfplotfile, np.vstack((t, rec[:, data_index])).T)
| true |
1ee771cc3ea6c3446028056fc047c079e3806133 | Python | blairck/project_template | /src/main.py | UTF-8 | 469 | 4.03125 | 4 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""This is a template with unit tests for other projects"""
class Addition(object):
    """Wraps a fixed base number and adds it to other values."""

    def __init__(self, base):
        # Constant operand reused by every apply_addition() call.
        self.base = base

    def apply_addition(self, value):
        """Return the sum of the stored base and *value*."""
        total = value + self.base
        return total
if __name__ == '__main__':
    # Demo: add 3 to a base of 5 and print "5 + 3 = 8" below.
    base = 5
    value = 3
    add_class = Addition(base)
    result = add_class.apply_addition(value)
print "{0} + {1} = {2}".format(base, value, result) | true |
12a1b5520f4433603f6821320a9bb707e15bb081 | Python | NoahRJohnson/terra | /terra/core/settings.py | UTF-8 | 19,661 | 2.671875 | 3 | [
"MIT"
] | permissive | '''
A Terra settings file contains all the configuration of your app run. This
document explains how settings work.
The basics
----------
A settings file is just a json file with all your configurations set
.. rubric:: Example
.. code-block:: json
{
"compute": {
"type": "terra.compute.dummy"
},
"logging": {
"level": "DEBUG3"
}
}
Designating the settings
------------------------
.. envvar:: TERRA_SETTINGS_FILE
When you run a Terra App, you have to tell it which settings you’re using.
Do this by using an environment variable, :envvar:`TERRA_SETTINGS_FILE`.
Default settings
----------------
A Terra settings file doesn’t have to define any settings if it doesn’t need
to. Each setting has a sensible default value. These defaults live in
:data:`global_templates`.
Here’s the algorithm terra uses in compiling settings:
* Load settings from global_settings.py.
* Load settings from the specified settings file, overriding the global
settings as necessary, in a nested update.
Using settings in Python code
-----------------------------
In your Terra apps, use settings by importing the object
:data:`terra.settings`.
.. rubric:: Example
.. code-block:: python
from terra import settings
if settings.params.max_time > 15:
# Do something
Note that :data:`terra.settings` isn’t a module – it’s an object. So importing
individual settings is not possible:
.. code-block:: python
from terra.settings import params # This won't work.
Altering settings at runtime
----------------------------
You shouldn’t alter settings in your applications at runtime. For example,
don’t do this in an app:
.. code-block:: python
from django.conf import settings
settings.DEBUG = True # Don't do this!
Available settings
------------------
For a full list of available settings, see the
:ref:`settings reference<settings>`.
Using settings without setting TERRA_SETTINGS_FILE
--------------------------------------------------
In some cases, you might want to bypass the :envvar:`TERRA_SETTINGS_FILE`
environment variable. For example, if you are writing a simple metadata parse
app, you likely don’t want to have to set up an environment variable pointing
to a settings file for each file.
In these cases, you can configure Terra's settings manually. Do this by
calling:
:func:`LazySettings.configure`
.. rubric:: Example:
.. code-block:: python
from terra import settings
settings.configure(logging={'level': 40})
Pass :func:`setting.configure()<LazySettings.configure>` the same arguments you
would pass to a :class:`dict`, such as keyword arguments as in this example
where each keyword argument represents a setting and its value, or a
:class:`dict`. Each argument name should be the same name as the settings. If a
particular setting is not passed to
:func:`settings.configure()<LazySettings.configure>` and is needed at some
later point, Terra will use the default setting value.
Configuring Terra in this fashion is mostly necessary - and, indeed,
recommended - when you’re running a trivial transient app in the
framework instead of a larger application.
'''
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
from inspect import isfunction
from functools import wraps
from terra.core.exceptions import ImproperlyConfigured
from vsi.tools.python import (
nested_patch_inplace, nested_patch, nested_update, nested_in_dict
)
from json import JSONEncoder
try:
import jstyleson as json
except ImportError: # pragma: no cover
import json
ENVIRONMENT_VARIABLE = "TERRA_SETTINGS_FILE"
'''str: The environment variable that store the file name of the configuration
file
'''
filename_suffixes = ['_file', '_files', '_dir', '_dirs', '_path', '_paths']
'''list: The list key suffixes that are to be considered for volume translation
'''
json_include_suffixes = ['_json']
'''list: The list key suffixes that are to be considered executing json
include replacement at load time.
'''
def settings_property(func):
  '''
  Mark ``func`` as a lazily evaluated settings property.

  A function wrapped with this decorator is called at most once per settings
  key: :class:`Settings` replaces the stored function with its return value
  after the first access, behaving like a cached lazy evaluation.

  One settings_property can safely reference another settings property, using
  ``self``, which will refer to the :class:`Settings` object

  Arguments
  ---------
  func : func
      Function being decorated
  '''
  @wraps(func)
  def delegate(*args, **kwargs):
    return func(*args, **kwargs)
  # Tag the wrapper so Settings.__getattr__ / TerraJSONEncoder recognize it.
  delegate.settings_property = True
  return delegate
@settings_property
def status_file(self):
  '''
  Default :func:`settings_property` for ``status_file``.

  Resolves to ``status.json`` inside :func:`processing_dir`. ``self`` is the
  :class:`Settings` object, so reading ``self.processing_dir`` may itself
  trigger that lazy property.
  '''
  return os.path.join(self.processing_dir, 'status.json')
@settings_property
def processing_dir(self):
  '''
  The default :func:`settings_property` for the processing directory. If not
  set in your configuration, it will default to the directory where the config
  file is stored. If this is not possible, it will use the current working
  directory.

  If the directory is not writeable, a temporary directory will be used instead
  '''
  if hasattr(self, 'config_file'):
    # config_file is set by LazySettings._setup after loading the json file.
    processing_dir = os.path.dirname(self.config_file)
  else:
    processing_dir = os.getcwd()
    logger.warning('No config file found, and processing dir unset. '
                   f'Using cwd: {processing_dir}')
  if not os.access(processing_dir, os.W_OK):
    # Deferred import: tempfile is only needed on this fallback path.
    import tempfile
    bad_dir = processing_dir
    processing_dir = tempfile.mkdtemp(prefix="terra_")
    logger.error(f'You do not have access to processing dir: "{bad_dir}". '
                 f'Using "{processing_dir}" instead')
  return processing_dir
@settings_property
def unittest(self):
  '''
  A :func:`settings_property` for determining whether unittests are running.

  Checks the ``TERRA_UNITTEST`` environment variable; only the exact string
  ``"1"`` counts as True.
  '''
  return os.environ.get('TERRA_UNITTEST', None) == "1"
# TODO: come up with a way for apps to extend this themselves
global_templates = [
  (
    # Global defaults: the empty pattern matches every configuration.
    {},
    {
      "logging": {
        "level": "ERROR",
        # Plain string: the original's f-prefix had no placeholders and did
        # nothing (the %(...)s fields belong to the logging formatter).
        "format": "%(asctime)s (%(hostname)s): %(levelname)s - %(message)s",
        "date_format": None,
        "style": "%"
      },
      "executor": {
        "type": "ThreadPoolExecutor"
      },
      "compute": {
        "arch": "terra.compute.dummy"
      },
      # settings_property functions below are evaluated lazily on first access.
      'status_file': status_file,
      'processing_dir': processing_dir,
      'unittest': unittest,
      # The original dict literal listed "resume" twice; one entry suffices.
      'resume': False
    }
  ),
  (
    {"compute": {"arch": "terra.compute.virtualenv"}},  # Pattern
    {"compute": {"virtualenv_dir": None}}  # Defaults
  ),
  ( # So much for DRY :(
    {"compute": {"arch": "virtualenv"}},
    {"compute": {"virtualenv_dir": None}}
  )
]
''':class:`list` of (:class:`dict`, :class:`dict`): Templates are how we
conditionally assign default values. It is a list of pair tuples, where the
first in the tuple is a "pattern" and the second is the default values. If the
pattern is in the settings, then the default values are set for any unset
values.
Values are copies recursively, but only if not already set by your settings.'''
class LazyObject:
  '''
  A wrapper class that lazily evaluates (calls :func:`LazyObject._setup`)

  :class:`LazyObject` remains unevaluated until one of the supported magic
  functions are called.

  Based off of Django's LazyObject
  '''
  _wrapped = None
  '''
  The internal object being wrapped
  '''
  def _setup(self):
    """
    Abstract. Must be implemented by subclasses to initialize the wrapped
    object.

    Raises
    ------
    NotImplementedError
        Will throw this exception unless a subclass redefines :func:`_setup`
    """
    raise NotImplementedError(
        'subclasses of LazyObject must provide a _setup() method')
  def __init__(self):
    # None doubles as the "not yet evaluated" sentinel, so wrapping an
    # actual None value would re-trigger _setup on every access.
    self._wrapped = None
  def __getattr__(self, name, *args, **kwargs):
    '''Evaluate on first attribute access, then proxy to the wrapped object.'''
    if self._wrapped is None:
      self._setup()
    return getattr(self._wrapped, name, *args, **kwargs)
  def __setattr__(self, name, value):
    '''Proxy attribute assignment to the wrapped object (except ``_wrapped``).'''
    if name == "_wrapped":
      # Assign to __dict__ to avoid infinite __setattr__ loops.
      self.__dict__["_wrapped"] = value
    else:
      if self._wrapped is None:
        self._setup()
      setattr(self._wrapped, name, value)
  def __delattr__(self, name):
    '''Proxy attribute deletion to the wrapped object; ``_wrapped`` itself is protected.'''
    if name == "_wrapped":
      raise TypeError("can't delete _wrapped.")
    if self._wrapped is None:
      self._setup()
    delattr(self._wrapped, name)
  def __dir__(self):
    """Combine this proxy's names with the wrapped object's (when evaluated)."""
    d = super().__dir__()
    if self._wrapped is not None:
      return list(set(d + dir(self._wrapped)))
    return d
  def __getitem__(self, name):
    '''Proxy ``obj[name]`` reads, evaluating first if needed.'''
    if self._wrapped is None:
      self._setup()
    return self._wrapped[name]
  def __setitem__(self, name, value):
    '''Proxy ``obj[name] = value`` writes, evaluating first if needed.'''
    if self._wrapped is None:
      self._setup()
    self._wrapped[name] = value
  def __delitem__(self, name):
    '''Proxy ``del obj[name]``, evaluating first if needed.'''
    if self._wrapped is None:
      self._setup()
    del(self._wrapped[name])
  def __contains__(self, name):
    '''Proxy ``name in obj`` membership tests, evaluating first if needed.'''
    if self._wrapped is None:
      self._setup()
    return self._wrapped.__contains__(name)
  def __iter__(self):
    '''Proxy iteration, evaluating first if needed.'''
    if self._wrapped is None:
      self._setup()
    return iter(self._wrapped)
class LazySettings(LazyObject):
  '''
  A :class:`LazyObject` proxy for either global Terra settings or a custom
  settings object. The user can manually configure settings prior to using
  them. Otherwise, Terra uses the config file pointed to by
  :envvar:`TERRA_SETTINGS_FILE`

  Based off of :mod:`django.conf`
  '''
  def _setup(self, name=None):
    """
    Load the config json file pointed to by the environment variable. This is
    used the first time settings are needed, if the user hasn't configured
    settings manually.

    Arguments
    ---------
    name : :class:`str`, optional
        The name used to describe the settings object. Defaults to ``settings``

    Raises
    ------
    ImproperlyConfigured
        If the settings has already been configured, will throw an error. Under
        normal circumstances, :func:`_setup` will not be called a second time.
    """
    settings_file = os.environ.get(ENVIRONMENT_VARIABLE)
    if not settings_file:
      desc = ("setting %s" % name) if name else "settings"
      raise ImproperlyConfigured(
          "Requested %s, but settings are not configured. "
          "You must either define the environment variable %s "
          "or call settings.configure() before accessing settings." %
          (desc, ENVIRONMENT_VARIABLE))
    with open(settings_file) as fid:
      self.configure(json.load(fid))
    # Remember where the settings came from; processing_dir defaults off this.
    self._wrapped.config_file = os.environ.get(ENVIRONMENT_VARIABLE)
  def __repr__(self):
    # Hardcode the class name as otherwise it yields 'Settings'.
    if self._wrapped is None:
      return '<LazySettings [Unevaluated]>'
    return str(self._wrapped)
  def configure(self, *args, **kwargs):
    """
    Called to manually configure the settings. The 'default_settings'
    parameter sets where to retrieve any unspecified values from (its
    argument should be a :class:`dict`).

    Arguments
    ---------
    *args :
        Passed along to :class:`Settings`
    **kwargs :
        Passed along to :class:`Settings`

    Raises
    ------
    ImproperlyConfigured
        If settings is already configured, will throw this exception
    """
    # Imported here (not at module scope) to avoid a circular import with
    # terra.core.signals.
    from terra.core.signals import post_settings_configured
    if self._wrapped is not None:
      raise ImproperlyConfigured('Settings already configured.')
    logger.debug2('Pre settings configure')
    self._wrapped = Settings(*args, **kwargs)
    # Apply each matching defaults template: defaults first, then the user's
    # values on top, so unset keys fall back to the template.
    for pattern, settings in global_templates:
      if nested_in_dict(pattern, self._wrapped):
        # Not the most efficient way to do this, but insignificant "preupdate"
        d = {}
        nested_update(d, settings)
        nested_update(d, self._wrapped)
        # Nested update and run patch code
        self._wrapped.update(d)
    def read_json(json_file):
      # In case json_file is an @settings_property function
      if getattr(json_file, 'settings_property', None):
        json_file = json_file(settings)
      with open(json_file, 'r') as fid:
        return Settings(json.load(fid))
    # Replace every *_json key's filename value with the parsed file contents.
    nested_patch_inplace(
        self._wrapped,
        lambda key, value: (isinstance(key, str)
                            and any(key.endswith(pattern)
                                    for pattern in json_include_suffixes)),
        lambda key, value: read_json(value))
    post_settings_configured.send(sender=self)
    logger.debug2('Post settings configure')
  @property
  def configured(self):
    """
    Check if the settings have already been configured

    Returns
    -------
    bool
        Return ``True`` if has already been configured
    """
    return self._wrapped is not None
  def add_templates(self, templates):
    """
    Helper function to easily expose adding more defaults templates to
    :var:`global_templates` specific for an application

    Arguments
    ---------
    templates : list
        A list of pairs of dictionaries just like :var:`global_templates`
    """
    # Pre-extend: inserting at -len(global_templates) prepends the new
    # templates (in order) ahead of the existing ones.
    offset = len(global_templates)
    for template in templates:
      global_templates.insert(-offset, template)
  def __enter__(self):
    # Delegate context management to the wrapped Settings (snapshot/restore).
    if self._wrapped is None:
      self._setup()
    return self._wrapped.__enter__()
  def __exit__(self, exc_type=None, exc_value=None, traceback=None):
    return self._wrapped.__exit__(exc_type, exc_value, traceback)
class ObjectDict(dict):
  '''
  An object dictionary, that accesses dictionary keys using attributes (``.``)
  rather than items (``[]``).
  '''
  def __init__(self, *args, **kwargs):
    # Route construction through update() so nested dicts are merged
    # recursively rather than replaced wholesale.
    self.update(*args, **kwargs)
  def __dir__(self):
    """Include string keys that are valid identifiers, for tab completion."""
    d = super().__dir__()
    return list(set(d + [x for x in self.keys()
                         if isinstance(x, str) and x.isidentifier()]))
  def __getattr__(self, name):
    """Attribute read falls through to item lookup (``obj.x`` -> ``obj['x']``)."""
    try:
      return self[name]
    except KeyError:
      # Re-raise as AttributeError so hasattr()/getattr() behave normally.
      raise AttributeError("'{}' object has no attribute '{}'".format(
          self.__class__.__qualname__, name)) from None
  def __setattr__(self, name, value):
    """Attribute write stores an item (merged via nested update)."""
    self.update([(name, value)])
  def __contains__(self, name):
    # Dotted names ('a.b.c') test nested containment one level at a time.
    if '.' in name:
      first, rest = name.split('.', 1)
      return self.__contains__(first) and (rest in self[first])
    return super().__contains__(name)
  def update(self, *args, **kwargs):
    """Recursive (nested) update instead of dict's shallow update."""
    nested_update(self, *args, **kwargs)
class ExpandedString(str):
  # Marker subclass: a value of this type has already had environment
  # variables (and, for filename-suffixed keys, "~") expanded, so
  # Settings.__getattr__ skips re-expanding it on later accesses.
  pass
class Settings(ObjectDict):
  '''
  The concrete settings container: an :class:`ObjectDict` whose attribute
  access additionally evaluates :func:`settings_property` functions and
  expands environment variables in string values.
  '''
  def __getattr__(self, name):
    '''
    ``__getitem__`` that will evaluate @settings_property functions, and cache
    the values
    '''
    # This is here instead of in LazySettings because the functor is given
    # LazySettings, but then if __getattr__ is called on that, the segment of
    # the settings object that is retrieved is of type Settings, therefore
    # the settings_property evaluation has to be here.
    try:
      val = self[name]
      if isfunction(val) and getattr(val, 'settings_property', None):
        # Ok this ONE line is a bit of a hack :( But I argue it's specific to
        # this singleton implementation, so I approve!
        val = val(settings)
        # cache result, because the documentation said this should happen
        self[name] = val
      if isinstance(val, str) and not isinstance(val, ExpandedString):
        val = os.path.expandvars(val)
        # Filename-like keys (see filename_suffixes) also get "~" expansion.
        if any(name.endswith(pattern) for pattern in filename_suffixes):
          val = os.path.expanduser(val)
        # Wrap in ExpandedString so the expansion is not repeated next access.
        val = ExpandedString(val)
        self[name] = val
      return val
    except KeyError:
      # Throw a KeyError to prevent a recursive corner case
      raise AttributeError("'{}' object has no attribute '{}'".format(
          self.__class__.__qualname__, name)) from None
  def __enter__(self):
    # Snapshot the current settings; __exit__ restores them. Stored via
    # object.__setattr__ so the backup is a real attribute, not a dict key.
    import copy
    object.__setattr__(self, "_backup", copy.deepcopy(self))
  def __exit__(self, type_, value, traceback):
    # Restore the snapshot taken in __enter__.
    self.clear()
    self.update(self._backup)
    del self._backup
settings = LazySettings()
'''LazySettings: The setting object to use through out all of terra'''
class TerraJSONEncoder(JSONEncoder):
  '''
  Json serializer for :class:`LazySettings`.

  .. note::

      Does not work on :class:`Settings` since it would be handled
      automatically as a :class:`dict`.
  '''
  def default(self, obj):
    # Only LazySettings needs special handling; everything else defers to
    # the base class (which raises TypeError for unserializable objects).
    if isinstance(obj, LazySettings):
      if obj._wrapped is None:
        raise ImproperlyConfigured('Settings not initialized')
      return TerraJSONEncoder.serializableSettings(obj._wrapped)
    return JSONEncoder.default(self, obj)  # pragma: no cover
  @staticmethod
  def serializableSettings(obj):
    '''
    Convert a :class:`Settings` object into a json serializable :class:`dict`.

    Since :class:`Settings` can contain :func:`settings_property`, this
    prevents json serialization. This function will evaluate all
    :func:`settings_property`'s for you.

    Arguments
    ---------
    obj: :class:`Settings` or :class:`LazySettings`
        Object to be converted to json friendly :class:`Settings`
    '''
    if isinstance(obj, LazySettings):
      obj = obj._wrapped
    # nested_patch returns a patched copy with every settings_property
    # replaced by its evaluated value.
    return nested_patch(
        obj,
        lambda k, v: isfunction(v) and hasattr(v, 'settings_property'),
        lambda k, v: v(obj))
  @staticmethod
  def dumps(obj):
    '''
    Convenience function for running `dumps` using this encoder.

    Arguments
    ---------
    obj: :class:`LazySettings`
        Object to be converted to json friendly :class:`dict`
    '''
    return json.dumps(obj, cls=TerraJSONEncoder)
import terra.logger # noqa
logger = terra.logger.getLogger(__name__)
| true |
41dfce64eb50f999e39cecf7c30b01a36ffb18cb | Python | mahim007/complete-python | /Python Basics/Touple.py | UTF-8 | 328 | 3.734375 | 4 | [] | no_license | my_touple = (1, 2, 3, 4, 5, 3)
# Tuple method demo: .index returns the position of the FIRST matching
# element (here 2); .count returns how many times the value occurs (here 2).
print(my_touple)
print(my_touple.index(3))
print(my_touple.count(3))
name = 100
my_data = {
    # The key is the *value* of the variable `name`, i.e. the int 100 --
    # not the string 'name'.
    name: 'mahim',
    'age': 25,
    # Booleans are hashable and therefore valid dict keys.
    True: [1, 2, 3],
    # Tuples are immutable and hashable, so they can be dict keys too.
    (1, 2, 3): (1, 2, 3, 4, 5)
}
print(my_data[(1, 2, 3)][0:3])
new_touple = my_touple
# Starred unpacking: x, y, z take the first three items; `others`
# collects the remainder into a *list*.
x, y, z, *others = new_touple
print(x, y, z)
print(others)
| true |
078e04820fdf37cd86595960a7b23eff40466ea0 | Python | YunYouJun/LeetCode | /problems/ping-heng-er-cha-shu-lcof/solution.py | UTF-8 | 625 | 3.0625 | 3 | [
"MIT"
] | permissive | from helpers.py.tree_node import TreeNode
class Solution:
    def isBalanced(self, root: TreeNode) -> bool:
        """Return True when, at every node, the two subtree heights differ
        by at most 1 (height-balanced binary tree).

        Bottom-up: depth() reports a subtree's height, or -1 as an
        "already unbalanced" sentinel that short-circuits the recursion,
        so each node is visited once (O(n) time).
        """
        def depth(node: TreeNode):
            if not node:
                # An empty subtree has height 0.
                return 0
            left = depth(node.left)
            if left == -1:
                return -1
            right = depth(node.right)
            if right == -1:
                return -1
            if abs(left - right) > 1:
                return -1
            return 1 + max(left, right)
        return depth(root) != -1
if __name__ == '__main__':
    # Smoke-test harness: populate test_cases with TreeNode roots to run.
    test_cases = []
    for case in test_cases:
        ans = Solution().isBalanced(case)
        print(ans)
| true |
0e3806d6bcf4d7e2d514b203dc16213b4dd4dd9b | Python | 15807857476/bogdata-2 | /Nyspider/www.jfz.com/products.py | UTF-8 | 5,467 | 2.5625 | 3 | [] | no_license | #coding:utf-8
import requests
from bs4 import BeautifulSoup
import time
import xlwt3
import re
import threading
import datetime
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en-US,en;q=0.5",
"Connection": "keep-alive",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:39.0) Gecko/20100101 Firefox/39.0"}
def get_products(page, max_retries=10):
    """Fetch one listing page and return absolute product-detail URLs.

    Parameters
    ----------
    page : int
        1-based listing page number on www.jfz.com.
    max_retries : int, optional
        Upper bound on download attempts. The original retried forever on a
        bare except, which hangs on a persistent failure.

    Returns
    -------
    list of str
        Absolute 'http://www.jfz.com/product...html' URLs.

    Raises
    ------
    RuntimeError
        If the page could not be downloaded after ``max_retries`` attempts.
    """
    html = None
    for _ in range(max_retries):
        try:
            html = requests.get('http://www.jfz.com/simu/list_p%s.html' % page,
                                headers=headers, timeout=50).text
            break
        except Exception:
            # Transient network error: retry (bounded, unlike the original).
            continue
    if html is None:
        raise RuntimeError('failed to download listing page %s' % page)
    # Product links appear as relative hrefs: href="/product....html"
    rel = 'href="(/product.*?html)"'
    return ['http://www.jfz.com' + url for url in re.findall(rel, html)]
class Product_Infor(threading.Thread):
    # Worker thread: downloads one product detail page and parses its base
    # info, strategy name and dated NAV history into instance attributes
    # (self.name, self.baseinfor, self.strategy, self.history).
    def __init__(self,url):
        super(Product_Infor,self).__init__()
        self.url=url
    def run(self):
        statue=True
        while statue:
            try:
                # .encode('ISO-8859-1').decode('utf-8') re-decodes a response
                # whose charset was mis-detected by requests (mojibake fix).
                # NOTE(review): bare except retries forever on hard failures.
                html=requests.get(self.url,headers=headers,timeout=50).text.encode('ISO-8859-1').decode('utf-8','ignore')
                statue=False
            except:
                continue
        soup=BeautifulSoup(html,'lxml').find('div',attrs={'class':'simu_prodetail_container'})
        baseInforTable=soup.find('div',attrs={'class':'simu_pro_info_wrap'}).find_all('li')
        # Values follow a full-width colon ':' inside each <li>.
        self.name=baseInforTable[0].get_text().split(':')[-1]
        self.baseinfor=[]
        self.strategy=baseInforTable[-2].get_text().split(':')[-1].replace('\n','')
        for li in baseInforTable:
            self.baseinfor.append(li.get_text().split(':')[-1].replace('\n',''))
        pro_knowTable=soup.find('div',attrs={'class':'simu_pro_know_wrap'}).find_all('li')
        for li in pro_knowTable:
            self.baseinfor.append(li.get_text().split(':')[-1].replace('\n',''))
        try:
            table=soup.find('div',attrs={'class':'simu_pro_table_height'}).find('table').find_all('tr')
            self.history={}
            # history maps date string (first cell) -> value (third cell).
            for tr in table:
                tds=tr.find_all('td')
                self.history[tds[0].get_text()]=tds[2].get_text()
        except:
            # No history table on the page: leave an empty dict.
            self.history={}
class Main():
    def __init__(self):
        # Strategy name -> workbook number (1-based); one .xls file per strategy.
        self.label={'股票策略':1,'宏观策略':2,'管理期货':3,'事件驱动':4,'相对价值策略':5,'债券策略':6,'组合基金':7,'复合策略':8}
        # Per-workbook counters: current sheet index and next free column.
        self.sheet_table={'1':0,'2':0,'3':0,'4':0,'5':0,'6':0,'7':0,'8':0}
        self.col_table={'1':1,'2':1,'3':1,'4':1,'5':1,'6':1,'7':1,'8':1}
        self.excel_f=[]
        self.sheets=[]
        for num in range(8):
            excel=xlwt3.Workbook()
            self.excel_f.append(excel)
        for excel in self.excel_f:
            sheet=excel.add_sheet('0', cell_overwrite_ok=True)
            self.sheets.append(sheet)
    def run(self):
        # Crawl listing pages 1..485, one thread per product detail page,
        # then write each product's data into its strategy's workbook.
        page=1
        while page<=485:
            urls=get_products(page)
            threadings=[]
            for url in urls:
                work=Product_Infor(url)
                threadings.append(work)
            for work in threadings:
                work.setDaemon(True)
                work.start()
            for work in threadings:
                work.join()
            for work in threadings:
                try:
                    index=self.label[work.strategy]-1
                except:
                    # NOTE(review): unknown strategies are bucketed into
                    # workbook 3 (index 2) -- confirm this is intended.
                    index=2
                self.write(index,work.history,work.baseinfor)
            print(page,'----OK')
            page+=1
            if page%20==0:
                # Periodic checkpoint so a crash doesn't lose everything.
                self.save()
        self.save()
    def write(self,index,history,baseinfor):
        # One column per product: base info in rows 0..n, then dated history
        # values at rows derived from the date (see get_row).
        col=self.col_table[str(index+1)]
        try:
            num=0
            for infor in baseinfor:
                self.sheets[index].write(num,col,infor)
                num+=1
            for key in history:
                try:
                    row=self.get_row(key)
                except:
                    # Unparseable date string: skip this history entry.
                    continue
                if row>65533 or row<0:
                    # .xls sheets cap at 65536 rows; skip out-of-range dates.
                    continue
                self.sheets[index].write(row,col,history[key])
                self.sheets[index].write(row,0,key)
            self.col_table[str(index+1)]+=1
        except:
            # Write failed (e.g. xlwt column limit): roll over to a fresh
            # sheet in the same workbook and retry this product from column 1.
            self.col_table[str(index+1)]=1
            col=self.col_table[str(index+1)]
            self.sheet_table[str(index+1)]+=1
            self.sheets[index]=self.excel_f[index].add_sheet(str(self.sheet_table[str(index+1)]), cell_overwrite_ok=True)
            num=0
            for infor in baseinfor:
                self.sheets[index].write(num,col,infor)
                num+=1
            for key in history:
                try:
                    row=self.get_row(key)
                except:
                    continue
                if row>65533 or row<0:
                    continue
                self.sheets[index].write(row,col,history[key])
                self.sheets[index].write(row,0,key)
            self.col_table[str(index+1)]+=1
    def get_row(self,date):
        # Map a 'YYYY.MM.DD' date onto a sheet row: row 15 corresponds to
        # today; each day further in the past moves one row down.
        today=datetime.datetime.today()
        pre_day=datetime.datetime.strptime(date,'%Y.%m.%d')
        row=15+(today-pre_day).days
        return row
    def save(self):
        # Flush each workbook to '<strategy>.xls', named after its label.
        num=0
        for excel in self.excel_f:
            for key in self.label:
                if self.label[key]==num+1:
                    excel.save(key+'.xls')
            num+=1
main=Main()
main.run()
| true |
def editNum(string, num):
    """Greedily delete up to ``num`` digits so the remaining number is maximal.

    Scans left to right and deletes a digit whenever it is smaller than its
    right neighbour; after a deletion the same position is re-examined.
    Stops early once ``num`` deletions have been made.

    Prints the result without a trailing newline (unchanged behavior) and now
    also *returns* it, so the function is reusable and testable.
    """
    digits = list(string)  # mutable copy instead of the char-by-char loop
    i = 0
    while i < len(digits) - 1:
        if digits[i] < digits[i + 1]:
            num -= 1
            del digits[i]
            if num == 0:
                break
            # Step back so the new pair at position i is re-checked.
            i -= 1
        i += 1
    ans = "".join(digits)
    print(ans, end="")
    return ans
def generateNum(NumList):
    """Breadth-first expansion: for each value x, append 2*x+1 and 4*x+5.

    Mutates ``NumList`` in place and returns it once it has grown past 1000
    entries. An empty seed list is returned unchanged (the original fell off
    the end and implicitly returned None in that case; no caller relied on
    that). The unreachable ``break`` after the original ``return`` is gone.
    """
    i = 0
    while i < len(NumList):
        NumList.append(2 * NumList[i] + 1)
        NumList.append(4 * NumList[i] + 5)
        if len(NumList) > 1000:
            return NumList
        i += 1
    return NumList
# Read "K num" (or a 3-token variant) and build the K smallest generated
# numbers into one string, then greedily edit it.
try:
    inputList=input().split(" ");
    K=int(inputList[0]);
    num=int(inputList[1]);
except Exception as e:
    # Fallback: if token 1 is not an int (or missing), use token 2 instead.
    # NOTE(review): inputList must already be bound for this branch to work,
    # so only the int() conversions are actually being guarded here.
    K = int(inputList[0]);
    num = int(inputList[2]);
# All candidate numbers in ascending order (generateNum yields > 1000 of them).
model = sorted(generateNum([1]));
todoS="";
i=0;
while(i<K):
    todoS+=str(model[i]);
    i+=1;
print(todoS);
editNum(todoS,num); | true |
6a99617f35385e9bc38de363ca4d7bf4b3a03713 | Python | GypsyJasmer/python | /NewWk4_DungeonCrawl/DungeonFunctions.py | UTF-8 | 9,895 | 3.875 | 4 | [] | no_license | # Functions
import constant as c
import random
# should be used any place the user is required to select yes or no
# should return true for yes and false for no.
def validYesNo(Input):
    """Prompt with ``Input`` until the user types Y or N (case-insensitive).

    Returns True for yes (Y) and False for no (N).
    """
    answer = ""
    # Keep re-prompting until one of the two accepted letters is entered.
    while answer not in ("Y", "N"):
        answer = input(Input).upper()
    return answer == "Y"
# this function will display the instructions for playing this game
def displayInstructions(first_play=False):
    """Print how to play Dungeon Crawl.

    Bug fix: the original hard-coded ``first_play = False`` as a local
    variable, making the "you already know how to play" branch unreachable.
    The flag is now an optional parameter; the default preserves the original
    output (the full welcome text).

    NOTE(review): the branch sense looks inverted (welcome text prints when
    first_play is False) -- kept as-is to preserve behavior; confirm intent.
    """
    if not first_play:
        print("Welcome to Dungeon Crawl! \n" +
              "The goal is to get to the exit of the \n" +
              "dungeon without hitting any of the traps. \n" +
              "Best of luck to you! \n")
    else:
        print("You know how to play Dungeon Crawl, get crawling.")
# this function should return true if the user wants to define the dungeon size
def userDefinedSize():
    # True when the player wants a custom dungeon size instead of the preset
    # 10x12 (delegates the Y/N prompt loop to validYesNo).
    return validYesNo("Would you like to set the size of your dungeon \n" +
                      " or go with the preset size 10x12? \n" +
                      "Y to create your own size or any button for no. \n")


# return a tuple for the user requested dungeon size
def getSize():
    # NOTE(review): int() raises ValueError on non-numeric input; there is no
    # validation here -- confirm callers tolerate that.
    width = int(input("Enter the width of the dungeon"))
    height = int(input("Enter the height of the dungeon"))
    return width, height
# this function will create the 2D list for the map and return it to
# main after it has been created . This can be called with default
# values for the default size, or with custom values from getSize
def createMap(width=c.MAX_ROW, height=c.MAX_COL):
    """Build a ``height`` x ``width`` grid of empty cells and populate it
    with traps, treasure, the player and monsters.

    NOTE(review): the defaults pair width with MAX_ROW and height with
    MAX_COL, which reads swapped -- confirm against constant.py.

    Returns the populated 2D list (rows of cells).
    """
    # One comprehension replaces the append-then-overwrite loop; each row is
    # its own fresh list, so there is no shared-row aliasing.
    dungeonMap = [[c.SPACE] * width for _ in range(height)]
    placeTrap(dungeonMap, width, height)
    placeTreasure(dungeonMap, width, height)
    placePlayer(dungeonMap, width, height)
    placeMonster(dungeonMap, width, height)
    return dungeonMap
# displays the map layout
def displayDungeon(dungeonMap):
print("Here is what the dungeon currently looks like")
for row in dungeonMap:
for col in row:
print(col, end="")
print() # this moves the row down
# These place functions: should be called from within createMap
# and should take the map and number of each thing being
# placed into the map and place the appropriate number of
# objects randomly into the map. Default parameters should
# be defined in constant.py and specified in function definition
def placeTrap(dungeonMap, width, height):
for index in range(c.NUM_TRAPS):
# this is the empty space tuple
trapSpots = findEmpty(dungeonMap, width, height)
dungeonMap[trapSpots[0]][trapSpots[1]] = c.TRAP
# print("Traps are set")
def placeTreasure(dungeonMap, width, height):
for index in range(c.NUM_CASH):
cashSpots = findEmpty(dungeonMap, width, height)
# this is the empty space tuple
dungeonMap[cashSpots[0]][cashSpots[1]] = c.CASH
# print("Treasure is set")
def placePlayer(dungeonMap, width, height):
# this is the empty space tuple
playerSpot = findEmpty(dungeonMap, width, height)
dungeonMap[playerSpot[0]][playerSpot[1]] = c.PLAY
print("This is the player location from place PLayer:", playerSpot)
# print("Player is set")
def placeMonster(dungeonMap, width, height):
# this is the empty space tuple
for index in range(c.NUM_MONSTER):
monsterSpot = findEmpty(dungeonMap, width, height)
dungeonMap[monsterSpot[0]][monsterSpot[1]] = c.MONSTER
# this method will be used in createMap. When called, it will
# find a random location on the map that is empty and return
# a tuple (row, column) for that location
def findEmpty(dungeonMap, width, height):
    """Pick uniformly random cells until an empty one is found.

    Returns a (row, col) tuple. Like the original, this loops forever if
    the map contains no empty cell.
    """
    while True:
        row = random.randrange(height)
        col = random.randrange(width)
        if dungeonMap[row][col] == c.SPACE:
            return row, col
#
# this function will find the player location on the map and return
# a tuple (row, column) of where it is
# does not track player just searches the board.
def findPlayer(dungeonMap, width, height):
    """Locate the player marker; returns a (row, col) tuple.

    Does not track the player between turns -- it searches the whole board.
    """
    return _findMarker(dungeonMap, width, height, c.PLAY)


def findMonster(dungeonMap, width, height):
    """Locate a monster marker; returns a (row, col) tuple.

    NOTE(review): with NUM_MONSTER > 1 this always finds the same (first)
    monster in scan order -- confirm that is intended.
    """
    return _findMarker(dungeonMap, width, height, c.MONSTER)


def _findMarker(dungeonMap, width, height, marker):
    # Shared row-major scan for the two finders above: return the (row, col)
    # of the first cell equal to ``marker``. Like the originals, this never
    # returns if the marker is absent (it keeps rescanning the board).
    while True:
        for row in range(height):
            for col in range(width):
                if dungeonMap[row][col] == marker:
                    return row, col
# this function should get a move from the player and return a
# tuple containing the new location of the player.
# This function should only return a move after validating that it
# is within the array. The player should also be allowed
# choose the letter q to quit the game and return (-1, -1) to main
def getMove(width, height, playerLocation):
    """Prompt the player for a move; returns a (row_delta, col_delta) tuple.

    Bug fix: quitting (Q) now returns the (-1, -1) sentinel documented above
    and expected by checkQuit(). The original mapped Q to (0, 0), so
    checkQuit() could never detect a quit, and the quit "move" was still
    bounds-checked (and could even be rejected at a map corner).

    In-bounds validation of real moves is unchanged: an out-of-bounds move
    re-prompts until a legal one is entered.
    """
    while True:
        playerMove = input("Enter 'U' for Up, 'D' for Down, 'L' for Left, and 'R' for Right and Q for Quit").upper()
        if playerMove != 'U' and playerMove != 'D' and playerMove != 'L' and playerMove != 'R' and playerMove != 'Q':
            print("please enter U, D, L, R, or Q")
            continue
        if playerMove == c.QUIT:
            # Quit bypasses the bounds check; checkQuit understands (-1, -1).
            return -1, -1
        if playerMove == c.UP:
            move = (-1, 0)
        elif playerMove == c.DOWN:
            move = (1, 0)
        elif playerMove == c.LEFT:
            move = (0, -1)
        elif playerMove == c.RIGHT:
            move = (0, 1)
        if checkBounds(playerLocation, move, width, height):
            return move
def getMonsterMove():
    """Return a random (row_delta, col_delta) for the monster, each in -1..1.

    Bug fix: ``random.randrange(-1, 1)`` excludes its upper bound, so the
    monster could only ever yield -1 or 0 and never move down or right;
    ``randrange(-1, 2)`` allows all of -1, 0, 1.
    """
    row = random.randrange(-1, 2)
    col = random.randrange(-1, 2)
    return row, col
# if the player has chosen to quit the game, this should be used to
# terminate the game loop without the use of break
def checkQuit(move):
    """Return False when the player chose to quit (move == (-1, -1)).

    Used by the game loop to terminate without `break`; any other move
    keeps the loop running (returns True).
    """
    return not (move[0] == -1 and move[1] == -1)
# this function should be called inside of getMove and validate
# whether or not the player has attempted to move outside of the
# bounds of the list.
def checkBounds(playerLocation, move, width, height):
    """Return True when playerLocation + move stays inside the grid.

    The grid has ``height`` rows and ``width`` columns (see createMap and
    findEmpty), so the row index (element 0) is checked against ``height``
    and the column index (element 1) against ``width``.

    Bug fix: the original compared the row against width and the column
    against height, which is wrong for non-square dungeons.
    """
    row = playerLocation[0] + move[0]
    col = playerLocation[1] + move[1]
    if row < 0 or col < 0 or row > height - 1 or col > width - 1:
        print("Out of bounds")
        return False
    return True
# these functions should accept the map and move and test
# whether the chosen move will cause the player to win or lose
# the game. Should return true or false. Should not update the
# the map. The result of these should be used to terminate the
# game loop without the use of break
def checkWin(dungeonMap, move, playerLocation):
    # True when the destination cell holds treasure; does not modify the map.
    # The while wrapper runs at most one iteration because of the
    # unconditional return at its end.
    isWin = False
    while not isWin:
        if dungeonMap[playerLocation[0] + move[0]][playerLocation[1] + move[1]] == c.CASH:
            isWin = True
            print("You found the treasure!")
        return isWin


def checkLose(dungeonMap, move, playerLocation):
    # True when the destination cell holds a trap or a monster; does not
    # modify the map. Same single-pass while wrapper as checkWin.
    isLost = False
    while not isLost:
        if dungeonMap[playerLocation[0] + move[0]][playerLocation[1] + move[1]] == c.TRAP:
            print("You stepped on a trap, you lost")
            isLost = True
        elif dungeonMap[playerLocation[0] + move[0]][playerLocation[1] + move[1]] == c.MONSTER:
            print("You stepped on a MONSTER, you're dead!")
            isLost = True
        return isLost
# this function should accept the map and move from main and
# use them to update the Map. You should not update the map
# unless not win and not lose.
# a) pass in the dungeon and the move
# b) update the dungeon moving the player marker(place a new player and clear the old spot)
# c) return type should be void
def updateMap(dungeonMap, playerLocation, move):
    """Move the player marker by ``move``; returns the new (row, col).

    Callers must only invoke this after checkWin/checkLose have both
    returned False for this move.
    """
    return _moveMarker(dungeonMap, playerLocation, move, c.PLAY)


def updateMonsterMap(dungeonMap, monsterLocation, monsterMove):
    """Move the monster marker by ``monsterMove``; returns the new (row, col)."""
    return _moveMarker(dungeonMap, monsterLocation, monsterMove, c.MONSTER)


def _moveMarker(dungeonMap, location, move, marker):
    # Shared implementation for the two update functions above (they were
    # duplicates): clear the old cell, compute the destination, stamp
    # ``marker`` there, and return the new location tuple.
    dungeonMap[location[0]][location[1]] = c.SPACE
    newLocation = (location[0] + move[0], location[1] + move[1])
    dungeonMap[newLocation[0]][newLocation[1]] = marker
    return newLocation
# after the game is over, this function should see if the player
# wishes to start again. Should return true or false. Should use
# validYesNo. If the user wants to play again, start over with a
# new dungeon
def repeat():
    """Ask whether to start a new game; True means play again.

    NOTE(review): the prompt says "any other key for No" but validYesNo
    only accepts Y or N -- the prompt text overstates what is allowed.
    """
    return validYesNo("Do you want to play again? Y for Yes any other key for No")


if __name__ == "__main__":
    # Ad-hoc manual test: show the instructions and exercise the move prompt.
    displayInstructions()
    getMove(10, 10, (2, 2))
| true |
9437a7256db7027b42d145128031eca7510569ee | Python | anyl92/ALGORITHM | /baek/baek_2559_sequnce.py | UTF-8 | 278 | 2.640625 | 3 | [] | no_license | import sys
sys.stdin = open('input.txt', 'r')  # contest pattern: read input from a fixture file

# N numbers, window size K: find the maximum sum of any K consecutive values.
N, K = map(int, input().split())
L = list(map(int, input().split()))

# Sliding window: seed with the first K values, then slide one step at a
# time, subtracting the element that leaves and adding the one that enters.
# Renamed from `sum` so the builtin is not shadowed.
window = sum(L[:K])
ans = window
for i in range(1, N - K + 1):
    window -= L[i - 1]
    window += L[i + K - 1]
    if window > ans:
        ans = window
print(ans) | true |
31c56e156baa4ba03527005b53bb51b06b1a3236 | Python | VivekVinushanth/flask-website | /tethne fix/model/managers/mallet.py | UTF-8 | 13,101 | 2.734375 | 3 | [] | no_license | """
Classes and methods related to the :class:`.MALLETModelManager`\.
"""
import os
import re
import shutil
import tempfile
import subprocess
import numpy as np
from networkx import Graph
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel('ERROR')
from ...classes import GraphCollection
#from ..social import TAPModel
from ..managers import ModelManager
from ...writers.corpora import to_documents
from ..corpus.ldamodel import from_mallet, LDAModel
class MALLETModelManager(ModelManager):
    """
    Generates a :class:`.LDAModel` from a :class:`.Corpus` using
    `MALLET <http://mallet.cs.umass.edu/>`_.

    The :class:`.Corpus` should already contain at least one featurset,
    indicated by the `feature` parameter, such as wordcounts. You may
    specify two working directories: `temppath` should be a working
    directory that will contain intermediate files (e.g. documents, data
    files, metadata), while `outpath` will contain the final model and any
    plots generated during the modeling process. If `temppath` is not
    provided, generates and uses a system temporary directory.

    Tethne comes bundled with a recent version of MALLET. If you would
    rather use your own install, you can do so by providing the
    `mallet_path` parameter. This should point to the directory containing
    ``/bin/mallet``.

    .. autosummary::
       :nosignatures:

       topic_over_time

    Parameters
    ----------
    D : :class:`.Corpus`
    feature : str
        Key from D.features containing wordcounts (or whatever
        you want to model with).
    outpath : str
        Path to output directory.
    temppath : str
        Path to temporary directory.
    mallet_path : str
        Path to MALLET install directory (contains bin/mallet).

    Examples
    --------

    Starting with some JSTOR DfR data (with wordcounts), a typical workflow
    might look something like this:

    .. code-block:: python

       >>> from nltk.corpus import stopwords # 1. Get stoplist.
       >>> stoplist = stopwords.words()

       >>> from tethne.readers import dfr # 2. Build Corpus.
       >>> C = dfr.corpus_from_dir('/path/to/DfR/datasets', 'uni', stoplist)

       >>> def filt(s, C, DC): # 3. Filter wordcounts.
       ...     if C > 3 and DC > 1 and len(s) > 3:
       ...         return True
       ...     return False

       >>> C.filter_features('wordcounts', 'wc_filtered', filt)

       >>> from tethne.model import MALLETModelManager # 4. Get Manager.
       >>> outpath = '/path/to/my/working/directory'
       >>> mallet = '/Applications/mallet-2.0.7'
       >>> M = MALLETModelManager(C, 'wc_filtered', outpath, mallet_path=mallet)

       >>> M.prep() # 5. Prep model.

       >>> model = M.build(Z=50, max_iter=300) # 6. Build model.
       >>> model # (may take awhile)
       <tethne.model.corpus.ldamodel.LDAModel at 0x10bfac710>

    A plot showing the log-likelihood/topic over modeling iterations should be
    generated in your `outpath`. For example:

    .. figure:: _static/images/ldamodel_LL.png
       :width: 400
       :align: center

    Behind the scenes, the :func:`.prep` procedure generates a plain-text corpus
    file at `temppath`, along with a metadata file. MALLET's ``import-file``
    procedure is then called, which translates the corpus into MALLET's internal
    format (also stored at the `temppath`).

    The :func:`.build` procedure then invokes MALLET's ``train-topics``
    procedure. This step may take a considerable amount of time, anywhere from
    a few minutes (small corpus, few topics) to a few hours (large corpus, many
    topics).

    For a :class:`.Corpus` with a few thousand :class:`.Paper`\s, 300 - 500
    iterations is often sufficient to achieve convergence for 20-100 topics.

    Once the :class:`.LDAModel` is built, you can access its methods directly.
    See full method descriptions in :class:`.LDAModel`\.

    For more information about topic modeling with MALLET see
    `this tutorial <http://programminghistorian.org/lessons/topic-modeling-and-mallet>`_.
    """

    def __init__(self, D, feature='unigrams', outpath='/tmp/', temppath=None,
                       mallet_path='./model/bin/mallet-2.0.7'):
        super(MALLETModelManager, self).__init__(outpath, temppath)

        self.D = D                      # Corpus to model.
        self.mallet_path = mallet_path  # Directory containing bin/mallet.
        self.feature = feature          # Featureset key (e.g. wordcounts).

        # Intermediate files live in the temp directory (self.temp is
        # presumably set up by the ModelManager base class -- confirm).
        self.input_path = '{0}/input.mallet'.format(self.temp)   # MALLET-format corpus.
        self.corpus_path = self.temp+'/tethne_docs.txt'          # Plain-text corpus.
        self.meta_path = self.temp+'/tethne_meta.csv'            # Document metadata.
        self.dt = '{0}/dt.dat'.format(self.temp)                 # Document-topic output.
        self.wt = '{0}/wt.dat'.format(self.temp)                 # Word-topic output.

        # Final serialized model goes to the output directory.
        self.om = '{0}/model.mallet'.format(self.outpath)

        self.vocabulary = self.D.features[self.feature]['index']
    def _generate_corpus(self, meta):
        """
        Writes a corpus to disk amenable to MALLET topic modeling.

        Parameters
        ----------
        meta : list
            Metadata field names to export alongside each document.
        """
        # Metadata to export with corpus:
        # (field names, {paper id: {field: value}}).
        # NOTE: .iteritems() means this module targets Python 2.
        metadata = ( meta, { p: { k:paper[k] for k in meta }
                             for p,paper in self.D.papers.iteritems() } )

        # Export the corpus as plain text plus the metadata file.
        to_documents(
            self.temp+'/tethne',            # Temporary files.
            self.D.features[self.feature]['features'],
            metadata=metadata,
            vocab=self.D.features[self.feature]['index'] )

        # Translate the plain-text corpus into MALLET's internal format.
        self._export_corpus()
def _export_corpus(self):
"""
Calls MALLET's `import-file` method.
"""
# bin/mallet import-file --input /Users/erickpeirson/mycorpus_docs.txt
# --output mytopic-input.mallet --keep-sequence --remove-stopwords
self.mallet = self.mallet_path + "/bin/mallet"
try:
exit = subprocess.call( [ self.mallet,
'import-file',
'--input {0}'.format(self.corpus_path),
'--output {0}'.format(self.input_path),
'--keep-sequence', # Required (oddly) for LDA.
'--remove-stopwords' ]) # Probably redundant.
except OSError: # Raised if mallet_path is bad.
raise OSError("MALLET path invalid or non-existent.")
if exit != 0:
raise RuntimeError("MALLET import-file failed: {0}.".format(exit))
    def _run_model(self, max_iter=20, **kwargs):
        """
        Calls MALLET's ``train-topics`` method and tracks its progress.

        Parameters
        ----------
        max_iter : int
            Number of sampling iterations to run (default: 20).
        **kwargs
            Accepted for interface compatibility; not used here.

        Side effects: appends log-likelihood values to ``self.ll`` and
        iteration counters to ``self.ll_iters`` (presumably initialized by
        the base class -- confirm), and increments ``self.num_iters``.
        """
        #$ bin/mallet train-topics --input mytopic-input.mallet --num-topics 100
        #> --output-doc-topics /Users/erickpeirson/doc_top
        #> --word-topic-counts-file /Users/erickpeirson/word_top
        #> --output-topic-keys /Users/erickpeirson/topic_keys

        # Matches MALLET's "<iteration>" progress markers on stderr.
        prog = re.compile('\<([^\)]+)\>')
        ll_prog = re.compile(r'(\d+)')  # NOTE(review): compiled but never used below.
        try:
            p = subprocess.Popen( [ self.mallet,
                        'train-topics',
                        '--input {0}'.format(self.input_path),
                        '--num-topics {0}'.format(self.Z),
                        '--num-iterations {0}'.format(max_iter),
                        '--output-doc-topics {0}'.format(self.dt),
                        '--word-topic-counts-file {0}'.format(self.wt),
                        '--output-model {0}'.format(self.om) ],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)

            # Handle output of MALLET in real time, line by line, until the
            # child process terminates.
            while p.poll() is None:
                l = p.stderr.readline()

                # Keep track of LL/topic: first signed decimal on the line.
                try:
                    this_ll = float(re.findall('([-+]\d+\.\d+)', l)[0])
                    self.ll.append(this_ll)
                except IndexError:  # Not every line will match.
                    pass

                # Keep track of modeling progress ("<N>" iteration markers).
                try:
                    this_iter = float(prog.match(l).group(1))
                    self.ll_iters.append(this_iter)
                    progress = int(100 * this_iter/max_iter)
                    logger.debug('Modeling progress: {0}%.\r'.format( progress ),)
                except AttributeError:  # Not every line will match.
                    pass
            logger.debug('Modeling complete.')

        except OSError:     # Raised if mallet_path is bad.
            raise OSError("MALLET path invalid or non-existent.")

        self.num_iters += max_iter
    def _load_model(self):
        """Parse MALLET's output files into ``self.model`` via from_mallet."""
        # dt = document-topic file, wt = word-topic counts, plus metadata.
        self.model = from_mallet(   self.dt,
                                    self.wt,
                                    self.meta_path )
    # NOTE(review): `figargs` uses a mutable default dict; it is only read
    # here, but a None-then-default pattern would be safer.
    def topic_over_time(self, k, threshold=0.05, mode='documents',
                              normed=True, plot=False,
                              figargs={'figsize':(10,10)} ):
        """
        Representation of topic ``k`` over 'date' slice axis.

        The :class:`.Corpus` used to initialize the :class:`.LDAModelManager`
        must have been already sliced by 'date'.

        Parameters
        ----------
        k : int
            Topic index.
        threshold : float
            Minimum representation of ``k`` in a document.
        mode : str
            'documents' counts the number documents that contain ``k``;
            'proportions' sums the representation of ``k`` in each document
            that contains it.
        normed : bool
            (default: True) Normalizes values by the number of documents in each
            slice.
        plot : bool
            (default: False) If True, generates a MatPlotLib figure and saves
            it to the :class:`MALLETModelManager` outpath.
        figargs : dict
            kwargs dict for :func:`matplotlib.pyplot.figure`\.

        Returns
        -------
        keys : array
            Keys into 'date' slice axis.
        R : array
            Representation of topic ``k`` over time.

        Examples
        --------

        .. code-block:: python

           >>> keys, repr = M.topic_over_time(1, plot=True)

        ...should return ``keys`` (date) and ``repr`` (% documents) for topic 1,
        and generate a plot like this one in your ``outpath``.

        .. figure:: _static/images/topic_1_over_time.png
           :width: 400
           :align: center
        """
        if k >= self.model.Z:
            raise ValueError('No such topic in this model.')

        # Documents in which topic k is represented above `threshold`.
        items = self.model.dimension_items(k, threshold)
        slices = self.D.get_slices('date')
        keys = sorted(slices.keys())

        R = []  # One value per date slice.

        topic_label = self.model.print_topic(k)

        # NOTE(review): if `mode` is neither 'documents' nor 'proportions'
        # and plot=True, `ylabel` is never bound and plotting would raise
        # UnboundLocalError.
        if mode == 'documents': # Documents that contain k.
            for t in keys:
                docs = slices[t]
                Ndocs = float(len(docs))
                Ncontains = 0.
                for i,w in items:
                    if i in docs:
                        Ncontains += 1.
                if normed: # As a percentage of docs in each slice.
                    ylabel = 'Percentage of documents containing topic.'
                    if Ndocs > 0.:
                        R.append( Ncontains/Ndocs )
                    else:   # Empty slice: avoid division by zero.
                        R.append( 0. )
                else: # Raw count.
                    ylabel = 'Number of documents containing topic.'
                    R.append( Ncontains )

        elif mode == 'proportions': # Representation of topic k.
            for t in keys:
                docs = slices[t]
                Ndocs = float(len(docs))
                if normed: # Normalized by number of docs in each slice.
                    ylabel = 'Normed representation of topic in documents.'
                    if Ndocs > 0.:
                        R.append( sum([ w for i,w in items if i in docs ])
                                  /Ndocs )
                    else:   # Empty slice: avoid division by zero.
                        R.append( 0. )
                else:
                    ylabel = 'Sum of topic representation in documents.'
                    R.append( sum([ w for i,w in items if i in docs ]) )

        if plot: # Generates a simple lineplot and saves it in the outpath.
            # Imported lazily so matplotlib is only required when plotting.
            import matplotlib.pyplot as plt
            fig = plt.figure(**figargs)
            plt.plot(np.array(keys), np.array(R))
            plt.xlabel('Time Slice')
            plt.ylabel(ylabel) # Set based on mode.
            plt.title(topic_label)
            plt.savefig('{0}/topic_{1}_over_time.png'.format(self.outpath, k))

        return np.array(keys), np.array(R)
bbdd960fb520ac7077bde4ffe4e2f636240a2b9c | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_155/3290.py | UTF-8 | 315 | 3.140625 | 3 | [] | no_license | # Google Code Jam 2015
# Qualification Round
# Problem A
# Read the number of test cases (Python 2 script).
t = int(raw_input())
for case in xrange(1,t+1):
    # s: first token (apparently the max level, unused below); a: digit string
    # where a[i] is the count of people at level i -- presumably GCJ 2015
    # Qualification problem A ("Standing Ovation"); confirm against statement.
    s,a = raw_input().split()
    c = 0   # Friends added so far.
    x = 0   # People already standing.
    for i in xrange(len(a)):
        if x < i:
            # Fewer than i people standing: add enough friends to reach i.
            c += i-x
            x = i
        # Everyone at level i now stands.
        x += ord(a[i]) - ord('0')
    print 'Case #%d: %d' % (case, c)
| true |
c35b123dd932a28a4e2ea13a0b73db6ac7b11dde | Python | Ntims/Nt_Python | /실습20190510-20194082-김민규/P0718.py | UTF-8 | 225 | 3.796875 | 4 | [] | no_license | from GCDFunction import gcd
n1 = int(input("첫 번째 정수를 입력하세요:"))
n2 = int(input("두 번째 정수를 입력하세요:"))
print(n1, "과", n2, "의 최대 공약수는", gcd(n1, n2), "입니다.")
| true |
0e391ac78b3914a18f147319843a66eb4694a1c0 | Python | budaLi/leetcode-python- | /微信加好友脚本/demo.py | UTF-8 | 2,378 | 2.515625 | 3 | [] | no_license | # conding:utf-8
from appium import webdriver
from time import sleep
'''
deviceName:通过adb device来获取,84325b42; 127.0.0.1:62001
platformName:操作系统的名字,Android
platformVersion:操作系统的版本,6.0.1; 5.1.1
appPackage:被测试app的包,apk,com.tencent.mobileqq
Activity:被测APP的启动项,activity.LoginActivity
'''
# (Translation of the note above: deviceName comes from `adb devices`;
# platformName/platformVersion describe the Android OS; appPackage is the
# package of the app under test; appActivity is its launch activity.)

# Configure the desired capabilities Appium uses to reach the phone app.
caps = {}
# caps['deviceName'] = '127.0.0.1:62001'
caps['deviceName'] = 'HBSBB18830527105'
caps['platformName'] = 'Android'
caps['platformVersion'] = '5.1.1'
caps['appPackage'] = 'com.tencent.mobileqq'
caps['appActivity'] = '.activity.SplashActivity'
caps['unicodeKeyboard'] = True  # Whether Chinese input is needed during automation; default false.
# Connect to the local Appium server and launch the app.
driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', caps)
driver.implicitly_wait(20)
# Locate the "Agree" button on the pop-up dialog and tap it.
driver.find_element_by_xpath('//android.widget.TextView[@content-desc="同意"]').click()
driver.implicitly_wait(10)
# Locate the login button and tap it.
driver.find_element_by_xpath('/hierarchy/android.widget.FrameLayout/android.widget'
                             '.LinearLayout/android.widget.FrameLayout/android.widget'
                             '.RelativeLayout/android.widget.FrameLayout/android.widget'
                             '.FrameLayout/android.widget.RelativeLayout/android.widget'
                             '.LinearLayout/android.widget.Button[2]').click()
driver.implicitly_wait(10)
# Locate the account input field and tap it once.
driver.find_element_by_xpath('//android.widget.EditText[@content-desc="请输入QQ号码或手机或邮箱"]').click()
driver.implicitly_wait(10)
# Type the account number into the account field.
# NOTE(review): the account and password below are hard-coded credentials --
# move them to configuration before sharing this script.
driver.find_element_by_xpath('//android.widget.EditText[@content-desc="请输入QQ号码或手机或邮箱"]').send_keys('317708313')
# Locate the password input field and tap it once.
driver.find_element_by_xpath('//android.widget.EditText[@content-desc="密码 安全"]').click()
driver.implicitly_wait(10)
# Type the password into the password field.
driver.find_element_by_xpath('//android.widget.EditText[@content-desc="密码 安全"]').send_keys('cz2017tw')
driver.implicitly_wait(10)
# Locate the "Log in" button and tap it once.
driver.find_element_by_xpath('//android.widget.ImageView[@content-desc="登 录"]').click()
b10e6124e84d7a00766dcb87d82ca5a074a33e4f | Python | xiangqianzsh/btcrobot | /robot/Util.py | UTF-8 | 2,002 | 3.171875 | 3 | [] | no_license | __author__ = 'yunling'
import numpy as np
def nozeros(s):
    """Replace zeros in-place with the nearest non-zero neighbour.

    A forward pass copies the previous value over each zero; a backward
    pass then fills any zeros still left at the front of the sequence.
    Returns the same (mutated) sequence object.
    """
    # Forward fill: every zero after index 0 inherits its left neighbour.
    for idx in range(1, len(s)):
        if s[idx] == 0:
            s[idx] = s[idx - 1]
    # Backward fill: remaining leading zeros inherit their right neighbour.
    for idx in range(len(s) - 2, -1, -1):
        if s[idx] == 0:
            s[idx] = s[idx + 1]
    return s
def moving_average(x, n, type='exp'):
    """Compute an n-period moving average of x.

    type is 'simple' for a flat window; any other value (default 'exp')
    selects exponentially decaying weights.
    """
    values = np.asarray(x)
    if type == 'simple':
        weights = np.ones(n)
    else:
        weights = np.exp(np.linspace(-1., 0., n))
    # Normalise so the window weights sum to one.
    weights = weights / weights.sum()
    averaged = np.convolve(values, weights, mode='full')[:len(values)]
    # The first n points lack a full window; pin them to the first valid value.
    averaged[:n] = averaged[n]
    return averaged
def relative_strength(prices, n=14):
    """Compute the n-period Relative Strength Index (RSI) of prices.

    http://stockcharts.com/school/doku.php?id=chart_school:glossary_r#relativestrengthindex
    http://www.investopedia.com/terms/r/rsi.asp
    """
    changes = np.diff(prices)
    # Seed the averages from the first n+1 price changes.
    seed = changes[:n + 1]
    avg_gain = seed[seed >= 0].sum() / n
    avg_loss = -seed[seed < 0].sum() / n
    rsi = np.zeros_like(prices)
    rsi[:n] = 100. - 100. / (1. + avg_gain / avg_loss)
    for i in range(n, len(prices)):
        change = changes[i - 1]  # diff array is one element shorter
        if change > 0:
            gain, loss = change, 0.
        else:
            gain, loss = 0., -change
        # Wilder smoothing: blend the previous average with the new move.
        avg_gain = (avg_gain * (n - 1) + gain) / n
        avg_loss = (avg_loss * (n - 1) + loss) / n
        rsi[i] = 100. - 100. / (1. + avg_gain / avg_loss)
    return rsi
def moving_average_convergence(x, nslow=26, nfast=12):
    """Compute MACD (Moving Average Convergence/Divergence).

    Uses a slow and a fast exponential moving average; returns
    (emaslow, emafast, macd), each an array of len(x).
    """
    slow = moving_average(x, nslow, type='exponential')
    fast = moving_average(x, nfast, type='exponential')
    return slow, fast, fast - slow
def marketTrendIndex(p, m):
    """Mean element-wise difference between series p and m."""
    diff = np.asarray(p) - np.asarray(m)
    return sum(diff) / len(p)
| true |
5eeee98f2c98234a54f69ebec540b1e2d920b284 | Python | boyko11/LogisticRegression | /logistic_regression_learner.py | UTF-8 | 2,371 | 3.109375 | 3 | [] | no_license | import numpy as np
from base.base_learner import BaseLearner
class LogisticRegressionLearner(BaseLearner):
def __init__(self, theta_vector_size, learning_rate=0.001):
self.theta = np.random.rand(1, theta_vector_size)
self.learning_rate = learning_rate
self.cost_history = []
self.theta_history = []
def predict(self, feature_data):
return self.predict_for_theta(feature_data, self.theta)
@staticmethod
def predict_for_theta(feature_data, theta):
z = np.dot(np.insert(feature_data, 0, 1, axis=1), np.transpose(theta)).flatten()
return 1/(1 + np.exp(-z))
def calculate_cost(self, predictions, labels):
predictions[predictions == 1] -= 0.00001
predictions[predictions == 0] += 0.00001
predictions_logs = np.log(predictions)
one_minus_prediction_logs = np.log(1 - predictions)
one_errors = labels * predictions_logs
one_error = np.sum(one_errors)
zero_errors = (1 - labels) * one_minus_prediction_logs
zero_error = np.sum(zero_errors)
all_errors_sum = one_error + zero_error
return -all_errors_sum/predictions.shape[0]
def train(self, feature_data, labels):
for i in range(4000):
predictions = self.predict(feature_data)
current_cost = self.calculate_cost(predictions, labels)
# print('current cost: ', current_cost)
self.cost_history.append(current_cost)
self.theta_history.append(self.theta)
self.update_theta_gradient_descent(predictions, feature_data, labels)
min_cost_index = np.argmin(self.cost_history)
self.theta = self.theta_history[min_cost_index]
print('min_cost_index: ', min_cost_index)
# print('min_cost_theta: ', self.theta)
self.cost_history = self.cost_history[:min_cost_index + 1]
def update_theta_gradient_descent(self, predictions, feature_data, labels):
predictions_minus_labels = np.transpose(predictions - labels)
predictions_minus_labels = predictions_minus_labels.reshape(predictions_minus_labels.shape[0], 1)
gradient = np.mean(predictions_minus_labels * feature_data, axis=0)
#add 1 for the bias
gradient = np.concatenate(([1], gradient))
self.theta = self.theta - self.learning_rate * gradient
| true |
5d72efe7e6e6faf2501e03756c60049ce3a783ee | Python | Arsher123/Wizualizacja-danych-BartoszN | /Zadania cw3/zadanie 9.py | UTF-8 | 252 | 3.234375 | 3 | [] | no_license | import math
def ciag_iloczyn(* liczby):
if len(liczby) == 0:
return 0.0
else:
iloczyn = 1.0
for i in liczby:
iloczyn = iloczyn*i
return iloczyn
# Demo: the empty call prints 0.0; the full call prints 8! = 40320.0.
print(ciag_iloczyn())
print(ciag_iloczyn(1,2,3,4,5,6,7,8))
a4dd9e32d5f5f34696733a0dfacd2e1f2092a200 | Python | tsuki1646/Python | /MachineLearning/src/ch5/07_20170501/keras-bmi.py | UTF-8 | 1,433 | 2.8125 | 3 | [] | no_license | from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.callbacks import EarlyStopping
import pandas as pd, numpy as np
# Load the BMI data and normalise it.
csv = pd.read_csv("../../ch4/05_20170418/bmi.csv")
# Weight and height columns, scaled into roughly [0, 1].
# NOTE(review): .as_matrix() is a deprecated pandas API (use .values).
csv["weight"] /= 100
csv["height"] /= 200
X = csv[["weight", "height"]].as_matrix()
# Labels: one-hot encode thin/normal/fat.
bclass = {"thin":[1,0,0], "normal":[0,1,0], "fat":[0,0,1]}
y = np.empty((20000,3))
for i, v in enumerate(csv["label"]):
    y[i] = bclass[v]
# Split into training and test sets.
# NOTE(review): slices start at 1, so row 0 is never used -- off-by-one?
X_train, y_train = X[1:15001], y[1:15001]
X_test, y_test = X[15001:20000], y[15001:20000]
# Define the model structure: two hidden ReLU layers with dropout,
# softmax output over the three classes.
model = Sequential()
model.add(Dense(512, input_shape=(2,)))
model.add(Activation('relu'))
model.add(Dropout(0.1))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.1))
model.add(Dense(3))
model.add(Activation('softmax'))
# Compile the model.
model.compile(
    loss = 'categorical_crossentropy',
    optimizer='rmsprop',
    metrics=['accuracy']
)
# Train on the data; EarlyStopping halts when validation loss stalls.
hist = model.fit(
    X_train, y_train,
    batch_size = 100,
    nb_epoch = 20,
    validation_split=0.1,
    callbacks=[EarlyStopping(monitor='val_loss', patience=2)],
    verbose = 1
)
# Evaluate on the held-out test data.
score = model.evaluate(X_test, y_test)
print('loss=', score[0])
print('accuracy', score[1])
| true |
74d528b6f771ede3c1b34da1ef2bf356dfd10882 | Python | DaHuO/Supergraph | /codes/CodeJamCrawler/16_0_4_neat/16_0_4_n3tr_app.py | UTF-8 | 763 | 3.3125 | 3 | [] | no_license |
def generate_pattern(p,c=3):
    """Print every L/G pattern of width p, self-substituted c-1 times.

    Enumerates all p-bit binary strings, maps 1->'L' and 0->'G', then
    repeatedly substitutes the base pattern into itself (each 'G' becomes
    a run of 'G's, each 'L' the base string) c-1 times.  (Python 2.)
    """
    max_str = '1' * p
    max_val = int(max_str, 2)   # Largest p-bit value, i.e. 2**p - 1.
    for i in xrange(0, max_val+1):
        n = "{0:b}".format(i)
        # Left-pad the binary form with zeros to exactly p characters.
        if len(n) < len(max_str):
            n = ('0' * (len(max_str) - len(n))) + n
        n = n.replace('1','L').replace('0','G')
        # NOTE(review): `p` is rebound here (shadowing the width parameter)
        # and the inner loop reuses `i`; harmless as written, but fragile.
        p = n
        for i in range(1, c):
            p = p.replace('G','G' * len(n)).replace('L',n)
        print p
def read_file():
    """Read '4-small.in' and return its lines without trailing newlines.

    Fix: the original left the file handle open; `with` guarantees it is
    closed even if reading raises.
    """
    with open('4-small.in', 'r') as f:
        return f.read().splitlines()
# Driver (Python 2): read the dataset, then emit one output line per case.
lines = read_file()
t = int(lines[0]) # read a line with a single integer
for i in xrange(1, t + 1):
    line = lines[i]
    comp = line.split(' ')
    # K is used below; C and S are parsed but kept as strings and unused.
    # NOTE(review): confirm whether C and S should influence the output.
    K, C, S = int(comp[0]), comp[1], comp[2]
    print "Case #{}: {}".format(i, ' '.join([str(n) for n in range(1, K+1)]))
| true |