blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
8a8396457d58e52337d6ab7d416069d0f10d7ca1 | Python | michaelrgalloway/Reinforcement-Q-Learning-Smartcab | /agent.py | UTF-8 | 6,002 | 3.203125 | 3 | [] | no_license | import random
from environment import Agent, Environment
from planner import RoutePlanner
from simulator import Simulator
class LearningAgent(Agent):
    """An agent that learns to drive in the smartcab world via tabular Q-learning."""
    def __init__(self, env):
        """Set colour, attach a route planner, and initialise Q-learning state."""
        super(LearningAgent, self).__init__(env) # sets self.env = env, state = None, next_waypoint = None, and a default color
        self.color = 'red'  # override color
        self.planner = RoutePlanner(self.env, self)  # simple route planner to get next_waypoint
        # Q-learning bookkeeping: Q maps state tuple -> {action: value}.
        self.Q = {}
        self.alpha = 0.1    # learning rate
        self.epsilon = 0.1  # exploration probability (epsilon-greedy)
        self.gamma = 0.8    # discount factor
        self.action = None  # action taken on the previous update() call
        self.reward = 0     # reward received for the previous action
        self.reachedCount = 0  # number of trials that reached the destination
    def reset(self, destination=None):
        """Re-route the planner for a new trip; the Q table persists across trials."""
        self.planner.route_to(destination)
        # TODO: Prepare for a new trip; reset any variables here, if required
    # This function picks the action for a state. Since the learner is trying
    # to reach a waypoint, that waypoint action is attempted first unless its
    # recorded Q-value is negative; otherwise the action with the highest
    # score is used. Actions with no Q entry yet count as 0. The values are
    # eventually updated by the Q-learner at the end of update().
    def getActionToAttempt(self,actions, preferredAction):
        """Return the action to take given `actions` ({action: Q-value} for the
        current state) and the planner's `preferredAction` (next waypoint)."""
        R = {}
        R['left'] = 0
        R['right'] = 0
        R['forward'] = 0
        R['none'] = 0
        preferred= 0
        if 'left' in actions:
            R['left'] = actions['left']
        if 'right' in actions:
            R['right'] = actions['right']
        if 'forward' in actions:
            R['forward'] = actions['forward']
        if preferredAction in actions:
            preferred = actions[preferredAction]
        if None in actions:
            R['none'] = actions[None]
        # NOTE(review): `preferred` defaults to 0, so an untried preferred
        # action always passes this test; only a negative learned value
        # triggers the fallback to the best-scoring alternative.
        if preferred >= 0:
            return preferredAction
        else:
            m = max(R, key=R.get)
            if m == 'none':
                return None
            else:
                return m
    def update(self, t):
        """One simulation tick: sense, pick an action, act, and update the Q
        entry for the PREVIOUS (state, action) pair using this step's state."""
        # Gather inputs
        self.next_waypoint = self.planner.next_waypoint() # from route planner, also displayed by simulator
        inputs = self.env.sense(self)
        deadline = self.env.get_deadline(self)
        # Save the previous state so its Q entry can be updated at the end of
        # this call (we need the new state's max Q-value first).
        prevState = self.state
        self.state = (inputs['light'],inputs['oncoming'],inputs['right'],inputs['left'],self.next_waypoint)
        # save the value of last action so we can update Qtable with current values at end of function
        prevAction = self.action
        # Epsilon-greedy action selection: explore with probability epsilon,
        # otherwise consult the Q table.
        if random.uniform(0, 1) < self.epsilon:
            print 'Going Random'
            self.action = random.choice([None,'left','right','forward'])
        else:
            if not self.state in self.Q:
                self.Q[self.state] = {}
            self.action = self.getActionToAttempt(self.Q[self.state],self.next_waypoint)
        # Execute action and get reward.
        # `reward` keeps the PREVIOUS step's reward; self.reward becomes the new one.
        reward = self.reward
        self.reward = self.env.act(self, self.action)
        # NOTE(review): 12.0 is assumed to be the reward the environment grants
        # on reaching the destination — confirm against Environment.act.
        if self.reward == 12.0:
            self.reachedCount = self.reachedCount + 1
        # Q-table update for (prevState, prevAction). The previous reward seeds
        # a fresh entry; an existing entry is blended with the discounted max
        # Q-value of the new state:
        #   Q <- (1 - alpha) * Q + alpha * gamma * max_a Q(newState, a)
        # NOTE(review): the immediate reward term is absent from this blend
        # (standard Q-learning uses alpha * (reward + gamma * max)) — confirm
        # whether that is intentional.
        if not prevState in self.Q.keys():
            self.Q[prevState] = {}
        if not prevAction in self.Q[prevState].keys():
            self.Q[prevState][prevAction] = reward
        else:
            r = self.Q[prevState][prevAction]
            maxr = 0.0
            if self.state in self.Q:
                x = self.Q[self.state]
                if len(x) > 0:
                    maxr = max(x.values())
            if maxr == None:
                maxr = 0.0
            self.Q[prevState][prevAction] = ((1 - self.alpha) * r) + (self.alpha * (self.gamma * maxr))
        print "LearningAgent.update():next_waypoint={}, deadline = {}, inputs = {}, action = {}, reward = {}".format(self.next_waypoint,deadline, inputs, self.action, self.reward) # [debug]
        print self.reachedCount
def run():
    """Run the learning agent for a fixed number of simulated trials."""
    # Build the world: environment, tracked agent, then the simulator.
    environment = Environment()  # also adds some dummy traffic
    agent = environment.create_agent(LearningAgent)
    environment.set_primary_agent(agent, enforce_deadline=True)
    # NOTE: set enforce_deadline=False while debugging to allow longer trials.
    simulator = Simulator(environment, update_delay=0.001, display=False)
    # NOTE: reduce update_delay and/or set display=False to speed things up.
    simulator.run(n_trials=100)
    # NOTE: to quit midway, press Esc, close the pygame window, or hit Ctrl+C.
if __name__ == '__main__':
    run()
| true |
68c1f9a9b82e73b32f60e803528158bf1168dac8 | Python | huchengbei/handwriting | /printImg.py | UTF-8 | 4,237 | 2.90625 | 3 | [] | no_license | from PIL import Image, ImageDraw, ImageFont
import codecs as cs
# margin: the space between cell and the border of picture,
# padding: the space between cell and the word
# font: the font of word
# lines: the words
# redLine: 1 means draw picture with red cell, 0 means without, default is 0
# margin: the space between cell grid and the border of the picture
# padding: the space between a cell border and the glyph inside it
# font: the PIL ImageFont used for the glyphs (cell side = font.size + 2*padding)
# lines: the words, one string per row
# redLine: 1 draws red practice-grid guide lines in every cell, 0 skips them
def gen_font_image(margin, padding, font, lines, redLine = 0):
    """Render `lines` of text on a grid of square handwriting-practice cells
    and return the composed PIL Image."""
    col = raw = 0
    size = font.size
    width = 0
    # Grid width = longest (stripped) line; grid height = number of lines.
    for line in lines:
        line = line.strip()
        width = max(width,len(line))
    height = len(lines)
    image = Image.new('RGB', (margin * 2 + (size + padding * 2) * width, margin * 2 + (size + padding * 2) * height), (255, 255, 255))
    draw = ImageDraw.Draw(image)
    if redLine == 1 :
        # For each cell draw the 8 red guide lines (border + mid lines + slashes).
        for raw in range(0,height ):
            for col in range(0,width):
                # draw the overbar
                start = (margin + col * (size + padding * 2), margin + raw * (size + padding * 2))
                end = (margin + col * (size + padding * 2) + (size + padding * 2),margin + raw * (size + padding * 2))
                draw.line([start, end], fill='#ff0000')
                # draw the middle horizontal line
                start = (margin + col * (size + padding * 2), margin + raw * (size + padding * 2) + (size + padding * 2) / 2)
                end = (margin + col * (size + padding * 2) + (size + padding * 2),margin + raw * (size + padding * 2) + (size + padding * 2) / 2)
                draw.line([start, end], fill='#ff0000')
                # draw the left vertical line
                start = (margin + col * (size + padding * 2), margin + raw * (size + padding * 2))
                end = (margin + col * (size + padding * 2),margin + raw * (size + padding * 2) + (size + padding * 2))
                draw.line([start, end], fill='#ff0000')
                # draw the middle vertical line
                start = (margin + col * (size + padding * 2) + (size + padding * 2) / 2, margin + raw * (size + padding * 2))
                end = (margin + col * (size + padding * 2) + (size + padding * 2) / 2, margin + raw * (size + padding * 2) + (size + padding * 2))
                draw.line([start, end], fill='#ff0000')
                # draw the right slash (top-left to bottom-right diagonal)
                start = (margin + col * (size + padding * 2), margin + raw * (size + padding * 2))
                end = (margin + col * (size + padding * 2) + (size + padding * 2), margin + raw * (size + padding * 2) + (size + padding * 2))
                draw.line([start, end], fill='#ff0000')
                # draw the left slash (bottom-left to top-right diagonal)
                start = (margin + col * (size + padding * 2), margin + raw * (size + padding * 2) + (size + padding * 2))
                end = (margin + col * (size + padding * 2) + (size + padding * 2),margin + raw * (size + padding * 2))
                draw.line([start, end], fill='#ff0000')
                # draw the right vertical line
                start = (margin + (col + 1) * (size + padding * 2), margin + raw * (size + padding * 2))
                end = (margin + (col + 1) * (size + padding * 2), margin + raw * (size + padding * 2) + (size + padding * 2))
                draw.line([start, end], fill='#ff0000')
                # draw the bottom horizontal line (redrawn from column 0 each pass)
                start = (margin + 0 * (size + padding * 2), margin + (raw+1) * (size + padding * 2))
                end = (margin + col * (size + padding * 2) + (size + padding * 2), margin + (raw+1) * (size + padding * 2))
                draw.line([start, end], fill='#ff0000')
    # draw the words, one character per cell, row by row
    col = raw = 0
    for line in lines:
        line = line.strip()
        for char in line:
            x = margin + col * (size + padding * 2) + padding
            y = margin + raw * (size + padding * 2) + padding
            draw.text((x, y), char, font=font, fill='#000000')
            col = col + 1
        raw = raw + 1
        col = 0
    return image
if __name__ == '__main__':
    size = 96  # font size in pixels (also the glyph cell size before padding)
    # NOTE(review): requires this TrueType font file next to the script.
    font = ImageFont.truetype('田英章钢笔行书简.ttf', size)
    # Read the words to render, one line per grid row.
    hansfile = cs.open('words.txt', 'r', 'utf-8')
    hans = hansfile.readlines()
    hansfile.close()
    # Plain rendering, then a second image with the red practice grid.
    image = gen_font_image(10,6,font,hans,0)
    image.save(str('words'+'.png'))
    image = gen_font_image(10,6,font,hans,1)
image.save(str('words_with_line'+'.png')) | true |
f173cbfa1551210da6b330280ea788a3ccfc46da | Python | Ani-Gil/Python | /Python 200/145.py | UTF-8 | 297 | 3.53125 | 4 | [] | no_license | # 145.py - 파일의 특정 부분만 복사하기(seek, read, write)
# Copy a specific part of a file (seek, read, write).
spos = 105  # byte offset to start reading from
size = 500  # amount of data to read
# Context managers guarantee both files are closed even on error.
with open('stockcode.txt', 'r') as f, open('stockcode_part.txt', 'w') as h:
    f.seek(spos)
    data = f.read(size)
    # Bug fix: the original called h.wrtie(data), an AttributeError,
    # so nothing was ever written to the output file.
    h.write(data)
ee764da82a9cb986925a213c4dd7d50f10b02c83 | Python | ngoankeoo/real_estate | /analysis/views.py | UTF-8 | 1,747 | 2.625 | 3 | [] | no_license | from django.shortcuts import render
import random
from listings.models import Listing
def index(request):
    """Build the chart context (price series + price-bucket pie) for listings,
    optionally filtered by the ?district= query parameter, and render it."""
    listings = Listing.objects.order_by('-price')
    districts = Listing.objects.order_by('district').distinct().values_list('district', flat=True)

    # Narrow to one district when a non-empty value was submitted.
    chosen_district = request.GET.get('district')
    if chosen_district:
        listings = listings.filter(district=chosen_district)

    labels = []
    datas = []
    labels_pie = ['Thấp hơn 5 (triệu/m²)', 'Từ 5 đến 10 (triệu/m²)', 'Từ 10 đến 20 (triệu/m²)',
                  'Từ 20 đến 30 (triệu/m²)', 'Từ 30 đến 50 (triệu/m²)', 'Trên 50 (triệu/m²)']
    datas_pie = [0, 0, 0, 0, 0, 0]
    # Upper bounds of the first five pie buckets; anything above falls in the last.
    bucket_bounds = (5, 10, 20, 30, 50)

    for listing in listings:
        price = listing.price
        # Keep only plausible prices (drop zero and outliers >= 300).
        if price == 0 or price >= 300:
            continue
        labels.append(len(datas))  # sequential x-axis label
        datas.append(price)
        bucket = 0
        while bucket < len(bucket_bounds) and price > bucket_bounds[bucket]:
            bucket += 1
        datas_pie[bucket] += 1

    context = {
        'district_choices': list(districts),
        'labels': labels,
        'data': datas,
        'labels_pie': labels_pie,
        'datas_pie': datas_pie,
        'values': request.GET,
    }
    return render(request, 'analysis/analysis.html', context)
| true |
cadc5f211723f651b08562b85589e388e39104c6 | Python | cheba4ok/python_for_everybody | /assignments_chapter1-10/ex7_1.py | UTF-8 | 193 | 3.40625 | 3 | [] | no_license | fname = input('Enter a filename : ')
try:
    # `fname` comes from the input() prompt above.
    fhand = open(fname)
except OSError:
    # Narrowed from a bare `except:` so genuine bugs (NameError,
    # KeyboardInterrupt, ...) are no longer silently swallowed.
    print('Error, no such file!!!')
    quit()
# Echo every line of the file, stripped and upper-cased.
for line in fhand:
    line = line.strip().upper()
    print(line)
fhand.close()
| true |
6301057a6a6c41c5cc0e550131ffc0d93ee6df24 | Python | nkmk/python-snippets | /notebook/sys_recursionlimit.py | UTF-8 | 1,100 | 3.03125 | 3 | [
"MIT"
] | permissive | import sys
import resource
# Demo: observing and changing Python's recursion limit and the OS stack
# limit. The `# ...` comments record the observed output / errors.
print(sys.getrecursionlimit())
# 1000
def recu_test(n):
    """Recurse n-1 times, then print 'Finish' (expects n >= 1)."""
    if n == 1:
        print('Finish')
        return
    recu_test(n - 1)
recu_test(950)
# Finish
# recu_test(1500)
# RecursionError: maximum recursion depth exceeded in comparison
# recu_test(995)
# RecursionError: maximum recursion depth exceeded while calling a Python object
sys.setrecursionlimit(2000)
print(sys.getrecursionlimit())
# 2000
recu_test(1500)
# Finish
sys.setrecursionlimit(4)
print(sys.getrecursionlimit())
# 4
# sys.setrecursionlimit(3)
# RecursionError: cannot set the recursion limit to 3 at the recursion depth 1: the limit is too low
sys.setrecursionlimit(10 ** 9)
print(sys.getrecursionlimit())
# 1000000000
# sys.setrecursionlimit(10 ** 10)
# OverflowError: signed integer is greater than maximum
recu_test(10 ** 4)
# Finish
# recu_test(10 ** 5)
# Segmentation fault  (Python limit raised, but the C stack overflows)
print(resource.getrlimit(resource.RLIMIT_STACK))
# (8388608, -1)
# Lift the OS stack-size limit so the deep recursion below can succeed.
resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
print(resource.getrlimit(resource.RLIMIT_STACK))
# (-1, -1)
recu_test(10 ** 5)
# Finish
| true |
19d8efc1b69852e9a5fcffcce14c85e6f528e296 | Python | yamaton/codeforces | /problemSet/553B-Kyoya_and_Permutation.py | UTF-8 | 519 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env python3
"""
Codeforces Round #309 (Div. 1/2)
Problem 553B / 554D. Kyoya and Colored Balls
@author yamaton
@date 2015-08-17
"""
import itertools as it
import functools
import operator
import collections
import math
import sys
def solve(n, k):
    # TODO: algorithm not implemented yet — currently returns None, which
    # makes the ' '.join(...) in main() fail. Placeholder only.
    pass
def print_stderr(*args, **kwargs):
    """Behave exactly like print(), but target standard error."""
    stream = sys.stderr
    print(*args, file=stream, **kwargs)
def main():
    """Read the two input integers (one per line), solve, and print the result."""
    # Bug fix: the original comprehension iterated `range(n)` before `n` was
    # defined, raising NameError; two integers are expected from stdin.
    [n, k] = [int(input()) for _ in range(2)]
    result = solve(n, k)
    print(' '.join(str(n) for n in result))
# Script entry point.
if __name__ == '__main__':
    main()
756753b9ce642edc6f4fcfdc03eb929839b446bc | Python | HugoDelval/anonymisation_docx | /docxImgAnonymisateur/forms.py | UTF-8 | 3,865 | 2.59375 | 3 | [] | no_license | # coding: utf-8
import os
from django import forms
class DocxUploadFileForm(forms.Form):
    """Upload form for the docx anonymiser: a .docx file, a blur radius, and a
    variable number of (original text -> replacement text) pairs.

    The number of extra pairs is passed to the constructor via the `extra`
    kwarg and mirrored back to the client in the hidden `extra_field_count`
    field so the form can be rebuilt with the same shape on POST.
    """
    # User-facing validation messages (French, shown as-is in the UI).
    error_messages = {
        'empty_file': "Le type de fichier est vide. Veuillez uploader un fichier valide svp",
        'wrong_file': "Le type de fichier est invalide. Veuillez uploader un fichier de type Docx (Word)",
        'txt_original_vide': "Le texte final ne peut être rempli qu'à condition que le texte original soit rempli !"
    }
    fichier = forms.FileField()
    niveau_de_flou = forms.IntegerField(max_value=50, min_value=5)
    texte_original = forms.CharField(required=False)
    texte_final = forms.CharField(required=False)
    extra_field_count = forms.IntegerField(widget=forms.HiddenInput())
    extra_fields = 0
    def __init__(self, *args, **kwargs):
        """Pop `extra` (count of additional text pairs), build the base form,
        then generate the extra_texte_original_N / extra_texte_final_N fields."""
        self.extra_fields = int(kwargs.pop('extra', '0'))
        super(DocxUploadFileForm, self).__init__(*args, **kwargs)
        self.fields['extra_field_count'].initial = self.extra_fields
        self.fields['fichier'].widget.attrs\
            .update({
                'placeholder': "Fichier Docx à Anonymiser",
            })
        self.fields['niveau_de_flou'].widget.attrs\
            .update({
                'placeholder': "Rayon du flou",
                'class': 'form-control',
                'value': 17
            })
        self.fields['texte_original'].widget.attrs\
            .update({
                'placeholder': "d'amossys",
                'class': 'form-control',
            })
        self.fields['texte_final'].widget.attrs\
            .update({
                'placeholder': "[du client]",
                'class': 'form-control',
            })
        # One (original, final) CharField pair per requested extra slot.
        for index in range(0, self.extra_fields):
            self.fields['extra_texte_original_{index}'.format(index=index)] = \
                forms.CharField(required=False)
            self.fields['extra_texte_original_{index}'.format(index=index)].widget.attrs\
                .update({
                    'placeholder': "amossys",
                    'class': 'form-control',
                })
            self.fields['extra_texte_final_{index}'.format(index=index)] = \
                forms.CharField(required=False)
            self.fields['extra_texte_final_{index}'.format(index=index)].widget.attrs\
                .update({
                    'placeholder': "[client]",
                    'class': 'form-control',
                })
    def clean_fichier(self):
        """Validate the uploaded file: must be non-empty and start with the
        ZIP magic number "PK\\x03\\x04" (a .docx file is a ZIP archive)."""
        fichier = self.cleaned_data.get('fichier')
        if not fichier:
            raise forms.ValidationError(
                self.error_messages['empty_file'],
                code='empty_file',
            )
        # Read the upload in 10-byte chunks and concatenate.
        # NOTE(review): this assumes Python 2, where chunks are `str`; under
        # Python 3 `'' += bytes` raises TypeError — confirm target runtime.
        s = ''
        for b in fichier.chunks(10):
            s += b
        if not s.startswith("\x50\x4b\x03\x04"):
            raise forms.ValidationError(
                self.error_messages['wrong_file'],
                code='wrong_file',
            )
        return fichier
    def clean(self):
        """Cross-field validation: a 'final' text (base or extra) may only be
        filled when its matching 'original' text is filled too."""
        erreur_txt_or = forms.ValidationError(
            self.error_messages['txt_original_vide'],
            code='txt_original_vide',
        )
        cleaned_data = super(DocxUploadFileForm, self).clean()
        texte_final = cleaned_data.get('texte_final')
        texte_original = cleaned_data.get('texte_original')
        if texte_final and not texte_original:
            # NOTE(review): add_error followed by raise records the error
            # twice on the form — probably only one of the two is needed.
            self.add_error('texte_final', erreur_txt_or)
            raise erreur_txt_or
        for i in range(0, self.extra_fields):
            texte_final = cleaned_data.get('extra_texte_final_{index}'.format(index=i))
            texte_original = cleaned_data.get('extra_texte_original_{index}'.format(index=i))
            if texte_final and not texte_original:
                self.add_error('extra_texte_final_{index}'.format(index=i), erreur_txt_or)
                raise erreur_txt_or
        return cleaned_data
| true |
082dce0cedb5b08162a1683af637e7cf48607cd9 | Python | jjkke88/RL_toolbox | /build/lib.linux-x86_64-2.7/RLToolbox/toolbox/baseline/baseline_tensorflow.py | UTF-8 | 1,706 | 2.609375 | 3 | [
"MIT"
] | permissive | import tensorflow as tf
import numpy as np
import prettytensor as pt
class Baseline(object):
    """Neural-network value-function baseline for policy-gradient methods.

    Lazily builds a small TensorFlow/PrettyTensor regressor (one tanh hidden
    layer) mapping hand-crafted path features to empirical returns.
    """
    coeffs = None
    def __init__(self , session=None):
        # `net` stays None until the first fit() builds the graph.
        self.net = None
        self.session = session
    def create_net(self , shape):
        """Build the regression graph for feature vectors of length `shape`
        and initialise all variables in the stored session."""
        print(shape)
        self.x = tf.placeholder(tf.float32 , shape=[None , shape] , name="x")
        self.y = tf.placeholder(tf.float32 , shape=[None] , name="y")
        # 64-unit tanh hidden layer followed by a scalar output.
        self.net = (pt.wrap(self.x).
                    fully_connected(64 , activation_fn=tf.nn.tanh).
                    fully_connected(1))
        self.net = tf.reshape(self.net , (-1 ,))
        # Elementwise squared error; Adam minimises its (summed) value.
        self.l2 = (self.net - self.y) * (self.net - self.y)
        self.train = tf.train.AdamOptimizer().minimize(self.l2)
        self.session.run(tf.initialize_all_variables())
    def _features(self, path):
        """Build per-timestep features for one path: flattened observations,
        their squares, a scaled time index, its square, and a bias column."""
        o = path["observations"].astype('float32')
        o = o.reshape(o.shape[0] , -1)
        l = len(path["rewards"])
        al = np.arange(l).reshape(-1 , 1) / 100.0
        return np.concatenate([o , o ** 2 , al , al ** 2 , np.ones((l , 1))] , axis=1)
    def fit(self, paths):
        """Regress features onto `path["returns"]` for 10 Adam steps,
        creating the network on first use."""
        featmat = np.concatenate([self._features(path) for path in paths])
        if self.net is None:
            self.create_net(featmat.shape[1])
        returns = np.concatenate([path["returns"] for path in paths])
        for _ in range(10):
            loss, _ = self.session.run([self.l2, self.train], {self.x: featmat , self.y: returns})
    def predict(self, path):
        """Return per-timestep baseline values for `path` (zeros before the
        first fit() has built the network)."""
        if self.net is None:
            return np.zeros(len(path["rewards"]))
        else:
            ret = self.session.run(self.net , {self.x: self._features(path)})
            return np.reshape(ret , (ret.shape[0] ,))
| true |
0e3a4005f473c93f54a782185a4290ec10486ca5 | Python | Fomal-haut/LeetCode | /python/Contains_Duplicate2/Solution.py | UTF-8 | 646 | 3 | 3 | [] | no_license | class Solution:
# @param {integer[]} nums
# @param {nteger} k
# @param {boolean}
def containsNearbyDuplicate(self, nums, k):
container = {}
index = 0
if nums is None or k == 0:
return False
for i in nums:
if i in container:
if index - container[i] <= k:
return True
else:
container[i] = index
else:
container[i] = index
index += 1
return False
# Quick manual check (Python 2 print statement): a single element can never
# have a nearby duplicate, so this prints False.
numbers = [1]
solution = Solution()
result = solution.containsNearbyDuplicate(numbers,2)
print result
| true |
8bee26a354dd0ad11e2a59e9737ab74645d85c49 | Python | smtamh/oop_python_ex | /study_ex/18_SOLID/1802_OCP_3.py | UTF-8 | 708 | 4.3125 | 4 | [] | no_license | # 좋은 예
class Rectangle:
    """Axis-aligned rectangle defined by its width and height."""

    def __init__(self, width, height):
        self.width = width
        self.height = height

    def area(self):
        """Return the rectangle's area (width x height)."""
        return self.height * self.width
class Circle:
    """Circle defined by its radius (pi approximated as 3.14, as elsewhere)."""

    def __init__(self, radius):
        self.radius = radius

    def area(self):
        """Return the circle's area, 3.14 * r^2."""
        return 3.14 * (self.radius ** 2)
class AreaCalculator(object):
    """Sums areas of any objects exposing an .area() method — open for
    extension (new shapes) without modification (OCP)."""

    def __init__(self, shapes):
        self.shapes = shapes

    def total_area(self):
        """Return the sum of area() over all stored shapes (0 when empty)."""
        return sum(shape.area() for shape in self.shapes)
# Demo: the calculator accepts any mix of shapes exposing .area().
shapes = [Rectangle(1, 6), Rectangle(2, 3), Circle(5), Circle(7)]
calculator = AreaCalculator(shapes)
print("The total area is: ", calculator.total_area())
2e35586fc19f243f897d19cd0344f03d8d8d7298 | Python | Estebangbr/Netway-Webapp | /scanthread.py | UTF-8 | 1,321 | 3.421875 | 3 | [] | no_license | import socket
import threading
from queue import Queue
def scan():
    """Scan TCP ports 1-100 on localhost using a pool of 60 worker threads.

    Open ports are printed as they are found and written to
    'portscanaffiche.txt' once the scan completes.
    """
    print_lock = threading.Lock()  # serialises result collection / printing
    target = 'localhost'           # host whose ports are probed
    open_ports = []                # ports where the connection succeeded

    def scan_range(port):
        """Probe a single port; record it when the connection succeeds."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            # connect_ex returns 0 on success instead of raising.
            # Bug fixes vs. the original: the return code was ignored (every
            # port was reported open), file.write() was called with three
            # arguments (TypeError), and .close() was called on the int
            # result — all silently masked by a bare `except: pass`.
            if sock.connect_ex((target, port)) == 0:
                with print_lock:
                    open_ports.append(port)
                    print("Le port", port, "est ouvert !")
        finally:
            sock.close()

    def threader():
        """Worker loop: consume ports from the queue until the process exits."""
        while True:
            port = q.get()
            scan_range(port)
            q.task_done()

    q = Queue()
    for _ in range(60):
        t = threading.Thread(target=threader)
        t.daemon = True  # workers die with the main thread
        t.start()
    for port in range(1, 101):
        q.put(port)
    q.join()

    # Persist the results once, instead of reopening the file per port.
    with open("portscanaffiche.txt", "w+") as out:
        for port in open_ports:
            out.write("Le port {} est ouvert\n".format(port))


scan()
| true |
f2fac8b52835d3aa3dac0604726b6eab7c623e0b | Python | Panda-Lewandowski/Computational-algorithms | /lab6/jacobi.py | UTF-8 | 1,460 | 3.046875 | 3 | [
"MIT"
] | permissive | from math import log, e
import numpy as np
import matrix
def jacobi(p, k, T, Z, K):
    """Newton-type iteration for a 5-unknown nonlinear system (log-variables
    Xe and X[0..3]), using the project's `matrix` module for inversion.

    NOTE(review): the equations resemble a Saha-type ionisation-equilibrium
    system (p: pressure, k: Boltzmann-like constant, T: temperature, Z:
    charges, K: equilibrium constants) — confirm against the assignment spec.
    Returns (dXe, dX): the last computed correction terms.
    """
    # initial values
    Xe = log(0.1)
    X = [0, 0, -5, -15]
    dXe = log(0.1)
    dX = [0, 0, -5, -15]
    A = np.zeros((5, 5))  # Jacobian matrix
    B = np.zeros((5, 1))  # residual vector
    # Iterate until the relative change of the Xe correction drops below 1%.
    while dXe / Xe > 0.01:
        Xe = dXe
        # NOTE(review): `dX = dX[:]` only copies dX onto itself; X is never
        # updated between iterations — possibly `X = dX[:]` was intended.
        dX = dX[:]
        # fill the residuals B and the Jacobian A
        B[0] = -e ** Xe + e ** X[0] + e ** X[1] + e ** X[2] + e ** X[3] - (p / k / T)
        B[1] = -Z[1] * e ** X[1] - Z[2] * e ** X[2] - Z[3] * e ** X[3] + e ** Xe
        B[2] = - Xe - X[1] + X[0] + log(K[0])
        B[3] = - Xe - X[2] + X[1] + log(K[1])
        B[4] = - Xe - X[3] + X[2] + log(K[2])
        A[0][0] = e ** Xe
        for i in range(1, 5):
            A[0][i] = e ** X[i - 1]
        A[1][0] = - e ** Xe
        A[1][2] = Z[1] * e ** X[1]
        A[1][3] = Z[2] * e ** X[2]
        A[1][4] = Z[3] * e ** X[3]
        A[2][0] = 1
        A[2][1] = -1
        A[2][2] = 1
        A[3][0] = 1
        A[3][2] = -1
        A[3][3] = 1
        A[4][0] = 1
        A[4][3] = -1
        A[4][4] = 1
        # Solve A * R = B via the custom matrix helpers (R = A^-1 * B).
        A = nparray_to_list(A)
        B = B.ravel()
        D = matrix.inv(A)
        R = matrix.multi(D, B)
        dXe = R[0]
        dX = R[1:]
    return dXe, dX
def nparray_to_list(a):
    """Convert a 2-D array (or any sequence of rows) into a list of lists."""
    return [list(row) for row in a]
if __name__ == "__main__":
    # Smoke test with arbitrary sample parameters.
    print(jacobi(1000, 8, 2000, [0, 1,2,3, 4], [2,4,6,4,5]))
| true |
509d7205b8780fcf66f8763d991b846c61d8998c | Python | Alan0170/JornadaByLearn | /CalcularMedia.py | UTF-8 | 387 | 3.65625 | 4 | [] | no_license | def calcular_media(notas):
quantidade = len(notas)
soma = sum(notas)
media = soma / quantidade
return media
def verificar_aprovacao(media):
    """Print whether the student passed (mean >= 6) or failed."""
    resultado = 'Aluno Aprovado!' if media >= 6 else 'Aluno reprovado!'
    print(resultado)
# Demo: compute and report the mean of four sample grades.
notas = [6.2,5.5,4.5,7.5]
media = calcular_media(notas)
print(f'A média do aluno é: {media:.1f}')
verificar_aprovacao(media)
10d878fc1b0de364601567d6f6c315cebddcc911 | Python | has-c/roobet-crash-analysis | /config.py | UTF-8 | 839 | 2.546875 | 3 | [] | no_license | import pandas as pd
### Hashing Parameters and Functions
# NOTE(review): `e` here is 2**52 (a hashing modulus-style constant), not
# Euler's number — confirm intended use before renaming.
e = 2**52
salt = "0000000000000000000fa3b65e43e4240d71762a5bf397d5304b2596d116859c"
first_game = "77b271fe12fca03c618f63dfb79d4105726ba9d4a25bb3f1964e435ccf9cb209"
current_game = "edb044b4a7ca73f75ab4da39a8e3984b2fa5f7071ad390d9008739fc650765bc"
### Train Test Split Parameters
train_pct = 0.8  # fraction of data used for training
### Experiment Parameters
multiplier_checkout_level = 2  # cash-out multiplier for the strategy
bet_size = 0.1 #USD
batch_starting_capital = 50 #USD
batch_size = 100  # number of games per batch
def get_experiment_parameters():
    """Return the module-level experiment parameters bundled as a pandas Series."""
    parameters = {
        'multiplier_checkout_level': multiplier_checkout_level,
        'bet_size': bet_size,
        'batch_starting_capital': batch_starting_capital,
        'batch_size': batch_size,
    }
    return pd.Series(parameters)
17882943fa4c371b8a23fc7daa4abba906cbfb34 | Python | wdhorton/pong-tensorflow | /pong/training/load_data.py | UTF-8 | 4,374 | 2.625 | 3 | [] | no_license | import collections
import numpy as np
from pymongo import MongoClient
from math import sqrt
from itertools import chain
from random import random
# MongoDB locations for the recorded game data.
PONG_DB_NAME = 'pong'
COLLECTION_NAME = 'game_data'  # three-class samples (up/stationary/down)
BINARY_COLLECTION_NAME = 'game_data_binary'  # two-class samples (up/down)
def min_max_scale(x, x_min, x_max):
    """Scale `x` into [0, 1] relative to the range [x_min, x_max].

    Returns None when the range is degenerate (x_max == x_min); callers
    (scale_features) filter those terms out. The original fell off the end
    of the function implicitly — this makes the None contract explicit.
    """
    if x_max - x_min:
        return (x - x_min) / (x_max - x_min)
    return None
# Raw feature keys read from each Mongo game-state document.
FEATURES = [
    "ball_x_velocity",
    "ball_y_velocity",
    "ball_x_position",
    "ball_y_position",
    "paddle_position",
]
# Per-feature extremes used by min_max_scale (positions in pixels on a
# 600x400 field; velocity magnitudes bounded by the game physics).
MAXES = {
    "ball_x_velocity": 5,
    "ball_y_velocity": sqrt(50 - 9),
    "ball_x_position": 600,
    "ball_y_position": 400,
    "paddle_position": 400,
}
MINS = {
    "ball_x_velocity": -5,
    "ball_y_velocity": -1 * sqrt(50 - 9),
    "ball_x_position": 0,
    "ball_y_position": 0,
    "paddle_position": 0,
}
def scale_features(row):
    """Build the scaled feature vector for one game-state row.

    Emits min-max-scaled linear, pairwise (quadratic) and triple-wise (cubic)
    products of the raw FEATURES, dropping terms whose feature range is
    degenerate (min_max_scale returns None for those).

    Returns a list instead of the lazy ``filter`` iterator the original
    produced: the caller feeds the result to ``np.asarray``, which under
    Python 3 would wrap the filter object itself rather than the numbers.
    """
    linear_terms = [min_max_scale(row[feature], MINS[feature], MAXES[feature]) for feature in FEATURES]
    quadratic_terms = [min_max_scale(row[feature1] * row[feature2], MINS[feature1] * MINS[feature2], MAXES[feature1] * MAXES[feature2]) for feature1 in FEATURES for feature2 in FEATURES]
    cubic_terms = [min_max_scale(row[feature1] * row[feature2] * row[feature3], MINS[feature1] * MINS[feature2] * MINS[feature3], MAXES[feature1] * MAXES[feature2] * MAXES[feature3]) for feature1 in FEATURES for feature2 in FEATURES for feature3 in FEATURES]
    return [term for term in linear_terms + quadratic_terms + cubic_terms if term is not None]
# Dataset class adapted from https://github.com/tensorflow/tensorflow/blob/r0.11/tensorflow/contrib/learn/python/learn/datasets/mnist.py
class DataSet(object):
    """Minibatch container (adapted from the TensorFlow MNIST tutorial):
    holds parallel `data`/`target` arrays and serves shuffled batches."""
    def __init__(self, data, target):
        # data and target are parallel arrays; data.shape[0] is the sample count.
        self._num_examples = data.shape[0]
        self._data = data
        self._target = target
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def data(self):
        return self._data
    @property
    def target(self):
        return self._target
    @property
    def num_examples(self):
        return self._num_examples
    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size):
        """Return the next `batch_size` examples from this data set.

        When the epoch is exhausted the data is reshuffled in place and the
        batch restarts from index 0 (so a batch never spans two epochs).
        """
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Shuffle data and target with the same permutation.
            perm = np.arange(self._num_examples)
            np.random.shuffle(perm)
            self._data = self._data[perm]
            self._target = self._target[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        return self._data[start:end], self._target[start:end]
def make_training_and_test_sets(one_hot=False, balanced=False, binary=False):
    """Load game rows from Mongo and split ~80/20 into train/test DataSets.

    one_hot:  encode targets as one-hot vectors instead of class indices.
    balanced: cap STATIONARY rows at max(count(UP), count(DOWN)).
    binary:   use the binary collection and two classes (UP/DOWN only).
    """
    game_data = MongoClient()[PONG_DB_NAME][COLLECTION_NAME if not binary else BINARY_COLLECTION_NAME]
    if balanced:
        up_rows = game_data.find({ 'paddle_velocity': { '$lt': 0 } })
        down_rows = game_data.find({ 'paddle_velocity': { '$gt': 0 } })
        stationary_rows = game_data.find({ 'paddle_velocity': 0 }).limit(max(up_rows.count(), down_rows.count()))
        rows = chain(up_rows, down_rows, stationary_rows)
        num_rows = up_rows.count() + down_rows.count() + stationary_rows.count()
    else:
        rows = game_data.find()
        # NOTE(review): num_rows is computed but never used afterwards.
        num_rows = rows.count()
    training_data, training_target = [], []
    test_data, test_target = [], []
    for i, row in enumerate(rows):
        # Random 80/20 assignment of each row to train or test.
        if random() < 0.8:
            target = training_target
            data = training_data
        else:
            target = test_target
            data = test_data
        # Classes: UP -- 0, STATIONARY -- 1, DOWN -- 2
        if row['paddle_velocity'] < 0:
            target.append(0 if not one_hot else (np.array([1, 0, 0]) if not binary else np.array([1, 0])))
        elif row['paddle_velocity'] == 0:
            if binary:
                # NOTE(review): bare `raise` outside an except block raises
                # RuntimeError — presumably meant as "stationary rows must not
                # appear in the binary collection"; an explicit ValueError
                # would be clearer.
                raise
            target.append(1 if not one_hot else np.array([0, 1, 0]))
        else:
            target.append(2 if not one_hot else (np.array([0, 0, 1]) if not binary else np.array([0, 1])))
        row_data = scale_features(row)
        data.append(np.asarray(row_data, dtype=np.float32))
    # NOTE(review): np.int is removed in NumPy >= 1.24; use int or np.int64
    # if the dependency is ever upgraded.
    training_target = np.array(training_target, dtype=np.int)
    training_data = np.array(training_data)
    test_target = np.array(test_target, dtype=np.int)
    test_data = np.array(test_data)
    return DataSet(training_data, training_target), DataSet(test_data, test_target)
| true |
abd433c6b2a9641530f2021958cc4bb2ca748d77 | Python | jakobes/xalbrain | /xalbrain/cellsolver.py | UTF-8 | 22,362 | 2.703125 | 3 | [] | no_license | """This module contains solvers for (subclasses of) CellModel."""
__author__ = "Marie E. Rognes (meg@simula.no), 2012--2013"
import ufl
from xalbrain.markerwisefield import rhs_with_markerwise_field
import dolfin as df
import numpy as np
from xalbrain.cellmodels import (
CellModel,
MultiCellModel,
)
from xalbrain.utils import (
state_space,
time_stepper,
split_function,
)
from abc import (
ABC,
abstractmethod
)
from operator import or_
from functools import reduce
import typing as tp
import os
import logging
from operator import or_
from functools import reduce
from xalbrain.utils import import_extension_modules
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
logger = logging.getLogger(__name__)
class AbstractCellSolver(ABC):
    """Abstract base class for cell solvers.

    Owns the mixed (membrane potential + state variables) function space and
    the previous/current solution fields; concrete subclasses supply
    ``default_parameters`` and ``step``.
    """
    def __init__(
        self,
        *,
        mesh: df.Mesh,
        time: df.Constant,
        cell_model: CellModel,
        periodic_domain: df.SubDomain = None,
        parameters: df.Parameters = None
    ):
        """Store common parameters for all cell solvers.

        NB! The periodic domain has to be set in the pde solver too.
        """
        # Initialise with subclass defaults, then overlay user parameters.
        self._parameters = self.default_parameters()
        if parameters is not None:
            self._parameters.update(parameters)

        # Store input
        self._mesh = mesh
        self._cell_model = cell_model

        # Create time if not given, otherwise use given time
        if time is None:
            self._time = df.Constant(0.0)
        else:
            self._time = time

        # Extract right-hand sides and state count from the cell model.
        self._F = self._cell_model.F
        self._I_ion = self._cell_model.I
        self._num_states = self._cell_model.num_states()

        # Create (vector) function space for potential + states
        self.VS = df.VectorFunctionSpace(
            self._mesh,
            "CG",
            1,
            dim=self._num_states + 1,
            constrained_domain=periodic_domain
        )

        # Solution at the previous (vs_) and current (vs) time step.
        self.vs_ = df.Function(self.VS, name="vs_")
        self.vs = df.Function(self.VS, name="vs")

    @staticmethod
    @abstractmethod
    def default_parameters():
        """Return the default solver parameters."""
        pass

    @property
    def time(self) -> df.Constant:
        """The internal time of the solver."""
        return self._time

    def solution_fields(self) -> tp.Tuple[df.Function, df.Function]:
        """Return current solution object.

        Modifying this will modify the solution object of the solver
        and thus provides a way for setting initial conditions for
        instance.

        *Returns*
          (previous vs_, current vs) (:py:class:`dolfin.Function`)
        """
        return self.vs_, self.vs

    @abstractmethod
    def step(self, t0: float, t1: float) -> None:
        """Advance the solution from `t0` to `t1` in place.

        Bug fix: the abstract signature was missing ``self``, unlike every
        concrete override (e.g. BasicCardiacODESolver.step).
        """
        pass

    def solve(
        self,
        t0: float,
        t1: float,
        dt: float
    ) -> tp.Iterable[tp.Tuple[tp.Tuple[float, float], df.Function]]:
        """Solve the problem in the interval (`t0`, `t1`) with timestep `dt`.

        Arguments:
            t0: Start time.
            t1: End time.
            dt: Time step (``None`` solves the whole interval in one step).

        Yields ((step_t0, step_t1), vs) for each sub-interval; after each
        yield the previous-step field vs_ is advanced to vs.

        *Example of usage*::

          # Create generator
          solutions = solver.solve(0.0, 1.0, 0.1)

          # Iterate over generator (computes solutions as you go)
          for (interval, vs) in solutions:
            # do something with the solutions
        """
        # Solve on entire interval if no timestep is given.
        if dt is None:
            dt = t1 - t0

        # Step through the interval, yielding each intermediate solution.
        for _t0, _t1 in time_stepper(t0, t1, dt):
            self.step(_t0, _t1)

            yield (_t0, _t1), self.vs
            self.vs_.assign(self.vs)
class BasicCardiacODESolver(AbstractCellSolver):
    """A basic, non-optimised solver for systems of ODEs typically
    encountered in cardiac applications of the form: find a scalar
    field :math:`v = v(x, t)` and a vector field :math:`s = s(x, t)`

    .. math::

      v_t = - I_{ion}(v, s) + I_s
      s_t = F(v, s)

    where :math:`I_{ion}` and :math:`F` are given non-linear
    functions, and :math:`I_s` is some prescribed stimulus.

    Here, this nonlinear ODE system is solved via a theta-scheme. By
    default theta=0.5, which corresponds to a Crank-Nicolson
    scheme. This can be changed by modifying the solver parameters.

    .. note::

       For the sake of simplicity and consistency with other solver
       objects, this solver operates on its solution fields (as state
       variables) directly internally. More precisely, solve (and
       step) calls will act by updating the internal solution
       fields. It implies that initial conditions can be set (and are
       intended to be set) by modifying the solution fields prior to
       simulation.

    *Arguments*
      mesh (:py:class:`dolfin.Mesh`)
        The spatial domain (mesh)
      time (:py:class:`dolfin.Constant` or None)
        A constant holding the current time. If None is given, time is
        created for you, initialized to zero.
      model (:py:class:`xalbrain.CellModel`)
        A representation of the cardiac cell model(s)
      I_s (optional) A typically time-dependent external stimulus
        given as a :py:class:`dolfin.GenericFunction` or a
        Markerwise. NB: it is assumed that the time dependence of I_s
        is encoded via the 'time' Constant.
      parameters (:py:class:`dolfin.Parameters`, optional)
        Solver parameters
    """

    def __init__(
        self,
        mesh: df.Mesh,
        time: df.Constant,
        cell_model: CellModel,
        I_s: tp.Union[df.Expression, tp.Dict[int, df.Expression]],
        parameters: df.Parameters = None,
    ) -> None:
        """Create the necessary function spaces """
        # Handle stimulus
        self._I_s = I_s
        super().__init__(mesh=mesh, time=time, cell_model=cell_model, parameters=parameters)

    @staticmethod
    def default_parameters() -> df.Parameters:
        """Initialize and return a set of default parameters."""
        parameters = df.Parameters("BasicCardiacODESolver")
        parameters.add("theta", 0.5)
        # Use iterative solver as default.
        parameters.add(df.NonlinearVariationalSolver.default_parameters())
        parameters["nonlinear_variational_solver"]["newton_solver"]["linear_solver"] = "gmres"
        parameters["nonlinear_variational_solver"]["newton_solver"]["preconditioner"] = "jacobi"
        return parameters

    def step(self, t0: float, t1: float) -> None:
        """Solve on the given time step (`t0`, `t1`).

        End users are recommended to use solve instead.

        Arguments:
            t0: Start time.
            t1: End time.
        """
        timer = df.Timer("ODE step")
        # Extract time mesh
        k_n = df.Constant(t1 - t0)
        # Extract previous solution(s)
        v_, s_ = split_function(self.vs_, self._num_states + 1)
        # Set-up current variables
        self.vs.assign(self.vs_) # Start with good guess
        v, s = split_function(self.vs, self._num_states + 1)
        w, r = split_function(df.TestFunction(self.VS), self._num_states + 1)
        # Define equation based on cell model
        # Backward/forward difference quotients for the theta scheme.
        Dt_v = (v - v_)/k_n
        Dt_s = (s - s_)/k_n
        theta = self._parameters["theta"]
        # Set time (propagates to time-dependent variables defined via self.time)
        t = t0 + theta*(t1 - t0)
        # NOTE(review): this class uses self.time while the sibling solvers use
        # self._time -- confirm the AbstractCellSolver base exposes both.
        self.time.assign(t)
        # theta-weighted midpoints at which the nonlinear terms are evaluated.
        v_mid = theta*v + (1.0 - theta)*v_
        s_mid = theta*s + (1.0 - theta)*s_
        if isinstance(self._cell_model, MultiCellModel):
            # One variational contribution per cell model, each restricted to
            # its own marked subdomain.
            model = self._cell_model
            mesh = model.mesh()
            dy = df.Measure("dx", domain=mesh, subdomain_data=model.markers())
            if self._I_s is None:
                self._I_s = df.Constant(0)
            rhs = self._I_s*w*dy()
            n = model.num_states() # Extract number of global states
            # Collect contributions to lhs by iterating over the different cell models
            # NOTE(review): domains[k] assumes keys() is indexable and ordered
            # consistently with model.models() -- confirm.
            domains = self._cell_model.keys()
            lhs_list = list()
            for k, model_k in enumerate(model.models()):
                n_k = model_k.num_states() # Extract number of local (non-trivial) states
                # Extract right components of coefficients and test functions () is not the same as (1,)
                if n_k == 1:
                    s_mid_k = s_mid[0]
                    r_k = r[0]
                    Dt_s_k = Dt_s[0]
                else:
                    s_mid_k = df.as_vector(tuple(s_mid[j] for j in range(n_k)))
                    r_k = df.as_vector(tuple(r[j] for j in range(n_k)))
                    Dt_s_k = df.as_vector(tuple(Dt_s[j] for j in range(n_k)))
                i_k = domains[k] # Extract domain index of cell model k
                # Extract right currents and ion channel expressions
                F_theta_k = self._F(v_mid, s_mid_k, time=self.time, index=i_k)
                I_theta_k = -self._I_ion(v_mid, s_mid_k, time=self.time, index=i_k)
                # Variational contribution over the relevant domain
                a_k = (
                    (Dt_v - I_theta_k)*w
                    + df.inner(Dt_s_k, r_k)
                    - df.inner(F_theta_k, r_k)
                )*dy(i_k)
                # Add s_trivial = 0 on Omega_{i_k} in variational form:
                a_k += sum(s[j]*r[j] for j in range(n_k, n))*dy(i_k)
                lhs_list.append(a_k)
            lhs = sum(lhs_list)
        else:
            # Single cell model: one form over the whole mesh.
            dz, rhs = rhs_with_markerwise_field(self._I_s, self._mesh, w)
            # Evaluate currents at averaged v and s. Note sign for I_theta
            F_theta = self._F(v_mid, s_mid, time=self.time)
            I_theta = -self._I_ion(v_mid, s_mid, time=self.time)
            lhs = (Dt_v - I_theta)*w*dz + df.inner(Dt_s - F_theta, r)*dz
        # Set-up system of equations
        G = lhs - rhs
        # Solve system
        pde = df.NonlinearVariationalProblem(G, self.vs, J=df.derivative(G, self.vs))
        solver = df.NonlinearVariationalSolver(pde)
        solver_parameters = self._parameters["nonlinear_variational_solver"]
        solver_parameters["nonlinear_solver"] = "snes"
        solver_parameters["snes_solver"]["absolute_tolerance"] = 1e-13
        solver_parameters["snes_solver"]["relative_tolerance"] = 1e-13
        # Tested on Cressman
        solver_parameters["snes_solver"]["linear_solver"] = "bicgstab"
        solver_parameters["snes_solver"]["preconditioner"] = "jacobi"
        solver.parameters.update(solver_parameters)
        solver.solve()
        timer.stop()
class CardiacODESolver(AbstractCellSolver):
    """An optimised solver for systems of ODEs typically
    encountered in cardiac applications of the form: find a scalar
    field :math:`v = v(x, t)` and a vector field :math:`s = s(x, t)`

    .. math::

      v_t = - I_{ion}(v, s) + I_s
      s_t = F(v, s)

    where :math:`I_{ion}` and :math:`F` are given non-linear
    functions, and :math:`I_s` is some prescribed stimulus.

    .. note::

       For the sake of simplicity and consistency with other solver
       objects, this solver operates on its solution fields (as state
       variables) directly internally. More precisely, solve (and
       step) calls will act by updating the internal solution
       fields. It implies that initial conditions can be set (and are
       intended to be set) by modifying the solution fields prior to
       simulation.

    *Arguments*
      mesh (:py:class:`dolfin.Mesh`)
        The spatial mesh (mesh)
      time (:py:class:`dolfin.Constant` or None)
        A constant holding the current time. If None is given, time is
        created for you, initialized to zero.
      model (:py:class:`xalbrain.CellModel`)
        A representation of the cardiac cell model(s)
      I_s (:py:class:`dolfin.Expression`, optional)
        A typically time-dependent external stimulus. NB: it is
        assumed that the time dependence of I_s is encoded via the
        'time' Constant.
      parameters (:py:class:`dolfin.Parameters`, optional)
        Solver parameters
    """

    def __init__(
        self,
        mesh: df.Mesh,
        time: df.Constant,
        model: CellModel,
        I_s: tp.Union[df.Expression, tp.Dict[int, df.Expression]] = None,
        parameters: df.Parameters = None
    ) -> None:
        """Initialise parameters."""
        super().__init__(mesh=mesh, time=time, cell_model=model, parameters=parameters)
        import ufl.classes  # TODO Why?
        self._I_s = I_s
        # Initialize scheme: build the point-integral right-hand side form.
        v, s = split_function(self.vs, self._num_states + 1)
        w, q = split_function(df.TestFunction(self.VS), self._num_states + 1)
        # Workaround to get algorithm in RL schemes working as it only works
        # for scalar expressions.
        F_exprs = self._F(v, s, self._time)
        # If F is an as_vector expression, pair each component with its test
        # function explicitly; otherwise multiply the scalar directly.
        F_exprs_q = ufl.zero()
        if isinstance(F_exprs, ufl.classes.ListTensor):
            for i, expr_i in enumerate(F_exprs.ufl_operands):
                F_exprs_q += expr_i*q[i]
        else:
            F_exprs_q = F_exprs*q
        rhs = F_exprs_q - self._I_ion(v, s, self._time)*w
        # Handle stimulus: only handle single function case for now
        if self._I_s:
            rhs += self._I_s*w
        self._rhs = rhs*df.dP()
        name = self._parameters["scheme"]
        Scheme = self._name_to_scheme(name)
        self._scheme = Scheme(self._rhs, self.vs, self._time)
        # Initialize solver and update its parameters
        self._pi_solver = df.PointIntegralSolver(self._scheme)

    def _name_to_scheme(self, name: str) -> "df.MultiStageScheme":
        """Look up the multi-stage ODE scheme class by name.

        Uses getattr instead of eval: evaluating a string built from a
        user-supplied parameter is an injection hazard, and getattr gives a
        clear AttributeError for unknown scheme names.
        """
        return getattr(df.multistage, name)

    @staticmethod
    def default_parameters() -> df.Parameters:
        """Initialize and return a set of default parameters."""
        parameters = df.Parameters("CardiacODESolver")
        parameters.add("scheme", "RK4")
        return parameters

    def step(self, t0: float, t1: float) -> None:
        """Solve on the given time step (t0, t1).

        End users are recommended to use solve instead.

        Arguments:
            t0: Start time.
            t1: End time.
        """
        # NB: The point integral solver operates on vs directly, map initial condition in vs_ to vs:
        timer = df.Timer("ODE step")
        self.vs.assign(self.vs_)
        dt = t1 - t0
        self._pi_solver.step(dt)
        timer.stop()
class MultiCellSolver(AbstractCellSolver):
    """Cell solver that delegates the ODE step to the compiled
    LatticeODESolver extension, allowing different ODE parameters per
    tagged region of the mesh (regions given by `indicator_function`)."""

    def __init__(
        self,
        time: df.Constant,
        mesh: df.Mesh,
        cell_model: CellModel,
        parameter_map: "ODEMap",
        indicator_function: df.Function,
        periodic_domain: df.SubDomain = None,
        parameters: df.Parameters = None,
    ) -> None:
        """Initialise parameters. NB! Keep I_s for compatibility."""
        super().__init__(
            mesh=mesh,
            time=time,
            cell_model=cell_model,
            periodic_domain=periodic_domain,
            parameters=parameters
        )
        comm = df.MPI.comm_world
        rank = df.MPI.rank(comm)
        # Gather the set of region tags seen by each MPI rank onto rank 0.
        indicator_tags = set(np.unique(indicator_function.vector().get_local()))
        indicator_tags = comm.gather(indicator_tags, root=0)
        if rank == 0:
            # Union of all per-rank tag sets.
            indicator_tags = reduce(or_, indicator_tags)
        else:
            # comm.gather returns None on non-root ranks.
            assert indicator_tags is None
        indicator_tags = df.MPI.comm_world.bcast(indicator_tags, root=0)
        ode_tags = set(parameter_map.get_tags())
        # Every tag that has an ODE assigned must occur in the indicator field.
        assert ode_tags <= indicator_tags, f"Parameter map tags does not match indicator_function: {ode_tags - indicator_tags}"
        self._indicator_function = indicator_function
        # Round to integer tags -- interpolation may have produced
        # non-integral values in the indicator vector.
        self._indicator_function.vector()[:] = np.rint(indicator_function.vector().get_local())
        from extension_modules import load_module
        self.ode_module = load_module(
            "LatticeODESolver",
            recompile=self._parameters["reload_extension_modules"],
            verbose=self._parameters["reload_extension_modules"]
        )
        self.ode_solver = self.ode_module.LatticeODESolver(
            parameter_map,
            self.vs_.function_space().num_sub_spaces()
        )

    @staticmethod
    def default_parameters():
        """Initialize and return a set of default parameters."""
        parameters = df.Parameters("MultiCellSolver")
        parameters.add("reload_extension_modules", False)
        parameters.add("theta", 0.5)
        return parameters

    def step(self, t0: float, t1: float) -> None:
        """Take a step using my much better ode solver."""
        theta = self._parameters["theta"]
        dt = t1 - t0  # TODO: Is this risky?
        # Set time (propagates to time-dependent variables defined via self.time)
        t = t0 + theta*(t1 - t0)
        self._time.assign(t)
        comm = df.MPI.comm_world
        # NOTE(review): rank is computed but never used in this method.
        rank = df.MPI.rank(comm)
        logger.debug("MultiCell ode solver step")
        # The extension solver advances vs_ in place.
        self.ode_solver.solve(self.vs_.vector(), t0, t1, dt, self._indicator_function.vector())
        logger.debug("Copy vector back")
        self.vs.vector()[:] = self.vs_.vector()[:]  # TODO: get local?
        df.MPI.barrier(comm)
class BasicSingleCellSolver(BasicCardiacODESolver):
    """A basic, non-optimised solver for systems of ODEs typically
    encountered in cardiac applications of the form: find a scalar
    field :math:`v = v(t)` and a vector field :math:`s = s(t)`

    .. math::

      v_t = - I_{ion}(v, s) + I_s
      s_t = F(v, s)

    where :math:`I_{ion}` and :math:`F` are given non-linear
    functions, :math:`I_s` is some prescribed stimulus. If :math:`I_s`
    depends on time, it is assumed that :math:`I_s` is a
    :py:class:`dolfin.Expression` with parameter 't'.

    Use this solver if you just want to test the results from a
    cardiac cell model without any spatial mesh dependence.

    Here, this nonlinear ODE system is solved via a theta-scheme. By
    default theta=0.5, which corresponds to a Crank-Nicolson
    scheme. This can be changed by modifying the solver parameters.

    .. note::

       For the sake of simplicity and consistency with other solver
       objects, this solver operates on its solution fields (as state
       variables) directly internally. More precisely, solve (and
       step) calls will act by updating the internal solution
       fields. It implies that initial conditions can be set (and are
       intended to be set) by modifying the solution fields prior to
       simulation.

    *Arguments*
      model (:py:class:`~xalbrain.cellmodels.cardiaccellmodel.CellModel`)
        A cardiac cell model
      time (:py:class:`~dolfin.Constant` or None)
        A constant holding the current time.
      parameters (:py:class:`dolfin.Parameters`, optional)
        Solver parameters
    """

    def __init__(
        self,
        *,
        time: df.Constant,
        cell_model: CellModel,
        parameters: df.Parameters = None
    ) -> None:
        """Create solver from given cell model and optional parameters."""
        msg = "Expecting model to be a CellModel, not {}".format(cell_model)
        assert isinstance(cell_model, CellModel), msg
        # Bug fix: this message used "%r" with str.format, so the value was
        # never substituted into the string.
        msg = "Expecting time to be a Constant instance, not {!r}".format(time)
        assert (isinstance(time, df.Constant)), msg
        msg = "Expecting parameters to be a Parameters (or None), not {}".format(parameters)
        assert isinstance(parameters, df.Parameters) or parameters is None, msg
        # Define carefully chosen dummy mesh
        mesh = df.UnitIntervalMesh(1)
        # Bug fix: BasicCardiacODESolver.__init__ takes the model as the
        # keyword `cell_model` (passing `model=` raised a TypeError).
        super().__init__(mesh=mesh, time=time, cell_model=cell_model, I_s=cell_model.stimulus, parameters=parameters)
class SingleCellSolver(CardiacODESolver):
    """Run a cell model on a dummy one-cell mesh, so the optimised ODE
    solver can be exercised without any real spatial domain."""

    def __init__(
        self,
        *,
        cell_model: CellModel,
        time: df.Constant,
        parameters: df.Parameters=None
    ) -> None:
        """Create solver from given cell model and optional parameters."""
        msg = "Expecting model to be a CellModel, not %r" % cell_model
        assert isinstance(cell_model, CellModel), msg
        msg = "Expecting time to be a Constant instance, not %r" % time
        assert isinstance(time, df.Constant), msg
        msg = "Expecting parameters to be a Parameters (or None), not %r" % parameters
        assert isinstance(parameters, df.Parameters) or parameters is None, msg
        # Define carefully chosen dummy mesh
        mesh = df.UnitIntervalMesh(1)
        super().__init__(
            mesh=mesh,
            time=time,
            model=cell_model,
            I_s=cell_model.stimulus,
            parameters=parameters
        )
class SingleMultiCellSolver(MultiCellSolver):
    """Run MultiCellSolver on a dummy one-cell mesh with a single ODE."""

    def __init__(
        self,
        *,
        time: df.Constant,
        cell_model: CellModel,
        parameters: df.Parameters = None
    ) -> None:
        """Create solver from given cell model and optional parameters."""
        # Define carefully chosen dummy mesh
        mesh = df.UnitIntervalMesh(1)
        _function_space = df.FunctionSpace(mesh, "CG", 1)
        # Tag every dof 1 so the single ODE registered below covers the mesh.
        indicator_function = df.Function(_function_space)
        indicator_function.vector()[:] = 1
        # NOTE(review): the return value is never used; presumably the call is
        # kept for its import side effects -- confirm, otherwise remove.
        extension_modules = import_extension_modules()
        from extension_modules import load_module
        # NOTE(review): this subscripts `parameters`, so the documented
        # default of None would raise here -- confirm callers always pass one.
        ode_module = load_module(
            "LatticeODESolver",
            recompile=parameters["reload_extension_modules"],
            verbose=parameters["reload_extension_modules"]
        )
        odemap = ode_module.ODEMap()
        odemap.add_ode(1, ode_module.SimpleODE())
        super().__init__(
            time=time,
            mesh=mesh,
            cell_model=cell_model,
            parameter_map=odemap,
            indicator_function=indicator_function,
            parameters=parameters
        )
| true |
76c5a12e9511fe0b1b0f2fb96d56717111593f70 | Python | aroques/logistic-regression | /main.py | UTF-8 | 2,602 | 3.078125 | 3 | [
"MIT"
] | permissive | from math import e, pow, log
import numpy as np
import matplotlib.pyplot as plt
def main():
    """Fit logistic regression by batch gradient descent, then plot results."""
    x, y = get_x_y()
    weights = np.random.uniform(-10, 10, x.shape[1])
    min_iterations = 100
    learning_rate = 0.1
    max_iterations = 200000
    error_history = []
    for iteration in range(max_iterations):
        # Only the first `min_iterations` errors are tracked for plotting.
        if iteration < min_iterations:
            error_history.append(insample_error(weights, x, y))
        gradient = calculate_gradient(weights, x, y)
        weights = weights - learning_rate * gradient
        # Stop once the gradient has (almost) vanished.
        if np.sum(np.absolute(gradient)) < 0.000001 and iteration > min_iterations:
            print('number of iterations = {}'.format(iteration))
            break
    print('final weight vector: {}'.format(weights))
    plot_exp(error_history, weights)
def get_logistic_probs(w, x):
    """Return P(y = 1) for every row of x under the weight vector w."""
    return [logistic_fn(w, row) for row in x]
def get_predictions(w, x):
    """Return hard class labels in {-1, +1} for every row of x."""
    rounded = [round(logistic_fn(w, row)) for row in x]
    return [label if label > 0 else -1 for label in rounded]
def plot_exp(ein, w):
    """Plot the tracked in-sample error (left) and the fitted sigmoid (right).

    ein: list of in-sample error values, one per tracked iteration.
    w:   final weight vector [bias, slope].
    """
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10.0, 5.0))
    # NOTE(review): Figure.canvas.set_window_title was removed in
    # Matplotlib 3.6 (use f.canvas.manager.set_window_title) -- confirm the
    # Matplotlib version in use.
    f.canvas.set_window_title('Logistic Regression')
    plt.tight_layout(pad=3.0, w_pad=5.0, h_pad=4.0)
    # axes 1: error curve over the tracked iterations.
    x_ein = np.array(range(0, len(ein)))
    ax1.set(title='In-sample Error',
            xlabel='iteration',
            ylabel='in-sample error',
            )
    ax1.plot(x_ein, ein)
    # axes 2: learned probability curve around the decision boundary.
    # NOTE(review): 3.25 is hard-coded; presumably -w0/w1 for the training
    # data used here -- confirm.
    midpoint = 3.25 # where P(y = 1) = 0.5
    x = range(round(midpoint) - 12, round(midpoint) + 12)
    probs = get_logistic_probs(w, add_bias(x))
    ax2.set(title='Sigmoid Function',
            xlabel='x',
            ylabel='P(y = 1)'
            )
    ax2.plot(x, probs)
    # Dashed vertical line marks the decision boundary.
    ax2.axvline(midpoint, color='orange', ls='--')
    plt.show()
def get_x_y():
    """Load training data; return (features with bias column, labels in {-1,+1})."""
    data = np.genfromtxt('training_data.csv', delimiter=',', dtype='int32')
    features, labels = data[:, 0], data[:, 1]
    return add_bias(features), [label if label == 1 else -1 for label in labels]
def add_bias(x):
    """Prepend a bias coordinate (x0 = 1) to every sample in x."""
    bias_column = np.ones_like(x)
    return np.column_stack((bias_column, x))
def calculate_gradient(w, x, y):
    """Return the batch gradient: negative mean of the per-point gradients."""
    total = sum(partial_gradient(w, xi, yi) for xi, yi in zip(x, y))
    return -(total / x.shape[0])
def partial_gradient(w, x, y):
    """Gradient contribution of a single labelled point (x, y in {-1, +1})."""
    s = np.dot(w, x)
    return (y * x) / (1 + e ** (y * s))
def logistic_fn(w, x):
    """Return P(y = 1 | x) = e^s / (1 + e^s) with s = w . x.

    Computed in a numerically stable way: the original form e^s / (1 + e^s)
    raises OverflowError for moderately large s (math.pow overflows near
    s > 709). Here the exponentiated quantity is always non-positive.
    """
    s = np.dot(w, x)
    if s >= 0:
        return 1.0 / (1.0 + pow(e, -s))
    exp_s = pow(e, s)
    return exp_s / (1.0 + exp_s)
def insample_error(w, x, y):
    """Return the mean logistic (cross-entropy) error over the data set.

    Fix: the accumulator was named `sum`, shadowing the builtin of the same
    name inside this function; renamed to `total`.
    """
    total = 0
    for this_x, this_y in zip(x, y):
        total += pt_error(w, this_x, this_y)
    return total / x.shape[0]
def pt_error(w, x, y):
    """Return the pointwise cross-entropy error log(1 + e^(-y * w.x)).

    Computed as a stable softplus: for z > 0, log(1 + e^z) = z + log(1 + e^-z),
    which avoids the OverflowError the naive form raises for large z.
    """
    z = -y * np.dot(w, x)
    if z > 0:
        return z + log(1 + pow(e, -z))
    return log(1 + pow(e, z))
# Script entry point.
if __name__ == '__main__':
    main()
| true |
600846658c8252713e318aeb241bf7a67e3262c6 | Python | KevinRapa/pyLethe | /src/West_Antechamber.py | UTF-8 | 6,124 | 2.6875 | 3 | [] | no_license | from Foyer import Foy2_Button
import Direction, Id, AudioPlayer
from Structure_gen import Door, Column
from Player import Player
from Things import Statue
from Furniture import Furniture
from Room import Room
from Mechanics import Lever, Button
from Rotunda import Rotu
class Want_Button(Button):
    """Wall button beside the gate; presses forward to the lever in the foyer."""
    def __init__(self, ID):
        super(Want_Button, self).__init__()
        self.FOY2_LVR_ID = ID  # Furniture id of the linked lever in FOY2.
        self.addNameKeys("(?:small )?(?:black )?button")
        self.description = "There's a small black button on the wall next to the gate."
    def event(self, key):
        # Pressing the button fires the foyer lever's own event.
        foyer = Player.getRoomObj(Id.FOY2)
        lever = foyer.getFurnRef(self.FOY2_LVR_ID)
        return lever.event("")
class Want_Door(Door):
    """Ornately carved cobra door at the bottom of the ramp."""
    def __init__(self, direct):
        super(Want_Door, self).__init__(direct)
        self.description = ("The door at the bottom of the ramp catches your eye. "
                            "It's carved very artfully. At its center, a cobra's "
                            "head is carved into the wood.")
class Want_Gate(Door):
    """Heavy gate between the antechamber and the foyer; the player cannot
    open it by hand -- its open/closed state tracks the player's position."""
    def __init__(self, direct):
        super(Want_Gate,self).__init__(direct)
        self.actDialog = ("You wouldn't be able to lift it with your hands.")
        self.description = ("The open gateway leads back into the foyer.")
        self.addNameKeys(str(direct) + " gate", "gate")
    def interact(self, key):
        """Respond to a verb used on the gate; falls back to Door.interact."""
        # op: True while the player stands adjacent to either foyer half,
        # i.e. while the gate is effectively open.
        op = Player.getPos().isAdjacent(Id.FOY1) or Player.getPos().isAdjacent(Id.FOY2)
        # Order matters: "close" is answered first, regardless of gate state.
        if key == "close":
            if op:
                return ("That would only impede your progress.")
            else:
                return ("The gate is closed already!")
        # self.open presumably set by the Door base class -- confirm.
        elif self.open:
            return ("It's just empty space. Maybe you should go through it?")
        elif key == "open" or key == "lift":
            return self.actDialog
        else:
            return super(Want_Gate,self).interact(key)
    def getDescription(self):
        """Describe the gate as open or closed based on the player's position."""
        if Player.getPos().isAdjacent(Id.FOY1) or Player.getPos().isAdjacent(Id.FOY2):
            return self.description
        return "The closed gate bars your way into the foyer."
class Want_Lever(Lever):
    """Hidden lever on the statue plinth; pulling it rotates the rotunda."""
    def __init__(self):
        super(Want_Lever,self).__init__()
        self.description = ("It's a black iron lever resting on the plinth of the statue.")
        self.searchDialog = ("There's a pile of gold! No, not really, just a lever.")
        # Fixed user-visible typo: "here" -> "hear".
        self.actDialog = ("You pull the lever. The room vibrates and you " +
                          "hear a prolonged rumble past the wall to your west.")
        self.addNameKeys("lever", "(?:black )?(?:iron )?lever")
    def event(self, key):
        """Rotate the rotunda, unless it is already in the EAST_WEST position."""
        ref = Player.getRoomObj(Id.ROTU)
        if ref.getState() == Rotu.EAST_WEST:
            # Already aligned: the lever clicks but nothing moves.
            return ("You pull the lever, but nothing happens except a faint " +
                    "-click- sounding past the wall to your west.")
        else:
            AudioPlayer.playEffect(19, 30)
            ref.rotate()
            return self.actDialog
class Want_Pillars(Column):
    """Grand sandstone columns holding up the antechamber ceiling."""
    def __init__(self):
        super(Want_Pillars, self).__init__()
        self.addNameKeys("pillars?", "columns?")
        self.description = ("They're grooved, sandstone pillars holding up the "
                            "ceiling two stories above. They're grand- about 5 "
                            "feet in diameter and stand on square plinths.")
"""
Contains a hidden lever that can be pulled to rotate the rotunda.
Room description doesn't refer to lever. Player can assume one is there
because there are one's in Stud, Look, and Iha1.
"""
class Want(Room):
    """The west antechamber room itself."""
    def __init__(self, name, ID):
        super(Want, self).__init__(name, ID)
    def getBarrier(self, direct):
        """Explain (with a sound effect) why a blocked direction is impassable."""
        if direct == Direction.WEST:
            # The west door vanishes once the rotunda has rotated away.
            AudioPlayer.playEffect(6)
            return "The door is missing!"
        if direct == Direction.EAST:
            # The east exit is barred while the foyer gate is closed.
            AudioPlayer.playEffect(4)
            return "The gate that way is closed."
        return self.bumpIntoWall()
class Want_Ramp(Furniture):
    """Sloping ramp at the far end of the antechamber, ending at a door."""
    def __init__(self):
        super(Want_Ramp, self).__init__()
        self.addNameKeys("ramp")
        self.searchDialog = "There's nothing there except dust and a few cobwebs."
        self.description = ("At the far end of the antechamber, a ramp slopes "
                            "downward about six feet before terminating at a door.")
class Want_Statue(Statue):
    """Statues of Egyptian gods; one conceals the rotunda lever."""
    def __init__(self):
        super(Want_Statue, self).__init__()
        self.addNameKeys("statues")
        self.description = ("Inspecting each statue, you discover each to be "
                            "depicting an Egyptian god. There's Anubis, god "
                            "of the dead, Isis, goddess of magic, Thoth, god of "
                            "wisdom, and Wadjet, goddess of protection. You "
                            "notice what appears to be a lever attached to "
                            "the base of one of them.")
        self.searchDialog = ("They are plain statues. Upon closer inspection "
                             "of one though, you find a lever hidden.")
        self.actDialog = ("You feel a statue, but you are discomforted in thinking "
                          "that somehow, the other statues may be watching you.")
class Want_Torches(Furniture):
    """Standing corner torches; too heavy for the player to take."""
    def __init__(self):
        super(Want_Torches, self).__init__()
        # Taking is explicitly refused via the GET verb pattern.
        self.addActKeys(Furniture.GETPATTERN)
        self.addNameKeys("(?:standing )?torch(?:es)?", "(?:metal )?baskets",
                         "(?:burning )?(?:wood(?:en)? )?chunks")
        self.description = ("Tall tan obelisks standing in the corners of the room "
                            "support metal baskets of burning wood chunks. They "
                            "are burning quite audibly and furiously.")
        self.actDialog = ("These are large standing torches, and much too heavy "
                          "for you to just take and carry around. Find one on a "
                          "wall somewhere.")
ff61d4bb0ae2f301ad5ae4d98e5b77bf4f6d033a | Python | eaglez1111/RoboMath16811 | /hw2/pg.py | UTF-8 | 884 | 2.734375 | 3 | [] | no_license | import numpy as np
def formR():
    """Build the Sylvester-style resultant matrix of p and q and print
    the determinants of it and two related 4x4 matrices.

    Fix: the Python-2-only `print expr` statements are now parenthesized;
    with a single argument the output is identical on Python 2 and the
    function also runs under Python 3.
    """
    p = np.array([1,-4,6,-4])
    q = np.array([1,2,-8])
    A = np.zeros([5,5],dtype='float32')
    # Rows 0-1 hold shifted copies of p, rows 2-4 shifted copies of q.
    A[0,0:4],A[1,1:5] = p,p
    A[2,0:3],A[3,1:4],A[4,2:5] = q,q,q
    print(np.linalg.det(A))
    A1 = np.array([[-4,6,-4,0],[1,-4,6,-4],[2,-8,0,0],[1,2,-8,0]])
    A2 = np.array([[1,6,-4,0],[0,-4,6,-4],[1,-8,0,0],[0,2,-8,0]])
    print(A1)
    print(np.linalg.det(A1))
    print(A2)
    print(np.linalg.det(A2))
def main1():
    """Demonstrate that a list built from one function holds the same object.

    Fix: `print 1` (Python-2-only syntax) replaced with `print(1)`, which
    produces identical output on Python 2 and is valid on Python 3.
    """
    def f():
        print(1)
    func = [f]*2
    func.append(f)
    func[0]()
    func[1]()
def svdSolve(A,b):
    """Solve A x = b via the SVD: x = V diag(1/sigma) U^T b."""
    # np.linalg.svd returns U, the singular values, and V transposed.
    u, sigma, v_t = np.linalg.svd(A)
    projected = np.dot(u.T, b)
    scaled = np.linalg.solve(np.diag(sigma), projected)
    return np.dot(v_t.T, scaled)
def main():
    """Solve the example 3x3 system with svdSolve and print the solution.

    Fix: the Python-2-only `print expr` statement is now parenthesized,
    giving identical output on Python 2 and valid syntax on Python 3.
    """
    A = np.array([[10,6,4],[5,3,2],[1,1,0]],dtype='float32')
    b = np.array([2,1,-1],dtype='float32')
    print(svdSolve(A,b))
if __name__ == "__main__":
main()
| true |
3142444ab1d3c26b6c83219e13a53b0feca99392 | Python | QHedgeTech/witables | /childnumber.py | UTF-8 | 2,129 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
# Store the filepath for file manipulation
import os
filepath = os.path.abspath(os.path.dirname(__file__))
# Add the file path to the system path to import framework file.
import sys
if filepath not in sys.path:
sys.path.append(filepath)
# Debug Module.
import cgitb
cgitb.enable()
# Framework module
from framework import *
# Show the node as group structure
def makePlainText(activeFile, activePath):
    """Return the node's count as a string: number of children for a GROUP
    node, number of rows for a TABLE node.

    ARRAY and LEAF nodes are not handled and raise AttributeError.
    Fix: the database file is now closed via try/finally -- previously the
    raise for ARRAY/LEAF nodes leaked the open file handle.
    """
    # Open database file.
    database = open_file(filepath + databaseDirectory + '/' + activeFile, mode = 'r')
    try:
        # Get the node with path
        node = database.get_node(activePath)
        output = ''
        # Read the class once instead of four times.
        node_class = node._f_getattr('CLASS')
        # Allowed classnames: 'Group', 'Leaf', 'Table' and 'Array'
        if node_class == 'GROUP':
            output = node._v_nchildren
        if node_class == 'TABLE':
            output = node.nrows
        if node_class == 'ARRAY':
            # Array class is not handled yet.
            raise AttributeError
        if node_class == 'LEAF':
            # Leaf class is not handled, don't know what to do with it now.
            raise AttributeError
    finally:
        # Always close the database, even when an unhandled class raises.
        database.close()
    return str(output)
def application(environ, start_response):
    """WSGI entry point: return the child/row count of the node identified
    by the `file` and `path` query-string parameters as UTF-8 plain text."""
    # Process the parameters if any.
    parameters = parse_qs(environ.get('QUERY_STRING', ''))
    # Test if we've got an active filename.
    if 'file' not in parameters.keys():
        return errorPage('Missing argument. childnumber page needs a filename argument.', start_response)
    # Test if we've got a node path.
    if 'path' not in parameters.keys():
        return errorPage('Missing argument. childnumber page needs a path argument.', start_response)
    # Get the filename
    activeFile = parameters['file'][0]
    # Get the node path
    activePath = parameters['path'][0]
    # Make body answer
    output = makePlainText(activeFile, activePath)
    # Encode it
    utf8_version = output.encode('utf-8')
    # Make answer header
    status = '200 OK'
    response_headers = [('Content-Type', 'text/html; charset=utf-8'), ('Content-Length', str(len(utf8_version)))]
    start_response(status, response_headers)
    # Return the body answer
    return [utf8_version]
# NOTE(review): module-level debug call -- this runs on every import
# (including under the WSGI server) and requires the file to exist;
# consider removing it or guarding it with `if __name__ == '__main__':`.
print makePlainText('QDatabaseESFinal.h5', '/Economics')
| true |
2f89388896eedf08e1091449f0fb2cce8c035403 | Python | liuyuhang791034063/LeetCode | /Python3/Minimum K number.py | UTF-8 | 490 | 2.6875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: Minimum K number
Description:
Author: God
date: 2018/10/23
-------------------------------------------------
Change Activity: 2018/10/23
-------------------------------------------------
"""
__author__ = 'God'
class Solution:
    def GetLeastNumbers_Solution(self, tinput, k):
        """Return the k smallest elements of tinput in ascending order,
        or [] when k exceeds the length of tinput.

        Fix: uses sorted() instead of list.sort(), so the caller's list is
        no longer mutated as a side effect.
        """
        if k > len(tinput):
            return []
        return sorted(tinput)[:k]
7649d8de3da3fa532a57d14916a865a0eb58d067 | Python | krishnakalyan3/LearnPyTorch | /src/models/basic_models.py | UTF-8 | 2,232 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python3
import torch.nn as nn
import torch.nn.functional as F
class SimpleNet(nn.Module):
    """Two-layer fully connected network: Linear -> ReLU -> Linear."""
    def __init__(self, input_size, hidden_size, num_classes):
        super(SimpleNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)
    def forward(self, x):
        """x: (N, input_size) -> (N, num_classes) raw scores."""
        hidden = self.relu(self.fc1(x))
        return self.fc2(hidden)
class Net(nn.Module):
    """LeNet-style convolutional classifier returning log-probabilities.

    Expects (N, 1, 28, 28) inputs (MNIST-sized) and produces (N, 10).
    """
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)
    def forward(self, x):
        """x: (N, 1, 28, 28) images -> (N, 10) per-class log-probabilities."""
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        # Fix: pass dim explicitly -- implicit-dim log_softmax is deprecated
        # and its inferred dimension is ambiguous.
        return F.log_softmax(x, dim=1)
class SimpleCNN(nn.Module):
    """LeNet-5-style CNN for 2-channel 32x32 inputs, 10 output classes."""
    def __init__(self):
        super(SimpleCNN, self).__init__()
        # Fix: conv1 produced 10 channels while conv2 expects 6, so the
        # forward pass always crashed; conv1 now outputs 6 channels
        # (the classic LeNet width), matching conv2's input.
        self.conv1 = nn.Conv2d(2, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
    def forward(self, x):
        """x: (N, 2, 32, 32) -> (N, 10) raw class scores."""
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # After two conv+pool stages a 32x32 input is 16 maps of 5x5.
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
class LinearRegression(nn.Module):
    """Linear regression: a single affine layer."""
    def __init__(self, input_size, output_size):
        super(LinearRegression, self).__init__()
        self.linear = nn.Linear(input_size, output_size)
    def forward(self, x):
        return self.linear(x)
class LogisticRegression(nn.Module):
    """Logistic regression head: a single affine layer.

    NOTE: emits raw scores (no sigmoid/softmax); presumably paired with a
    loss such as CrossEntropyLoss -- confirm with the training code.
    """
    def __init__(self, input_size, num_classes):
        super(LogisticRegression, self).__init__()
        self.linear = nn.Linear(input_size, num_classes)
    def forward(self, x):
        return self.linear(x)
| true |
8b3e194021c921e57669c4dd672a6fb9b4adefe6 | Python | togolife/py_learn | /auto_weibo.py | UTF-8 | 1,515 | 2.546875 | 3 | [] | no_license | import os
import time
import MySQLdb
import urllib
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
import SendKeys
# Automation script: logs into Weibo, attaches an image via the native file
# dialog (Windows-only SendKeys), and posts a status update.
driver = webdriver.Firefox()
driver.get("https://weibo.com/login.php")
driver.implicitly_wait(30)
# Log in to Weibo.
driver.find_element_by_id("loginname").clear()
driver.find_element_by_id("loginname").send_keys("") # enter the username here
driver.find_element_by_name("password").send_keys("") # enter the password here
driver.find_element_by_css_selector(".W_btn_a.btn_32px").click()
# Wait until the home page ("My Homepage") title appears after login.
WebDriverWait(driver,30,1).until(expected_conditions.title_contains(u"我的首页"))
time.sleep(15)
# Attach the image.
# NOTE(review): `if True:` looks like a leftover manual toggle for the
# image-upload step -- consider a named flag.
if True:
    upload = driver.find_element_by_xpath("/html/body/div[1]/div/div[2]/div[3]/div[2]/div[1]/div[1]/div/div[3]/div[2]/a[2]")
    upload.click()
    img_path = "D:\\tmp\\images\\146040450.jpg"
    # Type the path into the OS file dialog and confirm.
    SendKeys.SendKeys(str(img_path))
    SendKeys.SendKeys("{ENTER}")
    SendKeys.SendKeys("{ENTER}")
    time.sleep(5) # wait for the image upload to finish
# Post the status update.
st = u"【淘宝发红包了,有机会拿最高2018元红包!点击链接:http://www.dwntme.com/h.Wtxvqhg " +\
     u"或复制这条信息¥icgW0nefUDh¥后打开手淘"
driver.find_element_by_css_selector("textarea.W_input").clear()
driver.find_element_by_css_selector("textarea.W_input").send_keys(st)
driver.implicitly_wait(3) # small extra delay
driver.find_element_by_css_selector(".W_btn_a.btn_30px").click()
| true |
af38b139800813fc63f918d58a082d64afab7768 | Python | ChiuMarcus/Black-Friday | /gameplay.py | UTF-8 | 5,300 | 4.34375 | 4 | [] | no_license | # Gameplay
# Marcus Chiu
# A01034056
# 23 11 2018
# This module contains the functions directly related to gameplay, such as movement and combat.
import random
# These are constants representing the dimension of the dungeon.
map_w = 5  # Dungeon width in tiles (valid x positions: 0..map_w-1).
map_h = 5  # Dungeon height in tiles (valid y positions: 0..map_h-1).
def roll_die(sides):
    """
    Returns the result of rolling a single die with the given number of sides.
    PARAM: sides, an integer
    PRECONDITION: sides must be a positive integer
    POSTCONDITION: Returns an integer between 1 and sides, inclusive
    """
    total = random.randint(1, sides)
    return total
def move(char, direction):
    """Moves the character one space in the chosen compass direction.
    PARAM: char, a dictionary; direction, a string
    PRECONDITION: char must be a valid character sheet.
    POSTCONDITION: The character either moves one position in the specified
    direction, or an error message is printed for an unknown direction.
    RETURN: None
    """
    # Accepted spellings for each compass point (case combinations match
    # exactly what the original accepted -- 'NORTH' is still rejected).
    aliases = {
        'N': ('north', 'North', 'n', 'N'),
        'S': ('south', 'South', 's', 'S'),
        'E': ('east', 'East', 'e', 'E'),
        'W': ('west', 'West', 'w', 'W'),
    }
    for point, names in aliases.items():
        if direction in names:
            dir_valid(char, point)
            return
    print("That is not a valid direction")
    return
def dir_valid(char, point):
    """
    Checks if the move in the given direction stays on the map, and performs
    it if so.
    PARAMETER: char, a character sheet; point, one of 'N', 'S', 'E', 'W'
    POSTCONDITION: The character moves one space (triggering an encounter
    roll), or is told they cannot move that way.
    RETURN: None
    """
    # (axis key, step, bound): positive steps must stay below the bound,
    # negative steps must not go below it (bound 0).
    offsets = {
        'S': ('y-pos', 1, map_h),
        'E': ('x-pos', 1, map_w),
        'N': ('y-pos', -1, 0),
        'W': ('x-pos', -1, 0),
    }
    if point in offsets:
        axis, step, bound = offsets[point]
        target = char[axis] + step
        if (step > 0 and target < bound) or (step < 0 and target >= bound):
            char[axis] = target
            enemy_roll(char)
            return
    print("You cannot go that way")
    return
def enemy_roll(char):
    """Rolls a 10-sided die after a move: on a 1 a monster spawns and combat
    begins; otherwise the player heals 1 hp if below full health.
    PARAM: char, a character sheet
    POSTCONDITION: Either starts combat with a fresh monster, or heals the
    player for 1 hp when under full health.
    RETURN: None
    """
    if roll_die(10) != 1:
        if char['hp'] < 10:
            char['hp'] += 1
        return
    enemy = {'name': 'Black Friday Shopper', 'hp': 10}
    combat_round(char, enemy)
    return
def damage_calc(char):
    """Calculates combat damage with a six-sided die roll and applies it.
    PARAM: char, a character sheet (dictionary with 'name' and 'hp')
    PRECONDITION: char is a well-formed character sheet.
    POSTCONDITION: Reduces char's hp by the rolled value and prints a message.
    RETURN: None"""
    damage = roll_die(6)
    char['hp'] -= damage
    print(char['name'] + " is struck for ", damage, "damage.")
    return
def is_dead(char):
    """Determines if a character is dead.
    PARAM: a dictionary representing a character or monster
    PRECONDITION: Input is a well formed character sheet
    POSTCONDITION: returns 1 (and prints a knock-out message) if the
    character's hp is 0 or lower, and 0 otherwise
    RETURN: integer"""
    if char['hp'] > 0:
        return 0
    print(char['name'] + " is knocked out!")
    return 1
def combat_round(char1, char2):
    """Simulates a single round of combat.
    PARAM: char1 (the player) and char2 (the enemy), character sheets
    PRECONDITION: Inputs are both character sheets created by generate_charsheet
    POSTCONDITION: Prints a message if one character dies; sets the global
    ``game`` flag to False when the player is knocked out.
    RETURN: None"""
    global game
    print("What will you do?")
    choice = input("Type F to fight or R to flee")
    if choice == "F" or choice == "f":
        damage_calc(char2)
        if is_dead(char2) == 1:
            return
        print(char2['name'] + "'s remaining HP: ", char2['hp'], "/10")
        print("It's " + char2['name'] + "'s turn!")
        damage_calc(char1)
        if is_dead(char1) == 1:
            print("GAME OVER")
            game = False
            return
        print(char1['name'] + "'s remaining HP: ", char1['hp'], "/10")
        print("On to the next round!")
        return combat_round(char1, char2)
    # BUG FIX: the flee branch previously compared against '"r' (stray quote),
    # so a lowercase "r" fell through to "not a valid command".
    elif choice == "R" or choice == "r":
        if roll_die(10) == 1:
            # Fleeing failed: the enemy gets a free sneak attack.
            print(char2['name'] + " launched a sneak attack on " + char1['name'] + "!")
            char1['hp'] -= roll_die(4)
            print(char1['name'] + "'s remaining HP: ", char1['hp'], "/10")
            if is_dead(char1) == 1:
                print("GAME OVER")
                game = False
                return
        else:
            print(char1['name'] + " ran away!")
            return
    else:
        print("That is not a valid command.")
        combat_round(char1, char2)
36e50c359f11527d6e96b6523043baa9be4159b9 | Python | mardix/polybaseappfunction-template | /lib.py | UTF-8 | 2,944 | 2.796875 | 3 | [] | no_license | import re
import sys
import uuid
import json
import datetime
import requests
def run_function(fn_module, fn_name, *args, **kwargs):
    """
    Look up *fn_name* on *fn_module* and invoke it.

    Args:
        - fn_module: the module (or object) containing the functions
        - fn_name: the function name; names starting with "_" are refused
        - *args, **kwargs: forwarded to the resolved function

    Raises:
        Exception: "400" for private names, "404" for missing names.
    """
    if fn_name.startswith("_"):
        raise Exception("400: function is invalid")
    missing = object()
    fn = getattr(fn_module, fn_name, missing)
    if fn is missing:
        raise Exception("404: function doesn't exist")
    return fn(*args, **kwargs)
def require_post_method(request):
    """
    Raise unless the given request object uses the POST method.
    """
    method = request.method
    if method != "POST":
        raise Exception('This function requires a POST method')
def get_headers_auth_bearer(request):
    """
    Extract the token from the request's ``Authorization: Bearer <token>`` header.

    Returns:
        The bearer token string, or None when no Authorization header is set.
    """
    if 'Authorization' not in request.headers:
        return None
    data = str(request.headers['Authorization']).strip()
    # BUG FIX: strip only the leading "Bearer " scheme marker. The previous
    # str.replace() removed the substring anywhere inside the token itself.
    if data.startswith('Bearer '):
        data = data[len('Bearer '):].strip()
    return data
def split_list(lst, n):
    """
    Split a list into successive chunks of at most n items.

    Args:
        - lst: list
        - n: chunk size

    Returns: a generator yielding n-sized chunks of lst.
    """
    start = 0
    while start < len(lst):
        yield lst[start:start + n]
        start += n
#------------------------------------------------------------------------------
# Rejam API
#
class RejamException(Exception): pass
class Rejam(object):
    """
    Minimal client for the Rejam HTTP API.

    Every call is a POST to ``url`` authenticated through the
    ``X-REJAM-ACCESS-KEY`` header.
    """

    def __init__(self, url, access_key):
        self.url = url
        self.access_key = access_key

    def __call__(self, *a, **kw):
        """
        Make use of the instance as a function for simplicity.

        Example:
            # create client
            rejam_client = Rejam(url, access_key)

            # inline
            rejam_client("STORE.GET", collection="abc", _key="xyz")

            # With many args
            rejam_client("STORE.SET", collection="xyz", data=[], operations=[])

        Args / Returns: same as :meth:`call`.
        """
        return self.call(*a, **kw)

    def call(self, action, **kw):
        """
        Execute the Rejam service call.

        Args:
            - action: string - the action to perform
            - **kw: payload fields sent alongside the action

        Returns:
            The decoded JSON response body.

        Raises:
            RejamException: when the server answers with a non-OK status.
        """
        headers = {
            "X-REJAM-ACCESS-KEY": self.access_key
        }
        data = {
            **kw,
            "action": action
        }
        r = requests.post(self.url, json=data, headers=headers)
        if r.status_code == requests.codes.ok:
            return r.json()
        # BUG FIX: build the best error message we can without assuming the
        # error body is valid JSON -- the previous code called r.json()
        # unguarded, so a non-JSON error page raised ValueError and masked
        # the real HTTP failure.
        try:
            _d = r.json()
        except ValueError:
            _d = {}
        if "error" in _d:
            msg = _d["error"]["message"]
        else:
            msg = "[%s]" % r.status_code
        raise RejamException(msg)
c115f1a7590363404ecd67b3f7308a9253bb5897 | Python | rlsharpton/tocando | /words.py | UTF-8 | 712 | 3.578125 | 4 | [] | no_license | __author__ = 'tocando'
import scrabble
letters = 'abcdefghijklmnopqrstuvwxyz'
vowels = 'aeiou'
def has_all_vowels(word=None):
    """Return the first word in the scrabble word list that contains every
    vowel (a, e, i, o, u), or None when no such word exists.

    BUG FIX: the original loop returned the very first word of the list
    unconditionally without checking vowels at all.  The *word* parameter is
    unused and kept (now optional) only for backward compatibility with
    existing call sites.
    """
    for candidate in scrabble.wordlist:
        if all(v in candidate for v in vowels):
            return candidate
    return None
# NOTE(review): `word` is never defined before this loop, so the call below
# raises a NameError at runtime -- confirm what argument was intended.
x = 0
while x < 10:
    ba = has_all_vowels(word)
    print('the word is ', ba)
    x = x + 1
#for word in scrabble.wordlist:
#    for x in range(0, 10):
#        print(word)
#word_with_all_vowels = has_all_vowels()
#print('word_with_all_vowels is : ', word_with_all_vowels)
#for letter in letters:
#    if not has_a_double(letter):
#        print(letter + " never occures as a double")
# Print all words containing 'uu'
#for word in scrabble.wordlist:
#    if 'uu' in word:
#        print(word)
40466522e4d49ba818f2bf275e4b7bd1269b51cc | Python | Python3pkg/Cascadenik | /cascadenik/nonposix.py | UTF-8 | 2,610 | 2.921875 | 3 | [
"BSD-3-Clause"
] | permissive | import os
import os.path as systempath
import posixpath
from hashlib import md5
drives = {}
# sketchy windows only mucking to handle translating between
# native cascadenik storage of posix paths and the filesystem.
# to_posix() and un_posix() are called in cascadenik/compile.py
# but only impact non-posix systems (windows)
def get_posix_root(valid_posix_path):
    """Return the first path component below '/' for the given posix path.

    Existing directories are normalised to a trailing separator; anything
    else is reduced to its parent directory first.  When there is no
    component below the root, the normalised path itself is returned.
    """
    if posixpath.isdir(valid_posix_path) and not valid_posix_path.endswith(posixpath.sep):
        normalized = valid_posix_path + posixpath.sep
    else:
        normalized = posixpath.dirname(valid_posix_path)
    segments = normalized.split(posixpath.sep)
    return segments[1] or normalized
def add_drive(drive, valid_posix_path):
    """Remember which drive letter owns the posix root of this path.

    The first registration for a given root wins; later calls for the same
    root are ignored.
    """
    root = get_posix_root(valid_posix_path)
    existing = drives.get(root)
    if not existing:
        drives[root] = drive
def get_drive(valid_posix_path):
    """Return the drive registered for this path's posix root, or None."""
    root = get_posix_root(valid_posix_path)
    return drives.get(root)
# not currently used
def add_drive_by_hash(drive,valid_posix_path):
    """Cache *drive* under an 8-character md5 digest of the posix path.

    NOTE(review): ``md5`` requires bytes under Python 3, so this would raise
    a TypeError on a str path; this module appears to target Python 2.
    """
    # cache the drive so we can try to recreate later
    global drives
    hash = md5(valid_posix_path).hexdigest()[:8]
    drives[hash] = drive
    #print 'pushing drive: %s | %s | %s' % (drive,valid_posix_path,hash)
# not currently used
def get_drive_by_hash(valid_posix_path):
    """Look up a drive cached by ``add_drive_by_hash``.

    Tries the digest of the full path first and falls back to the digest of
    its parent directory.

    Returns:
        The cached drive, or None when neither digest is known.
    """
    # todo - make this smarter
    hash = md5(valid_posix_path).hexdigest()[:8]
    drive = drives.get(hash)
    if not drive:
        hash = md5(posixpath.dirname(valid_posix_path)).hexdigest()[:8]
        drive = drives.get(hash)
    # BUG FIX: the looked-up value was previously computed and then dropped;
    # the function always returned None.
    return drive
def to_posix(path_name):
    """Convert a native path to posix form.

    On posix systems the path is already in the right shape and is returned
    untouched.  Elsewhere (Windows) the drive letter is split off and
    remembered -- so ``un_posix`` can restore it later -- and separators are
    normalised to '/'.
    """
    if os.name == "posix":
        return path_name
    drive, remainder = systempath.splitdrive(path_name)
    valid_posix_path = remainder.replace(os.sep, posixpath.sep)
    if drive:
        # remember the drive so the path can be made native again later
        add_drive(drive, valid_posix_path)
    return valid_posix_path
def un_posix(valid_posix_path, drive=None):
    """Convert a posix-style absolute path back to a native platform path.

    On posix systems the path is returned untouched.

    Args:
        valid_posix_path: absolute posix path produced by ``to_posix``.
        drive: optional drive letter; when omitted it is looked up from the
            table populated by ``add_drive``.

    Raises:
        AssertionError: when no drive is known for the path.
    """
    if os.name == "posix":
        return valid_posix_path
    if not posixpath.isabs(valid_posix_path):
        # Relative paths are returned unchanged (mirrors the original
        # "what to do? for now" behaviour).
        return valid_posix_path
    # BUG FIX: the ``drive`` argument was previously ignored -- it was
    # unconditionally overwritten by the table lookup.
    if drive is None:
        drive = get_drive(valid_posix_path)
    assert drive, "We cannot make this path (%s) local to the platform without knowing the drive" % valid_posix_path
    return systempath.join(drive, systempath.normpath(valid_posix_path))
ec0243d892005007bc17761738d224b6c9b2a6a5 | Python | FXIhub/hummingbird | /hummingbird/interface/ui/image_view.py | UTF-8 | 12,382 | 2.8125 | 3 | [
"BSD-2-Clause"
] | permissive | # -*- coding: utf-8 -*-
"""
ImageView.py - Widget for basic image display and analysis
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
Widget used for displaying 2D or 3D data. Features:
- float or int (including 16-bit int) image display via ImageItem
- zoom/pan via GraphicsView
- black/white level controls
- time slider for 3D data sets
- ROI plotting
- Image normalization through a variety of methods
"""
import numpy
import pyqtgraph
from ..Qt import QtCore, QtGui, loadUiType
from . import uidir
Ui_Form, base = loadUiType(uidir + '/image_view.ui')
class ImageView(QtGui.QWidget):
    """
    Widget used for display and analysis of image data.
    Implements many features:
    * Displays 2D and 3D image data. For 3D data, a z-axis
      slider is displayed allowing the user to select which frame is displayed.
    * Displays histogram of image data with movable region defining the dark/light levels
    * Editable gradient provides a color lookup table
    * Frame slider may also be moved using left/right arrow keys as well as pgup, pgdn, home, and end.
    * Basic analysis features including:
        * ROI and embedded plot for measuring image values across frames
        * Image normalization / background subtraction
    Basic Usage::
        imv = pg.ImageView()
        imv.show()
        imv.setImage(data)
    """
    # Emitted when the processing chain changes (public signal of the widget).
    sigProcessingChanged = QtCore.Signal(object)

    def __init__(self, parent=None, name="ImageView", view=None, imageItem=None, *args):
        """
        By default, this class creates an :class:`ImageItem <pyqtgraph.ImageItem>` to display image data
        and a :class:`ViewBox <pyqtgraph.ViewBox>` to contain the ImageItem. Custom items may be given instead
        by specifying the *view* and/or *imageItem* arguments.
        """
        QtGui.QWidget.__init__(self, parent, *args)
        self._parent = parent
        # Display levels of the current image; recomputed by getProcessedImage().
        self.levelMax = 4096
        self.levelMin = 0
        self.name = name
        self.image = None        # raw image array as passed to setImage()
        self.axes = {}           # maps 't'/'x'/'y'/'c' to array axis index (or None)
        self.imageDisp = None    # cached processed image (see getProcessedImage)
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        self.scene = self.ui.graphicsView.scene()
        if view is None:
            self.view = pyqtgraph.ViewBox()
        else:
            self.view = view
        self.ui.graphicsView.setCentralItem(self.view)
        self.view.setAspectLocked(True)
        self.view.invertY()
        if imageItem is None:
            self.imageItem = pyqtgraph.ImageItem()
        else:
            self.imageItem = imageItem
        self.view.addItem(self.imageItem)
        self.currentIndex = 0    # frame currently displayed for 3D data
        self.ui.histogram.setImageItem(self.imageItem)
        self.keysPressed = {}    # keys currently held down (manual auto-repeat)
        ## wrap functions from view box
        for fn in ['addItem', 'removeItem']:
            setattr(self, fn, getattr(self.view, fn))
        ## wrap functions from histogram
        for fn in ['setHistogramRange', 'autoHistogramRange', 'getLookupTable', 'getLevels']:
            setattr(self, fn, getattr(self.ui.histogram, fn))
        # Frame-scrubbing keys whose auto-repeat events are filtered out.
        self.noRepeatKeys = [QtCore.Qt.Key_Right, QtCore.Qt.Key_Left, QtCore.Qt.Key_Up, QtCore.Qt.Key_Down, QtCore.Qt.Key_PageUp, QtCore.Qt.Key_PageDown]

    def setImage(self, img, autoRange=True, autoLevels=True, levels=None, axes=None, xvals=None, pos=None, scale=None, transform=None, autoHistogramRange=True):
        """
        Set the image to be displayed in the widget.
        ================== =======================================================================
        **Arguments:**
        img                (numpy array) the image to be displayed.
        xvals              (numpy array) 1D array of z-axis values corresponding to the third axis
                           in a 3D image. For video, this array should contain the time of each frame.
        autoRange          (bool) whether to scale/pan the view to fit the image.
        autoLevels         (bool) whether to update the white/black levels to fit the image.
        levels             (min, max); the white and black level values to use.
        axes               Dictionary indicating the interpretation for each axis.
                           This is only needed to override the default guess. Format is::
                               {'t':0, 'x':1, 'y':2, 'c':3};
        pos                Change the position of the displayed image
        scale              Change the scale of the displayed image
        transform          Set the transform of the displayed image. This option overrides *pos*
                           and *scale*.
        autoHistogramRange If True, the histogram y-range is automatically scaled to fit the
                           image data.
        ================== =======================================================================
        """
        if hasattr(img, 'implements') and img.implements('MetaArray'):
            img = img.asarray()
        if not isinstance(img, numpy.ndarray):
            raise Exception("Image must be specified as ndarray.")
        self.image = img
        if xvals is not None:
            self.tVals = xvals
        elif hasattr(img, 'xvals'):
            try:
                self.tVals = img.xvals(0)
            except:
                # NOTE(review): bare except -- any failure of xvals() silently
                # falls back to plain frame indices.
                self.tVals = numpy.arange(img.shape[0])
        else:
            self.tVals = numpy.arange(img.shape[0])
        if axes is None:
            # Guess the axis interpretation from the array's dimensionality.
            if img.ndim == 2:
                self.axes = {'t': None, 'x': 0, 'y': 1, 'c': None}
            elif img.ndim == 3:
                # A short third axis (<= 4) is assumed to hold color channels.
                if img.shape[2] <= 4:
                    self.axes = {'t': None, 'x': 0, 'y': 1, 'c': 2}
                else:
                    self.axes = {'t': 0, 'x': 1, 'y': 2, 'c': None}
            elif img.ndim == 4:
                self.axes = {'t': 0, 'x': 1, 'y': 2, 'c': 3}
            else:
                raise Exception("Can not interpret image with dimensions %s" % (str(img.shape)))
        elif isinstance(axes, dict):
            self.axes = axes.copy()
        elif isinstance(axes, list) or isinstance(axes, tuple):
            self.axes = {}
            for i in range(len(axes)):
                self.axes[axes[i]] = i
        else:
            raise Exception("Can not interpret axis specification %s. Must be like {'t': 2, 'x': 0, 'y': 1} or ('t', 'x', 'y', 'c')" % (str(axes)))
        # Normalize: every axis key is present, missing ones map to None.
        for x in ['t', 'x', 'y', 'c']:
            self.axes[x] = self.axes.get(x, None)
        self.imageDisp = None
        self.currentIndex = 0
        self.updateImage(autoHistogramRange=autoHistogramRange)
        if levels is None and autoLevels:
            self.autoLevels()
        if levels is not None:  ## this does nothing since getProcessedImage sets these values again.
            self.setLevels(*levels)
        if self.axes['t'] is not None:
            # Time span covered by the frames, padded by 2% at the end.
            # NOTE(review): start/stop are computed but not used below --
            # presumably a leftover from a time-slider feature; confirm.
            if len(self.tVals) > 1:
                start = self.tVals.min()
                stop = self.tVals.max() + abs(self.tVals[-1] - self.tVals[0]) * 0.02
            elif len(self.tVals) == 1:
                start = self.tVals[0] - 0.5
                stop = self.tVals[0] + 0.5
            else:
                start = 0
                stop = 1
        self.imageItem.resetTransform()
        if scale is not None:
            self.imageItem.scale(*scale)
        if pos is not None:
            self.imageItem.setPos(*pos)
        if transform is not None:
            self.imageItem.setTransform(transform)
        if autoRange:
            self.autoRange()

    def autoLevels(self):
        """Set the min/max intensity levels automatically to match the image data."""
        self.setLevels(self.levelMin, self.levelMax)

    def setLevels(self, min, max):
        """Set the min/max (bright and dark) levels."""
        # NOTE(review): parameters shadow the min/max builtins (kept for API
        # compatibility).
        self.ui.histogram.setLevels(min, max)

    def autoRange(self):
        """Auto scale and pan the view around the image."""
        # Called for its side effect of refreshing levelMin/levelMax;
        # the returned image itself is unused here.
        image = self.getProcessedImage()
        self.view.autoRange()

    def getProcessedImage(self):
        """Returns the image data after it has been processed by any normalization options in use.
        This method also sets the attributes self.levelMin and self.levelMax
        to indicate the range of data in the image."""
        if self.imageDisp is None:
            image = self.normalize(self.image)
            self.imageDisp = image
            self.levelMin, self.levelMax = list(map(float, ImageView.quickMinMax(self.imageDisp)))
        return self.imageDisp

    def close(self):
        """Closes the widget nicely, making sure to clear the graphics scene and release memory."""
        self.ui.graphicsView.close()
        self.scene.clear()
        del self.image
        del self.imageDisp
        self.setParent(None)

    def keyPressEvent(self, ev):
        """Handle frame-navigation keys; all other keys go to the base class."""
        if ev.key() == QtCore.Qt.Key_Home:
            self.setCurrentIndex(0)
            ev.accept()
        elif ev.key() == QtCore.Qt.Key_End:
            self.setCurrentIndex(self.getProcessedImage().shape[0]-1)
            ev.accept()
        elif ev.key() in self.noRepeatKeys:
            ev.accept()
            if ev.isAutoRepeat():
                return
            self.keysPressed[ev.key()] = 1
            self.evalKeyState()
        else:
            QtGui.QWidget.keyPressEvent(self, ev)

    def keyReleaseEvent(self, ev):
        """Stop frame scrubbing when a navigation key is released."""
        if ev.key() in [QtCore.Qt.Key_Space, QtCore.Qt.Key_Home, QtCore.Qt.Key_End]:
            ev.accept()
        elif ev.key() in self.noRepeatKeys:
            ev.accept()
            if ev.isAutoRepeat():
                return
            try:
                del self.keysPressed[ev.key()]
            except:
                # NOTE(review): bare except -- resets tracking if the key was
                # somehow not recorded as pressed.
                self.keysPressed = {}
            self.evalKeyState()
        else:
            QtGui.QWidget.keyReleaseEvent(self, ev)

    def evalKeyState(self):
        """Translate the single currently-held navigation key into a frame jump."""
        if len(self.keysPressed) == 1:
            key = list(self.keysPressed.keys())[0]
            if key == QtCore.Qt.Key_Right:
                self.jumpFrames(1)
            elif key == QtCore.Qt.Key_Left:
                self.jumpFrames(-1)
            elif key == QtCore.Qt.Key_Up:
                self.jumpFrames(-10)
            elif key == QtCore.Qt.Key_Down:
                self.jumpFrames(10)
            elif key == QtCore.Qt.Key_PageUp:
                self.jumpFrames(-100)
            elif key == QtCore.Qt.Key_PageDown:
                self.jumpFrames(100)

    def setCurrentIndex(self, ind, autoHistogramRange=True):
        """Set the currently displayed frame index."""
        # Clamp to the valid frame range before redrawing.
        self.currentIndex = numpy.clip(ind, 0, self.getProcessedImage().shape[0]-1)
        self.updateImage(autoHistogramRange=autoHistogramRange)

    def jumpFrames(self, n):
        """Move video frame ahead n frames (may be negative)"""
        if self.axes['t'] is not None:
            self.setCurrentIndex(self.currentIndex + n)
            # NOTE(review): assumes the parent widget provides replot();
            # a parent-less ImageView would raise here.
            self._parent.replot()

    def hasTimeAxis(self):
        """Return True when the current image has a time ('t') axis."""
        return 't' in self.axes and self.axes['t'] is not None

    @staticmethod
    def quickMinMax(data):
        """Estimate the data range by min/max over a subsampled copy.

        The array is repeatedly halved along its largest axis until it has
        at most ~1e6 elements, keeping the scan cheap for large stacks.
        """
        while data.size > 1e6:
            ax = numpy.argmax(data.shape)
            sl = [slice(None)] * data.ndim
            sl[ax] = slice(None, None, 2)
            data = data[tuple(sl)]
        return data.min(), data.max()

    def normalize(self, image):
        # NOTE(review): mutates the input array in place (NaN -> 0) and
        # returns it; callers keeping a reference see the modification.
        image[numpy.isnan(image)] = 0
        return image

    def updateImage(self, autoHistogramRange=True):
        ## Redraw image on screen
        if self.image is None:
            return
        image = self.getProcessedImage()
        if autoHistogramRange:
            self.ui.histogram.setHistogramRange(self.levelMin, self.levelMax)
        # For 3D data only the current frame is pushed to the ImageItem.
        if self.axes['t'] is None:
            self.imageItem.updateImage(image)
        else:
            self.imageItem.updateImage(image[self.currentIndex])

    def getView(self):
        """Return the ViewBox (or other compatible object) which displays the ImageItem"""
        return self.view

    def getImageItem(self):
        """Return the ImageItem for this ImageView."""
        return self.imageItem

    def getHistogramWidget(self):
        """Return the HistogramLUTWidget for this ImageView"""
        return self.ui.histogram
| true |
6a8f687754c19f5308ffac06d3b90dd80c44d202 | Python | akajuvonen/advent-of-code-2019-python | /advent_of_code_2019_python/day6.py | UTF-8 | 4,044 | 3.359375 | 3 | [
"MIT"
] | permissive | from __future__ import annotations
from typing import Dict, List
import attr
import click
@attr.s(auto_attribs=True)
class SpaceObject:
    # A node in the orbit tree: `name` identifies the object and `orbiters`
    # lists the objects directly orbiting it.
    name: str
    orbiters: List[SpaceObject] = attr.Factory(list)

    def add_orbiter(self, orbiter: SpaceObject):
        """Register *orbiter* as directly orbiting this object."""
        self.orbiters.append(orbiter)
def populate_orbits(orbit_list: List[str]) -> SpaceObject:
    """
    Create all objects in space and populate their orbiter lists.

    Arguments:
        orbit_list: List of orbit instruction strings, like 'A)B' meaning B orbits A.

    Returns:
        Universal center of mass which acts as a root node.
    """
    orbit_dict: Dict[str, List[str]] = {}
    for orbit in orbit_list:
        [center_of_mass, orbiter] = orbit.split(')')
        # setdefault replaces the original membership-test-then-append pair.
        orbit_dict.setdefault(center_of_mass, []).append(orbiter)
    univ_center_of_mass = SpaceObject(name='COM')
    _add_orbiters(univ_center_of_mass, orbit_dict)
    return univ_center_of_mass
def _add_orbiters(space_object: SpaceObject, orbit_dict: Dict[str, list]):
    """Recursively build the orbiter subtree of *space_object* from the
    adjacency map of orbiter names."""
    for orbiter_name in orbit_dict.get(space_object.name, []):
        child = SpaceObject(name=orbiter_name)
        space_object.add_orbiter(child)
        _add_orbiters(child, orbit_dict)
def calculate_orbits(univ_center_of_mass: SpaceObject) -> int:
    """
    Calculate the total number of direct and indirect orbits.

    Arguments:
        univ_center_of_mass: Root object in space.

    Returns:
        Total orbit count.
    """
    root_depth = 0
    return _traverse_objects(univ_center_of_mass, root_depth)
def _traverse_objects(space_object: SpaceObject, count: int) -> int:
"""Recursively traverse the space object tree, adding all counts and increasing it each level.
This equals the number of direct and indirect orbits."""
if not space_object.orbiters:
return count
total = 0
for orbiter in space_object.orbiters:
total += _traverse_objects(orbiter, count+1)
return total + count
def calculate_orbital_transfers(first_name: str, second_name: str, root: SpaceObject) -> int:
    """
    Calculates the number of orbital transfers required between two objects.

    Arguments:
        first_name: Name of the first object.
        second_name: Name of the second object.
        root: Root node (universal center of mass).

    Returns:
        Total orbital transfer count.
    """
    first_path: List[str] = []
    _calculate_path(first_name, first_path, root)
    second_path: List[str] = []
    _calculate_path(second_name, second_path, root)
    # Length of the shared prefix of the two root-to-node paths.
    # BUG FIX: the original kept counting matches after the first divergence,
    # which over-counts whenever unrelated branches happen to contain equal
    # names at the same depth.
    common = 0
    for a, b in zip(first_path, second_path):
        if a != b:
            break
        common += 1
    return len(first_path) + len(second_path) - 2 * common
def _calculate_path(name: str, path: list, node: SpaceObject) -> bool:
"""Recursively get a path from root to an object. This is a typical tree path calculation
algorithm which saves the path into given list."""
if node.name == name:
return True
path.append(node.name)
for orbiter in node.orbiters:
if _calculate_path(name, path, orbiter):
return True
path.pop()
return False
@click.command()
@click.option('--input-file', required=True, type=str, default='inputs/input_day6.txt', show_default=True,
              help='Path to file containing space object orbits')
def main(input_file):
    """Read the orbit map, then print the total orbit count and the number
    of orbital transfers between YOU and SAN."""
    with open(input_file) as f:
        orbit_list = [line.rstrip('\n') for line in f]
    univ_center_of_mass = populate_orbits(orbit_list)
    print(calculate_orbits(univ_center_of_mass))
    print(calculate_orbital_transfers('YOU', 'SAN', univ_center_of_mass))


if __name__ == '__main__':
    main()
| true |
5cf6be93187d29cdb9e506c1569d6aa5075fa361 | Python | NemoIII/gamepy | /base.py | UTF-8 | 4,170 | 3.25 | 3 | [] | no_license | import collections
import Tkinter as TK
import math
import os
def path(filename):
    """Return the absolute path to *filename* located next to this module."""
    module_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(module_dir, filename)
def line(a, b, x, y):
    """Draw a straight line from (a, b) to (x, y) with the turtle pen.

    The pen is lifted while jumping to the start point so no stray line is
    drawn from the turtle's previous position.
    """
    import turtle
    turtle.up()
    turtle.goto(a, b)
    turtle.down()
    turtle.goto(x, y)
try:
    # ``Sequence`` lives in ``collections.abc`` on Python 3 (and was removed
    # from ``collections`` in 3.10); fall back to the Python 2 location.
    from collections.abc import Sequence as _Sequence
except ImportError:  # Python 2
    from collections import Sequence as _Sequence


class vector(_Sequence):
    """2D vector with coordinates rounded to PRECISION decimal places.

    Once a vector has been hashed it becomes immutable: any further attempt
    to modify it raises ValueError.
    """
    # pylint: disable=invalid-name
    PRECISION = 6
    __slots__ = ('_x', '_y', '_hash')

    def __init__(self, x, y):
        self._hash = None
        self._x = round(x, self.PRECISION)
        self._y = round(y, self.PRECISION)

    @property
    def x(self):
        """x coordinate (rounded)."""
        return self._x

    @x.setter
    def x(self, value):
        if self._hash is not None:
            raise ValueError("Cannot set x after hashing")
        self._x = round(value, self.PRECISION)

    @property
    def y(self):
        """y coordinate (rounded)."""
        return self._y

    @y.setter
    def y(self, value):
        if self._hash is not None:
            raise ValueError("Cannot set y after hashing")
        self._y = round(value, self.PRECISION)

    def __hash__(self):
        # v.__hash__() -> hash(v); computed lazily, then the vector is frozen.
        if self._hash is None:
            pair = (self.x, self.y)
            self._hash = hash(pair)
        return self._hash

    def __len__(self):
        return 2

    def __getitem__(self, index):
        if index == 0:
            return self.x
        elif index == 1:
            return self.y
        else:
            raise IndexError

    def copy(self):
        """Return an unhashed copy of this vector."""
        type_self = type(self)
        return type_self(self.x, self.y)

    def __eq__(self, other):
        if isinstance(other, vector):
            return self.x == other.x and self.y == other.y
        return NotImplemented

    def __ne__(self, other):
        if isinstance(other, vector):
            # BUG FIX: was ``and``, which made vectors differing in only one
            # coordinate compare as neither equal nor unequal.
            return self.x != other.x or self.y != other.y
        return NotImplemented

    def __iadd__(self, other):
        # v.__iadd__(w) -> v += w
        if self._hash is not None:
            raise ValueError("Cannot add vector after hashing")
        elif isinstance(other, vector):
            # BUG FIX: the coordinates were assigned (self.x = other.x)
            # instead of added, contradicting the documented behaviour of
            # move(): vector(1, 2).move(vector(3, 4)) ==> vector(4, 6).
            self.x += other.x
            self.y += other.y
        else:
            self.x += other
            self.y += other
        return self

    def __add__(self, other):
        # v.__add__(w) -> v + w
        result = self.copy()
        return result.__iadd__(other)

    __radd__ = __add__

    def move(self, other):
        """Translate this vector in place by *other* (vector or scalar)."""
        self.__iadd__(other)

    def __isub__(self, other):
        # v.__isub__(w) -> v -= w
        if self._hash is not None:
            raise ValueError("Cannot subtract vector after hashing")
        elif isinstance(other, vector):
            self.x -= other.x
            self.y -= other.y
        else:
            self.x -= other
            self.y -= other
        # BUG FIX: without returning self, ``v - w`` evaluated to None.
        return self

    def __sub__(self, other):
        # v.__sub__(w) -> v - w
        result = self.copy()
        return result.__isub__(other)

    def __imul__(self, other):
        # v.__imul__(w) -> v *= w
        if self._hash is not None:
            raise ValueError("Cannot multiply vector after hashing")
        elif isinstance(other, vector):
            self.x *= other.x
            self.y *= other.y
        else:
            self.x *= other
            self.y *= other
        return self

    def __mul__(self, other):
        # v.__mul__(w) -> v * w
        # BUG FIX: previously ``self.copy.__imul__()`` -- copy() was never
        # called and __imul__ was given no argument, so ``v * w`` raised.
        result = self.copy()
        return result.__imul__(other)

    __rmul__ = __mul__

    def scale(self, other):
        """Scale this vector in place by *other* (vector or scalar)."""
        self.__imul__(other)

    def __itruediv__(self, other):
        # v.__itruediv__(w) -> v /= w
        if self._hash is not None:
            raise ValueError("Cannot divide vector after hashing")
        elif isinstance(other, vector):
            self.x /= other.x
            self.y /= other.y
        else:
            self.x /= other
            self.y /= other
        return self

    def __truediv__(self, other):
        # v.__truediv__(w) -> v / w
        result = self.copy()
        return result.__itruediv__(other)

    def __neg__(self):
        # v.__neg__() -> -v
        result = self.copy()
        result.x = -result.x
        result.y = -result.y
        return result

    def __abs__(self):
        # Euclidean length, e.g. abs(vector(3, 4)) == 5.
        return (self.x**2 + self.y**2)**0.5

    def rotate(self, angle):
        """Rotate this vector in place by *angle* degrees counter-clockwise."""
        if self._hash is not None:
            raise ValueError("Cannot rotate vector after hashing")
        radians = angle * math.pi / 180.0
        cosine = math.cos(radians)
        sine = math.sin(radians)
        x = self.x
        y = self.y
        self.x = x * cosine - y * sine
        self.y = y * cosine + x * sine

    def __repr__(self):
        # v.__repr__() -> repr(v)
        type_self = type(self)
        name = type_self.__name__
        return '{}({!r},{!r})'.format(name, self.x, self.y)
| true |
7fcb80a56d5d2f8f198a5474c2577d2addd711e5 | Python | mars-project/mars | /mars/tensor/arithmetic/arctan2.py | UTF-8 | 4,454 | 3.125 | 3 | [
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"CC0-1.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ..utils import infer_dtype
from .core import TensorBinOp
from .utils import arithmetic_operand
@arithmetic_operand
class TensorArctan2(TensorBinOp):
    """Binary elementwise arctan2 operand for mars tensors."""
    _op_type_ = OperandDef.ARCTAN2
    _func_name = "arctan2"

    @classmethod
    def _is_sparse(cls, x1, x2):
        """Decide whether the result tensor should be sparse."""
        if hasattr(x1, "issparse") and x1.issparse():
            # if x1 is sparse, will be sparse always
            return True
        elif np.isscalar(x1) and x1 == 0:
            # x1 == 0, return sparse if x2 is
            return x2.issparse() if hasattr(x2, "issparse") else False
        return False
@infer_dtype(np.arctan2)
def arctan2(x1, x2, out=None, where=None, **kwargs):
    """
    Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.

    The quadrant (branch) is chosen so that ``arctan2(x1, x2)`` is the signed
    angle in radians between the positive x-axis and the ray through the
    point (`x2`, `x1`) -- note the reversed argument order: the
    "`y`-coordinate" comes first.  By IEEE convention the function is defined
    for ``x2 = +/-0`` and for infinite arguments.  It is not defined for
    complex-valued inputs; use `angle` for the argument of complex values.

    Parameters
    ----------
    x1 : array_like, real-valued
        `y`-coordinates.
    x2 : array_like, real-valued
        `x`-coordinates, broadcastable against the shape of `x1`.
    out : Tensor, None, or tuple of Tensor and None, optional
        Location into which the result is stored; a freshly-allocated tensor
        is returned when omitted.
    where : array_like, optional
        Mask selecting the positions at which the ufunc is computed.
    **kwargs

    Returns
    -------
    angle : Tensor
        Array of angles in radians, in the range ``[-pi, pi]``.

    See Also
    --------
    arctan, tan, angle

    Examples
    --------
    >>> import mars.tensor as mt

    >>> x = mt.array([-1, +1, +1, -1])
    >>> y = mt.array([-1, -1, +1, +1])
    >>> (mt.arctan2(y, x) * 180 / mt.pi).execute()
    array([-135.,  -45.,   45.,  135.])

    >>> mt.arctan2([1., -1.], [0., 0.]).execute()
    array([ 1.57079633, -1.57079633])
    """
    return TensorArctan2(**kwargs)(x1, x2, out=out, where=where)
| true |
0fbba28269a804fc9827d0b2f75810526d6007e8 | Python | h2r/slu_core | /data/kitchen/data/annotated_recipes/sweetCornbreadCake.py | UTF-8 | 4,782 | 3.0625 | 3 | [] | no_license | from kitchen import kitchenState, annotatedRecipe
from esdcs.groundings import PhysicalObject
#8
#Sweet Cornbread Cake
#(http://allrecipes.com/recipe/sweet-cornbread-cake/detail.aspx)
#
#1 cup cornmeal
#3 cups all-purpose flour
#1 1/3 cups white sugar
#2 tablespoons baking powder
#1 teaspoon salt
#2/3 cup vegetable oil
#1/3 cup melted butter
#2 tablespoons honey
#4 eggs, beaten
#2 1/2 cups whole milk
#
#1. Preheat oven to 350 degrees F (175 degrees C), and grease a 9x13 inch baking dish.
#2. Stir together the cornmeal, flour, sugar, baking powder, and salt in a mixing bowl. Pour in the vegetable oil, melted butter, honey, beaten eggs, and milk, and stir just to moisten.
#3. Pour the batter into the prepared baking dish and bake in the preheated oven for 45 minutes, until the top of the cornbread starts to brown and show cracks.
recipeName = "Sweet Cornbread Cake"
recipeSource = "http://allrecipes.com/recipe/sweet-cornbread-cake/detail.aspx"
#replace None with Physical Objects
#TODO: fix the prism id
# Each entry pairs the recipe's display string with a structured Ingredient;
# the Ingredient carries a PhysicalObject giving its workspace location
# (prism), perception id (lcmId) and tags.
ingredientsList = [("1 cup cornmeal", kitchenState.Ingredient(contains=["cornmeal"], homogenous=True, amount="1 cup",
                                                              physicalObject=PhysicalObject(kitchenState.prism_from_point(7, 3, 1, 2), lcmId=1, tags=['cornmeal']))),
                   ("3 cups all-purpose flour", kitchenState.Ingredient(contains=["flour"], homogenous=True, amount="3 cups",
                                                                        physicalObject=PhysicalObject(kitchenState.prism_from_point(7, 3, 1, 2), lcmId=2, tags=['flour']))),
                   ("1 1/3 cups white sugar", kitchenState.Ingredient(contains=["sugar"], homogenous=True, amount="1 1/3 cups",
                                                                      physicalObject=PhysicalObject(kitchenState.prism_from_point(5, 1, 1, 2), lcmId=3, tags=['sugar']))),
                   ("2 tablespoons baking powder", kitchenState.Ingredient(contains=["baking_powder"], homogenous=True, amount="2 tablespoons",
                                                                           physicalObject=PhysicalObject(kitchenState.prism_from_point(5, 3, 1, 2), lcmId=4, tags=['bakingpowder']))),
                   ("1 teaspoon salt", kitchenState.Ingredient(contains=["salt"], homogenous=True, amount="1 teaspoon",
                                                               physicalObject=PhysicalObject(kitchenState.prism_from_point(5, 3, 1, 2), lcmId=5, tags=['salt']))),
                   ("2/3 cup vegetable oil", kitchenState.Ingredient(contains=["oil"], homogenous=True, amount="2/3 cup",
                                                                     physicalObject=PhysicalObject(kitchenState.prism_from_point(3, 1, 1, 2), lcmId=6, tags=['vegetableoil']))),
                   ("1/3 cup melted butter", kitchenState.Ingredient(contains=["butter"], homogenous=True, amount="1/3 cup",
                                                                     physicalObject=PhysicalObject(kitchenState.prism_from_point(3, 1, 1, 2), lcmId=7, tags=['butter']))),
                   ("2 tablespoons honey", kitchenState.Ingredient(contains=["honey"], homogenous=True, amount="2 tablespoons",
                                                                   physicalObject=PhysicalObject(kitchenState.prism_from_point(3, 3, 1, 2), lcmId=8, tags=['honey']))),
                   ("4 eggs, beaten", kitchenState.Ingredient(contains=["eggs"], homogenous=True, amount="4",
                                                              physicalObject=PhysicalObject(kitchenState.prism_from_point(7, 1, 1, 2), lcmId=9, tags=['eggs']))),
                   ("2 1/2 cups whole milk", kitchenState.Ingredient(contains=["milk"], homogenous=True, amount="2 1/2 cups",
                                                                     physicalObject=PhysicalObject(kitchenState.prism_from_point(5, 3, 1, 2), lcmId=10, tags=['milk'])))]
# Each entry pairs a natural-language instruction with its ground-truth
# action sequence annotation.
instructionsList = [("1. Preheat oven to 350 degrees F (175 degrees C), and grease a 9x13 inch baking dish.", "preheat(350), grease()"),
                    ("2. Stir together the cornmeal, flour, sugar, baking powder, and salt in a mixing bowl.", "pour(cornmeal), pour(sugar), pour(baking_powder), pour(salt), mix()"),
                    ("Pour in the vegetable oil, melted butter, honey, beaten eggs, and milk, and stir just to moisten.", "pour(oil), pour(butter), pour(honey), pour(eggs), pour(milk), mix()"),
                    ("3. Pour the batter into the prepared baking dish and bake in the preheated oven for 45 minutes, until the top of the cornbread stats to brown and show cracks.", "scrape(), bake(45)")]
annotatedRecipeObject = annotatedRecipe.AnnotatedRecipe(recipeName, recipeSource, ingredientsList, instructionsList)
| true |
0417c29439fa07538962b1a16f44f5d34f19bff0 | Python | aluong91/cse103 | /pa2/Binomial | UTF-8 | 2,631 | 3.453125 | 3 | [] | no_license | #!/usr/bin/env python
import argparse
import random
from operator import mul
from math import factorial
from random import random
def choose(n, k):
    """Return the binomial coefficient C(n, k), computed exactly."""
    numerator = factorial(n)
    denominator = factorial(k) * factorial(n - k)
    return numerator // denominator
def exact(n, k, p):
    """Probability of exactly k heads in n independent flips with P(heads) = p."""
    # Binomial coefficient inlined (same computation as the choose() helper).
    n_choose_k = factorial(n) // (factorial(k) * factorial(n - k))
    return n_choose_k * p**k * (1 - p)**(n - k)
def sample(n, p, m):
    """Yield m sequences, each a list of n Bernoulli(p) draws (1 = heads)."""
    for _ in range(m):
        # random() is uniform on [0, 1), so (random() < p) is heads w.p. p.
        yield [1 if random() < p else 0 for _ in range(n)]
# ---------------------------------------------------------------------------
# Command-line driver: compute P(lower <= #heads <= upper) for n flips of a
# p-biased coin, either exactly, by Monte-Carlo sampling, or via Chebyshev.
# print() is used as a function so the script runs under both Python 2 and 3
# (the original bare `print x` statements were Python-2-only syntax).
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='')
parser.add_argument('-t','--type', help='exact, sample, chebyshev', type=str)
parser.add_argument('-n','--length', help='length of sequence', type=int)
parser.add_argument('-p','--p_heads',
        help='The probability one flip of the coin turns up heads', type=float)
parser.add_argument('-m','--number_sequences', help='The number of flips of the coin', type=int)
parser.add_argument('-l','--lower', help='The lower end of the range whose probability is to be calculated', type=int)
parser.add_argument('-u','--upper', help='The upper end of the range whose probability is to be calculated', type=int)

args = vars(parser.parse_args())
# You can get the command line argument by indexing into the args dict.
# Eg to get -p, args['p_heads']. Or -m, args['number_sequences']

if args['type'] == 'exact':
    # Exact answer: sum the binomial pmf over the closed range [lower, upper].
    binom_sum = sum(exact(args['length'], k, args['p_heads'])
                    for k in range(args['lower'], args['upper'] + 1))
    print(binom_sum)

elif args['type'] == 'sample':
    # Monte-Carlo estimate: fraction of m sampled flip-sequences whose head
    # count falls inside [lower, upper].
    n, p, m = args['length'], args['p_heads'], args['number_sequences']
    lower, upper = args['lower'], args['upper']
    hits = 0
    for seq in sample(n, p, m):
        if lower <= seq.count(1) <= upper:
            hits += 1
    binom_sum = hits / float(m)
    print(binom_sum)

elif args['type'] == 'chebyshev':
    # Chebyshev bound on the probability #heads lies in [lower, upper].
    n, p = float(args['length']), float(args['p_heads'])
    lower, upper = float(args['lower']), float(args['upper'])
    mean = n * p
    if lower <= mean <= upper:
        # The mean is inside the range; the trivial bound is 1.
        print(1.0)
    else:
        # Distance from the mean to the nearer end of the range.
        epsilon = lower - mean if mean < lower else mean - upper
        variance = n * p * (1 - p)
        # NOTE(review): this prints var/eps**2, which is Chebyshev's *tail*
        # bound P(|X-mean| >= eps) <= var/eps**2.  A lower bound on being
        # *inside* the range would be 1 - var/eps**2 — confirm which output
        # the assignment expects before changing.
        print(variance / (epsilon ** 2))
| true |
c043db57a5c8e24ae7a4a7fcfd0ba358e4eee2c4 | Python | rabintiwari45/Essay_Grader | /essay_grader_feature_extraction.py | UTF-8 | 2,531 | 2.65625 | 3 | [] | no_license |
import nltk
import spacy
import language_check
import essay_grader_baseline
import pandas as pd
import numpy as np
from textblob import Word
from textblob import TextBlob
from collections import Counter  # fixed: module is "collections", not "collection"
from spellchecker import SpellChecker  # fixed: class is "SpellChecker", not "Spellchecker"
nltk.download('punkt')
import warnings
warnings.filterwarnings("ignore")

PATH = '/content/Essay_Grader/data/training_set_rel3.xls'

def main():
    """Extract linguistic features for essay set 8 and write them to
    essay_grader_features.csv.

    Features per essay: word/sentence counts, vocabulary size, average word
    length, sentiment polarity, POS counts (noun/verb/adjective/adverb),
    punctuation count, spelling-error count and grammar-error count.
    """
    df = essay_grader_baseline.load_data(PATH)
    data = df[df['essay_set'] == 8]
    # The boolean filter keeps the original (non-contiguous) index, but the
    # loop below addresses rows positionally as 0..shape[0]-1, so re-index.
    data = data.reset_index(drop=True)

    # Initialise every feature column (the original code was missing the
    # "= np.nan" assignment on the punctuation column).
    feature_columns = ['essay_length', 'sentence_length', 'unique_word',
                       'sentence', 'average_word_length', 'sentiment',
                       'noun', 'verb', 'adjective', 'adverb',
                       'punctuatuion', 'spelling_error', 'grammar_error']
    for column in feature_columns:
        data[column] = np.nan

    # Expensive resources: construct once instead of once per essay.
    spell = SpellChecker()
    nlp = spacy.load('en')
    tool = language_check.LanguageTool('en-US')

    for i in range(0, data.shape[0]):
        essay_text = str(data['essay'][i])
        blob = TextBlob(essay_text)
        docx = nlp(data['essay'][i])
        sentence_len = [len(sentence.split(' ')) for sentence in blob.sentences]
        word_len = [len(word) for word in blob.words]
        nouns = [token.text for token in docx if token.is_stop != True and token.is_punct != True and token.pos_ == 'NOUN']
        verb = [token.text for token in docx if token.is_stop != True and token.is_punct != True and token.pos_ == 'VERB']
        adj = [token.text for token in docx if token.is_stop != True and token.is_punct != True and token.pos_ == 'ADJ']
        adv = [token.text for token in docx if token.is_stop != True and token.is_punct != True and token.pos_ == 'ADV']
        punc = [token.text for token in docx if token.is_punct == True]
        error = spell.unknown(blob.words)  # fixed: was undefined "blob2"
        g_error = tool.check(essay_text)

        data.at[i, 'essay_length'] = len(blob.words)
        data.at[i, 'sentence_length'] = sum(sentence_len) / len(sentence_len)
        data.at[i, 'unique_word'] = len(set(blob.words))
        data.at[i, 'sentence'] = len(blob.sentences)
        data.at[i, 'average_word_length'] = sum(word_len) / len(word_len)
        data.at[i, 'sentiment'] = blob.sentiment.polarity
        data.at[i, 'noun'] = len(nouns)
        data.at[i, 'verb'] = len(verb)
        data.at[i, 'adjective'] = len(adj)
        data.at[i, 'adverb'] = len(adv)
        data.at[i, 'punctuatuion'] = len(punc)
        data.at[i, 'spelling_error'] = len(error)
        data.at[i, 'grammar_error'] = len(g_error)

    data.to_csv("essay_grader_features.csv", index=False)

if __name__ == "__main__":
    main()
| true |
09b07fe0c15326eec9005f52f3e7c1b5cd2628af | Python | Steps-devmyself/shiyanlouSratches | /MoreAboutPy/e-2-2.py | UTF-8 | 8,654 | 3.046875 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# As a rule of thumb: the X axis holds categorical data, the Y axis numeric data.
path=r"E:\baidudownloads\FORAI\Python-7\(贪心学院)Python基础+机器学习\机器学习集训营资料\第三周\自行车数据实例"
daydf=pd.read_csv(path+r'\day.csv',parse_dates=[1],engine='python')
hourdf=pd.read_csv(path+r'\hour.csv',parse_dates=[1],engine='python')
# Drop columns that are not used in the analysis below.
droplist=['instant', 'season', 'yr', 'mnth', 'holiday', 'workingday', 'weathersit', 'atemp', 'hum']
daydf.drop(labels=droplist,axis=1,inplace=True)
print(daydf.head())
#print(daydf.info(),'\n',hourdf.info())
# TODO: global plotting configuration
import matplotlib
matplotlib.rc('figure',figsize=(14,7))
matplotlib.rc('font',size=14)
matplotlib.rc('axes.spines',top=False,right=False)
matplotlib.rc('axes',grid=False)
matplotlib.rc('axes',facecolor='white')
plt.hist(daydf['windspeed'].values,bins=40)# univariate analysis
# TODO: multivariate analysis — scatter plot between two discrete variables
from matplotlib import font_manager
fonp=font_manager.FontProperties()
fonp.set_family('SimHei')
fonp.set_size(14)
# TODO: detailed per-plot configuration
def scatterplot(x_data, y_data, x_label, y_label, title):
    """Draw a basic scatter plot of y_data against x_data on a fresh figure."""
    _, axes = plt.subplots()
    axes.scatter(x_data, y_data, s=10, color='#539caf', alpha=0.75)
    axes.set_xlabel(x_label)
    axes.set_ylabel(y_label)
    axes.set_title(title)
scatterplot(x_data = daydf['temp'].values
            , y_data = daydf['cnt'].values
            , x_label = 'Normalized temperature (C)'
            , y_label = 'Check outs'
            , title = 'Number of Check Outs vs Temperature')
# TODO: fit a linear regression
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import summary_table# summary of fit information
x=sm.add_constant(daydf['temp'])# temp as the independent variable, plus an intercept term
y=daydf['cnt']
regr=sm.OLS(y,x)#ordinary least square model
res=regr.fit()# fit() returns a results object, inspected via summary_table
st, data, ss2 = summary_table(res, alpha=0.05) # confidence level alpha=5%; st = summary, data = per-row details, ss2 = column names
fitted_values = data[:,2]
# TODO 画出拟合后的曲线
def lmlineplot(x_data, y_data, x_label, y_label, title):
    """Plot a fitted regression line on a fresh figure."""
    _, axes = plt.subplots()
    axes.plot(x_data, y_data, lw=2, alpha=1)
    axes.set_title(title)
    axes.set_xlabel(x_label)
    axes.set_ylabel(y_label)
lmlineplot(daydf['temp'],fitted_values,
           x_label='Normalized temperature (C)'
           , y_label='Check outs'
           , title='Number of Check Outs vs Temperature(lm)'
           )
# TODO: fitted line with a confidence band
# Extract the 5% confidence-interval bounds; requires knowing the layout of `data` after the fit
predict_mean_ci_low, predict_mean_ci_upp = data[:,4:6].T
# Build a DataFrame holding the lower/upper confidence bounds
CI_df = pd.DataFrame(columns = ['x_data', 'low_CI', 'upper_CI'])
CI_df['x_data'] = daydf['temp']
CI_df['low_CI'] = predict_mean_ci_low
CI_df['upper_CI'] = predict_mean_ci_upp
CI_df.sort_values('x_data', inplace = True) # sort by x_data to match the ordering used during regression
# plotting
def lineplotCI(x_data, y_data, sorted_x, low_CI, upper_CI, x_label, y_label, title):
    """Plot the fitted line together with a shaded confidence band."""
    _, axes = plt.subplots()
    # Fitted prediction curve.
    axes.plot(x_data, y_data, lw=1, color='#539caf', alpha=1, label='Fit')
    # Shaded region between the lower and upper confidence bounds.
    axes.fill_between(sorted_x, low_CI, upper_CI, color='#539caf', alpha=0.5, label='95%CI')
    axes.set_xlabel(x_label)
    axes.set_ylabel(y_label)
    axes.set_title(title)
    axes.legend(loc='best')
lineplotCI(daydf['temp'],fitted_values,CI_df['x_data'],CI_df['low_CI'],CI_df['upper_CI'],
           x_label='Normalized temperature (C)'
           , y_label='Check outs'
           , title='Number of Check Outs vs Temperature(lm with CI)')
# TODO: dual-axis line plot
# - when the fit does not meet the confidence threshold, consider adding an independent variable
# - useful to compare variables measured on different scales
#share x axis
def lineplot2y(x_data, x_label, y1_data, y1_color, y1_label, y2_data, y2_color, y2_label, title):
    """Plot two series sharing the X axis but using independent Y axes.

    NOTE: x_label is accepted for signature compatibility but not rendered,
    matching the original behaviour.
    """
    _, primary_axes = plt.subplots()
    primary_axes.plot(x_data, y1_data, color=y1_color, label=y1_label)
    primary_axes.set_ylabel(y1_label, color=y1_color)
    primary_axes.set_title(title)
    primary_axes.legend(loc='upper left')
    # Second axes object sharing X with the first.
    secondary_axes = primary_axes.twinx()
    secondary_axes.plot(x_data, y2_data, color=y2_color, label=y2_label)
    secondary_axes.set_ylabel(y2_label, color=y2_color)
    secondary_axes.spines['right'].set_visible(True)
    secondary_axes.legend(loc='upper right')
lineplot2y(x_data = daydf['dteday']
           , x_label = 'Day'
           , y1_data = daydf['cnt']
           , y1_color = '#539caf'
           , y1_label = 'Check outs'
           , y2_data = daydf['windspeed']
           , y2_color = '#7663b0'
           , y2_label = 'Normalized windspeed'
           , title = 'Check Outs and Windspeed Over Time')
#TODO: between-group analysis — quantitative comparison, grouping granularity, per-group aggregation
# groupby composes with many aggregation functions; choose the combination you need
cnt_day=daydf[['weekday','cnt']].groupby('weekday').agg(['mean','std'])# droplevel is required, otherwise the columns cannot be addressed
cnt_day.columns=cnt_day.columns.droplevel()
def barplot(x_data, y_data, error_data, x_label, y_label, title):
    """Bar chart with symmetric error bars drawn on top of each bar."""
    _, axes = plt.subplots()
    axes.bar(x_data, y_data, color='#539caf', align='center')
    # ls='None' suppresses the connecting line between error bars.
    axes.errorbar(x_data, y_data, yerr=error_data, color='#297083', ls='None', lw=5)
    axes.set_xlabel(x_label)
    axes.set_ylabel(y_label)
    axes.set_title(title)
barplot(x_data = cnt_day.index.values
        , y_data = cnt_day['mean']
        , error_data = cnt_day['std']
        , x_label = 'Day of week'
        , y_label = 'Check outs'
        , title = 'Total Check Outs By Day of Week (0 = Sunday)')
#TODO: stacked bar chart — with few categories it reads better than a pie chart
mean_by_reg_co_day=daydf[['weekday','registered','casual']].groupby('weekday').mean()
# Per-day share of registered vs casual usage, normalised to sum to 1
mean_by_reg_co_day['total'] = mean_by_reg_co_day['registered'] + mean_by_reg_co_day['casual']
mean_by_reg_co_day['reg_prop'] = mean_by_reg_co_day['registered'] / mean_by_reg_co_day['total']
mean_by_reg_co_day['casual_prop'] = mean_by_reg_co_day['casual'] / mean_by_reg_co_day['total']
def stackedbarplot(x_data, y_data_list, y_data_names, colors, x_label, y_label, title):
    """Stacked bar chart: each series in y_data_list is drawn on top of the
    cumulative total of all previous series.

    Bug fix: the baseline for series i must be the cumulative sum of series
    0..i-1, not just series i-1. The original only rendered correctly for
    exactly two series; with three or more, upper bars overlapped.
    """
    _, ax = plt.subplots()
    running_bottom = None
    for i in range(0, len(y_data_list)):
        series = np.asarray(y_data_list[i], dtype=float)
        if running_bottom is None:
            # First series sits on the X axis.
            ax.bar(x_data, series, color=colors[i],
                   align='center', label=y_data_names[i])
            running_bottom = series.copy()
        else:
            # Subsequent series start at the cumulative height so far.
            ax.bar(x_data, series, color=colors[i],
                   bottom=running_bottom, align='center', label=y_data_names[i])
            running_bottom = running_bottom + series
    ax.set_ylabel(y_label)
    ax.set_xlabel(x_label)
    ax.set_title(title)
    ax.legend(loc='upper right')  # legend position
stackedbarplot(x_data = mean_by_reg_co_day.index.values
               , y_data_list = [mean_by_reg_co_day['reg_prop'], mean_by_reg_co_day['casual_prop']]
               , y_data_names = ['Registered', 'Casual']
               , colors = ['#539caf', '#7663b0']
               , x_label = 'Day of week'
               , y_label = 'Proportion of check outs'
               , title = 'Check Outs By Registration Status and Day of Week (0 = Sunday)')
#TODO: grouped bar chart
# - compares absolute values across multiple categories side by side
def groupedbarplot(x_data, y_data_list, y_data_names, colors, x_label, y_label, title):
    """Grouped bar chart: draw each series side by side within every x bucket."""
    _, ax = plt.subplots()
    # Total width allotted to each group of bars
    total_width = 0.8
    # Width of each individual bar within the group
    ind_width = total_width / len(y_data_list)
    # Horizontal offset of each bar's centre relative to the group centre
    alteration = np.arange(-total_width/2+ind_width/2, total_width/2+ind_width/2, ind_width)
    # Draw each series with its own offset
    for i in range(0, len(y_data_list)):
        # Shift this series horizontally so the bars sit next to each other
        ax.bar(x_data + alteration[i], y_data_list[i], color = colors[i], label = y_data_names[i], width = ind_width)
    ax.set_ylabel(y_label)
    ax.set_xlabel(x_label)
    ax.set_title(title)
    ax.legend(loc = 'upper right')
# Invoke the grouped-bar helper and render all figures
groupedbarplot(x_data = mean_by_reg_co_day.index.values
               , y_data_list = [mean_by_reg_co_day['registered'], mean_by_reg_co_day['casual']]
               , y_data_names = ['Registered', 'Casual']
               , colors = ['#539caf', '#7663b0']
               , x_label = 'Day of week'
               , y_label = 'Check outs'
               , title = 'Check Outs By Registration Status and Day of Week (0 = Sunday)')
plt.show()
9c1f9327a542f40673828b7eee122f9ec4992d16 | Python | fecgov/regulations-parser | /tests/tree_xml_parser_simple_hierarchy_processor_tests.py | UTF-8 | 2,599 | 3.046875 | 3 | [
"CC0-1.0"
] | permissive | from unittest import TestCase
from regparser.test_utils.node_accessor import NodeAccessor
from regparser.test_utils.xml_builder import XMLBuilder
from regparser.tree.xml_parser.simple_hierarchy_processor import \
SimpleHierarchyMatcher
class SimpleHierarchyTests(TestCase):
    """Tests for SimpleHierarchyMatcher's conversion of flat XML paragraphs
    into a nested node tree."""

    def test_deep_hierarchy(self):
        """Run through a full example, converting an XML node into an
        appropriate tree of nodes"""
        # Paragraph markers "(a)", "i.", "(1)" encode three hierarchy levels.
        with XMLBuilder("ROOT") as ctx:
            ctx.P("(a) AAA")
            ctx.P("(b) BBB")
            ctx.P("i. BIBIBI")
            ctx.P("ii. BIIBIIBII")
            ctx.P("(1) BII1BII1BII1")
            ctx.P("(2) BII2BII2BII2")
            ctx.P("iii. BIIIBIIIBIII")
            ctx.P("(c) CCC")
        matcher = SimpleHierarchyMatcher(['ROOT'], 'some_type')
        nodes = matcher.derive_nodes(ctx.xml)
        # A single root node carries the requested node_type.
        self.assertEqual(1, len(nodes))
        node = NodeAccessor(nodes[0])
        self.assertEqual('some_type', node.node_type)
        self.assertEqual(['a', 'b', 'c'], node.child_labels)

        # Children do not inherit the root's node_type.
        self.assertNotEqual('some_type', node['a'].node_type)
        self.assertEqual(node['a'].text, '(a) AAA')
        self.assertEqual([], node['a'].child_labels)
        self.assertEqual(node['c'].text, '(c) CCC')
        self.assertEqual([], node['c'].child_labels)

        # (b) holds the roman-numeral level, and (b)(ii) the numeric level.
        self.assertEqual(node['b'].text, '(b) BBB')
        self.assertEqual(['i', 'ii', 'iii'], node['b'].child_labels)
        self.assertEqual(node['b']['i'].text, 'i. BIBIBI')
        self.assertEqual([], node['b']['i'].child_labels)
        self.assertEqual(node['b']['iii'].text, 'iii. BIIIBIIIBIII')
        self.assertEqual([], node['b']['iii'].child_labels)

        self.assertEqual(node['b']['ii'].text, 'ii. BIIBIIBII')
        self.assertEqual(['1', '2'], node['b']['ii'].child_labels)
        self.assertEqual(node['b']['ii']['1'].text, '(1) BII1BII1BII1')
        self.assertEqual([], node['b']['ii']['1'].child_labels)
        self.assertEqual(node['b']['ii']['2'].text, '(2) BII2BII2BII2')
        self.assertEqual([], node['b']['ii']['2'].child_labels)

    def test_no_children(self):
        """Elements with only one, markerless paragraph should not have
        children"""
        with XMLBuilder("NOTE") as ctx:
            ctx.P("Some text here")
        matcher = SimpleHierarchyMatcher(['NOTE'], 'note')
        nodes = matcher.derive_nodes(ctx.xml)
        self.assertEqual(1, len(nodes))
        node = nodes[0]
        self.assertEqual('note', node.node_type)
        self.assertEqual('Some text here', node.text)
        self.assertEqual([], node.children)
| true |
ceba6b82bbbbcc0ffb77a034a4087aeea1ac1e43 | Python | drrid/ArchitectureDict | /arch_dict.py | UTF-8 | 1,406 | 2.828125 | 3 | [] | no_license | from requests import get
from bs4 import BeautifulSoup
from docx import Document
doc = Document()  # the Word document all definitions are appended to
DICT_BASE_URL = "http://www.linternaute.com"  # dictionary site scraped below
all_terms = []  # module-level accumulator of term URLs, filled by get_terms()
def get_terms(pageNumber):
    """Scrape one listing page of architecture terms and append each term's
    absolute URL to the module-level ``all_terms`` list.

    pageNumber -- page index as a string, e.g. "1".
    """
    url = "{}/dictionnaire/fr/theme/architecture/{}/".format(DICT_BASE_URL, pageNumber)
    base_url = get(url)
    soup = BeautifulSoup(base_url.content, "html.parser")
    # The listing page holds all terms in a single <ul> grid of <li> links.
    term_grid = soup.find("ul", class_="dico_liste grid_line")
    terms = term_grid.find_all("li")
    for term in terms:
        all_terms.append(DICT_BASE_URL + term.a.get("href"))
def get_term(term):
    """Fetch one term page and return ``[title, definition]``.

    Picks the definition block whose left-hand label mentions
    'Architecture' (the last matching one wins); falls back to the first
    block (index 0) when none matches.
    """
    n = 0
    base_url = get(term)
    soup = BeautifulSoup(base_url.content, "html.parser")
    # Find the index of the definition labelled 'Architecture'.
    for l, el in enumerate(soup.find_all("div", class_="grid_left")):
        if el.div and ('Architecture' in el.div.get_text()):
            n = l
    definition = soup.find_all("div", class_="grid_last")[n].get_text().strip()
    # Collapse all internal whitespace runs to single spaces.
    definition = ' '.join(definition.split())
    title = ' '.join(soup.find("span", class_="dico_title_2").get_text().split())
    return [title, definition]
if __name__ == '__main__':
    # Collect term URLs from the first three listing pages, then write every
    # term's title, category and definition into a Word document.
    for i in ["1", "2", "3"]:
        get_terms(i)
    for index, t in enumerate(all_terms):
        term_list = get_term(t)
        # Titles look like "Term, Category" — split once on the comma.
        # NOTE(review): assumes every title contains a comma; a title without
        # one would raise IndexError — confirm against the site's format.
        title = term_list[0].split(',')[0]
        catg = term_list[0].split(',')[1].strip()
        doc.add_heading(title + ' :', level=1)
        doc.add_heading(catg, level=3)
        doc.add_paragraph(term_list[1])
        # print() as a function so this line is valid Python 3 as well
        # (the original used a Python-2-only print statement).
        print(str(index) + ' . ' + title)
    doc.save('dict_test.docx')
| true |
a5e27837fded8e826ab5af36d455a5bab3466944 | Python | daniel-reich/ubiquitous-fiesta | /GPibesdkGf433vHBX_5.py | UTF-8 | 430 | 3.046875 | 3 | [] | no_license |
def goldbach_conjecture(n):
if n > 2 and n % 2 == 1:
return []
if n <= 2:
return []
p = []
for i in range(2,n):
isPrime = True
for j in range(2,i):
if i % j == 0:
isPrime = False
break
if isPrime:
p.append(i)
for i in range(len(p)-1,-1,-1):
if n - p[i] in p:
return [n-p[i],p[i]]
| true |
1f7b281ee1d7ad0fd87e9e0bda440fec0db61924 | Python | VetoFTW/CSCI127 | /program_37.py | UTF-8 | 521 | 3.34375 | 3 | [] | no_license | # Name: Martin Czarnecki
# Email: martin.czarnecki99@myhunter.cuny.edu
# Date: October 9, 2021
from os import sep
import pandas as pd
userInputName = input("Enter file name: ")
vgSalesCSV = pd.read_csv(userInputName)
print("There are", vgSalesCSV.count().get("Rank"), "total games")
print("The number of game in each genre are", vgSalesCSV.groupby("Genre")["Name"].count().sort_values(ascending=False), sep="\n")
print("The top 3 game publishers are", vgSalesCSV.groupby("Publisher")["Name"].count().sort_values(ascending=False)[:3], sep="\n")
| true |
a807db1e50131a54f304cc914104e0d433667b6c | Python | alopolska2018/image_search_products | /lib/func_txy.py | UTF-8 | 2,256 | 2.5625 | 3 | [] | no_license | #!/usr/bin/env python
# encoding: utf-8
import time
import random
import string
import requests
import contextlib
import ssl
# NOTE(review): this globally disables TLS certificate verification for every
# HTTPS request made by the process — a security risk; confirm it is required.
ssl._create_default_https_context = ssl._create_unverified_context
def get_random_only_letter(k):
    """Return a string of k uniformly random ASCII letters (upper and lower case)."""
    picked = random.choices(string.ascii_letters, k=k)
    return ''.join(picked)
def get_random_str(k):
    """Return a string of k uniformly random ASCII letters and digits."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=k))
def request_get(url, params=None, headers={}, allow_redirects=True):
    """GET *url* and parse the body as JSON.

    Returns ("succ", parsed_json) on success, ("fail", {}) on any error.
    NOTE(review): headers={} is a shared mutable default — safe only while
    callers never mutate it; consider headers=None in a behaviour change.
    """
    try:
        with contextlib.closing(
                requests.get(url, params=params, headers=headers, timeout=30, allow_redirects=allow_redirects)) as req:
            data = req.json()
            # print("[{}]\nurl:{}\nparams:{}\nresult:{}".format(now(), url, json.dumps(params, ensure_ascii=False),
            #                                                   json.dumps(data, ensure_ascii=False)))
            return "succ", data
    except Exception as e:
        print(e)
        return "fail", {}


def request_get_content(url, params=None, headers={}, allow_redirects=True):
    """GET *url* and return the raw response bytes.

    Returns ("succ", bytes) on success, ("fail", "") on any error.
    """
    try:
        with contextlib.closing(
                requests.get(url, params=params, headers=headers, timeout=30, allow_redirects=allow_redirects)) as req:
            data = req.content
            return "succ", data
    except Exception as e:
        print(e)
        return "fail", ""


def request_get_text(url, params=None, headers={}, allow_redirects=True):
    """GET *url* and return the decoded response text.

    Returns ("succ", str) on success, ("fail", "") on any error.
    """
    try:
        with contextlib.closing(
                requests.get(url, params=params, headers=headers, timeout=30, allow_redirects=allow_redirects)) as req:
            data = req.text
            return "succ", data
    except Exception as e:
        print(e)
        return "fail", ""


def request_post(url, data=None, files=None, headers={}):
    """POST to *url* and return the response text.

    Returns ("succ", str) on success, ("fail", {}) on any error.
    NOTE(review): the failure payload is {} here but "" in the GET helpers —
    confirm whether callers rely on this inconsistency before unifying.
    """
    try:
        with contextlib.closing(requests.post(url, data=data, files=files, headers=headers, timeout=30)) as req:
            data = req.text
            return "succ", data
    except Exception as e:
        print(e)
        return "fail", {}
def filter_map(params):
    """Delete every key whose value is falsy ('' , 0, None, empty containers).

    Mutates *params* in place; returns None.
    """
    empty_keys = [key for key, value in params.items() if not value]
    for key in empty_keys:
        del params[key]
def now():
    """Return the current local time as a string, e.g. '2019-10-13 12:00:00'.

    %X is the locale's time representation (HH:MM:SS in the C locale).
    """
    timestamp_format = "%Y-%m-%d %X"
    return time.strftime(timestamp_format)
| true |
dd81122bff5dda0f0725b497e8345ca4b332e02e | Python | troyjmiller/bin | /quiz | UTF-8 | 621 | 3.375 | 3 | [] | no_license | #!/usr/bin/env python3
import random
import colors as c
import quiz_questions as q
import questions_troy as t
score = 0

print(c.pink + '''
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
Welcome to the game of quiz!!!
~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
''' + c.reset)

print("Would you like Geography or Troy?")

questions = [ q.q1, q.q2, q.q3, q.q4 ]

# Keep asking until every question has been answered correctly.
while questions:
    # Bug fix: the original assigned the pick to q.question but then removed
    # an undefined local name `question`, raising NameError on a correct
    # answer. Use a plain local variable instead.
    question = random.choice(questions)
    points = question()
    if points:
        questions.remove(question)
        score += points
    else:
        print("Disheartening, please try again")

print('Well done. Your score is: ' + c.green + str(score) + c.reset)
087cd98b4a39c67b4c71e33971bd3936b85e245a | Python | redice44/Traffic-Simulator-2.0 | /Code/server/algo/graph/util.py | UTF-8 | 11,469 | 3.046875 | 3 | [] | no_license | # Separation of utily functions previously known as the class GraphUtils.
from math import ceil, log2
from typing import Any, Tuple, List, Dict, FrozenSet, NamedTuple, Set
import networkx as nx
import numpy as np
class NonPowerOf2Graph(Exception):
    """Raised when an algorithm requires a graph whose diameter is a power of 2."""
    pass

# Types / Structs =============================================================
# One level of a hierarchical decomposition: a partition of V into clusters.
HDS_Element = FrozenSet[FrozenSet]
# A hierarchical decomposition sequence: partitions from finest to coarsest.
HDS = List[HDS_Element]
# A node of the hierarchical decomposition tree: (cluster, level in the HDS).
HDT_Node = NamedTuple("HDT_Node", [('set', FrozenSet), ('level', int)])
# =============================================================================
def create_in_out_mat(M, new_weight=np.float64(1)):
    """Every node in the represented graph G is broken up into two nodes.
    One of those two nodes represents an "in" node and the other an "out"
    node. The "in" node retains all incoming edges from the original node,
    with one outgoing edge into the new "out" node. The "out" node retains
    all outgoing edges from the original node.

    The directed edge from "in" to "out" node is given a new weight
    (random or specified), while all original edges retain their original
    weight.

    Raises ValueError for a non-square input matrix. (Bug fix: the original
    raised the undefined name NonSquareMatrix, which produced a NameError.)
    """
    if M.shape[0] != M.shape[1]:
        raise ValueError("adjacency matrix must be square, got shape {}".format(M.shape))

    i, j = M.shape[0], M.shape[1]
    double_i, double_j = i*2, j*2
    new_matrix = np.zeros((double_i, double_j), dtype=np.float64)

    # Copying old matrix M into bottom left of the new matrix
    # (rows = "out" nodes, columns = "in" nodes keep the original edges).
    new_matrix[i:double_i, 0:j] = M

    # Diagonal of top right square corresponds to the edge weight from
    # in to out vertices.
    np.fill_diagonal(
        new_matrix[0:double_i, j:double_j],
        new_weight
    )

    return new_matrix
def create_pow2_diameter_mat(np_mat):
    """Out of an adjacency matrix which denotes some graph G = (V, E, w)
    create an adjacency matrix which denotes some graph G' = (V, E, w_c)
    with diameter equal to some power of 2. Also, each edge will have
    weight >= 1.

    See Lemma 3.1 for more details.

    NOTE:
        The given adjacency matrix MUST denote a STRONGLY connected graph.

    Fixes over the original:
      * check_num_pow2 is a module-level function; the old GraphUtils class
        no longer exists, so GraphUtils.check_num_pow2 raised NameError.
      * np.float was removed from NumPy (1.24+); plain float is used.
    """
    # Negative weights delimit a non-existent edge between two nodes, which
    # is equivalent to edge weight of infinity. 0.0 is used to represent a
    # non-existent edge as this is what networkx expects in order to not
    # have an edge between the two nodes.
    np_mat[np_mat < 0.0] = 0.0
    G_min_edge = np_mat[np_mat > 0.0].min()
    G = nx.DiGraph(np_mat)
    G_diam = graph_diameter(G)

    # No need to do any transformations when all weights are already >= 1
    # and the diameter is an integral power of 2.
    if G_min_edge >= 1.0 and \
       G_diam.is_integer() and \
       check_num_pow2(int(G_diam)):
        return np_mat

    # First rescale so the minimum edge weight becomes (1 + epsilon) >= 1.
    epsilon = 0.01
    mult_const = float((1 + epsilon) / G_min_edge)
    vec_func = np.vectorize(lambda x: mult_const * x, otypes=[float])
    np_mat = vec_func(np_mat)

    # Then rescale so the diameter lands exactly on the next power of 2.
    G_p = nx.DiGraph(np_mat)
    G_p_diam = graph_diameter(G_p)
    mult_const = ((2 ** ceil(log2(G_p_diam))) / G_p_diam)
    vec_func = np.vectorize(lambda x: mult_const * x, otypes=[float])
    np_mat = vec_func(np_mat)

    return np_mat
def graph_diameter(G):
    """Compute the diameter of a given graph.

    Runs all-pairs shortest paths (Floyd–Warshall) and takes the maximum
    distance, so the result is a float (np.float64).

    NOTE:
        Given graph MUST be STRONGLY connected, otherwise the matrix
        contains infinities and the max is inf.
    """
    # @TODO: choose the better algorithm depending on the density of
    # the graph
    return nx.floyd_warshall_numpy(G).max()
def check_num_pow2(num):
    """Return True iff *num* is a nonzero power of two.

    Raises TypeError for any value that is not exactly of type int
    (floats are rejected even when integral, matching existing callers
    that pre-convert with int()).
    """
    if type(num) is not int:
        raise TypeError(
            "{} is not an integer. Type: {}".format(num, type(num))
        )
    # A power of two has a single set bit: n & (n - 1) clears it.
    return bool(num) and not (num & (num - 1))
def filter_out_above(original_dict, equal_or_above):
    """Return a copy of the nested mapping with every outer key, inner key
    and list element that is >= *equal_or_above* removed.

    The input is left unmodified.
    """
    return {
        outer_key: {
            inner_key: [node for node in nodes if node < equal_or_above]
            for inner_key, nodes in inner_map.items()
            if inner_key < equal_or_above
        }
        for outer_key, inner_map in original_dict.items()
        if outer_key < equal_or_above
    }
def top_down_integral_scheme_generation(G, const=27) -> Dict[int, Dict[int, List[int]]]:
    """Build an all-pairs routing scheme for *G* from randomized hierarchical
    decompositions (HDS/HDT pairs).

    For every ordered pair (s, t) the scheme maps s -> t -> concrete path,
    using the first decomposition in which both endpoints are alpha-padded.

    Requires G.diam to be a power of 2 (raises NonPowerOf2Graph otherwise).

    Fix over the original: projection() is a module-level function now — the
    old GraphUtils class was dissolved, so GraphUtils.projection raised
    NameError at runtime.
    """
    if not check_num_pow2(G.diam):
        raise NonPowerOf2Graph("{}".format(G.diam))
    V = set(G.nodes())
    num_iterations = const * int(log2(len(V)))

    # Generate num_iterations independent (HDS, HDT) pairs.
    HDST_list = [None] * num_iterations
    for i in range(num_iterations):
        hds = randomized_HDS_gen(G)
        hdt = HDS_to_HDT(hds)
        HDST_list[i] = (hds, hdt)

    # Precompute, per node, in which decompositions it is alpha-padded.
    alpha = min((1 / log2(len(V))), 1/8)
    alpha_padded = {}
    for node in V:
        alpha_padded[node] = [False] * num_iterations
        for i, (hds, _) in enumerate(HDST_list):
            alpha_padded[node][i] = check_alpha_padded(G, hds, alpha, node)

    scheme = {}  # type: Dict[int, Dict[int, List[int]]]
    for s in V:
        s_int = int(s)
        scheme[s_int] = {}
        for t in V - {s}:
            t_int = int(t)
            # Pick the first tree where both s and t are alpha-padded.
            tree = None
            for i in range(num_iterations):
                if alpha_padded[s][i] and alpha_padded[t][i]:
                    tree = HDST_list[i][1]
                    break
            # s and t must be simultaneously alpha-padded in at least one
            # of the generated HDS's.
            assert(tree is not None)

            s_node = HDT_Node(frozenset([s]), 0)
            t_node = HDT_Node(frozenset([t]), 0)
            # TODO(review): HDT_leaf_to_leaf_path is not defined in this
            # module — confirm it was migrated out of the old GraphUtils
            # class along with the other helpers.
            path = HDT_leaf_to_leaf_path(tree, s_node, t_node)
            scheme[s_int][t_int] = [int(v) for v in projection(G, path)]

    # Every node routes to itself via the trivial one-node path.
    for v in V:
        v_int = int(v)
        scheme[v_int][v_int] = [int(v)]
    return scheme
def randomized_HDS_gen(G, pi=None, U=None) -> HDS:
    """Generate a HDS based on given or randomly generated paramters.

    Using Algorithm 3.1 (Fakcharoenphol's Algorithm).

    pi -- optional fixed permutation of the vertices (random if None).
    U  -- optional fixed radius multiplier in [0.5, 1) (random if None).
    Returns a list of partitions H[0..h], finest to coarsest, with
    H[h] = {V} and h = log2(G.diam).
    """
    class Vertex(object):
        # Per-vertex working state for one level of the decomposition.
        # Trying to reduce object size by using __slots__
        __slots__ = ['rep', 'cluster', 'flag']

        def __init__(self, rep, cluster, flag):
            self.rep = rep          # representative vertex chosen for this vertex
            self.cluster = cluster  # set of vertices claimed by this vertex
            self.flag = flag        # True while the vertex is still unclaimed

    num_vertices = len(G.nodes())
    V = frozenset(np.arange(num_vertices))
    if not pi:
        pi = np.random.permutation(num_vertices)
        # print("Random permutation: {}".format(pi))
    if not U:
        U = np.random.uniform(.5, 1)
        # print("Random num: {}".format(U))

    h = int(log2(G.diam))
    H = [None] * (h + 1)  # type: List[FrozenSet[FrozenSet]]
    # Coarsest level: the whole vertex set as a single cluster.
    H[h] = frozenset([V])

    vertex_dict = {}
    for v in V:
        vertex_dict[v] = Vertex(None, None, True)

    # Refine each level i+1 into level i using radius r = U * 2**i balls.
    for i in reversed(range(h)):
        H_i = set()
        # TODO: is there an issue in the book? Is it actually supposed to
        # be 2**(i-1)?
        r = U * (2**(i))
        # Cache r-neighborhoods per (vertex, level) — they are reused below.
        memoized_nbhds = {}  # type: Dict[Tuple[int, int], List[int]]
        for cluster in H[i+1]:
            # Phase 1: every vertex picks as representative the earliest
            # vertex (in permutation order) inside both its r-ball and its
            # current cluster.
            for v in cluster:
                v_ver = vertex_dict[v]
                v_ver.cluster = set()
                v_ver.flag = True
                v_ver.rep = None

                if (v, i) not in memoized_nbhds:
                    v_nbhd = G.r_neighborhood(v, r)
                    memoized_nbhds[(v, i)] = v_nbhd
                else:
                    v_nbhd = memoized_nbhds[(v, i)]

                # Get first vertex in random permutation that is both
                # in cluster and in the r_neighborhood of v. Set this
                # vertex as the representative of v.
                for j in pi:
                    if j in cluster and j in v_nbhd:
                        v_ver.rep = j
                        break
                # Could not find a representative. Something is wrong.
                assert(v_ver.rep is not None)

            # Phase 2: group vertices by their chosen representative.
            for v in cluster:
                v_ver = vertex_dict[v]
                for u in cluster:
                    u_ver = vertex_dict[u]
                    if u_ver.flag and u_ver.rep == v:
                        u_ver.flag = False
                        v_ver.cluster.add(u)

            # Phase 3: emit the non-empty clusters as the level-i partition.
            for v in cluster:
                v_ver = vertex_dict[v]
                if v_ver.cluster:
                    H_i.add(frozenset(v_ver.cluster))
        H[i] = frozenset(H_i)
    return H
def HDS_to_HDT(hds: HDS):
    """Convert a hierarchical decomposition sequence into its tree form.

    Each cluster C at level i becomes an HDT_Node(C, i), linked to the
    (unique) cluster at level i+1 that contains it. Returns an undirected
    networkx Graph of those nodes.
    """
    G = nx.Graph()
    # Walk levels from coarsest-but-one down to the leaves, wiring each
    # cluster to its parent one level up.
    for i in reversed(range(len(hds) - 1)):
        for C in hds[i]:
            node = HDT_Node(C, i)
            G.add_node(node)
            added_parent = False
            for Cp in hds[i+1]:
                parent_node = HDT_Node(Cp, i+1)
                if C <= Cp:
                    G.add_edge(node, parent_node)
                    added_parent = True
                    break
            # Every cluster must be contained in some parent cluster,
            # otherwise the HDS is malformed.
            assert(added_parent is True)
    return G
def check_alpha_padded(G, hds: HDS, alpha: float, v: int, debug=False) -> bool:
    """Return True iff vertex *v* is alpha-padded in the given HDS.

    v is alpha-padded when, at every level i, its (alpha * 2**i)-ball is
    entirely contained in a single cluster of the level-i partition.
    """
    def _is_subset_of_a_cluster(nbhd, d_partition):
        # True iff the neighborhood fits inside one cluster of the partition.
        for cluster in d_partition:
            if nbhd <= cluster: return True
        return False

    for i, delta_partition in enumerate(hds):
        v_nbhd = G.r_neighborhood(v, alpha * (2**i))
        if not _is_subset_of_a_cluster(v_nbhd, delta_partition):
            if debug: print(i)
            return False
    return True
def projection(G, hdt_path: List[HDT_Node]) -> List[int]:
    """Project a path through the decomposition tree onto concrete vertices.

    For each tree node a random representative vertex is drawn from its
    cluster; consecutive representatives are joined with shortest paths in
    G, and the resulting walk is compressed to remove cycles.
    """
    starting_node = hdt_path[0]  # type: HDT_Node
    prev_representative = np.random.choice(tuple(starting_node.set))
    # Setting this up for easy calls to merge function later
    projection_path = [prev_representative]
    for hdt_node in hdt_path[1:]:
        representative = np.random.choice(tuple(hdt_node.set))
        projection_path = merge(
            projection_path,
            G.get_shortest_path(prev_representative, representative)
        )
        prev_representative = representative
    return compress_path(projection_path)
def merge(path1, path2: List[Any]) -> List[Any]:
    """Merge two paths that have overlapping end/start vertices.

    path1: [v_1, v_2, ... , v_k]
                              ||
    path2:                  [v_k, v_k+1, ...]

    Raises ValueError when the paths do not overlap. (Bug fix: the original
    raised the undefined name NonOverlappingEndVertices — a NameError.)
    """
    if path1[-1] != path2[0]:
        raise ValueError(
            "end of path1 ({}) does not match start of path2 ({})".format(
                path1[-1], path2[0]))
    return path1[:-1] + path2
def compress_path(path: List[Any]) -> List[Any]:
    """Remove any cycles from a given path, returning a new list.

    Works backwards through the path: when a vertex is revisited, the
    segment between the two occurrences (the cycle) is discarded.
    """
    seen = set()   # type: Set
    stack = []     # type: List
    for vertex in reversed(path):
        if vertex not in seen:
            seen.add(vertex)
            stack.append(vertex)
            continue
        # Revisited vertex: unwind the cycle back to its first sighting.
        top = stack.pop()
        while top != vertex:
            seen.discard(top)
            top = stack.pop()
        stack.append(top)
    stack.reverse()
    return stack
def all_pairs_dijkstra_shortest_path_and_length(G):
    """Return (all_pairs_paths, all_sp_len): the networkx all-pairs Dijkstra
    paths plus an n x n matrix of the corresponding path lengths, computed
    by summing edge weights along each path.

    NOTE(review): modern networkx returns an *iterator* of (node, paths)
    tuples from all_pairs_dijkstra_path; the dict-style indexing below
    (all_pairs[s][t]) only works on older versions where a dict was
    returned — confirm the pinned networkx version.
    """
    num_nodes = len(G.nodes())
    # all_pairs = 0
    # all_sp_len = 0
    all_pairs = nx.all_pairs_dijkstra_path(G)
    # all_sp_len = [array('f', [0.0] * num_nodes) for _ in range(num_nodes)]
    all_sp_len = [[0.0] * num_nodes for _ in range(num_nodes)]

    # Takes time: walk every shortest path and accumulate its edge weights.
    for s in all_pairs:
        for t in all_pairs[s]:
            length = 0.0
            prev = all_pairs[s][t][0]
            for v in all_pairs[s][t][1:]:
                length += G[prev][v]['weight']
                prev = v
            all_sp_len[s][t] = length

    return all_pairs, all_sp_len
def dict_to_json(d):
    """Convert a scheme dict {int: {int: [vertex, ...]}} into a JSON-safe
    form with string keys and plain-int list elements (numpy scalars are
    unboxed via .item()).

    NOTE(review): assumes both key levels are exactly 0..len-1 — confirm
    against the scheme produced upstream.
    """
    result = {}
    for outer in range(len(d)):
        outer_value = d[outer]
        result[str(outer)] = {}
        # An int value means there is no nested mapping to convert.
        if isinstance(outer_value, int):
            continue
        for inner in range(len(outer_value)):
            vertices = d[outer][inner]
            result[str(outer)][str(inner)] = [
                v if isinstance(v, int) else v.item() for v in vertices
            ]
    return result
| true |
781613325c4120e4e7d5380cc185c33798921787 | Python | maiorem/Artificial-Intelligence | /Study/keras/keras64_ImageDataGen1.py | UTF-8 | 2,674 | 2.84375 | 3 | [] | no_license | # 남자 여자 구분하기를
# 넘파이 저장
# fit_generator로 코딩
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from sklearn.model_selection import train_test_split
np.random.seed(33)
# Configure the augmentation options for generated training images.
train_datagen = ImageDataGenerator(rescale=1./255,
                                   horizontal_flip=True,
                                   vertical_flip=True,
                                   width_shift_range=0.1,
                                   height_shift_range=0.1,
                                   rotation_range=5,
                                   zoom_range=1.2,
                                   shear_range=0.7,
                                   fill_mode='nearest'
                                   )
test_datagen = ImageDataGenerator(rescale=1./255)
# flow or flow_from_directory:
# point at the directory holding the actual images and load them.
xy=train_datagen.flow_from_directory(
    './data/data2', # each subfolder of this directory becomes a label (ad/normal = 0/1)
    target_size=(150,150),
    batch_size=200,
    class_mode='binary'
    #, save_to_dir='./data/data1_2/train' # save the transformed files
) # dataset that already pairs x with y
np.save('./data/keras64_x.npy', arr=xy[0][0])
np.save('./data/keras64_y.npy', arr=xy[0][1])
# xy_train, xy_test=train_test_split(xy, train_size=0.8)
# # # print(xy_train[0][0].shape) #(10, 300, 300, 3) batch_size=10 # x
# # # print(xy_train[0][1].shape) #(10,) # y
# model=Sequential()
# model.add(Conv2D(10, (2,2), input_shape=(150,150,3)))
# model.add(Conv2D(30, (3,3)))
# model.add(Conv2D(20, (2,2)))
# model.add(MaxPooling2D())
# model.add(Flatten())
# model.add(Dense(200, activation='relu'))
# model.add(Dense(1, activation='sigmoid'))
# model.compile(loss='sparse_categorical_crossentopy', optimizer='adam', metrics=['accuracy'])
# # train 셋에 이미 x와 y가 존재하므로 하나만 써주면 됨
# history=model.fit_generator(
# xy_train,
# steps_per_epoch=17,
# epochs=20,
# validation_data=xy_test,
# validation_steps=4
# )
# acc=history.history['accuracy']
# val_acc=history.history['val_accuracy']
# loss=history.history['loss']
# val_loss=history.history['val_loss']
# plt.plot(acc)
# plt.plot(val_acc)
# plt.plot(loss)
# plt.plot(val_loss)
# plt.title('loss & acc')
# plt.ylabel('loss, acc')
# plt.xlabel('epoch')
# plt.legend(['loss', 'val_loss', 'acc', 'val_acc'])
# plt.show() | true |
272bc2581af5ec189cba70d070b8742d5017d8f1 | Python | gabrielos307/Python2020-2 | /PythonBásico/tupla.py | UTF-8 | 87 | 3.390625 | 3 | [] | no_license | lista1= [1,2,3]
lista2 = ['d','e','f']
for tupla in zip (lista1, lista2):
print(tupla) | true |
59019dea373696623b5a3d6416db7f1a3e30dd8b | Python | chenxu0602/LeetCode | /1410.html-entity-parser.py | UTF-8 | 2,273 | 3.359375 | 3 | [] | no_license | #
# @lc app=leetcode id=1410 lang=python3
#
# [1410] HTML Entity Parser
#
# https://leetcode.com/problems/html-entity-parser/description/
#
# algorithms
# Medium (54.13%)
# Likes: 69
# Dislikes: 184
# Total Accepted: 13.6K
# Total Submissions: 25.1K
# Testcase Example: '"& is an HTML entity but &ambassador; is not."'
#
# HTML entity parser is the parser that takes HTML code as input and replace
# all the entities of the special characters by the characters itself.
#
# The special characters and their entities for HTML are:
#
#
# Quotation Mark: the entity is " and symbol character is ".
# Single Quote Mark: the entity is ' and symbol character is '.
# Ampersand: the entity is & and symbol character is &.
# Greater Than Sign: the entity is > and symbol character is >.
# Less Than Sign: the entity is < and symbol character is <.
# Slash: the entity is ⁄ and symbol character is /.
#
#
# Given the input text string to the HTML parser, you have to implement the
# entity parser.
#
# Return the text after replacing the entities by the special characters.
#
#
# Example 1:
#
#
# Input: text = "& is an HTML entity but &ambassador; is not."
# Output: "& is an HTML entity but &ambassador; is not."
# Explanation: The parser will replace the & entity by &
#
#
# Example 2:
#
#
# Input: text = "and I quote: "...""
# Output: "and I quote: \"...\""
#
#
# Example 3:
#
#
# Input: text = "Stay home! Practice on Leetcode :)"
# Output: "Stay home! Practice on Leetcode :)"
#
#
# Example 4:
#
#
# Input: text = "x > y && x < y is always false"
# Output: "x > y && x < y is always false"
#
#
# Example 5:
#
#
# Input: text = "leetcode.com⁄problemset⁄all"
# Output: "leetcode.com/problemset/all"
#
#
#
# Constraints:
#
#
# 1 <= text.length <= 10^5
# The string may contain any possible characters out of all the 256 ASCII
# characters.
#
#
#
# @lc code=start
import re
class Solution:
    def entityParser(self, text: str) -> str:
        """Replace HTML entities in ``text`` with their literal characters.

        ``&amp;`` is handled last so that freshly produced ampersands are
        never re-expanded into other entities.
        """
        replacements = (
            ('&quot;', '"'),
            ('&apos;', "'"),
            ('&gt;', '>'),
            ('&lt;', '<'),
            ('&frasl;', '/'),
            ('&amp;', '&'),
        )
        for entity, char in replacements:
            text = text.replace(entity, char)
        return text
# @lc code=end
| true |
463446371b579a8fb236f3584881ad07f2fd7a23 | Python | svetoslavastoyanova/Python_Advanced | /Multidimensional_list_exercises/04.Matrix_Shuffling.py | UTF-8 | 808 | 2.859375 | 3 | [] | no_license | rows, cols = [int(x) for x in input().split()]
matrix = []
# Read the matrix rows as lists of whitespace-separated strings.
for _ in range(rows):
    data = input().split()
    matrix.append(data)
line = input()
# Process "swap row1 col1 row2 col2" commands until END is entered.
while line != "END":
    command = line.split()[0]
    if command == "swap" and len(line.split()) == 5:
        row_one = int(line.split()[1])
        col_one = int(line.split()[2])
        row_two = int(line.split()[3])
        col_two = int(line.split()[4])
        # Only swap (and print the matrix) when both cells are in bounds.
        if 0 <= row_one < rows and 0 <= row_two < rows and 0 <= col_one < cols and 0 <= col_two < cols:
            matrix[row_one][col_one], matrix[row_two][col_two] = matrix[row_two][col_two], matrix[row_one][col_one]
            for row in matrix:
                print(' '.join(row))
        else:
            print("Invalid input!")
    else:
        print("Invalid input!")
    line = input()
| true |
1eeb61abb1088c9fdde61275e15eedc5aec67a4b | Python | jiangnanhugo/cs590-RL | /hw2/MOSS.py | UTF-8 | 1,729 | 3.09375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
plt.style.use('classic')
plt.grid(None)
from hw2.bernoullibandit import BernoulliBandit
def log_star(x):
    """Return log2(x) floored at zero, i.e. log2 of max(1, x)."""
    return np.log2(max(1, x))
# page 122
class Minimax_optimal_Strategy_in_stochastic_case(object):
    """MOSS (Minimax Optimal Strategy in the Stochastic case) bandit learner."""

    def __init__(self, m, means):
        self.m = m
        self.K = len(means)
        self.mu_hat = np.zeros(self.K)    # empirical mean reward per arm
        self.estimate = np.zeros(self.K)  # MOSS index per arm
        self.T = np.zeros(self.K)         # pull count per arm
        self.means = means
        self.bandit = BernoulliBandit(means=self.means)

    def run_Moss(self, n):
        """Play the MOSS index policy for ``n`` rounds on the Bernoulli bandit."""
        for t in range(n):
            for i in range(self.K):
                if self.T[i] == 0:
                    # Force at least one pull of every arm.
                    self.estimate[i] = 10 ** 10
                else:
                    # BUG FIX: the index previously read the module-level global
                    # ``N`` instead of the ``n`` argument, raising NameError
                    # whenever the class was used outside the demo script.
                    # NOTE(review): the textbook MOSS index uses
                    # log+(n / (K * T_i)); here the argument is (n / K) * T_i —
                    # TODO confirm against the cited reference ("page 122").
                    self.estimate[i] = self.mu_hat[i] + np.sqrt(4 * log_star(n / self.K * self.T[i]) / self.T[i])
            a_t = np.argmax(self.estimate)
            reward_t = self.bandit.pull(a_t)
            # Incremental update of the pulled arm's empirical mean.
            self.mu_hat[a_t] = (self.mu_hat[a_t] * self.T[a_t] + reward_t) / (self.T[a_t] + 1)
            self.T[a_t] += 1
if __name__ == '__main__':
    K = 5
    means = np.random.random(K)
    N = 1000
    # Sweep m/N ratios from 0.01 to 1.00 in steps of 0.01.
    ratio = [(i+1)/100 for i in range(10*10)]
    x, y = [], []
    for i, ra in enumerate(ratio):
        m = int(N * ra)
        sumed = 0
        # Average the random regret over 10 independent runs.
        for tryout in range(10):
            moss_method = Minimax_optimal_Strategy_in_stochastic_case(m, means)
            moss_method.run_Moss(N)
            sumed+=moss_method.bandit.random_regret()
        x.append(ratio[i])
        y.append(sumed / 10)
    # same plotting code as above!
    plt.plot(x, y)
    # NOTE(review): axis label has a typo ("raio") — left untouched here.
    plt.xlabel("the raio of N/m")
    plt.ylabel("The regret")
    # plt.show()
    plt.savefig("moss.pdf")
| true |
def find_missing(arr):
    """Find the missing number in a sorted run of consecutive integers 1..n+1.

    Binary-searches for the first index i where arr[i] != i + 1; that i + 1
    is the missing value. Prints the answer (kept for backward compatibility)
    and also returns it so callers can use the result.
    """
    start = 0
    end = len(arr) - 1
    # BUG FIX: the loop previously ran `while end > start`, which skipped the
    # final probe and answered wrongly when the missing value sits past the
    # end (e.g. [1, 2] printed 2 instead of 3, and [1] printed 1 instead of 2).
    while end >= start:
        mid = (start + end) // 2
        if arr[mid] == mid + 1:
            start = mid + 1
        else:
            end = mid - 1
    print(start + 1)
    return start + 1
arr = [1, 2, 3, 4, 6, 7, 8]
find_missing(arr) | true |
73bdb1bb3a7bca0d58a503cdd6e42bb5efafdfe9 | Python | nicky-eng/Escuelita_Gabo_Lato | /Ej_2_6.py | UTF-8 | 332 | 3.609375 | 4 | [] | no_license | from espar import*
"""Programa que imprime todos los números pares entre dos números dados por el ususario"""
print("Ingrese un número entero.")
num1=input()
print("Ingrese un número entero mayor que el anterior")
num2=input()
num1= int(num1)
num2= int(num2)+1
i=num1
for i in range(num1, num2):
espar(i)
i=i+1
| true |
def AddFact(no):
    """Return the sum of the proper divisors of ``no`` (divisors below no)."""
    return sum(i for i in range(no // 2, 0, -1) if no % i == 0)
def main():
    # Read a number and report the sum of its proper divisors.
    print("Enter a number:");
    no=int(input());
    ans=AddFact(no);
    # NOTE(review): the message says "factorial" but the value is the sum of
    # factors — probably a typo in the user-facing text.
    print("Addition of factorial is ",ans);
if __name__=="__main__":
main(); | true |
6e7a61cc0cf8add14899f400013059fd260495b4 | Python | nmercer/raceall_api | /database/db.py | UTF-8 | 1,203 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python
from pymongo import Connection
from config.parser import Config
class Database():
    """Thin wrapper around a MongoDB connection configured via Config."""
    def __init__(self):
        config = Config()
        self.host = config.parser['database']['host']
        self.port = int(config.parser['database']['port'])
        self.db_name = config.parser['database']['name']
        # NOTE(review): pymongo's Connection class is long deprecated in
        # favour of MongoClient — confirm the pinned pymongo version.
        self.conn = Connection(self.host, self.port)
        self.db = self.conn[self.db_name]
    def insert(self, collection=None, *args):
        # Insert args[0] (a document) into the named collection; returns None
        # when no collection is given, raises IndexError when args is empty.
        if collection:
            return self.db[collection].insert(args[0])
    def select(self, collection=None, *args):
        # Return a cursor over documents matching the args[0] query.
        if collection:
            return self.db[collection].find(args[0])
    def select_one(self, collection=None, *args):
        # Return a single matching document (or None).
        if collection:
            return self.db[collection].find_one(args[0])
    def delete(self, collection=None, *args):
        # Remove documents matching the args[0] spec.
        if collection:
            return self.db[collection].remove(args[0])
    def update(self, collection=None, *args):
        # NOTE(review): pymongo's update() expects (spec, document); only one
        # argument is forwarded here — confirm this ever worked as intended.
        if collection:
            return self.db[collection].update(args[0])
    def close(self):
        self.conn.disconnect()
    # Return a fresh Connection to the configured host/port.
    def connect(self):
        return Connection(self.host, self.port)
| true |
81ad9fea258e5cb68be3be0a3e5186970df49168 | Python | wzzhhh1/SmoWeb | /smo/web/exceptions.py | UTF-8 | 595 | 2.703125 | 3 | [] | no_license | # To be relocated to smo.util
class CustomException(Exception):
    """Base exception carrying an arbitrary ``value`` payload."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return "{!r}".format(self.value)
# Specific error categories; all inherit CustomException's value payload
# and repr-based string form.
class ArgumentError(CustomException):
    pass
class ArgumentTypeError(CustomException):
    pass
class FieldError(CustomException):
    pass
class ConvergenceError(CustomException):
    pass
class ConnectionError(Exception):
    """Raised when connecting two slots fails.

    NOTE: shadows Python 3's builtin ConnectionError within this module.
    """
    def __init__(self, var1, var2, msg):
        self.var1 = var1
        self.var2 = var2
        self.msg = msg
    def __str__(self):
        # Both endpoints are identified by their qualified slot name.
        return "Failed connecting {} and {}:\n  {}".format(self.var1.qName, self.var2.qName, self.msg)
9834346f208f5f630a6447480f8c68c8f612142a | Python | basicskywards/data_structures_algorithms | /Heaps/heap_merge_sorted_arrays.py | UTF-8 | 1,460 | 3.515625 | 4 | [] | no_license | import heapq
import sys
import random
# Time complexity O(nlogk)
# n: total elements
# k: number of sorted arrays/sequences
def merge_sorted_arrays(sorted_arrays):
    """Merge k sorted sequences into one sorted list.

    Uses heapq.merge, which keeps one entry per input sequence on a heap, so
    the overall cost is O(n log k) for n total elements across k sequences —
    the same complexity as the previous hand-rolled heap loop, with the same
    stable tie-breaking (elements from earlier sequences win ties).
    """
    return list(heapq.merge(*sorted_arrays))
def main():
    # Fixed demo case first, then 100 randomized cases.
    S = [[1, 5, 10], [2, 3, 100], [2, 12, 2**64 - 1]]
    print('Inputs: ', S)
    print('\nOutputs: ', merge_sorted_arrays(S))
    for _ in range(100):
        # Optional CLI argument fixes the number of arrays per case;
        # otherwise pick 1..5 arrays of 1..10 sorted random ints each.
        n = int(sys.argv[1]) if len(sys.argv) == 2 else random.randint(1, 5)
        S = []
        for i in range(n):
            S.append(
                sorted(
                    random.randint(-9999, 9999)
                    for j in range(random.randint(1, 10))))
        print('\nInputs: ', S)
        print('\nOutputs: ', merge_sorted_arrays(S))
if __name__ == '__main__':
main() | true |
a66312da8f532b21bbc633d9b479a025a83ac838 | Python | angon1/Let-s-learn-Python | /pullUps/models.py | UTF-8 | 1,545 | 2.515625 | 3 | [] | no_license | from django.db import models
from pullUps.excercise.models import *
# Create your models here.
class ExcerciseSet(models.Model):
    """One set of an exercise: rep count plus the rest taken afterwards."""
    repsCount = models.IntegerField()
    usedExcercise = models.ForeignKey(Excercise, on_delete=models.CASCADE, null=True)
    breakTimeAfterSet = models.IntegerField()
    def getReps(self):
        return self.repsCount # TODO: move to blocks
    def __str__(self):
        return 'Used excercise: {} reps: {} break after set: {}'.format(self.usedExcercise, self.repsCount, self.breakTimeAfterSet)
class ExcerciseBlock(models.Model):
    """A group of sets executed together, with a rest after the whole block."""
    breakTimeAfterBlock = models.IntegerField()
    usedExcerciseSets = models.ManyToManyField(ExcerciseSet, through='ExcerciseBlockSets', blank=True)
    def __str__(self):
        return ' break after block: {}\nsets count: {}'.format(self.breakTimeAfterBlock, self.usedExcerciseSets.count())
class ExcerciseBlockSets(models.Model):
    """Explicit through-table for the block <-> set many-to-many relation."""
    blockKey = models.ForeignKey(ExcerciseBlock, on_delete=models.CASCADE, unique=False)
    setKey = models.ForeignKey(ExcerciseSet, on_delete=models.CASCADE, unique=False)
    def __str__(self):
        return 'My id: {} ::: blockKey: {} - setKey {}\n'.format(self.pk, self.blockKey.pk, self.setKey.pk)
class Training(models.Model):
    """A named training session composed of exercise blocks."""
    blocks = models.ManyToManyField(ExcerciseBlock, unique=False)
    name = models.CharField(max_length=150)
    # TODO: an easy way to display the exercises used in the training
    def __str__(self):
        return 'Training: {} Blocks count used: {}'.format(self.name, self.blocks.count())
| true |
9262cfdf63b10fd083e49e6c24b3df41adc7a9ba | Python | GustavoLeao2018/aula-22-04 | /exercicio_1.py | UTF-8 | 2,037 | 4.3125 | 4 | [] | no_license |
"""
1) Dada uma matriz mxn, onde cada posição contem um valor entre 0 e 255 (escala de cinzas),
implemente um algoritmo recursivo que receba 2 parâmetros, um ponto (x,y), uma cor (0 a 255),
e um par de valores (a, b).
A partir do ponto inicial,
o algoritmo deve substituir todos os pontos adjacentes dentro da faixa
de valores [a .. b], pela cor
ta entre a e b
ve todos na avolta
criterio de parada
recursão
pontos adjascentes
"""
from colorama import init
from termcolor import colored
from random import randint
init()
m = 10
n = 10
matriz = [[] , []]
maior_linha = m
maior_coluna = n
cont1 = 0
cont2 = 0
# NOTE(review): cont2 is never reset inside the outer loop, so matriz[0]
# receives n values only on the first pass; the result is two flat lists,
# not an m x n matrix. TODO confirm the intended structure.
while cont1 < maior_linha:
    while cont2 < maior_coluna:
        matriz[0].append(randint(0, 255))
        cont2 += 1
    matriz[1].append(randint(0, 255))
    cont1 += 1
# Random start point, colour and (a, b) range for the exercise.
x_y = (randint(0, (maior_linha-1)), randint(0, (maior_coluna-1)))
print(f"Ponto inicial: {x_y}")
cor = randint(0, 255)
print(f"Cor: {cor}")
par_valores = (randint(0, maior_linha), randint(0, maior_coluna))
print(f"A, B: {par_valores}")
def pintar(x_y, cor, par_valores):
    """Print the grid, highlighting x_y and its 4-neighbours with the colour.

    NOTE(review): the exercise asks for a recursive flood fill bounded by
    par_valores, but this implementation is iterative and never reads
    par_valores — TODO confirm it is still a work in progress.
    NOTE(review): the final ``else`` pairs only with the last ``if``, so a
    cell matched by one of the first four conditions is printed twice
    (coloured and then plain).
    """
    for cont_linha, linha in enumerate(matriz[0]):
        for cont_coluna, coluna in enumerate(matriz[1]):
            if(x_y[0] == cont_linha and x_y[1] == cont_coluna):
                print(colored(f"[{cor}]", "grey", "on_white"), end=" ")
            if((x_y[0]-1) == cont_linha and x_y[1] == cont_coluna):
                print(colored(f"[{cor}]", "grey", "on_white"), end=" ")
            if((x_y[0]+1) == cont_linha and x_y[1] == cont_coluna):
                print(colored(f"[{cor}]", "grey", "on_white"), end=" ")
            if(x_y[0] == cont_linha and (x_y[1]-1) == cont_coluna):
                print(colored(f"[{cor}]", "grey", "on_white"), end=" ")
            if(x_y[0] == cont_linha and (x_y[1]+1) == cont_coluna):
                print(colored(f"[{cor}]", "grey", "on_white"), end=" ")
            else:
                print(f"[{cont_linha}, {cont_coluna}]", end=" ")
        print()
| true |
e3e8ed7f26bbaa90452f0f0a660f6c941c5bf63d | Python | s3341458/ctyp | /add.py | UTF-8 | 192 | 2.53125 | 3 | [] | no_license | import ctypes
# Load the compiled C library and declare add()'s argument types.
# NOTE(review): 'add.o' is a relocatable object file — CDLL normally needs a
# shared library (e.g. 'add.so'); confirm how this artifact was built.
test_c = ctypes.CDLL('add.o')
test_c.add.argtypes = (ctypes.c_int, ctypes.c_int)
def add(x, y):
    """Add two ints using the native C ``add`` and return a Python int."""
    return int(test_c.add(ctypes.c_int(x), ctypes.c_int(y)))
7fb624dc7c831ca552d1487e361472f3fc4c8d72 | Python | amitt001/cs-review | /ds/trees/sorted_arry_balanced_tree.py | UTF-8 | 1,077 | 3.9375 | 4 | [] | no_license | """CCI:
Minimal Tree: Given a sorted (increasing order) array with unique integer elements,
write an algorithm to create a binary search tree with minimal height.
[1, 3, 5, 6, 9, 11, 17, 23, 53]
9
3 17
1 5 11 23
6 53
Note:
One interesting point to note here, this approach can be used for crateing a balanced BST.
"""
class Node:
    """Binary-search-tree node holding a key and two child links."""
    def __init__(self, key):
        self.key = key
        self.left = None
        self.right = None
def balanced_tree(sorted_arr, start=0, end=None) -> Node:
    """Build a minimal-height BST from a sorted array of unique values.

    Generalized: ``start``/``end`` now default to the whole array, so callers
    may simply write ``balanced_tree(arr)``; the existing three-argument call
    sites keep working unchanged. Returns None for an empty range.
    """
    if end is None:
        end = len(sorted_arr) - 1
    # Empty range: no subtree.
    if end < start:
        return None
    # NOTE: // -> integer division for Python 3.
    mid = (end + start) // 2
    node = Node(sorted_arr[mid])
    node.left = balanced_tree(sorted_arr, start, mid - 1)
    node.right = balanced_tree(sorted_arr, mid + 1, end)
    return node
if __name__ == '__main__':
    # 9-element sorted array -> height-minimal BST rooted at the median (9).
    arr = [1, 3, 5, 6, 9, 11, 17, 23, 53]
    tree = balanced_tree(arr, 0, len(arr)-1)
    assert tree.key == 9
    assert tree.left.key == 3
    assert tree.right.key == 17
    assert tree.left.left.key == 1
| true |
e08cc5f9a17780cbd48d4b80b20573f365cfa507 | Python | yennanliu/CS_basics | /leetcode_python/Hash_table/shortest-completing-word.py | UTF-8 | 4,175 | 3.875 | 4 | [] | no_license | # Time: O(n)
# Space: O(1)
# Find the minimum length word from a given dictionary words,
# which has all the letters from the string licensePlate.
# Such a word is said to complete the given string licensePlate
#
# Here, for letters we ignore case.
# For example, "P" on the licensePlate still matches "p" on the word.
#
# It is guaranteed an answer exists.
# If there are multiple answers, return the one that occurs first in the array.
#
# The license plate might have the same letter occurring multiple times.
# For example, given a licensePlate of "PP",
# the word "pair" does not complete the licensePlate, but the word "supper" does.
#
# Example 1:
# Input: licensePlate = "1s3 PSt", words = ["step", "steps", "stripe", "stepple"]
# Output: "steps"
# Explanation: The smallest length word that contains the letters "S", "P", "S", and "T".
# Note that the answer is not "step", because the letter "s" must occur in the word twice.
# Also note that we ignored case for the purposes of comparing whether a letter exists in the word.
# Example 2:
# Input: licensePlate = "1s3 456", words = ["looks", "pest", "stew", "show"]
# Output: "pest"
# Explanation: There are 3 smallest length words that contains the letters "s".
# We return the one that occurred first.
# Note:
# - licensePlate will be a string with length in range [1, 7].
# - licensePlate will contain digits, spaces, or letters (uppercase or lowercase).
# - words will have a length in the range [10, 1000].
# - Every words[i] will consist of lowercase letters, and have length in range [1, 15].
# V0
import collections
import re
class Solution(object):
    def shortestCompletingWord(self, licensePlate, words):
        """
        :type licensePlate: str
        :type words: List[str]
        :rtype: str
        """
        # Multiset of letters required by the plate (lower-cased, a-z only).
        needed = collections.Counter(c for c in licensePlate.lower() if 'a' <= c <= 'z')
        best = '#' * 1111  # sentinel longer than any candidate word
        for word in words:
            counts = collections.Counter(word)
            if len(word) < len(best) and all(counts[ch] >= cnt for ch, cnt in needed.items()):
                best = word
        return best
# V1
# http://bookshadow.com/weblog/2017/12/17/leetcode-shortest-completing-word/
import collections
import re
class Solution(object):
    def shortestCompletingWord(self, licensePlate, words):
        """
        :type licensePlate: str
        :type words: List[str]
        :rtype: str
        """
        required = collections.Counter(re.sub('[^a-z]', '', licensePlate.lower()))
        # Words containing every required letter with sufficient multiplicity.
        complete = [w for w in words
                    if all(collections.Counter(w)[k] >= required[k] for k in required)]
        # min() is stable, so ties keep the earliest word, matching the
        # original strictly-shorter replacement scan.
        return min(complete, key=len) if complete else '#' * 1111
# V1'
# https://www.jiuzhang.com/solution/shortest-completing-word/#tag-highlight-lang-python
import collections
class Solution:
    """
    @param licensePlate: a string
    @param words: List[str]
    @return: return a string
    """
    def shortestCompletingWord(self, licensePlate, words):
        # Count the letters the plate requires (case-insensitive).
        required = collections.defaultdict(int)
        for ch in licensePlate:
            if ch.isalpha():
                required[ch.lower()] += 1
        best = ""
        for word in words:
            if all(word.count(ch) >= cnt for ch, cnt in required.items()):
                # First qualifying word wins; later ones only if strictly shorter.
                if not best or len(word) < len(best):
                    best = word
        return best
# V2
import collections
class Solution(object):
    def shortestCompletingWord(self, licensePlate, words):
        """
        :type licensePlate: str
        :type words: List[str]
        :rtype: str
        """
        required = collections.Counter(c.lower() for c in licensePlate if c.isalpha())
        best = None
        for word in words:
            # Skip words that cannot improve on the current best length.
            if best is not None and len(word) >= len(best):
                continue
            counts = collections.Counter(word.lower())
            if all(counts[ch] >= cnt for ch, cnt in required.items()):
                best = word
        return best
f40a80bd2f822deffe348e651ce95f260d6cd4c6 | Python | cytusalive/bullet-hell-game | /stages.py | UTF-8 | 973 | 2.921875 | 3 | [] | no_license | import pygame
import random
import math
from enemy import Enemy, Boss
from spritesheet_functions import SpriteSheet
from angle_movement import find_distance, find_angle, calculate_new_xy
class Level:
    """Base game stage: owns the draw surface, a frame timer, and enemies."""
    def __init__(self, gamearea):
        self.timer = 0
        self.gamearea = gamearea
        self.background = False  # subclasses assign the actual background image
        self.enemies = []
    def draw(self):
        # Paint the stage background onto the play area.
        self.gamearea.blit(self.background, (0, 0))
class Stage_01(Level):
    """First stage: static background plus periodically spawned enemies."""
    def __init__(self, gamearea, playerbulletlist, enemybulletlist):
        super().__init__(gamearea)
        self.background = SpriteSheet("stage.png").get_image(0, 0, 520, 680)
        self.playerbullets = playerbulletlist
        self.enemybullets = enemybulletlist
    def update(self):
        # NOTE(review): ``cd``, ``gamearea`` and ``enemy_sprite`` are not
        # defined in this scope — presumably self.timer / self.gamearea were
        # intended; as written update() raises NameError. TODO confirm and fix.
        if cd % 180 == 0:
            self.enemies.append(Enemy(random.randint(0, gamearea.get_width()), (random.randint(0, gamearea.get_width()), random.randint(0, 400)), 3, 150, gamearea, enemy_sprite))
        self.timer += 1
| true |
bf95187224e99f8aab9b7162802e7ab95cde227b | Python | dickrd/cla_tool | /cla/learn/test/test_word2vec.py | UTF-8 | 755 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | # coding=utf-8
from cla.learn.word2vec import VectorModel
import pytest
@pytest.fixture(scope="module")
def setup_model():
model = VectorModel(source_corpus_path="cla/res/test/words.txt")
return model
def test_vector_model(setup_model):
vector = setup_model.to_vector([u"美国", u"网民", u"纷纷", u"谴责", u"美联航", u"暴力", u"逐客", u"事件"])
similar_documents = setup_model.similar_documents(vector)
print similar_documents
assert similar_documents.__sizeof__() > 0
print vector
assert vector.__sizeof__() > 0
def test_save_vector_model(setup_model):
setup_model.save("cla/res/test/model.bin")
loaded_model = VectorModel("cla/res/test/model.bin")
assert loaded_model.model is not None
| true |
75b9cffe126855dc5fe5e7e638fd734dbeeab215 | Python | StephanBischoff-Digle/adventofcode | /2022/12/map.py | UTF-8 | 1,960 | 3.171875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
import numpy as np
import png
def read_path() -> list[(int, int)]:
    """Read (x, y) waypoints, one space-separated pair per line, from path.txt."""
    path = []
    with open("path.txt", "r") as f:
        for line in f.readlines():
            x, y = line.strip().split(" ")
            path.append((int(x), int(y)))
    return path
def convert(c: chr) -> int:
    """Map a lowercase letter to its 0-based position in the alphabet."""
    return ord(c) - ord('a')
def color(v: int) -> tuple[int, int, int]:
    """RGB for a height value: grey ramp for terrain, red start, green end."""
    if v >= 0:
        shade = min(v * 255 // ord('z'), 255)
        return (shade, shade, shade)
    # Negative codes come from convert('S') == -14 and convert('E') == -28.
    return {-14: (255, 0, 0), -28: (0, 255, 0)}.get(v)
def write_png(data):
    """Render the heightmap to map.png: grey terrain, blue path, red/green markers."""
    path = read_path()
    height = len(data)
    width = len(data[0])
    img = []
    # One flattened (r, g, b, r, g, b, ...) tuple per image row.
    for y in range(height):
        row = ()
        for x in range(width):
            # NOTE: linear membership test per pixel; a set of path cells
            # would be faster for large maps.
            if (x, y) in path:
                row = row + (0, 0, 255)  # path cells drawn blue
            else:
                row = row + color(data[y][x])
        img.append(row)
    with open("map.png", "wb") as f:
        w = png.Writer(width, height, greyscale=False)
        w.write(f, img)
def contour(data):
    """Show a contour plot of the heightmap with the solution path overlaid."""
    height = len(data)
    width = len(data[0])
    path = read_path()
    xs = [x for x, _ in path]
    ys = [y for _, y in path]
    X, Y = np.meshgrid(range(width), range(height))
    # Grid lines every 5 cells on both axes.
    locA = plticker.MultipleLocator(base=5)
    locB = plticker.MultipleLocator(base=5)
    # cmap="autumn_r",
    ax = plt.axes()
    ax.xaxis.set_major_locator(locA)
    ax.yaxis.set_major_locator(locB)
    ax.grid(which='major', axis='both', linestyle='-', alpha=0.3)
    # Two contour levels per height step across the a..z range.
    ax.contour(X, Y, data, (ord('z')-ord('a'))*2,
               linewidths=0.5, cmap="gnuplot")
    # Dotted black overlay of the path waypoints.
    ax.plot(xs, ys, c="black", linewidth=2, linestyle=":")
    ax.axes.set_aspect('equal')
    plt.show()
def main() -> None:
    """Load the heightmap from input.txt and render its contour plot."""
    with open("input.txt", "r") as f:
        data = [[convert(c) for c in line.strip()] for line in f.readlines()]
    # write_png(data)
    contour(data)
if __name__ == "__main__":
main()
| true |
e978b08ff3b145b943337bd3eb2bd54608911df1 | Python | Naysla/Machine_Learning | /3_scikit-learn_/Unsupervised Learning in Python_/23_Clustering Wikipedia part I.py | UTF-8 | 1,132 | 3.96875 | 4 | [] | no_license | #Clustering Wikipedia part I
#You saw in the video that TruncatedSVD is able to perform PCA on sparse arrays in csr_matrix format, such as word-frequency arrays. Combine your knowledge of TruncatedSVD and k-means to cluster some popular pages from Wikipedia. In this exercise, build the pipeline. In the next exercise, you'll apply it to the word-frequency array of some Wikipedia articles.
#
#Create a Pipeline object consisting of a TruncatedSVD followed by KMeans. (This time, we've precomputed the word-frequency matrix for you, so there's no need for a TfidfVectorizer).
#
#The Wikipedia dataset you will be working with was obtained from here.
# Perform the necessary imports
from sklearn.decomposition import TruncatedSVD
from sklearn.cluster import KMeans
from sklearn.pipeline import make_pipeline
# Create a TruncatedSVD instance: svd
# (TruncatedSVD reduces dimensionality directly on sparse csr word-frequency
# arrays, which plain PCA cannot do.)
svd = TruncatedSVD(n_components=50)
# Create a KMeans instance: kmeans
kmeans = KMeans(n_clusters=6)
# Create a pipeline: pipeline — SVD reduction followed by k-means clustering.
pipeline = make_pipeline(svd,kmeans)
#Excellent! Now that you have set up your pipeline, you will use it in the next exercise to cluster the articles. | true |
5f17b5c803f2900cac4b641a91b53d2b6ecf9403 | Python | ClausRipp/lazyflow | /tests/unit/testOperatorInterface.py | UTF-8 | 4,279 | 2.515625 | 3 | [] | no_license |
import nose
from lazyflow import graph
from lazyflow import stype
from lazyflow import operators
import numpy
class OpA(graph.Operator):
    """Test operator exercising required, optional, and defaulted input slots."""
    name = "OpA"
    Input1 = graph.InputSlot() # required slot
    Input2 = graph.InputSlot(optional = True) # optional slot
    Input3 = graph.InputSlot(value = 3) # required slot with default value, i.e. already connected
    Output1 = graph.OutputSlot()
    Output2 = graph.OutputSlot()
    Output3 = graph.OutputSlot()
    def __init__(self, parent):
        graph.Operator.__init__(self,parent)
        # Flipped to True once setupOutputs runs; the tests assert on it.
        self.configured = False
    def setupOutputs(self):
        # Propagate Input1's metadata (shape/dtype) to every output slot.
        self.configured = True
        self.Output1.meta.shape = self.Input1.meta.shape
        self.Output1.meta.dtype = self.Input1.meta.dtype
        self.Output2.meta.shape = self.Input1.meta.shape
        self.Output2.meta.dtype = self.Input1.meta.dtype
        self.Output3.meta.shape = self.Input1.meta.shape
        self.Output3.meta.dtype = self.Input1.meta.dtype
        # Python 2 print statement — this module targets Python 2.
        print "OpInternal shape=%r, dtype=%r" % (self.Input1.meta.shape, self.Input1.meta.dtype)
    def execute(self, slot, roi, result):
        # Each output mirrors the first element of its corresponding input.
        if slot == self.Output1:
            result[0] = self.Input1[:].allocate().wait()[0]
        elif slot == self.Output2:
            result[0] = self.Input2[:].allocate().wait()[0]
        elif slot == self.Output3:
            result[0] = self.Input3[:].allocate().wait()[0]
        return result
class TestOperator_setupOutputs(object):
    """Tests that setupOutputs fires at the right slot-connection events."""
    def setUp(self):
        self.g = graph.Graph()
    def tearDown(self):
        self.g.stopGraph()
    def test_disconnected_connected(self):
        # check that operator is not configured initially
        # since it has a slot without default value
        op = OpA(self.g)
        assert op.configured == False
        # check that the operator is configured
        # after connecting the slot without default value
        op.Input1.setValue(1)
        assert op.configured == True
        op.configured = False
        # check that the operator is reconfigured
        # when connecting the slot with default value
        # to another value
        op.Input3.setValue(2)
        assert op.configured == True
    def test_default_value(self):
        op = OpA(self.g)
        op.Input1.setValue(1)
        # check that the slot with default value
        # returns the correct value
        result = op.Output3[:].allocate().wait()[0]
        assert result == 3
        # check that the slot with default value
        # returns the new value when it is connected
        # to something else
        op.Input3.setValue(2)
        result = op.Output3[:].allocate().wait()[0]
        assert result == 2
    def test_connect_propagate(self):
        # check that connecting a required slot to an
        # already configured slot notifies the operator
        # of connecting
        op1 = OpA(self.g)
        op1.Input1.setValue(1)
        op2 = OpA(self.g)
        op2.Input1.connect(op1.Output1)
        assert op2.configured == True
    def test_deferred_connect_propagate(self):
        # check that connecting a required slot to a
        # not yet configured slot notifies the operator
        # of connecting after configuring the first operator
        # in the chain
        op1 = OpA(self.g)
        op2 = OpA(self.g)
        op2.Input1.connect(op1.Output1)
        assert op2.configured == False
        op1.Input1.setValue(1)
        assert op2.configured == True
class TestOperator_meta(object):
    """Tests that slot meta information propagates across connections."""
    def setUp(self):
        self.g = graph.Graph()
    def tearDown(self):
        self.g.stopGraph()
    def test_meta_propagate(self):
        # check that connecting a required slot to an
        # already configured slot notifies the operator
        # of connecting and the meta information
        # is correctly passed on between the slots
        op1 = OpA(self.g)
        op1.Input1.setValue(numpy.ndarray((10,)))
        op2 = OpA(self.g)
        op2.Input1.connect(op1.Output1)
        assert op2.Output1.meta.shape == (10,)
    def test_deferred_meta_propagate(self):
        # check that connecting a required slot to a
        # not yet configured slot notifies the operator
        # of connecting after configuring the first operator
        # and propagates the meta information correctly
        # between the slots
        op1 = OpA(self.g)
        op2 = OpA(self.g)
        op2.Input1.connect(op1.Output1)
        op1.Input1.setValue(numpy.ndarray((10,)))
        assert op2.Output1.meta.shape == (10,)
        op1.Input1.setValue(numpy.ndarray((20,)))
        assert op2.Output1.meta.shape == (20,)
| true |
cf7d422dbed1c8e2e739a50867ebce71ffc67619 | Python | Shilpa25453/codeigniterproject | /python/list.py | UTF-8 | 296 | 3.578125 | 4 | [] | no_license | '''n=[1,10,16,20,58,22]
n.sort()
print(n)
print("largest number is {}".format(n[-1]))'''
# Read ``size`` integers from the user and report the largest one.
# BUG FIX: the original body was syntactically invalid (missing colon on the
# second for-loop, a stray semicolon after the if, an undefined ``l`` and an
# unbalanced print call); reconstructed from the commented-out exemplar above,
# which finds the largest number of the list.
numbers = []
size = int(input("enter the size"))
for _ in range(size):
    value = int(input("enter the numbers"))
    numbers.append(value)
largest = numbers[0]
for i in range(size):
    if numbers[i] > largest:
        largest = numbers[i]
print("the largest number is {}".format(largest))
| true |
b7417049b42104ff72e0d023fc7b8e2bea03a434 | Python | ElectronicBabylonianLiterature/ebl-api | /ebl/fragmentarium/domain/annotation.py | UTF-8 | 4,445 | 2.53125 | 3 | [
"MIT"
] | permissive | import base64
import io
from PIL import Image
from enum import Enum
from typing import Sequence, Optional
from uuid import uuid4
import attr
from ebl.fragmentarium.application.cropped_sign_image import CroppedSign, Base64
from ebl.fragmentarium.domain.fragment import Script
from ebl.transliteration.domain.museum_number import MuseumNumber
@attr.attrs(auto_attribs=True, frozen=True)
class Geometry:
    """Annotation rectangle, expressed in percent of the image width/height."""
    x: float
    y: float
    width: float
    height: float
class AnnotationValueType(Enum):
    """Kind of content an annotation marks on a fragment image."""
    HAS_SIGN = "HasSign"
    NUMBER = "Number"
    SURFACE_AT_LINE = "SurfaceAtLine"
    RULING_DOLLAR_LINE = "RulingDollarLine"
    BLANK = "Blank"
    PREDICTED = "Predicted"
    PARTIALLY_BROKEN = "PartiallyBroken"
    STRUCT = "Struct"
    # NOTE(review): the three members below break the UPPER_SNAKE_CASE
    # convention used above — renaming would touch callers, so left as is.
    UnclearSign = "UnclearSign"
    ColumnAtLine = "ColumnAtLine"
    CompoundGrapheme = "CompoundGrapheme"
@attr.attrs(auto_attribs=True, frozen=True)
class AnnotationData:
    """Payload of an annotation: id, transliterated value, type, and sign name."""
    id: str
    value: str
    type: AnnotationValueType
    path: Sequence[int]
    sign_name: str
@attr.attrs(auto_attribs=True, frozen=True)
class Annotation:
    """A rectangle on a fragment photo plus the sign data it marks."""
    geometry: Geometry
    data: AnnotationData
    cropped_sign: Optional[CroppedSign]
    def crop_image(self, image: Image.Image) -> Base64:
        """Cut this annotation's rectangle out of ``image`` as a base64 PNG."""
        # Convert the percent-based geometry into pixel coordinates.
        bounding_box = BoundingBox.from_annotations(
            image.size[0], image.size[1], [self]
        )[0]
        area = (
            int(bounding_box.top_left_x),
            int(bounding_box.top_left_y),
            int(bounding_box.top_left_x + bounding_box.width),
            int(bounding_box.top_left_y + bounding_box.height),
        )
        cropped_image = image.crop(area)
        MAX_SIZE = (800, 800)
        # Downscale oversized crops (in-place) to keep the stored image small.
        if cropped_image.size[0] * cropped_image.size[1] >= MAX_SIZE[0] * MAX_SIZE[1]:
            cropped_image.thumbnail(MAX_SIZE)
        buf = io.BytesIO()
        cropped_image.save(buf, format="PNG")
        return Base64(base64.b64encode(buf.getvalue()).decode("utf-8"))
    @classmethod
    def from_prediction(cls, geometry: Geometry) -> "Annotation":
        """Build a blank annotation (fresh random id) for a model-predicted box."""
        data = AnnotationData(uuid4().hex, "", AnnotationValueType.PREDICTED, [], "")
        return cls(geometry, data, None)
@attr.attrs(auto_attribs=True, frozen=True)
class BoundingBox:
    """Axis-aligned rectangle in absolute pixel coordinates."""
    top_left_x: float
    top_left_y: float
    width: float
    height: float
    def to_list(self) -> Sequence[float]:
        return [self.top_left_x, self.top_left_y, self.width, self.height]
    @classmethod
    def from_relative_coordinates(
        cls,
        relative_x,
        relative_y,
        relative_width,
        relative_height,
        image_width,
        image_height,
    ) -> "BoundingBox":
        # Percent-of-image values -> rounded pixel values.
        absolute_x = int(round(relative_x / 100 * image_width))
        absolute_y = int(round(relative_y / 100 * image_height))
        absolute_width = int(round(relative_width / 100 * image_width))
        absolute_height = int(round(relative_height / 100 * image_height))
        return cls(absolute_x, absolute_y, absolute_width, absolute_height)
    @staticmethod
    def from_annotations(
        image_width: int, image_height: int, annotations: Sequence[Annotation]
    ) -> Sequence["BoundingBox"]:
        # Convert each annotation's percent geometry into a pixel box.
        return tuple(
            BoundingBox.from_relative_coordinates(
                annotation.geometry.x,
                annotation.geometry.y,
                annotation.geometry.width,
                annotation.geometry.height,
                image_width,
                image_height,
            )
            for annotation in annotations
        )
@attr.s(auto_attribs=True, frozen=True)
class BoundingBoxPrediction(BoundingBox):
    """A BoundingBox augmented with the model's confidence score."""

    probability: float  # detection probability of this predicted box
@attr.attrs(auto_attribs=True, frozen=True)
class Annotations:
    """All annotations belonging to one museum fragment."""

    fragment_number: MuseumNumber
    annotations: Sequence[Annotation] = tuple()
    script: Optional[Script] = None  # optional script metadata

    @classmethod
    def from_bounding_boxes_predictions(
        cls,
        fragment_number: MuseumNumber,
        bboxes: Sequence[BoundingBoxPrediction],
        image_height: int,
        image_width: int,
    ) -> "Annotations":
        """Turn absolute predicted boxes into relative-geometry annotations.

        This is the inverse of BoundingBox.from_relative_coordinates:
        pixel values become percentages of the image dimensions.
        """
        annotations = []
        for bbox in bboxes:
            geometry = Geometry(
                bbox.top_left_x / image_width * 100,
                bbox.top_left_y / image_height * 100,
                bbox.width / image_width * 100,
                # Bug fix: the height must be scaled by the image *height*;
                # the original divided by image_width, distorting every box
                # on non-square images.
                bbox.height / image_height * 100,
            )
            annotations.append(Annotation.from_prediction(geometry))
        return cls(fragment_number, annotations)
| true |
95ef2b3471d49590a3b45e0892adee2a24389187 | Python | louisapjG/NEAI_scripts | /TTT/visualize_data_TTT.py | UTF-8 | 1,494 | 3.453125 | 3 | [] | no_license | #Show data in graph format with one subplot per axis
import csv
import matplotlib.pyplot as plt
input_file = "1_col_3d.csv"  # CSV file to visualize
value_delimiter = ','  # field separator used in the CSV
has_header = False  # True when the first row holds column names
has_index = False  # True when the first column is a row index to drop
save_to = "test.jpeg"  # output image path; "" skips saving
#Takes in the data and returns an array containing it. Cleans up index and headers if present.
def intake(file_in,value_delimiter,has_header,has_index):
#Read data in
with open(file_in) as csvfile:
csvreader = list(csv.reader(csvfile, delimiter=value_delimiter))
if has_header:
headers = csvreader[0]
csvreader = csvreader[1:]
else:
headers = [i for i in range(len(csvreader[0]))]
if has_index:
dataset = [[float(val) for val in row[1:]] for row in csvreader]
headers = headers[1:]
else:
dataset = [[float(val) for val in row] for row in csvreader]
return dataset, headers
#Display the data with one graph per column
def multi_plots(dataset, headers, save_to=""):
    """Render one subplot per column of *dataset*, optionally saving it.

    dataset is a list of rows; each subplot shows the values of one
    column across all rows, titled with the matching entry of *headers*.
    When save_to is non-empty the figure is also written to that path.
    """
    plt.figure()
    if len(headers) == 1:
        # Single column: one plain plot, no subplot grid needed.
        plt.title(headers[0])
        plt.plot(dataset)
    else:
        for nbr, subplot_name in enumerate(headers):
            # Three-argument subplot form works for any column count
            # (the old int(str(...) + ...) concatenation broke past 9).
            plt.subplot(len(headers), 1, nbr + 1)
            plt.title(subplot_name)
            # Extract column *nbr*; the original `dataset[:][nbr]`
            # selected row *nbr* instead (dataset[:] is just a copy).
            plt.plot([row[nbr] for row in dataset])
    if save_to != "":
        plt.savefig(save_to)
    plt.show()
def main(input_file, value_delimiter, has_header, has_index, save_to=""):
    """Load the CSV and draw the per-column plots."""
    data, columns = intake(input_file, value_delimiter, has_header, has_index)
    multi_plots(data, columns, save_to)
if __name__ == "__main__":
    # Run the visualization with the module-level configuration above.
    main(input_file,value_delimiter,has_header,has_index,save_to)
| true |
5b577c371011bcc0554f5429f125395a440e6966 | Python | HyungJiny/HobbyProject | /crawler/navershop_review.py | UTF-8 | 2,713 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
import configparser
import time
import pandas as pd
import random
from datetime import date
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
# Load configuration (chromedriver path etc.) from config.ini
config = configparser.ConfigParser()
config.read('config.ini')
# Launch Chrome through the webdriver (translated from Korean)
driver = webdriver.Chrome(config.get('SELENIUM', 'chromedriver'))
driver.implicitly_wait(5)  # wait up to 5s when locating elements
def open_navershoppage():
    """Open the Naver Shopping landing page in the shared driver."""
    # Navigate to Naver Shopping (translated from Korean)
    driver.get('https://shopping.naver.com/')
    time.sleep(3)  # crude wait for the page to finish loading
def get_items_form_csv(filepath):
    """Return the values of the 'title' column of *filepath* as a list."""
    frame = pd.read_csv(filepath)
    return list(frame['title'])
def search(item):
    """Type *item* into the shopping search box and submit with ENTER."""
    # Search for the product (translated from Korean); the same input
    # element is located twice, once per send_keys call.
    driver.find_element_by_xpath('//*[@id="autocompleteWrapper"]/input[1]').send_keys(item)
    driver.find_element_by_xpath('//*[@id="autocompleteWrapper"]/input[1]').send_keys(Keys.ENTER)
    time.sleep(1)  # brief wait for the results page to start loading
    print(item)  # progress log
def page_wait(id):
    """Block up to 10 seconds until an element with DOM id *id* exists.

    The parameter name shadows the builtin `id`; it is kept unchanged
    for compatibility with existing callers.
    """
    try:
        # Bug fix: the original waited for the literal string "id"
        # instead of the id passed by the caller, so the wait could
        # never find the intended element.
        element = WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.ID, id))
        )
    finally:
        pass
def move_review_page():
    """Follow the first review-page link found on the current result page."""
    html = driver.page_source
    soup = BeautifulSoup(html, 'html.parser')
    # Collect the review-page links (translated from Korean)
    links = soup.find('div',{'class':'info'}).find('span',{'class':'etc'}).findAll('a', href=True)
    driver.get(links[0].attrs['href'])
    time.sleep(random.randrange(2,5))  # randomized delay, presumably to throttle scraping
def get_reviews():
    """Scrape star ratings and review texts from the current review page.

    Returns a (stars, reviews) tuple of parallel lists of strings,
    extracted with CSS selectors from the '#_review_list' markup.
    """
    html = driver.page_source
    soup = BeautifulSoup(html, 'html.parser')
    stars = []
    for star in soup.select('#_review_list > li > div > div.avg_area > a > span.curr_avg > strong'):
        stars.append(star.text)
    reviews = []
    for review in soup.select('#_review_list > li > div > div.atc'):
        reviews.append(review.text)
    return (stars, reviews)
if __name__ == "__main__":
    today = date.today()
    input_filename = 'nshop-cream.csv'
    # Each row's 'title' column is used as a search query.
    items = get_items_form_csv(input_filename)
    # Output file is dated, e.g. ./output/nshop-cream-20200101.csv
    with open('./output/{}-{}.csv'.format(input_filename.split('.')[0], today.strftime("%Y%m%d")), 'w') as output:
        output.write('title,review,star')
        for item in items:
            open_navershoppage()
            search(item)
            # page_wait('info')
            time.sleep(3)
            move_review_page()
            stars, reviews = get_reviews()
            # stars and reviews are parallel lists; write one CSV row each.
            for i in range(len(stars)):
                output.write('\n{title},"{review}",{star}'.format(title=item, review=reviews[i], star=stars[i]))
            time.sleep(random.randrange(2,4))  # randomized delay between items
    driver.close()
| true |
11c472fe54f1a7520c3f92285ff5c5efe440bf64 | Python | Ander20n/Codigos-Faculdade | /Códigos Lab/Pratos_Juvenal.py | UTF-8 | 422 | 3.125 | 3 | [] | no_license | user = int(input())
# Reads a line of "mesa" (table) values per iteration and, while the stop
# marker has not been seen, a line of player values (a leading -1 stops
# further player input for that iteration).
mesa = []
players = []
for x in range(user):
    players.append([])
    # NOTE(review): parar is reset to 0 on every iteration, so the
    # `if parar != -1` guard below is always true — presumably it was
    # meant to persist across iterations; verify against the exercise.
    parar = 0
    cm = input().split(" ")
    cml = [int(c) for c in cm]
    mesa.append(cml)
    if parar != -1:
        cp = input().split(" ")
        if cp[0] == "-1":
            parar = -1
        else:
            cpl = [int(p) for p in cp]
            players[x].append(cpl)
    cp = []
    print(mesa)
print(players[0]) | true |
0c93d6980fcee9e301f7e5d0d55642bc0e64c377 | Python | harry418/Credit-card-fraud-detection | /hybrid_model.py | UTF-8 | 2,670 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 13 12:44:27 2020
@author: harit
"""
# Unzipping dataset in local directory
#from zipfile import ZipFile
#local_zip = 'creditcardfraud.zip'
#zip_ref = ZipFile(local_zip,'r')
#zip_ref.extractall()
# Import necessary library
import pandas as pd
import numpy as np

# Load the credit-card dataset: all columns but the last are features,
# the last column is the fraud label.
dataset = pd.read_csv('creditcard.csv')
X = dataset.iloc[:,:-1].values
y = dataset.iloc[:,-1].values

# Feature Scaling to [0, 1] for the SOM
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler(feature_range=(0,1))
# NOTE(review): the scaled result is discarded here — X itself stays
# unscaled; presumably `X = sc.fit_transform(X)` was intended. Verify.
sc.fit_transform(X)

# Train a 10x10 self-organizing map on the 30 input features
from minisom import MiniSom
som = MiniSom(x = 10 ,y = 10 ,input_len = 30 , learning_rate = 0.5 ,sigma = 1.0)
som.random_weights_init(X)
som.train_random(data = X ,num_iteration = 100)

# Visualize the SOM distance map with one marker per sample:
# 'o'/red for class 0, 's'/green for class 1.
from pylab import bone, pcolor, colorbar, plot, show
bone()
pcolor(som.distance_map().T)
colorbar()
markers = ['o', 's']
colors = ['r', 'g']
for i, x in enumerate(X):
    w = som.winner(x)
    plot(w[0] + 0.5,
         w[1] + 0.5,
         markers[y[i]],
         markeredgecolor = colors[y[i]],
         markerfacecolor = 'None',
         markersize = 10,
         markeredgewidth = 2)
show()

# finding the frauds
mapping = som.win_map(X)
# NOTE(review): `w` here is the leftover winner of the *last* sample from
# the plotting loop above, not a deliberately chosen outlier cell —
# confirm the intended fraud coordinates before trusting this selection.
frauds = mapping[w]
frauds = sc.inverse_transform(frauds)

# Going from Unsupervised learning to supervised learning:
# build the feature matrix (all columns except 'Amount')
customers = dataset.drop('Amount',axis=1)
customers = customers.iloc[:,:].values

# Standard scaling of the feature matrix
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
customers = sc.fit_transform(customers)

# Dependent variable: 1 when the sample's first column value appears in
# the SOM-derived frauds, else 0.
is_fraud = np.zeros(len(customers))
for i in range(len(customers)):
    if dataset.iloc[i,0] in frauds:
        is_fraud[i] =1

# Build the ANN: one hidden layer of 2 units, sigmoid output
from keras.models import Sequential
from keras.layers import Dense
classifier = Sequential()
# Adding the input layer and the first hidden layer
classifier.add(Dense(units = 2, kernel_initializer = 'uniform', activation = 'relu', input_dim = 30))
# Adding the output layer
classifier.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid'))
# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training set
classifier.fit(customers, is_fraud, batch_size = 1, epochs = 5)

# Predict fraud probabilities for every customer
y_pred = classifier.predict(customers)
# NOTE(review): pandas expects `columns` to be list-like; a bare string
# here raises in modern pandas — likely should be ['Probability of Fraud'].
y_p = pd.DataFrame(data = y_pred , index =None , columns = 'Probability of Fraud')
output = pd.concat([dataset ,y_p],axis =1) | true |
5a5b8a4303ec8595719b5585ecb8d9156ce62154 | Python | EmilianStankov/HackBulgaria | /week0/problem 28 - is_an_bn/tests.py | UTF-8 | 531 | 2.84375 | 3 | [] | no_license | from solution import is_an_bn
import unittest
class Test(unittest.TestCase):
    """Checks is_an_bn against the examples from the problem statement."""

    def test_problem_statement_cases(self):
        # assertTrue/assertFalse are the idiomatic forms and give clearer
        # failure messages than assertEqual(True/False, ...).
        self.assertTrue(is_an_bn(""))
        self.assertFalse(is_an_bn("rado"))
        self.assertFalse(is_an_bn("aaabb"))
        self.assertTrue(is_an_bn("aaabbb"))
        self.assertFalse(is_an_bn("aabbaabb"))
        self.assertFalse(is_an_bn("bbbaaa"))
        self.assertTrue(is_an_bn("aaaaabbbbb"))
if __name__ == '__main__':
    # Allow running this file directly: python tests.py
    unittest.main()
| true |
6ff8db09eac5a0edd7cc3d4ef56b0415d5bca738 | Python | RomainSabathe/kaggle_airbnb2015 | /Code/prediction_test_data.py | UTF-8 | 2,414 | 2.546875 | 3 | [
"MIT"
] | permissive | import pandas as pd
import numpy as np
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split
from score import calc_score
from toolbox import transform_to_submission_format
import xgboost as xgb
# NOTE: this is Python 2 code (statement-form print below).
classif_name = 'XGBoost_2016-2-3_10h57m'  # model file stem under ../Models/

""" Recovering the data and the classifier """
print 'Recovering the data'
store = pd.HDFStore('../Data/enhanced_testing_data.h5')
data = store['data_users']
data = data.fillna(0)  # the model cannot handle NaNs
store.close()

""" Making sure that the number of features in the testing data
    is the same as in the training data.
"""
store = pd.HDFStore('../Data/enhanced_learning_restricted_data.h5')
training_data = store['data_users']
training_data = training_data.drop('country_destination', axis=1)
store.close()

#with open('../Data/features_to_keep.dt', 'r') as f:
#    features_to_keep = pickle.load(f)
#training_data = training_data.loc[:,features_to_keep]

# Align the test columns with the training columns: add missing ones
# filled with 0 and drop columns the training data never saw.
train_columns = training_data.columns
test_columns = data.columns.values
missing_columns = [col for col in train_columns if col not in test_columns]
overflow_columns = [col for col in test_columns if col not in train_columns]
emptyDataFrame = pd.DataFrame(0, columns=missing_columns, index=data.index)
data = pd.concat([data, emptyDataFrame], axis=1)
data = data.drop(overflow_columns, axis=1)

""" Loading the classifier """
#classif = pickle.load(open('../Models/%s.md' % classif_name))
xg_test = xgb.DMatrix(data)
param = {}
param['nthread'] = 4
param['num_class'] = 12  # one class per destination country
classif = xgb.Booster(param)
classif.load_model('../Models/%s.md' % classif_name)
# ntree_limit=9 restricts prediction to the first 9 boosting rounds.
proba_countries = classif.predict( xg_test, ntree_limit=9 )

print 'Making a prediction'
#proba_countries = classif.predict_proba(data)

print 'Outputting'
# For each user keep the 5 most probable countries, best first.
find_5_best_countries = lambda x: x.argsort()[-5:][::-1]
best_guesses = np.apply_along_axis(find_5_best_countries, 1, proba_countries)
predictions = pd.DataFrame(best_guesses, index=data.index)
# Generating a proper DataFrame format
le_country = pickle.load(open('../Encoding/LabelEncoder_country_destination.md', 'r'))
one_col_pred_digits = transform_to_submission_format(predictions, data.index)
one_col_pred_names = le_country.inverse_transform(one_col_pred_digits)
final_pred = pd.DataFrame(one_col_pred_names, index=one_col_pred_digits.index)
final_pred.columns = ['country']
| true |
b4a7b81a19ce8bf1b02e1eb4854d8684df7be03e | Python | fianteh011/Raytracer_Stufe_2 | /Render/scene.py | UTF-8 | 310 | 2.53125 | 3 | [] | no_license | class Scene:
"""Scene-Klasse bündelt/sammelt alle Informationen für die ray tracing engine"""
def __init__(self, camera, objects, lights, width, height):
self.camera = camera
self.objects = objects
self.lights = lights
self.width = width
self.height = height
| true |
930a9d861e4acc77a5ae978dc1297f4f3a6b03ce | Python | jeykang/TimeSeries-WGAN-TF2 | /master_thesis/generative_models/wgan/wgan_utils.py | UTF-8 | 4,233 | 2.671875 | 3 | [
"MIT"
] | permissive | import keras
from keras import Model
from keras.layers import *
from keras.optimizers import RMSprop
from generative_models import utils
def build_generator(latent_dim, timesteps):
    """Build the WGAN generator: latent vector -> time series of length *timesteps*.

    Dense(15) -> three (Conv1D + BN + LeakyReLU + UpSampling1D) stages ->
    1-channel Conv1D -> Dense with tanh output. `K` is assumed to be the
    keras backend brought in by the star import above — TODO confirm.
    """
    generator_inputs = Input((latent_dim,))
    generated = generator_inputs
    generated = Dense(15)(generated)
    generated = utils.BatchNormalization()(generated)
    generated = LeakyReLU(0.2)(generated)
    # Add a channel axis so Conv1D can operate on the sequence.
    generated = Lambda(lambda x: K.expand_dims(x))(generated)
    generated = Conv1D(32, 3, padding='same')(generated)
    generated = utils.BatchNormalization()(generated)
    generated = LeakyReLU(0.2)(generated)
    generated = UpSampling1D(2)(generated)
    generated = Conv1D(32, 3, padding='same')(generated)
    generated = utils.BatchNormalization()(generated)
    generated = LeakyReLU(0.2)(generated)
    generated = UpSampling1D(2)(generated)
    generated = Conv1D(32, 3, padding='same')(generated)
    generated = utils.BatchNormalization()(generated)
    generated = LeakyReLU(0.2)(generated)
    generated = UpSampling1D(2)(generated)
    generated = Conv1D(1, 3, padding='same')(generated)
    generated = utils.BatchNormalization()(generated)
    generated = LeakyReLU(0.2)(generated)
    # Drop the channel axis again before the final Dense projection.
    generated = Lambda(lambda x: K.squeeze(x, -1))(generated)
    generated = Dense(timesteps, activation='tanh')(generated)

    generator = Model(generator_inputs, generated, 'generator')
    return generator
def build_critic(timesteps):
    """Build the WGAN critic: time series of length *timesteps* -> scalar score.

    Four Conv1D stages (the first three followed by max-pooling), then
    Dense(50) -> Dense(15) -> Dense(1) with no final activation, as the
    critic outputs an unbounded score. All layers share the same
    RandomNormal(0, 0.02) kernel initializer.
    """
    kernel_initializer = keras.initializers.RandomNormal(0, 0.02)

    critic_inputs = Input((timesteps,))
    # Add a channel axis so Conv1D can operate on the sequence.
    criticized = Lambda(lambda x: K.expand_dims(x, -1))(critic_inputs)
    criticized = Conv1D(32, 3, padding='same', kernel_initializer=kernel_initializer)(criticized)
    criticized = LeakyReLU(0.2)(criticized)
    criticized = MaxPooling1D(2, padding='same')(criticized)
    criticized = Conv1D(32, 3, padding='same', kernel_initializer=kernel_initializer)(criticized)
    criticized = LeakyReLU(0.2)(criticized)
    criticized = MaxPooling1D(2, padding='same')(criticized)
    criticized = Conv1D(32, 3, padding='same', kernel_initializer=kernel_initializer)(criticized)
    criticized = LeakyReLU(0.2)(criticized)
    criticized = MaxPooling1D(2, padding='same')(criticized)
    criticized = Conv1D(32, 3, padding='same', kernel_initializer=kernel_initializer)(criticized)
    criticized = LeakyReLU(0.2)(criticized)
    criticized = Flatten()(criticized)
    criticized = Dense(50, kernel_initializer=kernel_initializer)(criticized)
    criticized = LeakyReLU(0.2)(criticized)
    criticized = Dense(15, kernel_initializer=kernel_initializer)(criticized)
    criticized = LeakyReLU(0.2)(criticized)
    criticized = Dense(1, kernel_initializer=kernel_initializer)(criticized)

    critic = Model(critic_inputs, criticized, 'critic')
    return critic
def build_generator_model(generator, critic, generator_lr, latent_dim):
    """Assemble and compile the generator-training model.

    The critic is frozen so only the generator's weights are updated;
    the model maps noise -> critic score and minimizes Wasserstein loss
    with RMSprop at *generator_lr*.
    """
    utils.set_model_trainable(generator, True)
    utils.set_model_trainable(critic, False)

    noise_samples = Input((latent_dim,))
    generated_samples = generator(noise_samples)
    generated_criticized = critic(generated_samples)

    generator_model = Model(noise_samples, generated_criticized, 'generator_model')
    generator_model.compile(loss=utilities_wasserstein_placeholder if False else utils.wasserstein_loss, optimizer=RMSprop(generator_lr))
    return generator_model
def build_critic_model(generator, critic, critic_lr, latent_dim, timesteps):
    """Assemble and compile the critic-training model.

    The generator is frozen so only the critic's weights are updated;
    the model scores a real sample and a generated sample and minimizes
    the average (1/2 + 1/2 weights) of the two Wasserstein losses with
    RMSprop at *critic_lr*.
    """
    utils.set_model_trainable(generator, False)
    utils.set_model_trainable(critic, True)

    noise_samples = Input((latent_dim,))
    real_samples = Input((timesteps,))

    generated_samples = generator(noise_samples)
    generated_criticized = critic(generated_samples)
    real_criticized = critic(real_samples)

    critic_model = Model([real_samples, noise_samples],
                         [real_criticized, generated_criticized], 'critic_model')
    critic_model.compile(loss=[utils.wasserstein_loss, utils.wasserstein_loss], optimizer=RMSprop(critic_lr),
                         loss_weights=[1 / 2, 1 / 2])
    return critic_model
def clip_weights(model, clip_value):
    """Clip every weight of *model* into [-clip_value, clip_value] in place.

    Iterates the model's layers and rewrites each weight array with its
    clipped copy via set_weights.
    """
    # Imported locally: the visible module-level imports do not clearly
    # bring numpy into scope under the name `np`.
    import numpy as np
    for l in model.layers:
        weights = [np.clip(w, -clip_value, clip_value) for w in l.get_weights()]
        l.set_weights(weights)
| true |
23f205b4e9d2ad1bd141f85a3b272ead8d398b08 | Python | Mallik-G/datalake-etl-pipeline | /src/etl/ETL.py | UTF-8 | 2,189 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | import datetime
import pyspark.sql.functions as f
import pytz
from pyspark.sql import SparkSession
from pyspark.sql.types import *
# Module-level lookup table consumed by udfLookups below, keyed by
# "<column-name> <code>". Empty here — presumably populated elsewhere
# before the UDFs run; verify against the callers.
lookup = {}

# ToDo - Yet to add many potential UDFs
def registerAllUDF(sc: SparkSession):
    """Register every UDF of this module on the given Spark session.

    Each call maps a SQL function name to its Python implementation.
    """
    sc.udf.register(name='datetimetogmt', f=datetimeToGMT)
    sc.udf.register(name='zonedatetimetogmt', f=zoneDatetimeToGMTZone)
    sc.udf.register(name='isnullorempty', f=isNullOrEmpty)
    # The original registered 'datetimetogmt' a second time here with the
    # same function, which was redundant; the duplicate was removed.
    sc.udf.register(name='udfnvl', f=udfNvl)
    sc.udf.register(name='udflookup', f=udfLookups)
def datetimeToGMT(dt, fmt):
    """Parse *dt* with format *fmt*, interpret it as America/Los_Angeles
    local time, and return the equivalent timezone-aware UTC datetime."""
    parsed = datetime.datetime.strptime(str(dt).strip(), str(fmt).strip())
    # is_dst=None makes pytz raise on ambiguous/non-existent local times.
    localized = pytz.timezone("America/Los_Angeles").localize(parsed, is_dst=None)
    return localized.astimezone(pytz.utc)
def strSplitSep(s, sep=','):
    """Split the string form of *s* on *sep* and return the list of parts."""
    text = str(s)
    delimiter = str(sep)
    return text.split(delimiter)
def varargsToList(*fields, sep):
    """Join the given field strings with keyword-only *sep* into one string."""
    separator = str(sep)
    return separator.join(fields)
def zoneDatetimeToGMTZone(dt, fmt, zone):
    """Parse *dt* with format *fmt* as local time in timezone *zone* and
    return the equivalent timezone-aware UTC datetime."""
    parsed = datetime.datetime.strptime(str(dt).strip(), str(fmt).strip())
    # is_dst=None makes pytz raise on ambiguous/non-existent local times.
    localized = pytz.timezone(str(zone).strip()).localize(parsed, is_dst=None)
    return localized.astimezone(pytz.utc)
@f.udf(returnType=StringType())
def udfNvl(field):
    """Spark UDF: replace a null/blank value with the placeholder "-"."""
    # isNullOrEmpty returns None for null or whitespace-only input.
    if isNullOrEmpty(field) is None:
        return "-"
    else:
        # Returned as-is (not stripped) when it has real content.
        return field
@f.udf(returnType=StringType())
def udfLookups(clname, s):
    """Spark UDF: map each ';'-separated code in *s* through the
    module-level `lookup` dict, keyed by "<clname> <code>".

    Unknown codes pass through unchanged; a null input yields "".
    """
    finallookupvalue = []
    if s is None:
        return ""
    else:
        codes = str(s).split(sep=';')
        for cd in codes:
            if f"{clname} {cd}" in lookup.keys():
                finallookupvalue.append(lookup[f"{clname} {cd}"])
            else:
                # No mapping found: keep the original code.
                finallookupvalue.append(cd)
        return ';'.join(finallookupvalue)
def squared_udf(s):
    """Return s squared, or None when s is None."""
    return None if s is None else s * s
def nullString(s):
    """Return True when *s* is None or its string form is blank/whitespace.

    Bug fix: the original tested `str(s).strip().__eq__("") is None`,
    and since str.__eq__ returns a bool, `... is None` was always False —
    empty strings were never detected. This now mirrors isNullOrEmpty.
    """
    return s is None or str(s).strip() == ""
def isNullOrEmpty(s):
    """Return the stripped string form of *s*, or None when *s* is None
    or contains nothing but whitespace."""
    if s is None:
        return None
    stripped = str(s).strip()
    if stripped == "":
        return None
    return stripped
| true |
bab00b01831391c536c104e63a1e90144d5e4589 | Python | godnnness/pyactr_for_train_stop_control | /venv/Lib/site-packages/pyactr/chunks.py | UTF-8 | 22,194 | 2.890625 | 3 | [] | no_license | """
Chunks
"""
import collections
import re
import random
import warnings
import numbers
import pyactr.utilities as utilities
from pyactr.utilities import ACTRError
def chunktype(cls_name, field_names, defaults=None):
    """
    Creates type chunk. Works like namedtuple.

    For example:
    >>> chunktype('chunktype_example0', 'value')

    :param field_names: an iterable or a string of slot names separated by spaces
    :param defaults: default values for the slots, given as an iterable, counting from the last element

    The created namedtuple is not returned; it is stored in
    Chunk._chunktypes under *cls_name*. Slot names get a trailing "_" to
    avoid clashes with namedtuple internals, and are kept sorted.
    """
    if cls_name in utilities.SPECIALCHUNKTYPES and field_names != utilities.SPECIALCHUNKTYPES[cls_name]:
        raise ACTRError("You cannot redefine slots of the chunk type '%s'; you can only use the slots '%s'" % (cls_name, utilities.SPECIALCHUNKTYPES[cls_name]))

    try:
        field_names = field_names.replace(',', ' ').split()
    except AttributeError: # no .replace or .split
        pass # assume it's already a sequence of identifiers
    field_names = tuple(sorted(name + "_" for name in field_names))
    for each in field_names:
        if each == "ISA" or each == "isa":
            raise ACTRError("You cannot use the slot 'isa' in your chunk. That slot is used to define chunktypes.")
    try:
        Chunk._chunktypes.update({cls_name:collections.namedtuple(cls_name, field_names, defaults=defaults)}) #chunktypes are not returned; they are stored as Chunk class attribute
    except TypeError:
        # Fallback for Python versions whose namedtuple lacks `defaults`.
        Chunk._chunktypes.update({cls_name:collections.namedtuple(cls_name, field_names)}) #chunktypes are not returned; they are stored as Chunk class attribute
# Bug fix: `collections.Sequence` is the deprecated alias removed in
# Python 3.10; the rest of this file already uses `collections.abc.*`.
class Chunk(collections.abc.Sequence):
    """
    ACT-R chunks. Based on namedtuple (tuple with dictionary-like properties).

    For example:
    >>> Chunk('chunktype_example0', value='one')
    chunktype_example0(value= one)
    """

    class EmptyValue(object):
        """
        Empty values used in chunks. These are None values.
        """

        def __init__(self):
            self.value = utilities.EMPTYVALUE

        def __eq__(self, val):
            """Equal to the empty value and to its string form."""
            if val == utilities.EMPTYVALUE or val == str(utilities.EMPTYVALUE):
                return True #Chunks make strings out of values; (this holds for everything but cases in which chunks themselves are values); so, None will be turned into a string as well, hence the equality
            else:
                return False

        def __hash__(self):
            return hash(self.value)

        def __repr__(self):
            return repr(self.value)

    # Class-level registries shared by all chunks.
    _chunktypes = {}
    _undefinedchunktypecounter = 0
    _chunks = {}

    __emptyvalue = EmptyValue()

    _similarities = {} #dict of similarities between chunks

    def __init__(self, typename, **dictionary):
        """Create a chunk of type *typename* with the given slot values.

        Slot values may be strings, other chunks, or prepared VarvalClass
        namedtuples; anything else that is iterable/unhashable is rejected.
        Missing slots are filled with the empty value; unknown typenames or
        extra slots trigger (re)creation of the chunk type with a warning.
        """
        self.typename = typename
        self.boundvars = {} #dict of bound variables
        kwargs = {}
        for key in dictionary:
            #change values (and values in a tuple) into string, when possible (when the value is not another chunk)
            if isinstance(dictionary[key], Chunk):
                dictionary[key] = utilities.VarvalClass(variables=None, values=dictionary[key], negvariables=(), negvalues=())
            elif isinstance(dictionary[key], utilities.VarvalClass):
                # Validate a pre-built varval: values/variables must be
                # strings, chunks or empty; neg* fields must be tuples.
                for x in dictionary[key]._fields:
                    if x in {"values", "variables"} and not isinstance(getattr(dictionary[key], x), str) and getattr(dictionary[key], x) != self.__emptyvalue and not isinstance(getattr(dictionary[key], x), Chunk):
                        raise TypeError("Values and variables must be strings, chunks or empty (None)")
                    elif x in {"negvariables", "negvalues"} and (not isinstance(getattr(dictionary[key], x), collections.abc.Sequence) or isinstance(getattr(dictionary[key], x), collections.abc.MutableSequence)):
                        raise TypeError("Negvalues and negvariables must be tuples")
            elif (isinstance(dictionary[key], collections.abc.Iterable) and not isinstance(dictionary[key], str)) or not isinstance(dictionary[key], collections.abc.Hashable):
                raise ValueError("The value of a chunk slot must be hashable and not iterable; you are using an illegal type for the value of the chunk slot %s, namely %s" % (key, type(dictionary[key])))
            else:
                #create namedtuple varval and split dictionary[key] into variables, values, negvariables, negvalues
                try:
                    temp_dict = utilities.stringsplitting(str(dictionary[key]))
                except utilities.ACTRError as e:
                    raise utilities.ACTRError("The chunk %s is not defined correctly; %s" %(dictionary[key], e))
                loop_dict = temp_dict.copy()
                for x in loop_dict:
                    if x == "negvariables" or x == "negvalues":
                        val = tuple(temp_dict[x])
                    else:
                        try:
                            val = temp_dict[x].pop()
                        except KeyError:
                            val = None
                    temp_dict[x] = val
                dictionary[key] = utilities.VarvalClass(**temp_dict)

            #adding _ to minimize/avoid name clashes
            kwargs[key+"_"] = dictionary[key]
        try:
            for elem in self._chunktypes[typename]._fields:
                if elem not in kwargs:
                    kwargs[elem] = self.__emptyvalue #emptyvalues are explicitly added to attributes that were left out
                    dictionary[elem[:-1]] = self.__emptyvalue #emptyvalues are also added to attributes in the original dictionary (since this might be used for chunktype creation later)
            if set(self._chunktypes[typename]._fields) != set(kwargs.keys()):
                chunktype(typename, dictionary.keys()) #If there are more args than in the original chunktype, chunktype has to be created again, with slots for new attributes
                warnings.warn("Chunk type %s is extended with new attributes" % typename)
        except KeyError:
            chunktype(typename, dictionary.keys()) #If chunktype completely missing, it is created first
            warnings.warn("Chunk type %s was not defined; added automatically" % typename)
        finally:
            self.actrchunk = self._chunktypes[typename](**kwargs)

        self.__empty = None #this will store what the chunk looks like without empty values (the values will be stored on the first call of the relevant function)
        self.__unused = None #this will store what the chunk looks like without unused values
        self.__hash = None, self.boundvars.copy() #this will store the hash along with variables (hash changes if some variables are resolved)

    def _asdict(self):
        """
        Create a dictionary out of chunk.
        """
        temp_dict = self.actrchunk._asdict()
        # Strip the trailing "_" that chunktype appends to slot names.
        dictionary = {re.sub("_$", "", key): temp_dict[key] for key in temp_dict}
        return dictionary

    def __eq__(self, otherchunk):
        """Chunks are equal when their hashes coincide."""
        if hash(self) == hash(otherchunk):
            return True
        else:
            return False

    def __getattr__(self, name):
        """Expose slot values as attributes (the stored names end in "_")."""
        if hasattr(self.actrchunk, name + "_"):
            return getattr(self.actrchunk, name + "_")
        else:
            raise AttributeError("Chunk has no such attribute")

    def __getitem__(self, pos):
        """Return the (slot name, value) pair at position *pos*."""
        return re.sub("_$", "", self.actrchunk._fields[pos]), self.actrchunk[pos]

    def __hash__(self):
        """Hash the non-empty slots, resolving bound variables to values.

        The result is cached together with the boundvars snapshot used to
        compute it; binding new variables invalidates the cache.
        """
        if self.__hash[0] and self.boundvars == self.__hash[1]:
            return self.__hash[0]
        def hash_func():
            for x in self.removeempty():
                varval = utilities.splitting(x[1])
                temp_varval = {"values": set(), "negvalues": set()}
                for key in ["variables", "negvariables"]:
                    if getattr(varval, key):
                        for value in getattr(varval, key):
                            try:
                                temp_varval[re.sub("variables", "values", key)].add(self.boundvars[utilities.ACTRVARIABLE + value]) #add (neg)value based on the (neg)variable
                            except KeyError:
                                if x[0]:
                                    yield tuple([x[0], tuple([key, hash(value)])]) #get hash of variable if it is not bound
                                else:
                                    yield tuple([key, hash(value)])
                for key in ["values", "negvalues"]:
                    if key == "values" and getattr(varval, key) != self.__emptyvalue:
                        temp_varval[key].update(set([getattr(varval, key)]))
                    elif key == "negvalues":
                        temp_varval[key].update(set(getattr(varval, key)))
                    if temp_varval[key]:
                        for value in temp_varval[key]:
                            if x[0]:
                                yield tuple([x[0], tuple([key, hash(value)])]) #values get their hash directly
                            else:
                                yield tuple([key, hash(value)])
        self.__hash = hash(tuple(hash_func())), self.boundvars.copy() #store the hash along with the vars used to calculate it, so it doesnt need to be recalculated
        return self.__hash[0]

    def __iter__(self):
        """Yield (slot name, value) pairs in field order."""
        for x, y in zip(self.actrchunk._fields, self.actrchunk):
            yield re.sub("_$", "", x), y

    def __len__(self):
        return len(self.actrchunk)

    def __repr__(self):
        """Render as typename(slot= value, ...), empty values as blanks."""
        reprtxt = ""
        for x, y in self:
            if isinstance(y, utilities.VarvalClass):
                y = str(y)
            elif isinstance(y, self.EmptyValue):
                y = ""
            if reprtxt:
                reprtxt = ", ".join([reprtxt, '%s= %s' % (x, y)])
            elif x:
                reprtxt = '%s= %s' % (x, y)
            else:
                reprtxt = '%s' % y
        return "".join([self.typename, "(", reprtxt, ")"])

    def __lt__(self, otherchunk):
        """
        Check whether one chunk is proper part of another (given bound variables in boundvars).
        """
        return not self == otherchunk and self.match(otherchunk, partialmatching=False)

    def __le__(self, otherchunk):
        """
        Check whether one chunk is part of another (given boundvariables in boundvars).
        """
        return self == otherchunk or self.match(otherchunk, partialmatching=False) #actually, the second disjunct should be enough -- TODO: check why it fails in some cases; this might be important for partial matching

    def match(self, otherchunk, partialmatching, mismatch_penalty=1):
        """
        Check partial match (given bound variables in boundvars).

        With partialmatching=True a (dis)similarity score is returned;
        otherwise the result is a strict boolean. Matching also records
        variable bindings in self.boundvars as a side effect.
        """
        similarity = 0
        if self == otherchunk:
            return similarity
        #below starts the check that self is proper part of otherchunk. __emptyvalue is ignored. 4 cases have to be checked separately, =x, ~=x, !1, ~!1. Also, variables and their values have to be saved in boundvars. When self is not part of otherchunk the loop adds to (dis)similarity.
        for x in self:
            try:
                matching_val = getattr(otherchunk.actrchunk, x[0] + "_") #get the value of attr
            except AttributeError:
                matching_val = None #if it is missing, it must be None

            if isinstance(matching_val, utilities.VarvalClass):
                matching_val = matching_val.values #the value might be written using _variablesvalues namedtuple; in that case, get it out

            varval = utilities.splitting(x[1])

            #checking variables, e.g., =x
            if varval.variables:
                #if matching_val == self.__emptyvalue:
                #    similarity -= 1 #these two lines would require that variables are matched only to existing values; uncomment if you want that
                var = varval.variables
                for each in self.boundvars.get("~=" + var, set()):
                    if each == matching_val:
                        if partialmatching:
                            similarity += utilities.get_similarity(self._similarities, each, matching_val, mismatch_penalty) #False if otherchunk's value among the values of ~=x
                        else:
                            return False
                try:
                    if self.boundvars["=" + var] != matching_val:
                        if partialmatching:
                            similarity += utilities.get_similarity(self._similarities, self.boundvars["=" + var], matching_val, mismatch_penalty) #False if =x does not match otherchunks' value
                        else:
                            return False
                except KeyError:
                    self.boundvars.update({"=" + var: matching_val}) #if boundvars lack =x, update and proceed

            #checking negvariables, e.g., ~=x
            if varval.negvariables:
                for var in varval.negvariables:
                    try:
                        if self.boundvars["=" + var] == matching_val:
                            if partialmatching:
                                similarity += utilities.get_similarity(self._similarities, self.boundvars["=" + var], matching_val, mismatch_penalty) #False if =x does not match otherchunks' value
                            else:
                                return False
                    except KeyError:
                        pass
                    self.boundvars.setdefault("~=" + var, set([])).add(matching_val)

            #checking values, e.g., 10 or !10
            if varval.values:
                val = varval.values
                if val != None and val != matching_val: #None is the misssing value of the attribute
                    if partialmatching:
                        similarity += utilities.get_similarity(self._similarities, val, matching_val, mismatch_penalty)
                    else:
                        return False
            #checking negvalues, e.g., ~!10
            if varval.negvalues:
                for negval in varval.negvalues:
                    if negval == matching_val or (negval in {self.__emptyvalue, 'None'} and matching_val == self.__emptyvalue):
                        if partialmatching:
                            similarity += utilities.get_similarity(self._similarities, negval, matching_val, mismatch_penalty)
                        else:
                            return False
        if partialmatching:
            return similarity
        else:
            return True

    def removeempty(self):
        """
        Remove slot-value pairs that have the value __emptyvalue, that is, None and 'None'.

        Be careful! This returns a tuple with slot-value pairs.
        """
        def emptying_func():
            for x in self:
                try:
                    if x[1].removeempty():
                        if x[1] != self.__emptyvalue:
                            yield x
                except AttributeError:
                    try:
                        if x[1].values != self.__emptyvalue or x[1].variables or x[1].negvariables or x[1].negvalues:
                            yield x
                    except AttributeError:
                        pass
        # The filtered view is computed once and cached.
        if not self.__empty:
            self.__empty = tuple(emptying_func())
        return self.__empty

    def removeunused(self):
        """
        Remove values that were only added to fill in empty slots, using None.

        Be careful! This returns a generator with slot-value pairs.
        """
        def unusing_func():
            for x in self:
                try:
                    if x[1].removeunused():
                        if x[1] != utilities.EMPTYVALUE:
                            yield x
                except AttributeError:
                    try:
                        if x[1].values != utilities.EMPTYVALUE or x[1].variables or x[1].negvariables or x[1].negvalues:
                            yield x
                    except AttributeError:
                        pass
        # The filtered view is computed once and cached.
        if not self.__unused:
            self.__unused = tuple(unusing_func())
        return self.__unused
#special chunk that can be used in production rules
for key in utilities.SPECIALCHUNKTYPES:
    # Pre-register every special chunk type so production rules can use it.
    chunktype(key, utilities.SPECIALCHUNKTYPES[key])
def createchunkdict(chunk):
    """
    Create typename and chunkdict from pyparsed list.

    *chunk* is a sequence of parsed slot elements; each element starts
    with the slot name followed by (marker, value) pieces. The markers
    (=, ~, !, their combinations) select which VarvalClass field the
    value lands in. Returns (type_chunk, chunk_dict) where type_chunk is
    the value of the isa/ISA/Isa slot (or "" when absent).
    """
    sp_dict = {utilities.ACTRVARIABLE: "variables", utilities.ACTRNEG: "negvalues", utilities.ACTRNEG + utilities.ACTRVARIABLE: "negvariables", utilities.ACTRVALUE: "values", utilities.ACTRNEG + utilities.ACTRVALUE: "negvalues"}
    chunk_dict = {}
    for elem in chunk:
        temp_dict = chunk_dict.get(elem[0], utilities.VarvalClass(variables=set(), values=set(), negvariables=set(), negvalues=set())._asdict())
        for idx in range(1, len(elem)):
            try:
                if elem[idx][0][0] == utilities.VISIONGREATER or elem[idx][0][0] == utilities.VISIONSMALLER: #this checks special visual conditions on greater/smaller than
                    if elem[idx][0][-1] == utilities.ACTRVARIABLE:
                        temp_dict['variables'].add(elem[idx][1])
                        update_val = elem[idx][0][0]
                    else:
                        update_val = elem[idx][0] + elem[idx][1]
                    #here fix
                    updating = 'values'
                elif elem[idx][1][0] == "'" or elem[idx][1][0] == '"':
                    # Quoted value: store it without the surrounding quotes.
                    updating = sp_dict[elem[idx][0]]
                    update_val = elem[idx][1][1:-1]
                else:
                    updating = sp_dict[elem[idx][0]]
                    update_val = elem[idx][1]
            except (KeyError, IndexError) as err: #indexerror --> only a string is present; keyerror: the first element in elem[idx] is not a special symbol (in sp)
                if elem[idx][0] == "'" or elem[idx][0] == '"':
                    update_val = elem[idx][1:-1]
                else:
                    #check if the string is an existing chunk in the database of chunks
                    try:
                        update_val = Chunk._chunks[elem[idx]]
                    #if not, save it as a string
                    except KeyError:
                        update_val = elem[idx]
                updating = 'values'
            finally:
                temp_dict[updating].add(update_val)
        chunk_dict[elem[0]] = temp_dict
    # Normalize: neg* fields become tuples; values/variables must hold at
    # most one entry, which is unwrapped (or None when absent).
    for key in chunk_dict:
        chunk_dict[key]["negvalues"] = tuple(chunk_dict[key]["negvalues"])
        chunk_dict[key]["negvariables"] = tuple(chunk_dict[key]["negvariables"])
        for x in ["values", "variables"]:
            if len(chunk_dict[key][x]) > 1:
                raise utilities.ACTRError("Any slot must have fewer than two %s, there is more than one in this slot" %x)
            elif len(chunk_dict[key][x]) == 1:
                chunk_dict[key][x] = chunk_dict[key][x].pop()
            else:
                chunk_dict[key][x] = None
        chunk_dict[key] = utilities.VarvalClass(**chunk_dict[key])
    type_chunk = ""
    try:
        # Accept isa/ISA/Isa; the first pop that raises KeyError stops the
        # chain, so at most one spelling is actually consumed.
        type_chunk = chunk_dict.pop("isa").values #change this - any combination of capital/small letters
        type_chunk = chunk_dict.pop("ISA").values
        type_chunk = chunk_dict.pop("Isa").values
    except KeyError:
        pass
    return type_chunk, chunk_dict
def makechunk(nameofchunk="", typename="", **dictionary):
    """
    Create a chunk.

    Three values can be specified:
    (i) the name of the chunk (usable when the chunk appears as a value of
        other chunks or production rules)
    (ii) its type
    (iii) slot-value pairs.

    For example:
    >>> makechunk(nameofchunk='example0', typename='chunktype_example0', value='one')
    chunktype_example0(value= one)

    This creates a chunk of type chunktype_example0 with a single slot
    (value) whose value is one.
    """
    if not nameofchunk:
        nameofchunk = "unnamedchunk"
    if not typename:
        typename = "undefined" + str(Chunk._undefinedchunktypecounter)
        Chunk._undefinedchunktypecounter += 1
    for slot in dictionary:
        current = dictionary[slot]
        # Chunks and already-normalised records are stored untouched.
        if isinstance(current, (Chunk, utilities.VarvalClass)):
            continue
        # Anything else is parsed from its string form.
        try:
            parsed = utilities.stringsplitting(str(current))
        except utilities.ACTRError as e:
            raise utilities.ACTRError("The chunk value %s is not defined correctly; %s" % (current, e))
        normalised = {}
        for field, found in parsed.items():
            if field in ("negvariables", "negvalues"):
                # Negations may hold several entries; freeze them as a tuple.
                normalised[field] = tuple(found)
            else:
                # At most one entry is expected; an empty set becomes None.
                try:
                    normalised[field] = found.pop()
                except KeyError:
                    normalised[field] = None
        dictionary[slot] = utilities.VarvalClass(**normalised)
    created_chunk = Chunk(typename, **dictionary)
    created_chunk._chunks[nameofchunk] = created_chunk
    return created_chunk
def chunkstring(name='', string=''):
    """
    Build a chunk from its string description.

    The string holds whitespace-separated slot value pairs (any number of
    them).  The isa slot, when present, supplies the chunk type; otherwise
    the chunk gets an 'undefined' type.

    For example:
    >>> chunkstring(name="example0", string='isa chunktype_example0 value one')
    chunktype_example0(value= one)
    """
    parsed = utilities.getchunk().parseString(string, parseAll=True)
    try:
        type_chunk, chunk_dict = createchunkdict(parsed)
    except utilities.ACTRError as e:
        raise utilities.ACTRError("The chunk string %s is not defined correctly; %s" % (string, e))
    return makechunk(name, type_chunk, **chunk_dict)
| true |
89c5a8a4e6efe9c4edf5e17e461d1a02eabdb84c | Python | miklosduma/magus | /magus_kalkulator/table_to_html.py | UTF-8 | 11,592 | 3.28125 | 3 | [
"Apache-2.0"
] | permissive | """
Utility function that creates an HTML page from the penalty dicts
(e.g. head_table.py).
Each penalty dict becomes an HTML table. The dicts look like:
Weapon type:
body part:
level1_penalties
level2_penalties
body part2:
level1_penalties
level2_penalties
Weapon type2:
body_part:
level1_penalties
level2_penalties
body_part2:
level1_penalties
level2_penalties
The output HTML tables look like:
|Weapon type |Weapon type2 |
|body part |l1_pen|l2_pen|l1_pen|l2_pen|
|body part2|l1_pen|l2_pen|l1_pen|l2_pen|
"""
import os
from functools import wraps

from magus_kalkulator import head_table
from magus_kalkulator import limbs_table
from magus_kalkulator import torso_table
from magus_kalkulator.interface_elements import get_relative_dir
# Destination of the generated HTML page.
HTML_PATH = get_relative_dir('resources/index.html')
# Source files of the penalty tables; their mtimes decide regeneration.
TARGET_DICT_PATHS = [os.path.abspath(head_table.__file__),
                     os.path.abspath(torso_table.__file__),
                     os.path.abspath(limbs_table.__file__)]
# (penalty dict, table caption) pairs rendered into the page.
TARGET_DICTS = [
    (head_table.FEJ_TABLA, 'Fej'),
    (limbs_table.VEGTAG_TABLA, 'Vegtagok'),
    (torso_table.TORZS_TABLA, 'Torzs')]
# Static assets linked from the page header.
CSS_LINK = '<link rel=\"stylesheet\" type=\"text/css\" href=\"tables.css\">'
JS_LINK = '<script src=\"collapse.js\" type=\"text/javascript\"></script>'
def add_attributes(**attr):
    """
    Yield each keyword argument formatted as an HTML attribute string.

    - attr:
        A Python dict; each key/value pair is yielded as 'key="value"'.

    The original implementation repeatedly looked up
    ``list(attr.keys())[0]`` and deleted entries while looping -- an O(n^2)
    destructive walk; plain iteration yields the same strings in the same
    (insertion) order.
    """
    for name, value in attr.items():
        yield '{}="{}"'.format(name, value)
def tag_builder(tag, **attr):
    """
    Render the opening tag for an HTML element.

    - tag:
        The name of the HTML element, e.g. 'div' or 'table'.
    - attr:
        Attributes of the element, e.g. {'style': 'color:red;', 'colspan': 3}.
        The key 'klass' is translated to 'class' ('class' being a reserved
        word in Python).
    """
    if 'klass' in attr:
        attr['class'] = attr.pop('klass')
    rendered_attrs = add_attributes(**attr)
    return '<{} {}>'.format(tag, ' '.join(rendered_attrs))
def html_wrapper(tag, **attr):
    """
    Decorator factory: wraps a writer function so its output is enclosed
    in <tag ...> ... </tag>.

    - tag:
        The tag of the HTML element. E.g. 'div' or 'td'.
    - attr:
        Default HTML attributes of the element, e.g.
        {'style': 'color:red;', 'colspan': 3}.
    """
    def real_decorator(fun):
        """
        Inner wrapper. It modifies the decorated function.

        - fun:
            The function being wrapped.
        """
        @wraps(fun)
        def wrapper(html_writer, *args, **kwargs):
            """
            Write the start tag, delegate to `fun` for the element content,
            then always write the end tag.

            - html_writer:
                A co-routine all funs use to write the HTML file.
            - args / kwargs:
                Other arguments of the decorated function; kwargs also
                override or extend the HTML attributes for this call only.
            """
            # Merge per-call attributes over the decorator defaults in a
            # fresh dict.  The original mutated the shared `attr` dict, so
            # kwargs from one call leaked into every later call of the
            # decorated function.
            merged = dict(attr)
            merged.update(kwargs)
            start_tag = tag_builder(tag, **merged)
            html_writer.send(start_tag)
            # Invoke the actual function to write the element content;
            # close the element even if it raises.
            try:
                fun(html_writer, *args, **kwargs)
            finally:
                html_writer.send('</{}>'.format(tag))
        return wrapper
    return real_decorator
def row_content_generator(target_dict, weapon_types, body_parts):
    """
    Generator yielding one table row at a time as (body_part, cells).

    Each cell is a (weapon_type, penalties) pair; the penalties are the
    entries of target_dict[weapon_type][body_part][1:-1], with list entries
    joined into comma-separated strings.

    Note: body_parts is consumed (emptied) while the generator advances.
    """
    while body_parts:
        body = body_parts[0]
        cells = [
            (wtype,
             [', '.join(entry) if isinstance(entry, list) else entry
              for entry in target_dict[wtype][body][1:-1]])
            for wtype in weapon_types
        ]
        yield body, cells
        del body_parts[0]
@html_wrapper('button', onclick='collapse(this)')
def create_button(html_writer, button_text, **_attr):
    """
    Creates a button element that triggers the collapse() JavaScript
    function when clicked.

    - html_writer:
        A co-routine all funs use to write the HTML file.
    - button_text:
        The label of the button.
    - _attr:
        Any additional attributes for the button.

    The decorator writes the start and end tags of the button,
    and adds the onclick attribute to the start tag.
    """
    html_writer.send(button_text)
@html_wrapper('td')
def create_header_cell(html_writer, header_text, **_attr):
    """
    Creates a cell in the first row of the table: the header text followed
    by a collapse button.

    - html_writer:
        A co-routine all funs use to write the HTML file.
    - header_text:
        The text content of the cell.
    - _attr:
        Optional HTML arguments (e.g. colspan) consumed by the decorator.
    """
    html_writer.send(header_text)
    create_button(html_writer, '<')
@html_wrapper('tr')
def create_header_row(html_writer, first_cells, **_attr):
    """
    Creates the first row of a penalty table: an empty corner cell
    followed by one header cell per weapon type, each spanning as many
    columns as it has penalty ranks.
    """
    create_cell(html_writer, '')
    for header_label, penalty_values in first_cells:
        create_header_cell(html_writer, header_label, colspan=len(penalty_values))
@html_wrapper('td')
def create_cell(html_writer, cell_text, **_attr):
    """
    Creates a simple penalty cell in the table.

    - html_writer:
        A co-routine all funs use to write the HTML file.
    - cell_text:
        The penalty content of the cell.
    - _attr:
        Optional HTML arguments consumed by the decorator.
    """
    html_writer.send(cell_text)
@html_wrapper('tr')
def create_penalty_row(html_writer, body_part, cells, **_attr):
    """
    Writes one table row: the body part name in the first cell, followed
    by every penalty value of every weapon type.
    """
    create_cell(html_writer, body_part)
    for _weapon, penalty_values in cells:
        for value in penalty_values:
            create_cell(html_writer, value)
@html_wrapper('table')
def create_table(html_writer, table, caption, **_attr):
    """
    Render one penalty dict as an HTML table with the given caption,
    using the row generator for the cell content.
    """
    weapon_types = list(table)
    body_parts = list(table[weapon_types[0]])
    rows = row_content_generator(table, weapon_types, body_parts)
    # Title the table, then emit the header row and all penalty rows.
    html_writer.send('<caption>{}</caption>'.format(caption))
    first_body_part, first_cells = next(rows)
    create_header_row(html_writer, first_cells)
    create_penalty_row(html_writer, first_body_part, first_cells)
    for body_part, cells in rows:
        create_penalty_row(html_writer, body_part, cells)
@html_wrapper('head')
def write_head(html_writer, **_attr):
    """
    Write the content of the HTML <head> element: the stylesheet and
    JavaScript links.

    - html_writer:
        A co-routine all funs use to write the HTML file.
    - _attr:
        Optional HTML arguments consumed by the decorator.
    """
    # Link CSS file and JavaScript.
    html_writer.send(CSS_LINK)
    html_writer.send(JS_LINK)
@html_wrapper('body')
def write_body(html_writer, target_dicts, **_attr):
    """
    Writes the HTML tables into the HTML file, numbering the captions.

    - html_writer:
        A co-routine all funs use to write the HTML file.
    - target_dicts:
        A list of (penalty dictionary, caption) tuples.
    - _attr:
        Optional HTML arguments consumed by the decorator.
    """
    for table_no, (table, caption) in enumerate(target_dicts, start=1):
        numbered_caption = 'Tablazat {}: {}'.format(table_no, caption)
        create_table(html_writer, table, numbered_caption)
@html_wrapper('html')
def process_target_dicts(html_writer, target_dicts, **_attr):
    """
    Builds an HTML page from the supplied penalty dictionaries by writing
    the head and body through the writer co-routine.

    - html_writer:
        A co-routine all funs use to write the HTML file.
    - target_dicts:
        A list of (penalty dictionary, caption) tuples.
    - _attr:
        Optional HTML arguments consumed by the decorator.
    """
    write_head(html_writer)
    write_body(html_writer, target_dicts)
def get_latest_mod(src_file):
    """
    Return the last-modification timestamp (seconds since the epoch) of
    the specified file.

    Raises FileNotFoundError when the file does not exist.
    """
    # st_mtime is the named equivalent of the old magic index stats[8].
    return os.stat(src_file).st_mtime
def any_change_to_target_dicts(html_file, target_modules):
    """
    Decide whether the HTML page must be (re)generated.

    Returns True when the page has not been created yet, or when at least
    one of the target dict files was modified after the page was written;
    False when the page exists and is up to date.
    """
    try:
        page_mtime = get_latest_mod(html_file)
    except FileNotFoundError:
        # No page yet -- it must be generated.
        return True
    module_mtimes = [get_latest_mod(module) for module in target_modules]
    return any(mtime > page_mtime for mtime in module_mtimes)
def writer(log_file_handle):
    """
    Co-routine that appends every value it receives to the given file.

    - log_file_handle:
        Handle of the target file, opened for appending.
    """
    while True:
        received = yield
        log_file_handle.write(received)
def start_html_page(path_to_html):
    """
    Create (or truncate) the target file and start a writer co-routine on
    it, priming the page with the HTML doctype.

    Returns the co-routine instance and the open file handle; the caller
    is responsible for closing both.
    """
    # 'w' mode truncates and opens for writing in one step; the original
    # opened the file twice (once to wipe it, once more in append mode).
    html_handle = open(path_to_html, 'w')
    # Create the co-routine and advance it to the first yield so that it
    # can accept send() calls.
    html_writer = writer(html_handle)
    html_writer.send(None)
    html_writer.send('<!DOCTYPE html>')
    return html_writer, html_handle
def transform_html(path_to_html):
    """
    Create the HTML file at path_to_html and write the full page into it.

    The writer co-routine and the file handle are always closed, even when
    rendering fails half-way.
    """
    html_writer, html_handle = start_html_page(path_to_html)
    try:
        process_target_dicts(html_writer, TARGET_DICTS)
    finally:
        html_writer.close()
        html_handle.close()
def target_dicts_to_html():
    """
    Make sure the penalty tables exist as an HTML page and return its
    location as a file:// URL.

    The page is regenerated only when one of the target dict files changed
    after the page was last written (or when the page is missing).
    """
    if any_change_to_target_dicts(HTML_PATH, TARGET_DICT_PATHS):
        transform_html(HTML_PATH)
    return 'file://' + HTML_PATH
| true |
bd981095c7002c30630a94a21999feeaca15c6df | Python | dtzqsy/czsc | /examples/signals_dev/merged/asi_up_dw_line_V230603.py | UTF-8 | 3,135 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | from collections import OrderedDict
import numpy as np
import pandas as pd
from czsc.connectors import research
from czsc import CZSC, check_signals_acc, get_sub_elements
from czsc.utils import create_single_signal
def asi_up_dw_line_V230603(c: CZSC, **kwargs) -> OrderedDict:
    """ASI long/short classification.  Contributor: Langang (琅盎).

    Signal template: "{freq}_D{di}N{n}P{p}_ASI多空V230603"

    **Signal logic:**

    SI itself is very volatile, so it is accumulated into ASI and the
    direction of ASI is what gets traded on.  The absolute ASI level is
    not meaningful (it depends on where the accumulation starts), so the
    signal compares ASI against its own mean: above the mean is bullish
    ("看多"), below is bearish ("看空").

    **Signal list:**

    - Signal('日线_D1N30P120_ASI多空V230603_看多_任意_任意_0')
    - Signal('日线_D1N30P120_ASI多空V230603_看空_任意_任意_0')

    :param c: CZSC object
    :param kwargs: parameter dict

        - :param di: compute the signal up to the di-th bar from the end
        - :param n: number of bars, default 30 (also used as a floor in m below)
        - :param p: number of bars, default 120

    :return: signal identification result
    """
    di = int(kwargs.get("di", 1))
    n = int(kwargs.get("n", 30))
    p = int(kwargs.get("p", 120))
    freq = c.freq.value
    k1, k2, k3 = f"{freq}_D{di}N{n}P{p}_ASI多空V230603".split('_')
    v1 = "其他"
    # Not enough history for a stable ASI -- emit the neutral signal.
    if len(c.bars_raw) < di + p + 10:
        return create_single_signal(k1=k1, k2=k2, k3=k3, v1=v1)
    _bars = get_sub_elements(c.bars_raw, di=di, n=p)
    close_prices = np.array([bar.close for bar in _bars])
    open_prices = np.array([bar.open for bar in _bars])
    high_prices = np.array([bar.high for bar in _bars])
    low_prices = np.array([bar.low for bar in _bars])
    # o is the previous close (first element padded with itself).
    o = np.concatenate([[close_prices[0]], close_prices[:-1]])
    a = np.abs(high_prices - o)
    b = np.abs(low_prices - o)
    # NOTE(review): this rebinds the parameter `c` (the CZSC object) to an
    # ndarray.  All bar data was extracted above so it works, but the name
    # is misleading -- consider renaming.
    c = np.abs(high_prices - np.concatenate([[low_prices[0]], low_prices[:-1]])) # type: ignore
    d = np.abs(o - np.concatenate([[open_prices[0]], open_prices[:-1]]))
    k = np.maximum(a, b)
    # NOTE(review): `n` here is the bar-count parameter, so m floors the
    # bar range at n -- presumably meant as a scale constant; confirm
    # against the standard ASI definition.
    m = np.maximum(high_prices - low_prices, n)
    r1 = a + 0.5 * b + 0.25 * d
    r2 = b + 0.5 * a + 0.25 * d
    r3 = c + 0.25 * d
    r4 = np.where((a >= b) & (a >= c), r1, r2)
    r = np.where((c >= a) & (c >= b), r3, r4)
    # Guard against division by zero before computing SI.
    if (r * k / m != 0).all():
        si = 50 * (close_prices - c + (c - open_prices) + 0.5 * (close_prices - open_prices)) / (r * k / m)
    else:
        return create_single_signal(k1=k1, k2=k2, k3=k3, v1=v1)
    asi = np.cumsum(si)
    # Bullish when ASI sits above its own p-bar mean, bearish otherwise.
    v1 = "看多" if asi[-1] > np.mean(asi[-p:]) else "看空"
    return create_single_signal(k1=k1, k2=k2, k3=k3, v1=v1)
def main():
    """Smoke-check the signal against research bars of an A-share index."""
    symbols = research.get_symbols('A股主要指数')
    bars = research.get_raw_bars(symbols[0], '15分钟', '20181101', '20210101', fq='前复权')
    signals_config = [
        {'name': asi_up_dw_line_V230603, 'freq': '日线', 'di': 1},
    ]
    check_signals_acc(bars, signals_config=signals_config) # type: ignore
if __name__ == '__main__':
    main()
| true |
a84ef1c41d745a558fda05feea77e599b91a25b5 | Python | KartikTalwar/Puzzles | /CodeEval/Easy/24SumofIntegersfromFile.py | UTF-8 | 140 | 2.734375 | 3 | [] | no_license | import sys
# Sum the integers listed one per line in the file given as argv[1].
# The original used Python-2 print syntax and left the file open if a
# line failed to parse; a with-statement guarantees the file is closed
# and print(...) works on both Python 2 and 3.
with open(sys.argv[1], 'r') as test_cases:
    total = sum(int(test) for test in test_cases)
print(total)
| true |
40dfbf6bcd2939ca42c6e3f1c99a5d2203f461b9 | Python | idcrypt3/camp_2019_07_14 | /Aman/RailFenceCipher.py | UTF-8 | 2,800 | 3.625 | 4 | [] | no_license | import re
def cipher_encryption(message, key):
    """
    Encrypt `message` with a rail-fence cipher using `key` rails.

    Spaces are stripped from the message first; the characters are then
    written along a zig-zag over the rails and read off rail by rail.

    Fixes over the original: unused matrix cells were marked with '.' and
    every '.' was stripped from the result, corrupting messages that
    contain dots (a None sentinel avoids that); and key == 1 crashed with
    an IndexError, whereas it now returns the space-stripped message.
    """
    message = message.replace(" ", "")
    rails = [[None] * len(message) for _ in range(key)]
    row = 0
    direction = 1
    for col, char in enumerate(message):
        rails[row][col] = char
        if key > 1:
            # Bounce at the top and bottom rails.
            if row == 0:
                direction = 1
            elif row == key - 1:
                direction = -1
            row += direction
    # Read the rails top to bottom, skipping the unused cells.
    return "".join(char for rail in rails for char in rail if char is not None)
def cipher_decryption(message, key):
    """
    Decrypt a rail-fence ciphertext produced with `key` rails.

    The zig-zag positions are marked first, then filled rail by rail with
    the ciphertext, and finally read back along the zig-zag.  The filled
    matrix is printed, mirroring the original's debug output ('.' marks
    the unused cells).

    Fixes over the original: a unique sentinel object replaces the '.'
    marker, so ciphertexts containing dots decrypt correctly; and
    key == 1 no longer crashes with an IndexError.
    """
    message = message.replace(" ", "")
    marker = object()
    rails = [[None] * len(message) for _ in range(key)]
    # Pass 1: mark the zig-zag positions.
    row = 0
    direction = 1
    for col in range(len(message)):
        rails[row][col] = marker
        if key > 1:
            if row == 0:
                direction = 1
            elif row == key - 1:
                direction = -1
            row += direction
    # Pass 2: fill the marked cells rail by rail with the ciphertext.
    index = 0
    for rail in rails:
        for col in range(len(message)):
            if rail[col] is marker:
                rail[col] = message[index]
                index += 1
    # Debug dump of the matrix, matching the original output format.
    for rail in rails:
        for cell in rail:
            print(cell if isinstance(cell, str) else '.', end="")
        print("\n")
    # Pass 3: read the plaintext back along the zig-zag.
    plain_chars = []
    row = 0
    direction = 1
    for col in range(len(message)):
        plain_chars.append(rails[row][col])
        if key > 1:
            if row == 0:
                direction = 1
            elif row == key - 1:
                direction = -1
            row += direction
    return "".join(plain_chars)
def main():
    """
    Interactive entry point: ask for a message, a rail count and a mode,
    then print the encrypted or decrypted result.
    """
    message = input("What is your message? ")
    key = int(input("How many rails do you want (keep it under 20)"))
    choice = int(input("1. Encryption\n2. Decryption\nChoose(1,2): "))
    if choice == 1:
        print(cipher_encryption(message, key))
    elif choice == 2:
        print(cipher_decryption(message, key))
    else:
        print("Invalid Choice")
if __name__ == '__main__':
    main()
8356040211c3773dbf1a40872b55ccf986289ebf | Python | HUFGhani/Stockulus-Shift | /backend.py | UTF-8 | 2,431 | 2.796875 | 3 | [] | no_license | import os
import time
import random
def path_leaf(path):
    """Return the last '/'-separated component of `path`, truncated at
    its first dot (i.e. the file name without any extension)."""
    basename = path.split("/")[-1]
    stem = basename.partition('.')[0]
    return stem
if __name__ == "__main__":
    # Collect every .txt data file (one per ticker) under Data/.
    filedir = "Data/"
    files = []
    for f in os.listdir(filedir):
        if '.txt' in f:
            files.append(filedir+f)
    files = sorted(files)
    # Starting portfolio: one share of the initial ticker and $10,000 cash.
    equity = "AAPL"
    balance = 10000.0
    stockcounter = 1
    while 1:
        with open("Data/"+equity+".txt", "r") as f:
            # Publish the currently traded ticker for the front end.
            with open("stock", "w") as stockf:
                print >> stockf, equity
            print "STOCK", equity
            for lineno, line in enumerate(f):
                # Skip metadata lines in the data file.
                skip = False
                for r in ['=', 'EXCHANGE']:
                    if r in line:
                        skip = True
                if skip:
                    continue
                date, close, high, low, _open, volume = line.strip().split(',')
                # Publish the current price (the day's high is used).
                with open("price", "w") as pricef:
                    print >> pricef, high
                print "PRICE", high
                # Read the action requested by the front end.
                with open("action", "r") as actionf:
                    action = actionf.read().strip()
                print "ACTION", action
                high = float(high)
                if action == "BUY":
                    balance -= high
                    stockcounter += 1
                if action == "SELL":
                    if stockcounter >= 1:
                        balance += high
                        stockcounter -= 1
                        if stockcounter < 0:
                            stockcounter = 0
                # Publish the cash balance.
                with open("balance", "w") as balancef:
                    print >> balancef, balance
                print "BALANCE", balance
                if "SWITCH" in action:
                    # Liquidate the position and jump to a random ticker.
                    equity = random.choice(files)
                    print equity
                    equity = path_leaf(equity)
                    print equity
                    balance += high * stockcounter
                    stockcounter = 0
                    time.sleep(5)
                    break
                print "EQUITY", equity
                time.sleep(5)
            # Data exhausted: liquidate and move on to a random ticker.
            equity = random.choice(files)
            print equity
            equity = path_leaf(equity)
            print equity
            balance += high * stockcounter
            stockcounter = 0
            time.sleep(5)
7e02cbcb10420442c186dc74cda9995bc84ee64c | Python | KienMN/30-days-of-python | /Day11/htmlmail.py | UTF-8 | 1,046 | 2.6875 | 3 | [] | no_license | from smtplib import SMTP, SMTPException
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Send a multipart (plain + HTML) test e-mail over SMTP with STARTTLS.
try:
    # Connection settings (standard submission port with STARTTLS).
    host = "smtp.gmail.com"
    port = 587
    username = "example@gmail.com"
    password = "password"
    from_email = username
    to_list = ["example@gmail.com"]
    email_connection = SMTP(host, port)
    email_connection.ehlo()
    email_connection.starttls()
    email_connection.login(username, password)
    # Build a message with a plain-text part and an HTML alternative.
    the_msg = MIMEMultipart("alternative")
    the_msg['Subject'] = "Email Testing"
    the_msg['From'] = from_email
    the_msg['To'] = to_list[0]
    plain_txt = "This is a greeting message"
    html_txt = """\
<html>
<head>
</head>
<body>
<p>Hello,<br/>
This is a message made by <a href="https://www.google.com">KienMN</a>
</p>
</body>
</html>
"""
    part_1 = MIMEText(plain_txt, "plain")
    part_2 = MIMEText(html_txt, "html")
    the_msg.attach(part_1)
    the_msg.attach(part_2)
    email_connection.sendmail(from_email, to_list, the_msg.as_string())
    email_connection.quit()
except (SMTPException, OSError) as error:
    # The original bare `except:` silently swallowed every error (even
    # programming mistakes) and never said what went wrong; report the
    # SMTP/connection failure instead.
    print("Some error occurred:", error)
| true |
5b68ba7a8f9716ee170fda768f024556be197992 | Python | inwk6312winter2019/classroomtask-2-khyathinalluri | /task2.py | UTF-8 | 365 | 3.390625 | 3 | [] | no_license | #program to write a class function for ip addressess and their subnet masks
from random import getrandbits
import ipaddress
class ipaddressess():
def __init__(self,a,b):
self.a=ipaddress.IPv4Address(a)
self.b=b
def __str__(self):
print ("ipadress is :" ,self.a)
print("subnetmask is:" ,self.b)
return
a=getrandbits(32)
c=ipaddressess(a,32)
print(c)
| true |
373f5d9f792067d38131659004eaf47e89dcd793 | Python | mayankdaruka/Omelia | /server/spreadsheet.py | UTF-8 | 1,128 | 2.921875 | 3 | [] | no_license | import gspread
from oauth2client.service_account import ServiceAccountCredentials
from pprint import pprint
class Spreadsheet:
    """Thin wrapper around a gspread client that remembers the currently
    selected spreadsheet and worksheet."""
    def __init__(self):
        self.sheetId = None
        self.worksheetId = None
        self.authorize()
        self.curr_gs = None
        self.sheet = None
    def authorize(self):
        """Authenticate against the Sheets/Drive APIs using the service
        account stored in creds.json."""
        scopes = [
            'https://www.googleapis.com/auth/spreadsheets',
            'https://www.googleapis.com/auth/drive'
        ]
        credentials = ServiceAccountCredentials.from_json_keyfile_name("creds.json", scopes)
        self.client = gspread.authorize(credentials)
    def set_sheet(self, sheetId):
        """Open the spreadsheet with the given id unless it is already open."""
        if sheetId == self.sheetId:
            return
        self.sheetId = sheetId
        self.curr_gs = self.client.open_by_key(self.sheetId)
    def set_worksheet(self, worksheetId):
        """Select the worksheet with the given gid unless already selected."""
        if worksheetId == self.worksheetId:
            return
        self.worksheetId = worksheetId
        self.select_worksheet_by_gid()
    def select_worksheet_by_gid(self):
        """Scan the open spreadsheet for the worksheet whose id matches
        self.worksheetId and remember it in self.sheet."""
        wanted = int(self.worksheetId)
        for candidate in self.curr_gs.worksheets():
            if candidate.id == wanted:
                self.sheet = candidate
                break
5231eec6941b5f3082a187b11003ffd194f9166e | Python | cleitonpin/BotLIKE | /bot.py | UTF-8 | 2,277 | 2.859375 | 3 | [] | no_license | from selenium import webdriver
import os
import time
import configparser
class InstagramBot:
    """Automates basic Instagram actions (login, follow, unfollow) through
    a Selenium-driven Chrome browser."""
    def __init__(self, username, password):
        """
        Initialise an instance of the InstagramBot class and call the
        login method to authenticate.

        Args:
            username:str: The Instagram username for a user
            password:str: The Instagram password for a user

        Attributes:
            driver:selenium.webdriver.Chrome: The chromedriver that is
                used to automate browser actions
        """
        self.username = username
        self.password = password
        self.base_url = 'https://www.instagram.com'
        self.driver = webdriver.Chrome('chromedriver.exe')
        self.login()
    def login(self):
        """Fill in the login form and submit it ('Entrar' = log in)."""
        self.driver.get(f'{self.base_url}/accounts/login/')
        self.driver.find_element_by_name('username').send_keys(self.username)
        self.driver.find_element_by_name('password').send_keys(self.password)
        self.driver.find_elements_by_xpath("//div[contains(text(), 'Entrar')]")[0].click()
        time.sleep(2)
    def nav_user(self, user):
        """Open the profile page of `user`."""
        # f-string for consistency with the rest of the class (the
        # original mixed f-strings and str.format).
        self.driver.get(f'{self.base_url}/{user}')
    def seguir_user(self, user):
        """Follow `user` by clicking the 'Seguir' button on the profile."""
        self.nav_user(user)
        follow_button = self.driver.find_elements_by_xpath("//button[contains(text(), 'Seguir')]")[0]
        follow_button.click()
    def unfollow_user(self, user):
        """Unfollow `user`: open the 'Seguindo' menu and confirm with
        'Deixar de seguir'."""
        self.nav_user(user)
        unfollow_button = self.driver.find_elements_by_xpath("//button[contains(text(), 'Seguindo')]")[0]
        unfollow_button.click()
        time.sleep(1)
        confirm_button = self.driver.find_elements_by_xpath("//button[contains(text(), 'Deixar de seguir')]")[0]
        confirm_button.click()
if __name__ == '__main__':
    # Credential loading from config.ini is left disabled; the bot is
    # currently driven with hard-coded placeholder credentials.
    # config_path = './config.ini'
    # cparser = configparser.ConfigParser()
    # cparser.read(config_path)
    # username = cparser['AUTH']['USERNAME']
    # password = cparser['AUTH']['PASSWORD']
    ig_bot = InstagramBot('username', 'password')
    #ig_bot.nav_user('garyvee')
    ig_bot.seguir_user('maaurotony')
6b7747e51f1221aaa154961422b6e919d9ed4be9 | Python | dev-nullified/blackjack-hmwk | /tests/card_tests.py | UTF-8 | 5,293 | 2.9375 | 3 | [] | no_license | import unittest
import sys
# sys.path.append('../')
from src.card import Card
class TestCardCreation(unittest.TestCase):
    """Card construction: value, face, ace flag and hidden flag defaults.

    Note: the original class defined test_card_creation_cardAce_when_false
    twice with identical bodies; the second definition silently shadowed
    the first, so the duplicate has been removed.  The commented-out
    class-level fixture was dead code and has been dropped too.
    """
    def test_card_creation_cardValue(self):
        cardVal = 3
        cardFace = str(3)
        testCard = Card(value=cardVal, face=cardFace, ace=False)
        self.assertEqual(testCard.__value__, cardVal)
    def test_card_creation_cardFace(self):
        cardVal = 3
        cardFace = str(3)
        testCard = Card(value=cardVal, face=cardFace, ace=False)
        self.assertEqual(testCard.__face__, cardFace)
    def test_card_creation_cardAce_when_True(self):
        testCard = Card(value=11, face='A', ace=True)
        self.assertTrue(testCard.__is_ace__)
    def test_card_creation_cardAce_when_false(self):
        testCard = Card(value=3, face=str(3), ace=False)
        self.assertFalse(testCard.__is_ace__)
    def test_card_creation_cardAce_is_false_by_default(self):
        # No `ace` argument supplied on purpose.
        testCard = Card(value=3, face=str(3))
        self.assertFalse(testCard.__is_ace__)
    def test_card_creation_hidden(self):
        testCard = Card(value=11, face='A', ace=True)
        testCard.__is_hidden__ = True
        self.assertTrue(testCard.__is_hidden__)
    def test_card_creation_not_hidden(self):
        testCard = Card(value=11, face='A', ace=True)
        testCard.__is_hidden__ = False
        self.assertFalse(testCard.__is_hidden__)
    def test_card_creation_not_hidden_by_default(self):
        testCard = Card(value=11, face='A', ace=True)
        self.assertFalse(testCard.__is_hidden__)
class TestCardHiddenFunction(unittest.TestCase):
    """Behaviour of hide(), unhide() and is_hidden() on an ace card."""
    @staticmethod
    def _ace_card(hidden):
        # Build the ace fixture used by every test, with an explicit
        # hidden state.
        card = Card(value=11, face='A', ace=True)
        card.__is_hidden__ = hidden
        return card
    def test_unhide_when_hidden(self):
        card = self._ace_card(True)
        card.unhide()
        self.assertFalse(card.__is_hidden__)
    def test_unhide_when_not_hidden(self):
        card = self._ace_card(False)
        card.unhide()
        self.assertFalse(card.__is_hidden__)
    def test_hide_when_not_hidden(self):
        # Preserves the original fixture, which (despite the name) starts
        # from a hidden card.
        card = self._ace_card(True)
        card.hide()
        self.assertTrue(card.__is_hidden__)
    def test_hide_when_hidden(self):
        card = self._ace_card(True)
        card.hide()
        self.assertTrue(card.__is_hidden__)
    def test_is_hidden_when_hidden(self):
        card = self._ace_card(True)
        self.assertTrue(card.is_hidden())
    def test_is_hidden_when_not_hidden(self):
        card = self._ace_card(False)
        self.assertFalse(card.is_hidden())
class TestCardFaceAndValueFunction(unittest.TestCase):
    """getFace(), getValue() and is_ace() accessors."""
    def test_card_face(self):
        card = Card(value=11, face='A', ace=True)
        self.assertEqual(card.getFace(), 'A')
    def test_card_value(self):
        card = Card(value=9, face=str(9), ace=False)
        self.assertEqual(card.getValue(), 9)
    def test_card_is_ace(self):
        card = Card(value=11, face='A', ace=True)
        self.assertEqual(card.is_ace(), True)
if __name__ == "__main__":
    unittest.main()
e6feb90e9a26a080af600229b3dfbf2a58ef9d7e | Python | cloudstrife9999/bogus-ocsp-responder | /http_dissector.py | UTF-8 | 3,653 | 3.09375 | 3 | [] | no_license | from ocsp_dissector import OCSPDissector
class HTTPDissector:
    """Dissects a raw HTTP request whose body is an OCSP request.

    parse() extracts the request line, the headers and the body; the body
    is validated against Content-Length and handed to an OCSPDissector.
    """
    def __init__(self, raw_data: bytes) -> None:
        """Store the raw request bytes; raises ValueError on None."""
        if raw_data is None:
            raise ValueError("Bad HTTP raw data")
        self.__raw_data: bytes = raw_data
        # Offset of the first byte that has not been consumed yet.
        self.__next_position_to_parse = 0
        self.__method: str = None
        self.__resource: str = None
        self.__http_version: str = None
        self.__headers: dict = {}
        self.__ocsp_raw_request: bytes = None
        self.__ocsp_dissector: OCSPDissector = None
    def parse(self) -> None:
        """Parse request line, headers and the OCSP body, in that order."""
        first_line: str = self.__get_first_line()
        self.__parse_first_line(line=first_line)
        self.__parse_headers()
        # Everything after the header/body separator is the OCSP request.
        self.__ocsp_raw_request = self.__raw_data[self.__next_position_to_parse:]
        self.__check_ocsp_request_length()
        self.__parse_ocsp_request()
    def __check_ocsp_request_length(self) -> None:
        """Compare the body length against the Content-Length header,
        when that header is present."""
        if "Content-Length" in self.__headers:
            length = int(self.__headers["Content-Length"])
            if len(self.__ocsp_raw_request) < length:
                raise ValueError("The OCSP request is shorter than expected.")
            elif len(self.__ocsp_raw_request) > length:
                raise ValueError("The OCSP request is longer than expected.")
            else:
                print("The OCSP request is %d characters long, which matches what expected.\n" % length)
    def dump(self) -> None:
        """Print a human-readable summary of the dissected request;
        raises ValueError when parse() has not run yet."""
        if self.__method is None:
            raise ValueError("Nothing to visualize!")
        else:
            s: str = "Dissected HTTP message:\n"
            s += "    Method: %s\n" % self.__method
            s += "    Requested resource: %s\n" % self.__resource
            s += "    HTTP version: %s\n" % self.__http_version
            s += "    HTTP request headers:\n"
            s += self.__dump_headers()
            s += self.__ocsp_dissector.dump()
            print(s)
    def __dump_headers(self) -> str:
        """Format the stored headers as indented 'name: value' lines."""
        s: str = ""
        for header_name, header_value in self.__headers.items():
            s += "        " + header_name + ": " + header_value + "\n"
        return s
    def __parse_ocsp_request(self) -> None:
        """Delegate parsing of the body to an OCSPDissector."""
        self.__ocsp_dissector = OCSPDissector(self.__ocsp_raw_request)
        self.__ocsp_dissector.parse()
    def __get_first_line(self) -> str:
        """Return the request line (up to the first CR, byte 13) as text
        and advance the parse offset past the CRLF."""
        line: list = []
        for byte in self.__raw_data:
            if byte != 13:
                line.append(byte)
            else:
                break
        self.__next_position_to_parse = len(line) + 2 # +2 is to exclude \r\n
        return "".join(chr(c) for c in line)
    def __parse_first_line(self, line: str) -> None:
        """Split and validate 'METHOD resource HTTP/1.1'."""
        tokens: list = line.split(" ")
        method_candidate = tokens[0]
        resource_candidate = tokens[1]
        version_candidate = tokens[2]
        if method_candidate not in ["GET", "POST"]:
            raise ValueError("Bad request method!")
        if version_candidate != "HTTP/1.1":
            raise ValueError("Bad HTTP version!")
        self.__method = method_candidate
        self.__resource = resource_candidate
        self.__http_version = version_candidate
    def __parse_headers(self) -> None:
        """Parse 'Name: value' header lines into the headers dict and
        advance the parse offset past the \\r\\n\\r\\n separator."""
        # [:-2] drops the empty fields produced by the trailing \r\n\r\n.
        tokens: bytes = self.__raw_data[self.__next_position_to_parse:].split(b"\r\n")[:-2]
        self.__next_position_to_parse = self.__raw_data.find(b"\r\n\r\n") + 4
        for token in tokens:
            token: bytes = token
            header_name, header_value = str(token, "utf-8").split(": ")
            if header_name == "" or header_value == "":
                raise ValueError("Bad header!")
            else:
                self.__headers[header_name] = header_value
| true |
1e61aab3212096a4b48d1f0ac582a7e454cc6fae | Python | yeruvamaheswar/python-challenge | /PyPoll/main.py | UTF-8 | 3,255 | 3.546875 | 4 | [] | no_license | # This will allow us to create file paths across operating systems.
import os
# Module for reading CSV files.
import csv

# PyPoll: tally an election from a CSV of (Voter ID, County, Candidate) rows,
# print the results to the terminal and write the same results to a text file.

# Path where the input CSV file is stored.
path = "/Users/mr7macbookpro/Documents/DAV BC/HomeWork/3. PythonChallenge/python-challenge/PyPoll/Resources"
# Joining the path to get the full path of csv file.
CsvPath = os.path.join(path,"election_data.csv")

# Initializing variables.
ColVoterID = []
ColCounty = []
ColCandidate=[]
PercntageList =[]
NoOfKhan =0
NoOfCorrey =0
NoOfLi =0
NoOfTooley =0

# Opening CSV file to read the items inside.
with open(CsvPath) as csvfile:
    # CSV reader specifies delimiter and variable that holds contents.
    csvreader = csv.reader(csvfile, delimiter=',')
    # Skipping the CSV column headers, so every remaining row is one vote.
    next(csvreader)
    # Read each row of data after the header into the list variables.
    for row in csvreader:
        ColVoterID.append(int(row[0]))
        ColCounty.append(row[1])
        ColCandidate.append(row[2])

# All the candidates on the ballot.
Candidates=["Khan","Correy","Li","O'Tooley"]
# Count how many votes each candidate got.
for EveryCan in ColCandidate:
    if EveryCan == Candidates[0]:
        NoOfKhan += 1
    elif EveryCan == Candidates[1]:
        NoOfCorrey += 1
    elif EveryCan == Candidates[2]:
        NoOfLi += 1
    elif EveryCan == Candidates[3]:
        NoOfTooley += 1
# Vote totals, in the same order as Candidates.
NoOfVotes = [NoOfKhan,NoOfCorrey,NoOfLi,NoOfTooley]
# Zip candidates and their vote counts together into a dictionary.
ZipCandnVotesAsDic = dict(zip(Candidates,NoOfVotes))
# Candidate (key) holding the maximum number of votes.
KeyOfMaxVoter = max(ZipCandnVotesAsDic,key=ZipCandnVotesAsDic.get)
# Percentage of the total vote won by each candidate, rounded to 2 places.
for everyNumofVotes in NoOfVotes:
    PercntageList.append(round(((everyNumofVotes/sum(NoOfVotes))*100),2))

# Printing all the results on the terminal.
print("Election Results")
print("-------------------------")
# Bug fix: the header row was already skipped with next(), so the vote total
# is len(ColVoterID). The previous "- 1" under-counted by one and disagreed
# with the total written to the text file below.
print(f"Total Votes: {len(ColVoterID)}")
print("-------------------------")
for x in range(len(Candidates)):
    print(f"{Candidates[x]}: {PercntageList[x]}% ({NoOfVotes[x]})")
print("-------------------------")
print(f"Winner: {KeyOfMaxVoter}")
print("-------------------------")

# Path where the output text file is stored.
OutputPath= "/Users/mr7macbookpro/Documents/DAV BC/HomeWork/3. PythonChallenge/python-challenge/PyPoll/analysis"
# Joining the path to get the full path of the results file.
ResultsTextFile = os.path.join(OutputPath,"VoterResults.txt")
# Writing the same results to the text file.
# (renamed the handle from "file" to avoid shadowing the builtin)
with open(ResultsTextFile,"w") as out_file:
    out_file.write("Election Results")
    out_file.write("\n")
    out_file.write("----------------------------")
    out_file.write("\n")
    out_file.write(f"Total Votes: {len(ColVoterID)}")
    out_file.write("\n")
    out_file.write("-------------------------")
    out_file.write("\n")
    for x in range(len(Candidates)):
        out_file.write(f"{Candidates[x]}: {PercntageList[x]}% ({NoOfVotes[x]})")
        out_file.write("\n")
    out_file.write("-------------------------")
    out_file.write("\n")
    out_file.write(f"Winner: {KeyOfMaxVoter}")
    out_file.write("\n")
    out_file.write("-------------------------")
| true |
8ee490605de92662a16ecec39ebbed79917fe92e | Python | ddubbu/AI-Speaker-Recognition | /follow_git_[dydtjr1128]/3. Record_wav_file_for_input/record_source.py | UTF-8 | 2,446 | 2.84375 | 3 | [] | no_license | import pyaudio # 마이크를 사용하기 위한 라이브러리
import wave
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
FORMAT = pyaudio.paInt16  # 16-bit signed samples
CHANNELS = 1  # mono recording
RATE = 44100 # sample rate (frames per second)
CHUNK = int(RATE / 10) # buffer size: at 44100 frames/s this is 100 ms of audio per read
RECORD_SECONDS = 5 # recording duration in seconds
# WAVE_OUTPUT_FILENAME = "record.wav"
def record(who):
    """Record RECORD_SECONDS of mono audio from the default microphone and
    save it as a timestamped WAV file under data_record/.

    who: label appended to the timestamped file name (e.g. a speaker id).
    Returns the path of the WAV file that was written.
    """
    print("record start!")
    # Careful with the file name: it must not contain colons.
    now = datetime.today().strftime('%Y-%m-%d-%H-%M-%S-')
    WAVE_OUTPUT_FILENAME = "data_record/" + now + str(who) + ".wav"
    print(WAVE_OUTPUT_FILENAME)
    p = pyaudio.PyAudio() # create the audio object
    stream = p.open(format=FORMAT, # 16-bit format
                    channels=CHANNELS, # open the microphone in mono
                    rate=RATE, # sample rate
                    input=True,
                    # input_device_index=1,
                    frames_per_buffer=CHUNK)
    # The buffer accumulates CHUNK frames per read.
    print("Start to record the audio.")
    frames = [] # holds the captured audio data
    for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
        # Number of 100 ms reads: 10 per second * 5 seconds = 50 reads = 5 s total
        data = stream.read(CHUNK)
        frames.append(data)
    print("Recording is finished.")
    stream.stop_stream() # stop the stream
    stream.close() # close the stream
    p.terminate() # release the audio object
    # Open WAVE_OUTPUT_FILENAME and write the captured frames to it.
    wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(p.get_sample_size(FORMAT))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()
    # For now, waveform plotting is skipped.
    # spf = wave.open(WAVE_OUTPUT_FILENAME,'r')
    #
    # signal = spf.readframes(-1)
    # signal = np.fromstring(signal, dtype=np.int16)
    #
    # # build the time axis for plotting the signal over time
    # Time = np.linspace(0,len(signal)/RATE, num=len(signal))
    #
    # fig1 = plt.figure()
    # plt.title('Voice Signal Wave...')
    # #plt.plot(signal) // plot of the raw audio data
    # plt.plot(Time, signal)
    # plt.show()
    # plt.close(fig1) # has to be closed explicitly
    print("record end!!")
    return WAVE_OUTPUT_FILENAME
| true |
0b5264c2e5915c1b540c0b80e516bfd3c42dcf9f | Python | vmalloc/json_rest | /json_rest/raw.py | UTF-8 | 334 | 2.953125 | 3 | [] | no_license | class Raw(object):
    def __init__(self, data):
        # Wrap *data* so it can be recognized as a raw (pre-encoded) payload
        # by code that checks for Raw instances.
        super(Raw, self).__init__()
        self.data = data
def __repr__(self):
return "RAW(%r)" % (self.data,)
def __eq__(self, other):
return isinstance(other, Raw) and other.data == self.data
    def __ne__(self, other):
        # Explicit for Python 2 compatibility, where __ne__ is not derived
        # automatically from __eq__.
        return not (self == other)
| true |
84de7cfee835ba5855cc831017b336e2959f5b04 | Python | Aasthaengg/IBMdataset | /Python_codes/p02789/s703743715.py | UTF-8 | 288 | 3.265625 | 3 | [] | no_license | #入力:N(int:整数)
def input1():
    """Read one line from stdin and return it as an int."""
    raw_line = input()
    return int(raw_line)
# Input: N, M (integers on one line)
def input2():
    """Read one line and return its tokens converted to int (as a map object)."""
    tokens = input().split()
    return map(int, tokens)
# Input: [n1, n2, ... nk] (a list of integers on one line)
def input_array():
    """Read one line and return its tokens as a list of ints."""
    return [int(token) for token in input().split()]
# Read N and M, then print "Yes" exactly when they are equal.
N, M = input2()
print("Yes" if N == M else "No")
| true |
ee6995e66d84d1b51e17ad3b1ca17fcc5cf3812d | Python | zn-jba/Python-TextBasedBrowser | /Problems/Multiplying lists/main.py | UTF-8 | 89 | 2.703125 | 3 | [] | no_license | def my_product(list_1, list_2):
return list(map(lambda a, b: a * b, list_1, list_2))
| true |
a9f10fdecd58ecb05dac3191918104113b79fe52 | Python | aldiwa/AldiwaLPC4337-Serial | /app.py | UTF-8 | 3,424 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# Programa : Aldiwa Serial
# Autor : Maicon Gabriel Schmitz
# Site : http://www.aldiwa.com.br
# Carrega as bibliotecas
import time, sys, threading, datetime
import serial
import requests
import logging
import pprint
import re
from ConfigParser import SafeConfigParser
# Load the settings from the .env configuration file
config = SafeConfigParser()
config.read('.env')
# Log file path, then route DEBUG-level logging to it
arquivo_log = config.get('arquivo_log', 'arquivo')
logging.basicConfig(level=logging.DEBUG, filename=arquivo_log)
# Endpoints of the local API: log endpoint and data endpoint
url_log = config.get('api_local', 'url_log')
url_api = config.get('api_local', 'url_api')
# Funcionalidade de log
def logar(sensor, mensagem):
retorno = requests.post(url=url_log.format(sensor, mensagem))
print retorno
# Funcionalidade de Chamada da API
def chamadaAPI(urlchamada, secao, sensor, conteudo):
# Grava os dados de temperatura
try:
retorno = requests.post(url=urlchamada.format(secao, sensor, conteudo))
except requests.exceptions.Timeout:
# Maybe set up for a retry, or continue in a retry loop
raise Exception("Erro: Tempo excedido")
except requests.exceptions.TooManyRedirects:
# Tell the user their URL was bad and try a different one
raise Exception("Erro: Muitos redirecionamentos")
except requests.exceptions.RequestException as e:
# catastrophic error. bail.
raise Exception("Erro: Catastrofico")
print e
sys.exit(1)
# Configure the serial port connection
conexaoSerial = serial.Serial()
conexaoSerial.port = '/dev/tty.usbserial'
conexaoSerial.baudrate = 115200
conexaoSerial.timeout = 1  # read timeout in seconds
# Open the serial port and confirm it is open
conexaoSerial.open()
conexaoSerial.isOpen()
while 1:
try:
# Captura os dados enviados pelo dispositivo
dados = conexaoSerial.readline()
# Remove as quebras de linha
dados = dados.rstrip('\r\n')
# Se a string possui os delimitadores
if dados.startswith('#') and dados.endswith('#'):
# Remove os delimitadores da string
dados = re.sub('[#]', '', dados)
# Obtem os dados
tipo = dados.split(";")[0]
sensor = dados.split(";")[1]
dataatual = dados.split(";")[2]
valor = dados.split(";")[3]
# Testa o tipo dos valores
if tipo == "0":
print 'uptime'
elif tipo == "1":
if sensor == "1":
# Grava os dados de fluxo de agua
chamadaAPI(url_api, 'fluxoagua', sensor, valor)
print 'fluxo'
print valor
print dataatual
else:
# Grava os dados de vazao de agua
chamadaAPI(url_api, 'vazaoagua', 1, valor)
print 'possui vazao de agua'
print valor
print dataatual
elif tipo == "2":
# Grava os dados de temperatura
chamadaAPI(url_api, 'temperatura', sensor, valor)
elif tipo == "3":
# Grava os dados de umidade
chamadaAPI(url_api, 'umidade', sensor, valor)
else:
print 'tipo nao implementado'
# Percorre os valores da string
except KeyboardInterrupt:
print "\nFechando..."
conexaoSerial.close()
| true |
7c6816483673eea6dda1f49dcf0d05a6168e549d | Python | thehacker-4chan/boobsnail | /excel4lib/sheet/cell.py | UTF-8 | 3,146 | 3.671875 | 4 | [
"MIT"
] | permissive | import string
class CellReferenceStyle:
    '''
    Constants naming the two Excel cell-reference styles supported by Cell:
    R1C1 (row/column numbers) and A1.
    '''
    RC_STYLE = 1
    A1_STYLE = 2


class Cell(object):
    '''
    A single Excel cell: an address (`x` = column number, `y` = row number)
    plus a stored `value` and an optional `tag` used to group related cells.
    '''

    def __init__(self, x, y, value="", tag=""):
        # Column number
        self.x = x
        # Row number
        self.y = y
        # Cell content
        self.value = value
        # Free-form label used to group cells together
        self.tag = tag
        # Reference style used when formatting addresses (R1C1 by default)
        self.reference_style = CellReferenceStyle.RC_STYLE
        # Characters representing ROW and COLUMN in RC_STYLE addresses
        self.row_character = "R"
        self.col_character = "C"
        # When True, the cell will not be placed in the worksheet.
        self.block_add = False

    def get_cell_address(self, row_char=None, col_char=None):
        '''
        Format this cell's address according to `reference_style`.

        For RC_STYLE the result is "<row_char><y><col_char><x>", using
        `row_character`/`col_character` ("R"/"C" by default) when the
        arguments are not given. For any other style the column and row
        numbers are simply concatenated ("<x><y>"); note this branch does
        not use get_column_letter().

        :param row_char: character representing ROW
        :param col_char: character representing COLUMN
        :return: string representing the cell address
        '''
        active_row = row_char if row_char else self.row_character
        active_col = col_char if col_char else self.col_character
        if self.reference_style != CellReferenceStyle.RC_STYLE:
            return "{}{}".format(self.x, self.y)
        return "{}{}{}{}".format(active_row, self.y, active_col, self.x)

    def get_address(self):
        '''
        Shorthand for get_cell_address() with the default row/column chars.

        :return: string representing the cell address
        '''
        return self.get_cell_address()

    def get_column_letter(self):
        '''
        Compute this cell's column address in A1 style (1 -> "A", 27 -> "AA").

        :return: column letters for column number `x`
        '''
        letters = ""
        remaining = self.x
        while remaining > 0:
            # Bijective base-26: shift by one so 1..26 map onto A..Z.
            remaining, offset = divmod(remaining - 1, 26)
            letters = chr(65 + offset) + letters
        return letters

    def __str__(self):
        return self.value

    def __getitem__(self, subscript):
        # Integer indices and slice objects both index the string form
        # identically, so a single expression covers both cases.
        return str(self)[subscript]

    def get_length(self):
        '''
        Length of this cell's `value` as a string.

        :return: int length of the cell `value`
        '''
        return len(str(self))

    def __len__(self):
        return self.get_length()

    def __add__(self, other):
        return str(self) + other

    def __radd__(self, other):
        return other + str(self)
88ab543f4e088617da6dc6ef0e31f2158dc2caa2 | Python | Gers2017/glitch | /shell.py | UTF-8 | 195 | 2.59375 | 3 | [
"MIT"
] | permissive | from glitch import run
# Simple REPL: read a command, evaluate it with glitch's run(), and print the
# result. Stops on "exit()" or on the first reported error.
while True:
    command = input("glitch >>> ")
    if command == "exit()":
        break
    value, error_text = run(command)
    if error_text:
        print(error_text)
        break
    print(value)
| true |
69dcd03f3ce1a3ad6a879f74bd275996a9e78a95 | Python | Zichen-Yan/DRL-Repo-Pytorch- | /Distributed-RL/ray/PPO/storage.py | UTF-8 | 1,076 | 2.5625 | 3 | [] | no_license | import ray
@ray.remote
class SharedStorage(object):
    """Ray actor used as shared storage between distributed workers: holds
    the agent (so its weights can be read/written centrally) plus
    update/sample/interaction counters.
    """
    def __init__(self, agent):
        # Times add_update_counter() has been called.
        self.update_counter = 0
        # Total steps accumulated via add_interactions().
        self.interaction_counter = 0
        # Samples counted since the last reset_sample_counter().
        self.sample_counter = 0
        # create actor based on config details
        self.agent = agent
        # Only referenced by the commented-out set_eval_reward below.
        self.evaluation_reward = {}
    def get_weights(self):
        """Return the stored agent's current weights."""
        return self.agent.get_weights()
    def set_weights(self, weights):
        """Load *weights* into the stored agent."""
        return self.agent.set_weights(weights)
    def add_update_counter(self):
        """Increment the update counter by one."""
        self.update_counter += 1
    def get_update_counter(self):
        """Return the current update counter."""
        return self.update_counter
    def add_sample_counter(self):
        """Increment the sample counter by one."""
        self.sample_counter += 1
    def get_sample_counter(self):
        """Return the current sample counter."""
        return self.sample_counter
    def reset_sample_counter(self):
        """Reset the sample counter to zero."""
        self.sample_counter = 0
    # def set_eval_reward(self, step, update_number, rewards):
    #     self.evaluation_reward[update_number] = rewards
    def add_interactions(self, steps):
        """Add *steps* environment interactions to the running total."""
        self.interaction_counter += steps
    def get_interactions(self):
        """Return the total number of recorded interactions."""
        return self.interaction_counter
| true |
7abe341659fd249f77440546574abe0db321d442 | Python | ZiJianZhao/Couplet-Machine | /couplet_website/filter.py | UTF-8 | 5,812 | 2.5625 | 3 | [] | no_license | #-*- coding:utf-8 -*-
import re, os, sys, argparse, logging, collections
import codecs
import math
import random
from collections import defaultdict
import pypinyin
from pypinyin import pinyin
def read_chaizi(mode = 0):
    """Load a character-decomposition ("chaizi") couplet table from disk.

    mode: 0 loads the retrieval table, 1 loads the generation table.
    Each non-empty line must hold exactly three whitespace-separated fields;
    the returned dict maps the third field (the compound character) to a
    list of the first two fields (its component parts).
    """
    if mode == 0:
        filename=u'/home/zjz17/couplet/检索拆字联.txt'
    else:
        filename=u'/home/zjz17/couplet/生成拆字联.txt'
    table = {}
    with codecs.open(filename,encoding='utf-8') as f:
        for raw_line in f.readlines():
            raw_line = raw_line.strip()
            if not raw_line:
                continue
            fields = raw_line.split()
            assert len(fields) == 3
            table[fields[2]] = [fields[0], fields[1]]
    return table
def get_chaizi_dict(dic, word2idx):
    """Convert the decomposition table *dic* (compound -> [part_a, part_b])
    into index-based lookup tables using the vocabulary *word2idx*.

    Returns (he2chai_c, chai2he_c, he2chai_n, chai2he_n):
      the *_c pair covers compounds whose two parts map to the same index
      (repeated part), the *_n pair covers compounds with distinct parts.
    Entries whose lookups fail are silently skipped.
    """
    he2chai_n = {}                 # distinct parts: compound -> [(a, b)]
    chai2he_n = defaultdict(list)  # distinct parts: a -> [(b, compound)]
    he2chai_c = {}                 # repeated part: compound -> [(a, a)]
    chai2he_c = defaultdict(list)  # repeated part: a -> [(a, compound)]
    for compound in dic:
        try:
            whole_idx = word2idx.get(compound)
            first_idx = word2idx.get(dic[compound][0])
            second_idx = word2idx.get(dic[compound][1])
            if first_idx == second_idx:
                he2chai_c[whole_idx] = [(first_idx, second_idx)]
                chai2he_c[first_idx].append((second_idx, whole_idx))
            else:
                he2chai_n[whole_idx] = [(first_idx, second_idx)]
                chai2he_n[first_idx].append((second_idx, whole_idx))
        except:
            continue
    return he2chai_c, chai2he_c, he2chai_n, chai2he_n
def get_chaizi(string_list, dic):
    """Tag positions of *string_list* that form a character-decomposition
    (chaizi) pattern: a compound character plus its two component parts.

    Each tagged position receives [role, begin, mid, end, kind] where role is
    'B'/'I'/'E' (first/middle/last of the three positions), begin/mid/end are
    the sorted positions of the triple, and kind is 'C*' for a component or
    'H*' for the compound, with a trailing 'C'/'N' flag for duplicated vs
    distinct components. Positions not in any pattern stay None.
    """
    reversed_list = string_list[::-1]
    tags = [None for _ in range(len(string_list))]
    for pos, char in enumerate(string_list):
        if tags[pos] is not None or char not in dic:
            continue
        try:
            part_a, part_b = dic[char][0], dic[char][1]
            if part_a != part_b:
                first_pos = string_list.index(part_a)
                second_pos = string_list.index(part_b)
            else:
                # Duplicated component: take the first and last occurrences.
                first_pos = string_list.index(part_a)
                second_pos = len(string_list) - reversed_list.index(part_b) - 1
        except:
            continue
        if first_pos == -1 or second_pos == -1 or first_pos == second_pos:
            continue
        lo = min([first_pos, second_pos, pos])
        hi = max([first_pos, second_pos, pos])
        [mid] = list(set([first_pos, second_pos, pos]) - set([lo, hi]))
        for idx in [first_pos, second_pos, pos]:
            if idx == lo:
                tags[idx] = ['B', lo, mid, hi]  # first position of the triple
            elif idx == hi:
                tags[idx] = ['E', lo, mid, hi]  # last position of the triple
            else:
                tags[idx] = ['I', lo, mid, hi]  # middle position
        if string_list[first_pos] == string_list[second_pos]:
            # 'C' = component, 'H' = compound; second letter C marks duplicates.
            tags[first_pos].append('CC')
            tags[second_pos].append('CC')
            tags[pos].append('HC')
        else:
            tags[first_pos].append('CN')
            tags[second_pos].append('CN')
            tags[pos].append('HN')
    return tags
def get_repetations(string_list):
    """Map every position in *string_list* to the index of the first
    occurrence of the token found at that position."""
    first_occurrence = {}
    # Walking backwards leaves the smallest index as the stored value.
    for pos in range(len(string_list) - 1, -1, -1):
        first_occurrence[string_list[pos]] = pos
    return [first_occurrence[token] for token in string_list]
def get_rhyme(string_list):
    """Classify the tone of every character in *string_list* (a list of
    single-character strings): 1 for pinyin tones 1-2, 2 for tones 3-4,
    3 for the punctuation marks ',' and '。', 0 otherwise.
    """
    string_list = ''.join(string_list)
    # pypinyin TONE2 style embeds the tone digit inside each syllable,
    # so the tone can be recovered with a digit regex below.
    rhyme = pinyin(string_list, style=pypinyin.TONE2)
    result = []
    # NOTE(review): this indexes rhyme and string_list in lockstep, which
    # assumes pinyin() returns one entry per input character — the length
    # check at the bottom guards against a mismatch.
    for i in range(len(rhyme)):
        if string_list[i] == u'。' or string_list[i] == u',':
            result.append(3)
            continue
        word = rhyme[i][0]
        lis = re.findall(r'\d+', word)
        if len(lis) == 0:
            # No tone digit present: treated as tone 1.
            tone = 1
        else:
            tone = int(lis[0])
        if tone == 1 or tone == 2:
            result.append(1)
        elif tone == 3 or tone == 4:
            result.append(2)
        else:
            result.append(0)
    if len(result) != len(string_list):
        # Debug leftover (Python 2 print/raw_input): pauses interactively on
        # a length mismatch; consider removing in production.
        print string_list
        raw_input()
    return result
def rescore(string_list, results):
    """Filter candidate couplet lines against the source line.

    string_list: whitespace-separated source sentence (str).
    results: iterable of (score, candidate_sentence) pairs, where the
        candidate is a whitespace-separated str.

    Keeps only candidates that match the source in token count, punctuation
    positions and character-repetition pattern, and returns them as
    (score, joined_sentence) pairs.
    """
    source_tokens = string_list.strip().split()

    # Keep only candidates with the same number of tokens as the source.
    candidates = []
    for entry in results:
        tokens = entry[1].strip().split()
        if len(tokens) == len(source_tokens):
            candidates.append((entry[0], tokens))

    # Punctuation must occur at exactly the same positions, and nowhere else.
    filtered = []
    for score, tokens in candidates:
        matches = True
        for pos in range(len(source_tokens)):
            src = source_tokens[pos]
            if src == u',' or src == u'。':
                if tokens[pos] != src:
                    matches = False
                    break
            elif tokens[pos] == u',' or tokens[pos] == u'。':
                matches = False
                break
        if matches:
            filtered.append((score, tokens))
    candidates = filtered

    # The character-repetition pattern must match the source exactly.
    source_pattern = get_repetations(source_tokens)
    filtered = []
    for score, tokens in candidates:
        if get_repetations(tokens) == source_pattern:
            filtered.append((score, tokens))
    candidates = filtered

    # NOTE: a "rhyme suitability" step was planned here but never
    # implemented; the survivors are just joined back into strings.
    return [(score, ''.join(tokens)) for score, tokens in candidates]
| true |
20ccc11986b529559c5033d6e96c52f8ff443a55 | Python | Anoosh101/ClassroomHelper | /activeUserClass.py | UTF-8 | 1,812 | 2.9375 | 3 | [] | no_license | ###########################################################################
#
# Class used to hold the information of the actual logged on user details.
# Will be deleted on program close.
#
###########################################################################
from dbHandler import *
class activeUserClass(object):
    """Holds the details of the currently logged-on user.

    Discarded when the program closes. Note that the DB handle and the user
    fields below are class-wide shared variables (shared across instances).
    """
    # class-wide shared variables
    crDB = DbConn("db/crDB.db")
    c = crDB.cursor
    # records from 'student' table
    # id, studentID, studentGender, studentSurname, studentName, studentEmail, studentPassword
    # records from 'teacher' table
    # id, teacherID, teacherTitle, teacherSurname, teacherName, teacherEmail, teacherPassword
    userType = ""
    userID = ""
    userName = ""
    userSurname = ""
    userEmail = ""

    def setUID(self, uid):
        """Store the user id, derive the user type from its prefix
        (numeric -> student, "st" -> teacher) and load the matching record."""
        self.userID = uid
        if self.userID[0:2].isnumeric():
            self.userType = "student"
        if self.userID[0:2] == "st":
            self.userType = "teacher"
        self.getUserData()

    def getUserData(self):
        """Fetch surname and first name for the current user id from the DB."""
        # Security fix: the original built the SQL by concatenating the
        # user-supplied id, which is open to SQL injection. Parameterized
        # queries assume sqlite3-style '?' placeholders — verify that the
        # DbConn backend uses this paramstyle.
        if self.userType == "student":
            self.c.execute(
                "SELECT studentSurname, studentName FROM student WHERE studentID = ?",
                (self.userID,))
        if self.userType == "teacher":
            self.c.execute(
                "SELECT teacherSurname, teacherName FROM teacher WHERE teacherID = ?",
                (self.userID,))
        fetchedData = self.c.fetchone()
        # Bug fix: the SELECT returns (surname, name); the original assigned
        # the surname to userName and the name to userSurname (swapped).
        self.userSurname = fetchedData[0]
        self.userName = fetchedData[1]
| true |
f6df84d06e5e6c722512df1e98d1bf10082c6571 | Python | TayKK/ICT1008T2-Project-2020 | /mrt.py | UTF-8 | 10,047 | 2.53125 | 3 | [] | no_license | import osmnx as ox
import folium as fo
import geopandas as gpd
import heapq as hq
import pandas as pd
import json as js
import networkx as nx
import numpy as np
import geopandas as gpd
from pandas import json_normalize
import json
import os
import shapely
# Read the CSV file holding the station names and coordinates of the east
# LRT loop; the geometry column is stored as WKT text and must be parsed.
df_east = pd.read_csv('MRT/MRT-EAST.csv')
df_east['geometry'] = df_east['geometry'].apply(shapely.wkt.loads)
geo_df_east = gpd.GeoDataFrame(df_east, crs="EPSG:4326", geometry='geometry')
# Same for the west LRT loop.
df_west = pd.read_csv('MRT/MRT-WEST.csv')
df_west['geometry'] = df_west['geometry'].apply(shapely.wkt.loads)
geo_df_west = gpd.GeoDataFrame(df_west, crs="EPSG:4326", geometry='geometry')
class Mrt:
    """Plans a Punggol LRT route between two coordinates and renders it on a
    folium map. MrtAlgo() does the work; the getters expose the coordinates
    of the first/last station on the computed route.
    """

    def __init__(self):
        # Bug fix: the original assigned bare locals (start_x = None, ...)
        # that were discarded immediately. The defaults now live on the
        # instance, so the getters return None instead of raising
        # AttributeError before MrtAlgo() runs (or when no route is found).
        self.start_x = None
        self.start_y = None
        self.end_x = None
        self.end_y = None
        self.firstx = None
        self.firsty = None
        self.lastx = None
        self.lasty = None

    def MrtAlgo(self, x1, y1, x2, y2):
        """Compute the LRT route from (x1, y1) to (x2, y2) (lat, lon) and
        return a folium map with the start/end markers, the stations and the
        route drawn on it.
        """
        self.start_x = x1
        self.start_y = y1
        self.end_x = x2
        self.end_y = y2
        start_coordinate = (self.start_x, self.start_y)
        end_coordinate = (self.end_x, self.end_y)
        # Define the coordinates where the map will be centred in folium.
        centreCoordinate = (1.407937, 103.901702)
        pm = fo.Map(location=centreCoordinate,
                    zoom_start=15, control_scale=True)
        # Plot the start point and end point on the map with folium icons.
        fo.Marker(start_coordinate, popup="start", icon=fo.Icon(
            color='red', icon='info-sign')).add_to(pm)
        fo.Marker(end_coordinate, popup="end", icon=fo.Icon(
            color='red', icon='info-sign')).add_to(pm)
        # Read the Punggol boundary polygon.
        punggol = gpd.read_file('geojson/polygon-punggol.geojson')
        polygon = punggol['geometry'].iloc[0]
        # Load the pre-built station graph (the commented lines show how it
        # was originally downloaded from OSM).
        # mrt_station_response = ox.core.osm_net_download(
        #     polygon, infrastructure='node["railway"="station"]')
        # mrt_station_Graph = ox.core.create_graph(
        #     mrt_station_response, retain_all=True)
        mrt_station_Graph = ox.save_load.load_graphml("mrt.graphml")
        mrt_station_Node, mrt_station_Edge = ox.graph_to_gdfs(
            mrt_station_Graph)
        # Names and OSM ids of the stations on the west and east LRT loops.
        mrt_west_stations = {1840734606: 'Sam Kee', 1840734600: 'Punggol Point', 1840734607: 'Samudera',
                             1840734598: 'Nibong', 1840734610: 'Sumang', 1840734608: 'Soo Teck', 213085056: 'Punggol'}
        mrt_east_stations = {1840734592: 'Cove', 1840734597: 'Meridian', 1840734578: 'Coral Edge',
                             1840734604: 'Riviera', 1840734594: 'Kadaloor', 1840734599: 'Oasis', 1840734593: 'Damai', 213085056: 'Punggol'}
        # Adjacency list of the stations, keyed by OSM id.
        graph = {213085056: [1840734593, 1840734592, 1840734608, 1840734606],
                 1840734593: [213085056, 1840734599],
                 1840734599: [1840734593, 1840734594],
                 1840734594: [1840734599, 1840734604],
                 1840734604: [1840734594, 1840734578],
                 1840734578: [1840734604, 1840734597],
                 1840734597: [1840734578, 1840734592],
                 1840734592: [1840734597, 213085056],
                 1840734608: [213085056, 1840734610],
                 1840734610: [1840734608, 1840734598],
                 1840734598: [1840734610, 1840734607],
                 1840734607: [1840734598, 1840734600],
                 1840734600: [1840734607, 1840734606],
                 1840734606: [1840734600, 213085056]
                 }
        # Use osmnx to find the stations nearest to the start/end coordinates.
        mrt_start_osmid = ox.geo_utils.get_nearest_node(
            mrt_station_Graph, start_coordinate)
        mrt_end_osmid = ox.geo_utils.get_nearest_node(
            mrt_station_Graph, end_coordinate)

        # BFS over the station graph; returns the list of station ids on the
        # shortest route, or 0 when start == end / no route exists.
        def bfs_shortest_path(graph, start, end):
            visited = []
            queue = [[start]]
            if start == end:
                return 0
            # Traverse the graph, expanding each unvisited node's neighbours.
            while queue:
                path = queue.pop(0)
                node = path[-1]
                if node not in visited:
                    neighbours = graph[node]
                    # Extend the current path by each neighbour and enqueue it.
                    for neighbour in neighbours:
                        short_route = list(path)
                        short_route.append(neighbour)
                        queue.append(short_route)
                        # Stop as soon as the destination is reached.
                        if neighbour == end:
                            return short_route
                    visited.append(node)
            return 0

        # Print each station on the route and mark it on the map in blue.
        def mrt_station_display(osm_df, east, west, route):
            for station in route:
                # Look up the station's lat/lon by OSM id; west loop first.
                if station in west:
                    current_coord_lat = float(
                        osm_df[osm_df['osmid'] == station]['y'].values[0])
                    current_coord_long = float(
                        osm_df[osm_df['osmid'] == station]['x'].values[0])
                    print(west[station])
                    fo.Marker([current_coord_lat, current_coord_long], popup=west[station], icon=fo.Icon(
                        color='blue', icon='info-sign')).add_to(pm)
                else:
                    # Not on the west loop: resolve it from the east loop.
                    current_coord_lat = float(
                        osm_df[osm_df['osmid'] == station]['y'].values[0])
                    current_coord_long = float(
                        osm_df[osm_df['osmid'] == station]['x'].values[0])
                    fo.Marker([current_coord_lat, current_coord_long], popup=east[station], icon=fo.Icon(
                        color='blue', icon='info-sign')).add_to(pm)
                    print(east[station])

        # Collect the geometry rows joining consecutive stations and draw the
        # route as a blue GeoJson layer on the folium map.
        def mrt_route_display(east, east_geo, west, west_geo, route, fo_map):
            result_df = pd.DataFrame(columns=west_geo.columns)
            for i in range(len(route) - 1):
                current_station, next_station = route[i], route[i + 1]
                if current_station in west and next_station in west:
                    if ((west_geo['u'] == current_station) & (west_geo['v'] == next_station)).any():
                        row = west_geo[(west_geo['u'] == current_station)
                                       & (west_geo['v'] == next_station)]
                    else:
                        row = west_geo[(west_geo['v'] == current_station)
                                       & (west_geo['u'] == next_station)]
                else:
                    if ((east_geo['u'] == current_station) & (east_geo['v'] == next_station)).any():
                        row = east_geo[(east_geo['u'] == current_station)
                                       & (east_geo['v'] == next_station)]
                    else:
                        row = east_geo[(east_geo['v'] == current_station)
                                       & (east_geo['u'] == next_station)]
                result_df = result_df.append(row)
            result_geo_df = gpd.GeoDataFrame(
                result_df, crs="EPSG:4326", geometry='geometry')
            fo.GeoJson(result_geo_df, style_function=lambda x: {
                "color": "blue", "weight": "3"}, name="MRT").add_to(fo_map)

        route = bfs_shortest_path(graph, mrt_start_osmid, mrt_end_osmid)
        print("\n MRT Taken:")
        if route != 0:
            print(route)
            mrt_station_display(mrt_station_Node, mrt_east_stations,
                                mrt_west_stations, route)
            mrt_route_display(mrt_east_stations, geo_df_east,
                              mrt_west_stations, geo_df_west, route, pm)
            # Record the coordinates of the first and last stations by OSM id.
            osmid_st = int(route[0])
            osmid = int(route[-1])
            self.firstx = mrt_station_Node[mrt_station_Node["osmid"]
                                           == osmid_st]['y'].values[0]
            self.firsty = mrt_station_Node[mrt_station_Node["osmid"]
                                           == osmid_st]['x'].values[0]
            self.lasty = mrt_station_Node[mrt_station_Node["osmid"]
                                          == osmid]['x'].values[0]
            self.lastx = mrt_station_Node[mrt_station_Node["osmid"]
                                          == osmid]['y'].values[0]
        else:
            # Start and end resolve to the same station: no LRT leg needed,
            # so the "last" position is simply the start coordinate.
            print("MRT is not needed!")
            self.lasty = self.start_y
            self.lastx = self.start_x
        print("\n")
        return pm

    def getLastx(self):
        return self.lastx

    def getLasty(self):
        return self.lasty

    def getFirstx(self):
        return self.firstx

    def getFirsty(self):
        return self.firsty
| true |
fdb9d899aa178acd71f6791d4e9b2bff91162e42 | Python | cstrahan/python_practice | /coding_bat/list_2/big_diff.py | UTF-8 | 712 | 3.71875 | 4 | [] | no_license | import pytest
# Given an array length 1 or more of ints, return the difference between the
# largest and smallest values in the array. Note: the built-in min(v1, v2) and
# max(v1, v2) functions return the smaller or larger of two values.
def big_diff(nums):
pass
@pytest.mark.parametrize(
"nums,expected",
[
([10, 3, 5, 6], 7),
([7, 2, 10, 9], 8),
([2, 10, 7, 2], 8),
([2, 10], 8),
([10, 2], 8),
([10, 0], 10),
([2, 3], 1),
([2, 2], 0),
([2], 0),
([5, 1, 6, 1, 9, 9], 8),
([7, 6, 8, 5], 3),
([7, 7, 6, 8, 5, 5, 6], 3),
],
)
def test_big_diff(nums, expected):
assert big_diff(nums) == expected
| true |