blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b787f8649d995938c0dd147dec09375d9d5b1c29 | Python | Satwik95/Coding-101 | /LeetCode/Top 100/medianOfSortedArrays.py | UTF-8 | 1,369 | 3.625 | 4 | [] | no_license | """
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).
"""
import sys
class Solution:
    def findMedianSortedArrays(self, nums1, nums2):
        """Return the median of two sorted arrays in O(log(m+n)).

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: float
        """
        # Always binary-search the shorter array so every cut index is valid.
        if len(nums2) < len(nums1):
            return self.findMedianSortedArrays(nums2, nums1)
        lo, hi = 0, len(nums1)
        while lo <= hi:
            cut1 = int((lo + hi) / 2)
            # cut2 is chosen so the left halves together hold ceil(total/2).
            cut2 = int((len(nums1) + len(nums2) + 1) / 2) - cut1
            # Sentinels stand in for elements beyond either end of an array.
            left1 = nums1[cut1 - 1] if cut1 != 0 else -sys.maxsize
            left2 = nums2[cut2 - 1] if cut2 != 0 else -sys.maxsize
            right1 = nums1[cut1] if cut1 != len(nums1) else sys.maxsize
            right2 = nums2[cut2] if cut2 != len(nums2) else sys.maxsize
            if left1 <= right2 and left2 <= right1:
                # Valid partition: the median straddles the cut.
                if (len(nums1) + len(nums2)) % 2 == 0:
                    return float(max(left1, left2) + min(right1, right2)) / 2
                return float(max(left1, left2))
            if left1 > right2:
                hi = cut1 - 1      # cut in nums1 is too far right
            else:
                lo = cut1 + 1      # cut in nums1 is too far left
| true |
6e40a25cc8c995ec238932c8c3858044b356d416 | Python | KaspariK/leetcode-python | /group-anagrams-49/test_solution.py | UTF-8 | 517 | 3.015625 | 3 | [] | no_license | import unittest
from solution import Solution
class TestSolution(unittest.TestCase):
    """Unit tests for Solution.groupAnagrams (LeetCode 49)."""

    def test_groupAnagrams(self):
        solver = Solution()
        grouped = solver.groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"])
        expected = [["eat", "tea", "ate"], ["tan", "nat"], ["bat"]]
        self.assertEqual(grouped, expected)

    def test_groupAnagrams_empty_list(self):
        solver = Solution()
        grouped = solver.groupAnagrams([""])
        self.assertEqual(grouped, [[""]])
# Allow running this test module directly: `python test_solution.py`.
if __name__ == "__main__":
    unittest.main()
| true |
9bfa75d8cc2c20b9a27c90624cd3c92f0f15dc69 | Python | turash104/NLP | /evaluator/tmosharr/mateor_blue.py | UTF-8 | 7,938 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
import sys
import argparse # optparse is deprecated
from itertools import islice # slicing for iterators
import numpy as np
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wdn
from nltk.stem import wordnet as wn
from nltk import pos_tag
from nltk import word_tokenize
from itertools import chain
import string
import nltk
# Command-line interface: hypothesis/reference input file, n-gram language
# model, and tunable parameters (alpha/beta/gamma are parsed but not used
# anywhere below — presumably left over from an earlier scoring formula).
parser = argparse.ArgumentParser(description='Evaluate translation hypotheses.')
parser.add_argument('-i', '--input', default='./data/hyp1-hyp2-ref', help='input file (default data/hyp1-hyp2-ref)')
parser.add_argument('-m', '--model', default='./model/ngram_model', help='input file (model)')
parser.add_argument('-n', '--num_sentences', default=None, type=int, help='Number of hypothesis pairs to evaluate')
parser.add_argument('-a', '--alpha', default=0.1, type=float, help='Number of hypothesis pairs to evaluate')
parser.add_argument('-b', '--beta', default=3.0, type=float, help='Number of hypothesis pairs to evaluate')
parser.add_argument('-g', '--gamma', default=0.5, type=float, help='Number of hypothesis pairs to evaluate')
opts = parser.parse_args()
# Shared globals used by the helpers below.
cachedStopWords = stopwords.words("english")  # English stopword list from NLTK
wnlemma = wn.WordNetLemmatizer()              # WordNet lemmatizer
ngram_dict = {}                               # n-gram tuple -> score, filled from the model file
def wn_contains(word, ref):
    """Return True if any WordNet synonym of *word* appears in *ref*."""
    lemma_lists = [synset.lemma_names() for synset in wdn.synsets(''.join(word))]
    synonym_names = set(chain.from_iterable(lemma_lists))
    reference_words = set(''.join(r) for r in ref)
    # Non-empty intersection means at least one synonym matched.
    return bool(synonym_names & reference_words)
def levenshtein(s1, s2):
    """Compute the Levenshtein (edit) distance between two sequences
    using the classic two-row dynamic-programming scheme."""
    # Keep the shorter sequence as s2 so the rows stay small.
    if len(s1) < len(s2):
        s1, s2 = s2, s1
    if len(s2) == 0:
        return len(s1)
    prev = list(range(len(s2) + 1))
    for row, ch1 in enumerate(s1, start=1):
        curr = [row]
        for col, ch2 in enumerate(s2):
            insertion = prev[col + 1] + 1
            deletion = curr[col] + 1
            substitution = prev[col] + (ch1 != ch2)
            curr.append(min(insertion, deletion, substitution))
        prev = curr
    return prev[-1]
def is_similar(word, ref):
    """Return True if any WordNet synonym of *word* is nearly identical in
    spelling (normalised edit similarity > 0.8) to any word in *ref*.

    Bug fix: the original compared the raw integer edit distance to 0.8
    (`levenshtein(...) > 0.8`), which is true for every pair of
    non-identical strings, so "similar" effectively meant "different".
    We compare a similarity ratio in [0, 1] instead.
    """
    synonyms = [s.lemma_names() for s in wdn.synsets(''.join(word))]
    words = [''.join(r) for r in ref]
    for syn in chain.from_iterable(synonyms):
        for w in words:
            longest = max(len(str(syn)), len(str(w)))
            if longest == 0:
                continue  # two empty strings carry no signal
            similarity = 1.0 - float(levenshtein(str(syn), str(w))) / longest
            if similarity > 0.8:
                return True
    return False
def matches(h, e):
    """Smoothed precision, recall and F1 of hypothesis n-grams *h* against
    reference n-grams *e*. A hypothesis n-gram counts as matched when it
    appears exactly, via a WordNet synonym, or via near-identical spelling."""
    hits = 0.0001  # additive smoothing keeps F1 defined when nothing matches
    for token in h:
        if token in e or wn_contains(token, e) or is_similar(token, e):
            hits += 1
    recall = float(hits) / float(len(e)) if e else 0.0001
    precision = float(hits) / float(len(h)) if h else 0.0001
    f1 = 2 * precision * recall / (precision + recall)
    return precision, recall, f1
def str_matches(h, e):
    """Smoothed precision of hypothesis n-grams *h* against reference *e*,
    counting exact membership only."""
    hits = 0.0001  # smoothing: never exactly zero
    for token in h:
        if token in e:
            hits += 1
    return float(hits) / float(len(h)) if h else 0.0001
def get_type_wordnet(tag):
    """Map a Penn Treebank POS tag prefix to a WordNet POS letter.

    N* -> noun, V* -> verb, J* -> adjective, R* -> adverb; anything else
    falls back to verb, matching the lemmatizer's most permissive mode.
    """
    for prefix, wordnet_pos in (('N', 'n'), ('V', 'v'), ('J', 'a'), ('R', 'r')):
        if tag.startswith(prefix):
            return wordnet_pos
    return 'v'
def sentences():
    """Yield [h1_tokens, h2_tokens, ref_tokens] for each ' ||| '-separated
    line of the input file.

    NOTE: Python 2 only — `str.decode('unicode_escape')` and
    `str.translate(None, ...)` are not available on text strings in
    Python 3.
    """
    with open(opts.input) as f:
        for pair in f:
            value = [[], [], []]
            for i, sentence in enumerate(pair.split(' ||| ')):
                # Drop non-ASCII bytes and lowercase before tagging.
                sentence = sentence.decode('unicode_escape').encode('ascii', 'ignore').lower()
                # pos_tag yields (token, tag) pairs; w[:1]/w[1:] are the
                # 1-tuples (token,) and (tag,), re-joined into strings.
                arr = [wnlemma.lemmatize(''.join(w[:1]), get_type_wordnet(''.join(w[1:])))
                       for w in pos_tag(word_tokenize(sentence))]
                # Strip punctuation and re-split into clean tokens.
                value[i] = str(" ".join(arr)).translate(None, string.punctuation).strip().split()
            yield value
def get_model():
    """Yield (ngram_string, count_string) pairs from the model file,
    splitting each line on ' ||| '."""
    with open(opts.model) as model_file:
        for line in model_file:
            yield tuple(line.split(' ||| '))
def score_ngram(ngram):
    """Return the language-model score for *ngram*, or -10 for an n-gram
    that is absent from the loaded model."""
    return ngram_dict.get(ngram, -10)
def fix_input(h):
    """Replace HTML-escaped double quotes (&quot;) in each token with a
    literal '"' character.

    Fix: the published source was garbled during extraction — the HTML
    entity in the replace() call was rendered as a bare quote, leaving a
    syntax error. The entity string is restored here.
    """
    return [token.replace("&quot;", '"') for token in h]
def rsw(h):
    """Return *h* with English stopwords removed ("remove stop words")."""
    kept = []
    for token in h:
        if token not in cachedStopWords:
            kept.append(token)
    return kept
def word_classifier(h):
    """Partition tokens into (all_words, function_words, content_words),
    where function words are English stopwords. all_words preserves the
    original order of *h*."""
    all_words = []
    function_words = []
    content_words = []
    for token in h:
        if token not in cachedStopWords:
            content_words.append(token)
        else:
            function_words.append(token)
        all_words.append(token)
    return all_words, function_words, content_words
def rose(e, h1, h2, str_pos_e, str_pos_h1, str_pos_h2, pos_e, pos_h1, pos_h2,
         e_words, e_func, e_cont, h1_words, h1_func, h1_cont, h2_words, h2_func, h2_cont, vc1, vc2):
    """Fill the 33-slot feature vectors vc1/vc2 (for hypotheses h1/h2
    against reference e) in place and return them.

    Slot layout (0-based), as established by the assignments below:
      0-3   word n-gram precision (n = 1..4)
      4-7   word n-gram recall
      8-11  word n-gram F1
      12    mean of slots 0-3
      13-15 |length difference|: all words, function words, content words
      16    summed language-model n-gram score / 100
      17-20 POS-tag n-gram precision, 21-24 recall, 25-28 F1
      29-32 (word, POS-tag) pair n-gram precision
    """
    score_cand1 = 0
    score_cand2 = 0
    # Word-level n-gram match features plus LM scores, n = 1..4.
    for n in xrange(1, 5):
        e_ngrams = [tuple(e[i:i + n]) for i in xrange(len(e) + 1 - n)]
        h1_ngrams = [tuple(h1[i:i + n]) for i in xrange(len(h1) + 1 - n)]
        h2_ngrams = [tuple(h2[i:i + n]) for i in xrange(len(h2) + 1 - n)]
        (vc1[n - 1], vc1[n + 3], vc1[n + 7]) = matches(h1_ngrams, e_ngrams)
        (vc2[n - 1], vc2[n + 3], vc2[n + 7]) = matches(h2_ngrams, e_ngrams)
        for i in xrange(len(h1) + 1 - n):
            score_cand1 += score_ngram(tuple(h1[i:i + n]))
        for i in xrange(len(h2) + 1 - n):
            score_cand2 += score_ngram(tuple(h2[i:i + n]))
    # average of ngrams
    vc1[12] = (vc1[0] + vc1[1] + vc1[2] + vc1[3]) / 4
    vc2[12] = (vc2[0] + vc2[1] + vc2[2] + vc2[3]) / 4
    # Absolute length differences against the reference.
    vc1[13] = abs(len(e_words) - len(h1_words))
    vc2[13] = abs(len(e_words) - len(h2_words))
    vc1[14] = abs(len(e_func) - len(h1_func))
    vc2[14] = abs(len(e_func) - len(h2_func))
    vc1[15] = abs(len(e_cont) - len(h1_cont))
    vc2[15] = abs(len(e_cont) - len(h2_cont))
    # Language-model scores, scaled down to the magnitude of the other slots.
    vc1[16] = score_cand1 / 100
    vc2[16] = score_cand2 / 100
    # POS-tag n-gram P/R/F features, n = 1..4.
    for n in xrange(1, 5):
        e_ngrams = [tuple(pos_e[i:i + n]) for i in xrange(len(pos_e) + 1 - n)]
        h1_ngrams = [tuple(pos_h1[i:i + n]) for i in xrange(len(pos_h1) + 1 - n)]
        h2_ngrams = [tuple(pos_h2[i:i + n]) for i in xrange(len(pos_h2) + 1 - n)]
        (vc1[n + 16], vc1[n + 20], vc1[n + 24]) = matches(h1_ngrams, e_ngrams)
        (vc2[n + 16], vc2[n + 20], vc2[n + 24]) = matches(h2_ngrams, e_ngrams)
    # (word, tag) pair n-gram precision features, n = 1..4.
    for n in xrange(1, 5):
        e_ngrams = [tuple(str_pos_e[i:i + n]) for i in xrange(len(str_pos_e) + 1 - n)]
        h1_ngrams = [tuple(str_pos_h1[i:i + n]) for i in xrange(len(str_pos_h1) + 1 - n)]
        h2_ngrams = [tuple(str_pos_h2[i:i + n]) for i in xrange(len(str_pos_h2) + 1 - n)]
        vc1[n + 28] = str_matches(h1_ngrams, e_ngrams)
        vc2[n + 28] = str_matches(h2_ngrams, e_ngrams)
    return (vc1, vc2)
# --- Driver (Python 2: print statements, xrange, integer division) ----------
# Load the n-gram language model into the global ngram_dict.
for words, count in get_model():
    tp = tuple(words.split())
    ngram_dict[tp] = float(count)
# Hand-tuned weight for each of the 33 feature slots built by rose().
wt = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 2, 2, 2, 2, 2, 0.1, 0.1, 0.1,
      0.4, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 2, 2, 2, 2, 2, 2, 2, 2]
for n, (h1, h2, e) in enumerate(islice(sentences(), opts.num_sentences)):
    # Progress report to stderr every 500 sentence triples.
    if n%500 == 0:
        sys.stderr.write(str(n/500)+' percent\n')
    h1 = fix_input(h1)
    h2 = fix_input(h2)
    e = fix_input(e)
    # POS-tag each token list; keep both the (word, tag) pairs and bare tags.
    str_pos_h1 = nltk.pos_tag(h1)
    str_pos_h2 = nltk.pos_tag(h2)
    str_pos_e = nltk.pos_tag(e)
    pos_h1 = [tup[1] for tup in str_pos_h1]
    pos_h2 = [tup[1] for tup in str_pos_h2]
    pos_e = [tup[1] for tup in str_pos_e ]
    h1_words, h1_func, h1_cont = word_classifier(h1)
    h2_words, h2_func, h2_cont = word_classifier(h2)
    e_words, e_func, e_cont = word_classifier(e)
    vc1, vc2 = [0] * 33, [0] * 33
    (vc1, vc2) = rose(e, h1, h2, str_pos_e, str_pos_h1, str_pos_h2, pos_e, pos_h1, pos_h2,
                      e_words, e_func, e_cont, h1_words, h1_func, h1_cont, h2_words, h2_func, h2_cont, vc1, vc2)
    # Weighted linear score per hypothesis; emit 1 / -1 / 0 (h1 better,
    # h2 better, tie) — the expected evaluator output format.
    l1 = 0.0
    l2 = 0.0
    for i in range(len(wt)):
        l1 += vc1[i] * wt[i]
        l2 += vc2[i] * wt[i]
    if l1 == l2:
        print 0
    elif l1 > l2:
        print 1
    else:
        print -1
| true |
e40d3eb9791fb23b8cf6c834eacd72fa93204984 | Python | Entropie67/NSI-20202021 | /NSI 1/NSI1_forturtle_09092020.py | UTF-8 | 292 | 3.1875 | 3 | [] | no_license | from turtle import *
n = 10       # number of strokes in the first pass
m = 10       # number of strokes in the second pass
taille = 20  # stroke spacing in pixels

# First pass: strokes swept along the y axis, drawing from the right edge
# back toward x = 0, one spacing higher each time.
for row in range(n + 1):
    goto(n * taille, taille * row)
    up()
    goto(0, taille * (row + 1))
    down()

# Return to the origin without drawing.
up()
goto(0, 0)
down()

# Second pass: strokes swept along the x axis, from the top edge down to
# y = 0, one spacing further right each time.
for col in range(m + 1):
    goto(taille * col, taille * n)
    up()
    goto(taille * (col + 1), 0)
    down()

exitonclick()
e0ea13542295f2b4a0cd56b9a75aea71eb8804e8 | Python | jimenbian/unittest | /Test.py | UTF-8 | 612 | 2.8125 | 3 | [] | no_license | # -*- coding: UTF-8 -*-
'''
Created on 2015年7月21日
@author: aohai.lb
'''
import TestClass
import unittest
class test(unittest.TestCase):
    """Unit tests for the arithmetic helpers in TestClass.TestClass."""

    def setUp(self):
        # Create a fresh instance under test before each test method.
        self.myclass = TestClass.TestClass()

    def testsum(self):
        # sum(1, 2) is expected to return 3; otherwise report the failure.
        self.assertEqual(self.myclass.sum(1, 2), 3, 'test sum fail')

    def testsub(self):
        # sub() is expected to return 8.
        self.assertEqual(self.myclass.sub(), 8, 'test sub fail ')

    def tearDown(self):
        # Nothing to clean up.
        pass
# Run the suite when executed directly.
if __name__=='__main__':
    unittest.main()
| true |
b16c445d4804ec621374e2381a05bd1ee22b51ec | Python | aaronmorgenegg/cs5700 | /assn4/sudoku/sudoku_solver/strategies/naked_pair.py | UTF-8 | 1,716 | 2.875 | 3 | [] | no_license | from sudoku_solver.strategies.strategy import Strategy
class NakedPair(Strategy):
    """Naked-pair elimination: when two cells in the same row or column
    hold the identical two-candidate list, those two candidates can be
    removed from every other cell in that row/column."""

    def __init__(self):
        super().__init__()
        self.type = "choice"

    def _findChanges(self, sudoku_board, choices):
        """Scan every cell; each cell with exactly two candidates anchors
        a naked-pair search along its row and its column."""
        for x, row in enumerate(choices):
            for y, cell_choices in enumerate(row):
                if len(cell_choices) == 2:
                    self._checkRows(x, y, choices, cell_choices)
                    self._checkColumns(x, y, choices, cell_choices)
                    # Block (sub-grid) elimination is not implemented yet.
        return {}

    def _checkRows(self, row_x, row_y, choices, choice_list):
        """Eliminate the pair's candidates from other cells of row row_x."""
        size = len(choices)
        for other in range(size):
            if other != row_y and choices[row_x][other] == choice_list:
                # Naked pair found: strip both candidates everywhere else.
                for col in range(size):
                    if col != row_y and col != other:
                        for candidate in choice_list:
                            try:
                                choices[row_x][col].remove(candidate)
                            except ValueError:
                                pass  # candidate already absent
        return {}

    def _checkColumns(self, row_x, row_y, choices, choice_list):
        """Eliminate the pair's candidates from other cells of column row_y."""
        size = len(choices)
        for other in range(size):
            if other != row_x and choices[other][row_y] == choice_list:
                for row in range(size):
                    if row != row_x and row != other:
                        for candidate in choice_list:
                            try:
                                choices[row][row_y].remove(candidate)
                            except ValueError:
                                pass  # candidate already absent
        return {}
| true |
f77ec12a65508cf96a8a7413a4711b73184438c8 | Python | varshini2421/pre-emphasis | /librosa ellip.py | UTF-8 | 1,443 | 2.546875 | 3 | [] | no_license | import numpy as np
import scipy as sp
from scipy.io.wavfile import read
from scipy.io.wavfile import write
from scipy import signal
import matplotlib.pyplot as plt
import librosa
import soundfile as sf
# NOTE(review): librosa.load returns (samples, sample_rate), so despite the
# names, `Frequency` holds the waveform and `array` holds the sample rate.
# The code below is numerically consistent with that reading (it filters
# `Frequency` and uses `array` both as the Nyquist denominator and as the
# output sample rate), but the names should really be swapped.
(Frequency, array) = librosa.load('audio10.wav', sr= None)
len(Frequency)  # no-op: the result is discarded
plt.plot(Frequency)
plt.title('Original Signal Spectrum')
plt.xlabel('Frequency(Hz)')
plt.ylabel('Amplitude')
# plt.show()
# FourierTransformation = sp.fft(array)
#
# scale = sp.linspace(0, Frequency, len(array))
#
# plt.stem(scale[0:5000], np.abs(FourierTransformation[0:5000]), 'r')
# plt.title('Signal spectrum after FFT')
# plt.xlabel('Frequency(Hz)')
# plt.ylabel('Amplitude')
# # plt.show()
# 6th-order elliptic high-pass: 3 dB passband ripple, 50 dB stopband
# attenuation, 1000 Hz cutoff normalised by Nyquist (0.5 * sample rate).
b,a = signal.ellip(6, 3, 50, 1000/(0.5*array), btype='highpass')
filteredSignal = signal.lfilter(b,a,Frequency)
plt.plot(filteredSignal) # plotting the signal.
plt.title('Highpass Filter')
plt.xlabel('Frequency(Hz)')
plt.ylabel('Amplitude')
# plt.show()
# 5th-order elliptic low-pass at 380 Hz, applied on top of the high-pass
# output (the comment in the original called it Butterworth; it is elliptic).
c,d = signal.ellip(5,3, 100, 380/(0.5*array), btype='lowpass') # elliptic low-pass filter
newFilteredSignal = signal.lfilter(c,d,filteredSignal) # Applying the filter to the signal
plt.plot(newFilteredSignal) # plotting the signal.
plt.title('Lowpass Filter')
plt.xlabel('Frequency(Hz)')
plt.ylabel('Amplitude')
# plt.show()
# filename = "output-ellip" + myFiles[i]
sf.write("ellipout.wav",newFilteredSignal, array)
70df3f628914709fbf1304c96e0615707c94e32b | Python | Patryk9201/CodeWars | /Python/6kyu/one_plus_array_test.py | UTF-8 | 298 | 3.015625 | 3 | [] | no_license | import unittest
from one_plus_array import up_array
class TestList(unittest.TestCase):
    """Unit tests for up_array (increment a number stored as a digit list)."""

    def test_right(self):
        # 239 + 1 -> 240, carry propagating into the tens digit.
        self.assertEqual(up_array([2, 3, 9]), [2, 4, 0])

    def test_false(self):
        # Invalid input (a "digit" > 9) is expected to yield None.
        # Bug fix: the original called assertFalse(result, None), which
        # passes None as the failure *message* rather than comparing to it;
        # assertIsNone states the intended check explicitly.
        self.assertIsNone(up_array([2, 3, 10]))
# Run the tests when executed directly: `python test_solution.py`.
if __name__ == '__main__':
    unittest.main()
7a2a2b49880b2eee80c488b800e23179297d7959 | Python | maxpowel/sensorflow-server | /serial_proxy/messenger.py | UTF-8 | 2,472 | 3.078125 | 3 | [] | no_license | import serial
import struct
import command
class ProtocolBuffersSerial(object):
    """Length-prefixed protobuf framing over a pyserial connection.

    Every message travels as a single length byte (payloads are therefore
    limited to 255 bytes) followed by the encoded protobuf bytes.
    """

    def __init__(self, port='/dev/ttyUSB0', baudrate=115200):
        self.ser = serial.Serial(port=port, baudrate=baudrate)

    def send(self, data_object):
        """Encode *data_object* and write it with a one-byte length prefix."""
        payload = data_object.encode_to_bytes()
        self.ser.write(struct.pack("B", len(payload)))
        self.ser.write(payload)

    def receive(self, data_object):
        """Read one length-prefixed frame and decode it into *data_object*."""
        (frame_size,) = struct.unpack("B", self.ser.read())
        data_object.parse_from_bytes(self.ser.read(frame_size))

    def close(self):
        """Close the underlying serial port."""
        self.ser.close()
class Nukebox(object):
    """High-level client for the Nukebox device, built on the framed
    serial link and the protobuf message types in `command`."""

    def __init__(self):
        self.ser = ProtocolBuffersSerial()
        self.c = command.Command()
        self.status = command.Status()

    def _command_query(self, command_number):
        """Send a command id and raise if the device reports a failure."""
        self.c.command = command_number
        self.ser.send(self.c)
        self.ser.receive(self.status)
        if self.status.code != 0:
            raise Exception("Command not found")

    def led_color(self, red=0, green=0, blue=0):
        """Command 0: set the RGB led colour."""
        self._command_query(0)
        led_msg = command.RgbLed()
        led_msg.red = red
        led_msg.green = green
        led_msg.blue = blue
        self.ser.send(led_msg)

    def digital_write(self, pin, value):
        """Command 2: write a digital value to a pin (A0 is pin 14)."""
        self._command_query(2)
        pin_msg = command.PinStatus()
        pin_msg.pin.name = pin
        pin_msg.status.code = value
        self.ser.send(pin_msg)

    def sensor_data(self):
        """Command 1: fetch and return the device's sensor list."""
        self._command_query(1)
        sensors_msg = command.SensorList()
        self.ser.receive(sensors_msg)
        return sensors_msg.sensors

    def close(self):
        """Close the serial link."""
        self.ser.close()
# Interactive demo: light the led, dump sensor names, toggle pin 10.
nuke = Nukebox()
input("dale")
nuke.led_color(blue=199)
for i in nuke.sensor_data():
    print(i.name)
nuke.digital_write(10, 1)
nuke.close()
exit()
# NOTE(review): everything below is unreachable (the script exits above)
# and references a `ser` variable that is never defined at this scope —
# it looks like an earlier raw-serial REPL kept for reference.
run = True
while run:
    try:
        valor = int(input("Numero"))
        c = command.Command()
        c.command = valor
        data = c.encode_to_bytes()
        # otro = command.Command()
        # otro.parse_from_bytes(data)
        size = len(data)
        print("ENVIANDO", size)
        ser.write(struct.pack("B", size))
        ser.write(data)
        response_size = struct.unpack("B", ser.read())
        print("RESPUSTA tamano", response_size)
        c.parse_from_bytes(ser.read(response_size[0]))
        print(c.command)
    except KeyboardInterrupt:
        run = False
ser.close()
| true |
1fc804396f60ed71d17a6516e91bf870cc686215 | Python | SARWAR007/PYTHON | /Python Projects/list_insert.py | UTF-8 | 177 | 3.53125 | 4 | [] | no_license | list1 = [10,20,30,40]
list1.insert(2, 50)  # insert 50 at index 2
print(list1)

l1 = [1, 8, 6, 4, 3, 2, 1]
l1.reverse()  # lists support in-place reversal
print(l1)

t1 = (1, 8, 6, 4, 3, 21, 1)
# Bug fix: tuples are immutable and have no reverse() method (the original
# raised AttributeError); build a reversed tuple instead.
t1 = tuple(reversed(t1))
print(t1)

# Bug fix: the original assigned to the integer literal ``11`` — a
# SyntaxError; the intended name was ``l1``.
l1 = list(t1)
print(type(l1))
| true |
62194204d8f9381053c980de6de1df6247a30d66 | Python | phillipinseoul/StanceDetection | /KDEY/MPQA/reformat.py | UTF-8 | 1,033 | 2.984375 | 3 | [] | no_license | import re
# Convert the MPQA subjectivity lexicon to CSV rows of
# word,subjectivity_score,sentiment_score.
#
# Example source line:
#   type=weaksubj len=1 word1=abandoned pos1=adj stemmed1=n priorpolarity=negative
#
# Scores: sentiment is +1 positive / -1 negative / 0 otherwise;
# subjectivity is +/-1 for weaksubj and +/-2 for strongsubj, signed by
# the polarity.
with open('lexicon_easy.csv', 'w') as out_file:
    with open('subjclueslen1-HLTEMNLP05.tff', 'r') as in_file:
        for line in in_file:
            m = re.search(r'.*type=(\S+).*word1=(\S+).*priorpolarity=(\S+)', line)
            if m is None:
                # Robustness fix: the original crashed with AttributeError
                # (m is None) on lines the pattern does not match.
                continue
            print(m.group(2))
            sub_score = 0
            senti_score = 0
            if m.group(3) == 'positive':
                senti_score = 1
                if m.group(1) == 'weaksubj':
                    sub_score = 1
                elif m.group(1) == 'strongsubj':
                    sub_score = 2
            elif m.group(3) == 'negative':
                senti_score = -1
                if m.group(1) == 'weaksubj':
                    sub_score = -1
                elif m.group(1) == 'strongsubj':
                    sub_score = -2
            out_file.write("%s,%d,%d\n" % (m.group(2), sub_score, senti_score))
ecc6ade6a296c0f1d2b4f6567eba9a4d13951e00 | Python | michaelsong93/python-prac | /sep4-2.py | UTF-8 | 598 | 3.359375 | 3 | [] | no_license | lst1 = [('a',1),('b',2),('c','hi')]
lst2 = ['x','a',6]
# Dict comprehension: build a mapping from the (key, value) pairs in lst1.
d = {k:v for k,v in lst1}
# Set comprehension: the distinct elements of lst2 (iteration order varies).
s = {x for x in lst2}
print(d)
print(s)
def f(n):
    """Yield n, n + 1, and n squared, in that order."""
    for value in (n, n + 1, n * n):
        yield value
# f(3) yields 3, 3 + 1 and 3 * 3 in turn, so this prints [3, 4, 9].
print([i for i in f(3)])
def merge(l, r):
    """Lazily merge two sorted lists into one sorted stream.

    On equal heads the element from *r* is yielded first, mirroring the
    strict `<` comparison that advances *l* only when it is smaller.
    """
    i = j = 0
    while i < len(l) or j < len(r):
        # Take from l when r is exhausted, or when l's head is strictly
        # smaller; otherwise take from r.
        take_left = j == len(r) or (i < len(l) and l[i] < r[j])
        if take_left:
            yield l[i]
            i += 1
        else:
            yield r[j]
            j += 1
# Earlier manual-iteration experiments, kept commented out for reference:
# g = merge([1,3,5],[2,4,6])
# while True:
#     print(g.__next__())
# print(merge([1,3,5],[2,4,6]))
# Consume the merge generator into a list and print the merged result.
print([x for x in merge([1,2,5],[3,4,6])])
6df4578b91e2bc75bcf97581128d8be8aa7550a0 | Python | kirbuchi/ProductDevelopmentProject | /backend/api/utils.py | UTF-8 | 2,414 | 2.9375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import random
from uuid import uuid4
from api.models import RiskType, GenericField, FieldType
def get_random_field_data():
    """Build the kwargs for a GenericField instance with random data:
    a unique name/description, a random FieldType, and fixed choice
    options when the chosen type is ENUM."""
    unique = uuid4().hex
    available_types = [member for member in FieldType]
    chosen_type = random.choices(available_types)[0]
    options = {}
    if chosen_type == FieldType.ENUM:
        options = {'choices': ['Choice A', 'Choice B', 'Choice C']}
    return {
        'name': 'Field {}'.format(unique),
        'description': 'Description for field {}'.format(unique),
        'type': chosen_type,
        'options': options,
    }
def generate_risk_with_fields(db, risk_type_data):
    """Persist a RiskType plus the GenericFields described by
    *risk_type_data*, then return the committed RiskType instance.

    Expected shape::

        {
            'name': 'Name of risk',
            'description': 'Description of risk',
            'fields': [
                { 'name': ..., 'type': ..., 'description': ..., 'options': {...} },
                ...
            ]
        }
    """
    new_risk = RiskType(name=risk_type_data['name'],
                        description=risk_type_data['description'])
    db.session.add(new_risk)
    for field_kwargs in risk_type_data['fields']:
        new_field = GenericField(**field_kwargs)
        new_risk.fields.append(new_field)
        db.session.add(new_field)
    db.session.commit()
    return new_risk
def create_risk(db, risk_name, risk_description, n_fields=0):
    """Create a risk with *n_fields* randomly generated fields and return
    its serialized (dict) representation, including the DB-generated ids."""
    payload = {
        'name': risk_name,
        'description': risk_description,
        'fields': [get_random_field_data() for _ in range(n_fields)],
    }
    risk = generate_risk_with_fields(db, payload)
    # Copy the identifiers assigned on commit back into the serialized form.
    payload['id'] = risk.id
    for index, field in enumerate(risk.fields):
        payload['fields'][index]['id'] = field.id
        payload['fields'][index]['type'] = field.type.value
    return payload
| true |
ce02b16593691caebcfa186bc0e5c2e29d0a044b | Python | josue-castro/messageapp | /handlers/reactions.py | UTF-8 | 5,683 | 2.53125 | 3 | [] | no_license | from flask import jsonify
from dao.reaction import ReactionDAO
class ReactionHandler:
    """HTTP-layer handler for message reactions (likes, dislikes and
    hashtags). Delegates persistence to ReactionDAO and renders JSON
    responses with Flask's jsonify."""

    def build_user_dict(self, row):
        # Map a user DB row (positional tuple) to a JSON-serialisable dict.
        result = {}
        result['pid'] = row[0]
        result['firstname'] = row[1]
        result['lastname'] = row[2]
        result['username'] = row[3]
        result['phone'] = row[4]
        result['email'] = row[5]
        return result

    def mapToDic(self, row):
        # Map a (message id, person id) row to a dict.
        result = {}
        result['mid'] = row[0]
        result['pid'] = row[1]
        return result

    def mapLikesToDic(self, row):
        # Map a single-column (username,) row to a dict.
        result = {}
        result['username'] = row[0]
        return result

    def map_likes_dislikes_attributes(self, mid, pid):
        # Build the JSON payload for a like/dislike association.
        result = {}
        result['mid'] = mid
        result['pid'] = pid
        return result

    def map_message_tags_attributes(self, mid, hid):
        # Build the JSON payload for a message/hashtag association.
        result = {}
        result['mid'] = mid
        result['hid'] = hid
        return result

    def map_hashtag_attributes(self, hid, tag):
        # Build the JSON payload for a hashtag row.
        result = {}
        result['hid'] = hid
        result['tag'] = tag
        return result

    def getMessageLikes(self, mid):
        """Return the like count for message *mid* as JSON."""
        dao = ReactionDAO()
        likes = dao.getMessageLikes(mid)
        return jsonify(Likes=likes)

    def getMessageDislikes(self, mid):
        """Return the dislike count for message *mid* as JSON."""
        dao = ReactionDAO()
        dislikes = dao.getMessageDislikes(mid)
        return jsonify(Dislikes=dislikes)

    def getWhoLikedMessage(self, mid, args):
        """Return the users who liked *mid*; requires exactly ?who=true."""
        who = args.get("who")
        if (len(args) == 1) and who == 'true':
            dao = ReactionDAO()
            like_list = dao.getWhoLikedMessage(mid)
            result_list = []
            for m in like_list:
                result_list.append(self.build_user_dict(m))
            return jsonify(Like_list=result_list)
        else:
            return jsonify(Error="Malformed query string"), 400

    def getWhoDislikedMessage(self, mid, args):
        """Return the users who disliked *mid*; requires exactly ?who=true."""
        who = args.get("who")
        if (len(args) == 1) and who == 'true':
            dao = ReactionDAO()
            dislike_list = dao.getWhoDislikedMessage(mid)
            result_list = []
            for m in dislike_list:
                result_list.append(self.build_user_dict(m))
            return jsonify(Dislike_list=result_list)
        else:
            return jsonify(Error="Malformed query string"), 400

    def like(self, mid, json):
        """Toggle a like by json['pid'] on message *mid*: inserts the like
        when absent, removes it when already present."""
        if len(json) != 1:
            return jsonify(Error="Malformed post request"), 400
        else:
            pid = json['pid']
            if pid:
                dao = ReactionDAO()
                if not dao.getWhoLikedById(mid, pid):
                    mid = dao.insertLike(mid, pid)
                else:
                    dao.deleteLike(mid, pid)
                result = self.map_likes_dislikes_attributes(mid, pid)
                return jsonify(Message=result), 201
            else:
                return jsonify(Error="Unexpected attributes in post request"), 400

    def dislike(self, mid, json):
        """Toggle a dislike by json['pid'] on message *mid*: inserts the
        dislike when absent, removes it when already present."""
        if len(json) != 1:
            return jsonify(Error="Malformed post request"), 400
        else:
            pid = json['pid']
            if pid:
                dao = ReactionDAO()
                if not dao.getWhoDislikedById(mid, pid):
                    mid = dao.insertDislike(mid, pid)
                else:
                    dao.deleteDislike(mid, pid)
                result = self.map_likes_dislikes_attributes(mid, pid)
                return jsonify(Message=result), 201
            else:
                return jsonify(Error="Unexpected attributes in post request"), 400

    def undislike(self, mid, pid):
        """Delete *pid*'s dislike on message *mid* (404 when none exists)."""
        dao = ReactionDAO()
        if not dao.getWhoDislikedById(mid, pid):
            # NOTE(review): the message says "User not found" but the
            # missing thing is the dislike record — confirm intended text.
            return jsonify(Error="User not found."), 404
        else:
            dao.deleteDislike(mid, pid)
            return jsonify(DeleteStatus="OK"), 200

    def tagToMessage(self, form):
        """Attach hashtag form['hid'] to message form['mid']."""
        if len(form) != 2:
            return jsonify(Error="Malformed post request"), 400
        else:
            mid = form['mid']
            hid = form['hid']
            if hid and mid:
                dao = ReactionDAO()
                mid = dao.insertTag(mid, hid)
                result = self.map_message_tags_attributes(mid, hid)
                return jsonify(Hashtag=result), 201
            else:
                return jsonify(Error="Unexpected attributes in post request"), 400

    def un_tagToMessage(self, mid, hid):
        """Remove the hashtag *hid* from message *mid*."""
        dao = ReactionDAO()
        if not dao.getMessageHashtagById(mid, hid):
            return jsonify(Error="User not found."), 404
        else:
            # NOTE(review): this calls deleteDislike(mid, hid), passing a
            # hashtag id where a person id is expected — it almost
            # certainly should call the DAO's tag-removal method. Confirm
            # against ReactionDAO before relying on this endpoint.
            dao.deleteDislike(mid, hid)
            return jsonify(DeleteStatus="OK"), 200

    def createNewHashtag(self, form):
        """Create a new hashtag from form['tag']."""
        # NOTE(review): only 'tag' is read below, yet the guard requires
        # exactly two form fields — confirm the expected form shape.
        if len(form) != 2:
            return jsonify(Error="Malformed post request"), 400
        else:
            tag = form['tag']
            if tag:
                dao = ReactionDAO()
                hid = dao.createHashtag(tag)
                result = self.map_hashtag_attributes(hid, tag)
                return jsonify(Hashtag=result), 201
            else:
                return jsonify(Error="Unexpected attributes in post request"), 400

    def removeHashtag(self, hid):
        """Delete hashtag *hid* (404 when it does not exist)."""
        dao = ReactionDAO()
        if not dao.getHashtagById(hid):
            # NOTE(review): "User not found" message for a missing hashtag.
            return jsonify(Error="User not found."), 404
        else:
            dao.deleteHashtag(hid)
            return jsonify(DeleteStatus="OK"), 200
| true |
f166a24bf5bfa8d0499863ab0b9ff43881f12acc | Python | wiensgerman/Test | /1-Coding.py | UTF-8 | 766 | 4.09375 | 4 | [] | no_license | #Python Coding Test
import random
# Define function that takes a list of values and a list of weights for each value
# Returns a value from values list with a probability based on the weight assigned to it.
def weighted_random(values, weights):
    """Return a random element of *values*, chosen with probability
    proportional to the corresponding entry in *weights*.

    Weights need not be normalised. Robustness fix: floating-point
    rounding can leave the last cumulative probability fractionally below
    1.0, in which case the original loop fell through and returned None;
    we fall back to the last value instead.
    """
    total_weight = sum(weights)
    acum_weights = [w / total_weight for w in weights]  # probabilities
    rand = random.random()  # uniform draw in [0, 1)
    for i in range(len(weights) - 1):
        acum_weights[i + 1] += acum_weights[i]  # cumulative distribution
    for value, weight in zip(values, acum_weights):
        # First value whose cumulative probability exceeds the draw.
        if weight > rand:
            return value
    return values[-1]  # guard against cumulative sum < 1.0 from rounding
| true |
6afb3e84088bc8a9698c9fdb8832140ef04a17f1 | Python | scoutchorton/IncompleteProjectsCollection | /Python/Slope-intercept form/main.py | UTF-8 | 941 | 3.578125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
print "=-{Calebot Productions}-=-{2016}-="
print "Slope-Intercept Form"
x1 = raw_input("Point 1: ( ")
y1 = raw_input("Point 1: ( " + x1 + ", ")
x2 = raw_input("Point 2: ( ")
y2 = raw_input("Point 2: ( " + x2 + ", ")
print "Points are: (" + x1 + ", " + y1 + ") (" + x2 + ", " + y2 + ")\n"
print "To find slope:"
print "y - y₁ = m(x - x₁)"
print y1 + " - " + y2 + " = m(" + x1 + " - " + x2 + ")"
y = float(y1) - float(y2)
x = float(x1) - float(x2)
print str(y) + " = m(" + str(x) + ")"
print str(y) + "/" + str(x) + " = m"
if len(str(y/x).split(".")[1]) <= 3:
print str(y/x) + " = m"
m = [y/x, str(y/x)]
else:
m = [y/x, str(y) + "/" + str(x)]
print "\nTo find y-intercept:"
print "y = mx + b"
print y1 + " = " + m[1] + "(" + x1 + ")" + " + b"
print y1 + " = " + str(m[0] * float(x1)) + " + b"
b = str(float(y1) - (m[0] * float(x1)))
print b + " = b"
print "Y-Intercept form:"
print "y = " + m[1] + " + " + b
| true |
476397a4580a08cdaa58e7a8baf3baf1c2c78266 | Python | Mandarpowar13/basics-of-python | /function.py | UTF-8 | 223 | 3.15625 | 3 | [] | no_license | '''
def say_hi():
print("hello user")
say_hi()
def say_hi(name):
print("hello " + name)
say_hi("mike")
say_hi("Tom")
'''
def say_hi(name, age):
    """Print a greeting containing the caller's name and age."""
    print(f'hello {name}you are {age}')

say_hi("mike", 35)
| true |
c5ef0194d9057df85939f9bd0ac3cecafb0db752 | Python | FevenKitsune/Fox-Utilities | /utils/chunklist.py | UTF-8 | 629 | 4.125 | 4 | [
"MIT"
def chunklist(inlist: list, chunksize: int) -> list:
    """Split *inlist* into consecutive chunks of *chunksize* elements.

    The final chunk is shorter when len(inlist) is not a multiple of
    chunksize.

    Args:
        inlist: The list to chunk.
        chunksize: The number of elements in each chunk.

    Returns:
        A list of lists, each holding up to chunksize elements.

    Raises:
        TypeError: If *inlist* is not a list.
    """
    if not isinstance(inlist, list):
        raise TypeError
    return [inlist[start:start + chunksize]
            for start in range(0, len(inlist), chunksize)]
| true |
1cef67fe8678d83d4857bdbd63d2ef11bdaa0130 | Python | jupyter-notebooks/data-descriptors-and-metaclasses | /notebooks/solutions/metaclasses/ex2.py | UTF-8 | 1,646 | 3.359375 | 3 | [] | no_license | """My docstring is there and long enough.
"""
import re
class NoDocString(Exception):
    """Raised when a public method's docstring is missing or too short.
    """
def is_special_method(name, regex=re.compile(r'^__[a-zA-Z\d]+__$')):
    """Find out if *name* is a dunder method name, i.e. __xxx__.

    The compiled pattern lives in the default argument so it is built
    exactly once.
    """
    return bool(regex.match(name))
class DocChecked(type):
    """Metaclass that rejects any class whose non-dunder callables lack a
    docstring of at least 5 (stripped) characters, raising NoDocString.
    NOTE: Python 2 only — uses a print statement below.
    """
    def __init__(cls, name, bases, cdict):
        # NOTE(review): the loop variable shadows the `name` parameter (the
        # class name), so the super() call at the end receives whatever
        # attribute name was iterated last — confirm this is intentional.
        for name, value in cdict.items():
            if (hasattr(value, '__call__') and
                not is_special_method(name)):
                doc_string = value.__doc__
                if doc_string:
                    doc_len = len(doc_string.strip())
                    if doc_len < 5:
                        print name  # Python 2 print statement
                        msg = 'Docstring is too short. '
                        msg += 'Found %d characters.\n' % doc_len
                        msg += 'The minimum lenght is 5 characters.'
                        raise NoDocString(msg)
                else:
                    raise NoDocString('No docstring for %s.' % name)
        super(DocChecked, cls).__init__(name, bases, cdict)
if __name__ == '__main__':
    class Test:
        """Test class other meta class than type.
        """
        # Python 2 metaclass hook; in Python 3 this would be
        # `class Test(metaclass=DocChecked)`.
        __metaclass__ = DocChecked
        def meth1(self):
            """Nice docstring.
            """
            pass
        def meth2(self):
            """void_xx
            """
            pass
        def meth3(self):
            # NOTE(review): this 3-character docstring is below the 5-char
            # minimum, so creating Test raises NoDocString at class-body
            # execution time — presumably the intended demonstration.
            """xxx
            """
            pass
        # meth4 (no docstring at all) would also trigger NoDocString.
        #def meth4(self):
        #    pass
        #def __add__(self):
        #    pass
5241e62a88be4cb4f4a93b246fd6688d05067f3f | Python | bbradt/InfoClass | /InfoClass/Classifier_Suite.py | UTF-8 | 5,205 | 2.640625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
SKLearn Classifier suite
=====================
Work off of arbitrary input saved as a numpy file
with X, y = data, labels
"""
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import RadiusNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.svm import SVC
from sklearn.svm import NuSVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
class PolynomialRegression():
    """Wrapper class for polynomial logistic regression in sklearn:
    PolynomialFeatures feeding LogisticRegression in a Pipeline."""
    # NOTE(review): this method is spelled __input__, not __init__, so it
    # is never invoked on construction — and an __init__ returning a
    # Pipeline would raise TypeError anyway. It behaves like a factory
    # method; confirm how the suite is expected to instantiate this entry.
    def __input__(self, **kwargs):
        # 'degree' is consumed here; remaining kwargs go to the regressor.
        deg = 2
        if 'degree' in kwargs:
            deg = kwargs.pop('degree', None)
        return(Pipeline([('poly', PolynomialFeatures(degree=deg)),
                         ('logistic',
                          LogisticRegression(**kwargs))]))
class Class_Suite():
    """A suite of sklearn classifiers addressable by flexible name strings
    (full name, upper/lower case, initials, or 3-letter abbreviation)."""

    # Canonical display name -> sklearn estimator class (or wrapper).
    MODELS = {"Ridge Regression": RidgeClassifier,
              "Logistic Regression": LogisticRegression,
              "SGD": SGDClassifier,
              "Perceptron": Perceptron,
              "Passive Aggressive": PassiveAggressiveClassifier,
              "Polynomial Regression": PolynomialRegression,
              "K Nearest Neighbors": KNeighborsClassifier,
              "Radius Neighbors": RadiusNeighborsClassifier,
              "Nearest Centroid": NearestCentroid,
              "SVM": SVC,
              "NuSVM": NuSVC,
              "Gaussian Process": GaussianProcessClassifier,
              "Decision Tree": DecisionTreeClassifier,
              "Random Forest": RandomForestClassifier,
              "AdaBoost": AdaBoostClassifier,
              "Bagging": BaggingClassifier,
              "Gradient Tree Boosting": GradientBoostingClassifier,
              "Gaussian Naive Bayes": GaussianNB,
              "Multinomial Naive Bayes": MultinomialNB,
              "Bernoulli Naive Bayes": BernoulliNB,
              "LDA": LinearDiscriminantAnalysis,
              "QDA": QuadraticDiscriminantAnalysis,
              "Neural Net": MLPClassifier
              }

    # Canonical name -> set of accepted alias spellings.
    ACCEPTABLE_NAMES = {name: set([name, name.lower(), name.upper(),
                                   name.strip(), name.strip().lower(),
                                   name.strip().upper(),
                                   ''.join([w[0] if len(name.split()) > 1
                                            else w[0:3].upper()
                                            for w in name.split()]),
                                   ''.join([w[0] if len(name.split()) > 1
                                            else w[0:3]
                                            for w in name.split()]).lower()])
                        for name in MODELS}

    def __init__(self, names, hyperparameters=None):
        """Instantiate one classifier per recognised name.

        Args:
            names: iterable of model names/aliases; unknown ones are ignored.
            hyperparameters: optional dict mapping the *given* name to a
                kwargs dict for that model's constructor.
        """
        hyperparameters = hyperparameters or {}  # avoid mutable default
        real_names = {}
        for in_name in names:
            name = self._get_model_key(in_name)
            if name:
                real_names[name] = in_name
        self.names = set(real_names.keys())
        # Bug fix: the original kept a list and called the MODELS dict
        # itself (self.MODELS(**kwargs)) instead of instantiating the
        # selected class, and evaluate_model then tried to *call* that
        # list. Store instances in a dict keyed by canonical name instead.
        self.classifiers = {}
        self.hyperparameters = {}
        for name in self.names:
            in_name = real_names[name]
            kwargs = hyperparameters.get(in_name, {})
            self.classifiers[name] = self.MODELS[name](**kwargs)
            self.hyperparameters[name] = kwargs

    def _get_model_key(self, name):
        """Return the canonical model name matching *name*, or None."""
        for model_name in self.ACCEPTABLE_NAMES:
            if name in self.ACCEPTABLE_NAMES[model_name]:
                return(model_name)
        return None

    def evaluate_model(self, in_name, dataset, max_iter=None, random_state=42):
        """Fit the named classifier on dataset.{x,y}_train and score it on
        dataset.{x,y}_test.

        Returns:
            (accuracy, predictions) tuple, or None for an unknown name.
        """
        name = self._get_model_key(in_name)
        if not name or name not in self.classifiers:
            return None
        clf = self.classifiers[name]  # bug fix: dict lookup, not a call
        X_train = dataset.x_train
        y_train = dataset.y_train
        X_test = dataset.x_test
        y_test = dataset.y_test
        clf.random_state = random_state
        clf.fit(X_train, y_train)
        y_hat = clf.predict(X_test)
        score = clf.score(X_test, y_test)
        return score, y_hat
| true |
71e4eeb5a25f32213ab45a6c976a199bf9509e6c | Python | winnychenn/line-bot-python | /lottery.py | UTF-8 | 558 | 3.09375 | 3 | [] | no_license | import random
def lottery(str0):
flag = random.randint(0, 100)
if "會不會" in str0:
if flag % 2 == 0:
result = '會'
elif flag % 2 == 1:
result = '不會'
elif "是不是" in str0:
if flag % 2 == 0:
result = '是'
elif flag % 2 == 1:
result = '不是'
else:
if flag % 5 == 0:
result = '大吉'
elif flag % 5 == 1:
result = '小吉'
elif flag % 5 == 2:
result = '中'
elif flag % 5 == 3:
result = '下'
elif flag % 5 == 4:
result = '下下'
return result
| true |
aea54b81f8dc4abf35e39d2f0fe72af8339f0fc7 | Python | PreshitaDave/Bioinformatics_coursera | /bioinfo1/w4_randommotifs.py | UTF-8 | 316 | 2.828125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 29 20:35:33 2020
@author: Presh
"""
import random
def RandomMotifs(Dna, k, t):
randmot = []
l = len(Dna[0])
for i in range(t):
r = random.randint(1, l-k)
r1 = Dna[i][r:r+k]
randmot.append(r1)
return randmot
| true |
260717f99c475536366bc38ea59b5314a30c1624 | Python | boringPpl/Crash-Course-on-Python | /Exercises on IDE/4 Conditionals/ex4_7_conditionals.py | UTF-8 | 1,081 | 4.625 | 5 | [] | no_license | '''Question 4.7:
Complete the body of the format_name function.
This function receives the first_name and last_name parameters
and then returns a properly formatted string.
Specifically:
If both the last_name and the first_name parameters are supplied,
the function should return like so:
print(format_name("Ella", "Fitzgerald"))
Name: Fitzgerald, Ella
If only one name parameter is supplied (either the first name or the last name),
the function should return like so:
print(format_name("Adele", ""))
Name: Adele
or
print(format_name("", "Einstein"))
Name: Einstein
Finally, if both names are blank, the function should return the empty string:
print(format_name("", ""))
'''
def format_name(first_name, last_name):
# code goes here
return string
print(format_name("Ernest", "Hemingway")) # Should return the string "Name: Hemingway, Ernest"
print(format_name("", "Madonna")) # Should return the string "Name: Madonna"
print(format_name("Voltaire", "")) # Should return the string "Name: Voltaire"
print(format_name("", "")) # Should return an empty string | true |
03661abfa3814dcb10e22ddd1972f73a4e34f472 | Python | gbvp/jd-scrapper | /jd_link_scrapper.py | UTF-8 | 1,402 | 2.734375 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
import json
import xlsxwriter
array = []
city_array = ['CITY']
link = ['Link']
agent = {"User-Agent":'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}
city_list = ["Ahmedabad","Surat","Vadodara","Rajkot","Bhavnagar","Jamnagar","Mehsana","Porbandar","Bhuj","Morbi","Junagadh","Veraval","Amreli","Surendra-Nagar-Gujarat","Patan-Gujarat","Palanpur","Himatnagar","Idar","Vijapur","Visnagar","Modasa","Godhra","Dahod","Daang","Rajpipla","Halol","Bharuch","Anand","Nadiad","Ankleshwar","Vapi","Valsad","Diu","Daman","Botad","Gandhidham","Gandhinagar-Gujarat","Navsari","Viramgam","Tapi","Narmada"];
for i in range(len(city_list)):
city = str(city_list[i])
for i in range(1,100):
URL = "https://www.justdial.com/"+city+"/Cinema/page-"+str(i)
r = requests.get(URL, headers=agent)
soup = BeautifulSoup(r.content, 'html5lib')
rows = soup.findAll('li', attrs = {'class':'cntanr'})
print(len(rows))
if(len(rows) == 0):
break
for row in rows:
city_array.append(city)
link.append(row["data-href"])
print(row["data-href"])
array.append(city_array)
array.append(link)
workbook = xlsxwriter.Workbook('scrapp_link.xlsx')
worksheet = workbook.add_worksheet()
row = 0
for col, data in enumerate(array):
worksheet.write_column(row, col, data)
workbook.close()
print (array)
| true |
ec67bf2af4338135298750b1e7376071f3f48dac | Python | aashish31f/machineLearning | /REGRESSION/RANDOM FOREST REGRESSION/random-forest-regression.py | UTF-8 | 797 | 3.6875 | 4 | [] | no_license | #RANDOM FOREST REGRESSION
#Importing Libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#Importing dataset
dataset = pd.read_csv('Position_Salaries.csv')
x = dataset.iloc[:,1:-1].values
y = dataset.iloc[:,-1].values
#Fitting random forest regression to the dataset
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = 277 , random_state = 0)
regressor.fit(x,y)
#Predicting a new result
y_pred = regressor.predict(6.5)
#Plotting our random forest regression results
x_grid = np.arange(min(x),max(x),0.01)
x_grid = x_grid.reshape((len(x_grid),1))
plt.scatter(x,y,color = 'red')
plt.plot(x_grid,regressor.predict(x_grid))
plt.title('BLUFF DETECTOR (DECISION TREE)')
plt.xlabel('level')
plt.ylabel('salary')
plt.show()
| true |
a613770e977c522fb7617fa846b2e93fa149902e | Python | mwolffe/my-isc-work | /python/doubleit.py | UTF-8 | 77 | 2.859375 | 3 | [] | no_license | def double_it(number):
print(number * 2)
double_it(57)
double_it("lol")
| true |
7fac1d489778194d144d6b783bb29032b7a6271e | Python | ahridin-synamedia/intro-to-python-typing | /examples/05_lists_and_dicts.py | UTF-8 | 415 | 3.78125 | 4 | [] | no_license | """
Lists can have typed contents; Dicts can type [Keys: Values]
"""
from typing import List, Dict
LetterList = List[str]
LetterIndexMap = Dict[str, int]
def map_strings_by_index(my_list: LetterList) -> LetterIndexMap:
return {letter: index for index, letter in enumerate(my_list)}
letter_list = ['A', 'B', 'C', 'D', 'E']
indexed_strings = map_strings_by_index(letter_list)
assert indexed_strings['c'] == 3
| true |
5174ee5559968468d524e6fe65eb894282574ba8 | Python | gigarza/sf-dispatches | /data.py | UTF-8 | 3,821 | 2.78125 | 3 | [] | no_license | import csv
from datetime import date, datetime
import json
from shapely.geometry import Point, shape
from pymongo import MongoClient
config = json.loads(open("config.json").read())
# Gets the database to input data to from MongoDB
client = MongoClient('mongodb://'+config['mongo_user']+':'+config['mongo_pass']+'@'+config['mongo_host'])
db = client.get_database()
# Set up neighborhoods from GeoJSON file
with open('static/neighborhoods.geojson', 'r') as f:
neighborhoods = [{"s": shape(feature['geometry']), "f": feature} for feature in json.load(f)['features']]
# Open the csv file that currently contains the data
with open('sfpd-dispatch/sfpd_dispatch_data_subset.csv', newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
# go through the data and cast to correct variable type as necessary
row["call_number"] = int(row["call_number"])
row["incident_number"] = int(row["incident_number"])
row["call_date"] = datetime.strptime(row["call_date"], '%m/%d/%y')
row["watch_date"] = datetime.strptime(row["watch_date"], '%m/%d/%y')
if row["received_timestamp"] == '':
row["receiver_timestamp"] = None
else:
row["received_timestamp"] = datetime.strptime(row["received_timestamp"], "%Y-%m-%d %H:%M:%S.%f %Z")
row["received_timestamp_hr"] = row["received_timestamp"].hour
row["received_timestamp_min"] = row["received_timestamp"].minute
if row["entry_timestamp"] == '':
row["entry_timestamp"] = None
else:
row["entry_timestamp"] = datetime.strptime(row["entry_timestamp"], "%Y-%m-%d %H:%M:%S.%f %Z")
if row["dispatch_timestamp"] == '':
row["dispatch_timestamp"] = None
else:
row["dispatch_timestamp"] = datetime.strptime(row["dispatch_timestamp"], "%Y-%m-%d %H:%M:%S.%f %Z")
if row["response_timestamp"] == '':
row["response_timestamp"] = None
else:
row["response_timestamp"] = datetime.strptime(row["response_timestamp"], "%Y-%m-%d %H:%M:%S.%f %Z")
if row["on_scene_timestamp"] == '':
row["on_scene_timestamp"] = None
else:
row["on_scene_timestamp"] = datetime.strptime(row["on_scene_timestamp"], "%Y-%m-%d %H:%M:%S.%f %Z")
if row["transport_timestamp"] == '':
row["transport_timestamp"] = None
else:
row["transport_timestamp"] = datetime.strptime(row["transport_timestamp"], "%Y-%m-%d %H:%M:%S.%f %Z")
if row["hospital_timestamp"] == '':
row["hospital_timestamp"] = None
else:
row["hospital_timestamp"] = datetime.strptime(row["hospital_timestamp"], "%Y-%m-%d %H:%M:%S.%f %Z")
if row["available_timestamp"] == '':
row["available_timestamp"] = None
else:
row["available_timestamp"] = datetime.strptime(row["available_timestamp"], "%Y-%m-%d %H:%M:%S.%f %Z")
row["zipcode_of_incident"] = int(row["zipcode_of_incident"])
row["final_priority"] = int(row["final_priority"])
row["als_unit"] = bool(row["als_unit"])
row["number_of_alarms"] = int(row["number_of_alarms"])
row["unit_sequence_in_call_dispatch"] = int(row["unit_sequence_in_call_dispatch"])
row["latitude"] = float(row["latitude"])
row["longitude"] = float(row["longitude"])
#Find the neighborhood from this call
pt = Point(row["longitude"], row["latitude"])
for neighborhood in neighborhoods:
if neighborhood["s"].contains(pt):
row['neighborhood_district'] = neighborhood["f"]["properties"]["nhood"]
break
else:
row['neighborhood_district'] = None
db.calls.insert_one(row)
| true |
2c406edbaf5088c0b9697fa37e52d1f99ceb01c8 | Python | resghst/ML_learn_morvanzhouWeb | /normalUsed/2-2iris.py | UTF-8 | 819 | 3.21875 | 3 | [] | no_license | import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
if __name__ == "__main__":
#load data from iris database
iris = datasets.load_iris()
iris_X = iris.data
iris_y = iris.target
print(iris_X[:2,:])
print(iris_y)
print('---------------------------------')
#The data devide to testing and traning data, and the testing data is 30% in the data.
#shuffle data is important in ML.
X_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=0.3 )
print(y_train)
print('---------------------------------')
#crate model, testing and training
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
print(knn.predict(X_test))
print(y_test) | true |
ce796a403412c9523c2dcbbad4209f0e59f22d43 | Python | TechMaster/LearnAI | /SKLearn/05-Binarize.py | UTF-8 | 159 | 2.71875 | 3 | [] | no_license | from sklearn.preprocessing import Binarizer
X = [[1, 3, 2, 5, 0, 1]]
transformer = Binarizer(threshold=2)
print(transformer)
print(transformer.transform(X))
| true |
d54355bab5da9fdae422598d4831d58b4ea51946 | Python | NYU-DevOps-Spring2018-Orders/orders | /tests/test_items.py | UTF-8 | 8,481 | 2.84375 | 3 | [
"Apache-2.0"
] | permissive | """
Test cases for Item Model
Test cases can be run with:
nosetests
coverage report -m
"""
from datetime import datetime
import os
import unittest
from app import app, db
from app.models import Order, Item, DataValidationError
from werkzeug.exceptions import NotFound
DATABASE_URI = os.getenv('DATABASE_URI', 'mysql+pymysql://root@localhost:3306/development')
######################################################################
# T E S T C A S E S
######################################################################
class TestItems(unittest.TestCase):
""" Test Cases for Items """
@classmethod
def setUpClass(cls):
""" These run once per Test suite """
app.debug = False
# Set up the test database
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
Order.init_db()
db.drop_all() # clean up the last tests
db.create_all() # make our sqlalchemy tables
date = datetime.now()
order = Order(customer_id=1, date=date, status = 'processing').save()
order = Order(customer_id=2, date=date, status = 'processing').save()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_create_an_item(self):
""" Create a item and assert that it exists """
item = Item(product_id=1, name="wrench", quantity=1, price=10.50)
self.assertEqual(item.id, None)
self.assertEqual(item.product_id, 1)
self.assertEqual(item.name, "wrench")
self.assertEqual(item.quantity, 1)
self.assertEqual(item.price, 10.50)
def test_add_an_item(self):
""" Create an Item and add it to the database """
items = Item.all()
self.assertEqual(items, [])
item = Item(order_id=1, product_id=1, name="wrench", quantity=1, price=10.50)
self.assertEqual(item.id, None)
item.save()
self.assertEqual(item.id, 1)
items = Item.all()
self.assertEqual(len(items), 1)
def test_update_an_item(self):
""" Update an Item """
item = Item(order_id=1, product_id=1, name="wrench", quantity=1, price=10.50)
item.save()
self.assertEqual(item.id, 1)
item.price = 12.0
item.save()
items = Item.all()
self.assertEqual(len(items), 1)
self.assertEqual(items[0].price, 12.0)
def test_delete_an_item(self):
""" Delete an Item """
item = Item(order_id=1, product_id=1, name="wrench", quantity=1, price=10.50)
item.save()
self.assertEqual(len(Item.all()), 1)
item.delete()
self.assertEqual(len(Item.all()), 0)
def test_serialize_an_item(self):
""" Test serialization of an Item """
item = Item(order_id=1, product_id=1, name="wrench", quantity=1, price=10.50)
data = item.serialize()
self.assertNotEqual(data, None)
self.assertIn('id', data)
self.assertEqual(data['id'], None)
self.assertIn('order_id', data)
self.assertEqual(data['order_id'], 1)
self.assertIn('product_id', data)
self.assertEqual(data['product_id'], 1)
self.assertIn('name', data)
self.assertEqual(data['name'], "wrench")
self.assertIn('quantity', data)
self.assertEqual(data['quantity'], 1)
self.assertIn('price', data)
self.assertEqual(data['price'], 10.50)
def test_deserialize_an_item(self):
""" Test deserialization of an Item """
data = {"id": 1, "product_id": 1, "name": "wrench", "quantity": 1, "price": 10.50}
order_id = 1
item = Item()
item.deserialize(data, order_id)
self.assertNotEqual(item, None)
self.assertEqual(item.id, None)
self.assertEqual(item.order_id, 1)
self.assertEqual(item.product_id, 1)
self.assertEqual(item.name, "wrench")
self.assertEqual(item.quantity, 1)
self.assertEqual(item.price, 10.50)
def test_fetch_all_items(self):
""" Test fetching all Items """
item = Item(order_id=1, product_id=1, name="wrench", quantity=1, price=10.50)
item.save()
item2 = Item(order_id=1, product_id=2, name="hammer", quantity=2, price=11)
item2.save()
Item.all()
self.assertEqual(len(Item.all()), 2)
def test_get_an_item(self):
""" Get an Item by id """
hammer = Item(order_id=1, product_id=2, name="hammer", quantity=2, price=11)
hammer.save()
item = Item.get(hammer.id)
self.assertEqual(item.id, hammer.id)
self.assertEqual(item.name, "hammer")
def test_get_or_404(self):
""" Get_or_404 function with nonexistent ID """
self.assertRaises(NotFound, Item.get_or_404, 1)
def test_find_by_product_id(self):
""" Find Items by product_id"""
item = Item(order_id=1, product_id=1, name="wrench", quantity=1, price=10.50)
item.save()
item2 = Item(order_id=1, product_id=2, name="hammer", quantity=2, price=11)
item2.save()
items = Item.find_by_product_id(1)
self.assertEqual(items[0].product_id, 1)
self.assertEqual(items[0].name, "wrench")
self.assertEqual(items[0].quantity, 1)
self.assertEqual(items[0].price, 10.50)
def test_find_by_order_id(self):
""" Find Items by product_id"""
item = Item(order_id=1, product_id=1, name="wrench", quantity=1, price=10.50)
item.save()
item2 = Item(order_id=1, product_id=2, name="hammer", quantity=2, price=11)
item2.save()
items = Item.find_by_order_id(1)
self.assertEqual(items[0].product_id, 1)
self.assertEqual(items[0].name, "wrench")
self.assertEqual(items[1].quantity, 2)
self.assertEqual(items[1].price, 11.00)
def test_find_by_name(self):
""" Find Items by name"""
item = Item(order_id=1, product_id=1, name="wrench", quantity=1, price=10.50)
item.save()
item2 = Item(order_id=1, product_id=2, name="hammer", quantity=2, price=11)
item2.save()
items = Item.find_by_name("wrench")
self.assertEqual(items[0].product_id, 1)
self.assertEqual(items[0].name, "wrench")
self.assertEqual(items[0].quantity, 1)
self.assertEqual(items[0].price, 10.50)
def test_find_by_quantity(self):
""" Find Items by quantity"""
item = Item(order_id=1, product_id=1, name="wrench", quantity=1, price=10.50)
item.save()
item2 = Item(order_id=1, product_id=2, name="hammer", quantity=2, price=11)
item2.save()
items = Item.find_by_quantity(1)
self.assertEqual(items[0].product_id, 1)
self.assertEqual(items[0].name, "wrench")
self.assertEqual(items[0].quantity, 1)
self.assertEqual(items[0].price, 10.50)
def test_find_by_price(self):
""" Find Items by price"""
item = Item(order_id=1, product_id=1, name="wrench", quantity=1, price=10.50)
item.save()
item2 = Item(order_id=1, product_id=2, name="hammer", quantity=2, price=11)
item2.save()
items = Item.find_by_price(11)
self.assertEqual(items[0].product_id, 2)
self.assertEqual(items[0].name, "hammer")
self.assertEqual(items[0].quantity, 2)
self.assertEqual(items[0].price, 11)
def test_non_dict_raises_error(self):
""" Pass invalid data structure deserialize """
data = [1,2,3]
item = Item()
order_id = 1
with self.assertRaises(DataValidationError):
item.deserialize(data, order_id)
def test_invalid_key_raises_error(self):
""" Try to pass invalid key """
data = {"id": 1, "product_id": 1, "quantity": 1, "price": 10.50}
order_id = 1
with self.assertRaises(DataValidationError):
item = Item()
item.deserialize(data, order_id)
def test_repr(self):
""" Test that string representation is correct """
hammer = Item(order_id=1, product_id=2, name="hammer", quantity=2, price=11)
hammer.save()
self.assertEqual(hammer.__repr__(), "<Item u'hammer'>")
######################################################################
# M A I N
######################################################################
if __name__ == '__main__':
unittest.main()
| true |
047d79f042ca05f661bbcbd46e254e7f9279587c | Python | YumChaaMax/aps | /src/data/data_load.py | UTF-8 | 1,711 | 2.59375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 13 10:49:04 2019
@author: Max
"""
import pandas as pd
def load_prodline():
prodline=pd.read_excel(r'./data/prod_line_info.xlsx',names=['line_no','line_desp','staff_num','work_hour'])
prodline.index=prodline['line_no']
prod_line=prodline.index.tolist()
line_num=len(prod_line)
print("Loading Production Lines: Complete.")
print("Plan %d lines."%line_num)
return prod_line
def load_stdhour():
model_SAH=pd.read_excel(r"./data/model_std_hour.xlsx",names=['model_no','sah'])
model_SAH['sah']=model_SAH['sah']/3600
model_num=len(model_SAH['model_no'].unique())
print("Loading standard hour of models: Complete.")
print("Got %d models."%model_num)
return model_SAH
def load_effi():
practice_pace=pd.read_excel(r"./data/practice_curve.xlsx",names=['uid','model_no','line_no','day_process','effi'])
mdln_match=len(practice_pace[['model_no','line_no']].drop_duplicates())
print("Loading practice curve-efficiency: Complete")
print("Got %d matches."%mdln_match)
return practice_pace
def load_orders():
rawPool=pd.read_excel(r'./data/orderPool1.xlsx',names=['order_id','model_no','order_num',\
'order_date','deli_date','order_type','priority','epst','deli_ahead'])
order_num=len(rawPool)
print("Loading Orders: Complete.")
print("Got %d orders."%order_num)
rawPool.index=rawPool['order_id']
return rawPool
def load_workday():
work_day=pd.read_excel(r"./data/work_day.xlsx",names=['day_date','is_holiday','workday_id'])
print("Loading %d days in this year:Complete "%len(work_day))
return work_day
| true |
b3bf16cc53b5e6df6b709617d14d9e545f0c98e9 | Python | pronob1010/Data_Science_Project_with_Edu_data | /problem solve/venv/1015.py | UTF-8 | 202 | 3.3125 | 3 | [] | no_license | import math
linha1 = input().split(" ")
linha2 = input().split(" ")
x1,y1 = linha1
x2,y2 = linha2
Dis = math.sqrt(pow((float(x2)-float(x1)),2)+pow((float(y2)-float(y1)),2))
float(Dis)
print("%.4f"%Dis)
| true |
eb17415dbe6d16df29a6dec2578794fe5f98fee2 | Python | SebastianToporsch/scripting | /src/Logger.py | UTF-8 | 908 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python3
import datetime
import os
import subprocess
class Logger:
__instance = None
@staticmethod
def get_instance():
if Logger.__instance is None:
Logger()
return Logger.__instance
def __init__(self):
if Logger.__instance is not None:
raise Exception("This class is a singleton!")
else:
Logger.__instance = self
def log(self, string):
if not os.path.exists('logger.txt'):
f = open("logger.txt", "w+")
f = open("logger.txt", "a")
f.write("\nSUCCESS\t " + str(datetime.datetime.now()) + ": " + string)
f.close()
def err(self, string):
if not os.path.exists('logger.txt'):
f = open("logger.txt", "w+")
f = open("logger.txt", "a")
f.write("\nERROR\t " + str(datetime.datetime.now()) + ": " + string)
f.close()
| true |
9baf404e7aa246631fd0c7f535a8baee05559c5b | Python | beniaminofuoco/python | /CorsoUdemy/UpgradePython/ParamPosOnly3_8.py | UTF-8 | 884 | 4.34375 | 4 | [] | no_license | # PEP 570 - Python Positional-Only Parameters
# Questa PEP ci permette di specificare, attraverso l'operatore /, che alcuni parametri non possono
# essere di tipo Keyword ma devono essere SEMPRE posizionali.
class PositionalOnlyParam:
pass
if __name__ == "__main__":
def somma(a,b,c):
return a+b+c
# Richiamo lo funzione assegnando dei parametri posizionali
print(somma(10, 4, 2)) # 16
print()
#Richiamo la funzione assegnando dei parametri keyword
print(somma(b=4, c=2, a=10)) # 16
print()
# Indico che il parametro a deve essere sempre posizionale
def sommaParmOnlyPos(a,/, b, c):
return a + b + c
# print(sommaParmOnlyPos(b=4, a=10, c=2))
# Ottengo il seguente errore:
# sommaParmOnlyPos() got some positional-only arguments passed as keyword arguments: 'a'
print(sommaParmOnlyPos(10, c=2, b=4)) # 16
| true |
ff8bbe298ece9d12f9e89a3d1418ed74ec7a1e46 | Python | adam-formela/Adam-s-repo | /11_OOP_intro/03.py | UTF-8 | 936 | 4.6875 | 5 | [] | no_license | # Stwórz własną implementację kolejki FIFO. Klasa kolejka (Queue) powinna na starcie przyjmować listę elementów.
# Wśród metod powinny się znaleźć takie jak: wyswietlenie kolejki, sprawdzenie czy jest pusta, dodanie elementu do kolejki (put), wyjęcie elementu z kolejki (get).
# Utwórz kilka obiektów klasy Queue z różnymi parametrami.
class Queue():
def __init__(self, fifo):
self.fifo = fifo
def show(self):
print(self.fifo)
def is_empty(self):
return len(self.fifo) == 0
def put(self, element):
self.fifo.append(element)
def get(self):
element = self.fifo.pop(0)
return element
example_queue = Queue([5, 8, 3, 8, 20, 59, 3, 1, 2357])
example_queue.show()
print(example_queue.is_empty())
example_queue.put(6)
example_queue.put(1)
example_queue.put(61)
example_queue.show()
print('====get ====')
print(example_queue.get())
example_queue.show() | true |
cfa3860d0a4d429bdd99f7e20bcbce9fe23c5659 | Python | lipengbo/agent | /test.py | UTF-8 | 413 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Filename:test.py
# Date:Sat Oct 19 15:13:25 CST 2013
# Author:Pengbo Li
# E-mail:lipengbo10054444@gmail.com
import sys
if __name__ == '__main__':
module_name, sep, function_name = sys.argv[1].rpartition('.')
module_name = 'tests.' + module_name
if module_name not in sys.modules:
__import__(module_name)
print getattr(sys.modules[module_name], function_name)()
| true |
1f3595c31e67d6930b707fec13d305a613a65ed5 | Python | Deiri/vkbrute | /vkbf.py | WINDOWS-1251 | 1,079 | 2.609375 | 3 | [] | no_license | import os
os.system('clear')
banner = '''
8b d8 88 88888888ba 88888888888
`8b d8' 88 88 "8b 88
`8b d8' 88 88 ,8P 88
`8b d8' 88 ,d8 88aaaaaa8P' 88aaaaa
`8b d8' 88 ,a8" aaaaaaaa 88""""""8b, 88"""""
`8b d8' 8888[ """""""" 88 `8b 88
`888' 88`"Yba, 88 a8P 88
`8' 88 `Y8a 88888888P" 88
'''
print(banner)
text = '''
[1]-VkBruteForce
'''
print(text)
v = int(input(" :"))
def Vkpassgen():
import os
os.system("python3 passgen.py")
def VkBruteForceV1():
import os
os.system("python3 vk.py")
def VkBruteForce2():
import os
os.system('python vk2.py')
if v ==3:
vkpassgen()
elif v ==1:
VkBruteForce()
elif v ==22:
VkBruteForce2()
| true |
0bbeebab0c013ea7da3e3a4452497fba14b871aa | Python | amisha1garg/Arrays_In_Python | /ArraySum.py | UTF-8 | 496 | 3.5 | 4 | [] | no_license | #User function Template for python3
# Given an integer array Arr[] of size N. The task is to find sum of it.
from numpy import *
class Solution:
def _sum(self,arr, n):
# code here
return sum(arr)
#{
# Driver Code Starts
#Initial Template for Python 3
# Driver code
if __name__ == "__main__":
tc=int(input())
while tc > 0:
n=int(input())
arr=list(map(int , input().strip().split()))
ob = Solution()
ans = ob._sum(a | true |
bc61755850c460ce53f6e2da90890a6e302a64dc | Python | dev2404/Babber_List | /merge_2_sorted_array.py | UTF-8 | 132 | 3.40625 | 3 | [] | no_license | def merge(arr1, arr2):
arr1 = arr1 + arr2
return sorted(arr1)
arr1 = [1,3,5,7]
arr2 = [0,2,6,8,9]
print(merge(arr1, arr2)) | true |
917c5c16ca6895d5de944ce9e4d01de14d8070b2 | Python | lemma-osu/pynnmap | /pynnmap/diagnostics/vegetation_class_outlier_diagnostic.py | UTF-8 | 4,064 | 2.75 | 3 | [] | no_license | import pandas as pd
from pynnmap.diagnostics import diagnostic
from pynnmap.diagnostics import vegetation_class_diagnostic as vcd
from pynnmap.misc.utilities import df_to_csv
# Define the classes of vegetation class outliers. Red outliers represent
# large differences between observed and predicted vegetation classes, with
# orange and yellow being less severe
RED_OUTLIERS = {
1: [10, 11],
2: [11],
3: [],
4: [],
5: [11],
6: [],
7: [],
8: [11],
9: [],
10: [1],
11: [1, 2, 5, 8],
}
ORANGE_OUTLIERS = {
1: [4, 6, 7, 8, 9],
2: [10],
3: [10, 11],
4: [1],
5: [10],
6: [1, 11],
7: [1],
8: [1],
9: [1],
10: [2, 3, 5],
11: [3, 6],
}
YELLOW_OUTLIERS = {
1: [3, 5],
2: [4, 6, 7, 9],
3: [1, 6, 7, 8, 9],
4: [2, 5, 9, 10, 11],
5: [1, 4, 7, 9],
6: [2, 3, 8, 10],
7: [2, 3, 5, 8, 9],
8: [3, 4, 6, 7, 10],
9: [2, 3, 4, 5, 7, 11],
10: [4, 6, 8],
11: [4, 9],
}
def find_vegclass_outlier_class(rec):
observed, predicted = rec['OBSERVED'], rec['PREDICTED']
if predicted in YELLOW_OUTLIERS[observed]:
return 'yellow'
elif predicted in ORANGE_OUTLIERS[observed]:
return 'orange'
elif predicted in RED_OUTLIERS[observed]:
return 'red'
return 'green'
class VegetationClassOutlierDiagnostic(diagnostic.Diagnostic):
def __init__(self, parameters):
self.observed_file = parameters.stand_attribute_file
self.vegclass_outlier_file = parameters.vegclass_outlier_file
self.id_field = parameters.plot_id_field
# Create a list of predicted files - both independent and dependent
self.predicted_files = [
('dependent', parameters.dependent_predicted_file),
('independent', parameters.independent_predicted_file),
]
# Create a instance of the VegetationClassDiagnostic to calculate
# vegetation class
self.vc_calc = vcd.VegetationClassDiagnostic(parameters=parameters)
# Ensure all input files are present
files = [
self.observed_file,
parameters.dependent_predicted_file,
parameters.independent_predicted_file,
]
try:
self.check_missing_files(files)
except diagnostic.MissingConstraintError as e:
e.message += '\nSkipping VegetationClassOutlierDiagnostic\n'
raise e
def run_diagnostic(self):
# Run this for both independent and dependent predictions
out_dfs = []
for (prd_type, prd_file) in self.predicted_files:
# Read the observed and predicted files into dataframes
obs_df = pd.read_csv(self.observed_file, index_col=self.id_field)
prd_df = pd.read_csv(prd_file, index_col=self.id_field)
# Subset the observed data just to the IDs that are in the
# predicted file
obs_df = obs_df[obs_df.index.isin(prd_df.index)]
obs_df.reset_index(inplace=True)
prd_df.reset_index(inplace=True)
# Calculate VEGCLASS for both the observed and predicted data
vc_df = self.vc_calc.vegclass_aa(
obs_df, prd_df, id_field=self.id_field)
vc_df.columns = [self.id_field, 'OBSERVED', 'PREDICTED']
# Find the outliers
vc_df['CLASS'] = vc_df.apply(find_vegclass_outlier_class, axis=1)
# Only keep yellow, orange, and red outliers
vc_df = vc_df[vc_df.CLASS != 'green']
# Format this dataframe for export and append it to the out_df list
vc_df.insert(1, 'PREDICTION_TYPE', prd_type.upper())
vc_df.rename(columns={
'OBSERVED': 'OBSERVED_VEGCLASS',
'PREDICTED': 'PREDICTED_VEGCLASS',
'CLASS': 'OUTLIER_TYPE'
}, inplace=True)
out_dfs.append(vc_df)
# Merge together the dfs and export
out_df = pd.concat(out_dfs)
df_to_csv(out_df, self.vegclass_outlier_file)
| true |
269b6d14bfc8f801b730b101de9a9f498a3b7679 | Python | LucasAMiranda/OnFiap | /Capitulo2_Repeticoes/while_infinito.py | UTF-8 | 287 | 3.78125 | 4 | [] | no_license | n = int(input("Digite um número: "))
while n < 100:
print("\t" + str(n))
n += 1
print("Laço encerrado...")
#você criou o que chamamos de “loop infinito”,
#isto é, o programa ficará executando este bloco de código até que o programa seja encerrado de maneira abrupta, | true |
1c662158b1ba718dd6cde1a03be2be54ccb41138 | Python | zzmcdc/CRNN.tf2 | /train.py | UTF-8 | 10,045 | 2.53125 | 3 | [
"MIT"
] | permissive | import argparse
import time
import os
import tensorflow as tf
from tensorflow import keras
from dataset import DatasetBuilder
from model import build_model
from losses import CTCLoss
from metrics import WordAccuracy
from tensorflow.keras import backend as K
import numpy as np
# ---------------------------------------------------------------------------
# Command-line interface.
# NOTE: parse_args() runs at import time, so importing this module without the
# required flags will exit; `args` and `localtime` are module-level globals
# presumably consumed by the training code further down the file — confirm.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
# Required: annotation file(s) describing the training images and labels.
parser.add_argument('-ta', '--train_ann_paths', type=str,
                    required=True,
                    help='The path of training data annnotation file.')
# Optional: annotation file(s) for validation between epochs.
parser.add_argument('-va', '--val_ann_paths', type=str,
                    help='The path of val data annotation file.')
# Input width drives the length of the CTC output sequence (see help text).
parser.add_argument('-w', '--img_width', type=int, default=100,
                    help='Image width, this parameter will affect the output '
                         'shape of the model, default is 100, so this model '
                         'can only predict up to 24 characters.')
parser.add_argument('-b', '--batch_size', type=int, default=256,
                    help='Batch size.')
parser.add_argument('-lr', '--learning_rate', type=float, default=0.001,
                    help='Learning rate.')
parser.add_argument('-e', '--epochs', type=int, default=30,
                    help='Num of epochs to train.')
parser.add_argument('--img_channels', type=int, default=1,
                    help='0: Use the number of channels in the image, '
                         '1: Grayscale image, 3: RGB image')
# Boolean flag: fold upper/lower case into one label set when present.
parser.add_argument('--ignore_case', action='store_true',
                    help='Whether ignore case.(default false)')
# Checkpoint path to warm-start from; charset may differ from the checkpoint's.
parser.add_argument('--restore', type=str,
                    help='The model for restore, even if the number of '
                         'characters is different')
parser.add_argument('--charset', type=str,
                    default='label.txt', help='the charset file')
args = parser.parse_args()
# Human-readable start timestamp; presumably used later to tag log/checkpoint
# directories — TODO confirm downstream usage.
localtime = time.asctime()
class WarmUpLearningRateScheduler(keras.callbacks.Callback):
    """Keras callback that linearly ramps the learning rate during warm-up.

    For the first ``warmup_batches`` batches the optimizer's learning rate is
    set to ``batch_count * init_lr / warmup_batches`` (a straight line from 0
    up to ``init_lr``); after that the callback stops touching the optimizer,
    so whatever schedule/optimizer default applies takes over.  Every batch's
    effective learning rate is recorded in ``self.learning_rates`` for later
    inspection (e.g. plotting).
    """

    def __init__(self, warmup_batches, init_lr, verbose=0):
        """Create the scheduler.

        Arguments:
            warmup_batches {int} -- Number of batches the warm-up ramp lasts.
            init_lr {float} -- Target learning rate reached after warm-up.

        Keyword Arguments:
            verbose {int} -- 0: quiet, 1: print each adjustment. (default: {0})
        """
        super().__init__()
        self.warmup_batches = warmup_batches
        self.init_lr = init_lr
        self.verbose = verbose
        # Number of batches completed so far (incremented in on_batch_end).
        self.batch_count = 0
        # Effective learning rate observed at the end of every batch.
        self.learning_rates = []

    def on_batch_begin(self, batch, logs=None):
        # Past the warm-up window: leave the optimizer's learning rate alone.
        if self.batch_count > self.warmup_batches:
            return
        ramped_lr = self.batch_count * self.init_lr / self.warmup_batches
        K.set_value(self.model.optimizer.lr, ramped_lr)
        if self.verbose > 0:
            print('\nBatch %05d: WarmUpLearningRateScheduler setting learning '
                  'rate to %s.' % (self.batch_count + 1, ramped_lr))

    def on_batch_end(self, batch, logs=None):
        self.batch_count += 1
        current_lr = K.get_value(self.model.optimizer.lr)
        self.learning_rates.append(current_lr)
def cosine_decay_with_warmup(global_step,
                             learning_rate_base,
                             total_steps,
                             warmup_learning_rate=0.0,
                             warmup_steps=0,
                             hold_base_rate_steps=0):
    """Cosine decay schedule with a linear warm up period.

    Follows SGDR (Loshchilov & Hutter, ICLR 2017,
    https://arxiv.org/abs/1608.03983): the learning rate grows linearly from
    warmup_learning_rate to learning_rate_base over warmup_steps, is
    optionally held at the base rate for hold_base_rate_steps, and then
    decays along a cosine curve, reaching zero around total_steps.

    Arguments:
        global_step {int} -- current global step.
        learning_rate_base {float} -- base learning rate after warmup.
        total_steps {int} -- total number of training steps.
    Keyword Arguments:
        warmup_learning_rate {float} -- starting rate for warmup. (default: {0.0})
        warmup_steps {int} -- number of warmup steps. (default: {0})
        hold_base_rate_steps {int} -- steps to hold the base rate before
            decaying. (default: {0})
    Returns:
        The learning rate for this step (numpy scalar); 0.0 once
        global_step exceeds total_steps.
    Raises:
        ValueError: if total_steps < warmup_steps, or (when warmup is on)
            learning_rate_base < warmup_learning_rate.
    """
    if total_steps < warmup_steps:
        raise ValueError('total_steps must be larger or equal to '
                         'warmup_steps.')
    decay_offset = global_step - warmup_steps - hold_base_rate_steps
    decay_span = float(total_steps - warmup_steps - hold_base_rate_steps)
    learning_rate = 0.5 * learning_rate_base * (
        1 + np.cos(np.pi * decay_offset / decay_span))
    if hold_base_rate_steps > 0:
        # Keep the base rate until the hold window has elapsed.
        learning_rate = np.where(global_step > warmup_steps + hold_base_rate_steps,
                                 learning_rate, learning_rate_base)
    if warmup_steps > 0:
        if learning_rate_base < warmup_learning_rate:
            raise ValueError('learning_rate_base must be larger or equal to '
                             'warmup_learning_rate.')
        # Linear ramp from warmup_learning_rate up to learning_rate_base.
        warmup_rate = ((learning_rate_base - warmup_learning_rate) / warmup_steps
                       * global_step + warmup_learning_rate)
        learning_rate = np.where(global_step < warmup_steps, warmup_rate,
                                 learning_rate)
    return np.where(global_step > total_steps, 0.0, learning_rate)
class WarmUpCosineDecayScheduler(keras.callbacks.Callback):
    """Cosine decay with warmup learning rate scheduler
    """
    def __init__(self,
                 learning_rate_base,
                 total_steps,
                 global_step_init=0,
                 warmup_learning_rate=0.0,
                 warmup_steps=0,
                 hold_base_rate_steps=0,
                 verbose=0):
        """Constructor for cosine decay with warmup learning rate scheduler.
        Arguments:
            learning_rate_base {float} -- base learning rate.
            total_steps {int} -- total number of training steps.
        Keyword Arguments:
            global_step_init {int} -- initial global step, e.g. from previous checkpoint.
            warmup_learning_rate {float} -- initial learning rate for warm up. (default: {0.0})
            warmup_steps {int} -- number of warmup steps. (default: {0})
            hold_base_rate_steps {int} -- Optional number of steps to hold base learning rate
                                        before decaying. (default: {0})
            verbose {int} -- 0: quiet, 1: update messages. (default: {0})
        """
        super(WarmUpCosineDecayScheduler, self).__init__()
        self.learning_rate_base = learning_rate_base
        self.total_steps = total_steps
        self.global_step = global_step_init
        self.warmup_learning_rate = warmup_learning_rate
        self.warmup_steps = warmup_steps
        self.hold_base_rate_steps = hold_base_rate_steps
        self.verbose = verbose
        # Learning rate actually applied on each batch, for later inspection.
        self.learning_rates = []
    def on_batch_end(self, batch, logs=None):
        """Advance the global step and record the rate that was just used.
        (The returned lr is not consumed by the framework.)"""
        self.global_step = self.global_step + 1
        lr = K.get_value(self.model.optimizer.lr)
        self.learning_rates.append(lr)
        return lr
    def on_batch_begin(self, batch, logs=None):
        """Set the optimizer's learning rate for the upcoming batch from the
        warmup + hold + cosine-decay schedule."""
        lr = cosine_decay_with_warmup(global_step=self.global_step,
                                      learning_rate_base=self.learning_rate_base,
                                      total_steps=self.total_steps,
                                      warmup_learning_rate=self.warmup_learning_rate,
                                      warmup_steps=self.warmup_steps,
                                      hold_base_rate_steps=self.hold_base_rate_steps)
        K.set_value(self.model.optimizer.lr, lr)
        if self.verbose > 0:
            print('\nBatch %05d: setting learning '
                  'rate to %s.' % (self.global_step + 1, lr))
# Build the training (and optional validation) dataset pipelines.
dataset_builder = DatasetBuilder(
    args.charset, args.img_width, args.img_channels, args.ignore_case)
train_ds, train_size = dataset_builder.build(
    args.train_ann_paths, True, args.batch_size)
print('Num of training samples: {}'.format(train_size))
print("num of label", dataset_builder.num_classes)
# Checkpoint filenames embed epoch and (val_)word_accuracy.
saved_model_prefix = '{epoch:03d}_{word_accuracy:.4f}'
if args.val_ann_paths:
    val_ds, val_size = dataset_builder.build(
        args.val_ann_paths, False, args.batch_size)
    print('Num of val samples: {}'.format(val_size))
    saved_model_prefix = saved_model_prefix + '_{val_word_accuracy:.4f}'
else:
    val_ds = None
saved_model_path = ('saved_models/{}/'.format(localtime) +
                    saved_model_prefix + '.h5')
os.makedirs('saved_models/{}'.format(localtime))
print('Training start at {}'.format(localtime))
model = build_model(dataset_builder.num_classes,
                    args.img_width, channels=args.img_channels)
model.compile(optimizer=keras.optimizers.SGD(args.learning_rate, momentum=0.9, clipnorm=1.0),
              loss=CTCLoss(), metrics=[WordAccuracy()])
if args.restore:
    # by_name + skip_mismatch lets a checkpoint trained with a different
    # charset (different output layer size) still initialise shared layers.
    model.load_weights(args.restore, by_name=True, skip_mismatch=True)
# NOTE(review): 975000 looks like a hard-coded training-set size; it should
# presumably be train_size -- confirm.
epoch_batch = 975000/args.batch_size
warm_up_lr = WarmUpCosineDecayScheduler(learning_rate_base=args.learning_rate,
                                        total_steps=args.epochs * epoch_batch,
                                        warmup_learning_rate=0.0,
                                        warmup_steps=epoch_batch,
                                        hold_base_rate_steps=4*epoch_batch)
callbacks = [warm_up_lr,
             tf.keras.callbacks.TerminateOnNaN(),
             keras.callbacks.ModelCheckpoint(saved_model_path),
             keras.callbacks.TensorBoard(log_dir='logs/{}'.format(localtime), profile_batch=0)]
model.fit(train_ds, epochs=args.epochs, callbacks=callbacks,
          validation_data=val_ds)
| true |
dc195daa5c3f2bd6c97af76a57c44248d9521108 | Python | diegothuran/rss | /postagem/News.py | UTF-8 | 363 | 2.71875 | 3 | [] | no_license | from lexical_analyzer import lexical
class News(object):
    """Container for one scraped news article and its metadata."""

    def __init__(self, abstract, news, date, link, title, media):
        # Store the article fields exactly as supplied by the caller.
        self.title = title
        self.media = media
        self.link = link
        self.date = date
        self.news = news
        self.abstract = abstract

    def set_categories(self):
        # Derive categories from the full article body via the lexical analyzer.
        self._categories = lexical(self.news)
7d4809461f300de099bbea5e40b35ba71edfdd31 | Python | hackathonprojs/waterchain | /datasync.py | UTF-8 | 499 | 2.78125 | 3 | [] | no_license | import time
import requests
import json
from io import StringIO
def main():
    """Poll data.txt once per second and forward new, non-empty contents
    (newlines stripped) as a query string to the local addData endpoint.
    Runs forever.
    """
    last_sent = ""
    while True:
        # Context manager guarantees the handle is closed every cycle;
        # the original kept the file open across the HTTP request.
        with open('data.txt', 'r') as data_file:
            payload = data_file.read().replace('\n', '')
        if payload != '' and payload != last_sent:
            last_sent = payload
            # Forward the raw contents as the query string, as before.
            # (The original also built an unused StringIO here; removed.)
            r = requests.get('http://localhost:8000/addData?' + payload)
            print(r.url)
        time.sleep(1)


if __name__ == "__main__":
    main()
| true |
d9a60715aec1c394718f02109f9a24160eaa5bc9 | Python | MaureenZOU/traffic_prediction | /Preprocessing/DeleteWeekend.py | UTF-8 | 2,304 | 3.078125 | 3 | [] | no_license | from datetime import date
import csv
def getLabel():
    """Read the header row of speeds_final.csv and return
    {column_index - 1: header_value}, skipping the first (row-label) column.

    The original looped over every row with dead counters and broke after
    the first; reading just the header with next() is equivalent.
    """
    with open('speeds_final.csv') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
        header = next(spamreader, [])  # [] -> empty dict for an empty file
    return {i: value for i, value in enumerate(header[1:])}
def getDateObject(label):
    """Convert each 'YYYYMMDD...' string in label (keyed 0..n-1) into a
    datetime.date keyed the same way.

    int() already handles leading zeros ('05' -> 5), so the original's
    per-character branching for months/days below 10 was unnecessary.
    """
    dateObject = {}
    for i in range(0, len(label)):
        day = label[i]
        dateObject[i] = date(int(day[0:4]), int(day[4:6]), int(day[6:8]))
    return dateObject
def getWeekendTag(dateObjects):
    """Return the set of indices (0..len-1) whose date falls on a
    Saturday or Sunday (ISO weekday 6 or 7)."""
    return {idx for idx in range(len(dateObjects))
            if dateObjects[idx].isoweekday() in (6, 7)}
def getDataMatrix():
    """Load speeds_final.csv into a list of row lists (all values as str)."""
    with open('speeds_final.csv') as csvfile:
        reader = csv.reader(csvfile, delimiter=',', quotechar='|')
        return [list(row) for row in reader]
def deleteWeekend(dataSet, tag):
    """Drop every data column whose (index - 1) is in tag, keeping the
    leading label column (index 0) of each row."""
    filtered_rows = []
    for row in dataSet:
        kept = [row[0]]
        kept.extend(cell for pos, cell in enumerate(row)
                    if pos != 0 and (pos - 1) not in tag)
        filtered_rows.append(kept)
    return filtered_rows
def writeTempFile(weekset):
    """Write the filtered matrix to speed_delete_weekend_final.csv."""
    with open('speed_delete_weekend_final.csv', 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=',',
                            quotechar='|', quoting=csv.QUOTE_MINIMAL)
        writer.writerows(weekset)
# Pipeline: read header dates, convert to date objects, find weekend
# column indices, load the full matrix, drop weekend columns, write result.
label=getLabel()
dateObject=getDateObject(label)
tag=getWeekendTag(dateObject)
dataSet=getDataMatrix()
weekset=deleteWeekend(dataSet,tag)
writeTempFile(weekset)
| true |
f29224a2a33e3d6af4e6c5ca29e2b466ba445dfa | Python | koger23/CSV-Merger | /modules/targetWidget.py | UTF-8 | 1,388 | 2.765625 | 3 | [] | no_license | from PySide2.QtWidgets import QWidget, QHBoxLayout, QLabel, QTextEdit, QPushButton, QFileDialog
class TargetWidget(QWidget):
    """Row widget (label + path text box + Browse button) for choosing the
    target CSV file that output will be written to."""
    def __init__(self):
        super(TargetWidget, self).__init__()
        mainLayout = QHBoxLayout()
        mainLayout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(mainLayout)
        lblTarget = QLabel("Target filepath:")
        mainLayout.addWidget(lblTarget)
        lblTarget.setMinimumWidth(80)
        # Text box holding the chosen target path; height capped so it
        # renders like a single-line field.
        self.txtTarget = QTextEdit()
        mainLayout.addWidget(self.txtTarget)
        self.txtTarget.setMaximumHeight(27)
        btnBrowse = QPushButton("Browse")
        mainLayout.addWidget(btnBrowse)
        btnBrowse.clicked.connect(self.browseTarget)
        self.txtTarget.textChanged.connect(self.autoChangeSize)
    def browseTarget(self):
        """Open a save-file dialog (CSV filter) and put the chosen path
        into the text box."""
        filePath = QFileDialog.getSaveFileName(filter=("Comma separated text files (*.csv)"))
        self.txtTarget.setText(filePath[0])
    def autoChangeSize(self):
        """Grow the widget when the typed path overflows the text box.
        NOTE(review): compares the widget's pixel height to the document
        height, then widens by the character count -- looks like a
        heuristic workaround; confirm intended behaviour."""
        textHeight = self.txtTarget.document().size().height()
        textLength = len(self.txtTarget.toPlainText())
        if self.txtTarget.height() < textHeight:
            self.setMinimumWidth(self.width() + textLength)
if __name__ == '__main__':
    # Manual smoke test: run the widget standalone.
    import sys
    from PySide2.QtWidgets import QApplication
    app = QApplication(sys.argv)
    window = TargetWidget()
    window.show()
    app.exec_()
2b5739d681db1324d38b30f2bcba2a1d1346399d | Python | FelicityN/gblearn | /gblearn/soap.py | UTF-8 | 2,885 | 2.84375 | 3 | [] | no_license | """Functions for generating the SOAP representation of a grain
boundary.
"""
import numpy as np
def S(a, b):
    """Distance induced by the SOAP similarity kernel between two SOAP
    vectors: :math:`d(a,b) = \\sqrt{K(a,a)+K(b,b)-2K(a,b)}` with
    :math:`K(x,y) = x \\cdot y`.
    """
    kaa = np.dot(a, a)
    kbb = np.dot(b, b)
    kab = np.dot(a, b)
    return np.sqrt(kaa + kbb - 2 * kab)
class SOAPCalculator(object):
    """Represents a set of unique SOAP parameters for which SOAP
    vectors can be calculated.
    Args:
        rcut (float): local environment finite cutoff parameter.
        nmax (int): bandwidth limits for the SOAP descriptor radial basis
          functions.
        lmax (int): bandwidth limits for the SOAP descriptor spherical
          harmonics.
        sigma (float): width parameter for the Gaussians on each atom.
        trans_width (float): distance over which the coefficients in the
          radial functions are smoothly transitioned to zero.
    Attributes:
        rcut (float): local environment finite cutoff parameter.
        nmax (int): bandwidth limits for the SOAP descriptor radial basis
          functions.
        lmax (int): bandwidth limits for the SOAP descriptor spherical
          harmonics.
        sigma (float): width parameter for the Gaussians on each atom.
        trans_width (float): distance over which the coefficients in the
          radial functions are smoothly transitioned to zero.
    """
    def __init__(self, rcut=5., nmax=12, lmax=12, sigma=0.5, trans_width=0.5):
        self.rcut = rcut
        self.nmax = nmax
        self.lmax = lmax
        self.sigma = sigma
        self.trans_width = trans_width
    def calc(self, atoms, central, basis=None):
        """Calculates a SOAP vector for the specified species and atomic
        positions.
        Args:
            atoms (quippy.Atoms): list of atoms to calculate the vector for.
            central (int): integer element number to set as the central atom type
              for the SOAP calculation.
            basis (list): of `int` defining which of the atoms in the *conventional*
              unit cell should be retained as a unique part of the basis.
        """
        # quippy is imported lazily so the module can load without it.
        import quippy
        import quippy.descriptors as descriptor
        # Descriptor string for a single-species SOAP; the doubled braces
        # {{{4:d}}} render as {Z} after .format().
        descstr = ("soap cutoff={0:.1f} n_max={1:d} l_max={2:d} "
                   "atom_sigma={3:.2f} n_species=1 species_Z={{{4:d}}} "
                   "Z={4:d} trans_width={5:.2f} normalise=F")
        # NOTE(review): the `central` parameter is documented above but never
        # used; Z is taken from the atoms object instead -- confirm intent.
        Z = np.unique(atoms.get_atomic_numbers())[0]
        D = descriptor.Descriptor
        descZ = D(descstr.format(self.rcut, self.nmax, self.lmax, self.sigma,
                                 Z, self.trans_width))
        # Neighbour connectivity must be (re)built for the descriptor cutoff.
        atoms.set_cutoff(descZ.cutoff())
        atoms.calc_connect()
        PZ = descZ.calc(atoms)
        if basis is not None:
            # Keep only the rows for the requested basis atoms.
            dZ = [PZ["descriptor"][b,:] for b in basis]
            return dZ
        else:
            return PZ
| true |
4b588c4c18de2e700dd9e45fc3af21139913f8b1 | Python | CoachEd/advent-of-code | /2019/day16/part1a.py | UTF-8 | 3,090 | 2.78125 | 3 | [] | no_license | import sys
import time
import math
from copy import copy, deepcopy
import cProfile, pstats
from io import StringIO
marr = [0, 1, 0, -1]  # base FFT pattern (kept for reference; redefined below)
# c1[v][m + 1] == v * m for digit v and multiplier m in {-1, 0, 1}.
# Retained for compatibility, though sum_row no longer needs the table.
c1 = [[digit * (mult - 1) for mult in range(3)] for digit in range(10)]


def sum_row(arr, row_num, slen):
    """Dot product of arr[:slen] with the FFT pattern for output row row_num.

    The pattern is [0, 1, 0, -1] with every element repeated (row_num + 1)
    times and the leading element dropped, so the signal decomposes into
    alternating +1 / -1 segments of length row_num + 1 separated by
    equally long zero segments.  Summing slices directly replaces the
    original per-element multiplier-table walk (and avoids shadowing the
    builtin `sum`).
    """
    seg = row_num + 1
    pos = row_num          # first +1 segment starts after row_num leading zeros
    total = 0
    positive = True
    while pos < slen:
        part = sum(arr[pos:pos + seg])
        total += part if positive else -part
        positive = not positive
        pos += 2 * seg     # skip the zero segment between signed segments
    return total
marr = [0, 1, 0, -1]
# Worked examples used to sanity-check sum_row:
#arr2 = [1,2,3,4,5,6,7,8]
#slen2 = len(arr2)
#print( sum_row(arr2,0,slen2)) # -4
#print()
#print( sum_row(arr2,1,slen2)) # -8
#print()
#print( sum_row(arr2,2,slen2)) #
#print()
#print( sum_row(arr2,3,slen2)) #
#print()
#sys.exit()
#pr = cProfile.Profile() # create a profile object
#pr.enable() # start profiling
#print()
# 33 seconds is still too slow
start_secs = time.time()
# Puzzle input signal.
str1='59765216634952147735419588186168416807782379738264316903583191841332176615408501571822799985693486107923593120590306960233536388988005024546603711148197317530759761108192873368036493650979511670847453153419517502952341650871529652340965572616173116797325184487863348469473923502602634441664981644497228824291038379070674902022830063886132391030654984448597653164862228739130676400263409084489497532639289817792086185750575438406913771907404006452592544814929272796192646846314361074786728172308710864379023028807580948201199540396460310280533771566824603456581043215999473424395046570134221182852363891114374810263887875638355730605895695123598637121'
#PART2
#The first seven digits of your initial input signal also represent the message offset.
#offset = int(str1[0:7])
#str1 = 10000 * str1 # PART 2
slen = len(str1) # 650
# Signal as a list of single digits.
s = [None] * slen
for i in range(0,slen):
    s[i] = int(str1[i])
phases=100
news=[None]*slen
# Run the FFT phases; each output digit is |pattern . signal| mod 10.
# NOTE(review): after the first phase `s = news` makes both names alias the
# SAME list, so later phases read partially-updated values while writing --
# likely should be `s = news[:]`; confirm against the expected answer.
for phs in range(1,phases+1):
    #print('phs: ' + str(phs))
    for i in range(0,slen):
        news[i] = abs(sum_row(s,i,slen)) % 10
    s=news
# Part 1 answer: first eight digits after all phases.
print(s[0:8])
print('phases: ' + str(phases))
# part 2
#ans = ''
#for i in range(offset,offset+8):
#	ans = ans + str(s[i])
#print(ans + ' , phases: ' + str(phases))
#print()
#pr.disable() # end profiling
# print out some stats.
#s = StringIO()
#sortby = 'cumulative'
#ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
#ps.print_stats()
#print(s.getvalue())
end_secs = time.time()
print('elapsed time: ' + str(end_secs - start_secs) + ' seconds')
| true |
2b5dda2532a12a60db5f34901ee99842acb6a8e5 | Python | mg-blvd/Kattis_Solutions | /Paragrams.py | UTF-8 | 276 | 3.640625 | 4 | [] | no_license | letterCount = []
from collections import Counter

# A string can be rearranged into a palindrome iff at most one character has
# an odd count; every additional odd-count character needs one fix, so the
# answer is (number of odd counts) - 1.  Counter replaces the original
# 26-slot table and also works for characters outside a-z.
character_counts = Counter(input())
odd_count = sum(1 for occurrences in character_counts.values() if occurrences % 2 == 1)
print(odd_count - 1)
| true |
a6e53190d78de23db0b1d2da850735bc434a78f3 | Python | yanbinbi/leetcode | /101-200/164.py | UTF-8 | 1,048 | 2.890625 | 3 | [] | no_license | class Solution(object):
def maximumGap(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
size = len(nums)
if size < 2:
return 0
min_n, max_n = min(nums), max(nums)
dist = max_n - min_n
if dist == 0:
return 0
num = int(dist/(size-1)) if dist%(size-1) == 0 else int(dist/(size-1))+1
buck = int((dist+1)/num) if (dist+1)%num == 0 else int((dist+1)/num)+1
min_arr, max_arr = [2147483647]*buck, [-2147483648]*buck
for n in nums:
buck_id = int((n-min_n)/num)
if min_arr[buck_id] > n:
min_arr[buck_id] = n
if max_arr[buck_id] < n:
max_arr[buck_id] = n
ret, i, j = 0, 0, 1
while j < buck:
if min_arr[j] == 2147483647:
j += 1
else:
if min_arr[j] - max_arr[i] > ret:
ret = min_arr[j] - max_arr[i]
i = j
j += 1
return ret
| true |
e5e6a5fac93589168e548aa0c26f3f56aa80da7c | Python | UofSSpaceTeam/robocluster | /tests/test_util.py | UTF-8 | 1,564 | 3.140625 | 3 | [
"ECL-2.0",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-unknown"
] | permissive | import random
from inspect import iscoroutinefunction
from robocluster.util import duration_to_seconds, as_coroutine
def test_duration_to_seconds():
    # numeric inputs pass through unchanged
    some_int = random.randint(-10000, 10000)
    assert duration_to_seconds(some_int) == some_int
    some_float = random.random()
    assert duration_to_seconds(some_float) == some_float
    # string durations: minutes, seconds, milliseconds, then invalid input
    cases = [
        ('1 m', 60),
        ('1 minute', 60),
        ('2 minutes', 120),
        ('1 s', 1),
        ('1 second', 1),
        ('2 seconds', 2),
        ('1 ms', 0.001),
        ('1 millisecond', 0.001),
        ('2 milliseconds', 0.002),
        ('invalid', -1),
        ('1 ps', -1),
    ]
    for text, expected in cases:
        assert duration_to_seconds(text) == expected
def test_as_coroutine():
    def plain_function():
        pass

    async def coroutine_function():
        pass

    # both plain and async callables come back as coroutine functions
    assert iscoroutinefunction(as_coroutine(plain_function))
    assert iscoroutinefunction(as_coroutine(coroutine_function))
| true |
abe37dc90b7c97fa01e142323b0929402d539869 | Python | vedantg/Python_Online_Social_Network_Analysis | /Recommendation_Systems/a3.py | UTF-8 | 12,595 | 3.453125 | 3 | [] | no_license | # coding: utf-8
# Recommendation systems
#
# Here we'll implement a content-based recommendation algorithm.
# It will use the list of genres for a movie as the content.
# The data come from the MovieLens project: http://grouplens.org/datasets/movielens/
# Please only use these imports.
from collections import Counter, defaultdict
import math
import numpy as np
import os
import pandas as pd
import re
from scipy.sparse import csr_matrix
import urllib.request
import zipfile
def download_data():
    """ DONE. Download and unzip data.
    """
    # MovieLens "latest-small" mirror; dl=1 forces a direct download.
    url = 'https://www.dropbox.com/s/h9ubx22ftdkyvd5/ml-latest-small.zip?dl=1'
    urllib.request.urlretrieve(url, 'ml-latest-small.zip')
    # Extract into the current working directory (creates ml-latest-small/).
    zfile = zipfile.ZipFile('ml-latest-small.zip')
    zfile.extractall()
    zfile.close()
def tokenize_string(my_string):
    """ DONE. You should use this in your tokenize function.

    Lower-cases the string and returns the maximal runs of word characters
    and hyphens, e.g. 'Sci-Fi' -> ['sci-fi'].
    """
    # Raw string avoids the invalid-escape-sequence warning that the
    # non-raw '[\w\-]+' literal triggers on modern Python.
    return re.findall(r'[\w\-]+', my_string.lower())
def tokenize(movies):
    """
    Append a new column to the movies DataFrame with header 'tokens'.
    This will contain a list of strings, one per token, extracted
    from the 'genre' field of each movie. Use the tokenize_string method above.
    Note: you may modify the movies parameter directly; no need to make
    a new copy.
    Params:
      movies...The movies DataFrame
    Returns:
      The movies DataFrame, augmented to include a new column called 'tokens'.
    >>> movies = pd.DataFrame([[123, 'Horror|Romance'], [456, 'Sci-Fi']], columns=['movieId', 'genres'])
    >>> movies = tokenize(movies)
    >>> movies['tokens'].tolist()
    [['horror', 'romance'], ['sci-fi']]
    """
    # DataFrame.set_value (used by the original row loop) was removed in
    # pandas 1.0; a vectorized apply builds the same column of token lists.
    movies['tokens'] = movies['genres'].apply(tokenize_string)
    return movies
def cal_unique_features(movies):  # num_features
    """Return the sorted list of unique genre tokens appearing anywhere in
    movies['tokens']."""
    unique_tokens = set()
    # Iterating the column directly avoids the per-row iterrows overhead.
    for token_list in movies['tokens']:
        unique_tokens.update(token_list)
    return sorted(unique_tokens)
def cal_unique_vocab(get_h):
    """Map each term in get_h (assumed sorted) to its position, i.e.
    {term: index}.  enumerate replaces the manual counter loop."""
    return {term: position for position, term in enumerate(get_h)}
def cal_unique_docs(h, movies):  # df(i)
    """Document frequency: for each term in h, the number of movies whose
    token list contains that term (0 if it never appears).

    One Counter pass over the rows replaces the original O(|h| * rows)
    rescan; set() deduplicates tokens within a row so each document is
    counted at most once per term.
    """
    doc_freq = Counter()
    for token_list in movies['tokens']:
        doc_freq.update(set(token_list))
    return {term: doc_freq[term] for term in h}
def get_tf_value(index_dict, tok, ind):
    """Return the term frequency recorded for document `ind` in the
    (doc_index, count) pairs of index_dict[tok]; None if absent."""
    for doc_index, count in index_dict[tok]:
        if doc_index == ind:
            return count
def featurize(movies):
    """
    Append a new column to the movies DataFrame with header 'features'.
    Each row will contain a csr_matrix of shape (1, num_features). Each
    entry in this matrix will contain the tf-idf value of the term, as
    defined in class:
    tfidf(i, d) := tf(i, d) / max_k tf(k, d) * log10(N/df(i))
    where:
    i is a term
    d is a document (movie)
    tf(i, d) is the frequency of term i in document d
    max_k tf(k, d) is the maximum frequency of any term in document d
    N is the number of documents (movies)
    df(i) is the number of unique documents containing term i
    Params:
      movies...The movies DataFrame
    Returns:
      A tuple containing:
      - The movies DataFrame, which has been modified to include a column named 'features'.
      - The vocab, a dict from term to int. Make sure the vocab is sorted alphabetically as in a2 (e.g., {'aardvark': 0, 'boy': 1, ...})
    """
    ###TODO
    movies['features'] = ""
    get_h = set()
    vocab_dict = {}
    df_dict_return = {}
    tup_list = []
    index_dict = {}
    index_dict_1 = {}
    movie_len = len(movies)
    #print("MovieLength::",movie_len)
    #print("MOVIES:::",movies)
    get_h = cal_unique_features(movies) # num_features
    vocab_dict = cal_unique_vocab(get_h) # vocab complete
    len_vocab = len(get_h)
    df_dict_return = cal_unique_docs(get_h,movies) # df(i)
    # Build index_dict: term -> [(row_index, tf), ...] for every row, and
    # index_dict_1: row_index -> max term frequency in that row (max_k tf(k,d)).
    for token in get_h :
        #tup_list.clear()
        #print("token_GOTTTTT:::",token)
        for index,row in movies.iterrows():
            #print("row_got::",row)
            gen_list = row['tokens']
            #print("gen_list::",gen_list)
            #mov_id = row['movieId']
            #print("mov_id::",mov_id)
            token_count_1 = Counter(gen_list).most_common()[:1]
            tok = token_count_1[0]
            index_dict_1[index] = tok[1]
            token_count = gen_list.count(token)
            #print("token_count::",token_count)
            tup = (index,token_count)
            #print("tuple::",tup)
            tup_list.append(tup)
        #print("LIST_PRINT:::::::::::::",tup_list)
        index_dict[token] = tup_list
        tup_list = []
    #print("INDEX_DICT:::",index_dict) # tf(i,d)
    #print("INDEX_DICT_1:::",index_dict_1) # max_k dict per docx
    # Assemble one 1 x len_vocab sparse tf-idf row per movie.
    for ind, row in movies.iterrows():
        data_list = []
        rows_list = []
        columns_list = []
        gen_list = row['tokens']
        #print("TOKENS GOTT::",gen_list)
        for gen in gen_list:
            tf = get_tf_value(index_dict,gen,ind)
            #print("TF GOTTT::",tf)
            tf_weight = float( tf / index_dict_1[ind])
            #print("tf_weight::",tf_weight)
            df_weight = float( math.log10( movie_len / df_dict_return[gen] ) )
            #print("df_weight::",df_weight)
            final_tfidf = tf_weight * df_weight
            #print("final_tfidf::",final_tfidf)
            data_list.append(final_tfidf)
            columns_list.append(vocab_dict[gen])
            rows_list.append(0)
        csr = csr_matrix((data_list, (rows_list,columns_list)), shape=(1,len_vocab))
        #print("TYPE of CSR GOTT::",type(csr))
        #print("CSR GOTT:::",csr)
        # NOTE(review): DataFrame.set_value was removed in pandas 1.0;
        # movies.at[ind, 'features'] is the modern equivalent.
        movies.set_value(ind, 'features', csr)
        #print("UPDATE movies::",movies)
    return(movies,vocab_dict)
    pass
def train_test_split(ratings):
    """DONE.
    Returns a random split of the ratings matrix into a training and testing set.
    """
    # Every 1000th row (0, 1000, 2000, ...) is held out for testing.
    held_out = set(range(0, len(ratings), 1000))
    remaining = sorted(set(range(len(ratings))) - held_out)
    return ratings.iloc[remaining], ratings.iloc[sorted(held_out)]
def cosine_sim(a, b):
    """
    Compute the cosine similarity between two 1-d csr_matrices.
    Each matrix represents the tf-idf feature vector of a movie.
    Params:
      a...A csr_matrix with shape (1, number_features)
      b...A csr_matrix with shape (1, number_features)
    Returns:
      The cosine similarity (a Python float), defined as:
      dot(a, b) / ||a|| * ||b||
      where ||a|| indicates the Euclidean norm (aka L2 norm) of vector a.
    """
    # Flatten both rows to 1-d; the original reshaped b to (22, 1), which
    # hard-coded the feature count and broke for any other vocabulary size.
    a = a.toarray().ravel()
    b = b.toarray().ravel()
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
def make_predictions(movies, ratings_train, ratings_test):
    """
    Using the ratings in ratings_train, predict the ratings for each
    row in ratings_test.
    To predict the rating of user u for movie i: Compute the weighted average
    rating for every other movie that u has rated. Restrict this weighted
    average to movies that have a positive cosine similarity with movie
    i. The weight for movie m corresponds to the cosine similarity between m
    and i.
    If there are no other movies with positive cosine similarity to use in the
    prediction, use the mean rating of the target user in ratings_train as the
    prediction.
    Params:
      movies..........The movies DataFrame.
      ratings_train...The subset of ratings used for making predictions. These are the "historical" data.
      ratings_test....The subset of ratings that need to predicted. These are the "future" data.
    Returns:
      A numpy array containing one predicted rating for each element of ratings_test.
    """
    ###TODO
    user_result = []
    for index,row in ratings_test.iterrows():
        userid_test = row['userId']
        #print("userid_test::",userid_test)
        movieid_test = row['movieId']
        #print("movieid_test::",movieid_test)
        # tf-idf feature vector of the target movie.
        x = list(movies[movies.movieId==movieid_test]['features'])[0]
        #print("CSR_GOTT+X::",x)
        #print("TYPE of CSR_GOTT_X::",type(x))
        # All historical ratings by this user.
        subset_train = ratings_train[ratings_train.userId == userid_test]
        #print("SUB MOVIE SET::",subset_train)
        #print("TYPE of SUB MOVIE SET::",type(x))
        total_if_zero=0
        rating_if_zero=0
        sum_main_result=0
        sum_cosine=0
        for index1,row1 in subset_train.iterrows():
            userid_train = row1['userId']
            #print("userid_train::",userid_train)
            # NOTE(review): this check is always true -- subset_train is
            # already filtered to userid_test above.
            if(userid_test == userid_train ):
                #print("HII IN IFFF:::")
                movieid_train = row1['movieId']
                #print("movieid_train::",movieid_train)
                rating_train = row1['rating']
                #print("rating_train::",rating_train)
                total_if_zero = total_if_zero + 1
                rating_if_zero = rating_if_zero + rating_train
                y = list(movies[movies.movieId==movieid_train]['features'])[0]
                #print("CSR_GOTT_Y::",y)
                #print("TYPE of CSR_GOTT_Y::",type(y))
                result_cos = cosine_sim(x,y)
                # NOTE(review): the docstring restricts the weighted average
                # to POSITIVE cosine similarities, but negative similarities
                # are accumulated here too -- confirm intent.
                sum_main_result += result_cos * rating_train
                sum_cosine += result_cos
        # Fallback (user mean) only when the weighted sum is exactly zero.
        if(sum_main_result != 0):
            user_result.append(sum_main_result/sum_cosine)
            #print("user_result::",user_result)
        else:
            user_result.append(rating_if_zero / total_if_zero)
            #print("user_result::",user_result)
    return_result_arr = np.array(user_result)
    return return_result_arr
    pass
def mean_absolute_error(predictions, ratings_test):
    """DONE.
    Return the mean absolute error of the predictions.
    """
    truth = np.array(ratings_test.rating)
    return np.abs(predictions - truth).mean()
def main():
    """Download the MovieLens data, featurize the movies, and report the
    mean absolute error of the content-based predictions."""
    download_data()
    path = 'ml-latest-small'
    ratings = pd.read_csv(path + os.path.sep + 'ratings.csv')
    movies = pd.read_csv(path + os.path.sep + 'movies.csv')
    movies = tokenize(movies)
    movies, vocab = featurize(movies)
    print('vocab:')
    print(sorted(vocab.items())[:10])
    ratings_train, ratings_test = train_test_split(ratings)
    print('%d training ratings; %d testing ratings' % (len(ratings_train), len(ratings_test)))
    predictions = make_predictions(movies, ratings_train, ratings_test)
    print('error=%f' % mean_absolute_error(predictions, ratings_test))
    print(predictions[:10])
if __name__ == '__main__':
    main()
| true |
e51fb65dd8795d2c10b46374e56d9a12f868a630 | Python | cmartinezbjmu/DatosEnormes | /Script/twitter/cotweet/apps/panel.py | UTF-8 | 5,967 | 2.5625 | 3 | [] | no_license | import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
import dash_table
import re
import psycopg2
from app import app
from time import sleep
from bson.objectid import ObjectId
# NOTE(review): the `app` imported from the app module above is immediately
# shadowed by this new Dash instance -- confirm which one is intended.
app = dash.Dash(__name__)
# NOTE(review): Dash exposes `app.title`; `titulo` is a custom attribute and
# will not change the browser tab title -- confirm intent.
app.titulo = "Panel de control"
# Free-form description text (Spanish) for use elsewhere in the app.
app.explanation = '''
Desde esta página podras obtener los datos más relevantes de la
base de datos, así como actualizar la base de datos obteniendo
más tweets, también puedes calibrar los modelos de predicción
que corresponden al análisis de sentiemientos, tendencia y coherencia
'''
# Page layout: one row with a left control column (country selector, model
# training controls, data-collection button) and a right column holding
# summary counters plus a graph.
app.layout = html.Div(
    [
        html.Div(
            [
                # Left column: filters and actions.
                html.Div(
                    [
                        html.P(
                            "Escoja el país para obtener los datos más relevantes",
                            className="control_label",
                        ),
                        dcc.RadioItems(
                            id="panel-seleccion",
                            options=[
                                {"label": "Colombia ", "value": "COL"},
                                {"label": "Argentina ", "value": "ARG"},
                            ],
                            # NOTE(review): "active" is not one of the
                            # declared option values (COL/ARG) -- confirm.
                            value="active",
                            labelStyle={"display": "inline-block"},
                            className="dcc_control",
                        ),
                        dcc.Checklist(
                            id="panel-balanceo",
                            options=[{"label": "Balancear muestra", "value": 1}],
                            className="dcc_control",
                            value=[],
                        ),
                        dcc.Dropdown(
                            id="panel-tipo-modelo",
                            options=[
                                {'label': 'Modelo de Sentimientos', 'value': 'emocion'},
                                {'label': 'Modelo de Tendencia', 'value': 'tendencia'},
                                {'label': 'Modelo de coherencia', 'value': 'coherencia'},
                            ],
                            placeholder="¿Qué modelo quieres re-entrenar?",
                            clearable=False,
                            className="dcc_control",
                        ),
                        dcc.Dropdown(
                            id="panel-modelos",
                            options=[
                                {'label': 'Naive Bayes Multinomial', 'value': 'NB'},
                                {'label': 'Random Forest', 'value': 'RF'},
                                {'label': 'Regresión Logística', 'value': 'LR'},
                                {'label': 'Soporte Vectorial', 'value': 'SV'}
                            ],
                            placeholder="Escoje el algoritmo ",
                            clearable=False,
                            className="dcc_control",
                        ),
                        html.P(
                            "Pare re - entrenar los modelos haga click en el botón",
                            className="control_label",
                        ),
                        html.Button("Calibrar modelo", id="panel-correr-modelo"),
                        html.P(
                            "Pare obtener más tweets para el estudio haga click en el botón",
                            className="control_label",
                        ),
                        html.Button("Recolectar tweets", id="panel-recolectar"),
                        # Status/result message, filled in by a callback.
                        html.P(
                            id='panel-exito',
                            className="control_label",
                        ),
                    ],
                    className="pretty_container four columns",
                    id="panel-cross-filter-options",
                ),
                # Right column: counters row + graph.
                html.Div(
                    [
                        html.Div(
                            [
                                ############################
                                ### Mini containers ########
                                ############################
                                html.Div(
                                    [html.H6(id="panel-tweets"), html.P("No. de Tweets")],
                                    className="mini_container",
                                ),
                                html.Div(
                                    [html.H6(id="panel-cuentas"), html.P("Tweets de Influencers")],
                                    className="mini_container",
                                ),
                                html.Div(
                                    [html.H6(id="panel-comentarios"), html.P("Comentarios")],
                                    className="mini_container",
                                ),
                                html.Div(
                                    [html.H6(id="panel-citas"), html.P("Citas")],
                                    className="mini_container",
                                ),
                            ],
                            id="panel-info-container",
                            className="row container-display",
                        ),
                        html.Div(
                            [dcc.Graph(id="panel-graph")],
                            className="pretty_container",
                        ),
                    ],
                    id="panel-right-column",
                    className="seven columns",
                ),
            ],
            className="row flex-display",
        ),
    ],
    id="panel-mainContainer",
    style={"display": "flex", "flex-direction": "column"},
)
| true |
302a6984dadc481451e3d31bce5607f532f02a62 | Python | ananya/HackerSkills | /Solutions/nab331/Task7/script.py | UTF-8 | 793 | 2.671875 | 3 | [] | no_license |
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import csv
import os

# Batch-generates personalised certificate images: one copy of
# sample_in.jpg per name listed in the first column of input.csv.

# NOTE(review): opening a CSV in binary mode ("rb") only works on Python 2;
# Python 3's csv module needs text mode (newline='') -- confirm target version.
participants = []
with open("input.csv","rb") as f:
    reader = csv.reader(f)
    for row in reader:
        participants.append( row[0] )

# Font and output locations resolved relative to this script's parent dir.
fonts_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'fonts')
font = ImageFont.truetype(os.path.join(fonts_path, 'best_font.ttf'), 80)
output_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'Output')
i=1
for name in participants:
    img = Image.open("sample_in.jpg")
    draw = ImageDraw.Draw(img)
    # Centre the rendered name around the fixed anchor point (1065, 510).
    fsize_x , fsize_y = draw.textsize(name,font)
    x = 1065-(fsize_x)/2
    y = 510-(fsize_y)/2
    draw.text( (x , y) ,name,(0,0,0),font)
    output_name = os.path.join( output_path , str(i)+" - "+name+".jpg" )
    img.save(output_name)
    i+=1
0c709824613fc2548ffcce9ab81954479d1d7921 | Python | hdqlife/DataAnalyst | /子图.py | UTF-8 | 1,553 | 3.5 | 4 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
# Create the figure (canvas).
p1=plt.figure(figsize=(8,6))
# Add subplots.
# p1.add_subplot(2,1,1)  # first plot in a 2x1 grid
x=np.arange(10)
# y=-x+2
# plt.plot(x,y)
# p1.add_subplot(2,3,5)  # fifth plot in a 2x3 grid (rows, cols, index)
y1=x**2-2*x
# plt.plot(x,y1)
# plt.show()
#
# ## Scatter plot demo in the second row of a 2x1 grid.
p1.add_subplot(2,1,2)
y2=x**2-2*x  # NOTE(review): y2 is computed but never used
plt.scatter(x,y1,marker="d",color='yellow')
plt.show()
# ============================ National economic accounts (GDP) ============================
data=np.load(r'E:\DataAnalyst\数据分析表\国民经济核算季度数据.npz',allow_pickle=True)
plt.rcParams['font.sans-serif']='SimHei' # use SimHei as the default font so Chinese labels render
plt.rcParams['axes.unicode_minus']=False
# print(list(data))
columns=data['columns']
values=data['values']
print(values)
## Scatter plot of quarterly GDP for each quarter, 2000-2017.
# String columns cannot be plotted directly, so numeric columns are selected.
# Rows are laid out one quarter per row, so a stride-4 slice picks one quarter.
x=values[::4,0]
y=values[::4,2]
plt.figure(figsize=(10,8))
plt.scatter(x,y,marker="d")
x=values[1::4,0]
y=values[1::4,2]
plt.scatter(x,y,marker="d")
x=values[2::4,0]
y=values[2::4,2]
plt.scatter(x,y,marker="d")
x=values[3::4,0]
y=values[3::4,2]
plt.scatter(x,y,marker="d")
plt.title('2000年到2017年各季度国民生产总值散点图')
plt.legend(['第一季度','第二季度','第三季度','第四季度'])
plt.xlabel('季度')
plt.ylabel('生产总值')
# plt.xticks(values[0::4,0],values[0::4,1],rotation='45')
plt.xticks(values[0::4,0],np.arange(2000,2018,1),rotation='45') # replace the x ticks with the year labels we want to display
plt.show()
c7c32201fe6e9d2e4d1cfa31792dfa392a4e135f | Python | SaketVats95/PythonDataScience | /LogicBuilding/unzipAllFileInsideFolder.py | UTF-8 | 1,306 | 3.171875 | 3 | [] | no_license | import os,zipfile
#dir_name = 'D:\\abc'
def RemoveAllZipFile(list_zip):
    # Clean-up helper: delete every archive path in list_zip from disk,
    # logging each removal as it happens.
    for archive_path in list_zip:
        os.remove(archive_path)
        print(archive_path," file deleted as the unzipping completed")
# Interactive utility: walk a user-supplied directory, extract every *.zip
# archive into a sibling folder of the same name, then optionally delete the
# archives afterwards.
dir_name=input("Enter the Directory Name::::")
dir_name=dir_name.replace('\\','\\')  # NOTE(review): replaces '\' with '\' -- a no-op; verify the intent
extension = ".zip"
list_of_all_zip=[]  # full paths of archives that were extracted successfully
os.chdir(dir_name) # change directory from working dir to dir with files
try:
    for path,dir_list,file_list in os.walk(dir_name): # loop through items in dir
        for file_name in file_list:
            if file_name.endswith(extension): # check for ".zip" extension
                file_name_path = os.path.join(path,file_name) # get full path of files
                print(file_name_path, " file unzipping started")
                zip_ref = zipfile.ZipFile(file_name_path) # create zipfile object
                # Target folder: the archive path with ".zip" dropped.
                # NOTE(review): str.replace removes every ".zip" occurrence in
                # the path, not just the trailing one -- confirm acceptable.
                newDirName=file_name_path.replace(".zip","")
                os.mkdir(newDirName)
                zip_ref.extractall(newDirName) # extract file to dir
                #os.listdir(newDirName).__contains__()
                list_of_all_zip.append(file_name_path)
                zip_ref.close() # close file
                print(file_name_path, " file unzipping completed")
except Exception as e:
    # hasattr check keeps this tolerant of Python-2-style exception objects.
    if hasattr(e,'message'):
        print(e.message)
    else:
        print(e)
NeedToDelete=input("Enter [Y/N] if you want to delete all zip files:")
if NeedToDelete.lower()=="y":
    RemoveAllZipFile(list_of_all_zip)
265052c33741cb6457e048ae1613d9755d40da44 | Python | futureprecd/yandepachong | /Http.py | UTF-8 | 563 | 2.65625 | 3 | [] | no_license | import urllib.request
def get(url: str, header: dict = None):
    """
    Perform an HTTP GET request.
    :param url: URL to request
    :param header: optional extra HTTP headers (never mutated)
    :return: raw response body as bytes
    """
    # Work on a copy: the original signature used a shared mutable default
    # ({}, mis-annotated as list) and mutated whatever dict the caller passed.
    headers = dict(header) if header else {}
    headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
    headers['Accept-Language'] = 'zh-CN,zh;q=0.8,en;q=0.6'
    headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2883.103 Safari/537.36'
    req = urllib.request.Request(url, headers=headers)
    return urllib.request.urlopen(req).read()
1a93a1513dd50ae13ecace4ce90c959a46839af9 | Python | elhamsyahrianputra/Pemrograman-Terstruktur | /Chapter 05/Chapter05_Praktikum2_13.py | UTF-8 | 157 | 3.390625 | 3 | [] | no_license | from random import randint
# Keep drawing random integers in [0, 10] until a 5 appears,
# printing every draw and finally the number of draws taken.
jumlah = 0
bil = None
while bil != 5:
    bil = randint(0, 10)
    jumlah = jumlah + 1
    print(bil)
print("Jumlah perulangan : ", jumlah)
0efc57b80811f1901db7c5d0c5ccc7777b903f05 | Python | k8440009/Algorithm | /leetcode/121. Best Time to Buy and Sell Stock_0_2_1.py | UTF-8 | 583 | 3.859375 | 4 | [] | no_license | """
주식을 사고팔기 가장 좋은 시점
https://leetcode.com/problems/best-time-to-buy-and-sell-stock/
저점과 현재 값과의 차이 계산
"""
import sys
from typing import List
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Best profit from a single buy-then-sell; 0 if no profitable trade.

        Tracks the running minimum price and the best (price - minimum)
        spread seen so far, in one O(n) pass.
        Fix over the original: an empty price list now returns 0 instead of
        -sys.maxsize.
        """
        best_profit = 0
        lowest = sys.maxsize  # lowest price seen so far
        for price in prices:
            lowest = min(lowest, price)
            best_profit = max(best_profit, price - lowest)
        return best_profit
# Ad-hoc smoke test: a strictly decreasing price list should yield profit 0.
a = Solution()
# prices = [7,1,5,3,6,4]
prices = [7, 6, 4, 3, 1]
print(a.maxProfit(prices))
| true |
8a335cbcf1c6465101b6e05b0757a2bdbbd599e7 | Python | cair/deep-line-wars | /deep_line_wars/player.py | UTF-8 | 7,424 | 2.78125 | 3 | [
"MIT"
] | permissive | import copy
from os.path import realpath, dirname
from . import action_space
dir_path = dirname(realpath(__file__))
class AgentList:
    """Round-robin container of AI agents; `next` advances the selection."""

    def __init__(self):
        self.agents = []
        self.sel_idx = 0

    def next(self):
        # Advance the round-robin pointer by one position.
        self.sel_idx = self.sel_idx + 1

    def append(self, agent):
        self.agents.append(agent)

    def get(self):
        # Currently selected agent (wrapping around), or None when empty.
        if not self.has_agent():
            return None
        return self.agents[self.sel_idx % len(self.agents)]

    def has_agent(self):
        return bool(self.agents)
class Player:
    """One side of a Deep Line Wars match.

    Holds the player's economy (gold/lumber/income), units, buildings and
    the action-space interface used by both human input and AI agents.
    """
    def __init__(self, player_id, game, action_space=action_space.StandardActionSpace):
        """Set up static per-player data; mutable episode state is set in reset()."""
        # Game Reference
        self.game = game
        # Player identification
        self.id = player_id
        # List of AI Agents
        self.agents = AgentList()
        # The player action_space
        self.action_space = action_space(self.game)
        # Player opponent
        self.opponent = None
        # Income frequency (converted from seconds to game ticks)
        self.income_frequency = game.config.mechanics.income_frequency * game.config.mechanics.ticks_per_second
        # Direction of units (+1 moves right for player 1, -1 moves left for player 2)
        self.direction = 1 if player_id == 1 else -1
        # Colors
        self.color = (255, 0, 0) if player_id == 1 else (0, 0, 255)
        self.cursor_colors = (95, 252, 242) if player_id == 1 else (212, 247, 86)
        # Spawn position for player
        self.spawn_x = 0 if player_id == 1 else self.game.width - 1
        # Goal position for player
        self.goal_x = self.game.width - 1 if player_id == 1 else 0
        # The calculated territory of the player, is a square
        # (pixel rectangle: x, y, width, height with 32px tiles)
        self.territory = (
            1 * 32 if self.direction == 1 else (game.state.center_area[-1] + 1) * 32,
            0,
            (game.state.center_area[0] - 1) * 32 if self.direction == 1 else ((game.width - 1) - (
                    game.state.center_area[-1] + 1)) * 32,
            game.height * 32
        )
        # Episode variables. Resets every episode.
        self.health = None
        self.gold = None
        self.lumber = None
        self.income = None
        self.level = None
        self.units = None
        self.buildings = None
        self.spawn_queue = None
        self.stat_spawn_counter = None
        self.income_counter = None
        self.virtual_cursor_x = None
        self.virtual_cursor_y = None
        self.reset()
    def reset(self):
        """Restore all per-episode state to its starting values."""
        self.health = self.game.config.mechanics.start_health
        self.gold = self.game.config.mechanics.start_gold
        self.lumber = self.game.config.mechanics.start_lumber
        self.income = self.game.config.mechanics.start_income
        self.level = 0
        self.units = []
        self.buildings = []
        self.spawn_queue = []
        self.stat_spawn_counter = 0
        self.income_counter = self.income_frequency
        self.virtual_cursor_x = self.spawn_x
        self.virtual_cursor_y = int(self.game.height / 2)
    def set_cursor(self, x, y):
        """Move the virtual cursor by the (x, y) offset, clamped to the map.

        NOTE(review): the offsets are applied relative to
        game.selected_player's cursor, not this player's -- confirm intended.
        """
        # Cannot perform action when game has ended.
        if self.game.winner:
            return False
        self.virtual_cursor_x = max(min(self.game.width - 1, self.game.selected_player.virtual_cursor_x + x), 0)
        self.virtual_cursor_y = max(min(self.game.height - 1, self.game.selected_player.virtual_cursor_y + y), 0)
    def available_buildings(self):
        """Buildings from the shop unlocked at the player's current level."""
        return [b for b in self.game.building_shop if b.level <= self.level]
    def rel_pos_to_abs(self, x, y):
        """Mirror a player-relative x coordinate into absolute map space."""
        if self.direction == -1:
            return self.game.width - x - 1, y
        return x, y
    def get_building_idx(self, idx, last_if_error=True):
        """Return the idx-th available building; out-of-range falls back to
        the last one when last_if_error is True (otherwise returns None)."""
        avb = self.available_buildings()
        try:
            return avb[idx]
        except IndexError as e:
            if last_if_error:
                return avb.pop()
    def get_score(self):
        """Scalar score combining spawn count, income and level."""
        return (self.stat_spawn_counter + self.income) * (100 * self.level)
    def can_afford_unit(self, u):
        """True when the player's gold covers the unit's gold cost."""
        if u.gold_cost <= self.gold:
            return True
        return False
    def available_units(self):
        """Units from the shop unlocked at the player's current level."""
        return [u for u in self.game.unit_shop if u.level <= self.level]
    def can_afford_idx(self, idx):
        """Affordability check for the idx-th available building; an
        out-of-range idx is checked against the last available building."""
        available_buildings = self.available_buildings()
        if idx >= len(available_buildings):
            if available_buildings.pop().gold_cost > self.gold:
                return False
            return True
        if available_buildings[idx].gold_cost > self.gold:
            return False
        return True
    def do_action(self, a):
        """Dispatch action `a` through the action space; no-op after game end."""
        # Cannot perform action when game has ended.
        if self.game.winner:
            return False
        return self.action_space.perform(a)
    def update(self):
        """Per-tick update: income, queued spawns, and unit processing."""
        ##############################################
        ##
        ## Income Logic
        ##
        ##############################################
        # Decrements counter each tick and fires income event when counter reaches 0
        self.income_counter -= 1
        if self.income_counter == 0:
            self.gold += self.income
            self.income_counter = self.income_frequency
        # Spawn Queue Logic
        # Spawn queued units (one per tick)
        if self.spawn_queue:
            self.spawn_queue.pop().spawn(self)
        # Process units and buildings
        # NOTE(review): removing from self.units while iterating it can skip
        # the element following a despawned unit -- verify this is acceptable.
        for unit in self.units:
            unit.update()
            if unit.despawn:
                unit.remove()
                self.units.remove(unit)
    def increase_gold(self, amount):
        """Add `amount` of gold to the player's purse."""
        self.gold += amount
    def levelup(self):
        """Spend gold/lumber to advance to the next level, if affordable.

        NOTE(review): self.levels is never initialised in this class --
        presumably assigned externally; confirm before calling.
        """
        # Level-up logic
        next_level = self.levels[0]
        if self.gold >= next_level[0] and self.lumber >= next_level[1]:
            self.gold -= next_level[0]
            self.lumber -= next_level[1]
            self.level += 1
            self.levels.pop(0)
        else:
            print("Cannot afford levelup!")
    def enemy_unit_reached_red(self):
        """True when an opponent unit occupies this player's spawn column."""
        my_spawn = self.spawn_x
        if self.opponent.id in self.game.map[2][my_spawn]:
            return True
        return False
    def enemy_unit_reached_base(self, u):
        """True when unit `u` has crossed the map midpoint into this player's half."""
        if self.direction == 1 and u.x < self.game.mid[0]:
            return True
        elif self.direction == -1 and u.x > self.game.mid[0]:
            return True
        else:
            return False
    def build(self, x, y, building):
        """Attempt to place `building` at tile (x, y); returns True on success.

        Fails on: building outside own territory (unless build_anywhere),
        edge columns, occupied tiles, or insufficient gold.
        NOTE(review): uses self.game.center_area here while __init__ reads
        game.state.center_area -- confirm both attributes exist on game.
        """
        # Restrict players from placing towers on mid area and on opponents side
        if self.direction == 1 and not all(
                i > x for i in self.game.center_area) and not self.game.config.mechanics.complexity.build_anywhere:
            return False
        elif self.direction == -1 and not all(
                i < x for i in self.game.center_area) and not self.game.config.mechanics.complexity.build_anywhere:
            return False
        elif x == 0 or x == self.game.width - 1:
            return False
        # Ensure that there is no building already on this tile (using layer 4 (Building Player Layer)
        if self.game.map[4][x, y] != 0:
            return False
        # Check if can afford
        if self.gold >= building.gold_cost:
            self.gold -= building.gold_cost
        else:
            return False
        # Place a copy so the shop prototype is never mutated.
        building = copy.copy(building)
        building.setup(self)
        building.x = x
        building.y = y
        # Update game state
        self.game.map[4][x, y] = building.id
        self.game.map[3][x, y] = self.id
        self.buildings.append(building)
        return True
| true |
08c625bc70abccc33190604b7a876238d393a264 | Python | mwenz27/python_misc | /charactersInWords.py | UTF-8 | 784 | 4 | 4 | [] | no_license | # write a function that takes a string and prints each character on a new line
import os
import random
file = '/Users/Wenz/Downloads/python_fundamentals-master/labs/07_file_io/words.txt'
with open(file, encoding='utf-8') as words:
contents = words.read()
words = contents.split() #create a list for words
def printChars():
    """Print 4 randomly chosen words: each as an uppercase banner followed by
    its letters laid out as a diagonal staircase, one letter per line."""
    for _ in range(4):
        # Fix: random.randint is inclusive on both ends, so the original
        # randint(0, len(words)) could yield len(words) -> IndexError.
        word = words[random.randint(0, len(words) - 1)]
        print("-------------------", word.upper(), "-------------------",)
        # enumerate gives the 0-based offset used to indent each letter.
        for count, letter in enumerate(word, 0):
            print(" "*18, " "*count, letter.upper(), end='\n')
#printChars() | true |
5db685ead092d638fee194d036f6342efdc697bc | Python | yliasemka/ISP3-4 | /authentication/email_thread.py | UTF-8 | 229 | 2.5625 | 3 | [] | no_license | import threading
class EmailThread(threading.Thread):
def __init__(self, email):
self.email = email
threading.Thread.__init__(self)
def run(self):
self.email.send(fail_silently=False)
| true |
a872904ad927caa194451c9e838ea6c4cf129b60 | Python | npsables/ppl3 | /assignment3-ver1.2/assignment3/initial/src/test/CheckSuite.py | UTF-8 | 8,030 | 2.859375 | 3 | [] | no_license |
import unittest
from TestUtils import TestChecker
from StaticError import *
from AST import *
class CheckSuite(unittest.TestCase):
    """Static-checker regression tests.

    Cases 1-26 were fully commented out upstream and have been removed as
    dead code; the four active cases (27-30) below are preserved verbatim
    in their inputs, expected outputs and test numbers.
    """

    def test_func_redeclared_27(self):
        """String concatenation (+.) type-checks into a String variable."""
        source = """
            Function main(){Let a:String; a = "2" +. "1";}
        """
        self.assertTrue(TestChecker.test(source, "", 27))

    def test_func_redeclared_28(self):
        """Numeric subtraction type-checks into a Number variable."""
        source = """
        Function main(){Let a:Number; a = 1 - 2;}
        """
        self.assertTrue(TestChecker.test(source, "", 28))

    def test_func_redeclared_29(self):
        """Boolean conjunction type-checks into a Boolean variable."""
        source = """
        Function main(){Let a:Boolean; a = True && False;}
        """
        self.assertTrue(TestChecker.test(source, "", 29))

    def test_func_redeclared_30(self):
        """Adding a Number to a String is reported as a type mismatch."""
        source = """
        Function main(){Let a:Number; a = 1+"2";}
        """
        self.assertTrue(TestChecker.test(
            source,
            "Type Mismatch In Expression: BinaryOp(+,NumberLiteral(1.0),StringLiteral(2))",
            30))
f44d66b2b4e1689b9870999826147a4ca3537235 | Python | Aasthaengg/IBMdataset | /Python_codes/p02576/s830344370.py | UTF-8 | 117 | 3.0625 | 3 | [] | no_license | N,X,T = (int(x) for x in input().split())
a = ( N // X ) * T
if N % X == 0:
print(a)
else:
print(a + T)
| true |
bcef9fe0843aae5cd8903075f0ee7218449b463c | Python | rpereira91/AI | /PSO/runner.py | UTF-8 | 1,668 | 3.078125 | 3 | [] | no_license | #############################
# Ralph Pereira #
# COSC3P71 Assignment 3 #
# Basic PSO #
#############################
from pso import PSO
def CreateTestFiles():
    """Run each PSO configuration once (plus a random-search baseline) and
    dump the per-run traces into files under the Test directory.

    File names use raw strings: the original "Test\\r1_" literal embedded a
    carriage return (the \\r escape) in the file name. r"..." keeps every
    backslash literal; the \\p-style names are byte-identical either way.
    """
    p1 = PSO(30,0.729844,1.496180,1.496180,200,5000)
    p2 = PSO(30,0.4,1.2,1.2,200,5000)
    p3 = PSO(30,1,2,2,200,5000)
    p4 = PSO(30,-1,2,2,200,5000)
    r1 = PSO(30,0.729844,1.496180,1.496180,200,5000)
    for i in range(1):
        f1 = r"Test\p1_" + str(i+1) + ".txt"
        f2 = r"Test\p2_" + str(i+1) + ".txt"
        f3 = r"Test\p3_" + str(i+1) + ".txt"
        f4 = r"Test\p4_" + str(i+1) + ".txt"
        p1.SolvePso(f1)
        p2.SolvePso(f2)
        p3.SolvePso(f3)
        p4.SolvePso(f4)
        # Fresh solver instances for any subsequent pass of the loop.
        p1 = PSO(30,0.729844,1.496180,1.496180,200,5000)
        p2 = PSO(30,0.4,1.2,1.2,200,5000)
        p3 = PSO(30,1,2,2,200,5000)
        p4 = PSO(30,-1,2,2,200,5000)
        rand = r"Test\r1_" + str(i+1) + ".txt"
        r1.RandomSearch(rand)
def CreateResults(x):
    """Average the best fitness of each PSO configuration over x independent
    runs and write the four averages (concatenated, p1..p4) to file "fn"."""
    p1 = PSO(30,0.729844,1.496180,1.496180,200,5000)
    p2 = PSO(30,0.4,1.2,1.2,200,5000)
    p3 = PSO(30,1,2,2,200,5000)
    p4 = PSO(30,-1,2,2,200,5000)
    r1 = PSO(30,0.729844,1.496180,1.496180,200,5000)
    solvers = (p1, p2, p3, p4)
    results = tuple([] for _ in solvers)
    for step in range(x):
        print("Pass by", step)
        for runs, solver in zip(results, solvers):
            runs.append(solver.SolvePso())
    with open("fn", 'w') as out:
        for runs in results:
            out.write(str(sum(runs) / len(runs)))
# Entry point: average two runs of every PSO configuration into file "fn".
CreateResults(2)
| true |
93a04210f5e89abc7fe0254393cca522b97cdbf7 | Python | Sourav0Sinha/codeforce-repo | /problrm1.py | UTF-8 | 1,327 | 3.984375 | 4 | [] | no_license | '''Let's define S(x) to be the sum of digits of number x written in decimal system. For example, S(5)=5, S(10)=1, S(322)=7.
We will call an integer x interesting if S(x+1)<S(x). In each test you will be given one integer n. Your task is to calculate the number of integers x such that 1≤x≤n and x is interesting.
Input
The first line contains one integer t (1≤t≤1000) — number of test cases.
Then t lines follow, the i-th line contains one integer n (1≤n≤109) for the i-th test case.
Output
Print t integers, the i-th should be the answer for the i-th test case.
Example
inputCopy
5
1
9
10
34
880055535
outputCopy
0
1
1
3
88005553
Note
The first interesting number is equal to 9.
'''
# Read the number of test cases, then one integer per line -- kept as
# strings so check() can inspect the leading digit.
n = int(input())
data = []
for l in range(n):
    a = input()
    data.append(a)
def S(x):
    # Digit sum of x written in base 10 (e.g. S(322) == 7).
    return sum(int(digit) for digit in str(x))
def check(x, c):
    # While the numeric string x starts with '8', reduce it modulo
    # 8 * 10**(len(x)-1) and add 8 * 10**(len(x)-2) to the running count c.
    # Returns the reduced string and the accumulated count.
    if x[0] != '8':
        return x, c
    remainder = int(x) % (8 * (10 ** (len(x) - 1)))
    c += 8 * (10 ** (len(x) - 2))
    return check(str(remainder), c)
# For each query: x is "interesting" when S(x+1) < S(x); count interesting
# numbers in [1, n] by brute force after check() reduces the input.
# NOTE(review): check() special-cases inputs starting with '8' and adds a
# precomputed count term -- verify that term against the brute force below.
for i in data:
    count = 0
    i,c = check(i,count)
    q = int(i)
    count += c
    for j in range(1,q+1):
        cmp1 = S(j)
        cmp2 = S(j+1)
        if cmp1 > cmp2:
            count += 1
    print(count)
7bd7d1c9fe8ad5313a43480ce00be792ca229d93 | Python | DmitryPukhov/pytrade | /tests/connector/test_CsvFeedConnector.py | UTF-8 | 2,217 | 2.890625 | 3 | [] | no_license | from unittest import TestCase
import pandas as pd
from pytrade.connector.CsvFeedConnector import CsvFeedConnector
from datetime import datetime
from model.feed.Asset import Asset
class TestCsvFeedConnector(TestCase):
    """Unit tests for CsvFeedConnector's static row-parsing helpers."""

    def test_level2_of(self):
        """_level2_of builds a level-2 snapshot from tabular rows."""
        moment = datetime.fromisoformat("2021-11-14 10:00")
        raw = pd.DataFrame({
            'datetime': [moment, moment],
            'ticker': ['stock1/ticker1', 'stock1/ticker1'],
            'price': [1, 11],
            'bid_vol': [2, 22],
            'ask_vol': [3, 33],
        }).to_numpy()

        level2 = CsvFeedConnector._level2_of(raw)

        self.assertEqual(level2.asset, Asset("stock1", "ticker1"))
        self.assertEqual(level2.dt, moment)
        self.assertEqual([entry.price for entry in level2.items], [1, 11])
        self.assertEqual([entry.bid_vol for entry in level2.items], [2, 22])
        self.assertEqual([entry.ask_vol for entry in level2.items], [3, 33])

    def test__quote_of(self):
        """_quote_of maps a csv row dict onto a Quote."""
        moment = datetime.now()
        row = {'ticker': str(Asset("stock1", "name1")), 'bid': 1, 'ask': 2, 'last': 3, 'last_change': 4}

        quote = CsvFeedConnector._quote_of(moment, row)

        self.assertEqual(quote.dt, moment)
        self.assertEqual(quote.asset, Asset.of(row['ticker']))
        self.assertEqual(quote.bid, row['bid'])
        self.assertEqual(quote.ask, row['ask'])
        self.assertEqual(quote.last, row['last'])
        self.assertEqual(quote.last_change, row['last_change'])

    def test__ohlcv_of(self):
        """_ohlcv_of maps a csv row dict onto an OHLCV candle."""
        moment = datetime.now()
        row = {'ticker': str(Asset("stock1", "name1")), 'open': 1, 'high': 2, 'low': 3, 'close': 4, 'volume': 5}

        candle = CsvFeedConnector._ohlcv_of(moment, row)

        self.assertEqual(candle.dt, moment)
        self.assertEqual(candle.asset, Asset.of(row['ticker']))
        self.assertEqual(candle.o, row['open'])
        self.assertEqual(candle.h, row['high'])
        self.assertEqual(candle.l, row['low'])
        self.assertEqual(candle.c, row['close'])
        self.assertEqual(candle.v, row['volume'])
| true |
96f23c840accb8c78d7e80d62abd432ab3b449fc | Python | sshubham2/India_Covid_Data | /Covid_Data.py | UTF-8 | 1,201 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
import pandas as pd
df = pd.read_json('https://www.mohfw.gov.in/data/datanew.json')
df = df[:36]
df = df.drop(['sno'], axis=1)
from lxml import html
import requests
page = requests.get('https://www.mohfw.gov.in/')
tree = html.fromstring(page.content)
date_string = tree.xpath('//h5/span/text()[1]')[0]
date_string_split = date_string.split()
date = date_string_split[3]+date_string_split[4].lower().capitalize()+date_string_split[5][0:4]
from datetime import datetime
date = datetime.strptime(date, '%d%B%Y')
df['date'] = date
df['change_in_active']=df['new_active']-df['active']
df['change_in_positive']=df['new_positive']-df['positive']
df['change_in_cured']=df['new_cured']-df['cured']
df['change_in_death']=df['new_death']-df['death']
df=df[['date', 'state_name', 'new_active', 'new_positive', 'new_cured',
'new_death', 'active', 'positive', 'cured', 'death','change_in_active',
'change_in_positive', 'change_in_cured', 'change_in_death','state_code']]
from sqlalchemy import create_engine
engine = create_engine('sqlite:///covid_data.db', echo=True)
df.to_sql('india_covid_data_state', con=engine, if_exists='append', index=False)
| true |
a842a78b6c45b51dd7978c26e2c76b5d1683853f | Python | skn123/spectralgp | /spectralgp/kernels/recover_spectral_density.py | UTF-8 | 1,493 | 2.734375 | 3 | [
"BSD-2-Clause"
] | permissive | import gpytorch
import torch
import math
def get_spectral_density_from_kernel(kernel, locs = 100, base = torch.zeros(1,1), s_range = 4.,
                                    omg=None):
    """Recover the spectral density of a stationary kernel on a frequency grid.

    For a gpytorch SpectralMixtureKernel the density is the analytic Gaussian
    mixture; otherwise it is approximated by a trapezoid-rule inverse Fourier
    transform of K(tau, base).

    :param kernel: kernel to transform (gpytorch kernel)
    :param locs: number of frequency locations when ``omg`` is not given
    :param base: reference input used as the second argument of K(tau, base)
    :param s_range: upper end of the frequency grid [0, s_range]
    :param omg: optional explicit frequency grid (overrides locs/s_range)
    :return: (density values, frequency grid)
    """
    if omg is None:
        s = torch.linspace(0, s_range, locs)
    else:
        s = omg

    if isinstance(kernel, gpytorch.kernels.SpectralMixtureKernel):
        # Analytic density: weighted sum of the mixture's Gaussian components.
        dens = torch.zeros_like(s)
        n_mix = kernel.mixture_scales.nelement()
        for ii in range(n_mix):
            norm = torch.distributions.normal.Normal(kernel.mixture_means[ii], kernel.mixture_scales[ii])
            dens = dens + kernel.mixture_weights[ii] * norm.log_prob(s).exp()
        return dens.squeeze(), s

    def integrand(tau):
        # K(tau, base) * cos(2*pi*tau*s): the real part of the inverse FT.
        trig_part = torch.cos(2.0 * math.pi * tau * s)
        # Fix: the original re-assigned ``base = torch.zeros(1,1)`` here,
        # shadowing the parameter and silently ignoring a caller-supplied base.
        ref = torch.zeros(1, 1) if base is None else base
        ref = ref.type(tau.dtype)
        kernel_part = kernel(tau, ref).evaluate()
        return kernel_part * trig_part

    # Sample tau symmetrically over one period of the coarsest frequency.
    s_diff = s[1] - s[0]
    tau = torch.linspace(-1 / s_diff, 1 / s_diff, 3 * locs, dtype=s.dtype, device=s.device).unsqueeze(1)
    fn_output = integrand(tau)
    # standard trapezoidal rule
    diff = tau[1:] - tau[:-1]
    output = (diff * (fn_output[1:,...] + fn_output[:-1,...])/2.0).sum(0)
    # Clamp to keep the density strictly positive for downstream log use.
    output = torch.clamp(output, 1e-6)
    return 2. * output, s
| true |
9c6250c1f7410ac21c088b8a2b4dfe90210cdb37 | Python | vltlswk/Baekjoon-Algorithm | /b1712.py | UTF-8 | 477 | 2.984375 | 3 | [] | no_license | a, b, c = map(int, input().split())
i = 0
if b >= c:
print("-1")
else:
i = a // (c - b)
print(i + 1)
# a, b, c = map(int, input().split())
# diff = 0
# # a는 고정금액
# # b는 가변금액
# # c는 노트북 가격
# # c*n>a+nb
# # 손익 분기점이 없다=-1
#
# if b >= c:
# print("-1")
# else:
# diff = c - b
# i = 2
# while True:
# if diff * i > a:
# print(i)
# break
# else:
# i += 1 | true |
27a49528a10cc1ebc14d9886d18b96f4e5224c0f | Python | Desserg/Timus-Online-Judge | /2002 Тестовое задание.py | UTF-8 | 1,101 | 2.765625 | 3 | [] | no_license | log = list(input() for _ in range(int(input())))
user = {}
login = []
for i in log:
step = list(i.split(' '))
if step[0] == "register":
if step[1] in user:
print("fail: user already exists")
else:
user[step[1]] = step[2]
print("success: new user added")
elif step[0] == "login":
if user.get(step[1]) == step[2]:
if (step[1] in login) == False:
login.append(step[1])
print("success: user logged in")
else:
print("fail: already logged in")
else:
if step[1] in user:
print("fail: incorrect password")
else:
print("fail: no such user")
elif step[0] == "logout":
if (step[1] in user) and (step[1] in login):
login.remove(step[1])
print("success: user logged out")
elif (step[1] in user) == False:
print("fail: no such user")
elif (step[1] in login) == False:
print("fail: already logged out") | true |
aa83977c12bad8b1d3745dc0829d6bd2c318c124 | Python | hermetique/microkanren | /tests/test_stream.py | UTF-8 | 632 | 3.03125 | 3 | [
"MIT"
] | permissive | import pytest
from mk.stream import MZero, Thunk, Unit, unfold
# Fixture pairs: (stream expression, expected order of unfolded values).
UNFOLD_DATA = [
    (MZero(), []),
    (Unit(1), [1]),
    (MZero().mplus(Unit(1)), [1]),
    (Unit(1).mplus(Unit(2)), [1, 2]),
    (Unit(1).mplus(Unit(2).mplus(Unit(3))), [1, 2, 3]),
    # Left-associated mplus interleaves: expected order is 1, 3, 2.
    (Unit(1).mplus(Unit(2)).mplus(Unit(3)), [1, 3, 2]),
    (Unit(1).bind(lambda a: Unit(a + 1)), [2]),
    (Unit(1).mplus(Unit(2)).bind(lambda a: Unit(a + 1)), [2, 3]),
    (Thunk(lambda: Unit(1)).mplus(Thunk(lambda: Unit(2))), [1, 2]),
]
@pytest.mark.parametrize("stream, expected", UNFOLD_DATA)
def test_unfold(stream, expected):
    """unfold(stream) yields exactly the expected values, in order."""
    assert list(unfold(stream)) == expected
| true |
96d37241a8c1984fed0e53438c6110e90d2299a4 | Python | debasishdebs/parameterTesting | /Git/birch_cluster.py | UTF-8 | 2,411 | 3.0625 | 3 | [] | no_license | from sklearn.cluster import Birch
import csv
import datetime
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import Birch
from mpl_toolkits.mplot3d import Axes3D
def convert_str_to_dt(aStr):
    '''
    Converts aStr ("YYYY-MM-DD HH:MM[:SS...]") to a datetime object,
    ignoring any seconds component.
    Returns: datetime object
    '''
    fields = aStr.split(" ")
    year, month, day = (int(piece) for piece in fields[0].split("-"))
    clock = fields[1].split(":")
    hour, minute = int(clock[0]), int(clock[1])
    return datetime.datetime(year, month, day, hour, minute)
def birch_cluster(init_ds, ts_flag=False):
    '''
    Parameters: init_ds - 2D list of data
                ts_flag - boolean specifying if the first column of init_ds is a datetime object or not
    Returns: 2D list with additional column denoting which cluster said row falls into
    '''
    # Drop the timestamp column before clustering when present.
    rows = [row[1:] for row in init_ds] if ts_flag else init_ds
    labels = Birch().fit_predict(rows)
    return [row + [label] for row, label in zip(rows, labels)]
def plot(data):
    '''
    Scatter each row's value (column 0) against its index, coloured by the
    cluster label stored in the last column.
    Parameters: data - 2D list with 2 columns: value,cluster
    Returns: None
    '''
    xs = list(range(len(data)))
    ys = [row[0] for row in data]
    points = np.array([xs, ys])
    categories = np.array([row[-1] for row in data])
    colormap = np.array(list(set(row[-1] for row in data)))
    plt.scatter(points[0], points[1], s=50, c=colormap[categories])
    plt.show()
def plot3D(data):
    '''
    Render a 3-D scatter of (index, col1, col0), coloured by the cluster
    label in the last column.
    Parameters: data - 2D list with 3 columns: val1,val2,cluster
    Returns: none
    '''
    fig = plt.figure()
    axes = fig.add_subplot(111, projection='3d')
    first_col = [row[0] for row in data]
    second_col = [row[1] for row in data]
    indices = list(range(len(data)))
    categories = np.array([row[-1] for row in data])
    colormap = np.array(list(set(row[-1] for row in data)))
    axes.scatter(indices, second_col, first_col, s=50, c=colormap[categories])
    axes.set_xlabel("time")
    axes.set_ylabel("Unblocked")
    axes.set_zlabel("RCMPL")
    plt.show()
| true |
b2eb1132d982171c6c04b5342ce090fdf69d244c | Python | BMMalz/COMP-115 | /Assignment 4/a4_Malz.py | UTF-8 | 7,230 | 3.84375 | 4 | [] | no_license | """
==========================================================
Programmer: Benjamin Malz
Summary: This program reads DNA from a .fna file, and then determines
whether or not the DNA proves that a person has Huntington's Disease.
INPUT (keyboard, stdin): The program prompts users through a menu to enter a
number to perform an action. The user must type a filename by pressing '1' first
before they can do anything else.
INPUT (from a file of data): The .fna file contains a segment of DNA which can
be used to determine if someone has Huntington's Disease.
OUTPUT (console, stdout): The shell will display a menu which can be 'interacted'
with by pressing 1, 2, 3 and 4. The interactions will yield: a reading of the .fna
file's DNA, the number of CAG repetitions in the DNA, whether or not someone has
Huntington's Disease, and the percentage of the DNA that is the letters 'C' and
'G'.
Date Last Modified:
10/10/2016: (mdl) Starter Kit created
10/17/2016: (BMM) Finished the getDna() portion of the code.
10/19/2016: (BMM) CG Percentage added, CAG repeat detection added and classifications
for Huntington's Disease added.
==========================================================
"""
import os
# ------ CONSTANTS for entire file ----------
# menu options (the number the user types at the menu prompt)
READFILE = 1
FINDCAG = 2
GCPERCENT = 3
EXIT = 4
ERROR = -1
# definition of the snip of DNA of interest
REPEAT = "cag"
# constants for Classification Status
# ---------------------------
def getDNA(dnaFile):
    """Return all DNA in dnaFile as one string, skipping the FASTA header line.

    Returns the empty string (after printing an apology) if the file does
    not exist. The concatenated DNA is echoed to stdout before returning.
    NOTE(review): despite the original docstring, the DNA is returned in
    the file's original case, NOT lowercased -- findCAGs() lowercases it
    itself and getGCpercentage() relies on the case as stored.
    Bug fix: the original opened the file without ever closing it; the
    'with' block now guarantees the handle is released.
    """
    DNA = ""
    if not os.path.exists(dnaFile):
        print("\nSORRY, the file", dnaFile, "does not exist.")
        print("Try another filename ...")
    else:
        print("Using file:", dnaFile)
        with open(dnaFile, 'r') as FILE:
            FILE.readline()  # discard the FASTA header line
            for nextLine in FILE:
                DNA = DNA + nextLine.strip()
    # end else
    print(DNA)
    return DNA
# ---------------------------------------------------------
def findCAGs(DNA):
    """Report on consecutive 'cag' repeats in a string of DNA and print a
    Huntington's-disease classification based on the repeat count.

    The scan is case-insensitive (the input is lowercased first). Each time
    a 'cag' occurrence is immediately followed by another 'cag' (3 positions
    later), the position of the first of the pair and the remaining suffix
    are printed and the repeat count is incremented -- so a run of k
    consecutive CAGs contributes k-1 to the total.
    """
    #start looking in the string DNA at start(0) for cag-repeats
    DNAcags = DNA.lower() #converts DNA to lowercase for CAG locating
    where = DNAcags.find(REPEAT) #position of the current CAG hit (-1 = none)
    #cagCount = 0 #counts the amount of CAGs, also I didn't need it
    repeatCount = 0 #counts adjacent CAG->CAG transitions (see docstring)
    while (where != -1):
        #print("#", cagCount, "[position:", where, "]", DNAcags[where:])
        prevCag = where #remember this hit so we can test adjacency below
        #print(prevCag)
        where = DNAcags.find(REPEAT, where + 1) #advance to the next CAG (may overlap)
        #print(where)
        if (where - 3 == prevCag):
            # the next hit starts exactly one codon later -> consecutive repeat
            print("CAG repeat at position:", prevCag)
            print(DNAcags[prevCag:])
            repeatCount = repeatCount + 1 #counts CAG repeats
    print("Total CAG repeats:", repeatCount)
    # Standard Huntington's classification bands by repeat count.
    if(repeatCount <= 26):
        print("Classification: Normal. Unaffected by disease.")
    elif(repeatCount <= 35):
        print("Classification: Intermediate. Unaffected by disease.")
    elif(repeatCount <= 39):
        print("Classification: Reduced Penetrance. May be affected by disease.")
    elif(repeatCount > 39):
        print("Classification: Full Penetrance. Definitely affected by disease.")
# ---------------------------------------------------------
def getGCpercentage(DNA):
    """Return the GC percentage of a strand of DNA (case-insensitive).

    Prints the percentage in the original format and returns it as a float.
    Returns 0.0 for an empty strand instead of raising ZeroDivisionError.
    Bug fix: the original returned the function object itself
    ('return getGCpercentage') instead of the computed value, and only
    counted uppercase 'G'/'C'. Debug prints of the raw counts were removed.
    """
    dnaLength = len(DNA)
    if dnaLength == 0:
        return 0.0
    upperDNA = DNA.upper()  # count G/C regardless of input case
    gcCount = upperDNA.count("G") + upperDNA.count("C")
    GCpercent = (gcCount / dnaLength) * 100
    print("Percentage of G and C:", " %6.2f" % GCpercent)
    return GCpercent
# ---------------------------------------------------------
def main():
    """Interactive menu loop: load a .fna file, scan for CAG repeats,
    report GC%, or exit. Loops until the user picks option 4 (EXIT)."""
    gotDNA = False # flag to tell if we've already loaded some DNA
    DNA = "" # string of DNA held here
    choice = -1 # bogus setting to get us started
    while (choice != EXIT):
        print ("------------------------------")
        print ("  1 - Read DNA file")
        print ("  2 - Find", REPEAT, "repeat");
        print ("  3 - Report percent GC");
        print ("  4 - EXIT");
        print ("---------------------------")
        choice = input("ENTER: ")
        # trap bad user input
        # NOTE(review): this is a lexicographic string comparison, so e.g.
        # "12" passes the range check; the eval'd value then matches no
        # branch and the final else reads badInput before it was ever
        # assigned (NameError) -- confirm and consider int() + try/except.
        if ( (str(READFILE) <= choice) and (choice <= str(EXIT)) ):
            # force to an integer, for example "1" to 1
            # NOTE(review): eval() on raw user input is unsafe; int(choice)
            # would do the same job without executing arbitrary expressions.
            choice = eval(choice)
        else:
            badInput = choice
            # force to an integer to test below
            choice = ERROR;
        # ============ 1: READ DNA FILE =============================
        if ( choice == READFILE):
            dnaFile = input("Enter DNA filename: ")
            DNA = getDNA(dnaFile)
            if (DNA != ""):
                gotDNA = True
                print ("First 10 nucleotides in DNA file in one string is:\n", DNA[:10].lower())
            # end if
        # ============ 2: FIND CAG Repeats =============================
        elif (choice == FINDCAG):
            if (gotDNA):
                #start looking in the string DNA at start(0) for cag-repeats
                findCAGs(DNA)
            # end if gotDNA
            else:
                print ("You cannot search for CAGs at this time.")
                print ("Please open a file of DNA to search first.")
            # end else no file open yet
        # ============ 3: COMPUTE GC % =============================
        elif (choice == GCPERCENT):
            if (gotDNA):
                getGCpercentage(DNA)
            # end if gotDNA
            else:
                print ("You cannot compute GC % at this time.")
                print ("Please open a file of DNA to search first.")
            # end else no file open yet
        # ============ 4: EXIT =====================================
        elif (choice == EXIT):
            print ("Goodbye ...")
        # ============ ? HUH ? =====================================
        else: # invalid input
            print ("ERROR: ", badInput, "is an invalid input. Try again.")
    # end WHILE input is not EXIT
    print ("\n\nDone.\n")
# end main
#-----------------------------------------------------
if __name__ == '__main__':
    main()  # run the interactive menu only when executed as a script
#-----------------------------------------------------
| true |
004a3e7748f0c37946673aee97e9c79e1385daff | Python | jyotishsolutions/python | /ListInDictionary_5.py | UTF-8 | 275 | 3.453125 | 3 | [] | no_license | myvariable = 'hello world'
# NOTE(review): Python 2 syntax ('print' statements) -- this file will not
# run under Python 3. Demonstrates mixing value types in a list and a dict.
my_init=10  # (sic) presumably meant 'my_int' -- an integer sample value
my_float=10.98
my_bool=True
my_list = [my_init, my_float, my_bool, myvariable ] # this is list in python
print my_list
my_dict = {1:my_init, 2:my_float, 3:my_bool, 4:myvariable ,5:my_list} # this is dictionary in python
print my_dict
| true |
de49022d39d0be8017f23f65425a22b40a98ed84 | Python | BrianSipple/Bioinformatics | /comparing_dna/sequence_alignment.py | UTF-8 | 2,956 | 4 | 4 | [] | no_license | import numpy as np
def longet_common_subsequence(string1, string2):
    """
    Finds A longest common subsequence between two strings using
    insertion (string1) and deletion (string2) methods

    Unimplemented stub. NOTE(review): the name is misspelled
    ("longet" -> "longest") but is left unchanged because external
    callers may already reference it.
    """
    pass
def min_number_of_coins(money, coin_choices):
    """
    Dynamic programming: the minimum number of coins drawn from
    coin_choices (with repetition) that sum exactly to `money`.

    Returns 0 for money == 0. If `money` cannot be composed from the given
    coins, the original money*money "infinity" sentinel is returned,
    preserving the original contract.

    Cleanup vs. the original: the dead `max_array_length`/`array_index`
    locals (a never-finished rolling-window TODO) were removed, and the
    C-style index loop over coin_choices is now a direct iteration.
    """
    INFINITY = money * money  # sentinel larger than any achievable coin count
    min_num_coins = [0]  # min_num_coins[a] = fewest coins summing to a
    for amount in range(1, money + 1):
        best = INFINITY
        for coin in coin_choices:
            # Only a coin no larger than the amount can contribute.
            if amount >= coin and min_num_coins[amount - coin] + 1 < best:
                best = min_num_coins[amount - coin] + 1
        min_num_coins.append(best)
    return min_num_coins[-1]
def south_or_east(i, j):
    """
    computes the length of the longest path to node (i, j) in
    a rectangular grid in the Manhattan Problem,
    abiding by the observation that the only way to reach node (i, j)
    is either by moving south (↓) from (i − 1, j) or east (→) from (i, j − 1).
    :return: max(i, j)

    NOTE(review): `southward_edge_weight` and `eastward_edge_weight` are
    not defined anywhere in this file -- calling this function raises
    NameError unless they are supplied elsewhere. Also note the plain
    recursion recomputes subproblems (exponential time); the iterative
    longest_path_in_graph below is the DP version.
    """
    if i == 0 and j == 0:
        return 0
    # "Impossible minimum" placeholders so max() below prefers any real path.
    x = -1 * (j ** j) # Initialize an impossible minimum for x and y
    y = -1 * (i ** i)
    if i > 0:
        x = south_or_east(i - 1, j) + southward_edge_weight(i-1, j)
    if j > 0:
        y = south_or_east(i, j - 1) + eastward_edge_weight(i, j-1)
    return max(x, y)
def longest_path_in_graph(n, m, down, right):
    """
    Dynamic programming for the length of a longest path in the Manhattan
    Tourist Problem on an (n+1) x (m+1) grid of nodes, where every vertical
    edge weighs `down` and every horizontal edge weighs `right`.

    Bug fixes vs. the original:
    - `[[0] * m] * n` aliased every row to the same list, so writes to one
      row clobbered all rows; each row is now an independent list.
    - the inner relaxation read `graph[ii-1][j]` with `j` left over from an
      earlier loop instead of the current column index.
    - the grid was sized n x m but indexed at [n][m], which always raised
      IndexError; the grid now has n+1 rows and m+1 columns.
    """
    if n == 0 and m == 0:
        return 0
    # (n+1) x (m+1) table of longest-path lengths; rows built independently.
    graph = [[0] * (m + 1) for _ in range(n + 1)]
    # First column: only downward moves are possible.
    for i in range(1, n + 1):
        graph[i][0] = graph[i - 1][0] + down
    # First row: only rightward moves are possible.
    for j in range(1, m + 1):
        graph[0][j] = graph[0][j - 1] + right
    # Interior nodes: best of arriving from above or from the left.
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            graph[i][j] = max(graph[i - 1][j] + down, graph[i][j - 1] + right)
    # The bottom-right corner holds the longest possible path length.
    return graph[n][m]
| true |
addded658973d74734ca5f2fa02451142fe6004d | Python | sasidhar513/NutritionEngine | /Src/regression.py | UTF-8 | 980 | 2.59375 | 3 | [] | no_license | import operator
import functools
import numpy as np
import pickle
import os
import constants
def range_cost(x, theta, y_min, y_max):
    """Unimplemented stub.

    Presumably meant to cost predictions for x/theta against the
    [y_min, y_max] band (judging by the signature) -- TODO confirm
    and implement.
    """
    pass
def compute_cost(x, theta, y):
    """Sum of squared errors between the targets y and the model output
    dot_product(x, theta), reduced over both remaining axes."""
    residuals = y - dot_product(x, theta)
    squared = residuals ** 2
    return sum(sum(squared))
def dot_product(x, theta):
    """Elementwise-multiply x by theta, then sequentially sum each
    top-level slab along its first axis; returns the results stacked
    into a numpy array.
    """
    weighted = x * theta
    reduced_rows = []
    for slab in weighted:
        total = slab[0]
        for piece in slab[1:]:
            total = total + piece
        reduced_rows.append(total)
    return np.array(reduced_rows)
def gradient_descent(x, y, theta):
    """One gradient-descent step, updating `theta` IN PLACE (returns None).

    Assumes x is 3-D with shape (samples, params?, nutrients) and theta is
    2-D with one row per x[:, i] slice -- TODO confirm against callers;
    the shapes are not documented anywhere in this file.
    The learning rate comes from the project-level constants.ALPHA.
    """
    nutrients_count = x.shape[2]
    input_sample_size = x.shape[0]
    temp_theta = np.zeros(shape=theta.shape)
    for i in range(0, theta.shape[0]):
        # Scalar gradient for row i, replicated across all nutrient columns.
        temp_theta[i] = [
            (constants.ALPHA/input_sample_size) * sum(sum(((
                (dot_product(x, theta)-y)
                * x[:, i:i+1].reshape(input_sample_size, nutrients_count)
            ))))
        ] * nutrients_count
    for i in range(0, theta.shape[0]):
        theta[i] = theta[i] - temp_theta[i]
    # Clamp every coefficient at zero (non-negativity constraint).
    # NOTE: the comprehension variable shadows the parameter `x` here.
    for i in range(0, theta.shape[0]):
        theta[i] = [max(x, 0) for x in theta[i]]
| true |
99ecc13aa1c6c00c68424a0f63e3f36db330de7b | Python | unibas-dmi-hpc/SPH-EXA | /scripts/parse_dat_file.py | UTF-8 | 275 | 2.59375 | 3 | [
"MIT"
] | permissive | import sys
import numpy as np
if len(sys.argv) != 2:
    # usage guard: exactly one argument, the filename stem WITHOUT extension
    print("Incorrect number of parameters. Correct usage: ./{} dat_filename_without_extension".format(sys.argv[0]))
    exit()
filename = sys.argv[1]
# Load the whitespace-separated text table and dump it transposed
# (column-major) as raw binary: <stem>.dat -> <stem>.bin
data = np.loadtxt(filename + '.dat')
data.T.tofile(filename + '.bin')
| true |
6110fe01433e4e1233b75adf25daef55cd1fc4f9 | Python | paulolima18/ProgrammingMesh | /Python/turtle_module/turtlemoves.py | UTF-8 | 686 | 4.1875 | 4 | [
"MIT"
] | permissive | import turtle
# f -> Forward
# r -> Right
# l -> Left
# b -> Back
def move(length,text):
    """Drive the turtle according to the first `length` command letters of
    `text`: f = forward 90, r = turn right, l = turn left, b = turn around
    and go back. Unknown letters print "Wrong letter". Blocks at the end
    until the turtle window is clicked.

    Cleanup vs. the original: the `ord(y) == ord("l")` comparisons over
    one-character slices are replaced with direct string comparison.
    """
    for i in range(length):
        command = text[i]
        if command == "l":
            turtle.left(90)
        elif command == "f":
            turtle.fd(90)
        elif command == "b":
            # NOTE(review): 'b' travels 100 units while 'f' travels 90 --
            # looks inconsistent in the original; preserved as-is.
            turtle.right(180)
            turtle.fd(100)
        elif command == "r":
            turtle.right(90)
        else:
            print("Wrong letter")
    turtle.exitonclick()
if __name__=='__main__':
    # Show the command legend, read one command string, and execute it.
    print("Inputs:\nf -> Forward\nr -> Right\nl -> Left\nb -> Back")
    text=str(input("Sequence:", ))
    length = len(text)
    move(length,text)
| true |
7006486096d002952534a3f7d750c7fbf01e1123 | Python | asmodehn/crypy | /crypy/storage/__init__.py | UTF-8 | 374 | 2.515625 | 3 | [
"MIT"
] | permissive | """
Provides a class as a common interface for all implemented storages API, using delegation.
"""
from enum import Enum
class Storage_Impl(Enum):
    # Available storage backends; Arctic is the only implementation so far.
    Arctic = 1
class Storage:
    """Common interface for all implemented storage APIs, using delegation
    to a backend selected by a Storage_Impl value."""
    def __init__(self, implementation: "Storage_Impl") -> None:
        # Bug fix: the original annotated __init__ as '-> Storage', which
        # is evaluated while the class body is still executing and raised
        # NameError on import (and __init__ returns None per PEP 484).
        # The parameter annotation is now a forward-reference string for
        # the same reason-proofing.
        # NOTE(review): unsupported Storage_Impl values currently leave
        # self.impl unset (later AttributeError) -- consider raising here.
        if implementation == Storage_Impl.Arctic:
            from . import arctic
            self.impl = arctic.init_api()
| true |
ffe378f5e399ed38b68e6a9783216032c407674e | Python | shivg7706/openCV | /util/gui/YCrCbExtrator.py | UTF-8 | 896 | 2.65625 | 3 | [
"MIT"
] | permissive | import cv2
import numpy as np
def call_back(x):
    """No-op trackbar callback: cv2.createTrackbar requires a callback,
    but extract() polls the positions instead, so nothing happens here."""
    pass
def create(name):
    """Create the named OpenCV window at (1000, 0) with six trackbars:
    low/high thresholds (0-255) for each of the Y, Cr and Cb channels.
    Lows start at 0, highs at 255.

    Cleanup vs. the original: cv2.namedWindow returns None, so binding it
    to the unused local 'image' was dead code and has been removed.
    """
    cv2.namedWindow(name)
    cv2.moveWindow(name, 1000,0)
    cv2.createTrackbar('Y_low', name, 0, 255, call_back)
    cv2.createTrackbar('Y_high', name, 255, 255, call_back)
    cv2.createTrackbar('Cr_low', name, 0, 255, call_back)
    cv2.createTrackbar('Cr_high', name, 255, 255, call_back)
    cv2.createTrackbar('Cb_low', name, 0, 255, call_back)
    cv2.createTrackbar('Cb_high', name, 255, 255, call_back)
def extract(name):
    """Read the six Y/Cr/Cb trackbar positions from the named window and
    return them as ((Y_low, Cr_low, Cb_low), (Y_high, Cr_high, Cb_high)),
    ready to use as cv2.inRange bounds."""
    lows = []
    highs = []
    for channel in ('Y', 'Cr', 'Cb'):
        lows.append(cv2.getTrackbarPos(channel + '_low', name))
        highs.append(cv2.getTrackbarPos(channel + '_high', name))
    return (
        tuple(lows),
        tuple(highs) )
| true |
0d7f0c11b32bd625fd5d121c3d6145d6544e98a8 | Python | emperorerror/useless | /day_dictionary.py | UTF-8 | 1,031 | 3.46875 | 3 | [] | no_license | import datetime
import calendar
def findDay(D):
    """Sum the values of an ISO-date-keyed dict into per-weekday buckets.

    D maps 'YYYY-MM-DD' strings to numeric values. Prints the resulting
    {'Mon': ..., ..., 'Sun': ...} totals dict and returns None (so a
    caller doing print(findDay(D)) will additionally print 'None').

    Cleanup vs. the original: datetime.weekday() already yields
    Monday == 0 ... Sunday == 6, so the weekday index is used directly as
    a lookup instead of the original 7-branch if/elif chain comparing
    against calendar.day_name strings.
    """
    D_D = {'Mon': 0, 'Tue': 0, 'Wed': 0, 'Thu': 0, 'Fri': 0, 'Sat': 0, 'Sun': 0}
    day_keys = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    for date_string in D:
        day_index = datetime.datetime.strptime(date_string, '%Y-%m-%d').weekday()
        D_D[day_keys[day_index]] += D[date_string]
    print(D_D)
# Driver program
D = {'2020-01-01':6}
# findDay prints the weekday totals itself and returns None, so this
# outer print additionally shows 'None'.
print(findDay(D))
e328c03dffdd3a77c7c5d0e9763119662647722b | Python | jeffbarnette/Python-One-Liners | /machine_learning/k-nearest_neighbors.py | UTF-8 | 437 | 3.75 | 4 | [
"MIT"
] | permissive | """Example of K-Nearest Neighbors (KNN)"""
# Dependencies
from sklearn.neighbors import KNeighborsRegressor
import numpy as np
# Data (House Size (square meters) / House Prices ($))
# column 0 = size in square meters, column 1 = price in dollars
X = np.array([[35, 30000], [45, 45000], [40, 50000],
              [35, 35000], [25, 32500], [40, 40000]])
# One-liner
# Fit k-NN (k=3) mapping size -> price; reshape(-1, 1) turns the size
# column into the (n_samples, 1) feature matrix sklearn expects.
KNN = KNeighborsRegressor(n_neighbors=3).fit(X[:,0].reshape(-1,1), X[:,1])
# Result & Prediction
# Predicted price for a 30 m^2 house = mean price of the 3 nearest sizes.
res = KNN.predict([[30]])
print(res)
44e2d97054294ce4e31679aa62d25e9cee467568 | Python | Zegocover/enmerkar | /tests/test_render.py | UTF-8 | 871 | 2.875 | 3 | [
"BSD-3-Clause"
] | permissive | # -- encoding: utf-8 --
import pytest
@pytest.mark.parametrize('locale', ('en', 'fi', 'sv', 'pt-BR'))
def test_babel_render(client, locale):
    """
    Test the middleware and the rendery bits.
    """
    expected_language_names = {
        'en': 'English',
        'fi': 'suomi',
        'sv': 'svenska',
        'pt-BR': 'português',
    }
    response = client.get('/', HTTP_ACCEPT_LANGUAGE=locale)
    # The view renders one key=value pair per line; parse it into a dict.
    body = response.content.decode('utf-8').strip()
    content = dict(pair.split('=', 1) for pair in body.splitlines())
    # Rendering happened in the locale we asked for ...
    assert content['language_code'] == locale.lower()
    # ... and `babel.Locale.language_name` was reachable from the template.
    assert content['language_name'] == expected_language_names[locale]
    # The rest are not really tested (aside from smoke tests) further;
    # the Babel test suite has taken care of that.
3de70fc09f01cfb4dfc3928f2e5859acedff8a9c | Python | dgiambra/schrodinger | /schrodinger/schrodinger.py | UTF-8 | 1,812 | 2.90625 | 3 | [
"MIT"
] | permissive | def schrodinger(V_0 , c, n_basis_set, fxn, d_0 , d_1):
    '''
    This function solves Schrodingers wave equation in two dimensions.
    Parameters
    ----------
    V_0 : int
        Potential Energy Constant
    c : int
        constant
    n_basis_set : int
        number of basis sets to use
    fxn : callable
        basis/target function (currently unused in the live code path)
    d_0 : int
        lower bound of domain
    d_1 : int
        upperbound of domain
    Output
    ------
    energy : tuple
        first element is an array of eigenvalues
        second element is the eigen vector corresponding to the basis set coefficients
    '''
    # Function-local imports (several are unused: snf, Polynomial,
    # Legendre, deriv -- leftovers from the commented-out file-based path).
    import scipy.ndimage.filters as snf
    import numpy as np
    import scipy.integrate as integrate
    from numpy.polynomial import Polynomial, Legendre
    from numpy import polyder as deriv
    import numpy.linalg as linalg
##    crs = open(file_name, "r")
    x = np.linspace(d_0,d_1,1000)
##    fps = []
##    for columns in (raw.strip().split() for raw in crs):
##        fps.append(float(columns[2]))
##        x.append(float(columns[1]))
##    fxn_x = fxn(x)
    ai=[]
    h=np.zeros((n_basis_set,n_basis_set))
##    for i in list(range((n_basis_set))):
##        b.append(np.exp(1j*2*np.pi*i*x))
##        ai.append(integrate.quad(lambda x:fxn(x)*np.exp(1j*2*np.pi*i*x),d_0,d_1 )[0])
##        h.append([])
    # Build the Hamiltonian matrix in the plane-wave basis.
    # NOTE(review): float() applied to the complex-valued np.exp(1j*...)
    # expression raises TypeError when quad evaluates the integrand, and
    # quad itself only integrates real-valued functions -- this line looks
    # broken as written; confirm the intended real/imaginary treatment.
    for i in list(range((n_basis_set))):
        for z in list(range((n_basis_set))):
            h[i][z]=integrate.quad(lambda x:(float(-c*-i**2*np.pi**2*np.exp(1j*2*np.pi*i*x)+V_0*np.exp(1j*2*np.pi*i*x))*np.exp(1j*2*np.pi*z*x)),d_0,d_1 )[0]
##    ai = np.matrix(ai)
    h = np.matrix(h)
    energy = linalg.eig(h)
    return energy
| true |
a0bfd56602775aa88e7f45bd97cfcf9634ba1a53 | Python | ppvyou/python-pi | /main.py | UTF-8 | 142 | 2.84375 | 3 | [] | no_license | #!/usr/bin/python3
def main(msg):
    """Print the given message to stdout.

    Bug fix: the original body used C-style '//' line comments, which are
    a SyntaxError in Python; they are replaced with '#' comments.
    """
    # Print a message
    print(msg)
main("Hello people")  # script entry point: prints the greeting once
| true |
edcaa41e23b610433fa755a202c42f53bb4439b4 | Python | Yipit/pyeqs | /tests/unit/test_aggregation.py | UTF-8 | 8,719 | 2.53125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from pyeqs.dsl import Aggregations
from tests.helpers import homogeneous
def test_add_agg():
    """
    Create aggregations block

    A bare metric aggregation should serialize to
    {name: {metric_type: {"field": field_name}}}; homogeneous() asserts
    the dict equality.
    """
    # When add an agg block
    t = Aggregations("agg_name", "field_name", "metric")
    # Then I see correct json
    results = {
        "agg_name": {
            "metric": {"field": "field_name"}
        }
    }
    homogeneous(t, results)
def test_add_agg_with_size():
    """
    Create aggregations block specifying size

    A terms aggregation with size=1 should keep the default order
    (_count desc) and min_doc_count (1).
    """
    # When add a terms agg block w/ size
    t = Aggregations("agg_name", "field_name", "terms", size=1)
    # Then I see correct json
    results = {
        "agg_name": {
            "terms": {
                "field": "field_name",
                "order": {"_count": "desc"},
                "min_doc_count": 1,
                "size": 1
            }
        }
    }
    homogeneous(t, results)
def test_add_agg_with_order():
    """
    Create aggregations block specifying order type and direction

    Explicit order_type/order_dir should override the default _count/desc
    ordering; size defaults to 0 (unbounded).
    """
    # When add a terms agg block w/ size
    t = Aggregations("agg_name", "field_name", "terms", order_type="_term", order_dir="asc")
    # Then I see correct json
    results = {
        "agg_name": {
            "terms": {
                "field": "field_name",
                "order": {"_term": "asc"},
                "min_doc_count": 1,
                "size": 0
            }
        }
    }
    homogeneous(t, results)
def test_add_agg_with_min_doc_count():
    """
    Create aggregations block specifying the min_doc_count

    min_doc_count should pass through unchanged while order and size keep
    their defaults.
    """
    # When add a terms agg block w/ min_doc_count
    t = Aggregations("agg_name", "field_name", "terms", min_doc_count=10)
    # Then I see correct json
    results = {
        "agg_name": {
            "terms": {
                "field": "field_name",
                "order": {"_count": "desc"},
                "min_doc_count": 10,
                "size": 0
            }
        }
    }
    homogeneous(t, results)
def test_add_agg_nested():
    """
    Create nested aggregations block

    A nested_path should wrap the aggregation in a {nested: {path: ...}}
    block and prefix the field name with the path.
    """
    # When add a nested_path with agg block
    t = Aggregations("agg_name", "field_name", "metric", nested_path="nested_doc")
    # Then I see correct json
    results = {
        "nested_doc": {
            "nested": {"path": "nested_doc"},
            "aggregations": {
                "agg_name": {"metric": {"field": "nested_doc.field_name"}},
            }
        }
    }
    homogeneous(t, results)
def test_add_agg_nested_with_size():
    """
    Create nested aggregations block specifying size

    size must be honored inside the nested wrapper as well.
    """
    # When add a nested_path with terms agg block w/ size
    t = Aggregations("agg_name", "field_name", "terms", size=1,
                     nested_path="nested_doc")
    # Then I see correct json
    results = {
        "nested_doc": {
            "nested": {"path": "nested_doc"},
            "aggregations": {
                "agg_name": {"terms": {
                    "field": "nested_doc.field_name",
                    "order": {"_count": "desc"},
                    "min_doc_count": 1,
                    "size": 1
                }}
            }
        }
    }
    homogeneous(t, results)
def test_add_agg_nested_with_order():
    """
    Create nested aggregations block specifying order type and direction

    order_type/order_dir must be honored inside the nested wrapper as well.
    """
    # When add a nested_path with terms agg block w/ order
    t = Aggregations("agg_name", "field_name", "terms", order_type="_term", order_dir="asc",
                     nested_path="nested_doc")
    # Then I see correct json
    results = {
        "nested_doc": {
            "nested": {"path": "nested_doc"},
            "aggregations": {
                "agg_name": {"terms": {
                    "field": "nested_doc.field_name",
                    "order": {"_term": "asc"},
                    "min_doc_count": 1,
                    "size": 0
                }}
            }
        }
    }
    homogeneous(t, results)
def test_add_agg_nested_with_min_doc_count():
    """
    Create nested aggregations block specifying min_doc_count

    min_doc_count must be honored inside the nested wrapper as well.
    """
    # When add a nested_path with terms agg block w/ min_doc_count
    t = Aggregations("agg_name", "field_name", "terms", min_doc_count=10,
                     nested_path="nested_doc")
    # Then I see correct json
    results = {
        "nested_doc": {
            "nested": {"path": "nested_doc"},
            "aggregations": {
                "agg_name": {"terms": {
                    "field": "nested_doc.field_name",
                    "order": {"_count": "desc"},
                    "min_doc_count": 10,
                    "size": 0
                }}
            }
        }
    }
    homogeneous(t, results)
def test_add_agg_filtered():
    """
    Create an aggregations block with filter

    filter_val/filter_name should wrap the aggregation in a named
    {filter: <filter_val>} block.
    """
    # With a filter
    filter_value = {"filter_type": {"other_field": {"comparison": "value"}}}
    # When add a filtered agg block
    t = Aggregations("agg_name", "field_name", "metric", filter_val=filter_value,
                     filter_name="filter_on_other")
    # Then I see correct json
    results = {
        "filter_on_other": {
            "filter": filter_value,
            "aggregations": {
                "agg_name": {"metric": {"field": "field_name"}}
            }
        }
    }
    homogeneous(t, results)
def test_add_agg_global():
    """
    Create an aggregations block that is global

    global_name should wrap the aggregation in a named {"global": {}}
    block so it ignores the query scope.
    """
    # When add a global agg block
    t = Aggregations("agg_name", "field_name", "metric", global_name="global_agg")
    # Then I see correct json
    results = {
        "global_agg": {
            "global": {},
            "aggregations": {
                "agg_name": {"metric": {"field": "field_name"}}
            }
        }
    }
    homogeneous(t, results)
def test_add_agg_range():
    """
    Create an aggregations block for a range

    A range_list [a, b, c] should expand into half-open buckets
    (-inf, a), [a, b), [b, c), [c, +inf); when range_name is omitted the
    block name defaults to "<field>_ranges".
    """
    # When add an agg block w/ range
    range_list = [1, 2, 3]
    t = Aggregations("agg_name", "field_name", "metric", range_list=range_list, range_name="my_ranges")
    # Then I see the correct json
    results = {
        "my_ranges": {
            "range": {
                "field": "field_name",
                "ranges": [
                    {"to": 1},
                    {"from": 1, "to": 2},
                    {"from": 2, "to": 3},
                    {"from": 3}
                ]
            }}
    }
    homogeneous(t, results)
    # Also works without a given range_name
    t = Aggregations("agg_name", "field_name", "metric", range_list=range_list)
    # Then I see the correct json
    results = {
        "field_name_ranges": {
            "range": {
                "field": "field_name",
                "ranges": [
                    {"to": 1},
                    {"from": 1, "to": 2},
                    {"from": 2, "to": 3},
                    {"from": 3}
                ]
            }}
    }
    homogeneous(t, results)
def test_add_agg_histogram():
    """
    Create an aggregations block w/ histogram intervals

    histogram_interval turns the aggregation into a histogram with
    default ordering (_key desc) and min_doc_count 1.
    """
    # When add an agg block w/ interval
    t = Aggregations("agg_name", "field_name", "metric", histogram_interval=20)
    # Then I see correct json
    results = {
        "agg_name": {
            "histogram": {
                "field": "field_name",
                "interval": 20,
                "order": {"_key": "desc"},
                "min_doc_count": 1
            }
        }
    }
    homogeneous(t, results)
def test_add_agg_histogram_with_order():
    """
    Create an aggregations block w/ histogram intervals and order type/direction

    Explicit order_type/order_dir should override the histogram's default
    _key/desc ordering.
    """
    # When add an agg block w/ interval and order
    t = Aggregations("agg_name", "field_name", "metric", histogram_interval=20,
                     order_type="_count", order_dir="asc")
    # Then I see correct json
    results = {
        "agg_name": {
            "histogram": {
                "field": "field_name",
                "interval": 20,
                "order": {"_count": "asc"},
                "min_doc_count": 1
            }
        }
    }
    homogeneous(t, results)
def test_add_agg_histogram_with_min_doc_count():
    """
    Create an aggregations block w/ histogram intervals and min_doc_count

    min_doc_count should pass through to the histogram block.
    """
    # When add an agg block w/ interval and min_doc_count
    t = Aggregations("agg_name", "field_name", "metric", histogram_interval=20,
                     min_doc_count=10)
    # Then I see correct json
    results = {
        "agg_name": {
            "histogram": {
                "field": "field_name",
                "interval": 20,
                "order": {"_key": "desc"},
                "min_doc_count": 10
            }
        }
    }
    homogeneous(t, results)
| true |
58ababa10f67fcb4710fe48937d80bf4b6b97445 | Python | vinay-reddy/test1 | /http_statistics.py | UTF-8 | 1,233 | 2.671875 | 3 | [] | no_license |
import pandas as pd
#csv file generation
fh2 = open('/Users/vinayreddy/Desktop/logs/gregory-server-performance/tmp8AqQtA/SystemLogs/var/log/httpd/http_csv.csv', 'w')
with open('/Users/vinayreddy/Desktop/logs/Gary/logs_march5/tmpVYKaK3/SystemLogs/var/log/httpd/ssl_access_log.1', 'r') as fh:
for line in fh:
line = line.strip()
a = line.split()
# print(len(a), a)
# print(a[0],a[3].split('[')[1], a[5].split('"')[1], a[6], a[7].split('"')[0], a[-1], sep=',')
fh2.write(a[0] + ',' + a[3].split('[')[1]+','+ a[5].split('"')[1] + ',' + a[-1].split('µ')[0] +',' + a[6]+','+ a[7].split('"')[0] + '\n')
fh2.close()
pd.options.display.max_rows = 1200
pd.options.display.max_colwidth = 1000
# to-do: load from pandas
columns = ['IP address','Date and Time','HTTP Methods', 'Time taken', 'URL', 'HTTP']
pandas_load = pd.read_csv('/Users/vinayreddy/Desktop/logs/gregory-server-performance/tmp8AqQtA/SystemLogs/var/log/httpd/http_csv.csv', names= columns, header= None)
print(pandas_load)
print(pandas_load['Time taken'].dtype)
b = pandas_load.sort_values(by=['Time taken'], ascending=False)
series = b['URL']
c = series.value_counts(sort=True)
print(c[:30])
print(b) | true |
099b7d9e835d394b7ecc8069e6da22758916762b | Python | xingyuexingyue/AutoInterfaceTest | /src/com/sangyu/core/CaseData.py | UTF-8 | 623 | 2.75 | 3 | [] | no_license | """
This class wraps a single test case into a class object.
user: 2020 by pyp
"""
class CaseData:
    """Value object holding one interface-test case: its id, the feature
    under test, the request URL and the expected result, with a
    getter for each field."""
    # Class-level defaults; always overwritten per-instance by __init__.
    test_id = ''
    test_features = ''
    test_url = ''
    test_result = ''
    def __init__(self, test_id, test_features, test_url, test_result):
        self.test_id = test_id
        self.test_features = test_features
        self.test_url = test_url
        self.test_result = test_result
    def getId(self):
        return self.test_id
    def getFeatures(self):
        return self.test_features
    def getUrl(self):
        return self.test_url
    def getResult(self):
        return self.test_result
| true |
0aac049c8263f7e956cea14027ed8e142b6344e5 | Python | ivklisurova/SoftUni_Fundamentals_module | /Exercises/Regular_Expresions/furniture.py | UTF-8 | 468 | 3.578125 | 4 | [] | no_license | import re
furniture = []
total_money = 0
# Read ">>Name<<Price!Quantity" orders until the literal line "Purchase".
while True:
    order = input()
    if order == 'Purchase':
        break
    # Bug fix: the original class [a-zA-z] spans ASCII 'Z'..'a' and so also
    # matched the characters [ \ ] ^ _ and backtick; corrected to A-Z.
    # Price may be an integer or a decimal; quantity is an integer.
    pattern = r'>{2}([a-zA-Z]+)<{2}(\d+[.]\d+|\d+)!(\d+)'
    matches = re.findall(pattern, order)
    for item in matches:
        if len(item) == 0:
            break
        furniture.append(item[0])
        total_money += float(item[1]) * float(item[2])
print('Bought furniture:')
# Cleanup: a plain loop instead of a list comprehension used for side effects.
for name in furniture:
    print(name)
print(f'Total money spend: {total_money:.2f}')
| true |
4f6900fe105036e494126dbf2a078f7868df1b65 | Python | friti/ML-Intro-course | /task2/subtask3/make_predictions_task3.py | UTF-8 | 4,077 | 2.515625 | 3 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, metrics
from sklearn import svm, neighbors, neural_network
from sklearn.linear_model import Ridge, Lasso
from sklearn.model_selection import train_test_split
from sklearn.metrics import plot_confusion_matrix
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
import datetime
import pickle
import json
import sys
## Read training data, labels, test data
# NOTE(review): all input paths are relative to this script's directory
# and refer to preprocessed project CSVs -- not portable as-is.
train_features = pd.read_csv('../reprocessing/preprocessed_files/train_features_SERIES_IMPUTE0_N_S_preprocessed.csv', delimiter=',')
train_labels = pd.read_csv('../dataset/train_labels.csv' , delimiter=',')
test_features_original = pd.read_csv('../reprocessing/preprocessed_files/test_features_N_AVG_MIN_MAX_DIFF_S_preprocessed.csv', delimiter=',')
# Sort in pid
train_features = train_features.sort_values(['pid'])
pids_features = train_features["pid"].drop_duplicates().to_list()
# Keep only labels whose pid exists in the feature set, in the same order.
train_labels = train_labels[train_labels["pid"].isin(pids_features)].sort_values(['pid'])
test_features_original = test_features_original.sort_values(['pid'])
test_features_original.index = test_features_original["pid"]
print(test_features_original)
'''for col in train_features.columns:
    if not col.endswith("_n"):
        continue
    else:
        train_features[col]=train_features[col]/12.
'''
#print(train_features["BUN_n"])
# Sanity checks
pids_labels = train_labels["pid"].drop_duplicates().to_list()
if pids_features != pids_labels:
    print("PID ERROR")
    print(pids_features)
    print(pids_labels)
    sys.exit()
# Use pid as index
train_features.set_index("pid", inplace=True)
train_labels.set_index("pid", inplace=True)
## Define labels to predict
labels_to_predict = ["LABEL_RRate", "LABEL_ABPm", "LABEL_SpO2", "LABEL_Heartrate"]
train_labels = train_labels[labels_to_predict]
## Select a subset of training features
#features_names_used = [ x for x in train_features.columns if x.endswith("_avg") or x == "Age" or x.endswith("_n") ]
#features_names_used = [ x for x in train_features.columns if x.endswith("_avg") or x.endswith("_slope") ]
features_names_used = [ x for x in train_features.columns if (x.endswith("_avg") or x.endswith("_slope")) ]
#features_names_used = train_features.columns
train_features = train_features[features_names_used]
print("Used features:")
print(features_names_used)
print("Number of used features: %d" %(len(features_names_used)))
## Split train data into train and validation
# NOTE(review): random_state=None makes the 80/20 split non-reproducible
# between runs -- pin it for comparable R2 scores.
random_state = None
X_train, X_test, y_train, y_test = train_test_split(train_features, train_labels, train_size=0.8, random_state=random_state)
models = {}
## Fit model
# One independent Lasso regressor per target label.
for label in labels_to_predict:
    print("\n=== %s ===" %label)
    # Define model
    # models[label] = neural_network.MLPRegressor(
    #     hidden_layer_sizes=(100),
    #     alpha=100, # L2 regularization
    #     activation="relu",
    #     solver="adam",
    #     learning_rate_init=0.01,
    #     learning_rate="constant",
    #     max_iter=500
    # )
    models[label] = Lasso(alpha = 0.1, fit_intercept=True)
    # Fit model
    print(datetime.datetime.now())
    models[label].fit(X_train, y_train[label])
    print(datetime.datetime.now())
    # Prediction to evaluate the model
    y_pred = models[label].predict(X_test)
    print("R2 score: %.2f" %(metrics.r2_score(y_test[label], y_pred)))
## Make predictions
test_features = test_features_original[features_names_used].copy()
df_predictions = pd.DataFrame()
print(df_predictions)
df_predictions["pid"] = test_features.index
print(df_predictions)
for label in labels_to_predict:
    df_predictions[label] = models[label].predict(test_features)
print(df_predictions)
#df_predictions = df_predictions.set_index(test_features_original['pid'].to_numpy(),drop = False)
#print(df_predictions)
#df_predictions.insert(0, 'pid', test_features_original["pid"])
#print(df_predictions)
#print(df_predictions)
df_predictions.set_index("pid", inplace=True)
print(df_predictions)
df_predictions.to_csv("subtask3_predictions.csv")
| true |
48ea216117906b2004dc8f7506b9e25df4d4d25e | Python | darkrider231/TrianglePython | /app.py | UTF-8 | 205 | 2.796875 | 3 | [] | no_license | # FreeCodeCamp Youtube Tutorial: https://www.youtube.com/watch?v=rfscVS0vtbw&t=402s. Learning as much coding as I can just cause I love to code.
# Draw a small right-angled triangle in ASCII art (same four rows as before).
for row in (" /|", " / |", " /  |", "/___|"):
    print(row)
| true |
428ee571324b6b3e2bc879dd7d49edaeceb99a6e | Python | moniiesk/python-gui-contacts-app | /contacts-app.py | UTF-8 | 6,755 | 2.765625 | 3 | [] | no_license | import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from tkinter import filedialog
from datetime import datetime, timedelta
import os
from PIL import Image, ImageTk
import re
contacts = [  # seed data shown in the listbox; dateOfBirth is ISO YYYY-MM-DD
{"phoneNumber":"555 0001","name":"Sheldon Lee Cooper","dateOfBirth":"1980-02-26","email":"sheldon@gmail.com"},
{"phoneNumber":"555 0002","name":"Howard Joel Wolowitz","dateOfBirth":"1981-03-01","email":"howard@gmail.com"},
{"phoneNumber":"555 0003","name":"Rajesh Ramayan Koothrappali","dateOfBirth":"1981-10-06","email":"raj@gmail.com"},
{"phoneNumber":"555 0004","name":"Penny Hofstadter","dateOfBirth":"1985-12-02","email":"penny@gmail.com"},
{"phoneNumber":"555 0005","name":"Amy Farrah Fowler","dateOfBirth":"1979-12-17","email":"amy@gmail.com"},
{"phoneNumber":"555 0002","name":"Bernadette Rostenkowski-Wolowitz","dateOfBirth":"1984-01-01","email":"bernadette@gmail.com"},  # shares Howard's number -- presumably intentional (married couple)
{"phoneNumber":"555 0006","name":"Leonard Hofstadter","dateOfBirth":"1980-05-17","email":"leonard@gmail.com"}
]
class AppWindow():
    def __init__(self, parent):
        """Build the whole contacts UI in a Toplevel window: contact list on
        the left, detail fields in the middle, photo on the right, and the
        save buttons at the bottom. `parent` is the Tk root (used only to
        register the validation callback)."""
        # Create the window
        self.parent = parent
        self.window = tk.Toplevel()
        self.window.geometry("670x400")
        self.window.title("Contacts app")
        self.window.protocol("WM_DELETE_WINDOW", self.window.quit)
        # Create a text label and place it in the window
        self.title_label = tk.Label(self.window, text="Contacts app", font=("Arial", 16))
        self.title_label.place(x=10, y=10)
        # Create the list box
        self.contact_list = tk.Listbox(self.window, width=25, height=17)
        self.contact_list.place(x=10, y=40)
        # -- When an item in the list is selected, execute the list_clicked function
        self.contact_list.bind('<<ListboxSelect>>', self.list_clicked)
        self.update_list()
        # Name
        self.name_label = tk.Label(self.window, text="Name", font=("Arial", 13))
        self.name_label.place(x=250,y=40)
        self.name_text = tk.Entry(self.window, width=25)
        self.name_text.place(x=250,y=60)
        # Email
        self.email_label = tk.Label(self.window, text="Email", font=("Arial", 13))
        self.email_label.place(x=250,y=100)
        self.email_text = tk.Entry(self.window, width=25)
        self.email_text.place(x=250,y=120)
        # Phone number -- validated on every edit via name_validate (%P is
        # the proposed new content of the entry).
        self.validate_command1 = parent.register(self.name_validate)
        self.phone_label = tk.Label(self.window, text="Phone", font=("Arial", 13))
        self.phone_label.place(x=250,y=160)
        self.phone_text = tk.Entry(self.window, width=25, validate="all", validatecommand=(self.validate_command1, "%P"))
        self.phone_text.place(x=250,y=180)
        # Date of birth
        self.dob_label = tk.Label(self.window, text="Date of birth", font=("Arial", 13))
        self.dob_label.place(x=250,y=220)
        self.dob_text = tk.Entry(self.window, width=25)
        self.dob_text.place(x=250,y=240)
        # Age (computed, display-only)
        self.age_label = tk.Label(self.window, text="Age", font=("Arial", 13))
        self.age_label.place(x=250,y=280)
        self.age_info = tk.Label(self.window, text="", font=("Arial", 13))
        self.age_info.place(x=250,y=300)
        # Photo
        self.image_label = tk.Label(self.window)
        self.image_label.place(x=500,y=40,width=150,height=200)
        # Buttons
        self.new_button = tk.Button(self.window, text="Save new", command=self.save_new)
        self.new_button.place(x=250, y=340, width=100, height=40)
        # NOTE(review): save_existing is referenced here but not defined in
        # the visible portion of this file -- confirm it exists further down.
        self.existing_button = tk.Button(self.window, text="Save existing", command=self.save_existing)
        self.existing_button.place(x=370, y=340, width=100, height=40)
def name_validate(self, text_to_check):
    """Validation callback registered on the phone Entry widget.

    Tk's ``validatecommand`` protocol expects a boolean result.  The
    previous version returned the raw ``re.match`` result (a Match object
    or None), which Tk cannot interpret as a boolean; the result is now
    coerced with ``bool``.

    NOTE(review): the anchored pattern (two letters + five digits) combined
    with validate="all" rejects every partially-typed value, which blocks
    keyboard entry into the field -- confirm this is the intended UX.
    """
    print("Validating " + text_to_check)
    # bool() turns Match-or-None into the True/False Tk requires.
    return bool(re.match("^([A-Za-z]{2}[0-9]{5})$", text_to_check))
def list_clicked(self, e):
    """Handle a selection in the contact listbox: fill the form fields.

    Shows the selected contact's name, email, phone, date of birth,
    computed age and (if a matching "<name>.jpg" exists) their photo.
    """
    # The event can fire with nothing selected (e.g. focus changes); ignore.
    if len(self.contact_list.curselection()) == 0:
        return None
    print(f"list_clicked: {self.contact_list.curselection()}")
    self.selected = int(self.contact_list.curselection()[0])  # item number selected in list
    print(f"You clicked item number {self.selected}")
    # Get the selected contact
    contact = contacts[self.selected]
    # Show name
    self.name_text.delete(0, tk.END)
    self.name_text.insert(0, contact["name"])
    # Show email
    self.email_text.delete(0, tk.END)
    self.email_text.insert(0, contact["email"])
    # Show phone number
    self.phone_text.delete(0, tk.END)
    self.phone_text.insert(0, contact["phoneNumber"])
    # Date of birth is stored as ISO yyyy-mm-dd but displayed as dd/mm/yyyy.
    birthday = datetime.strptime(contact["dateOfBirth"], "%Y-%m-%d")
    self.dob_text.delete(0, tk.END)
    self.dob_text.insert(0, birthday.strftime("%d/%m/%Y"))
    # Age: year difference, minus one if this year's birthday hasn't
    # happened yet (tuple compare covers both month and day cases).
    today = datetime.now()
    age = today.year - birthday.year
    if (today.month, today.day) < (birthday.month, birthday.day):
        age = age - 1
    self.age_info["text"] = str(age) + " years"
    # Show photo if it exists; otherwise clear any previously shown photo.
    if os.path.exists(contact["name"] + ".jpg"):
        img = Image.open(contact["name"] + ".jpg")
        img = img.resize((150, 200))
        # Keep a reference on self so the PhotoImage is not garbage-collected
        # (tkinter only holds a weak Tcl-side handle).
        self.contact_photo = ImageTk.PhotoImage(img)
        self.image_label.configure(image=self.contact_photo)
    else:
        self.contact_photo = None
        # Bug fix: configure(image=None) is silently ignored by tkinter
        # (None-valued options are dropped), which could leave the previous
        # photo on screen; an empty string is the documented way to clear it.
        self.image_label.configure(image='')
def update_list(self):
    """Rebuild the listbox from the global ``contacts`` list.

    Clears every row, inserts one row per contact (its name) and resets
    the selection marker to -1 (nothing selected).
    """
    # Start from an empty listbox.
    self.contact_list.delete(0, tk.END)
    # Append one row per contact, in list order.
    for entry in contacts:
        self.contact_list.insert(tk.END, entry["name"])
    # After a rebuild no row is selected.
    self.selected = -1
def save_new(self):
    """Create a new contact from the form fields and append it to the list."""
    print("You clicked button save_new")
    # The form shows dd/mm/yyyy; contacts store ISO yyyy-mm-dd.
    born = datetime.strptime(self.dob_text.get(), "%d/%m/%Y")
    contacts.append({
        "name": self.name_text.get(),
        "email": self.email_text.get(),
        "phoneNumber": self.phone_text.get(),
        "dateOfBirth": born.strftime("%Y-%m-%d"),
    })
    self.update_list()
def save_existing(self):
    """Write the form fields back into the currently selected contact."""
    print("You clicked button save_existing")
    if self.selected >= 0:
        record = contacts[self.selected]
        record["name"] = self.name_text.get()
        record["email"] = self.email_text.get()
        record["phoneNumber"] = self.phone_text.get()
        # Convert the displayed dd/mm/yyyy back to the stored ISO format.
        record["dateOfBirth"] = datetime.strptime(self.dob_text.get(), "%d/%m/%Y").strftime("%Y-%m-%d")
        self.update_list()
if __name__ == "__main__":
    # Run the app only when executed as a script, not on import.
    root = tk.Tk()
    # Hide the default root window; AppWindow presumably builds its own
    # window in self.window -- TODO confirm against the class __init__.
    root.withdraw()
    app = AppWindow(root)
    root.mainloop()
| true |
27ad4fb6da0ce97e807d19f72dabd0da7fa340fd | Python | duleryr/hpc_gpu | /Scripts_Python/Special_Input_Generator.py | UTF-8 | 857 | 3.546875 | 4 | [] | no_license | import os
import sys
from random import randrange
def main():
    """Generate a random weighted-graph input file under ../Input_Files.

    Usage: script.py <filename> <num_vertices> <num_edges>

    Writes "<filename>.txt" whose first line is "V E", followed by E lines
    of "origin destination weight" with endpoints drawn from [0, V) and
    weights from [1, 100).
    """
    os.chdir("../Input_Files")
    try:
        filename = sys.argv[1]
    except IndexError:
        print("First argument should be the name of the file you want to create")
        return
    try:
        V = int(sys.argv[2])
    except (IndexError, ValueError):
        # ValueError covers a non-integer argument, which previously crashed.
        print("Second argument should be the number of vertex")
        return
    try:
        E = int(sys.argv[3])
    except (IndexError, ValueError):
        print("Third argument should be the number of edges")
        return
    # "with" guarantees the file is closed even if a write fails.
    with open(filename + ".txt", "w") as file:
        file.write(str(V) + ' ' + str(E) + "\n")
        for i in range(E):
            origen = randrange(0, V)
            destination = randrange(0, V)
            weight = randrange(1, 100)
            file.write("{} {} {}\n".format(origen, destination, weight))
main()
| true |
d88b952f5da6cfb9a140714abebf9afdc9ed6ef3 | Python | demisto/content | /Packs/ThreatGrid/Integrations/FeedCiscoSecureMalwareAnalytics/FeedCiscoSecureMalwareAnalytics_test.py | UTF-8 | 3,414 | 2.5625 | 3 | [
"MIT"
] | permissive | from FeedCiscoSecureMalwareAnalytics import Client, fetch_indicators, fetch_indicators_command, \
create_entity_relationships
from CommonServerPython import *
from test_data.feed_data import banking_dns_response, sinkholed_ip_dns_response
def test_fetch_indicators(requests_mock):
    """Unit test
    Given
    - a client configured with a single feed name ('sinkholed-ip-dns')
    - a mocked feed endpoint returning the recorded raw response
    When
    - fetch_indicators runs with no limit.
    Then
    - Ensure indicators are created and duplicates are unified
    """
    fetch_start = arg_to_datetime(arg='today', arg_name='First fetch')
    tg_client = Client(api_key='1234', verify=False, feed_name=['sinkholed-ip-dns'],
                       first_fetch=fetch_start, tlp_color="", feed_tags="",
                       create_relationships=True)
    feed_url = f'https://panacea.threatgrid.com/api/v3/feeds/sinkholed-ip-dns.json?api_key={tg_client._api_key}'
    requests_mock.get(feed_url, json=sinkholed_ip_dns_response)
    fetched, _ = fetch_indicators(tg_client, None)
    assert len(fetched) == 15
def test_fetch_indicators_command_list(requests_mock, mocker):
    """Unit test
    Given
    - a client configured with two feed names
    - mocked endpoints for both feeds
    When
    - the fetch indicators command runs twice.
    Then
    - Ensure indicators shared by both feeds are unified
    - Ensure 'FeedRelatedIndicators', 'relationships' and 'Tags' are merged properly
    """
    fetch_start = arg_to_datetime(arg='today', arg_name='First fetch')
    tg_client = Client(
        api_key='1234', verify=False, feed_name=['sinkholed-ip-dns', 'banking-dns'],
        first_fetch=fetch_start, tlp_color="", feed_tags="", create_relationships=True)
    mocked_feeds = {
        'sinkholed-ip-dns': sinkholed_ip_dns_response,
        'banking-dns': banking_dns_response,
    }
    for feed, response in mocked_feeds.items():
        requests_mock.get(
            f'https://panacea.threatgrid.com/api/v3/feeds/{feed}.json?api_key={tg_client._api_key}',
            json=response,
        )
    create_mock = mocker.patch.object(demisto, "createIndicators")
    fetch_indicators_command(tg_client)
    fetch_indicators_command(tg_client)
    for indicator in create_mock.call_args.args[0]:
        if indicator['value'] == 'Example1.com':
            assert len(indicator['fields']['FeedRelatedIndicators']) == 14
            assert len(indicator['relationships']) == 14
            assert len(indicator['fields']['Tags']) == 2
        if indicator['value'] == 'Example3.com':
            assert len(indicator['fields']['FeedRelatedIndicators']) == 4
            assert len(indicator['relationships']) == 4
            assert len(indicator['fields']['Tags']) == 1
    assert len(create_mock.call_args.args[0]) == 28
def test_create_entity_relationships():
    """
    Given
    - a domain indicator name and one related IP indicator
    When
    - relationships are built for the domain
    Then
    - Validate both entities and their types appear in the created relationship
    """
    related = [{'type': 'IP', 'value': '1.1.1.1'}]
    created = create_entity_relationships(related, "test.com")
    first = created[0]
    assert first.get('entityB') == '1.1.1.1'
    assert first.get('entityBType') == 'IP'
    assert first.get('entityA') == 'test.com'
    assert first.get('entityAType') == 'Domain'
| true |
f0030c2c3927ec4ba91846d1ea2222897c715a57 | Python | Miiihal/demakein | /demakein/geom.py | UTF-8 | 14,647 | 3.03125 | 3 | [] | no_license | """
Large parts of this are just python translations of lib2geom.
"""
import collections, math
class XYZ(collections.namedtuple('XYZ', 'x y z')):
    """Immutable 3-component vector with basic linear-algebra operations."""

    def __neg__(self):
        return XYZ(-self.x, -self.y, -self.z)

    def __add__(self, other):
        return XYZ(self.x + other.x, self.y + other.y, self.z + other.z)

    def __sub__(self, other):
        return XYZ(self.x - other.x, self.y - other.y, self.z - other.z)

    def __mul__(self, scalar):
        # Scalar multiplication only (component-wise scaling).
        return XYZ(self.x * scalar, self.y * scalar, self.z * scalar)

    def __rmul__(self, scalar):
        return self.__mul__(scalar)

    def dot(self, other):
        """Scalar (dot) product."""
        return self.x * other.x + self.y * other.y + self.z * other.z

    def cross(self, other):
        """Vector (cross) product."""
        return XYZ(self.y * other.z - self.z * other.y,
                   self.z * other.x - self.x * other.z,
                   self.x * other.y - self.y * other.x)

    def mag2(self):
        """Squared magnitude."""
        return self.dot(self)

    def mag(self):
        """Euclidean magnitude."""
        return math.sqrt(self.mag2())

    def unit(self):
        """Unit-length vector in the same direction."""
        return (1.0 / self.mag()) * self
class Linear(collections.namedtuple('Linear', 'a0 a1')):
    """Linear segment: f(t) = a0*(1-t) + a1*t."""

    def __call__(self, t):
        return self.a0 * (1 - t) + self.a1 * t

    def __add__(self, other):
        return type(self)(self.a0 + other.a0, self.a1 + other.a1)

    def __sub__(self, other):
        return type(self)(self.a0 - other.a0, self.a1 - other.a1)

    def __mul__(self, scalar):
        return type(self)(self.a0 * scalar, self.a1 * scalar)

    def __rmul__(self, scalar):
        return self.__mul__(scalar)

    def tri(self):
        """Difference of the endpoints (net change over [0,1])."""
        return self.a1 - self.a0

    def hat(self):
        """Average of the endpoints (value at t = 0.5)."""
        return (self.a1 + self.a0) * 0.5
class S_basis(tuple):
    """ length>=1 immutable list of Linear
    f(t) = sum self[i](t) * s**i where s = t*(1-t)

    Symmetric-power ("s-power") polynomial representation as used by
    lib2geom.  NOTE: this class targets Python 2 (it relies on ``xrange``).
    """
    def __call__(self, t):
        # Evaluate by accumulating successive powers of s = t*(1-t).
        s = t*(1-t)
        result = self[0](t)
        p = s
        for i in xrange(1,len(self)):
            result += self[i](t) * p
            p *= s
        return result
    def __repr__(self):
        return 'S_basis(' + repr(list(self)) + ')'
    def scaled(self,other):
        # Multiply every coefficient by the scalar ``other``.
        return type(self)( item*other for item in self )
    def shifted(self, i):
        # Multiply by s**i: prepend i zero coefficients.
        return type(self)((self[0]*0,)*i+tuple(self))
    def truncated(self, n):
        # Keep only the first n coefficients (drop the high-order terms).
        return type(self)(self[:n])
    def _compat(self,other):
        # Zero-pad the shorter operand so both have the same length.
        size = max(len(self),len(other))
        if len(self) < size:
            self = type(other)(tuple(self) + (self[0]*0,)*(size-len(self)))
        if len(other) < size:
            other = type(other)(tuple(other) + (other[0]*0,)*(size-len(other)))
        return size, self, other
    def __add__(self, other):
        size, self, other = self._compat(other)
        return type(self)( self[i]+other[i] for i in xrange(size) )
    def __sub__(self, other):
        size, self, other = self._compat(other)
        return type(self)( self[i]-other[i] for i in xrange(size) )
    def multiplied(self, other, operator):
        # Generic product: ``operator`` combines coefficient values, so the
        # same code implements both scalar products (*) and dot products.
        zero = operator(self[0].a0*0,other[0].a0*0)
        c = [Linear(zero,zero)]*(len(self)+len(other))
        for j in xrange(len(other)):
            for i in xrange(j,j+len(self)):
                tri = operator(other[j].tri(),self[i-j].tri())
                c[i+1] = c[i+1] + Linear(-tri,-tri)
                c[i] = c[i] + Linear(operator(other[j].a0,self[i-j].a0), operator(other[j].a1,self[i-j].a1))
        #while len(c) > 1 and c[-1] == zero:
        #    del c[-1]
        return S_basis(c)
    def __mul__(self, other):
        return self.multiplied(other, lambda a,b: a*b)
    def __rmul__(self, other):
        return self*other
    def dot(self, other):
        # Pointwise dot product of two vector-valued s-basis functions.
        return self.multiplied(other, lambda a,b: a.dot(b))
    def divided(self, other, k):
        # Approximate self/other with k coefficients via least-squares.
        return least_squares(
            S_basis([Linear(self[0].a0/other[0].a0,self[0].a1/other[0].a1)]),
            lambda x: other*x-self,
            lambda x: other,
            lambda x: ZERO,
            k)
        #remainder = self
        #result = [ ]
        #for i in xrange(k):
        #    if len(remainder) <= i:
        #        break
        #    ci = Linear(remainder[i].a0/other[0].a0, remainder[i].a1/other[0].a1)
        #    result.append(ci)
        #    remainder = remainder - (S_basis([ci])*other).shifted(i)
        #return S_basis(result)
    def reciprocal(self, k):
        # 1/self, to k coefficients.
        return ONE.divided(self, k)
    def derivative(self):
        # Analytic derivative in the s-power basis.
        c = [ ]
        for k in xrange(len(self)-1):
            d = (2*k+1)*(self[k].a1 - self[k].a0)
            c.append(Linear(
                d + (k+1)*self[k+1].a0,
                d - (k+1)*self[k+1].a1
            ))
        k = len(self)-1
        d = (2*k+1)*(self[k].a1 - self[k].a0)
        c.append(Linear(d,d))
        return S_basis(c)
    def integral(self):
        # Antiderivative in the s-power basis; the second pass fixes the
        # constant part so the coefficients stay in canonical form.
        a = [ self[0]*0 ]
        for k in xrange(1,len(self)+1):
            ahat = self[k-1].tri()*(-1.0/(2*k))
            a.append(Linear(ahat,ahat))
        aTri = self[0].a0*0
        for k in xrange(len(self)-1,-1,-1):
            aTri = (self[k].hat() + (k+1)*0.5*aTri)*(1.0/(2*k+1))
            a[k] = a[k] + Linear(-0.5*aTri,0.5*aTri)
        return S_basis(a)
    def sqrt(self, k):
        # Square root to k coefficients, found by least-squares fitting.
        return least_squares(self, lambda x: x*x-self, lambda x: x.scaled(2), lambda x: ONE.scaled(2), k)
        #""" Calculate square root by newton's method """
        #result = self
        #for i in xrange(iters):
        #    result = (result+self.divided(result, k)).scaled(0.5)
        #return result
    def compose(self, other):
        """ Calculate f(t)=self(other(t)) """
        # Horner-style evaluation of self at ``other``.
        s = (ONE-other)*other
        result = S_basis([self[0]*0])
        for i in xrange(len(self)-1,-1,-1):
            result = result*s + (ONE-other).scaled(self[i].a0) + other.scaled(self[i].a1)
            #S_basis([Linear(self[i].a0,self[i].a0)]) + other.scaled(self[i].a1-self[i].a0)
        return result
    def solve(self, target, k, iters=20):
        """ Solve self.compose(x) = target for x using Newton's method
        """
        result = target
        deriv = self.derivative()
        for i in xrange(iters):
            result = result - (self.compose(result) - target).divided(deriv.compose(result), k)
            result = result.truncated(k)
        return result.truncated(k)
    def inverse(self, k, iters=5):
        # Functional inverse: x such that self(x(t)) = t.
        return self.solve(IDENTITY, k, iters)
# Frequently used constant s-basis functions over [0, 1]:
ONE = S_basis([Linear(1.0,1.0)])       # f(t) = 1
ZERO = S_basis([Linear(0.0,0.0)])      # f(t) = 0
IDENTITY = S_basis([Linear(0.0,1.0)])  # f(t) = t
#def fit(initial, transform, k, iters):
# def score(guess):
# tx = transform(guess)
# return (tx*tx).integral()[0].tri()
#
# result = initial
# current = score(result)
#
# #basis = [ ]
# #slant = IDENTITY-ONE.scaled(0.5)
# #for i in xrange(k):
# # basis.append(ONE.shifted(i))
# # basis.append(slant.shifted(i))
# #basis.append(IDENTITY.shifted(i))
# #basis.append((ONE-IDENTITY).shifted(i))
#
# #needs to be an orthogonal basis
#
# # Legendre polynomials
# X = IDENTITY.scaled(2.0)-ONE # Compress from -1,1 to 0,1
# basis = [ ONE, X ]
# while len(basis) < k:
# n = len(basis)
# basis.append( (basis[-1]*X).scaled((2*n+1)/(n+1.0)) - basis[-2].scaled(n/(n+1.0)) )
#
# #plot(*basis)
# #foo
#
# step = 1.0
# for i in xrange(iters):
# for item in basis:
# low = result + item.scaled(-step)
# low_score = score(low)
# high = result + item.scaled(step)
# high_score = score(high)
#
# c = current
# a = (high_score+low_score)*0.5-current
# b = high_score-a-c
#
# if high_score < current:
# result = high
# current = high_score
# #step *= 2.0
# if low_score < current:
# result = low
# current = low_score
# #step *= 2.0
#
# if a:
# min_point = -0.5/step*b/a
# new = result + item.scaled( min_point )
# new_score = score(new)
# if new_score < current:
# result = new
# current = new_score
# step = max(step, abs(min_point)*2.0)
# step *= 0.85
# print current, step
# #if not step: break
#
# return result
def newtonoid(initial, fp,fpp, k):
    """ Choose x to minimize the integral of f(x) over [0,1]

    Newton-like coordinate descent over a Legendre-polynomial basis:
    ``fp``/``fpp`` supply the first and second derivative of the objective
    integrand; each basis direction gets a 1-D Newton step.
    NOTE: Python 2 code (uses ``xrange``).
    """
    # Directional first/second derivative of the objective, integrated
    # over [0,1], for candidate ``a`` along basis direction ``b``.
    def scorep(a,b):
        return (b*fp(a)).integral()[0].tri()
    def scorepp(a,b):
        return (b*b*fpp(a)).integral()[0].tri()
    result = initial
    #current = score(result)
    # Legendre polynomials
    X = IDENTITY.scaled(2.0)-ONE # Compress from -1,1 to 0,1
    basis = [ ONE, X ]
    while len(basis) < k*2:
        # Bonnet recurrence for the next Legendre polynomial.
        n = len(basis)
        basis.append( (basis[-1]*X).scaled((2*n+1)/(n+1.0)) - basis[-2].scaled(n/(n+1.0)) )
    #plot(*basis)
    #foo
    step = 1.0
    for i in xrange(k*8):
        for item in basis:
            #c = score(result)
            #b = scorep(result)
            #a = 0.5*scorepp(result)
            #step = -b/2a
            # 1-D Newton step along this basis direction.
            step = -scorep(result,item)/scorepp(result,item)
            #print step
            new = result + item.scaled(step)
            #new = result + item.scaled( min_point )
            #new_score = score(new)
            #if new_score < current:
            result = new
            #current = new_score
        #step *= 0.85
        #print current, step
        #if not step: break
    return result
def least_squares(guess, f, fp, fpp, k):
    """ Choose x to minimize the integral of f(x)^2 over [0,1] """
    # Gradient and Hessian terms of f(x)^2 by the chain rule:
    #   (f^2)'  = 2 f f'
    #   (f^2)'' = 2 (f f'' + f'^2)
    def squared_grad(x):
        return (fp(x) * f(x)).scaled(2.0)

    def squared_hess(x):
        deriv = fp(x)
        return (fpp(x) * f(x) + deriv * deriv).scaled(2.0)

    return newtonoid(guess, squared_grad, squared_hess, k)
"""TODO: optimize path for aesthetics
"""
class Frame(collections.namedtuple('Frame', 'origin x y z')):
    """ Origin and orthogonal basis. """

    def apply(self, point):
        """Map local coordinates into world space."""
        world = self.origin
        world = world + self.x * point.x
        world = world + self.y * point.y
        world = world + self.z * point.z
        return world

    def unapply(self, point):
        """Map a world-space point back into local coordinates."""
        rel = point - self.origin
        return XYZ(self.x.dot(rel), self.y.dot(rel), self.z.dot(rel))
class Path(collections.namedtuple('Path','path velocity normal position')):
    """Arc-length parameterised curve.

    Fields (S_basis-valued functions of the curve parameter t in [0,1]):
      path     -- point on the curve
      velocity -- derivative of ``path``
      normal   -- reference normal used to orient frames
      position -- arc length travelled from t=0 (monotonic in t)
    NOTE: Python 2 code (uses ``xrange``).
    """
    def find(self, position):
        # Invert the monotonic arc-length map by bisection: return the
        # parameter t whose arc length equals ``position``.
        low = 0.0
        high = 1.0
        for i in xrange(32):
            mid = (low+high)*0.5
            value = self.position(mid)
            if position < value:
                high = mid
            elif position > value:
                low = mid
            else:
                return mid #Unlikely.
        return (low+high)*0.5
    def get_length(self):
        # Total arc length of the curve.
        return self.position[0].a1
    def get_frame(self, position):
        # Build an orthonormal frame at the given arc length: z along the
        # tangent, x the reference normal re-orthogonalised against z.
        t = self.find(position)
        point = self.path(t)
        z = self.velocity(t).unit()
        x = self.normal(t)
        x = (x - z*x.dot(z)).unit()
        y = z.cross(x)
        return Frame(point, x,y,z)
    def get_point(self, position):
        # Point on the curve at the given arc length.
        return self.path(self.find(position))
    def get_bentness(self, a,b):
        # RMS deviation of the sub-curve between arc lengths a and b from
        # the straight chord, normalised by the chord length.
        aa = self.find(a)
        bb = self.find(b)
        seg = self.path.compose(S_basis([Linear(aa,bb)]))
        straight = seg[:1]
        diff = seg-straight
        return math.sqrt( diff.dot(diff).integral()[0].tri() / seg[0].tri().mag2() )
def path(point0,vec0,norm0,point1,vec1,norm1):
    # Build a Path between two endpoints with given tangents and normals.
    # A cubic-like s-basis curve is fitted, then its tangent magnitude is
    # iteratively rescaled so the parameterisation approaches arc length.
    # NOTE: Python 2 code (print statements, xrange).
    #a = S_basis([Linear(XYZ(0.0,0.0,0.0), XYZ(1.0,1.0,0.0))])
    #b = S_basis([Linear(XYZ(3.0,0.0,0.0), XYZ(0.0,-1.0,0.0))])
    #arc = a + (S_basis([Linear(-a[0].tri(),a[0].tri())])+b).shifted(1)
    tri = point1-point0
    length = tri.mag()
    print '.', length
    vec0 = vec0.unit()
    vec1 = vec1.unit()
    for i in xrange(4):
        # Scale the end tangents by the current length estimate, rebuild
        # the curve and re-measure its arc length.
        s = length
        path = S_basis([Linear(point0,point1),Linear(vec0*s-tri,vec1*-s+tri)])
        velocity = path.derivative()
        speed = velocity.dot(velocity).sqrt( 6 )
        position = speed.integral()
        position = position - ONE.scaled(position[0].a0)
        length = position[0].a1
        print '-', length
    normal = S_basis([Linear(norm0,norm1)])
    return Path(path, velocity, normal, position)
def plot(*items):
    # Debug helper: plot each callable over t in [0, 1] with matplotlib's
    # pylab interface.  NOTE: Python 2 code (xrange).
    import pylab
    ts = [ i/100.0 for i in xrange(101) ]
    for item in items:
        pylab.plot(ts, [ item(t) for t in ts ])
    pylab.show()
if __name__ == '__main__':
    # Ad-hoc experiment: fit sqrt(t) by least squares and plot it against
    # the exact values.  The commented lines are earlier experiments kept
    # for reference.  NOTE: Python 2 code (xrange).
    import pylab
    x = IDENTITY+ONE.scaled(0.0)
    #dent = x.sqrt(2) #( (x).compose(IDENTITY) - IDENTITY ).divided(x.derivative(),5) #inverse(10,1)
    #dent = fit(x, lambda y: y*y-x, 10, 100)
    dent = least_squares(x, lambda y: y*y-x, lambda y: y.scaled(2), lambda y: ONE.scaled(2), 2)
    #dent = (dent*dent)#.truncated(20)
    #dent = b
    #r = 0.25
    #a = S_basis([Linear(0.5-r, 0.5+r)])
    #b = a.inverse(1)
    #a = ONE + IDENTITY*(ONE-IDENTITY)
    #a = a.compose(a)
    #b = a.reciprocal(5)
    #a = b.inverse(5)
    #b = a.inverse(5)
    #dent = x.compose(a).reciprocal(3).compose(b)
    #dent = b
    #dent = dent + (x-dent.reciprocal(5)).reciprocal(5)
    #a = S_basis([Linear(XYZ(0.0,0.0,0.0), XYZ(1.0,1.0,0.0))])
    #b = S_basis([Linear(XYZ(3.0,0.0,0.0), XYZ(0.0,-1.0,0.0))])
    #arc = a + (S_basis([Linear(-a[0].tri(),a[0].tri())])+b).shifted(1)
    #
    #velocity = arc.derivative()
    #speed = velocity.dot(velocity).sqrt(10)
    #position = speed.integral()
    #position = position - ONE.scaled(position(0.0))
    #scaler = position.scaled(1.0/position(1.0))
    #iscaler = scaler.inverse(10, 100)
    #dent = scaler.compose(iscaler)
    #
    #arc = arc.compose(scaler)
    #print position(0.0), position(1.0)
    #print arc.derivative()(0), arc.derivative()(1)
    ts = [ i/100.0 for i in xrange(101) ]
    #pylab.plot([ arc(t).x for t in ts ],[ arc(t).y for t in ts ],'.')
    pylab.plot([ dent(t) for t in ts ])
    pylab.plot([ x(t)**0.5 for t in ts ], 'o')
    pylab.show()
    #print arc(0.5)
    #print ONE
    #print ONE.compose(IDENTITY)
    #a = IDENTITY*IDENTITY + IDENTITY
    #b = a.inverse(10)
    #print a
    #print b
    #print a.compose(b).truncated(5)
    #x = S_basis([Linear(1.0,2.0)])
    #print (x*x)(3)
    #y = (x*x).sqrt(4)
    #for i in xrange(11):
    #    t = i/10.0
    #    print x(t), y(t)
    #
    #print S_basis([Linear(0.01,4.0)]).sqrt(4)
| true |
38ff99705b6dad4161f92ec330292f322c8762e7 | Python | ImperialCollegeLondon/sharpy | /sharpy/utils/settings.py | UTF-8 | 17,351 | 2.75 | 3 | [
"BSD-3-Clause"
] | permissive | """
Settings Generator Utilities
"""
import configparser
import ctypes as ct
import numpy as np
import sharpy.utils.exceptions as exceptions
import sharpy.utils.cout_utils as cout
import ast
class DictConfigParser(configparser.ConfigParser):
    """ConfigParser variant that can export its contents as a plain dict."""

    def as_dict(self):
        """Return {section: {option: value}}, with parser defaults merged in."""
        result = dict(self._sections)
        for section in result:
            # Layer the section's options on top of the parser-wide defaults.
            merged = dict(self._defaults, **result[section])
            # Drop configparser's internal bookkeeping key, if present.
            merged.pop('__name__', None)
            result[section] = merged
        return result
def cast(k, v, pytype, ctype, default):
    """Convert setting value ``v`` through ``pytype`` and wrap it in ``ctype``.

    Falls back to ``default`` (with a console warning) when no value was
    given, and unwraps ``v.value`` when ``v`` is already a ctypes-like
    object whose direct conversion fails.
    """
    try:
        return ctype(pytype(v))
    except KeyError:
        # No value supplied: use the default and warn the user.
        fallback = ctype(default)
        cout.cout_wrap("--- The variable " + k + " has no given value, using the default " + default, 2)
        return fallback
    except TypeError:
        raise exceptions.NoDefaultValueException(k)
    except ValueError:
        # v already carries a raw value (e.g. a ctypes instance); unwrap it.
        return ctype(v.value)
def to_custom_types(dictionary, types, default, options=dict(), no_ctype=True):
    """Coerce every entry of ``dictionary`` to the type declared in ``types``,
    filling in defaults, validating against ``options`` and rejecting any
    setting that is not declared.

    NOTE: the mutable default ``options=dict()`` is kept for interface
    compatibility; it is only read, never mutated here.
    """
    for name, declared in types.items():
        if type(declared) != list:
            chosen_type = declared
        elif name in dictionary:
            # Several admissible types: pick the one matching the given value.
            chosen_type = get_data_type_for_several_options(dictionary[name], declared, name)
        else:
            # Setting absent: the first listed type drives the default value.
            chosen_type = declared[0]
        dictionary[name] = get_custom_type(dictionary, chosen_type, name, default, no_ctype)
    check_settings_in_options(dictionary, types, options)
    # Collect and report any settings that were given but never declared.
    unknown = [exceptions.NotRecognisedSetting(name)
               for name in dictionary.keys() if name not in list(types.keys())]
    for item in unknown:
        cout.cout_wrap(repr(item), 4)
    if unknown:
        raise Exception(unknown)
def get_data_type_for_several_options(dict_value, list_settings_types, setting_name):
    """
    Checks the data type of the setting input in case of several data type options.
    Only a scalar or list can be the case for these cases.
    Args:
        dict_value: Dictionary value of the processed setting
        list_settings_types (list): Possible setting type options for this setting
        setting_name (str): Name of the setting (used for error reporting)
    Returns:
        str: The entry of ``list_settings_types`` matching the value's shape.
    Raises:
        exceptions.NotValidSettingType: if the value matches none of the options.
    """
    for data_type in list_settings_types:
        if 'list' in data_type and (type(dict_value) == list or not np.isscalar(dict_value)):
            return data_type
        elif 'list' not in data_type and np.isscalar(dict_value):
            return data_type
    # Bug fix: the exception was previously constructed but never raised,
    # which made the function silently return None for unmatched values.
    raise exceptions.NotValidSettingType(setting_name, dict_value, list_settings_types)
def get_default_value(default_value, k, v, data_type=None, py_type=None):
    """Return the default for setting ``k`` converted per type name ``v``.

    Raises:
        exceptions.NoDefaultValueException: when no default is defined.
    """
    if default_value is None:
        raise exceptions.NoDefaultValueException(k)
    if v in ['float', 'int', 'bool']:
        converted_value = cast(k, default_value, py_type, data_type, default_value)
    elif v == 'str':
        # v is known to be the literal 'str' in this branch, so use the
        # builtin directly instead of the previous eval(v) (same behaviour,
        # removes an unnecessary eval).
        converted_value = cast(k, default_value, str, str, default_value)
    else:
        # List/dict defaults: return a copy so later mutation by the caller
        # cannot corrupt the shared defaults dictionary.
        converted_value = default_value.copy()
    notify_default_value(k, converted_value)
    return converted_value
def get_custom_type(dictionary, v, k, default, no_ctype):
    """Convert ``dictionary[k]`` according to the type name ``v``.

    Scalars go through ``cast`` (optionally wrapped in a ctypes type when
    ``no_ctype`` is False); list types are converted to numpy arrays or
    parsed from comma/space-separated strings.  Missing keys fall back to
    ``get_default_value(default[k], ...)``.  Returns the converted value
    (which is also written back into ``dictionary``).
    """
    if v == 'int':
        if no_ctype:
            data_type = int
        else:
            data_type = ct.c_int
        try:
            dictionary[k] = cast(k, dictionary[k], int, data_type, default[k])
        except KeyError:
            dictionary[k] = get_default_value(default[k], k, v, data_type=data_type, py_type=int)
    elif v == 'float':
        if no_ctype:
            data_type = float
        else:
            data_type = ct.c_double
        try:
            dictionary[k] = cast(k, dictionary[k], float, data_type, default[k])
        except KeyError:
            dictionary[k] = get_default_value(default[k], k, v, data_type=data_type, py_type=float)
    elif v == 'str':
        try:
            dictionary[k] = cast(k, dictionary[k], str, str, default[k])
        except KeyError:
            dictionary[k] = get_default_value(default[k], k, v)
    elif v == 'bool':
        if no_ctype:
            data_type = bool
        else:
            data_type = ct.c_bool
        try:
            dictionary[k] = cast(k, dictionary[k], str2bool, data_type, default[k])
        except KeyError:
            dictionary[k] = get_default_value(default[k], k, v, data_type=data_type, py_type=str2bool)
    elif v == 'list(str)':
        try:
            # if isinstance(dictionary[k], list):
            #     continue
            # dictionary[k] = dictionary[k].split(',')
            # getting rid of leading and trailing spaces
            dictionary[k] = list(map(lambda x: x.strip(), dictionary[k]))
        except KeyError:
            dictionary[k] = get_default_value(default[k], k, v)
    elif v == 'list(dict)':
        try:
            # if isinstance(dictionary[k], list):
            #     continue
            # dictionary[k] = dictionary[k].split(',')
            # getting rid of leading and trailing spaces
            # Each entry arrives as a string representation of a dict.
            for i in range(len(dictionary[k])):
                dictionary[k][i] = ast.literal_eval(dictionary[k][i])
        except KeyError:
            dictionary[k] = get_default_value(default[k], k, v)
    elif v == 'list(float)':
        try:
            dictionary[k]
        except KeyError:
            dictionary[k] = get_default_value(default[k], k, v)
        # Already an array: nothing to convert.
        if isinstance(dictionary[k], np.ndarray):
            return dictionary[k]
        if isinstance(dictionary[k], list):
            for i in range(len(dictionary[k])):
                dictionary[k][i] = float(dictionary[k][i])
            dictionary[k] = np.array(dictionary[k])
            return dictionary[k]
        # dictionary[k] = dictionary[k].split(',')
        # # getting rid of leading and trailing spaces
        # dictionary[k] = list(map(lambda x: x.strip(), dictionary[k]))
        # Otherwise parse a "[a, b, c]" or "[a b c]" style string.
        if dictionary[k].find(',') < 0:
            dictionary[k] = np.fromstring(dictionary[k].strip('[]'), sep=' ', dtype=ct.c_double)
        else:
            dictionary[k] = np.fromstring(dictionary[k].strip('[]'), sep=',', dtype=ct.c_double)
    elif v == 'list(int)':
        try:
            dictionary[k]
        except KeyError:
            dictionary[k] = get_default_value(default[k], k, v)
        if isinstance(dictionary[k], np.ndarray):
            return dictionary[k]
        if isinstance(dictionary[k], list):
            for i in range(len(dictionary[k])):
                dictionary[k][i] = int(dictionary[k][i])
            dictionary[k] = np.array(dictionary[k])
            return dictionary[k]
        # dictionary[k] = dictionary[k].split(',')
        # # getting rid of leading and trailing spaces
        # dictionary[k] = list(map(lambda x: x.strip(), dictionary[k]))
        if dictionary[k].find(',') < 0:
            dictionary[k] = np.fromstring(dictionary[k].strip('[]'), sep=' ').astype(ct.c_int)
        else:
            dictionary[k] = np.fromstring(dictionary[k].strip('[]'), sep=',').astype(ct.c_int)
    elif v == 'list(complex)':
        try:
            dictionary[k]
        except KeyError:
            dictionary[k] = get_default_value(default[k], k, v)
        if isinstance(dictionary[k], np.ndarray):
            return dictionary[k]
        if isinstance(dictionary[k], list):
            for i in range(len(dictionary[k])):
                dictionary[k][i] = complex(dictionary[k][i])
            dictionary[k] = np.array(dictionary[k])
            return dictionary[k]
        # dictionary[k] = dictionary[k].split(',')
        # # getting rid of leading and trailing spaces
        # dictionary[k] = list(map(lambda x: x.strip(), dictionary[k]))
        if dictionary[k].find(',') < 0:
            dictionary[k] = np.fromstring(dictionary[k].strip('[]'), sep=' ').astype(complex)
        else:
            dictionary[k] = np.fromstring(dictionary[k].strip('[]'), sep=',').astype(complex)
    elif v == 'dict':
        try:
            if not isinstance(dictionary[k], dict):
                raise TypeError('Setting for {:s} is not a dictionary'.format(k))
        except KeyError:
            dictionary[k] = get_default_value(default[k], k, v)
    else:
        raise TypeError('Variable %s has an unknown type (%s) that cannot be casted' % (k, v))
    return dictionary[k]
def check_settings_in_options(settings, settings_types, settings_options):
    """
    Checks that settings given a type ``str`` or ``int`` and allowable options are indeed valid.
    Args:
        settings (dict): Dictionary of processed settings
        settings_types (dict): Dictionary of settings types
        settings_options (dict): Dictionary of options (may be empty)
    Raises:
        exceptions.NotValidSetting: if the setting is not allowed.
    """
    for key in settings_options:
        declared = settings_types[key]
        if declared == 'int':
            # Unwrap a possible ctypes value before comparing.
            try:
                current = settings[key].value
            except AttributeError:
                current = settings[key]
            if current not in settings_options[key]:
                raise exceptions.NotValidSetting(key, current, settings_options[key])
        elif declared == 'str':
            current = settings[key]
            # An empty string is always accepted, regardless of the options.
            if current and current not in settings_options[key]:
                raise exceptions.NotValidSetting(key, current, settings_options[key])
        elif declared == 'list(str)':
            for element in settings[key]:
                if element and element not in settings_options[key]:
                    raise exceptions.NotValidSetting(key, element, settings_options[key])
        else:
            pass  # no other checks implemented / required
def load_config_file(file_name: str) -> dict:
    """This function reads the flight condition and solver input files.
    Args:
        file_name (str): contains the path and file name of the file to be read by the ``configparser``
            reader.
    Returns:
        config (dict): a ``ConfigParser`` object that behaves like a dictionary
    """
    # configobj parses the file straight into a nested dict-like object,
    # superseding the earlier DictConfigParser-based implementation.
    import configobj
    return configobj.ConfigObj(file_name)
def str2bool(string):
    """Interpret a settings value as a boolean.

    Accepts real bools, ctypes ``c_bool`` instances and strings; the words
    'false', 'off', '0', 'no' (any case) and empty/falsy values map to
    False, anything else to True.
    """
    if isinstance(string, bool):
        return string
    if isinstance(string, ct.c_bool):
        return string.value
    if not string:
        return False
    # Any spelling other than the recognised "false" words counts as True.
    return string.lower() not in ['false', 'off', '0', 'no']
def notify_default_value(k, v):
    # Tell the user that setting ``k`` was missing and that the default
    # ``v`` is being used (console output via SHARPy's cout wrapper).
    cout.cout_wrap('Variable ' + k + ' has no assigned value in the settings file.')
    cout.cout_wrap(' will default to the value: ' + str(v), 1)
class SettingsTable:
    """
    Generates the documentation's setting table at runtime.
    Sphinx is our chosen documentation manager and takes docstrings in reStructuredText format. Given that the SHARPy
    solvers contain several settings, this class produces a table in reStructuredText format with the solver's settings
    and adds it to the solver's docstring.
    This table will then be printed alongside the remaining docstrings.
    To generate the table, parse the setting's description to a solver dictionary named ``settings_description``, in a
    similar fashion to what is done with ``settings_types`` and ``settings_default``. If no description is given it will
    be left blank.
    Then, add at the end of the solver's class declaration method an instance of the ``SettingsTable`` class and a call
    to the ``SettingsTable.generate()`` method.
    Examples:
        The end of the solver's class declaration should contain
        .. code-block:: python
            # Generate documentation table
            settings_table = settings.SettingsTable()
            __doc__ += settings_table.generate(settings_types, settings_default, settings_description)
        to generate the settings table.
    """
    def __init__(self):
        # Base table: Name / Type / Description / Default; an Options
        # column is appended later by generate() when options are given.
        self.n_fields = 4
        self.n_settings = 0
        self.field_length = [0] * self.n_fields
        self.titles = ['Name', 'Type', 'Description', 'Default']
        self.settings_types = dict()
        self.settings_description = dict()
        self.settings_default = dict()
        self.settings_options = dict()
        self.settings_options_strings = dict()
        self.line_format = ''
        self.table_string = ''
    def generate(self, settings_types, settings_default, settings_description, settings_options=dict(), header_line=None):
        """
        Returns a rst-format table with the settings' names, types, description and default values
        Args:
            settings_types (dict): Setting types.
            settings_default (dict): Settings default value.
            settings_description (dict): Setting description.
            header_line (str): Header line description (optional)
        Returns:
            str: .rst formatted string with a table containing the settings' information.
        """
        self.settings_types = settings_types
        self.settings_default = settings_default
        self.n_settings = len(self.settings_types)
        #
        if header_line is None:
            header_line = 'The settings that this solver accepts are given by a dictionary, ' \
                          'with the following key-value pairs:'
        else:
            assert type(header_line) == str, 'header_line not a string, verify order of arguments'
        if type(settings_options) != dict:
            raise TypeError('settings_options is not a dictionary')
        if settings_options:
            # if settings_options are provided
            self.settings_options = settings_options
            self.n_fields += 1
            self.field_length.append(0)
            self.titles.append('Options')
            self.process_options()
        try:
            self.settings_description = settings_description
        except AttributeError:
            pass
        # Compute column widths, then emit divider/header/setting rows.
        self.set_field_length()
        self.line_format = self.setting_line_format()
        table_string = '\n ' + header_line + '\n'
        table_string += '\n ' + self.print_divider_line()
        table_string += ' ' + self.print_header()
        table_string += ' ' + self.print_divider_line()
        for setting in self.settings_types:
            table_string += ' ' + self.print_setting(setting)
        table_string += ' ' + self.print_divider_line()
        self.table_string = table_string
        return table_string
    def process_options(self):
        # Render each option list as a comma-separated run of rst literals.
        self.settings_options_strings = self.settings_options.copy()
        for k, v in self.settings_options.items():
            opts = ''
            for option in v:
                opts += ' ``%s``,' %str(option)
            self.settings_options_strings[k] = opts[1:-1] # removes the initial whitespace and final comma
    def set_field_length(self):
        # Width of each column = longest cell (incl. the ``...`` markup and
        # the column title) plus padding.
        field_lengths = [[] for i in range(self.n_fields)]
        for setting in self.settings_types:
            stype = str(self.settings_types.get(setting, ''))
            description = self.settings_description.get(setting, '')
            default = str(self.settings_default.get(setting, ''))
            option = str(self.settings_options_strings.get(setting, ''))
            field_lengths[0].append(len(setting) + 4) # length of name
            field_lengths[1].append(len(stype) + 4) # length of type + 4 for the rst ``X``
            field_lengths[2].append(len(description)) # length of type
            field_lengths[3].append(len(default) + 4) # length of type + 4 for the rst ``X``
            if self.settings_options:
                field_lengths[4].append(len(option))
        for i_field in range(self.n_fields):
            field_lengths[i_field].append(len(self.titles[i_field]))
            self.field_length[i_field] = max(field_lengths[i_field]) + 2 # add the two spaces as column dividers
    def print_divider_line(self):
        # rst table divider: a run of '=' per column.
        divider = ''
        for i_field in range(self.n_fields):
            divider += '='*(self.field_length[i_field]-2) + ' '
        divider += '\n'
        return divider
    def print_setting(self, setting):
        # NOTE: the local name ``type`` shadows the builtin within this
        # method (kept as-is; documentation-only update).
        type = '``' + str(self.settings_types.get(setting, '')) + '``'
        description = self.settings_description.get(setting, '')
        default = '``' + str(self.settings_default.get(setting, '')) + '``'
        if self.settings_options:
            option = self.settings_options_strings.get(setting, '')
            line = self.line_format.format(['``' + str(setting) + '``', type, description, default, option]) + '\n'
        else:
            line = self.line_format.format(['``' + str(setting) + '``', type, description, default]) + '\n'
        return line
    def print_header(self):
        # Column titles rendered with the same format as setting rows.
        header = self.line_format.format(self.titles) + '\n'
        return header
    def setting_line_format(self):
        # Build a format string of left-aligned, fixed-width fields, one
        # per column, indexing into a single list argument.
        string = ''
        for i_field in range(self.n_fields):
            string += '{0[' + str(i_field) + ']:<' + str(self.field_length[i_field]) + '}'
        return string
def set_value_or_default(dictionary, key, default_val):
    """Return ``dictionary[key]``, or ``default_val`` when the key is absent.

    EAFP lookup (catches only ``KeyError``) so mappings with custom
    ``__getitem__`` behaviour, such as ``defaultdict``, act exactly as a
    direct subscript would.
    """
    try:
        return dictionary[key]
    except KeyError:
        return default_val
| true |
627582845e31dd253a0c8258b122c75b01aad33d | Python | CahitEyigunlu/Telecommunications | /lab4/smtp.py | UTF-8 | 1,859 | 2.515625 | 3 | [] | no_license | import smtplib as smtp
import sys
from PyQt5.QtWidgets import *
from login_password import login, password
def send():
    """Send the mail described by the GUI fields via Yandex SMTP over SSL.

    Reads the module-level ``email``/``password`` credentials and the
    ``destination_field``, ``subject_field`` and ``mail_field`` widgets;
    clears the subject and body fields on success.

    Fixes over the original: the bare ``except:`` (which silently swallowed
    every failure, including KeyboardInterrupt) is narrowed to ``Exception``
    and the error is printed, and the connection is closed exactly once in a
    ``finally`` block instead of duplicating ``server.quit()`` on both paths.
    """
    server = None
    try:
        # Minimal RFC-822-style envelope built from the GUI fields.
        message = 'From: {}\nTo: {}\nSubject: {}\n\n{}\n.'.format(
            email,
            destination_field.text(),
            subject_field.text(),
            mail_field.toPlainText())
        server = smtp.SMTP_SSL('smtp.yandex.com')
        server.set_debuglevel(1)
        server.ehlo(email)
        server.login(email, password)
        server.auth_plain()
        server.sendmail(email, destination_field.text(), message)
        # Only reached on success: reset the compose fields.
        mail_field.clear()
        subject_field.clear()
    except Exception as exc:
        print('Failed to send mail:', exc)
    finally:
        if server:
            try:
                server.quit()
            except smtp.SMTPException:
                # Best-effort teardown; the connection may already be dead.
                pass
if __name__ == '__main__':
    # Sender address comes from the (out-of-tree) login_password module.
    email = login
    # NOTE(review): dest_email is never used below; the actual destination is
    # taken from the GUI field at send time -- confirm before removing.
    dest_email = 'vladimir.karnygin@gmail.com'
    try:
        # Build a minimal PyQt5 window: destination / subject / body inputs
        # stacked vertically, with a button wired to send().
        app = QApplication(sys.argv)
        main_window = QWidget()
        main_window.setWindowTitle('lab 4 task 2')
        main_window.setLayout(QVBoxLayout())
        destination_field = QLineEdit()
        # Pre-filled default recipient.
        destination_field.setText('vladimir.karnygin@gmail.com')
        subject_field = QLineEdit()
        mail_field = QTextEdit()
        start_btn = QPushButton("Send mail")
        start_btn.clicked.connect(send)
        main_window.layout().addWidget(QLabel("Destination email:"))
        main_window.layout().addWidget(destination_field)
        main_window.layout().addWidget(QLabel("Subject"))
        main_window.layout().addWidget(subject_field)
        main_window.layout().addWidget(QLabel("Message"))
        main_window.layout().addWidget(mail_field)
        main_window.layout().addWidget(start_btn)
        main_window.show()
        # exec_() blocks until the window closes; its return code becomes the
        # process exit status.
        sys.exit(app.exec_())
    except Exception as e:
        # Broad catch so a GUI construction failure is at least printed.
        print(e)
| true |
3f84cdca99d8984e05a77843b74c221cc9ccf2a4 | Python | Bastyl/borrador | /test_cliente_servidor/cliente_final.py | UTF-8 | 1,022 | 3.046875 | 3 | [] | no_license | import socket
import sys
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
# TCP client: requests an RSA public key from the server, then encrypts a
# fixed message with it using PKCS#1 OAEP. Requires the matching server to be
# listening on localhost:10000.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('localhost', 10000)
print('connecting to {} port {}'.format(*server_address))
sock.connect(server_address)
public_key = b''
try:
    message = b'Dame una llave publica.'
    print('sending {!r}'.format(message))
    sock.sendall(message)
    amount_received = 0
    # Accumulate the key in 16-byte chunks until a chunk equal to b'--'
    # arrives -- presumably the tail of the PEM footer sent by the server.
    while amount_received != 1 : # receive the key
        data = sock.recv(16)
        print('received {!r}'.format(data))
        public_key += data
        if(data == b'--'):
            # NOTE(review): the b'--' chunk was already appended above, so it
            # ends up in public_key twice -- confirm the server's framing
            # actually expects the doubled terminator.
            public_key += data
            amount_received = 1
    #____________ ENCRYPT ___________
    message = b'You can attack now!'
    key = RSA.importKey(public_key)
    # TODO (from original): make this part store the key in public_KEY
    cipher = PKCS1_OAEP.new(key)
    ciphertext = cipher.encrypt(message)
    print(ciphertext)
finally:
    print('closing socket')
    sock.close()
# NOTE(review): `key` is only bound if RSA.importKey succeeded inside the try
# block; this print raises NameError when the exchange failed earlier.
print(key)