blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
41340316a61cbc462d717522ea073ef6d344c94c | Python | zaxoavoki/pwr | /sem_5/JS/lab_7/main.py | UTF-8 | 1,958 | 3.25 | 3 | [] | no_license | from random import randint
from Pracownik import Pracownik
from Student import Student
from Przedmioty import Przedmioty
def wypisz_studentow(studenty):
    """Print a header and then every student, sorted alphabetically by surname."""
    print(' Studenty '.center(70, '='))
    # Plain loop: the original list comprehension ran purely for its
    # print() side effects and built a throwaway list of Nones.
    for student in sorted(studenty, key=lambda s: s.nazwisko):
        print(student)
def wypisz_pracownikow(pracowniki):
    """Print a header and then every worker, sorted alphabetically by surname."""
    print(' Pracowniki '.center(70, '='))
    # Plain loop instead of a side-effect-only list comprehension.
    for worker in sorted(pracowniki, key=lambda w: w.nazwisko):
        print(worker)
def wypisz_studentow_wg_sredniej(studenty):
    """Print students ordered by their study average, highest average first."""
    print(' Lista studentow wg sredniej oceny '.center(70, '='))
    # reverse=True replaces sorting ascending and then slicing with [::-1];
    # a plain loop replaces the side-effect-only list comprehension.
    for student in sorted(studenty, key=lambda s: s.daj_srednia_studiow(), reverse=True):
        print(student)
def max_ocenione_publikacji(pracowniki):
    """Print the worker(s) aged strictly between 40 and 50 with the highest
    total publication score.

    Prints only the header when no worker is in that age range (the
    original raised IndexError on `prac[0]` in that case).
    """
    print(' Pracownik z najwieksza ocena '.center(70, '='))

    def total_score(worker):
        # Each publication is a sequence whose second element is the score.
        return sum(pub[1] for pub in worker.publikacje)

    candidates = [w for w in pracowniki if 40 < w.wiek < 50]
    if not candidates:
        return
    best = max(total_score(w) for w in candidates)
    for worker in candidates:
        if total_score(worker) == best:
            print(worker)
def read_lines(filename):
    """Return the file's contents split on newlines.

    Uses a context manager so the handle is closed (the original leaked
    an open file object on every call).
    """
    with open(filename) as source:
        return source.read().split('\n')
if __name__ == '__main__':
    # Read data
    # NOTE(review): each CSV row is splatted into the constructor, so the
    # column order must match the Student/Pracownik __init__ signatures.
    studenty = [Student(*x.split(',')) for x in read_lines('data/students.csv')]
    pracowniki = [Pracownik(*x.split(',')) for x in read_lines('data/workers.csv')]
    # Read subjects
    for s in studenty:
        for p in read_lines('data/subjects.csv'):
            s.przedmioty.dopisz_przedmiot(*p.split(','))
            # Random grade 2..5 assigned to the subject named in column 1.
            s.dopisz_ocene(randint(2, 5), p.split(',')[1])
    # Read publications
    # Rows are assigned to workers 5-at-a-time; stop once rows outnumber workers.
    # NOTE(review): this open() handle is never closed explicitly.
    for i, p in enumerate(open('data/publications.csv').read().split('\n')):
        if i // 5 >= len(pracowniki): break
        pracowniki[i // 5].dopisz_publikacje([*map(int, p.split(','))])
    # Write data
    print()
    max_ocenione_publikacji(pracowniki)
    wypisz_studentow(studenty)
    wypisz_pracownikow(pracowniki)
wypisz_studentow_wg_sredniej(studenty) | true |
75470b600844619b82b293db7936bb98153ae5e6 | Python | Viccari073/extra_excercises | /estruturas_condicionais4.py | UTF-8 | 465 | 4.625 | 5 | [] | no_license | """
Escreva um porgrama que leia dois números inteiros e compare-os, mostrando na tela uma mensagem:
- O primeiro valor é maior;
- O segundo valor é maior;
- Os valores são iguais
"""
# Read the two integers to compare.
primeiro = int(input('Digite um número inteiro: '))
segundo = int(input('Digite outro número inteiro: '))

# Report which value is larger, or whether they are equal.
if primeiro == segundo:
    print('Os valores são iguais.')
elif primeiro > segundo:
    print('O primeiro valor é maior.')
else:
    print('O segundo valor é maior.')
| true |
3886990dab50c2d31b191a6283777e93c7b5d577 | Python | chapman-cs510-2017f/cw-05-ehsan_sharon | /test_cplane.py | UTF-8 | 1,469 | 2.71875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
from cplane import ListComplexPlane
#test __init__ & __creategrid__
def test_creategrid():
    """__init__/__creategrid__ must build the 3x3 grid of complex points."""
    plane_obj = ListComplexPlane(-1, 1, 3, -1, 1, 3)
    expected = [
        [complex(col, row) for col in (-1, 0, 1)]
        for row in (-1, 0, 1)
    ]
    assert expected == plane_obj.plane
    print("test_creategrid pass")
test_creategrid()
#test refresh
def test_refresh():
    """refresh() must rebuild the grid and clear the function list."""
    plane_obj = ListComplexPlane(-1, 1, 3, -1, 1, 3)
    expected = [
        [complex(col, row) for col in (-1, 0, 1)]
        for row in (-1, 0, 1)
    ]

    def noop():
        pass

    plane_obj.fs = [noop]
    plane_obj.refresh()
    assert expected == plane_obj.plane
    assert plane_obj.fs == []
    print("test_refresh pass")
test_refresh()
#test apply
def test_apply():
    """apply() must map every grid point through the given function."""
    plane_obj = ListComplexPlane(-1, 1, 3, -1, 1, 3)
    expected = [
        [complex(2 * col, 2 * row) for col in (-1, 0, 1)]
        for row in (-1, 0, 1)
    ]
    plane_obj.apply(lambda value: value * 2)
    assert expected == plane_obj.plane
    print("test_apply pass")
test_apply()
#test zoom
def test_zoom():
    """zoom() must regenerate the grid over the new coordinate bounds."""
    plane_obj = ListComplexPlane(-1, 1, 3, -1, 1, 3)
    expected = [
        [complex(2 * col, 2 * row) for col in (-1, 0, 1)]
        for row in (-1, 0, 1)
    ]
    plane_obj.zoom(-2, 2, 3, -2, 2, 3)
    assert expected == plane_obj.plane
    print("test_zoom pass")
test_zoom() | true |
6247f155641ebac4b41e7f05fad67d3b65e154e2 | Python | AlikeCh/hello-world | /exer_2.py | UTF-8 | 173 | 2.703125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 15 15:56:35 2016
@author: alikeche
"""
# Count the three-digit numbers (100..999) divisible by 17.
count = sum(1 for i in range(100, 1000) if i % 17 == 0)
# print() works on both Python 2 and 3; the original bare
# 'print count' statement is a syntax error on Python 3.
print(count)
0e78dbb813511b84abf32f70a5497afe09bbf8cd | Python | alexandraback/datacollection | /solutions_5706278382862336_1/Python/eval/A.py | UTF-8 | 791 | 3.34375 | 3 | [] | no_license |
INF = float('inf')  # generation count returned for unreachable fractions (p == 0)
T = input()  # Python 2 input(): evaluates the first line to the test-case count
def gcd(a, b):
    """Greatest common divisor of a and b (iterative Euclid)."""
    while b:
        a, b = b, a % b
    return a
def update(p, q):
    """Return the fraction p/q reduced to lowest terms as an integer pair."""
    from math import gcd as _gcd  # stdlib gcd; avoids the recursive helper
    d = _gcd(p, q)
    # Floor division keeps the components integers: the original's '/'
    # relied on Python 2 integer division and yields floats on Python 3.
    return (p // d, q // d)
def valid(p, q):
    """True iff the reduced fraction p/q has an odd numerator and a
    power-of-two denominator (i.e. the fraction is representable).

    Fraction() performs the reduction; the original's repeated 'q /= 2'
    relied on Python 2 integer division.
    """
    from fractions import Fraction
    frac = Fraction(p, q)
    if frac.numerator % 2 == 0:
        # Even numerator after reduction (this also covers p == 0).
        return False
    d = frac.denominator
    # d is a power of two iff exactly one bit is set.
    return d & (d - 1) == 0
def get_generation(p, q):
    """Number of halvings of the reduced denominator until p/q' >= 1/2.

    Returns 0 when the reduced fraction is already an integer (q == 1)
    and INF when p == 0 (unreachable). Iterative form of the original
    recursion; '//' keeps integer semantics on Python 3.
    """
    from fractions import Fraction
    frac = Fraction(p, q)
    if frac.numerator == 0:
        return INF
    denominator = frac.denominator
    if denominator == 1:
        return 0
    generation = 1
    while frac.numerator < denominator // 2:
        denominator //= 2
        generation += 1
    return generation
# Python 2 driver loop: raw_input/print-statement syntax kept as-is.
for t in range(1, T+1):
    # Each case is a fraction formatted as "p/q".
    p, q = map(int, raw_input().split('/'))
    if valid(p, q):
        print 'Case #%d: %s' %(t, get_generation(p, q))
    else:
        print 'Case #%d: impossible' %(t)
| true |
bb77d3fdb3dbfa187c3a6b8c4d8bbe51918d16d8 | Python | marshcla/ywcaagentssubjects | /cache_setup.py | UTF-8 | 1,829 | 2.515625 | 3 | [] | no_license | import json
import os
from archivesspace import archivesspace
import pprint
from utilities import *
import argparse
import logging
from datetime import datetime
## -----CACHING SETUP----- ##
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S.%f"  # timestamp format stored in cache entries
DEBUG = True

CACHE_FNAME = 'cache_file.json'
CREDS_CACHE_FILE = 'creds.json'

# Load any previously persisted cache; fall back to an empty one.
try:
    # 'with' guarantees the handle is closed even if read/parse fails
    # (the original leaked the handle on any exception).
    with open(CACHE_FNAME, 'r') as cache_file:
        cache_json = cache_file.read()
    CACHE_DICTION = json.loads(cache_json)
except (OSError, ValueError):
    # Missing/unreadable file or invalid JSON -> start fresh. The
    # original bare 'except:' also swallowed KeyboardInterrupt etc.
    CACHE_DICTION = {}
def has_cache_expired(timestamp_str, expire_in_days):
    """True if the cached timestamp is more than expire_in_days days old."""
    cached_at = datetime.strptime(timestamp_str, DATETIME_FORMAT)
    age = datetime.now() - cached_at
    return age.days > expire_in_days
def get_from_cache(identifier, cache_dictionary):
    """Return the cached values for identifier, or None if absent/expired.

    Expired entries are deleted from cache_dictionary as a side effect.
    """
    key = identifier.upper()
    entry = cache_dictionary.get(key)
    if entry is None:
        return None
    if has_cache_expired(entry['timestamp'], entry["expire_in_days"]):
        if DEBUG:
            print("Cache has expired for {}".format(key))
        del cache_dictionary[key]
        return None
    return entry['values']
def set_in_data_cache(identifier, data, expire_in_days):
    """Insert/overwrite a cache entry and persist the whole cache to disk."""
    key = identifier.upper()
    CACHE_DICTION[key] = {
        'values': data,
        'timestamp': datetime.now().strftime(DATETIME_FORMAT),
        'expire_in_days': expire_in_days,
    }
    # Persist after every write so the on-disk cache never goes stale.
    with open(CACHE_FNAME, 'w') as cache_file:
        cache_file.write(json.dumps(CACHE_DICTION))
def makeIdentifier(query_type):
    """Return the cache identifier for a query type (currently the type itself)."""
    return query_type
37901949f67c4aeaee0351d1396c69d82103a494 | Python | gensasaki/atcoder | /abc/129/a.py | UTF-8 | 76 | 2.71875 | 3 | [] | no_license | l = list(map(int, input().split()))
s_l = sorted(l)
# Sum of the two smallest of the input values.
print(s_l[0] + s_l[1])
| true |
5a18081380d900776bb0d1aa096cbe14b34a24ea | Python | robby-prescott-concord/DeclarScript | /getting_started/tutorial_resources/userinput0.py | UTF-8 | 1,122 | 4.1875 | 4 | [] | no_license | #!/usr/bin/env python3
import sys
def print_wrong():
    """Print the standard 'wrong answer' message."""
    print("\nYou're incorrect. :( ")
# Interactive "magic number" trick: walks the user through computing
# (n - 1) * (n + 1) + 1, whose square root is the original number n,
# since (n - 1)(n + 1) = n^2 - 1. Any wrong answer exits, using the
# magic number as the process exit status.
print('Magic Time:')
magic_number = int(input("Give me a positive whole number:\n"))
if magic_number < 1 :
    print_wrong()
    sys.exit(magic_number)
print('\nRemember this number, ' + str(magic_number) + '. It is the magic number.')
minus = int(input("\nWhat's the magic number minus 1?\n"))
if minus != magic_number - 1 :
    print_wrong()
    sys.exit(magic_number)
plus = int(input("\nWhat's the magic number plus 1?\n"))
if plus != magic_number + 1 :
    print_wrong()
    sys.exit(magic_number)
multiply = int(input("\nNow, multiply your last two answers together. What do you get?\n"))
if multiply != (plus * minus) :
    print_wrong()
    sys.exit(magic_number)
plus_one = int(input("\nThen add 1 to your last answer. What do you get?\n"))
if plus_one != multiply + 1 :
    print_wrong()
    sys.exit(magic_number)
square_root = int(input("\nWhat's the square root of your last answer?\n"))
if square_root != magic_number :
    print_wrong()
    sys.exit(magic_number)
print('\nI told ya, the number is magic!')
| true |
3fcc6f15d82438e1cdce30163080a2bc24835525 | Python | ta1231/BaekjoonPractice | /mathematics/ag11005.py | UTF-8 | 192 | 3.046875 | 3 | [] | no_license | import sys
# Convert N to base B (2..36), rendered with digits 0-9A-Z.
N, B = map(int, sys.stdin.readline().split())

DIGITS = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
sol = ''
while N != 0:
    # divmod yields quotient and remainder in one step.
    N, remainder = divmod(N, B)
    sol = DIGITS[remainder] + sol
# Edge case the original missed: for N == 0 the loop never runs and an
# empty line was printed instead of "0".
print(sol or '0')
| true |
29017b99bc08b066bec4b63c15011a14e1340280 | Python | gnavihs/EyeInTheSky | /DataImport.py | UTF-8 | 3,802 | 2.6875 | 3 | [] | no_license | import os, os.path
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import numpy as np
import cv2
import collections
import re
import sys
from random import shuffle
from random import randint
from numpy import array
import random
exec(open("./File_Paths.py").read())
############################################################################
random.seed(1234)
positive_label = re.compile(r'^car')
#Do NOT change these
image_size = 224
num_classes = 2
# Intialize lists and variables
file_paths_train = []
file_paths_test = []
def crop_center(img, cropx, cropy, shift):
    """Cut a (cropy, cropx) window around the image centre, jittered by a
    random offset of up to ±shift pixels on each axis."""
    height, width, _channels = img.shape
    jitter_x = randint(-shift, shift)
    jitter_y = randint(-shift, shift)
    left = width // 2 - cropx // 2 + jitter_x
    top = height // 2 - cropy // 2 + jitter_y
    return img[top:top + cropy, left:left + cropx, :]
def preprocess_input(x):
    """Map raw pixel values in [0, 255] onto the range [-1, 1]."""
    scaled = np.true_divide(x, 255)
    return (scaled - 0.5) * 2.0
# Collect every file under each dataset directory's train/ and test/
# sub-folders. 'dirs' comes from the exec'd File_Paths.py above.
# NOTE(review): the os.walk loop re-runs the same os.listdir expression
# per walked directory but only lists the top level; the walk variables
# (root, di, files) are unused. Also 'dir' shadows the builtin.
for dir in dirs:
    path_train = dir + 'train/'
    path_test = dir + 'test/'
    for root, di, files in os.walk(path_train):
        file_names = [os.path.join(path_train, f) for f in os.listdir(path_train) if os.path.isfile(os.path.join(path_train, f))]
        file_paths_train.extend(file_names)
    for root, di, files in os.walk(path_test):
        file_names = [os.path.join(path_test, f) for f in os.listdir(path_test) if os.path.isfile(os.path.join(path_test, f))]
        file_paths_test.extend(file_names)
#Shuffle to randomize training
# (random.seed(1234) above makes the shuffle reproducible.)
shuffle(file_paths_train)
shuffle(file_paths_test)
file_paths_train = array(file_paths_train)
file_paths_test = array(file_paths_test)
print("No. of training images: ", len(file_paths_train))
print("No. of testing images: ", len(file_paths_test))
#Read all the training images and labels
X_train = []
labels_train = []
for aFile in file_paths_train:
    #Image decoding
    input_value = cv2.imread(aFile)
    input_value = preprocess_input(input_value)
    #Get a 224x224 from 256x256 image which is cropped randomly around center
    # (random jitter of up to ±12 px -- training-time augmentation)
    input_value_crop = np.zeros((image_size, image_size, 3), dtype=np.float32)
    input_value_crop = crop_center(input_value,image_size,image_size, 12)
    X_train.append(input_value_crop)
    #Label decoding
    #'1' for car, '0' for not car (filename starts with "car")
    aFileName = os.path.basename(aFile)
    mo = positive_label.search(aFileName)
    if mo:
        labels_train.append(1)
    else:
        labels_train.append(0)
#Converting labels into one hot array
Y_train = np.zeros((len(labels_train), num_classes))
Y_train[np.arange(len(labels_train)),labels_train] = 1
X_train = array(X_train)
# print("labels_id: ",labels_train)
# print("Y_train: ",Y_train)
# print("file_paths: ",file_paths_train)
#Read all the testing images and labels
X_test = []
labels_test = []
for aFile in file_paths_test:
    #Image decoding
    input_value = cv2.imread(aFile)
    input_value = preprocess_input(input_value)
    #Get a 224x224 from 256x256 image which is center cropped
    # (shift=0: deterministic centre crop for evaluation)
    input_value_crop = np.zeros((image_size, image_size, 3), dtype=np.float32)
    input_value_crop = crop_center(input_value,image_size,image_size, 0)
    X_test.append(input_value_crop)
    #Label decoding
    #'1' for car, '0' for not car
    aFileName = os.path.basename(aFile)
    mo = positive_label.search(aFileName)
    if mo:
        labels_test.append(1)
    else:
        labels_test.append(0)
#Converting labels into one hot array
Y_test = np.zeros((len(labels_test), num_classes))
Y_test[np.arange(len(labels_test)),labels_test] = 1
X_test = array(X_test)
# print("labels_id: ",labels_test)
# print("Y_test: ",Y_test)
# print("file_paths: ",file_paths_test)
print("DATA IMPORT: All the train and test data loaded in X_train, Y_train, X_test, Y_test")
d20e91e758ea8817919906a35d355a8ef239206e | Python | sbe710/web-crawler | /linkChecker/linkChecker/spiders/link_checker.py | UTF-8 | 1,149 | 2.65625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import os
import uuid
from urllib.parse import urljoin

import requests
import scrapy
class LinkCheckerSpider(scrapy.Spider):
    """Crawls caniuse.com and saves the page's images, videos and text."""
    name = 'test'
    allowed_domains = ['caniuse.com']
    start_urls = ['https://caniuse.com']

    def parse(self, response):
        """Extract media URLs and visible text from the crawled page."""
        img_urls = response.xpath('//img/@src').extract()
        video_urls = response.xpath('//video/@src').extract()
        text_chunks = response.css("::text").extract()

        self.saveData(img_urls, "image", response)
        self.saveData(video_urls, "video", response)

        # 'with' closes the file even if the write fails (the original
        # handle was never closed).
        with open("sample.txt", "w") as text_file:
            text_file.write(
                "".join(text_chunks).strip().replace("\t", "").replace("\n", "")
            )

    def saveData(self, array, type, response):
        """Download every URL in `array` to a uniquely named local file."""
        for contentUrl in array:
            filename, file_extension = os.path.splitext(contentUrl)
            print("filename ", file_extension)
            # urljoin resolves absolute, root-relative and relative src
            # values; the original naive `response.url + contentUrl`
            # concatenation produced broken URLs for absolute links.
            contentData = requests.get(urljoin(response.url, contentUrl)).content
            unique_filename = str(uuid.uuid4().hex)
            with open(unique_filename + file_extension, 'wb') as handler:
                handler.write(contentData)
ace9d4b60792ac0fb8506cb991cf573b6a9ea3d0 | Python | netaddr/netaddr | /netaddr/tests/ip/test_ip_v4.py | UTF-8 | 17,691 | 2.546875 | 3 | [
"BSD-3-Clause"
] | permissive | import pickle
import types
import random
import sys
import pytest
from netaddr import IPAddress, IPNetwork, INET_PTON, spanning_cidr, AddrFormatError, ZEROFILL, Z, P, NOHOST
def test_ipaddress_v4():
    """Basic IPv4 IPAddress accessors: repr/str/format, int/hex/bytes
    conversions, binary renderings and the per-octet word tuple."""
    ip = IPAddress('192.0.2.1')
    assert ip.version == 4
    assert repr(ip) == "IPAddress('192.0.2.1')"
    assert str(ip) == '192.0.2.1'
    assert ip.format() == '192.0.2.1'
    assert int(ip) == 3221225985
    assert hex(ip) == '0xc0000201'
    if sys.version_info[0] > 2:
        # bytes() conversion of an address only exists on Python 3.
        assert bytes(ip) == b'\xc0\x00\x02\x01'
    assert ip.bin == '0b11000000000000000000001000000001'
    assert ip.bits() == '11000000.00000000.00000010.00000001'
    assert ip.words == (192, 0, 2, 1)
@pytest.mark.parametrize(
('value', 'ipaddr', 'network', 'cidr', 'broadcast', 'netmask', 'hostmask', 'size'), [
(
'192.0.2.1',
IPAddress('192.0.2.1'),
IPAddress('192.0.2.1'),
IPNetwork('192.0.2.1/32'),
None,
IPAddress('255.255.255.255'),
IPAddress('0.0.0.0'),
1,
),
(
'192.0.2.0/24',
IPAddress('192.0.2.0'),
IPAddress('192.0.2.0'),
IPNetwork('192.0.2.0/24'),
IPAddress('192.0.2.255'),
IPAddress('255.255.255.0'),
IPAddress('0.0.0.255'),
256
),
(
'192.0.3.112/22',
IPAddress('192.0.3.112'),
IPAddress('192.0.0.0'),
IPNetwork('192.0.0.0/22'),
IPAddress('192.0.3.255'),
IPAddress('255.255.252.0'),
IPAddress('0.0.3.255'),
1024
),
])
def test_ipnetwork_v4(value, ipaddr, network, cidr, broadcast, netmask, hostmask, size):
net = IPNetwork(value)
assert net.ip == ipaddr
assert net.network == network
assert net.cidr == cidr
assert net.broadcast == broadcast
assert net.netmask == netmask
assert net.hostmask == hostmask
assert net.size == size
def test_ipnetwork_list_operations_v4():
    """len() and list() over a /29 network enumerate all 8 addresses."""
    ip = IPNetwork('192.0.2.16/29')
    assert len(ip) == 8
    ip_list = list(ip)
    assert len(ip_list) == 8
    assert ip_list == [
        IPAddress('192.0.2.16'),
        IPAddress('192.0.2.17'),
        IPAddress('192.0.2.18'),
        IPAddress('192.0.2.19'),
        IPAddress('192.0.2.20'),
        IPAddress('192.0.2.21'),
        IPAddress('192.0.2.22'),
        IPAddress('192.0.2.23'),
    ]
def test_ipnetwork_index_operations_v4():
    """Positive and negative indexing into a network's address range."""
    ip = IPNetwork('192.0.2.16/29')
    assert ip[0] == IPAddress('192.0.2.16')
    assert ip[1] == IPAddress('192.0.2.17')
    assert ip[-1] == IPAddress('192.0.2.23')
def test_ipnetwork_slice_operations_v4():
ip = IPNetwork('192.0.2.16/29')
assert isinstance(ip[0:4], types.GeneratorType)
assert list(ip[0:4]) == [
IPAddress('192.0.2.16'),
IPAddress('192.0.2.17'),
IPAddress('192.0.2.18'),
IPAddress('192.0.2.19'),
]
assert list(ip[0::2]) == [
IPAddress('192.0.2.16'),
IPAddress('192.0.2.18'),
IPAddress('192.0.2.20'),
IPAddress('192.0.2.22'),
]
assert list(ip[-1::-1]) == [
IPAddress('192.0.2.23'),
IPAddress('192.0.2.22'),
IPAddress('192.0.2.21'),
IPAddress('192.0.2.20'),
IPAddress('192.0.2.19'),
IPAddress('192.0.2.18'),
IPAddress('192.0.2.17'),
IPAddress('192.0.2.16'),
]
def test_ipnetwork_sort_order():
ip_list = list(IPNetwork('192.0.2.128/28'))
random.shuffle(ip_list)
assert sorted(ip_list) == [
IPAddress('192.0.2.128'),
IPAddress('192.0.2.129'),
IPAddress('192.0.2.130'),
IPAddress('192.0.2.131'),
IPAddress('192.0.2.132'),
IPAddress('192.0.2.133'),
IPAddress('192.0.2.134'),
IPAddress('192.0.2.135'),
IPAddress('192.0.2.136'),
IPAddress('192.0.2.137'),
IPAddress('192.0.2.138'),
IPAddress('192.0.2.139'),
IPAddress('192.0.2.140'),
IPAddress('192.0.2.141'),
IPAddress('192.0.2.142'),
IPAddress('192.0.2.143'),
]
def test_ipaddress_and_ipnetwork_canonical_sort_order_by_version():
ip_list = [
IPAddress('192.0.2.130'),
IPNetwork('192.0.2.128/28'),
IPAddress('::'),
IPNetwork('192.0.3.0/24'),
IPNetwork('192.0.2.0/24'),
IPNetwork('fe80::/64'),
IPNetwork('172.24/12'),
IPAddress('10.0.0.1'),
]
random.shuffle(ip_list)
ip_list.sort()
assert ip_list == [
IPAddress('10.0.0.1'),
IPNetwork('172.24.0.0/12'),
IPNetwork('192.0.2.0/24'),
IPNetwork('192.0.2.128/28'),
IPAddress('192.0.2.130'),
IPNetwork('192.0.3.0/24'),
IPAddress('::'),
IPNetwork('fe80::/64'),
]
def test_ipnetwork_v4_constructor():
    """Partial addresses and the implicit_prefix positional flag."""
    assert IPNetwork('192.168/16') == IPNetwork('192.168.0.0/16')
    assert IPNetwork('192.168.0.15') == IPNetwork('192.168.0.15/32')
    assert IPNetwork('192.168') == IPNetwork('192.168.0.0/32')
    assert IPNetwork('192.168', implicit_prefix=True) == IPNetwork('192.168.0.0/24')
    # implicit_prefix may also be passed positionally.
    assert IPNetwork('192.168', True) == IPNetwork('192.168.0.0/24')
    assert IPNetwork('10.0.0.1', True) == IPNetwork('10.0.0.1/8')
def test_ipaddress_integer_operations_v4():
assert IPAddress('192.0.2.0') + 1 == IPAddress('192.0.2.1')
assert 1 + IPAddress('192.0.2.0') == IPAddress('192.0.2.1')
assert IPAddress('192.0.2.1') - 1 == IPAddress('192.0.2.0')
assert IPAddress('192.0.0.0') + IPAddress('0.0.0.42') == IPAddress('192.0.0.42')
assert IPAddress('192.0.0.42') - IPAddress('0.0.0.42') == IPAddress('192.0.0.0')
with pytest.raises(IndexError):
1 - IPAddress('192.0.2.1')
ip = IPAddress('10.0.0.1')
ip += 1
assert ip == IPAddress('10.0.0.2')
ip -= 1
assert ip == IPAddress('10.0.0.1')
ip += IPAddress('0.0.0.42')
assert ip == IPAddress('10.0.0.43')
ip -= IPAddress('0.0.0.43')
assert ip == IPAddress('10.0.0.0')
# Negative increments around address range boundaries.
ip = IPAddress('0.0.0.0')
with pytest.raises(IndexError):
ip += -1
ip = IPAddress('255.255.255.255')
with pytest.raises(IndexError):
ip -= -1
def test_ipaddress_binary_operations_v4():
assert IPAddress('192.0.2.15') & IPAddress('255.255.255.0') == IPAddress('192.0.2.0')
assert IPAddress('255.255.0.0') | IPAddress('0.0.255.255') == IPAddress('255.255.255.255')
assert IPAddress('255.255.0.0') ^ IPAddress('255.0.0.0') == IPAddress('0.255.0.0')
assert IPAddress('1.2.3.4').packed == '\x01\x02\x03\x04'.encode('ascii')
def test_ipnetwork_slices_v4():
assert list(IPNetwork('192.0.2.0/29')[0:-1]) == [
IPAddress('192.0.2.0'),
IPAddress('192.0.2.1'),
IPAddress('192.0.2.2'),
IPAddress('192.0.2.3'),
IPAddress('192.0.2.4'),
IPAddress('192.0.2.5'),
IPAddress('192.0.2.6'),
]
assert list(IPNetwork('192.0.2.0/29')[::-1]) == [
IPAddress('192.0.2.7'),
IPAddress('192.0.2.6'),
IPAddress('192.0.2.5'),
IPAddress('192.0.2.4'),
IPAddress('192.0.2.3'),
IPAddress('192.0.2.2'),
IPAddress('192.0.2.1'),
IPAddress('192.0.2.0'),
]
def test_iterhosts_v4():
assert list(IPNetwork('192.0.2.0/29').iter_hosts()) == [
IPAddress('192.0.2.1'),
IPAddress('192.0.2.2'),
IPAddress('192.0.2.3'),
IPAddress('192.0.2.4'),
IPAddress('192.0.2.5'),
IPAddress('192.0.2.6'),
]
assert list(IPNetwork("192.168.0.0/31")) == [
IPAddress('192.168.0.0'),
IPAddress('192.168.0.1'),
]
assert list(IPNetwork("1234::/128")) == [IPAddress('1234::')]
assert list(IPNetwork("1234::/128").iter_hosts()) == []
assert list(IPNetwork("192.168.0.0/31").iter_hosts()) == [IPAddress('192.168.0.0'),IPAddress('192.168.0.1')]
assert list(IPNetwork("192.168.0.0/32").iter_hosts()) == [IPAddress('192.168.0.0')]
def test_ipaddress_boolean_evaluation_v4():
assert not bool(IPAddress('0.0.0.0'))
assert bool(IPAddress('0.0.0.1'))
assert bool(IPAddress('255.255.255.255'))
def test_ipnetwork_boolean_evaluation_v4():
assert bool(IPNetwork('0.0.0.0/0'))
def test_ipnetwork_equality_v4():
assert IPNetwork('192.0.2.0/255.255.254.0') == IPNetwork('192.0.2.0/23')
assert IPNetwork('192.0.2.65/255.255.254.0') == IPNetwork('192.0.2.0/23')
assert IPNetwork('192.0.2.65/255.255.254.0') == IPNetwork('192.0.2.65/23')
assert IPNetwork('192.0.2.65/255.255.255.0') != IPNetwork('192.0.2.0/23')
assert IPNetwork('192.0.2.65/255.255.254.0') != IPNetwork('192.0.2.65/24')
def test_ipnetwork_slicing_v4():
ip = IPNetwork('192.0.2.0/23')
assert ip.first == 3221225984
assert ip.last == 3221226495
assert ip[0] == IPAddress('192.0.2.0')
assert ip[-1] == IPAddress('192.0.3.255')
assert list(ip[::128]) == [
IPAddress('192.0.2.0'),
IPAddress('192.0.2.128'),
IPAddress('192.0.3.0'),
IPAddress('192.0.3.128'),
]
def test_ip_network_membership_v4():
for what, network, result in [
(IPAddress('192.0.2.1'), IPNetwork('192.0.2.0/24'), True),
(IPAddress('192.0.2.255'), IPNetwork('192.0.2.0/24'), True),
(IPNetwork('192.0.2.0/24'), IPNetwork('192.0.2.0/23'), True),
(IPNetwork('192.0.2.0/24'), IPNetwork('192.0.2.0/24'), True),
(IPNetwork('192.0.2.0/23'), IPNetwork('192.0.2.0/24'), False),
]:
assert (what in network) is result
assert (str(what) in network) is result
def test_ip_network_equality_v4():
assert IPNetwork('192.0.2.0/24') == IPNetwork('192.0.2.0/24')
assert IPNetwork('192.0.2.0/24') is not IPNetwork('192.0.2.0/24')
assert not IPNetwork('192.0.2.0/24') != IPNetwork('192.0.2.0/24')
assert not IPNetwork('192.0.2.0/24') is IPNetwork('192.0.2.0/24')
def test_ipaddress_integer_constructor_v4():
    """IPAddress accepts a plain integer, with an optional version argument."""
    cases = [
        (IPAddress(1), IPAddress('0.0.0.1')),
        (IPAddress(1, 4), IPAddress('0.0.0.1')),
        (IPAddress(1, 6), IPAddress('::1')),
        (IPAddress(10), IPAddress('0.0.0.10')),
    ]
    for built, expected in cases:
        assert built == expected
def test_ipaddress_integer_constructor_v6():
assert IPAddress(0x1ffffffff) == IPAddress('::1:ffff:ffff')
assert IPAddress(0xffffffff, 6) == IPAddress('::255.255.255.255')
assert IPAddress(0x1ffffffff) == IPAddress('::1:ffff:ffff')
assert IPAddress(2 ** 128 - 1) == IPAddress('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
def test_ipaddress_inet_aton_constructor_v4():
assert IPAddress('0x7f.0x1') == IPAddress('127.0.0.1')
assert IPAddress('0x7f.0x0.0x0.0x1') == IPAddress('127.0.0.1')
assert IPAddress('0177.01') == IPAddress('127.0.0.1')
assert IPAddress('0x7f.0.01') == IPAddress('127.0.0.1')
# Partial addresses - pretty weird, but valid ...
assert IPAddress('127') == IPAddress('0.0.0.127')
assert IPAddress('127') == IPAddress('0.0.0.127')
assert IPAddress('127.1') == IPAddress('127.0.0.1')
assert IPAddress('127.0.1') == IPAddress('127.0.0.1')
def test_ipaddress_inet_pton_constructor_v4():
with pytest.raises(AddrFormatError):
IPAddress('0177.01', flags=INET_PTON)
with pytest.raises(AddrFormatError):
IPAddress('0x7f.0.01', flags=INET_PTON)
with pytest.raises(AddrFormatError):
IPAddress('10', flags=INET_PTON)
with pytest.raises(AddrFormatError):
IPAddress('10.1', flags=INET_PTON)
with pytest.raises(AddrFormatError):
IPAddress('10.0.1', flags=INET_PTON)
assert IPAddress('10.0.0.1', flags=INET_PTON) == IPAddress('10.0.0.1')
def test_ipaddress_constructor_zero_filled_octets_v4():
assert IPAddress('010.000.000.001') == IPAddress('8.0.0.1')
assert IPAddress('010.000.000.001', flags=ZEROFILL) == IPAddress('10.0.0.1')
assert IPAddress('010.000.001', flags=ZEROFILL) == IPAddress('10.0.0.1')
with pytest.raises(AddrFormatError):
assert IPAddress('010.000.001', flags=INET_PTON|ZEROFILL)
assert IPAddress('010.000.000.001', flags=INET_PTON|ZEROFILL) == IPAddress('10.0.0.1')
# Short flags.
assert IPAddress('010.000.000.001', flags=P|Z) == IPAddress('10.0.0.1')
def test_ipnetwork_constructor_v4():
assert IPNetwork('192.0.2.0/24') == IPNetwork('192.0.2.0/24')
assert IPNetwork('192.0.2.0/255.255.255.0') == IPNetwork('192.0.2.0/24')
assert IPNetwork('192.0.2.0/0.0.0.255') == IPNetwork('192.0.2.0/24')
assert IPNetwork(IPNetwork('192.0.2.0/24')) == IPNetwork('192.0.2.0/24')
assert IPNetwork(IPNetwork('192.0.2.0/24')) == IPNetwork('192.0.2.0/24')
def test_ip_network_cosntructor_implicit_prefix_flag_v4():
assert IPNetwork('192.0.2.0', implicit_prefix=True) == IPNetwork('192.0.2.0/24')
assert IPNetwork('231.192.0.15', implicit_prefix=True) == IPNetwork('231.192.0.15/4')
assert IPNetwork('10', implicit_prefix=True) == IPNetwork('10.0.0.0/8')
def test_ipnetwork_constructor_other_flags_v4():
assert IPNetwork('172.24.200') == IPNetwork('172.24.200.0/32')
assert IPNetwork('172.24.200', implicit_prefix=True) == IPNetwork('172.24.200.0/16')
assert IPNetwork('172.24.200', implicit_prefix=True, flags=NOHOST) == IPNetwork('172.24.0.0/16')
def test_ipnetwork_bad_string_constructor():
with pytest.raises(AddrFormatError):
IPNetwork('foo')
def test_ipaddress_netmask_v4():
assert IPAddress('0.0.0.0').netmask_bits() == 0
assert IPAddress('128.0.0.0').netmask_bits() == 1
assert IPAddress('255.0.0.0').netmask_bits() == 8
assert IPAddress('255.255.0.0').netmask_bits() == 16
assert IPAddress('255.255.255.0').netmask_bits() == 24
assert IPAddress('255.255.255.254').netmask_bits() == 31
assert IPAddress('255.255.255.255').netmask_bits() == 32
assert IPAddress('1.1.1.1').netmask_bits() == 32
def test_ipaddress_hex_format():
assert hex(IPAddress(0)) == '0x0'
assert hex(IPAddress(0xffffffff)) == '0xffffffff'
@pytest.mark.skipif('sys.version_info > (2,)', reason="requires python 2.x behaviour")
def test_ipaddress_oct_format_py2():
assert oct(IPAddress(0xffffffff)) == '037777777777'
assert oct(IPAddress(0)) == '0'
@pytest.mark.skipif('sys.version_info < (3,)', reason="python 3.x behaviour")
def test_ipaddress_oct_format_py3():
assert oct(IPAddress(0xffffffff)) == '0o37777777777'
assert oct(IPAddress(0)) == '0o0'
def test_is_multicast():
ip = IPAddress('239.192.0.1')
assert ip.is_multicast()
def test_multicast_info():
ip = IPAddress('224.0.1.173')
assert ip.info.IPv4[0].designation == 'Multicast'
assert ip.info.IPv4[0].prefix == '224/8'
assert ip.info.IPv4[0].status == 'Reserved'
assert ip.info.Multicast[0].address == '224.0.1.173'
def test_ipaddress_pickling_v4():
ip = IPAddress(3221225985)
assert ip == IPAddress('192.0.2.1')
buf = pickle.dumps(ip)
ip2 = pickle.loads(buf)
assert ip2 == ip
assert id(ip2) != id(ip)
assert ip2.value == 3221225985
assert ip2.version == 4
def test_ipnetwork_pickling_v4():
cidr = IPNetwork('192.0.2.0/24')
assert cidr == IPNetwork('192.0.2.0/24')
buf = pickle.dumps(cidr)
cidr2 = pickle.loads(buf)
assert cidr2 == cidr
assert id(cidr2) != id(cidr)
assert cidr2.value == 3221225984
assert cidr2.prefixlen == 24
assert cidr2.version == 4
def test_ipnetwork_incrementing_by_int():
ip = IPNetwork('192.0.2.0/28')
results = []
for i in range(16):
results.append(str(ip))
ip += 1
assert results == [
'192.0.2.0/28',
'192.0.2.16/28',
'192.0.2.32/28',
'192.0.2.48/28',
'192.0.2.64/28',
'192.0.2.80/28',
'192.0.2.96/28',
'192.0.2.112/28',
'192.0.2.128/28',
'192.0.2.144/28',
'192.0.2.160/28',
'192.0.2.176/28',
'192.0.2.192/28',
'192.0.2.208/28',
'192.0.2.224/28',
'192.0.2.240/28'
]
def test_rfc3021_subnets():
    """RFC 3021 point-to-point behaviour: IPv4 /31 and /32 networks have
    no broadcast address and every address counts as a host."""
    # Tests for /31 subnet
    assert IPNetwork('192.0.2.0/31').network == IPAddress('192.0.2.0')
    assert IPNetwork('192.0.2.0/31').broadcast is None
    assert list(IPNetwork('192.0.2.0/31').iter_hosts()) == [IPAddress('192.0.2.0'), IPAddress('192.0.2.1')]
    # Tests for /32 subnet
    assert IPNetwork('192.0.2.0/32').network == IPAddress('192.0.2.0')
    assert IPNetwork('192.0.2.0/32').broadcast is None
    assert list(IPNetwork('192.0.2.0/32').iter_hosts()) == [IPAddress('192.0.2.0')]
    # IPv6 must not be affected
    assert IPNetwork('abcd::/127').broadcast is not None
    assert IPNetwork('abcd::/128').broadcast is not None
def test_ipnetwork_change_prefixlen():
    """The prefixlen attribute is writable on IPv4 and IPv6 networks."""
    for address, before, after in [
        ('192.168.0.0/16', 16, 8),
        ('dead:beef::/16', 16, 64),
    ]:
        net = IPNetwork(address)
        assert net.prefixlen == before
        net.prefixlen = after
        assert net.prefixlen == after
def test_ipnetwork_change_netmask():
ip = IPNetwork('192.168.0.0/16')
ip.netmask = '255.0.0.0'
assert ip.prefixlen == 8
ip = IPNetwork('dead:beef::/16')
ip.netmask = 'ffff:ffff:ffff:ffff::'
assert ip.prefixlen == 64
def test_spanning_cidr_handles_strings():
    """spanning_cidr accepts a mix of IPAddress objects and plain strings."""
    # This tests that a regression introduced in commit 0fda41a is fixed:
    # the regression caused an error when str addresses were passed in.
    addresses = [
        IPAddress('10.0.0.1'),
        IPAddress('10.0.0.2'),
        '10.0.0.3',
        '10.0.0.4',
    ]
    assert spanning_cidr(addresses) == IPNetwork('10.0.0.0/29')
    # Order of the input iterable must not matter.
    assert spanning_cidr(reversed(addresses)) == IPNetwork('10.0.0.0/29')
| true |
a989a40005dbc79fb78e244b006a976d10860d34 | Python | rodneyrick/Machine_Learning_A-Z | /Part-11 - Plotting/boxplot_grouped.py | UTF-8 | 516 | 3.390625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
The grouped boxplot it's possbile to review with this
property is important or not to include into analysis
It's possble too to undestarding the maximum.
Other thing it's how we can understand the outliers.
"""
import seaborn as sns
sns.set(style="ticks")
# Load the example tips dataset
tips = sns.load_dataset("tips")
# Draw a nested boxplot to show bills by day and sex
sns.boxplot(x="day", y="total_bill", hue="sex", data=tips, palette="PRGn")
sns.despine(offset=10, trim=True) | true |
c6bc2baec2b21e44a7e10c87d0a093fd1bc85e99 | Python | NatanBagrov/NLP_HW1 | /TaggedCompBuilder.py | UTF-8 | 833 | 2.53125 | 3 | [] | no_license | from itertools import chain
class TaggedCompBuilder:
    """Builds a tagged competition file from a tagged corpus.

    For every sentence of `compNonTagged` that also appears in the tagged
    corpus `taggedComp` (where each sentence line is followed by its tag
    line), writes "word_tag word_tag ... " lines to ../comp748.wtag.
    """

    # Kept for backward compatibility with any external readers.
    fileName = None
    lines = None
    splitted = None
    parser = None

    def __init__(self, taggedComp, compNonTagged):
        super().__init__()
        self.fileName = taggedComp
        # 'with' blocks close the input files (the original leaked all
        # three file handles, including the never-closed output file).
        with open(self.fileName) as tagged_file:
            self.lines = [line.rstrip('\n') for line in tagged_file]
        with open(compNonTagged) as comp_file:
            comp_sentences = [line.rstrip('\n') for line in comp_file]

        # Index of the FIRST occurrence of each corpus line: one O(n)
        # pass replaces an O(n) 'in' plus an O(n) .index() per sentence.
        first_index = {}
        for position, line in enumerate(self.lines):
            first_index.setdefault(line, position)

        with open('../comp748.wtag', 'w') as out_file:
            for sentence in comp_sentences:
                position = first_index.get(sentence)
                if position is None:
                    continue
                # The tag line follows its sentence in the corpus.
                # NOTE(review): like the original, this assumes a matched
                # sentence is never the corpus's last line.
                tags = self.lines[position + 1]
                for word, tag in zip(sentence.split(), tags.split()):
                    out_file.write(word + '_' + tag + ' ')
                out_file.write('\n')
2806bae343aaa0107d1d168412e5b0093df0b42d | Python | Chester1901/DirectLine | /Home Work/Lesson1 Hard Polyakov.py | UTF-8 | 783 | 3.8125 | 4 | [] | no_license | __author__ = 'Поляков Игорь Владимирович.'
# Задание-1:
# Ваня набрал несколько операций в интерпретаторе и получал результаты:
# Код: a == a**2
# Результат: True
# Код: a == a*2
# Результат: True
# Код: a > 999999
# Результат: True
# Вопрос: Чему была равна переменная a,
# если точно известно, что её значение не изменялось?
# Подсказка: это значение точно есть ;)
# Ответ
class NewInt(int):
    """An int whose '>' comparison is rigged to claim it exceeds 999999.

    Puzzle answer: with a = NewInt(0), `a == a**2` and `a == a*2` hold
    (0 == 0), and this subclass also makes `a > 999999` evaluate True.
    """

    def __gt__(self, other):
        # Rigged case from the puzzle.
        if other == 999999:
            return True
        # Bug fix: the original called self.__int() (a typo for
        # __int__), which raised AttributeError for any other operand.
        return int(self) > int(other)
a=NewInt(0)  # 0 is the puzzle's answer: 0 == 0**2 and 0 == 0*2
print(type(a))
print(a==a**2)  # True: 0 == 0
print(a==a*2)  # True: 0 == 0
print(a>999999) | true |
5388c0651d98a63a001f62c6cf406935bde34dbb | Python | mtrinquero/ImageProcessing | /ImageGradients_EdgeDetection.py | UTF-8 | 1,430 | 3.140625 | 3 | [] | no_license | # Mark Trinquero
# Python Image Processing - Detecting Gradients / Edges
import cv2
import numpy as np
import scipy as sp
def imageGradientX(image):
    """Absolute horizontal gradient |I(y, x+1) - I(y, x)| of a 2-D image.

    The result has the same shape as the input; the last column, which
    has no right neighbour, stays 0.
    """
    output = np.zeros(image.shape)
    # Vectorised difference instead of per-pixel Python loops. The
    # signed cast avoids uint8 wrap-around, matching the original's
    # per-pixel int() casts.
    signed = image.astype(np.int64)
    output[:, :-1] = np.abs(signed[:, 1:] - signed[:, :-1])
    return output
def imageGradientY(image):
    """Absolute vertical gradient |I(i+1, j) - I(i, j)| of a 2D image.

    The last row is left at 0 because it has no neighbour below.
    Pixels are promoted to signed int before subtracting so uint8
    inputs do not wrap.  Vectorized replacement for the original
    per-pixel Python loop.
    """
    output = np.zeros(image.shape)
    output[:-1, :] = np.abs(image[1:, :].astype(int) - image[:-1, :].astype(int))
    return output
def computeGradient(image, kernel):
    """Cross-correlate *image* with a 3x3 *kernel* normalized to unit
    absolute weight; border pixels are left at 0.

    Bug fixes: the original discarded the computed response (it was
    overwritten with `np.copy(image)` just before returning), wrote to
    column `len(image)` (out of bounds for square images), and used
    `len(image[0]-1)` which subtracts 1 from pixel values instead of
    the length.
    """
    image = np.asarray(image, dtype=float)
    kernel = np.asarray(kernel, dtype=float)
    # Normalize so the absolute kernel weights sum to 1 (the original
    # accumulated float(abs(...)) of the entries for the same purpose).
    total = np.abs(kernel).sum()
    if total != 0:
        kernel = kernel / total
    output = np.zeros(image.shape)
    rows, cols = image.shape
    for i in range(1, rows - 1):
        for j in range(1, cols - 1):
            # Weighted sum of the 3x3 neighbourhood centred on (i, j).
            output[i, j] = np.sum(kernel * image[i - 1:i + 2, j - 1:j + 2])
    return output
def edgeDetection(image):
    """Run OpenCV's Canny edge detector on *image* with the fixed
    low/high thresholds 100/200 and return the binary edge map."""
    edges = cv2.Canny(image,100,200)
    return edges
| true |
8f7f866128e53c2839943ac676c363c0de88ad11 | Python | discipleartem/exercises_book_1 | /list/list-3-7-change-list-3.py | UTF-8 | 2,696 | 3.5625 | 4 | [] | no_license | # 3-7. Сокращение списка гостей: только что выяснилось, что новый обеденный стол привез-
# ти вовремя не успеют, и места хватит только для двух гостей.
# • Начните с программы из упражнения 3-6. Добавьте команду для вывода сообщения
# о том, что на обед приглашаются всего два гостя.
# • Используйте метод pop() для последовательного удаления гостей из списка до тех
# пор, пока в списке не останутся только два человека. Каждый раз, когда из списка
# удаляется очередное имя, выведите для этого человека сообщение о том, что вы со-
# жалеете об отмене приглашения.
# • Выведите сообщение для каждого из двух человек, остающихся в списке. Сообщение
# должно подтверждать, что более раннее приглашение остается в силе.
# • Используйте команду del для удаления двух последних имен, чтобы список остался
# пустым. Выведите список, чтобы убедиться в том, что в конце работы программы
# список действительно не содержит ни одного элемента.
# Trim the guest list down to the two available seats, notify everyone,
# then empty the list completely.
guests_list = ['Kolya S.', 'Andrew&Marina', 'Viktor', 'Kirill', 'Dima Ch.', 'Jabber', 'Konstantin']
print('Упс ... новый стол не приехал и у нас всего 2 места ...')
# Drop guests from the tail until only two remain, apologising to each.
while len(guests_list) > 2:
    print('Простите {}, но из-за технических причин, я более не смогу Вас принять'.format(guests_list.pop()))
print()
# Re-confirm the invitation for the two remaining guests.
for lucky_guest in guests_list[:2]:
    print('Дорогой {}, не смотря на различные неприятности при подготовке банкета,'
          ' я все же желаю Вас видеть на нем'.format(lucky_guest))
# Remove the last two names so the list ends up empty.
guests_list.clear()
print()
print('Список гостей должен быть пуст')
print(guests_list)
4197d469b0500a029af4d067ef2ca1ff9f041caa | Python | martraj/CECS451-NQueens | /n-queens.py | UTF-8 | 10,876 | 3.546875 | 4 | [] | no_license | '''
CECS 451: Artificial Intelligence
Assignment 4: N-Queens Solver
'''
import sys
import random
import math
import matplotlib.pyplot as plt
import statistics
class Encoding:
    """One GA candidate: a board-encoding string plus fitness bookkeeping.

    The encoding is a string where index i is the board row and the
    character at i is the column of that row's queen (e.g. '2031').
    fitness and probability start at 0 and are filled in later by
    gen_probabilities().  The accessors are kept for compatibility with
    the rest of the module.
    """
    def __init__(self, encoding):
        self.encoding = encoding
        self.fitness = 0
        self.probability = 0
    def get_Fitness(self):
        return self.fitness
    def get_Encoding(self):
        return self.encoding
    def get_Probability(self):
        return self.probability
    def set_Encoding(self, e):
        self.encoding = e
    def set_Fitness(self, f):
        self.fitness = f
    def set_Probability(self, p):
        self.probability = p
def gen_rand_board(numQueens):
    """Print an empty numQueens x numQueens board of '-' cells.

    Bug fix: the original appended the *same* `row` list for every
    board row and never reset it, so all rows aliased a single list
    that grew to numQueens**2 dashes.  Each row is now independent.
    (Despite the name, nothing here is random; only the printed
    output was relied upon and it is unchanged.)
    """
    board = [['-' for _ in range(numQueens)] for _ in range(numQueens)]
    for row in board:
        for cell in row:
            print(cell, end=' ')
        print('\n')
def display_results(numQueens, encoding):
    """Print the board described by *encoding*: 'X' marks a queen,
    '-' an empty cell; one board row per printed row."""
    placement = encoding.get_Encoding()
    # Row i has its queen in column int(placement[i]).
    for row_index in range(numQueens):
        queen_col = int(placement[row_index])
        for col_index in range(numQueens):
            print('X' if col_index == queen_col else '-', end=' ')
        print('\n')
def gen_encodings(numQueens, numStates):
    """Create numStates Encoding objects, each encoding a random
    permutation of the columns 0..numQueens-1 as a digit string.

    Bug fix: the original iterated over the sampled *values* and used
    them as indices into the sample (building the inverse permutation
    instead of the sample itself); the values are now joined directly.
    """
    encodings = []
    for _ in range(numStates):
        # One queen column per row, all distinct.
        permutation = random.sample(range(numQueens), numQueens)
        encodings.append(Encoding(''.join(str(col) for col in permutation)))
    return encodings
def gen_probabilities(encodings):
    """Assign fitness and selection probability to every encoding, then
    return the encodings sorted by decreasing probability.

    probability(e) = fitness(e) / sum of all fitnesses.
    """
    # Seed the denominator with a tiny epsilon so the division below
    # never hits zero even if every encoding has fitness 0.
    total_fitness = 0.0001
    for enc in encodings:
        score = fitness_func(enc.get_Encoding())
        enc.set_Fitness(score)
        total_fitness += score
    for enc in encodings:
        enc.set_Probability(enc.get_Fitness() / total_fitness)
    return sort_encodings(encodings)
def sort_encodings(encodings):
    """Return the encodings ordered by decreasing selection probability.

    Replaces the original O(n^2) repeated max()+del loop with a single
    stable sort; ties keep their original relative order, matching the
    old extraction order.  The input list is no longer emptied as a
    side effect (no caller relied on that -- they rebind the result).
    """
    return sorted(encodings, key=lambda item: item.probability, reverse=True)
def local_search(queenStr): # finds attacking queens
    """Count the pairs of mutually attacking queens in *queenStr*.

    A pair attacks when the queens share a column character (same
    "row" in the encoding) or sit on a common diagonal.  Columns never
    clash because each string index is its own column.
    """
    n = len(queenStr)
    attacks = 0
    for i in range(n):
        for j in range(i + 1, n):
            same_row = queenStr[i] == queenStr[j]
            same_diag = abs(i - j) == abs(int(queenStr[i]) - int(queenStr[j]))
            # The two conditions are mutually exclusive, so summing the
            # booleans counts each attacking pair exactly once per kind.
            attacks += same_row + same_diag
    return attacks
def selection(numQueens, encodings):
    """Stochastic universal sampling: pick floor(0.75 * numQueens)
    encodings with probability proportional to their (pre-computed,
    descending-sorted) selection probabilities.

    NOTE(review): the pointer count is derived from numQueens rather
    than from len(encodings) -- confirm this is the intended fraction
    of the population.
    """
    next_gen = []  # encodings selected for the next generation
    num_pointers = math.floor((numQueens*.75))
    # Pointers are evenly spaced across the probability line, sharing a
    # single random starting offset.
    point_distance = 1/num_pointers
    start_loc = random.uniform(0, point_distance)
    index = 0
    # Running cumulative probability up to and including encodings[index].
    sum_sel = encodings[index].get_Probability()
    # Locate, for each pointer, the encoding whose cumulative-probability
    # segment contains it.
    for i in range(num_pointers):
        pointer = i*point_distance + start_loc  # position of this pointer
        if pointer <= sum_sel:  # pointer falls inside the current segment
            next_gen.append(encodings[index])
        else:  # advance through the cumulative distribution
            index+=1
            for j in range(index, len(encodings)):
                sum_sel += encodings[j].get_Probability()
                if pointer <= sum_sel:
                    next_gen.append(encodings[j])
                    break;
                index = j
    '''
    print("selected encodings")
    for e in next_gen:
        print(e.get_Encoding())
    '''
    # Degenerate case: all probabilities were zero -- keep the old pool.
    if sum_sel == 0:
        return encodings
    return next_gen
def crossover(next_gen, numStates):
    """Breed a new population of numStates Encodings by one-point
    crossover between randomly chosen parents from *next_gen*.

    Each crossover produces two children; when only one slot is left
    in the population, the second child is discarded.
    """
    offspring = []
    while len(offspring) < numStates:
        parent_a = random.choice(next_gen).get_Encoding()
        parent_b = random.choice(next_gen).get_Encoding()
        # Cut point anywhere inside the parents' DNA strings.
        cut = random.randint(0, len(parent_a) - 1)
        child_one = parent_a[:cut] + parent_b[cut:]
        child_two = parent_b[:cut] + parent_a[cut:]
        offspring.append(Encoding(child_one))
        if len(offspring) < numStates:
            offspring.append(Encoding(child_two))
    return offspring
def mutation(encodings):
    """Mutate every encoding in place: swap two random positions, then
    repair any duplicate characters (introduced by crossover children)
    with random unused digits so the string stays duplicate-free.

    Returns the same Encoding objects, mutated in place.
    """
    mut_gen = []
    for e in encodings:
        length = len(e.get_Encoding())
        # Pick two positions to swap (they may coincide, i.e. a no-op).
        rand1 = random.randrange(length)
        rand2 = random.randrange(length)
        newStr = list(e.get_Encoding())
        temp = newStr[rand1]
        newStr[rand1] = newStr[rand2]
        newStr[rand2] = temp
        # Rebuild the string keeping first occurrences; each duplicate
        # is replaced by a random digit not used yet.
        uniqueStr = []
        for i in range(len(newStr)):
            if newStr[i] not in uniqueStr:
                uniqueStr.append(newStr[i])
            else:
                r = random.randrange(length)
                while(str(r) in uniqueStr):
                    r = random.randrange(length)
                uniqueStr.append(str(r))
        e.set_Encoding("".join(uniqueStr))
        mut_gen.append(e)
    return mut_gen  # the same objects, with mutated encoding strings
def ncr(a, b):
    """Binomial coefficient C(a, b) = a! / (b! * (a-b)!), returned as a
    float (true division, matching the original)."""
    numerator = math.factorial(a)
    denominator = math.factorial(b) * math.factorial(a - b)
    return numerator / denominator
def fitness_func(queenStr):
    """Fitness of a board encoding: the number of NON-attacking queen
    pairs, i.e. C(n, 2) minus the attacking pairs counted by
    local_search().  A perfect board scores C(n, 2)."""
    total_pairs = ncr(len(queenStr), 2)
    return total_pairs - local_search(queenStr)
def calculate_stats(steps):
    """Print average, median, minimum and maximum of *steps*.

    Prints nothing when the list is empty.  Removed the unused sorted
    copy of the list and the dead sum/len temporaries from the
    original.
    """
    if len(steps) != 0:
        average = sum(steps) / len(steps)
        median = statistics.median(steps)
        print("Average: ", average)
        print("Median: ", median)
        print("Minimum: ", min(steps))
        print("Maximum: ", max(steps))
def nqueens_solver(numQ, numS):
    """Run the genetic algorithm 100 times for numQ queens with a
    population of numS encodings, print the last run's solved board
    (or a failure message), then plot a histogram of the step counts
    and print summary statistics.

    Fixes: the success check used `is not ""` (identity comparison
    against a string literal -- unreliable and a SyntaxWarning), now
    `!= ""`; removed the dead duplicate `goal` assignment and the
    unused `not_found` flag; the generation counter no longer shadows
    the outer loop variable.
    """
    numQ = int(numQ)
    numS = int(numS)
    goal = ncr(numQ, 2)  # fitness of a board with zero attacking pairs
    steps = []
    encoding_answer = Encoding("")
    # Run 100 independent GA searches to gather step statistics.
    for _ in range(100):
        step = 0
        encodings = gen_encodings(numQ, numS)
        encoding_answer = Encoding("")
        generation = 0
        while generation < 1000:
            encodings = gen_probabilities(encodings)
            # gen_probabilities sorts descending, so index 0 is the best.
            if encodings[0].get_Fitness() == goal:
                encoding_answer = encodings[0]
                break
            next_gen = selection(numQ, encodings)
            crossover_gen = crossover(next_gen, numS)
            encodings = mutation(crossover_gen)
            step += 1
            generation += 1
        steps.append(step)
    # Only the final run's outcome is reported (matches the original).
    if encoding_answer.get_Encoding() != "":
        display_results(numQ, encoding_answer)
    else:
        print("No solution found.")
    histogram(numS, steps)
    calculate_stats(steps)
def histogram(k, steps):
    """Plot a histogram of GA step counts (bins 0..25), show it, and
    save it to 'histogram.png'.  *k* only appears in the chart title.

    NOTE(review): plt.legend() is called with no labelled artists, so
    it emits a warning and draws nothing -- confirm it is wanted.
    """
    bins = [i for i in range (26)]
    fig = plt.figure()
    plt.hist(steps, bins, histtype='bar', rwidth=0.8)
    plt.xlabel('# of Steps (x)')
    plt.ylabel('# of Iterations Performing x Steps')
    plt.title('Genetic Algorithm for k=' + str(k))
    plt.legend()
    plt.show()
    fig.savefig('histogram.png')
#nqueens_solver(8, 4)
#main_function(4,2)
#nqueens_solver(5, 10)
#----------MAIN----------
# CLI entry point: argv[1] = number of queens, argv[2] = population
# size (passed as strings; nqueens_solver converts them to int).
nqueens_solver(sys.argv[1], sys.argv[2])
| true |
24be1039f10217a5a3d62d6197523b5a40850bfc | Python | kkmonlee/Programming-Contests | /HackerRank/Implementation/FindDigits.py | UTF-8 | 168 | 3.640625 | 4 | [] | no_license | def findDigits(n):
    count = 0
    digits = str(n)
    # Count the digits of n that divide n evenly; '0' digits are
    # skipped so we never divide by zero.
    for digit in digits:
        if digit != '0' and n % int(digit) == 0:
            count += 1
    return count
263071797d91397e29da32bb604c4a71a2eb8093 | Python | mariohsouto/cvxpy_docker | /main.py | UTF-8 | 674 | 3.078125 | 3 | [
"MIT"
] | permissive | import cvxpy as cp
import numpy as np
if __name__ == "__main__":
    # Box-constrained least squares: minimize ||Ax - b||^2 s.t. 0 <= x <= 1.
    # Problem data.
    m = 30
    n = 20
    np.random.seed(1)
    A = np.random.randn(m, n)
    b = np.random.randn(m)
    # Construct the problem.
    x = cp.Variable(n)
    objective = cp.Minimize(cp.sum_squares(A*x - b))
    constraints = [0 <= x, x <= 1]
    prob = cp.Problem(objective, constraints)
    # The optimal objective value is returned by `prob.solve()`.
    result = prob.solve()
    # The optimal value for x is stored in `x.value`.
    print(x.value)
    # The optimal Lagrange multiplier for a constraint is stored in
    # `constraint.dual_value`.
    print(constraints[0].dual_value)
76d8aef7d9d7cc1af5f0ad62e42001e0e995b8f8 | Python | poojachinnikrishnan/Python-Intro-1 | /Problem1.py | UTF-8 | 31 | 3.1875 | 3 | [] | no_license | a=3
b=5
# Sum of squares: with a = 3 (assigned above) and b = 5 this prints 34.
c=a*a+b*b
print(c)
| true |
f8f5e1dc841d3ec8ff8e7fcd0431a666663601f6 | Python | ezekielp/algorithms-practice | /insertInterval.py | UTF-8 | 1,926 | 3.15625 | 3 | [] | no_license | # [[5, 8], [12, 18]] // [1, 3] // [10, 11] // [20, 24] // [3, 28] // [4, 15] // [6, 14]
def insert(self, intervals, newInterval): # [4, 15]
    """Insert *newInterval* into the sorted, non-overlapping
    *intervals*, merging overlaps, and return the resulting list.

    NOTE(review): this draft expects objects with .start/.end
    attributes (not [start, end] lists, unlike the commented example
    above) and it mutates newInterval in place while merging.
    """
    res, n = [], newInterval # [], [4, 18]
    for index, i in enumerate(intervals): # 1, [12, 18]
        if i.end < n.start:
            # Current interval lies entirely before the new one.
            res.append(i)
        elif n.end < i.start:
            # New interval fits before the current one; the rest of the
            # list is unchanged, so we can return immediately.
            res.append(n)
            return res+intervals[index:] # can return earlier
        else: # overlap case
            n.start = min(n.start, i.start)
            n.end = max(n.end, i.end)
    res.append(n)
    return res
class Solution:
    """Unfinished binary-search variant of insert-interval.

    NOTE(review): this is a broken draft --
      * insert() computes newLeft and then falls through (returns
        None); `right`/`newRight` are never used.
      * In bisearch(), `elif: target > interval[1]: lo = mid` is a
        SyntaxError (stray colon), and `lo = mid` cannot make progress
        when hi == lo + 1.
    Kept verbatim for reference.
    """
    def insert(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:
        if not intervals: return [newInterval]
        left = self.bisearch(intervals, newInterval[0], 'l')
        right = self.bisearch(intervals, newInterval[1], 'r')
        newLeft = newRight = None
        if left[1] == 'merge':
            newLeft = intervals[left[0]][0]
        else:
            newLeft = newInterval[0]
    def bisearch(self, intervals, target, side):
        """Binary-search *target* against the interval list; intended to
        return an (index, 'merge'|'insert_before'|'insert_after') pair."""
        lo, hi = 0, len(intervals) - 1
        while lo != hi:
            mid = (hi + lo) // 2
            interval = intervals[mid]
            if target == interval[0] or target == interval[1] or (target > interval[0] and target < interval[1]):
                lo, hi = mid, mid
            else:
                if target < interval[0]: hi = mid - 1
                elif: target > interval[1]: lo = mid
        i = intervals[lo]
        if target == i[0] or target == i[1] or (target > i[0] and target < i[1]):
            return (lo, 'merge')
        else:
            # if side == 'l':
            if target < i[0]: return (lo, 'insert_before')
            elif target > i[1]: return (lo, 'insert_after')
            # else: # side == 'r'
# [[5, 8]] // [1, 3] // [10, 21]
| true |
b1dcccf204dfba4b1940fa7cf7f2d0ed5f89d526 | Python | Rajeevaletshanth/Applicant-Ranking | /3.Text Preprocessing.py | UTF-8 | 4,960 | 3.015625 | 3 | [] | no_license | import nltk
from nltk.corpus import stopwords
import spacy
import gensim
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from string import punctuation
from collections import Counter
import re
import unidecode
res = open("ResumeStr.txt").read()
#Lower case conversion
def lower_case_convertion(text):
    """Return *text* converted to lowercase."""
    return text.lower()
res = lower_case_convertion(res)
# HTML tags removal Implementation using regex module
def remove_html_tags(text):
    """Return *text* with every HTML tag replaced by a single space."""
    tag_pattern = r'<.*?>'
    return re.sub(tag_pattern, ' ', text)
res = remove_html_tags(res)
# Implementation of Removing URLs using python regex
def remove_urls(text):
    """Return *text* with http(s):// and www. URLs replaced by one
    space each."""
    return re.sub(r'https?://\S+|www\.\S+', ' ', text)
res = remove_urls(res)
# Implementation of accented text to ASCII converter in python
def accented_to_ascii(text):
    """
    Return :- text after converting accented characters to their
    closest ASCII equivalents (via the unidecode package)
    Input :- string
    Output :- string
    """
    # unidecode transliterates accented characters to ASCII values
    text = unidecode.unidecode(text)
    return text
res = accented_to_ascii(res)
## Implementation of lemmatization using nltk
def lemmatization(text):
    """Lemmatize every token of *text* with the module-level WordNet
    lemmatizer and return the tokens re-joined by single spaces.

    Relies on the global `lemma` (WordNetLemmatizer) defined just
    below this function.
    """
    # word tokenization
    tokens = nltk.word_tokenize(text)
    for index in range(len(tokens)):
        # replace each token with its lemma, in place
        lemma_word = lemma.lemmatize(tokens[index])
        tokens[index] = lemma_word
    return ' '.join(tokens)
# initialize lemmatizer object
lemma = WordNetLemmatizer()
res = lemmatization(res)
# Implementation of removing punctuations using string library
def remove_punctuation(text):
    """Strip every ASCII punctuation character from *text*."""
    strip_table = str.maketrans('', '', punctuation)
    return text.translate(strip_table)
res = remove_punctuation(res)
# Implementation of removing stopwords using all stop words from nltk, spacy, gensim
def remove_stopwords(text):
    """
    Return :- String after removing stopwords
    Input :- String
    Output :- String

    Tokenizes with nltk and filters against the module-level
    `all_stopwords` union (nltk + spacy + gensim) built below.
    """
    text_without_sw = []
    # tokenization
    text_tokens = nltk.word_tokenize(text)
    for word in text_tokens:
        # keep only tokens that are not stopwords
        if word not in all_stopwords:
            text_without_sw.append(word)
    # joining all tokens after removing stop words
    without_sw = ' '.join(text_without_sw)
    return without_sw
# list of stopwords from nltk
# Build one deduplicated stopword list from nltk, spacy and gensim,
# then run the stopword-removal step of the pipeline.
stopwords_nltk = list(stopwords.words('english'))
sp = spacy.load('en_core_web_sm')
# list of stopwords from spacy
stopwords_spacy = list(sp.Defaults.stop_words)
# list of stopwords from gensim
stopwords_gensim = list(gensim.parsing.preprocessing.STOPWORDS)
# unique stopwords from all stopwords
all_stopwords = []
all_stopwords.extend(stopwords_nltk)
all_stopwords.extend(stopwords_spacy)
all_stopwords.extend(stopwords_gensim)
# all unique stop words
all_stopwords = list(set(all_stopwords))
print(f"Total number of Stopwords :- {len(all_stopwords)}")
res = remove_stopwords(res)
# Removing Extra Whitespaces
def remove_extra_spaces(text):
    """Collapse every run of whitespace in *text* into one space."""
    return re.sub(r'\s+', ' ', text)
res = remove_extra_spaces(res)
# Implementation of Removing numbers using python regex
def remove_numbers(text):
    """Replace every run of digits in *text* with a single space."""
    return re.sub(r'\d+', ' ', text)
# calling remove_numbers function with example text (ex_numbers)
res = remove_numbers(res)
## Implementation of lemmatization using nltk
def lemmatization(text):
    """Lemmatize every token of *text* and re-join with single spaces.

    NOTE: this is an identical duplicate of the earlier
    lemmatization(); re-defining it here simply rebinds the name to
    the same logic.  Relies on the global `lemma` defined just below.
    """
    # word tokenization
    tokens = nltk.word_tokenize(text)
    for index in range(len(tokens)):
        # replace each token with its lemma, in place
        lemma_word = lemma.lemmatize(tokens[index])
        tokens[index] = lemma_word
    return ' '.join(tokens)
# initialize lemmatizer object
lemma = WordNetLemmatizer()
res = lemmatization(res)
# Program without using any external library
l = res.split()
k = []
for i in l:
    # Keep only the first occurrence of each word.
    # NOTE(review): res.count(i) counts *substring* occurrences in the
    # whole text (e.g. 'a' inside 'cat'), not word frequency, so the
    # `or count == 1` branch rarely behaves as intended -- confirm.
    if (res.count(i) > 1 and (i not in k) or res.count(i) == 1):
        k.append(i)
res = ' '.join(k)
print(res)
with open("ClearText.txt", "w") as text_file:
    print(res, file=text_file)
36b801f7d3bebe3cf7d4c408206322bd0f55a400 | Python | zhang4ever/LeetCode | /ReverseLinkedList.py | UTF-8 | 1,717 | 4.25 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# @File : ReverseLinkedList.py
# @Time : 2018-09-02 22:01
# @Author : zhang bo
# @Note : Reverse Linked List
"""
"""
题目描述:Reverse a singly linked list.
示例:Input: 1->2->3->4->5->NULL; Output: 5->4->3->2->1->NULL
限制条件:A linked list can be reversed either iteratively or recursively.
"""
# Definition for singly-linked list.
class ListNode(object):
    """A singly linked list node."""
    def __init__(self, x):
        self.val = x      # payload
        self.next = None  # successor node, or None at the tail
class Solution(object):
    """Singly linked list reversal, implemented both recursively and
    iteratively, plus a small debug printer."""

    def reverseList(self, head):
        """Recursive reversal.

        :type head: ListNode
        :rtype: ListNode
        """
        # Base case: an empty list or single node is its own reversal.
        if head is None or head.next is None:
            return head
        new_head = self.reverseList(head.next)
        # head.next is now the tail of the reversed suffix; append head.
        head.next.next = head
        head.next = None
        return new_head

    def reverseListIterately(self, head):
        """Iterative reversal via head insertion behind a dummy node."""
        if head is None:
            return
        dummy = ListNode(0)
        dummy.next = head
        anchor = dummy.next   # stays on the original first node
        mover = anchor.next   # next node to splice to the front
        while mover:
            anchor.next = mover.next
            mover.next = dummy.next
            dummy.next = mover
            mover = anchor.next
        return dummy.next

    def printList(self, head):
        """Print the list as 'v1->v2->...' (no trailing newline)."""
        while head:
            print(head.val, end='->')
            head = head.next
if __name__ == '__main__':
    solution = Solution()
    # Build the list 1->2->3->4->5.
    node1, node2, node3 = ListNode(1), ListNode(2), ListNode(3)
    node4, node5 = ListNode(4), ListNode(5)
    node1.next, node2.next = node2, node3
    node3.next, node4.next = node4, node5
    # Reverse iteratively and print '5->4->3->2->1->'.
    res = solution.reverseListIterately(node1)
    solution.printList(res)
3919ac9580e2e7a11d360f3023d1387ce9e1885e | Python | logeion/logeion-backend | /parsers/parsers.old/hq.py | UTF-8 | 2,075 | 2.890625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import re, unicodedata as ud
from glob import glob
name = 'HansenQuinn'
type = 'sidebar'
caps = 'precapped'
split = ['οὐ, οὐκ, οὐχ', 'εἷς, μία, ἕν', 'ἐκ, ἐξ', 'οὕτω, οὕτως',
'τίς, τί ', 'τις, τι', 'τρεῖς, τρία']
# Cheap function that determines if a head is a certain entry
def insplit(head):
    """Return True when *head* contains any entry of the module-level
    `split` table (multi-headword entries that must stay together)."""
    return any(entry in head for entry in split)
# Removes excess chars and tags, and splits headwords into separate
# entries where appropriate
def cleanup(head):
    """Normalize a headword string: strip macrons from every character,
    then split it into one or more headwords.

    Returns a list of headword strings.  (Python 2 code: uses the
    `unicode` builtin.)
    """
    head = unicode(head)
    newhead = ''
    # Rebuild the string from macron-free variants of each character,
    # going through the Unicode character names.
    for char in head:
        charname = ud.name(char)
        newcharname = charname.replace(' WITH MACRON', '')
        newchar = ud.lookup(newcharname)
        newhead += newchar
    if not insplit(newhead): # Takes x of "x, y, z, ..." for headword
        try:
            newhead = [newhead.split(',')[0]]
        except(IndexError):
            newhead = newhead.split(',')
    else: # Takes x, y, and z as separate headwords
        newhead = newhead.split(',')
    # For a single headword, drop anything from the first parenthesis on.
    if len(newhead) == 1 and re.search('[()]', newhead[0]):
        newhead[0] = re.sub('\(.*', '', newhead[0])
    return newhead
# Main method
def parse(dico_path):
    """Parse the tab-separated HQvocab file(s) under *dico_path*.

    Returns (dico, tobelogged): a list of {'head','content','chapter'}
    entry dicts and a {'warning','info'} log-message dict.
    (Python 2 code: note the `except(Exception), e` syntax; the file
    handle from open() is never closed explicitly.)
    """
    dico_data = sorted(glob(dico_path+'/HQvocab'))
    dico = []
    tobelogged = {'warning': [], 'info': []}
    for xmlfile in dico_data:
        for line in open(xmlfile):
            try:
                (head, content, chapter) = line.split('\t')
                content = '%s, %s' % (head, content)
                head = cleanup(head)
                # cleanup() may split one line into several headwords.
                for each in head:
                    attrs = {'head': each.strip(), 'content': content, 'chapter': chapter}
                    dico.append(attrs)
            except(Exception), e:
                tobelogged['warning'].append("%s couldn't parse line \"%s\"...: %s" \
                    % (xmlfile.split('/')[-1], content[:50], e))
        tobelogged['info'].append('%s finished parsing' % xmlfile.split('/')[-1])
    return dico, tobelogged
| true |
e1a7d1037a98cb8cc6c0d41bd989c14dbe4d76b4 | Python | drstrange11/code | /player_18.py | UTF-8 | 306 | 2.984375 | 3 | [] | no_license | #AJ_18
from collections import Counter
string="kabali"
# Read the number of test strings from stdin, then count how many of
# them are anagrams of "kabali" (Counter equality means the same
# multiset of characters).
n=int(input())
k=[]
count=0
# Only accept between 1 and 1000 strings, per the problem constraints.
if n>=1 and n<=1000:
    for i in range(n):
        k.append((input()))
for val in k:
    if Counter(val)==Counter(string):
        count=count+1
print(count)
| true |
83f2ed51e008cbd6f6f2ab574397470aa29917bc | Python | elkira01/Association-Rules-PHP | /symbols.py | UTF-8 | 4,873 | 3.734375 | 4 | [] | no_license | #---------In this module, we designed tools for buiding a symbols set like structure
from utils import arr,split
import random
class SetException(Exception):
    """Exception raised for violations relative to the symbols base set."""
    def __init__(self, msg) -> None:
        # Bug fix: the original called super().__init__(self), storing
        # the exception object inside its own args tuple; pass the
        # message instead so args/repr behave normally.
        super().__init__(msg)
        self.msg = msg
    def __str__(self):
        return self.msg
class Superset:
    """A list-backed container of sets (sets are unhashable, so a plain
    python set cannot hold them as elements)."""
    def __init__(self, s=None) -> None:
        # Bug fix: the original declared an unused mutable default
        # argument (s={}); the optional seed is still accepted and
        # still ignored, but without the mutable-default pitfall.
        self.set = list()
    def add_set(self, s):
        """Append *s* unless an equal set is already stored."""
        if s not in self.set:
            self.set.append(s)
    def display(self):
        print(self.set)
    def contains(self, s):
        """Return True if a set equal to *s* is stored."""
        for elt in self.set:
            if elt == s:
                return True
        return False
class _Set:
    """A symbols-set (alphabet) wrapper enforcing 'atomicity': no stored
    symbol may be a substring of another.

    NOTE(review): the default `sym=set()` is a mutable default argument
    evaluated once at definition time and shared across calls.
    """
    def __init__(self,sym=set()):
        """Initialize the symbols set; raises SetException when some
        symbol is formed using others (non-atomic alphabet)."""
        try:
            self.symbols=sym
            if not self.is_atomic():
                raise SetException('Atomicity of symbols not verified: \n some symbols are formed using others')
        finally:
            # Cardinality is recorded even when the exception propagates.
            self.card=len(self.symbols)
    def get_symbols(self):
        return self.symbols
    def contained(self,s):
        """Check if the given character belongs to the set"""
        if s in self.symbols:
            return True
        else:
            return False
    def to_list(self):
        """Return the symbols as a list (set iteration order)."""
        l=list()
        for s in self.symbols:
            l.append(s)
        return l
    def is_atomic(self):
        """Check that symbols are unique and none is a substring of
        another (the empty string is ignored).

        NOTE(review): the unicity pass is vacuous because `symbols` is
        already a set (count is always 1).
        """
        symbols=set()
        for e in self.symbols:
            if not e=='':
                symbols.add(e)
        for s in symbols: #unicity first
            count=0
            for e in symbols:
                if s==e:
                    count+=1
            if count!=1:
                return False
            else:
                continue
        temp=symbols.copy()
        for s in symbols:
            temp.remove(s)
            # Atomicity: s must not appear inside any other symbol.
            for e in temp:
                if s in e:
                    return False
                else:
                    continue
            temp=symbols.copy()
        return True
    def subset(self):
        """Randomly enumerate subsets of the alphabet into a Superset.

        NOTE(review): relies on utils.arr for the number of size-`card`
        subsets; the inner guard `len(sub)<j and sub in trans.set`
        looks inverted, and the final add_set({}) inserts an empty
        *dict*, not an empty set -- confirm intent.
        """
        trans=Superset()
        l=self.to_list()
        long=len(l)
        for elt in l:
            trans.add_set(set({elt}))
        for card in range(2,long+1):
            i=1
            while i<=arr(long,card):
                sub=set()
                j=1
                while j<=card:
                    while len(sub)<j and sub in trans.set:
                        pos=random.randrange(0,len(l))
                        sub.add(l[pos])
                    j+=1
                trans.add_set(sub)
                print(sub)
                print('\n')
                i+=1
        trans.set.remove(set())
        trans.add_set({})
        return trans
# Smoke test: randomly enumerate subsets of a 7-symbol alphabet and
# report how many distinct subsets were collected.
A=_Set({'a','b','c','d','e','f','g'})
partition = A.subset()
print("--------------------------------------------------------------")
partition.display()
print(len(partition.set))
class Word:
    """A 'word' over a _Set alphabet, decomposed into alphabet symbols.

    NOTE(review): the parameter `str` shadows the builtin, and the
    defaults `alphabet=_Set()` / `sigma=_Set()` are mutable defaults
    evaluated once at definition time.
    """
    def __init__(self,str='',alphabet=_Set()) -> None:
        """Build the word and verify it decomposes into alphabet
        symbols; raises SetException otherwise."""
        try:
            self.symbols=[None for i in range(0,len(str))]
            self.str=str
            self.chars=split(str)
            if not self.is_from_alphabet(alphabet):
                raise SetException("This Word object was not built using the provided alphabet...")
        finally:
            pass
    def __str__(self) -> str:
        return self.str
    def get_lenght(self):
        # (sic) returns the number of matched symbols.
        return len(self.symbols)
    def set_symbols_from(self,sigma=_Set())-> None:
        """Split the word into elements of base alphabet *sigma* by
        matching every symbol at every position."""
        m=len(self.chars)
        _s=[]
        symbols=set()
        for e in sigma.symbols:
            "ignoring the empty word"
            if not e == '':
                symbols.add(e)
        for s in symbols:
            _s=split(s)
            n=len(s)
            for i in range(0,m-n+1):
                if _s==self.chars[i:i+n]:
                    self.symbols[i]=s
                    i=+1  # NOTE(review): assigns +1; probably meant i += 1
        """Eliminating None values from the list object self.symbols"""
        temp=[]
        for elt in self.symbols:
            if elt==None:
                pass
            else:
                temp.append(elt)
        self.symbols=temp
    def is_from_alphabet(self,sigma=_Set()):
        """True when the matched symbols cover the whole string.

        Side effect: (re)computes self.symbols via set_symbols_from().
        """
        self.set_symbols_from(sigma)
        if len("".join(self.symbols)) < len(self.str):
            return False
        return True
| true |
d6c203119767829fd12e92ff83912d9ca9491564 | Python | lironghuo/human-pose-estimation.pytorch | /lib/core/loss.py | UTF-8 | 4,570 | 2.546875 | 3 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
from torch.autograd import Variable
import scipy.ndimage.filters as fi
class JointsMSELoss(nn.Module):
    """Heatmap MSE + offset-regression loss for 2D pose estimation.

    Works on square heatmaps of side `heatmap_size` (self.col, implied
    by the view(-1, J, col*col) flatten); self.scale = 224/heatmap_size
    maps heatmap coordinates back to a 224px image -- TODO confirm the
    input resolution against the data loader.  NOTE(review): forward()
    calls .cuda() unconditionally (requires a GPU) and mixes scipy
    (gaussian_filter) with torch tensors; keep the exact statement
    order when modifying.
    """
    def __init__(self, use_target_weight, heatmap_size):
        super(JointsMSELoss, self).__init__()
        self.criterion = nn.MSELoss(size_average=True)
        self.use_target_weight = use_target_weight
        self.col = float(heatmap_size)     # heatmap side length
        self.scale = 224./float(self.col)  # heatmap -> image pixels
        self.gaussian = 1.0                # sigma for the target blobs

    def min_max(self, x, axis=None):
        # Min-max normalize to [0, 1] and wrap the result as a Tensor.
        min = x.min(axis=axis, keepdims=True)
        max = x.max(axis=axis, keepdims=True)
        result = (x-min)/(max-min)
        return torch.Tensor(result)

    def checkMatrix(self, xi, yi):
        # Report whether (xi, yi) lies inside the heatmap bounds.
        f = False
        if xi >= 0 and xi <= self.col - 1 and yi >= 0 and yi <= self.col - 1:
            f = True
        return xi, yi, f

    def forward(self, offset, heatmap, target, target_weight, meta, isValid=False, useOffset=False):
        """Return (loss, decoded joint coords x, regenerated target
        heatmaps tt, possibly-zeroed target_weight).

        Assumes offset stacks x-offsets in channels [0, J) and
        y-offsets in [J, 2J) -- TODO confirm against the caller.
        """
        batch_size = heatmap.size(0)
        num_joints = heatmap.size(1)
        joints = meta['joints']
        joints_vis = meta['joints_vis']
        joints = joints[:, :, :2].float().cuda()
        joints_vis = joints_vis[:, :, :2].float().cuda()
        x = Variable(torch.zeros(joints.size()).float(), requires_grad=True).cuda()
        '''
        heatmaps_pred = heatmap.reshape((batch_size, num_joints, -1)).split(1, 1)
        heatmaps_gt = target.reshape((batch_size, num_joints, -1)).split(1, 1)
        loss = 0
        for idx in range(num_joints):
            heatmap_pred = heatmaps_pred[idx].squeeze()
            heatmap_gt = heatmaps_gt[idx].squeeze()
            if self.use_target_weight:
                loss += 0.5 * self.criterion(heatmap_pred.mul(target_weight[:, idx]), heatmap_gt.mul(target_weight[:, idx]))
            else:
                loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)
        d1 = loss / num_joints
        '''
        # Arg-max over each flattened heatmap gives the peak location.
        reshaped = heatmap.view(-1, num_joints, int(self.col*self.col))
        _, argmax = reshaped.max(-1)
        yCoords = argmax/self.col  # NOTE(review): integer division intended; floats on modern torch
        xCoords = argmax - yCoords*self.col
        s = heatmap.size()
        tt = torch.zeros(s).float()
        ti = joints/self.scale  # ground-truth joints in heatmap coords
        for i in range(batch_size):
            for j in range(num_joints):
                #if h[i, j, yCoords[i, j], xCoords[i, j]] > 0.5:
                x[i, j, 0] = (offset[i, j, yCoords[i, j], xCoords[i, j]] + xCoords[i, j].float()) * self.scale
                x[i, j, 1] = (offset[i, j + num_joints, yCoords[i, j], xCoords[i, j]] + yCoords[i, j].float()) * self.scale
                if int(target_weight[i, j, 0]) >= 0.5:
                    xi, yi, f = self.checkMatrix(int(ti[i, j, 0]), int(ti[i, j, 1]))
                    if f == True:
                        # Place a unit peak at the ground-truth cell,
                        # blur it (sigma = self.gaussian) and normalize.
                        tt[i, j, yi, xi] = 1
                        tt[i, j] = self.min_max(fi.gaussian_filter(tt[i, j], self.gaussian))
                    else:
                        # Joint falls outside the heatmap: ignore it.
                        target_weight[i, j, 0] = 0
                        #target_weight[i, j, 1] = 0
        # Plain (unweighted) sum-of-squares heatmap loss.
        diff1 = heatmap - target
        '''
        cnt = 0
        for i in range(batch_size):
            for j in range(num_joints):
                if int(target_weight[i, j, 0]) == 0:
                    diff1[i, j] = diff1[i, j]*0
                else:
                    cnt = cnt + 1
        diff1 = diff1.view(-1)
        d1 = diff1.dot(diff1) / cnt
        '''
        diff1 = diff1.view(-1)
        d1 = diff1.dot(diff1) / (batch_size*num_joints)
        if useOffset == False:
            return d1, x, tt, target_weight
        # Offset branch: add an L2 penalty between decoded and GT coords.
        diff2 = (x - joints)
        '''
        diff2 = diff2*joints_vis/112.
        N2 = (joints_vis.sum()).data[0]/2.0
        diff2 = diff2.view(-1)
        d2 = 0.5 * torch.sqrt(diff2.dot(diff2))/N2
        '''
        diff2 = diff2.view(-1)
        d2 = 0.5 * torch.sqrt(diff2.dot(diff2))/(batch_size*num_joints)
        return d1 + d2, x, tt, target_weight
| true |
a95e05a272e128df0d7744688aac0c8fdf4efdb1 | Python | Araujoch/hips-1 | /base_de_datos/verificar_archivos_binarios.py | UTF-8 | 2,904 | 2.921875 | 3 | [] | no_license | import os
from principal import cur, conn
import sys
sys.path.append('./herramientas/')
import crear_csv, enviar_mail, bloquear_ip, escribir_log
passwd_dir = "/etc/passwd"
shadow_dir = "/etc/shadow"
directorios = [passwd_dir, shadow_dir]
def comparar_md5sum(dirs):
    """Compare the current md5sums of the files in *dirs* against those
    stored in the database; on any mismatch update the DB row, log an
    alarm, and email the administrator.  A CSV report is always
    written at the end.

    NOTE(review): the SELECT/UPDATE statements are built with
    f-strings (SQL injection risk if a file name were ever
    attacker-controlled) -- consider parameterized queries.
    """
    lista_csv=[]
    hubo_modificacion = False
    files = [] # current system md5sums, as a list of dicts
    cuerpo_mail= ''
    # Collect the current md5sum of every monitored file.
    for dir in dirs:
        command = f"md5sum {dir}" #+ " | awk '{print $1}'"
        file_with_md5sum = os.popen(command).read().split() # [md5sum, file_name]
        if file_with_md5sum:
            file = {
                "file_name" : file_with_md5sum[1],
                "md5sum" : file_with_md5sum[0]
            }
            files.append(file)
    # Compare the current md5sums against the ones stored in the DB.
    for file in files:
        # Look up the stored md5sum for this file.
        cur.execute(f"SELECT * FROM file WHERE file_name='{file['file_name']}'")
        query = cur.fetchall() # rows returned by the query
        print(query)
        # Unchanged checksum?
        if file['md5sum'] == query[0][2]: # single-row result; md5sum is the third column
            print("No hubo modificacion en", file['file_name'])
            file['se modifico'] = 'NO'
        else:
            hubo_modificacion = True
            file['se modifico'] = 'SI'
            print("Hubo modificacion! en", file['file_name'])
            md5sum_tmp = file['md5sum']
            file_name_tmp = file['file_name']
            cur.execute(f"UPDATE file SET md5sum = '{md5sum_tmp}' WHERE file_name = '{file_name_tmp}'; ")
            conn.commit()
            escribir_log.escribir_log(alarmas_o_prevencion='alarmas', tipo_alarma='md5sum_distinto',ip_o_email=file['file_name'], motivo='Se encontro que el archivo mencionado tuvo modificaciones')
            cuerpo_mail = cuerpo_mail + f"\nEl archivo {file['file_name']} tuvo modificaciones, se paso a actualizar el md5sum en la base de datos.\n"
        lista_csv.append(file)
    # Alert the administrator only when at least one file changed.
    if hubo_modificacion:
        enviar_mail.enviar_mail_asunto_body(tipo_alerta='ALERTA!', asunto='CAMBIOS MD5SUM', cuerpo=cuerpo_mail)
    crear_csv.write_csv(
        carpeta='base_de_datos',
        nombre_archivo='verificar_archivos_binarios',
        headers_list=['Nombre de Archivo', 'md5sum', 'se modifico?'],
        lista=lista_csv,
        mensaje='Si el md5sum se modifico se alerto al administrador por mail y tambien se escribio un registro en alarmas.log'
    )
comparar_md5sum(directorios)
| true |
8bdb89ee4cbd9a9038324697c3218efad8029dd5 | Python | Lovely-Professional-University-CSE/int247-machine-learning-project-2020-kem031-sumant_42 | /work.py | UTF-8 | 1,165 | 3.09375 | 3 | [] | no_license | from sklearn import datasets
import pandas as pd
import numpy as np
context="""This dataset contains complete information about
various aspects of crimes happened in India from 2001.
There are many factors that can be analysed from this dataset. Over all,
I hope this dataset helps us to understand better about India."""
insp='''
There could be many things one can understand by analyzing this dataset. Few inspirations for you to start with.
1.What is the major reason people being kidnapped in each and every state?
2.Offenders relation to the rape victim
3.Juveniles family background, education and economic setup.
4.Which state has more crime against children and women?
5.Age group wise murder victim
6.Crime by place of occurrence.
7.Anti corruption cases vs arrests.
8.Which state has more number of complaints against police?
9.Which state is the safest for foreigners?'''
#loading data from our csv file
data_rape=pd.read_csv('data\Victims_of_rape.csv',delimiter=',')
#defining the functions to get the data from our files
def get_col_rape():
    """Return the column labels of the loaded rape-victims DataFrame."""
    column_labels = data_rape.columns
    return column_labels
def get_dataR_head():
    """Return the entire rape-victims DataFrame (despite the '_head' name)."""
    frame = data_rape
    return frame
| true |
f3977683cd6d7dcb21541513139a7bf7279bc3ec | Python | ihabChaker/sshi | /matrix.py | UTF-8 | 9,356 | 3.71875 | 4 | [] | no_license | # TODO: Make custom exceptions
# TODO: You shouldn't be able to delete a single element from a row, only full rows and columns
from random import randint
from copy import deepcopy
class Matrix(object):
    """A 2-D numeric matrix stored as a list of row lists.

    Supports row indexing, equality, addition/subtraction, scalar and
    matrix multiplication, transpose, Laplace determinants, cofactors,
    inversion and symmetric/antisymmetric decomposition.

    Fix: this class previously contained an unresolved git merge conflict
    (<<<<<<< HEAD / ======= / >>>>>>> complement) that made the module a
    SyntaxError.  The two sides defined disjoint methods (inverse and
    (anti)symmetric parts vs. cofactors/determinant/random fill), so the
    resolution keeps all of them.
    """

    def __init__(self, rows, columns):
        """Create a rows x columns matrix filled with zeros."""
        self.rows = rows
        self.columns = columns
        self.matrix = []
        for i in range(rows):
            self.matrix.append([])  # Initialize empty rows
        for row in self.matrix:
            for i in range(columns):
                row.append(0)  # Fill the rows with 0s

    def __repr__(self):
        '''Print the matrix row after row.'''
        rep = ""
        for row in self.matrix:
            rep += str(row)
            rep += "\n"
        return rep.rstrip()

    def __getitem__(self, key):
        return self.matrix[key]

    def __setitem__(self, key, value):
        # Only whole rows (lists) can be assigned.
        if isinstance(value, list):
            self.matrix[key] = value
        else:
            raise TypeError(
                "A matrix object can only contain lists of numbers")
        return

    def __delitem__(self, key):
        # Deleting a row keeps the logical row counter in sync.
        del(self.matrix[key])
        self.rows = self.rows - 1
        return

    def __contains__(self, value):
        for row in self.matrix:
            for element in row:
                if element == value:
                    return True
        return False

    def __eq__(self, otherMatrix):
        if isinstance(otherMatrix, Matrix):
            if (self.rows != otherMatrix.rows) or (self.columns != otherMatrix.columns):
                return False  # Different dimensions can never be equal
            for row in range(self.rows):  # Check the elements one by one
                for column in range(self.columns):
                    if self.matrix[row][column] != otherMatrix[row][column]:
                        return False
            return True
        else:
            return False

    def __ne__(self, otherMatrix):
        # Check for equality and reverse the result
        return not self.__eq__(otherMatrix)

    def __add__(self, otherMatrix):
        '''Add 2 matrices of the same shape (or a number, element-wise).'''
        return self.__add_or_sub(otherMatrix, "add")

    def __sub__(self, otherMatrix):
        '''Subtracts otherMatrix (or a number, element-wise) from self.'''
        return self.__add_or_sub(otherMatrix, "sub")

    def __mul__(self, secondTerm):
        '''Multiply by a scalar or by a dimension-compatible matrix.'''
        if isinstance(secondTerm, (int, float, complex)):
            return self.__scalar_product(secondTerm)
        elif isinstance(secondTerm, Matrix):
            if self.columns == secondTerm.rows:
                newMatrix = Matrix(self.rows, secondTerm.columns)
                # Iterate the second factor's columns via its transpose:
                # each transpose row is a column, which is easy to scan.
                transposeMatrix = secondTerm.transpose()
                for row_self in range(self.rows):
                    for row_transpose in range(transposeMatrix.rows):
                        new_element = 0
                        for column_self in range(self.columns):
                            new_element += (self[row_self][column_self] *
                                            transposeMatrix[row_transpose][column_self])
                        newMatrix[row_self][row_transpose] = new_element
                return newMatrix
            else:
                raise Exception(
                    "Can't multiply (%d, %d) matrix with (%d, %d) matrix" %
                    (self.rows, self.columns, secondTerm.rows, secondTerm.columns)
                )
        else:
            raise TypeError(
                "Can't multiply a matrix by non-int of type " + type(secondTerm).__name__)

    def __rmul__(self, secondTerm):
        # scalar * Matrix delegates to Matrix * scalar.
        return self.__mul__(secondTerm)

    def __scalar_product(self, number):
        # Element-wise multiplication by a number; returns a new matrix.
        newMatrix = Matrix(self.rows, self.columns)
        for row in range(self.rows):
            for column in range(self.columns):
                newMatrix[row][column] = self[row][column] * number
        return newMatrix

    def __add_or_sub(self, secondTerm, operation):
        # Shared implementation behind __add__/__sub__.
        newMatrix = Matrix(self.rows, self.columns)
        if isinstance(secondTerm, (int, float, complex)):
            for row in range(self.rows):
                for column in range(self.columns):
                    if operation == "add":
                        newMatrix[row][column] = self[row][column] + secondTerm
                    if operation == "sub":
                        newMatrix[row][column] = self[row][column] - secondTerm
        elif isinstance(secondTerm, Matrix):
            if (self.rows == secondTerm.rows) and (self.columns == secondTerm.columns):
                for row in range(self.rows):
                    for column in range(self.columns):
                        if operation == "add":
                            newMatrix[row][column] = self[row][column] + \
                                secondTerm[row][column]
                        elif operation == "sub":
                            newMatrix[row][column] = self[row][column] - \
                                secondTerm[row][column]
                        else:
                            raise Exception("Invalid operation type")
            else:
                raise TypeError(
                    "Can't add or subtract (%d, %d) matrix with (%d, %d) matrix" %
                    (self.rows, self.columns, secondTerm.rows, secondTerm.columns)
                )
        else:
            raise TypeError(
                "Can only add or subtract a matrix with another matrix or a number")
        return newMatrix

    def is_square(self):
        return self.rows == self.columns

    def transpose(self):
        '''Return a new matrix with a(i, j) = a(j, i).'''
        newMatrix = Matrix(self.columns, self.rows)
        for row in range(self.rows):
            for column in range(self.columns):
                newMatrix[column][row] = self.matrix[row][column]
        return newMatrix

    def complement_matrix(self, rowToDelete, columnToDelete):
        '''Return a copy with one row and one column removed (the minor).'''
        newMatrix = deepcopy(self)
        del(newMatrix[rowToDelete])  # __delitem__ also decrements .rows
        for row in range(newMatrix.rows):
            del(newMatrix[row][columnToDelete])
        newMatrix.columns -= 1
        return newMatrix

    def algebric_complement(self, row, column):
        '''Return the cofactor: (-1)**(row+column) * det(minor).'''
        complementMatrix = self.complement_matrix(row, column)
        algebricComplement = (-1)**(row+column) * \
            complementMatrix.determinant()
        return algebricComplement

    def determinant(self):
        '''
        Return the determinant.

        Uses Laplace expansion along the first row; simple but
        exponential, so it slows down quickly as the matrix grows.
        '''
        if self.is_square():
            if self.rows == 1:
                det = self[0][0]  # 1x1: the determinant is the element
            elif self.rows == 2:
                det = (self[0][0] * self[1][1]) - (self[0][1] * self[1][0])
            else:
                det = 0
                for element in range(self.columns):
                    det += self[0][element] * self.algebric_complement(0, element)
            return det
        else:
            raise TypeError("Can only calculate the determinant of a square matrix")

    def algebric_complements_matrix(self):
        '''Return the matrix of all algebric complements (cofactors).'''
        if self.is_square():
            newMatrix = Matrix(self.rows, self.columns)
            for row in range(self.rows):
                for column in range(self.columns):
                    newMatrix[row][column] = self.algebric_complement(row, column)
            return newMatrix
        else:
            raise TypeError("Algebric complements can only be calculated on a square matrix")

    def inverse_matrix(self):
        '''Return the inverse: the transposed cofactor matrix over det.'''
        det = self.determinant()
        if det == 0:
            raise Exception("Matrix not invertible")
        else:
            algebricComplementsMatrix = self.algebric_complements_matrix()
            inverseMatrix = 1/det * algebricComplementsMatrix.transpose()
            return inverseMatrix

    def symmetric_part(self):
        '''Return the symmetric part of the matrix: (A + A^T) / 2.'''
        newMatrix = 1/2 * (self + self.transpose())
        return newMatrix

    def antisymmetric_part(self):
        '''Return the antisymmetric part of the matrix: (A - A^T) / 2.'''
        newMatrix = 1/2 * (self - self.transpose())
        return newMatrix

    def random(self, lower=-5, upper=5):
        '''Fill the matrix in place with random integers in [lower, upper].'''
        for row in self.matrix:
            for i in range(self.columns):
                row[i] = randint(lower, upper)
| true |
918c129524e322b39d60af45991f5f66c023dce7 | Python | Mabtoor-Mabx/Python_Tasks | /Assignment_3.py | UTF-8 | 811 | 4.3125 | 4 | [] | no_license | # Problem No 1
# Problem 1: read the user's name and greet them.
name = input("Enter Your Name \n")
print("Good After-Noon! Dear : " , name)
# Problem N0 2
letter= ''' Dear <|Name|> ! It is Very Pleasent To Inform You That
You Are Selected
Date : <|Date|>
'''
name = input("Enter Your Name \n" )
date = input("Enter Date \n")
letter = letter.replace("<|Name|>" , name)
letter = letter.replace("<|Date|>" , date)
print(letter)
# Problem 3
Name = "This is very Pleasent To Tell You That"
name = Name.find(" ")
print(name)
# Problem 4
name = "This Is Very Pleasent To Inform You That We Are Here"
name = name.replace(" ", " ")
print(name)
# Problem 5
Letter = "Dear Harry! This Python Course Is Nice. Thanks"
Letter_Format = "Dear Harry\nThis Python Course is Nice\nThanks"
print(Letter_Format) | true |
5358c6b15430be0b9654b5795b21cd0ab3d22005 | Python | limianscfox/Python_learn | /Python_Crash_Cours_2rd/8/8.6/test_8_15/printing_models.py | UTF-8 | 316 | 2.640625 | 3 | [] | no_license | from printing_functions import *
# Designs still waiting to be printed, and the models finished so far.
unprinted_designs = ['phone case', 'robot pendant', 'dodecahedron']
completed_models = []
# Pass a full-slice copy so print_models() can empty its argument
# without destroying the original unprinted_designs list.
print_models(unprinted_designs[:], completed_models)
show_completed_models(completed_models)
print("\nunprinted")
for unprinted_design in unprinted_designs:
print(f"{unprinted_design}") | true |
52add2705cc7730858a4c6bae9a4ec9ff71556dd | Python | Giantpizzahead/ctf-archive | /MetaCTF 2021/Crypto/picnic/rsa-weiner-attack/RSAwienerHacker.py | UTF-8 | 1,901 | 3.234375 | 3 | [] | no_license | '''
Created on Dec 14, 2011
@author: pablocelayes
'''
import ContinuedFractions, Arithmetic, RSAvulnerableKeyGenerator
def hack_RSA(e,n):
    '''
    Finds d knowing (e,n)
    applying the Wiener continued fraction attack
    '''
    cf = ContinuedFractions.rational_to_contfrac(e, n)
    for k, d in ContinuedFractions.convergents_from_contfrac(cf):
        # A convergent k/d is only a candidate when k divides e*d - 1.
        if k == 0 or (e*d-1) % k != 0:
            continue
        phi = (e*d-1)//k
        s = n - phi + 1
        # x^2 - s*x + n = 0 must have integer roots (the prime factors).
        discr = s*s - 4*n
        if discr < 0:
            continue
        t = Arithmetic.is_perfect_square(discr)
        if t != -1 and (s+t) % 2 == 0:
            print("Hacked!")
            return d
# TEST functions
def test_hack_RSA():
    """Generate five vulnerable key pairs and check Wiener recovers d."""
    print("Testing Wiener Attack")
    for _ in range(5):
        e, n, d = RSAvulnerableKeyGenerator.generateKeys(1024)
        print("(e,n) is (", e, ", ", n, ")")
        print("d = ", d)
        hacked_d = hack_RSA(e, n)
        if d == hacked_d:
            print("Hack WORKED!")
        else:
            print("Hack FAILED")
            print("d = ", d, ", hacked_d = ", hacked_d)
        print("-------------------------")
arr_n = []  # moduli read from the public-key dump
arr_e = []  # matching public exponents
with open('publickeys.txt', 'r') as fin:
    # Keys come in 3-line groups: modulus line, exponent line, separator.
    while True:
        a = fin.readline()
        b = fin.readline()
        fin.readline()  # discard the separator line between key blocks
        if a:
            # Field index 2 is taken as the hex value — presumably the
            # lines look like "<name> = <hex>"; confirm against the file.
            n = int(a.split()[2], 16)
            e = int(b.split()[2], 16)
            arr_n.append(n)
            arr_e.append(e)
        else:
            break
# MetaCTF{Oops_those_primes_are_not_that_randoM}
for i in range(len(arr_n)):
    d = hack_RSA(arr_e[i], arr_n[i])
    if d:
        # A recovered d is decoded as ASCII hex — the flag fragment.
        print(bytearray.fromhex(hex(d)[2:]).decode())
        print(i, 'crack')
| true |
15a52227c872ceac13ea044cccbf22c68217ef11 | Python | DearMordor/spam_filter | /utils.py | UTF-8 | 255 | 2.890625 | 3 | [] | no_license | def read_classification_from_file(truth):
    right_answers = {}
    # NOTE(review): a `with open(...)` block would guarantee the handle is
    # closed if a read raises; f.close() below only runs on success.
    f = open(truth, 'r', encoding='utf-8')
    for line in f.readlines():
        # Presumably each line is "<message name> <label>" — the first two
        # whitespace-separated fields.  A blank line would crash on words[0].
        words = line.split()
        right_answers[words[0]] = (words[1])
    f.close()
    return right_answers
| true |
17a0d731256c04a2498ad1d6c2d074297a695274 | Python | alinagorgovan/interview | /problem2.py | UTF-8 | 1,004 | 2.921875 | 3 | [] | no_license | import datetime
# Weights used for the CNP checksum (one digit per CNP position 1..12).
CONTROL_NUMBER = "279146358279"
def check_cnp(cnp):
    """Validate a Romanian CNP (cod numeric personal).

    Returns 0 when *cnp* is valid, -1 otherwise (the reason is printed).

    Fix: the control-digit rule says that when the weighted sum mod 11
    equals 10, the control digit must be 1.  The original condition
    `check != c or (check == 10 and c != 1)` tested `check != c` first,
    so the legal (check == 10, c == 1) case was always rejected.
    """
    # Must be exactly 13 decimal digits.
    if not cnp.isdecimal() or len(cnp) != 13:
        print("Invalid cnp.")
        return -1
    # First digit encodes sex/century; 0 is not a legal value.
    s = int(cnp[0])
    if s == 0:
        print("Invalid sex code.")
        return -1
    # Digits 2-7 encode the birth date as YYMMDD.
    date = f"{cnp[3:5]}/{cnp[5:7]}/{cnp[1:3]}"
    try:
        datetime.datetime.strptime(date, '%m/%d/%y')
    except ValueError:
        print("Invalid date.")
        return -1
    # Digits 8-9: county code, 1..52.
    jj = int(cnp[7] + cnp[8])
    if jj < 1 or jj > 52:
        print("Invalid county id.")
        return -1
    # Digits 10-12: per-county sequence number, must be non-zero.
    nnn = int(cnp[9] + cnp[10] + cnp[11])
    if nnn == 0:
        print("Invalid county day number.")
        return -1
    digits = list(map(int, cnp))
    control_list = list(map(int, CONTROL_NUMBER))
    c = digits[12]  # the declared control digit
    check = sum(x * y for x, y in zip(digits[:12], control_list)) % 11
    # check == 10 maps to control digit 1; otherwise they must match.
    if (check == 10 and c != 1) or (check != 10 and check != c):
        print("Invalid control number.")
        return -1
    return 0
print(check_cnp("2980707090058"))
| true |
cd5072cfd6678b494a6bd220ba6a9d971a450df4 | Python | toddrme2178/pyccel | /src_old/tests/scripts/openacc/reduce.py | UTF-8 | 260 | 2.65625 | 3 | [
"MIT"
] | permissive | # coding: utf-8
from pyccel.stdlib.parallel.openacc import Range
from pyccel.stdlib.parallel.openacc import Parallel
# Accumulator combined across gangs by the '+' reduction declared below.
x = 0.0
with Parallel(num_gangs=2):
    # i is declared private to each worker; x is reduced with '+'.
    for i in Range(-2, 5, 1, private=['i'], reduction=['+', 'x']):
        x += 2 * i
print('x = ', x)
| true |
c1b3200060d3392b72d6829060fbca2db4311f9c | Python | showiproute/PythonCookBook | /part_3/Order_dict.py | UTF-8 | 230 | 2.9375 | 3 | [] | no_license | #!/usr/bin/env python
#coding:utf-8
from collections import OrderedDict
def order_dict():
    """Fill an OrderedDict and print its entries in insertion order."""
    entries = OrderedDict()
    for key, value in (('foo', 1), ('bar', 2), ('spam', 3), ('grok', 4)):
        entries[key] = value
    for key in entries:
        print(key, entries[key])
order_dict() | true |
fe1434000f5d7243cedc7b01f15d9c1c55017185 | Python | IsraMejia/UdemyUniversidadPython2021 | /OOP_Python/herencia/Persona.py | UTF-8 | 937 | 4.1875 | 4 | [] | no_license | class Persona:
    def __init__(self, nombre, edad):
        # Name is kept "protected" and exposed through the property below.
        self._nombre = nombre
        self.edad = edad
    @property
    def nombre(self):
        """Read access to the protected name attribute."""
        return self._nombre
    @nombre.setter
    def nombre(self, nombre):
        """Write access to the protected name attribute."""
        self._nombre = nombre
    def __str__(self):  # Overrides the default memory-address repr with a readable summary.
        return f"\nEl objeto tipo Persona: {self.nombre}, con la edad de: {self.edad}\n"
class Empleado (Persona):  # Empleado inherits from Persona
    """An employee: a Persona extended with a salary (sueldo)."""
    def __init__(self, nombre, edad, sueldo):
        super().__init__(nombre, edad)  # reuse the parent-class constructor
        self.sueldo = sueldo
    def __str__(self):
        # Prepend the parent description, then append the salary.
        return f" {super().__str__()} Que es un Empleado con un sueldo de {self.sueldo} \n"
# empleado1 = Empleado('Isra', 23, 5000)
# print(f"\nEl emplado: {empleado1._nombre} de {empleado1.edad} años, gana {empleado1.sueldo}") | true |
487f704c61e7526ca760b5191c9ca786dcc9322e | Python | luohaha66/MyCode | /python/software_architecture/arch/event/eventlet_chat_server.py | UTF-8 | 1,598 | 2.953125 | 3 | [] | no_license | # Code Listing #8
"""
Multiuser chat server using eventlet
"""
import eventlet
from eventlet.green import socket
participants = set()
def new_chat_channel(conn):
    """Relay messages from *conn* to every other connected participant.

    Runs until the peer closes the connection, then removes it from the
    global ``participants`` set and notifies the remaining users.
    """
    data = conn.recv(1024)
    user = ''
    while data:
        # Decode and parse once per received packet.  The original decoded
        # inside the per-participant loop, which rebinds `data` to str and
        # crashes on the second participant (str has no .decode in Py3).
        text = data.decode('utf-8')
        # split(':', 1) keeps any colons inside the message body intact.
        user, msg = text.split(':', 1)
        if msg != '<handshake>':
            data_s = f'\n#[{user}]>>> says {msg}'
        else:
            data_s = f'(User {user} connected)\n'
        for p in participants:
            if p is conn:
                continue  # don't echo back to the sender
            try:
                p.send(bytearray(data_s, 'utf-8'))
            except socket.error as e:
                # Ignore broken pipes (errno 32, EPIPE): that participant
                # already closed its connection.  The original tested
                # `e[0]`, a Python 2 idiom that raises TypeError on Py3.
                if e.errno != 32:
                    raise
        data = conn.recv(1024)
    participants.remove(conn)
    for p in participants:
        p.send(bytearray(f'(User {user} leave)\n', 'utf-8'))
    print(f"Participant {user} left chat.")
if __name__ == "__main__":
    port = 3490  # fixed listening port
    try:
        print(f"ChatServer starting up on port {port}")
        server = eventlet.listen(('0.0.0.0', port))
        # Accept loop: each new connection is handled in its own
        # eventlet green thread running new_chat_channel().
        while True:
            new_connection, address = server.accept()
            print(f"Participant {address} joined chat.")
            participants.add(new_connection)
            eventlet.spawn(new_chat_channel, new_connection)
    except (KeyboardInterrupt, SystemExit):
        print("ChatServer exiting.")
| true |
64734792cb4a249a68c59f9dd356ebffe5a781e7 | Python | AhmadMustafa015/Stroke-Detection | /controlcikti.py | UTF-8 | 430 | 2.75 | 3 | [] | no_license | import csv
csvpath = 'C:/Users/RadioscientificOne/PycharmProjects/Stroke-Detection/Final_output_Prediction/classification_results.csv'
counter0 = 0  # rows whose 2nd column is '0' ("No inme" per the print below)
counter1 = 0  # rows whose 2nd column is '1' ("Yes inme")
with open(csvpath) as f:
    reader = csv.reader(f)
    for row in reader:
        if row[1] == '0':
            counter0 = counter0 +1
        if row[1] == '1':
            counter1 = counter1 +1
a =1  # NOTE(review): unused leftover, possibly a debugger breakpoint anchor
print('No inme :',counter0)
print('Yes inme :',counter1) | true |
87bce7d857b3e2ce8039daad6dc564a8f998dc2d | Python | wangye707/LSTMRNN-paddlepaddle | /readdata.py | UTF-8 | 5,391 | 2.84375 | 3 | [] | no_license | import os
import random
import numpy as np
def read_file(path, train):
    """Load neg/pos review files under *path* and build (ids, labels).

    Returns the word-id sequences produced by text_to_num() and a label
    list with 0 for every negative review and 1 for every positive one,
    in the same order as the concatenated texts.
    """
    def _read_folder(folder):
        # One string per file, walking the folder tree recursively.
        texts = []
        for root, _dirs, file_names in os.walk(folder):
            for file_name in file_names:
                with open(root + "/" + file_name, 'r') as handle:
                    texts.append(handle.read())
        return texts

    neg_text = _read_folder(path + '/' + "neg")
    pos_text = _read_folder(path + '/' + "pos")
    text_num = text_to_num(neg_text + pos_text, train=train)
    # Negative reviews first (label 0), then positive (label 1),
    # matching the concatenation order above.
    label = [0] * len(neg_text) + [1] * len(pos_text)
    return text_num, label
def data_lower(text):
    """Strip, lower-case and space-split every sentence in *text*.

    Lower-casing is a small trick that shrinks the vocabulary, which
    usually helps model quality.
    """
    return [sentence.strip().lower().split(" ") for sentence in text]
def text_to_num(text_list,train):
    """Convert raw sentences into lists of word ids.

    When *train* is true a fresh vocabulary is built from *text_list* and
    persisted to the 'word2id_dict' file; otherwise the previously saved
    vocabulary is loaded from that file.  Unknown words map to '[oov]'.
    """
    text_list = data_lower(text_list)
    save_path = 'word2id_dict'  # vocabulary file shared between runs
    data_set = []
    if train:
        word2id_freq, word2id_dict = build_dict(text_list)
        save_path = 'word2id_dict'
        save_dict(word2id_dict,save_path)
    else:
        word2id_dict = read_dict(save_path)
    for sentence in text_list:
        # Replace each word by its id; out-of-vocabulary words become the
        # [oov] id.  NOTE(review): in eval mode it is worth monitoring the
        # oov rate — a high rate suggests a train/test vocabulary mismatch.
        sentence = [word2id_dict[word] if word in word2id_dict \
            else word2id_dict['[oov]'] for word in sentence]
        data_set.append(sentence)
    return data_set
def build_dict(corpus):
    """Build the vocabulary from a tokenised corpus.

    Returns (word2id_freq, word2id_dict).  Ids are assigned in order of
    descending frequency after the two reserved tokens '[oov]' (id 0)
    and '[pad]' (id 1), whose frequencies are set to a huge dummy value
    so they sort first.
    """
    # Local import: this module does not import collections at file level.
    from collections import Counter

    # Counter.most_common() sorts by descending count with ties kept in
    # first-seen order — exactly what the original stable sort produced.
    word_freq = Counter(word for sentence in corpus for word in sentence)

    word2id_dict = {'[oov]': 0, '[pad]': 1}
    word2id_freq = {0: 1e10, 1: 1e10}
    for word, freq in word_freq.most_common():
        word_id = len(word2id_dict)
        word2id_dict[word] = word_id
        word2id_freq[word_id] = freq
    return word2id_freq, word2id_dict
def save_dict(dict, path):
    """Persist *dict* to *path* as its repr (read back by read_dict).

    The parameter name shadows the builtin ``dict``; it is kept for
    interface compatibility with existing callers.
    """
    # `with` guarantees the handle is closed even if the write raises
    # (the original leaked the handle on error).
    with open(path, 'w') as f:
        f.write(str(dict))
def read_dict(path):
    """Load a dict previously written by save_dict().

    WARNING: the contents are passed to eval(), so *path* must be a
    trusted file — a malicious file could execute arbitrary code.  Kept
    because save_dict() serialises with str(); consider json instead.
    """
    # `with` guarantees the handle is closed even if the read raises.
    with open(path, 'r') as f:
        return eval(f.read())
def build_batch(word2id_dict, corpus,label, batch_size, epoch_num, max_seq_len, shuffle=True):
    """Yield (sentence_batch, label_batch) training mini-batches.

    sentence_batch: int64 array shaped [batch_size, max_seq_len, 1].
    label_batch:    int64 array shaped [batch_size, 1] with 0/1 labels.
    Sentences left over after the last full batch of an epoch carry over
    into the next epoch's batches; a final partial batch is dropped.
    """
    sentence_batch = []
    sentence_label_batch = []
    for _ in range(epoch_num):
        # Shuffle before every epoch (helps training); callers doing
        # prediction should pass shuffle=False.  The same fixed seed is
        # set before each of the two shuffles so corpus and label are
        # permuted identically and stay aligned.
        if shuffle:
            seed = 5
            random.seed(seed)
            random.shuffle(corpus)
            random.seed(seed)
            random.shuffle(label)
        num = 0
        for sentence in corpus:
            # Truncate to max_seq_len, then right-pad with the [pad] id.
            sentence_sample = sentence[:min(max_seq_len, len(sentence))]
            if len(sentence_sample) < max_seq_len:
                for _ in range(max_seq_len - len(sentence_sample)):
                    sentence_sample.append(word2id_dict['[pad]'])
            # Wrap each id in its own list -> trailing dimension of 1.
            sentence_sample = [[word_id] for word_id in sentence_sample]
            sentence_batch.append(sentence_sample)
            sentence_label_batch.append([label[num]])
            num = num + 1
            if len(sentence_batch) == batch_size:
                yield np.array(sentence_batch).astype("int64"), np.array(sentence_label_batch).astype("int64")
                sentence_batch = []
                sentence_label_batch = []
def build_batch_infer(word2id_dict, corpus, batch_size, max_seq_len):
    """Yield int64 inference batches shaped [batch_size, max_seq_len, 1].

    Sentences are truncated to max_seq_len and right-padded with the
    [pad] id; a trailing partial batch is dropped.
    """
    pad_id = word2id_dict['[pad]']
    batch = []
    for sentence in corpus:
        trimmed = sentence[:max_seq_len]
        padded = trimmed + [pad_id] * (max_seq_len - len(trimmed))
        # Wrap each id in its own list to get the trailing dim of 1.
        batch.append([[word_id] for word_id in padded])
        if len(batch) == batch_size:
            yield np.array(batch).astype("int64")
            batch = []
| true |
c3306d0e980e2a9493c1178282cfeae4251550ef | Python | n0spaces/get-stick-bugged-lol | /gsbl/__main__.py | UTF-8 | 2,182 | 2.953125 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | def main():
import argparse
parser = argparse.ArgumentParser(prog='gsbl', description="Create a 'get stick bugged lol' video from an image.")
parser.add_argument('input',
help="the image file to be used to generate the video (png, jpg, ...). For best results, make"
"sure the image doesn't have any black or white borders surrounding it.")
parser.add_argument('output', help='the video file to be generated and saved (mp4, webm, ...)')
parser.add_argument('-r --resolution', dest='resolution', nargs=2, type=int, default=[720, 720],
metavar=('WIDTH', 'HEIGHT'), help='width and height of the video (default: 720 720)')
parser.add_argument('--img-bg-color', dest='img_bg_color', nargs=3, type=int, default=[0, 0, 0],
metavar=('R', 'G', 'B'),
help='RGB background color while the image is visible (default: 0 0 0)')
parser.add_argument('--line-color', dest='line_color', nargs=3, type=int, default=[255, 255, 211],
metavar=('R', 'G', 'B'), help='RGB color of line segments (default: 255 255 211)')
parser.add_argument('--line-bg-color', dest='line_bg_color', nargs=3, type=int, default=[125, 115, 119],
metavar=('R', 'G', 'B'),
help='RGB background color after image disappears (default: 125 115 119)')
parser.add_argument('-s --scale', dest='lsd_scale', type=float, default=0.8, metavar='SCALE',
help='the image scale passed to the line segment detector. Slightly lowering this may improve '
'results in large images. This does not affect the image scale in the video (try '
'--resolution instead). (default: 0.8)')
args = parser.parse_args()
from gsbl.stick_bug import StickBug
sb = StickBug(img=args.input, video_resolution=args.resolution, lsd_scale=args.lsd_scale,
img_bg_color=args.img_bg_color, line_color=args.line_color, line_bg_color=args.line_bg_color)
sb.save_video(args.output)
if __name__ == '__main__':
main()
| true |
c8e17860d1e7873e4589d42b2f8ca75c331bf929 | Python | JosephLevinthal/Research-projects | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/226/users/4148/codes/1800_2565.py | UTF-8 | 407 | 2.96875 | 3 | [] | no_license | from numpy import*
a = array(eval(input("media: ")))
b = array(eval(input("presenca: ")))
c = float(input("carga horaria: "))
cont = zeros(3, dtype=int)
for i in range(size(a)):
if a[i] >= 5 and b[i] >= c * (75/100): #aprovados
cont[0] = cont[0] + 1
elif a[i] <=5 and b[i] >= c * (75/100):
cont[1] = cont[1] +1
elif a[i] >= 5 and b[i] <= c * (75/100):
cont[2] = cont[2] + 1
print(cont) | true |
fb48fd9656915149d8133355706be99ed2db0469 | Python | Ing-Josef-Klotzner/python | /_string_monster2.py | UTF-8 | 1,363 | 3.125 | 3 | [] | no_license | #!/usr/bin/python3
from sys import stdin
def match(ssof, ss):
    """Return True if *ss* decomposes into the strings of set *ssof*.

    Each floor string may be used at most once.  Greedy: recurses on the
    first set element that prefixes *ss*, without backtracking.
    """
    if not ss:
        return True
    for candidate in ssof:
        if ss.startswith(candidate):
            return match(ssof - {candidate}, ss[len(candidate):])
    return False
# this works with testcases, because strings are included
# in order in sleepy string (hackerearth testcases)
# fails for sample test case where sleepy string chars are scrumbled
def main ():
    """Read T test cases from stdin; for each, print YES/NO depending on
    whether the sleepy string can be assembled from the floor strings.

    A heuristic length pre-filter prints NO without calling match() when
    composition is impossible by lengths alone (see conditions below).
    """
    read = stdin.readline
    t = int (read ())
    for t_ in range (t):
        n = int (read ())
        sof = [] # list of strings on floor
        lns = [] # list of the string lengths
        for n_ in range (n):
            s = read ().rstrip ()
            sof.append (s)
            lns.append (len (s))
        ss = read ().rstrip () # sleepy string
        lnss = len (ss)
        mnl = min (lns)
        mxl = max (lns)
        justone = 0
        allother_max = 0
        for n_ in range (n):
            if lns [n_] == mnl: justone += 1
            elif lns [n_] == mxl: allother_max += 1
        # Length-only impossibility checks: target shorter than the
        # shortest piece; between one and two shortest pieces; all pieces
        # equal length not dividing the target; or one short piece plus
        # equal long pieces with an incompatible remainder.
        if lnss < mnl or lnss > mnl and lnss < 2 * mnl or mnl == mxl and lnss % mnl or justone == 1 and allother_max == n - 1 and lnss % mxl not in {0, mnl}:
            print ("NO")
            continue
        ssof = set (sof)
        print ("YES" if match (ssof, ss) else "NO")
if __name__ == "__main__": main ()
| true |
8d37a253294ef8f130699d665bcccb12b89469cb | Python | vivektewari/rentHop | /Booster.py | UTF-8 | 3,430 | 2.765625 | 3 | [] | no_license | import numpy as np
from mathFunctions import sigmoid,sigDeriv
from treatments import getTargetVar
import pandas as pd
class booster(object):
"""
assignes weights to each observation
calculates weighted loss
saves classifier prediction and weight of the classifier
finally returns weighted average of classifier
"""
    def __init__(self,maxIteration,classifier,test,trainCopy):#func api: classifier returns prediction, actual cost and weighted cost
        # Wrapped classifier and the number of boosting rounds to run.
        self.classifier=classifier
        self.maxIteration=maxIteration
        # Accumulated 3-class prediction scores for the test set.
        self.prediction=np.zeros(shape=(test.shape[0],3))
        self.error=[]          # per-round raw errors (unused so far)
        self.weightedError=[]  # per-round weighted errors (unused so far)
        self.test=test
        self.classifierWeight=[]  # weight assigned to each round's model
        self.trainCopy=trainCopy  # training frame used to re-weight rows
def nniterate(self,train=False):
classi=self.classifier
classi.weight = (1 / float(classi.actualOutput.shape[0])) * np.ones(shape=(classi.actualOutput.shape[0], 1))
for i in range(0,self.maxIteration):
for j in range(0,len(classi.layers)):classi.layers[j].cofficient=np.random.rand(classi.layers[j].cofficient.shape[0],classi.layers[j].cofficient.shape[1])
classi.findEstimates()
classifierWeight=np.log((2.5-classi.cost)/classi.cost)
tes=classi.predict(self.test)
self.prediction =tes*classifierWeight+self.prediction
self.classifierWeight.append(classifierWeight)
costMatrix=(self.classifier.analyseObservation(self.trainCopy))['cost'].as_matrix()
y=sigmoid(classifierWeight*(costMatrix.reshape((costMatrix.shape[0],1))>0.55).astype(int))
if (np.isnan(y)).any():print "weight comes out to be nan"
newWeight=classi.weight*y
classi.weight=newWeight/sum(newWeight)
self.prediction = self.prediction.div(np.sum(self.prediction, axis=1), axis=0)
pred=self.prediction
if train:
self.trainCopy.index=self.prediction.index
pred=self.prediction.join(self.trainCopy,how='left',rsuffix='_t')
pred['cost']=np.log(pred['high'])*pred['high_t']+np.log(pred['medium'])*pred['medium_t']+np.log(pred['low'])*pred['low_t']
print np.sum(pred['cost'])/self.trainCopy.shape[0],self.trainCopy.shape[0]
return pred
def weightSelecter(self,train=False):
classi=self.classifier
classi.weight = (1 / float(classi.actualOutput.shape[0])) * np.ones(shape=(classi.actualOutput.shape[0], 1))
best=5.0
for i in range(0,self.maxIteration):
for j in range(0,len(classi.layers)):classi.layers[j].cofficient=np.random.rand(classi.layers[j].cofficient.shape[0],classi.layers[j].cofficient.shape[1])
classi.findEstimates()
if classi.cost<best:
best=classi.cost
self.prediction = classi.predict(self.test)
classifierWeight=np.log((2.5-classi.cost)/classi.cost)
self.classifierWeight.append(classifierWeight)
costMatrix=(self.classifier.analyseObservation(self.trainCopy))['cost'].as_matrix()
y=sigmoid(classifierWeight*(costMatrix.reshape((costMatrix.shape[0],1))>0.55).astype(int))
if (np.isnan(y)).any():print "weight comes out to be nan"
newWeight=classi.weight*y
classi.weight=newWeight/sum(newWeight)
print best,classi.input.shape[0]
return self.prediction
| true |
fc01eddba5b16c90f35c108e81ac63e99e9181ec | Python | dbychkar/python_lessons | /python_coursera/2_week/34_Максимальное число подряд идущих равных.py | UTF-8 | 175 | 3.328125 | 3 | [] | no_license | n = int(input())
m = n      # value of the current run (n was read just above)
i = k = 1  # i: current run length, k: best run length so far
# Read integers until a 0 arrives; track the longest run of equal
# consecutive values.  NOTE(review): an immediate 0 still prints 1 —
# confirm that is the intended answer for an empty sequence.
while n:
    n = int(input())
    if n == m:
        i += 1
    elif m != n:
        m = n
        i = 1
    if k < i:
        k = i
print(k)
| true |
e5bba35ee9b61a70debae47ec5f2d6ec2cfe3704 | Python | pradoz/leetcode_pset | /py/reverse-words-in-a-string-iii.py | UTF-8 | 1,016 | 4.15625 | 4 | [] | no_license | '''
Given a string, you need to reverse the order of characters in each word within
a sentence while still preserving whitespace and initial word order.
Example 1:
Input: "Let's take LeetCode contest"
Output: "s'teL ekat edoCteeL tsetnoc"
'''
# Naive/brute force solution
# Time Complexity: O(n), since work is done in a linear pass
# Space Complexity: O(n), since we store every character in the list
class Solution:
    def reverseWords(self, string_to_reverse: str) -> str:
        """Reverse the characters of every space-separated word while
        preserving whitespace and word order.

        Fix: the original accumulator loop treated an empty first word
        (produced by a leading space) as "nothing yet" and skipped the
        separator, silently dropping the leading space.  Joining the
        reversed words keeps every separator, including leading and
        repeated spaces.  Time and space are both O(n).
        """
        return ' '.join(word[::-1] for word in string_to_reverse.split(' '))
# Less code, not much improvement.
class Solution:
    def reverseWords(self, string_to_reverse: str) -> str:
        """Reverse each word by flipping the whole string, then restoring
        the original word order by flipping the word list."""
        if len(string_to_reverse) < 2:
            # Nothing to reverse in a 0- or 1-character string.
            return string_to_reverse
        flipped = string_to_reverse[::-1]
        return ' '.join(flipped.split(' ')[::-1])
| true |
1c8430683c5a4ead41a1b58011e79579e6a3b4ea | Python | artempyanykh/prac-5sem-2016 | /submissions/task1/penin-grinko/task1.py | WINDOWS-1251 | 1,524 | 2.953125 | 3 | [] | no_license | import numpy as np
from scipy.optimize import linprog
def nash_equilibrium(a):
    """Solve a zero-sum matrix game via two linear programs.

    Returns (value, p_opt, q_opt): the game value and the optimal mixed
    strategies for the row and column players.  The original comments
    were mojibake (WINDOWS-1251); they are restated in English below.
    NOTE(review): relies on scipy.optimize.linprog's positional
    (c, A_ub, b_ub) interface — confirm against the installed version.
    """
    A = a.transpose()
    rownum, colnum = np.shape(A)
    # Objective vector of the first LP (row player's surrogate variables).
    c1 = np.array([1] * colnum)
    # Right-hand side for the first LP's inequality constraints.
    c2 = np.array([-1] * rownum)
    # Shift so all payoffs are positive: the LP reduction needs value > 0.
    min_a = np.amin(A)
    if (min_a < 0):
        A -= min_a - 1
    A *= -1
    p = linprog(c1, A, c2).x
    v = np.dot(p, c1)
    p_opt = p*(1/v)
    value = 1/v
    if (min_a < 0):
        value += min_a - 1  # undo the positivity shift in the game value
    # Restore A's sign for the second (column player's) program.
    A *= -1
    # Objective vector of the second LP.
    b1 = np.array([-1] * rownum)
    # Right-hand side of the second LP's constraints.
    b2 = np.array([1] * colnum)
    q = linprog(b1, a, b2).x
    q_opt = q*(1/v)
    return value, p_opt, q_opt
| true |
68498c3e5f31ee4171785d73d46edcd5c6f2b2c0 | Python | sergiorgiraldo/Python-lang | /baseballStats/baseballStats1.py | UTF-8 | 13,466 | 3.265625 | 3 | [] | no_license | # Import libraries
import pandas as pd
import numpy as np
from sklearn import linear_model
from scipy import stats
import matplotlib.pyplot as plt
#datasets: http://www.retrosheet.org/gamelogs/index.html
#labels for the data set: http://www.retrosheet.org/gamelogs/glfields.txt
'''
Field(s) Meaning
1 Date in the form "yyyymmdd"
2 Number of game:
"0" -- a single game
"1" -- the first game of a double (or triple) header
including seperate admission doubleheaders
"2" -- the second game of a double (or triple) header
including seperate admission doubleheaders
"3" -- the third game of a triple-header
"A" -- the first game of a double-header involving 3 teams
"B" -- the second game of a double-header involving 3 teams
3 Day of week ("Sun","Mon","Tue","Wed","Thu","Fri","Sat")
4-5 Visiting team and league
6 Visiting team game number
For this and the home team game number, ties are counted as
games and suspended games are counted from the starting
rather than the ending date.
7-8 Home team and league
9 Home team game number
10-11 Visiting and home team score (unquoted)
12 Length of game in outs (unquoted). A full 9-inning game would
have a 54 in this field. If the home team won without batting
in the bottom of the ninth, this field would contain a 51.
13 Day/night indicator ("D" or "N")
14 Completion information. If the game was completed at a
later date (either due to a suspension or an upheld protest)
this field will include:
"yyyymmdd,park,vs,hs,len" Where
yyyymmdd -- the date the game was completed
park -- the park ID where the game was completed
vs -- the visitor score at the time of interruption
hs -- the home score at the time of interruption
len -- the length of the game in outs at time of interruption
All the rest of the information in the record refers to the
entire game.
15 Forfeit information:
"V" -- the game was forfeited to the visiting team
"H" -- the game was forfeited to the home team
"T" -- the game was ruled a no-decision
16 Protest information:
"P" -- the game was protested by an unidentified team
"V" -- a disallowed protest was made by the visiting team
"H" -- a disallowed protest was made by the home team
"X" -- an upheld protest was made by the visiting team
"Y" -- an upheld protest was made by the home team
Note: two of these last four codes can appear in the field
(if both teams protested the game).
17 Park ID
18 Attendance (unquoted)
19 Time of game in minutes (unquoted)
20-21 Visiting and home line scores. For example:
"010000(10)0x"
Would indicate a game where the home team scored a run in
the second inning, ten in the seventh and didn't bat in the
bottom of the ninth.
22-38 Visiting team offensive statistics (unquoted) (in order):
at-bats
hits
doubles
triples
homeruns
RBI
sacrifice hits. This may include sacrifice flies for years
prior to 1954 when sacrifice flies were allowed.
sacrifice flies (since 1954)
hit-by-pitch
walks
intentional walks
strikeouts
stolen bases
caught stealing
grounded into double plays
awarded first on catcher's interference
left on base
39-43 Visiting team pitching statistics (unquoted)(in order):
pitchers used ( 1 means it was a complete game )
individual earned runs
team earned runs
wild pitches
balks
44-49 Visiting team defensive statistics (unquoted) (in order):
putouts. Note: prior to 1931, this may not equal 3 times
the number of innings pitched. Prior to that, no
putout was awarded when a runner was declared out for
being hit by a batted ball.
assists
errors
passed balls
double plays
triple plays
50-66 Home team offensive statistics
67-71 Home team pitching statistics
72-77 Home team defensive statistics
78-79 Home plate umpire ID and name
80-81 1B umpire ID and name
82-83 2B umpire ID and name
84-85 3B umpire ID and name
86-87 LF umpire ID and name
88-89 RF umpire ID and name
If any umpire positions were not filled for a particular game
the fields will be "","(none)".
90-91 Visiting team manager ID and name
92-93 Home team manager ID and name
94-95 Winning pitcher ID and name
96-97 Losing pitcher ID and name
98-99 Saving pitcher ID and name--"","(none)" if none awarded
100-101 Game Winning RBI batter ID and name--"","(none)" if none
awarded
102-103 Visiting starting pitcher ID and name
104-105 Home starting pitcher ID and name
106-132 Visiting starting players ID, name and defensive position,
listed in the order (1-9) they appeared in the batting order.
133-159 Home starting players ID, name and defensive position
listed in the order (1-9) they appeared in the batting order.
160 Additional information. This is a grab-bag of informational
items that might not warrant a field on their own. The field
is alpha-numeric. Some items are represented by tokens such as:
"HTBF" -- home team batted first.
Note: if "HTBF" is specified it would be possible to see
something like "01002000x" in the visitor's line score.
Changes in umpire positions during a game will also appear in
this field. These will be in the form:
umpchange,inning,umpPosition,umpid with the latter three
repeated for each umpire.
These changes occur with umpire injuries, late arrival of
umpires or changes from completion of suspended games. Details
of suspended games are in field 14.
161 Acquisition information:
"Y" -- we have the complete game
"N" -- we don't have any portion of the game
"D" -- the game was derived from box score and game story
"P" -- we have some portion of the game. We may be missing
innings at the beginning, middle and end of the game.
Missing fields will be NULL.
'''
# Load the 2015 Retrosheet game log (comma-separated, no header row).
input_df = pd.read_table("GL2015.TXT", sep=",", header=None)
def rename_cols(input_df):
    """Give the game-log columns used below readable names (in place).

    Columns 3/6 hold the visiting/home team codes and 9/10 their run
    totals.  Returns the same dataframe so calls can be chained.
    """
    readable_names = {
        3: 'Visiting Team',
        6: 'Home Team',
        9: 'Runs Visitor',
        10: 'Runs Home',
    }
    input_df.rename(columns=readable_names, inplace=True)
    return input_df
# Rename the raw integer column labels of the 2015 log once, up front
input_df = rename_cols(input_df)
def add_new_cols(input_df):
    """Add boolean 'Home Win' / 'Visitor Win' columns (in place).

    Ties leave both flags False.  Returns the same dataframe.
    """
    home_runs = input_df['Runs Home']
    visitor_runs = input_df['Runs Visitor']
    input_df['Home Win'] = home_runs > visitor_runs
    input_df['Visitor Win'] = visitor_runs > home_runs
    return input_df
# Method to group data by home team and compute relevant statistics
# Input type: dataframe
# Output type: dataframe (with stats grouped by home team)
def proc_home_team_data(input_df):
    # Group by home team
    home_group = input_df.groupby(input_df['Home Team'])
    # Compute stats: Number of games, runs scored, runs conceded, wins, run differential
    # (builtin sum over each column; the boolean 'Home Win' column sums to a win count)
    # NOTE(review): home_group[...].sum() is the idiomatic equivalent of
    # .apply(sum); confirm behaviour on the pandas version in use.
    home_df = home_group[['Runs Visitor', 'Runs Home', 'Home Win']].apply(sum)
    # Each input row is one game, so counting any column gives home games played
    home_df['Home Games'] = home_group['Home Win'].count()
    home_df.rename(columns = {'Runs Visitor': 'Runs by Visitor', 'Runs Home': 'Runs at Home', 'Home Win': 'Wins at Home'}, inplace=True)
    # Run differential from the home team's perspective
    home_df['RD at Home'] = home_df['Runs at Home'] - home_df['Runs by Visitor']
    # Promote the group index to a regular 'Team' column for the later merge
    home_df.index.rename('Team', inplace=True)
    home_df.reset_index(inplace=True)
    return home_df
# Method to group data by visiting team and compute relevant statistics
# Input type: dataframe
# Output type: dataframe (with stats grouped by visiting team)
def proc_visiting_team_data(input_df):
    # Group by visiting team
    visit_group = input_df.groupby(input_df['Visiting Team'])
    # Compute stats: Number of games, runs scored, runs conceded, wins, run differential
    # (builtin sum per column; boolean 'Visitor Win' sums to a win count)
    visit_df = visit_group[['Runs Visitor', 'Runs Home', 'Visitor Win']].apply(sum)
    # Each input row is one game, so counting any column gives road games played
    visit_df['Road Games'] = visit_group['Visitor Win'].count()
    visit_df.rename(columns = {'Runs Visitor': 'Runs as Visitor', 'Runs Home': 'Runs by Home',
                               'Visitor Win': 'Wins as Visitor'}, inplace=True)
    # Run differential from the visiting team's perspective
    visit_df['RD as Visitor'] = visit_df['Runs as Visitor'] - visit_df['Runs by Home']
    # Promote the group index to a regular 'Team' column for the later merge
    visit_df.index.rename('Team', inplace=True)
    visit_df.reset_index(inplace=True)
    return visit_df
# Method to merge dataframes with statistics grouped by home and visiting teams
# and to explicitly compute explanatory and response variables
# Input type: dataframe, dataframe
# Output type: dataframe
def merge_data_frames(home_df, visit_df):
    # Outer merge keeps a team that appears in only one of the frames
    overall_df = home_df.merge(visit_df, how='outer', left_on='Team', right_on='Team')
    # Season-long run differential (explanatory) and win percentage (response)
    overall_df['RD'] = overall_df['RD at Home'] + overall_df['RD as Visitor']
    overall_df['Win Pct'] = (overall_df['Wins at Home'] + overall_df['Wins as Visitor']) / (overall_df['Home Games'] + overall_df['Road Games'])
    # Pythagorean expectation: projected wins over a 162-game season from
    # runs allowed vs. runs scored, using the common exponent 1.83
    overall_df['Pythagorean expectation'] = 162 * (1 / (1 + np.power(
        (overall_df['Runs by Visitor'] + overall_df['Runs by Home'])/
        (overall_df['Runs as Visitor'] + overall_df['Runs at Home']), 1.83)))
    # Return dataframe with explanatory and response variables
    return overall_df
def extract_linear_reg_inputs(input_df):
    """Run the full preprocessing pipeline on one season's game log.

    Renames the raw columns, flags winners, aggregates per-team home and
    road statistics, and merges them into one frame carrying the 'RD',
    'Win Pct' and 'Pythagorean expectation' columns used by the model.
    """
    # rename_cols / add_new_cols mutate and return the same frame
    labelled = add_new_cols(rename_cols(input_df))
    home_stats = proc_home_team_data(labelled)
    road_stats = proc_visiting_team_data(labelled)
    return merge_data_frames(home_stats, road_stats)
# Get training data from 2011-2015 to train the linear regression model
# Initialize (0, 1)-shaped arrays so per-season columns can be stacked on
train_run_diff = np.empty([0, 1])
train_win_pct = np.empty([0, 1])
# Accumulate one row per team per season
for year in range(2011, 2016):
    # Construct log file name
    log_file = "GL" + str(year) + ".TXT"
    # Read log into a dataframe
    # NOTE(review): pd.read_table is deprecated in newer pandas;
    # pd.read_csv(log_file, header=None) is the equivalent -- confirm version.
    df = pd.read_table(log_file, sep=",", header=None)
    # Extract relevant stats into another dataframe
    df_proc = extract_linear_reg_inputs(df)
    # Add to training set
    train_run_diff = np.vstack([train_run_diff, df_proc['RD'].values.reshape([-1, 1])])
    train_win_pct = np.vstack([train_win_pct, df_proc['Win Pct'].values.reshape([-1, 1])])
# Instantiate an object
lin_regr = linear_model.LinearRegression(fit_intercept=True)
# Compute model parameters with training data
lin_regr.fit(train_run_diff, train_win_pct)
# Access and display model parameters
# NOTE(review): float() on the (1, 1) coef_ array is deprecated in
# NumPy >= 1.25; lin_regr.coef_[0, 0] is the explicit form.
print("Slope (a) = ", float(lin_regr.coef_), " Intercept (b) = ", float(lin_regr.intercept_))
# Get regression score (R-squared)
r_squared = lin_regr.score(train_run_diff, train_win_pct)
print("R-squared for linear fit = ", r_squared)
# Visualize: scatter of training data plus the fitted line and axes guides
x_ax = np.array(range(int(np.min(train_run_diff)), int(np.max(train_run_diff)))).reshape(-1, 1)
y_ax = lin_regr.coef_ * x_ax + lin_regr.intercept_
plt.plot(train_run_diff, train_win_pct, 'bo', label="training_data")
plt.plot(x_ax, y_ax, 'r', label="model_fit")
plt.plot([-300, 300], [0.5, 0.5], "k--")
plt.plot([0, 0], [0.30, 0.65], "k--")
plt.ylim([0.30, 0.65])
plt.xlabel("Run differential")
plt.ylabel("Win percentage")
plt.legend(loc="lower right")
plt.show()
# Construct test dataset from the held-out 2016 season
log_file = "GL2016.TXT"
df = pd.read_table(log_file, sep=",", header=None)
df_proc = extract_linear_reg_inputs(df)
test_run_diff = df_proc['RD'].values.reshape([-1, 1])
test_win_pct = df_proc['Win Pct'].values.reshape([-1, 1])
# Predict outcomes using regression model
predict_win_pct = lin_regr.predict(test_run_diff)
# Compute percentage error (mean absolute error of win pct) on test set
mean_abs_error_test = np.mean(np.abs(predict_win_pct - test_win_pct))
print("Percentage error on test set = ", 100. * mean_abs_error_test, "%")
# Compute percentage error for linear regression model on training set
model_fit_train = lin_regr.predict(train_run_diff)
mean_abs_error_training = np.mean(np.abs(model_fit_train - train_win_pct))
print("Percentage error on training set ", 100. * mean_abs_error_training, "%")
# Visualize predicted vs. actual win percentage (diagonal = perfect fit)
plt.plot(test_win_pct, predict_win_pct, 'bo')
plt.plot([0.35, 0.7], [0.35, 0.7], 'r')
plt.xlabel("Actual win percentage")
plt.ylabel("Predicted win percentage")
plt.title("MLB 2016 season")
plt.show()
| true |
d5bd32593c6de979a0e64197857d857519a56abf | Python | sanchezmaxar/ArtifitialIntelligenceCourse05 | /python/individual.py | UTF-8 | 662 | 2.953125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 26 09:22:10 2016
@author: stan
"""
class Individual:
    """One member of an evolving population.

    Holds an optional genotype/phenotype pair; other code may attach
    `fitness` and `objectives` attributes later (str() shows them only
    once they exist).
    """

    def __init__(self, genotype=None, phenotype=None):
        self.genotype = genotype
        self.phenotype = phenotype

    def __str__(self):
        # Concatenate the present fields with no separator, matching the
        # historical output format exactly.
        pieces = []
        if self.genotype is not None:
            pieces.append('genotype: ' + str(self.genotype))
        if self.phenotype is not None:
            pieces.append('phenotype: ' + str(self.phenotype))
        if hasattr(self, 'fitness'):
            pieces.append('fitness: ' + str(self.fitness))
        if hasattr(self, 'objectives'):
            pieces.append('objectives: ' + str(self.objectives))
        return ''.join(pieces)
| true |
a2e85a97254ee481b628162475844e17e1e43753 | Python | JinfengChen/chm1_scripts | /ReorderClusters.py | UTF-8 | 877 | 2.953125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
import sys
inFile = open(sys.argv[1], 'r')
outFile = open(sys.argv[2], 'w')
def Print(name, a, b):
    """Write one tab-separated row to the module-level outFile:
    the read name followed by both interval tuples."""
    fields = [name]
    fields.extend(a)
    fields.extend(b)
    outFile.write("\t".join(fields) + "\n")
# Emit every record with its two intervals ordered by start coordinate.
for line in inFile:
    vals = line.split()
    # the format is:
    # 0                                                                            1     2   3     4     5      6     7    8    9     10
    #m130216_080418_42134_c100465732550000001523050605101337_s1_p0/38995/5748_10266 -7740 0 chr1 16611 18883 -7803 1 chr1 16739 18961
    intv1 = (vals[1], vals[2], vals[3], vals[4], vals[5])
    intv2 = (vals[6], vals[7], vals[8], vals[9], vals[10].strip())
    # Columns 4 and 9 are the start coordinates (see sample record above);
    # print the lower-starting interval first.
    if (int(vals[4]) < int(vals[9])):
        Print(vals[0], intv1, intv2)
    else:
        Print(vals[0], intv2, intv1)
# Bug fix: close both handles explicitly.  Without this, buffered writes
# to outFile may never be flushed to disk if the interpreter exits early.
inFile.close()
outFile.close()
| true |
1cc0d93026d37b2d61893952f3ef598160f9051d | Python | aldslvda/readings | /Fluent-Python/9. A Pythonic Object/vector2d_v1.py | UTF-8 | 365 | 2.578125 | 3 | [] | no_license | from vector2d_v0 import Vector2d as vec
class Vector2d(vec):
    """Vector2d extended with an alternate constructor and format() support."""

    @classmethod
    def frombytes(cls, octets):
        """Rebuild a vector from bytes: first byte is the array typecode,
        the rest are the packed components."""
        typecode = chr(octets[0])
        body = memoryview(octets[1:]).cast(typecode)
        return cls(*body)

    def __format__(self, fmt_spec=''):
        """Render as '(x, y)', applying fmt_spec to each component."""
        parts = (format(component, fmt_spec) for component in self)
        return '({}, {})'.format(*parts)
07506b81de727e2c4cda0917105a7a1a83f2d2be | Python | Gurmindermultani/ml | /assigments/neural_networks.py | UTF-8 | 258 | 2.609375 | 3 | [
"MIT"
] | permissive | import numpy as np
import plotly.express as px
# Saved plotly example (Gapminder life-expectancy line chart) -- unused.
# print(np.linspace(-1.5, 1.5, num=10))
# df = px.data.gapminder().query("continent == 'Oceania'")
# fig = px.line(df, x='year', y='lifeExp', color='country')
# fig.show()
# Print 10 evenly spaced values from -1.5 to 1.5 (endpoints included).
print(np.linspace(-1.5, 1.5, num=10))
3f8ae153e210741f008e3b4aa0b212c5d5294879 | Python | ashish-bisht/Ds-and-Algo | /dp/min_coin.py | UTF-8 | 500 | 3.640625 | 4 | [] | no_license | def min_coin(denoms, amount):
    # ways[a] = fewest coins needed to reach amount a (inf = unreachable).
    ways = [float("inf") for i in range(amount+1)]
    ways[0] = 0
    for denom in denoms:
        for current_amount in range(1, amount+1):
            if denom <= current_amount:
                # Either keep the current best, or spend one `denom` coin
                # on top of the best solution for the remainder.
                ways[current_amount] = min(
                    ways[current_amount], 1+ways[current_amount-denom])
    return ways[amount] if ways[amount] != float("inf") else -1
if __name__ == "__main__":
    # Demo: fewest coins from {1, 5, 10} summing to 7 (expected answer: 3).
    target_amount = 7
    coin_values = [1, 5, 10]
    print(min_coin(coin_values, target_amount))
| true |
cf0e021b0ab7b69eb70e52462455810e5c6d880e | Python | BrandaoDeSerra/python | /Exercicio IA/NLP/aula_02.py | UTF-8 | 1,283 | 2.90625 | 3 | [] | no_license | import nltk.corpus
nltk.download('mac_morpho')
# Portuguese POS-tagged corpus (Mac-Morpho), as (word, tag) sentences.
sentencas = nltk.corpus.mac_morpho.tagged_sents()
# print(len(sentencas)) # 51397 sentences total
# print(sentencas[10])
# Lower-case every word, dropping empty sentences.
sentencas_lowercase = [[(p.lower(),t) for (p,t) in sentenca] for sentenca in sentencas if sentenca]
sentencas_treinamento = sentencas_lowercase[5000:] # 46397 training sentences
sentencas_teste = sentencas_lowercase[:5000] # 5000 held-out test sentences
# >>>>> Taggers, each backing off to the previous (simpler) one
# NOTE(review): Tagger.evaluate() was deprecated in newer NLTK in favour of
# .accuracy() -- confirm the NLTK version before upgrading.
# Baseline tagger: labels every token 'N'
rotulador0 = nltk.DefaultTagger('N')
print(rotulador0.evaluate(sentencas_teste)) # check accuracy
# Unigram
rotulador1 = nltk.UnigramTagger(sentencas_treinamento,backoff=rotulador0)
print(rotulador1.evaluate(sentencas_teste))# check accuracy
# Bigram
rotulador2 = nltk.BigramTagger(sentencas_treinamento,backoff=rotulador1)
print(rotulador2.evaluate(sentencas_teste))# check accuracy
# Trigram
rotulador3 = nltk.TrigramTagger(sentencas_treinamento,backoff=rotulador2)
print(rotulador3.evaluate(sentencas_teste))# check accuracy
# N-gram
# NOTE(review): NgramTagger's first positional argument is n (the order);
# this call passes the training data in its place -- verify it runs.
rotulador4 = nltk.NgramTagger(sentencas_treinamento,backoff=rotulador3)
print(rotulador4.evaluate(sentencas_teste))# check accuracy
# HMM: Hidden Markov Model tagger
rotulador5 = nltk.HiddenMarkovModelTagger(sentencas_treinamento,)
print(rotulador5.evaluate(sentencas_teste))# check accuracy
d55677737e31c9b92d9d62431f1a91f44ec1fd58 | Python | jverganz/implementation-solid | /segregation_interface_principle/penguin.py | UTF-8 | 258 | 2.546875 | 3 | [] | no_license | from bird_interface import BirdInterface
from swimming_interface import SwimmingBirdInterface
class Penguin(BirdInterface, SwimmingBirdInterface):
    """A bird that implements both the eating and the swimming interfaces."""

    def swim(self):
        """Return a message showing this bird can swim."""
        return 'I can swim!'

    def eat(self):
        """Return a message showing this bird can eat."""
        return 'I can eat!'
e59dfce930c8d690e92cebe94973882646f0591b | Python | Jerrydepon/LeetCode | /2_string/easy/819. Most Common Word.py | UTF-8 | 1,267 | 3.296875 | 3 | [] | no_license | # replace "!?',;." with space
# Strategy: replace "!?',;." with spaces, then count every non-banned
# lower-cased word while tracking the running maximum.
class Solution:
    # Bug fix: the annotation used List without `from typing import List`,
    # which raises NameError at class-creation time; a string literal
    # annotation keeps the intent without the import.
    def mostCommonWord(self, paragraph: str, banned: "List[str]") -> str:
        """Return the most frequent non-banned word in *paragraph*.

        Matching is case-insensitive; the characters !?',;. act as
        word separators.
        """
        for punctuation in "!?',;.":
            paragraph = paragraph.replace(punctuation, " ")
        banned_words = set(banned)  # O(1) membership vs O(len(banned)) list scan
        counts = {}
        best_word, best_count = "", 0
        for word in paragraph.lower().split():
            if word in banned_words:
                continue
            counts[word] = counts.get(word, 0) + 1
            if counts[word] > best_count:
                best_count = counts[word]
                best_word = word
        return best_word
# import re
# class Solution:
# def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:
# dic = {}
# paragraph = re.split(',|\.|!|,|\?|;|:|\'| ', paragraph)
# for word in paragraph:
# word = word.lower()
# if word == '':
# continue
# if word in dic:
# dic[word] += 1
# else:
# dic[word] = 1
# for k in dic:
# if k in banned:
# dic[k] = 0
# return max(dic, key=dic.get)
| true |
17c65bb560e5108478888caa6e6a790334c5f38e | Python | DavidBetteridge/AdventOfCode2019 | /Day23/Computer.py | UTF-8 | 7,058 | 3.34375 | 3 | [] | no_license | import enum
class OpCode(enum.Enum):
    """Intcode operation codes (the low two decimal digits of an instruction)."""
    ADD = 1
    MULTIPLY = 2
    INPUT = 3
    OUTPUT = 4
    JUMP_IF_TRUE = 5
    JUMP_IF_FALSE = 6
    LESS_THAN = 7
    EQUALS = 8
    ADJUST_RELATIVE_BASE = 9
    HALT = 99
class ParameterMode(enum.Enum):
    """How an instruction parameter is interpreted."""
    POSITION = 0   # parameter is an address to read/write
    IMMEDIATE = 1  # parameter is the value itself
    RELATIVE = 2   # parameter is an offset from the relative base
class instruction:
    """A decoded Intcode instruction: opcode plus one mode per parameter."""

    def __init__(self, instruction):
        digits = str(instruction)
        # The low two digits encode the opcode ...
        self.opCode = OpCode(int(digits[-2:]))
        # ... and the digits left of them encode the parameter modes; a
        # missing digit means mode 0 (POSITION).
        self.parameter1Mode = self._mode_digit(digits, 3)
        self.parameter2Mode = self._mode_digit(digits, 4)
        self.parameter3Mode = self._mode_digit(digits, 5)

    @staticmethod
    def _mode_digit(digits, offset):
        """Return the ParameterMode encoded *offset* places from the right,
        defaulting to POSITION when the instruction is too short."""
        if len(digits) < offset:
            return ParameterMode.POSITION
        return ParameterMode(int(digits[-offset]))
class Computer:
    """Intcode virtual machine.

    `memory` is a mapping from address -> integer; addresses never written
    read back as 0.  `run_program` executes until HALT, and pauses on every
    second INPUT so the caller can interleave I/O (see run_program).
    """

    def __init__(self, memory):
        self.memory = memory
        self.relativeBase = 0
        # When False, the next INPUT yields control back to the caller.
        self.resuming = True

    def read_value_for_parameter_one(self, command, instructionPointer):
        """Read the value of parameter 1; unset addresses read as 0."""
        pointer = self.address(instructionPointer + 1, command.parameter1Mode)
        if pointer in self.memory:
            return self.memory[pointer]
        else:
            return 0

    def read_value_for_parameter_two(self, command, instructionPointer):
        """Read the value of parameter 2; unset addresses read as 0."""
        pointer = self.address(instructionPointer + 2, command.parameter2Mode)
        if pointer in self.memory:
            return self.memory[pointer]
        else:
            return 0

    def read_address_for_parameter_one(self, command, instructionPointer):
        """Return the address denoted by parameter 1 (the INPUT write target)."""
        if command.parameter1Mode == ParameterMode.POSITION:
            return self.memory[instructionPointer + 1]
        elif command.parameter1Mode == ParameterMode.RELATIVE:
            return self.memory[instructionPointer + 1] + self.relativeBase
        else:
            # Bug fix: the message previously reported parameter3Mode here,
            # hiding which mode actually caused the failure.
            raise Exception(f"parameterMode must be POSITION or RELATIVE. Not {command.parameter1Mode}.")

    def read_address_for_parameter_three(self, command, instructionPointer):
        """Return the address denoted by parameter 3 (a write target)."""
        if command.parameter3Mode == ParameterMode.POSITION:
            return self.memory[instructionPointer + 3]
        elif command.parameter3Mode == ParameterMode.RELATIVE:
            return self.memory[instructionPointer + 3] + self.relativeBase
        else:
            raise Exception(f"parameterMode must be POSITION or RELATIVE. Not {command.parameter3Mode}.")

    def address(self, addressOrValue, parameterMode):
        """Resolve a read parameter at `addressOrValue` to the address whose
        value should be used (IMMEDIATE returns the location itself)."""
        if parameterMode == ParameterMode.POSITION:
            return self.memory[addressOrValue]
        elif parameterMode == ParameterMode.IMMEDIATE:
            return addressOrValue
        elif parameterMode == ParameterMode.RELATIVE:
            return self.memory[addressOrValue] + self.relativeBase
        else:
            raise Exception(f"parameterMode must be POSITION, IMMEDIATE or RELATIVE. Not {parameterMode}.")

    def run_program(self, inputFunction, outputFunction, instructionPointer = 0):
        """Execute from `instructionPointer` until HALT.

        INPUT alternates: the first hit consumes inputFunction(), the next
        returns the current instruction pointer so the caller can regain
        control; re-enter by passing that pointer back in.  OUTPUT values
        are delivered through outputFunction.
        """
        while True:
            command = instruction(self.memory[instructionPointer])
            if command.opCode == OpCode.ADD:
                parameter1 = self.read_value_for_parameter_one(command, instructionPointer)
                parameter2 = self.read_value_for_parameter_two(command, instructionPointer)
                parameter3 = self.read_address_for_parameter_three(command, instructionPointer)
                self.memory[parameter3] = parameter1 + parameter2
                instructionPointer += 4
            elif command.opCode == OpCode.MULTIPLY:
                parameter1 = self.read_value_for_parameter_one(command, instructionPointer)
                parameter2 = self.read_value_for_parameter_two(command, instructionPointer)
                parameter3 = self.read_address_for_parameter_three(command, instructionPointer)
                self.memory[parameter3] = parameter1 * parameter2
                instructionPointer += 4
            elif command.opCode == OpCode.INPUT:
                if self.resuming:
                    parameter1 = self.read_address_for_parameter_one(command, instructionPointer)
                    self.memory[parameter1] = inputFunction()
                    instructionPointer += 2
                    self.resuming = False
                else:
                    # Yield control; the caller resumes with this pointer.
                    self.resuming = True
                    return instructionPointer
            elif command.opCode == OpCode.OUTPUT:
                parameter1 = self.read_value_for_parameter_one(command, instructionPointer)
                outputFunction(parameter1)
                instructionPointer += 2
            elif command.opCode == OpCode.JUMP_IF_TRUE:
                parameter1 = self.read_value_for_parameter_one(command, instructionPointer)
                parameter2 = self.read_value_for_parameter_two(command, instructionPointer)
                if parameter1 != 0:
                    instructionPointer = parameter2
                else:
                    instructionPointer += 3
            elif command.opCode == OpCode.JUMP_IF_FALSE:
                parameter1 = self.read_value_for_parameter_one(command, instructionPointer)
                parameter2 = self.read_value_for_parameter_two(command, instructionPointer)
                if parameter1 == 0:
                    instructionPointer = parameter2
                else:
                    instructionPointer += 3
            elif command.opCode == OpCode.LESS_THAN:
                parameter1 = self.read_value_for_parameter_one(command, instructionPointer)
                parameter2 = self.read_value_for_parameter_two(command, instructionPointer)
                parameter3 = self.read_address_for_parameter_three(command, instructionPointer)
                if parameter1 < parameter2:
                    self.memory[parameter3] = 1
                else:
                    self.memory[parameter3] = 0
                instructionPointer += 4
            elif command.opCode == OpCode.EQUALS:
                parameter1 = self.read_value_for_parameter_one(command, instructionPointer)
                parameter2 = self.read_value_for_parameter_two(command, instructionPointer)
                parameter3 = self.read_address_for_parameter_three(command, instructionPointer)
                if parameter1 == parameter2:
                    self.memory[parameter3] = 1
                else:
                    self.memory[parameter3] = 0
                instructionPointer += 4
            elif command.opCode == OpCode.ADJUST_RELATIVE_BASE:
                parameter1 = self.read_value_for_parameter_one(command, instructionPointer)
                self.relativeBase += parameter1
                instructionPointer += 2
            elif command.opCode == OpCode.HALT:
                break
            else:
                print(f"Error {self.memory[instructionPointer]}")
4125a7c4487d9be3de43ac60f3c00f9f04bca17b | Python | Blake2912/30DaysOfCode | /Day3/TuplesRevision.py | UTF-8 | 252 | 3.6875 | 4 | [] | no_license | # Here in this section I couldn't find any interesting project to implement
# So I am just using comments here; the key point is that tuples are immutable.
subject = ("Physics", "Chemistry", "Mathematics", "Economics")
# Print the elements space-separated on one line ...
for i in subject:
    print(i, end=" ")
print()
# ... then the tuple's own repr (with parentheses and quotes).
print(subject)
32f1935d358d28421fa31aca30ea9e4f58870f7f | Python | samskeller/GDPWebApp | /createInsertStatements.py | UTF-8 | 1,068 | 3.171875 | 3 | [] | no_license | ''' createInsertStatements.py
Justin Norden, Cole Stephan, Sam Keller
A script for converting some GDP data into SQL for use in MySQL.
'''
import sys
import csv
makeTables = '''DROP TABLE IF EXISTS gdpData;
CREATE TABLE gdpData (
fip text,
industryString text,
yearString text,
gdpString text
);
'''
print makeTables
reader = csv.reader(open(sys.argv[1]))
titleRow = reader.next()[4:]
states = {}
for row in reader:
row = map(str.strip, row)
# this got rid of our information on sub industries (spaces indicated a sub ind)
states[row[0],row[2]] = row[1:]
overallStates = []
for state in states:
for k in range(len(titleRow)):
fip = state[0]
stateRow = states[state]
stateName = stateRow[0]
yearString = titleRow[k]
gdpString = stateRow[k+3]
industryString = stateRow[2]
queryGDPData = "INSERT INTO gdpData (fip, industryString, yearString, gdpString)"
queryGDPData += " VALUES ('%s', '%s', '%s', '%s');" % (fip, industryString, yearString, gdpString)
print queryGDPData | true |
f7ee6a2b5bd9cd032f3cd578a4507490cef390e7 | Python | sachinaybhujbal/Python | /Assignment 1/Check_even_odd.py | UTF-8 | 164 | 3.75 | 4 | [] | no_license | def ChkNum(no):
    # An even number leaves remainder 0 when divided by 2.
    a = no % 2;
    if a == 0:
        print("Even Number");
    else:
        print("Odd Number");
# Prompt the user and report whether the value is even or odd.
print("Enter a number to check");
no = int(input());
ChkNum(no);
| true |
83cd46d7dc3b9350b2907eca98711a3cf959e20a | Python | JDavid121/Script-Curso-Cisco-Python | /208 errors failures.py | UTF-8 | 527 | 4.0625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 21 14:09:24 2020
errors and exception handle
@author: David
"""
# program that handle a various types of exceptions
try:
x = int(input("Enter a number: "))
y = 1 / x
print(y)
except ZeroDivisionError: # Divide by zero error
print("You cannot divide by zero, sorry.")
except ValueError: # Value entered must be a number.
print("You must enter an integer value.")
except:
print("Oh dear, something went wrong...")
print("THE END.")
| true |
ef732e9b873c649d26465b8d4d83db66c4e9a5b1 | Python | NicolaiFinstadLarsen/Hackerrank | /2.Basic Data Types/Hackerrank oppgave 4(Finding the percentage).py | UTF-8 | 321 | 3.109375 | 3 | [] | no_license | if __name__ == '__main__':
for i in range(int(input())):
name = input("Name: ")
score = int(float(input("Score: ")))
name.append(score)
print(python_students)
#stud_and_grade.sort()
#print(stud_and_grade[0])
| true |
74d735bc9a99566510b3d7f6aa17eee9b0b1a12a | Python | zhulf0804/Coding.Python | /leetcode/371_两整数之和.py | UTF-8 | 296 | 3.28125 | 3 | [] | no_license | class Solution:
def getSum(self, a: int, b: int) -> int:
carry = (a & b) << 1
cur = a ^ b
ind = 1
while carry:
a, b = cur, carry
carry = (a & b) << 1
cur = a ^ b
return cur
# Manual smoke test: -1 + 1 should give 0 (the result is not printed).
a, b = -1, 1
s = Solution()
s.getSum(a, b)
bfd97815436bd58daaed9e221558b1cff86e25da | Python | ericdill/conda-lock | /conda_lock/src_parser/lockfile.py | UTF-8 | 3,248 | 2.515625 | 3 | [
"BSD-3-Clause",
"MIT"
] | permissive | import json
import pathlib
from textwrap import dedent
import yaml
from . import Lockfile
def parse_conda_lock_file(
    path: pathlib.Path,
) -> Lockfile:
    """Load and validate a conda-lock YAML file from *path*.

    Raises FileNotFoundError when the file is missing, and ValueError
    when the embedded lockfile version is absent, non-integer, or newer
    than this code supports.
    """
    if not path.exists():
        raise FileNotFoundError(f"{path} not found")
    with path.open() as stream:
        data = yaml.safe_load(stream)
    file_version = data.pop("version", None)
    version_supported = isinstance(file_version, int) and file_version <= Lockfile.version
    if not version_supported:
        raise ValueError(f"{path} has unknown version {file_version}")
    return Lockfile.parse_obj(data)
def write_conda_lock_file(
    content: Lockfile, path: pathlib.Path, include_help_text: bool = True
) -> None:
    """Serialize *content* to *path* as YAML, topologically sorted, with an
    optional '#'-commented usage banner written before the document."""
    content.toposort_inplace()
    with path.open("w") as f:
        if include_help_text:
            categories = set(p.category for p in content.package)
            # Emit a dedented text block as '# '-prefixed comment lines,
            # skipping a leading/trailing empty line from the f-string.
            def write_section(text: str) -> None:
                lines = dedent(text).split("\n")
                for idx, line in enumerate(lines):
                    if (idx == 0 or idx == len(lines) - 1) and len(line) == 0:
                        continue
                    print(("# " + line).rstrip(), file=f)
            write_section(
                f"""
                This lock file was generated by conda-lock (https://github.com/conda-incubator/conda-lock). DO NOT EDIT!
                A "lock file" contains a concrete list of package versions (with checksums) to be installed. Unlike
                e.g. `conda env create`, the resulting environment will not change as new package versions become
                available, unless you explicitly update the lock file.
                Install this environment as "YOURENV" with:
                    conda-lock install -n YOURENV --file {path.name}
                """
            )
            # Extra sections only when the lock actually has those categories.
            if "dev" in categories:
                write_section(
                    f"""
                    This lock contains optional development dependencies. Include them in the installed environment with:
                        conda-lock install --dev-dependencies -n YOURENV --file {path.name}
                    """
                )
            extras = sorted(categories.difference({"main", "dev"}))
            if extras:
                write_section(
                    f"""
                    This lock contains optional dependency categories {', '.join(extras)}. Include them in the installed environment with:
                        conda-lock install {' '.join('-e '+extra for extra in extras)} -n YOURENV --file {path.name}
                    """
                )
            write_section(
                f"""
                To update a single package to the latest version compatible with the version constraints in the source:
                    conda-lock lock --lockfile {path.name} --update PACKAGE
                To re-solve the entire environment, e.g. after changing a version constraint in the source file:
                    conda-lock {' '.join('-f '+path for path in content.metadata.sources)} --lockfile {path.name}
                """
            )
        # Round-trip through the pydantic JSON representation so aliases and
        # unset fields are handled, then prepend the format version.
        yaml.dump(
            {
                "version": Lockfile.version,
                **json.loads(content.json(by_alias=True, exclude_unset=True)),
            },
            f,
        )
| true |
0fea6b2bdef9235ab1c3c5c7bd9d51d8b2dd0460 | Python | gustavodfaguiar/learning-python | /py-tricks/how_to_sort_a_python_dict_by_value.py | UTF-8 | 335 | 4.09375 | 4 | [] | no_license | # How to sort a Python dict by value
# Two equivalent ways to get a dict's items sorted by value.
list_order = {'a': 4, 'b': 3, 'c': 2, 'd': 1}

# 1) key = a lambda picking the value out of each (key, value) item.
order_list = sorted(list_order.items(), key=lambda item: item[1])
print(order_list)

# 2) key = operator.itemgetter, the stdlib equivalent of that lambda.
import operator

order_list_two = sorted(list_order.items(), key=operator.itemgetter(1))
print(order_list_two)
| true |
21e0662d302a735e5e3cf87ae771271415f7dabb | Python | ZhaoKaidii/leetcode-my-solutions- | /#7. Reverse Integer.py | UTF-8 | 189 | 2.671875 | 3 | [] | no_license | class Solution:
def reverse(self, x: int) -> int:
sig=1 if x>0 else -1
s=str(abs(x))
ans=int(s[::-1])*sig
return ans if ans in range(-2**31,2**31) else
| true |
13dea7044299edb30c2f55f3e5231376b53cd5f3 | Python | jblairkiel/UA-CS-150 | /exam1/ichi/maxswap.py | UTF-8 | 884 | 3.96875 | 4 | [] | no_license | def main():
arr1 = [0,0,0]
print("Initial state of array: ",arr1)
i1 = eval(input("What is the first number? "))
i2 = eval(input("What is the second number? "))
i3 = eval(input("What is the third number? "))
arr2 = [i1,i2,i3]
print("State of array after input = ", arr2)
if (i1>i2 and i1>i3):
print("The largest number is", i1," and at location 1")
maxIndex = i1
print("State of array after swap: " [i1,i2,i3])
elif (i2>i3):
print("The largest number is", i2," and at location 1")
maxIndex = i2
i2 = i1
i1 = maxIndex
print("State of array after swap: ", [i1,i2,i3])
else:
print("The largest number is", i3," and at location 1")
maxIndex = i3
i3 = i1
i1 = maxIndex
print("State of array after swap: ", [i1,i2,i3])
# Runs immediately on import/execution (no __main__ guard).
main()
| true |
66135e6640096522a78328efa5e07b15eb691c9f | Python | sunmory/bert_text_classification | /bert/my_data_loader.py | UTF-8 | 7,242 | 2.859375 | 3 | [] | no_license | # encoding: utf-8
import os
import csv
import collections
import pandas as pd
import tensorflow as tf
from bert import tokenization
class InputExample(object):
    """A single training/test example for simple sequence classification."""
    def __init__(self, guid, text_a, text_b=None, label=None):
        """Constructs an InputExample.
        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For single
                sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second sequence.
                Only must be specified for sequence pair tasks.
            label: (Optional) string. The label of the example. This should be
                specified for train and dev examples, but not for test examples.
        """
        # Values are stored untokenized; tokenization happens later in
        # MentionProcessor.convert_single_example.
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
class InputFeatures(object):
    """A single set of features of data.
    Holds the model inputs built by convert_single_example; all list
    fields are padded to max_seq_length.
    """
    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        self.input_ids = input_ids      # vocabulary ids, 0-padded
        self.input_mask = input_mask    # 1 for real tokens, 0 for padding
        self.segment_ids = segment_ids  # 0 for sequence A, 1 for sequence B
        self.label_id = label_id        # integer class label
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""
    def get_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()
    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()
    def get_test_examples(self, data_dir):
        """Gets a collection of `InputExample`s for prediction."""
        raise NotImplementedError()
    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()
    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file."""
        # NOTE(review): tf.gfile is the TensorFlow 1.x API (tf.io.gfile in
        # TF2); this module also uses tf.logging / tf.python_io below.
        with tf.gfile.Open(input_file, "r") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            lines = []
            for line in reader:
                lines.append(line)
            return lines
class MentionProcessor(DataProcessor):
    """Reads the mention-classification CSV dataset and converts examples
    into BERT input features / TFRecord files."""

    def __init__(self):
        pass

    def get_examples(self, data_path, label_path):
        """
        Read samples from the original dataset and store them as InputExample.
        :param data_path: CSV with 'id' index plus 'title' and 'content' columns
        :param label_path: CSV with 'id' index plus a 'label' column
        :return: list of InputExample (text_a = title + content, no text_b)
        """
        examples = []
        dataframe = pd.read_csv(data_path, encoding='utf-8', index_col='id')
        labelframe = pd.read_csv(label_path, encoding='utf-8', index_col='id')
        for i in range(dataframe.shape[0]):
            text_id = dataframe.index[i]
            text = dataframe.loc[text_id, 'title'] + dataframe.loc[text_id, 'content']
            # text = text[:max_seq_length]
            guid = "train-%d" % (i)
            text = tokenization.convert_to_unicode(text)
            label = labelframe.loc[text_id, 'label']
            examples.append(
                InputExample(guid=guid, text_a=text, text_b=None, label=label)
            )
        return examples

    def convert_single_example(self, ex_index, example, label_list, max_seq_length, tokenizer):
        """Tokenize one InputExample into a padded InputFeatures record,
        logging the first five examples for inspection."""
        token_a = tokenizer.tokenize(example.text_a)
        token_b = None
        if example.text_b is not None:
            # Bug fix: this previously tokenized example.text_a again,
            # so sentence-pair inputs duplicated the first sequence.
            token_b = tokenizer.tokenize(example.text_b)
        if token_b:
            # Reserve 3 slots for [CLS], [SEP], [SEP] in the pair case ...
            self._truncate_seq_pair(token_a, token_b, max_seq_length - 3)
        else:
            # ... and 2 slots for [CLS], [SEP] in the single-sequence case.
            if len(token_a) > max_seq_length - 2:
                token_a = token_a[:max_seq_length - 2]
        tokens = []
        segment_ids = []
        tokens.append('[CLS]')
        segment_ids.append(0)
        for token in token_a:
            tokens.append(token)
            segment_ids.append(0)
        tokens.append('[SEP]')
        segment_ids.append(0)
        if token_b:
            for token in token_b:
                tokens.append(token)
                segment_ids.append(1)
            tokens.append('[SEP]')
            segment_ids.append(1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        input_masks = [1] * len(input_ids)
        # Zero-pad everything out to max_seq_length.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_masks.append(0)
            segment_ids.append(0)
        assert len(input_ids) == max_seq_length
        assert len(input_masks) == max_seq_length
        assert len(segment_ids) == max_seq_length
        label_id = example.label
        if ex_index < 5:
            tf.logging.info("*** Example ***")
            tf.logging.info("guid: %s" % (example.guid))
            tf.logging.info("tokens: %s" % " ".join(
                [tokenization.printable_text(x) for x in tokens]))
            tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_masks]))
            tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
        feature = InputFeatures(input_ids=input_ids, input_mask=input_masks, segment_ids=segment_ids, label_id=int(label_id))
        return feature

    def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length."""
        # This is a simple heuristic which will always truncate the longer sequence
        # one token at a time. This makes more sense than truncating an equal percent
        # of tokens from each, since if one sequence is very short then each token
        # that's truncated likely contains more information than a longer sequence.
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()

    def file_based_convert_examples_to_features(self, examples, label_list, max_seq_length, tokenizer, output_file):
        """Convert every example and write the features to a TFRecord file."""
        writer = tf.python_io.TFRecordWriter(output_file)
        def create_int_feature(values):
            return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
        for (ex_index, example) in enumerate(examples):
            if ex_index % 10000 == 0:
                tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
            feature = self.convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer)
            rd_feature = collections.OrderedDict()
            rd_feature['input_ids'] = create_int_feature(feature.input_ids)
            rd_feature['input_mask'] = create_int_feature(feature.input_mask)
            rd_feature['segment_ids'] = create_int_feature(feature.segment_ids)
            rd_feature['label_ids'] = create_int_feature([feature.label_id])
            rd_example = tf.train.Example(features=tf.train.Features(feature=rd_feature))
            writer.write(rd_example.SerializeToString())
        # Bug fix: close the writer so buffered records are flushed and the
        # TFRecord file is complete.
        writer.close()
| true |
576830303c952eb95b61836366a78295a94b6d27 | Python | humorbeing/python_github | /evolutionary_algorithms/course/chapter 02/mu+lambda-EA_V1003.py | UTF-8 | 2,988 | 2.90625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from time import sleep
plt.ion()
def function_f(x):
    """Fitness landscape being maximised: f(x) = x * sin(10*pi*x) + 2."""
    return x * np.sin(10 * np.pi * x) + 2.0
mu = 20  # number of parents kept each generation (the "mu" in mu+lambda)
laaambda = 20  # 'lambda' is conflicting with python's function.
sigma = 0.5  # std-dev of the gaussian mutation step
maxgen = 500  # number of generations to run
min_x = -1  # lower bound of the search interval
max_x = 2  # upper bound of the search interval
range_x = abs(max_x-min_x)  # width of the search interval
generation = []  # current population (filled in by the driver loop below)
fitness = []  # (fitness, individual) pairs of the current generation
representation = []  # phenotype view of the generation (identical here)
mating_pool = []  # flat list of parent indices, two per offspring
def generation_initialization():
    """Sample mu individuals uniformly from [min_x, max_x]."""
    raw = np.random.uniform(size=mu)
    return np.array(raw * range_x + min_x)
def generation_representation():
    """Return the current generation as an array (genotype == phenotype here)."""
    return np.array(generation)
def generation_evaluation():
    """Pair every individual with its fitness: [(f(x), x), ...] as an array."""
    scored = [(function_f(individual), individual) for individual in representation]
    return np.array(scored)
def parent_selection():
    """Draw laaambda random parent pairs (indices into the generation).

    The two indices of a pair are always distinct; the result is a flat
    array of 2*laaambda indices.
    """
    pool = []
    for _ in range(laaambda):
        first = np.random.randint(0, mu)
        # Redraw the second parent until it differs from the first.
        second = np.random.randint(0, mu)
        while second == first:
            second = np.random.randint(0, mu)
        pool.extend((first, second))
    return np.array(pool)
def lambda_variation_operation():
    """Create laaambda offspring: midpoint crossover plus gaussian mutation.

    Each child is the average of its two parents (taken from mating_pool),
    perturbed by sigma * N(0, 1) and clamped back into the search interval.
    """
    offspring = []
    for i in range(laaambda):
        parent_a = generation[mating_pool[2 * i]]
        parent_b = generation[mating_pool[2 * i + 1]]
        child = (parent_a + parent_b) / 2.0 + sigma * np.random.normal()
        offspring.append(fix_x(child))
    return np.array(offspring)
def pick_new_generation(gen_fit):
    """Survivor selection: keep the mu fittest individuals.

    gen_fit is a 2 x N array: row 0 holds fitness values, row 1 the
    individuals.  The best column is repeatedly extracted and removed.
    """
    new_gen = []
    for i in range(mu):
        n = np.argmax(gen_fit[0])  # index of the current best fitness
        new_gen.append(gen_fit[1][n])
        gen_fit = np.delete(gen_fit, n, 1)  # drop the chosen column
    return np.array(new_gen)
def fix_x(x):
    """Clamp x into the feasible interval [min_x, max_x]."""
    return min(max(x, min_x), max_x)
def draw_reps(lins_in, canv_in, reps):
    """Plot the individuals `reps` on the line artist `lins_in` and redraw.

    Pauses first: waits for a keypress when the global `manual` is True,
    otherwise sleeps for `sleeping` seconds (simple animation throttle).
    """
    if manual:
        input('Press any key to continue.')
    else:
        sleep(sleeping)
    # lins_in, = ax_in.plot([], [], 'ro')
    lins_in.set_xdata(reps)
    fit = []
    for i in reps:
        fit.append(function_f(i))
    lins_in.set_ydata(fit)
    canv_in.draw()
    canv_in.flush_events()
manual = False  # True: wait for a keypress between frames; False: sleep instead
sleeping = 0.1  # seconds between animation frames when not manual
# Static plot of the fitness landscape.
x = np.arange(min_x, max_x, 0.001)
y = function_f(x)
figure, ax = plt.subplots()
ax.plot(x, y, lw=2)
bluelines, = ax.plot([],[], 'bo')  # mu+lambda pool (blue markers)
redlines, = ax.plot([], [], 'ro')  # surviving parents (red markers)
figure.canvas.draw()
figure.canvas.flush_events()
generation = generation_initialization()
# Main (mu+lambda)-EA loop: select parents, create offspring, merge the
# pools, then keep the mu fittest as the next generation.
for _ in range(maxgen):
    draw_reps(redlines, figure.canvas, generation)
    mating_pool = parent_selection()
    new_gen = lambda_variation_operation()
    generation = np.concatenate((generation, new_gen))
    representation = generation_representation()
    draw_reps(bluelines, figure.canvas, generation)
    fitness = generation_evaluation().T
    generation = pick_new_generation(fitness)
# plt.ioff()
# plt.gcf().clear()
# plt.plot(best_fitness)
# plt.ylim(0, 4)
plt.show()
| true |
fe92017babffb6270a5b98ec570602323661f578 | Python | marthinajonsson/pythonCourse | /python_160615/05-bounding-box.py | UTF-8 | 620 | 3.421875 | 3 | [] | no_license | import unittest
def boundingBox(points):
    """Return the axis-aligned bounding box of (x, y) points.

    The result is a (left, top, right, bottom) tuple, i.e.
    (min x, max y, max x, min y).
    """
    xs, ys = zip(*points)
    return min(xs), max(ys), max(xs), min(ys)
class test(unittest.TestCase):
    """Checks boundingBox against a sample polygon."""
    def test_bunding_box(self):
        # Pentagon whose extremes are x in [-3, 3] and y in [-2, 4].
        polygon = [(0, 4), (3, 2), (2, -2), (-2, -2), (-3, 2)]
        left, top, right, bottom = boundingBox(polygon)
        self.assertEqual(left, -3)
        self.assertEqual(top, 4)
        self.assertEqual(right, 3)
        self.assertEqual(bottom, -2)
if __name__ == '__main__':
    unittest.main()
44d395d0451d2610fdb6f7cd37983bc0ad099504 | Python | ISISComputingGroup/JSON_bourne | /tests/test_data_source_reader.py | UTF-8 | 2,835 | 2.984375 | 3 | [] | no_license | import unittest
from mock import MagicMock, patch
from external_webpage.data_source_reader import DataSourceReader
from hamcrest import *
import zlib
import binascii
def patch_page_contents(request_response, json):
    """Make the mocked requests.get return a page whose .content is `json`."""
    fake_page = MagicMock()
    fake_page.content = json
    request_response.return_value = fake_page
def compress_and_hex(value):
    """Compresses the inputted string and encodes it as hex.

    Args:
        value (str): The string to be compressed
    Returns:
        bytes : A compressed and hexed version of the inputted string
    Raises:
        AssertionError: if value is not a str
    """
    # isinstance instead of `type(value) == str` (the idiomatic check; it
    # also accepts str subclasses).  The AssertionError behaviour is kept.
    assert isinstance(value, str), \
        "Non-str argument passed to compress_and_hex, maybe Python 2/3 compatibility issue\n" \
        "Argument was type {} with value {}".format(value.__class__.__name__, value)
    compr = zlib.compress(value.encode("utf-8"))
    return binascii.hexlify(compr)
class TestDataSourceReader(unittest.TestCase):
    """Checks DataSourceReader.read_config against Python-flavoured JSON.

    The configuration source emits Python literals (single quotes, None,
    True/False) rather than strict JSON; these tests verify the reader
    converts each variant, and that a valid caget answer short-circuits
    the web-server fallback.
    """
    def setUp(self):
        self.reader = DataSourceReader("HOST", "PREFIX")
    @patch("requests.get")
    @patch("external_webpage.data_source_reader.caget")
    def test_GIVEN_JSON_with_single_quotes_WHEN_read_THEN_conversion_successful(self, caget, request_response):
        patch_page_contents(request_response, b"{'data': 'some_data'}")
        json_object = self.reader.read_config()
        assert_that(json_object, is_({"data": "some_data"}))
    @patch("requests.get")
    @patch("external_webpage.data_source_reader.caget")
    def test_GIVEN_JSON_with_None_WHEN_read_THEN_conversion_successful(self, caget, request_response):
        patch_page_contents(request_response, b'{"data": None}')
        json_object = self.reader.read_config()
        assert_that(json_object, is_({"data": None}))
    @patch("requests.get")
    @patch("external_webpage.data_source_reader.caget")
    def test_GIVEN_JSON_with_True_WHEN_read_THEN_conversion_successful(self, caget, request_response):
        patch_page_contents(request_response, b'{"data": True}')
        json_object = self.reader.read_config()
        assert_that(json_object, is_({"data": True}))
    @patch("requests.get")
    @patch("external_webpage.data_source_reader.caget")
    def test_GIVEN_JSON_with_False_WHEN_read_THEN_conversion_successful(self, caget, request_response):
        patch_page_contents(request_response, b'{"data": False}')
        json_object = self.reader.read_config()
        assert_that(json_object, is_({"data": False}))
    @patch("requests.get")
    @patch("external_webpage.data_source_reader.caget")
    def test_GIVEN_valid_config_from_caget_WHEN_read_THEN_webserver_is_not_tried(self, caget, request_response):
        # A good caget answer means requests.get must never be called.
        caget.return_value = compress_and_hex('{"data": false}')
        json_object = self.reader.read_config()
        assert_that(json_object, is_({"data": False}))
        request_response.assert_not_called()
| true |
ee431c5bb80ebc57d8a6e21195313736413a1b9b | Python | nwannaji/GIT-FOLDER | /Face-Recognition-Attendance-System/env/Lib/site-packages/cssutils/tests/test_x.py | UTF-8 | 1,630 | 2.625 | 3 | [
"MIT"
] | permissive | """Testcases for cssutils.css.CSSValue and CSSPrimitiveValue."""
import xml.dom
import pytest
import cssutils
class XTestCase:
    """Tests for cssutils Property priority parsing and serialization."""
    def setup(self):
        cssutils.ser.prefs.useDefaults()
    def teardown(self):
        cssutils.ser.prefs.useDefaults()
    @pytest.mark.xfail(reason="not implemented")
    def test_priority(self):
        "Property.priority"
        s = cssutils.parseString('a { color: red }')
        assert s.cssText == b'a {\n    color: red\n    }'
        assert '' == s.cssRules[0].style.getPropertyPriority('color')
        s = cssutils.parseString('a { color: red !important }')
        assert 'a {\n    color: red !important\n    }' == s.cssText
        assert 'important' == s.cssRules[0].style.getPropertyPriority('color')
        cssutils.log.raiseExceptions = True
        p = cssutils.css.Property('color', 'red', '')
        assert p.priority == ''
        p = cssutils.css.Property('color', 'red', '!important')
        assert p.priority == 'important'
        # A bogus priority must raise while raiseExceptions is on...
        with pytest.raises(xml.dom.SyntaxErr):
            cssutils.css.Property('color', 'red', 'x')
        # ...but is kept as-is once exceptions are silenced.
        cssutils.log.raiseExceptions = False
        p = cssutils.css.Property('color', 'red', '!x')
        assert p.priority == 'x'
        p = cssutils.css.Property('color', 'red', '!x')
        assert p.priority == 'x'
        cssutils.log.raiseExceptions = True
        # invalid but kept!
        # cssutils.log.raiseExceptions = False
        s = cssutils.parseString('a { color: red !x }')
        assert 'a {\n    color: red !x\n    }' == s.cssText
        assert 'x' == s.cssRules[0].style.getPropertyPriority('color')
| true |
27e0019a23db38d1eca6f19b51e629813b8a93f5 | Python | jcborras/withdb | /test/test_psql.py | UTF-8 | 8,057 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8; mode: python; -*-
from datetime import datetime, timedelta, timezone
from io import StringIO
from json import load
from logging import Formatter, StreamHandler, getLogger, getLevelName
from os.path import expanduser
from unittest import TestCase, expectedFailure, main, skip
from pytz import timezone
from withdb import factory, run_select
from withdb.psql import PostgreSQLconnection
# Verbose log-record format: timestamp, logger, file:line, level, function.
FMT = '%(asctime)s %(name)s %(filename)s:%(lineno)d '
FMT += '%(levelname)s:%(levelno)s %(funcName)s: %(message)s'
def logger_output_for(logger_name, logging_level):
    """Capture a logger's output in memory.

    Attaches a StreamHandler writing to a fresh StringIO to `logger_name`
    at `logging_level` (a level name such as 'DEBUG') and returns the
    StringIO so the caller can inspect what was logged.
    """
    level = getLevelName(logging_level)
    buffer = StringIO()
    handler = StreamHandler(buffer)
    handler.setLevel(level)
    handler.setFormatter(Formatter('%(asctime)s %(name)s %(filename)s:%(lineno)d %(levelname)s:%(levelno)s %(funcName)s: %(message)s'))
    target = getLogger(logger_name)
    target.setLevel(level)
    target.addHandler(handler)
    return buffer
class TestDrive(TestCase):
    """Integration tests for the PostgreSQL backend of withdb.

    Requires a live database described by ~/.withdb_test_psql.cfg; the
    tests exercise SELECTs, DDL, bulk loading and the logging emitted by
    the library.
    """
    CFG_FILE = expanduser('~/.withdb_test_psql.cfg')
    def setUp(self):
        # Load the JSON connection config used by every test.
        with open(self.CFG_FILE, 'r') as f:
            self.cfg = load(f)
    def test_00_psql(self):
        self.assertIsNotNone(PostgreSQLconnection('foo'))
    def test_10_open_config_file(self):
        self.assertIsNotNone(self.cfg)
        self.assertEqual(self.cfg['type'], 'psql')
    def test_20_factory_psql_1(self):
        self.assertIsNotNone(factory(self.cfg))
        self.assertIsInstance(factory(self.cfg), PostgreSQLconnection)
    def test_30_factory_psql_and_select(self):
        """Check that a SELECT query runs fine"""
        self.assertIsInstance(factory(self.cfg), PostgreSQLconnection)
        with factory(self.cfg) as conn:
            qry = "SELECT * FROM information_schema.attributes;"
            rows, cols = conn(qry)
            self.assertEqual(len(rows), 0)
            self.assertIn('attribute_name', cols)
            self.assertIn('scope_catalog', cols)
    def test_31_factory_psql_and_select_is_logged(self):
        """Check that a SELECT query runs fine and logged as DEBUG"""
        iostr = logger_output_for(logger_name='withdb', logging_level='DEBUG')
        qry = "SELECT * FROM information_schema.attributes;"
        lod, colnames = run_select(self.cfg, qry)
        self.assertEqual(len(lod), 0)
        self.assertIn('attribute_name', colnames)
        self.assertIn('scope_catalog', colnames)
        _ = iostr.getvalue()
        _ = [i for i in _.splitlines() if 'DEBUG' in i and 'run_select' in i]
        r = 'withdb __init__.py:\\d+ DEBUG:\\d+ run_select: SELECT'
        self.assertRegex(_[0], r)
        self.assertIn(qry, _[0])
    def test_32_factory_psql_and_select_logs_info(self):
        """Check that a SELECT query runs fine and logged as DEBUG"""
        iostr = logger_output_for(logger_name='withdb', logging_level='INFO')
        qry = "SELECT * FROM information_schema.attributes;"
        lod, colnames = run_select(self.cfg, qry)
        self.assertEqual(len(lod), 0)
        self.assertIn('attribute_name', colnames)
        self.assertIn('scope_catalog', colnames)
        _ = iostr.getvalue()
        _ = [i for i in _.splitlines() if 'INFO' in i and 'run_select' in i]
        __ = 'withdb __init__.py:\\d+ INFO:\\d+ run_select:\\s'
        __ += 'SELECT query completed after'
        self.assertRegex(_[0], __)
    def test_40_create_table(self):
        with factory(self.cfg) as conn:
            create_qry = """
            CREATE TABLE on_timezones (
            entry_id INT NOT NULL PRIMARY KEY,
            tz_at_insert VARCHAR(24) DEFAULT CURRENT_SETTING('TIMEZONE'),
            ts_without_tz timestamp without time zone,
            ts_with_tz timestamp with time zone
            );"""
            conn(create_qry)
        with factory(self.cfg) as conn:
            rows, cols = conn("SELECT COUNT(*) AS row_count FROM on_timezones")
            self.assertEqual(rows[0], (0,))
    def test_50_drop_table(self):
        with factory(self.cfg) as conn:
            conn("DROP TABLE IF EXISTS on_timezones")
        with factory(self.cfg) as conn:
            # Selecting from the dropped table must now fail.
            self.assertRaises(
                RuntimeError, conn,
                "SELECT COUNT(*) AS row_count FROM on_timezones")
    def test_60_load_lod(self):
        # Round-trips timezone-aware rows through bulkload_lod and checks
        # PostgreSQL's "with time zone" vs "without time zone" handling.
        utc_tz = timezone('UTC')
        hel_tz = timezone('Europe/Helsinki')
        cph_tz = timezone('Europe/Copenhagen')
        NOW = datetime.utcnow()
        # timezone.localize() just adds a timezone to a datetime w/o tz
        lod = [
            {
                'entry_id': 101,
                'tz_at_insert': 'Etc/UTC',
                'ts_without_tz': utc_tz.localize(NOW),
                'ts_with_tz': utc_tz.localize(NOW),
            },
            {
                'entry_id': 102,
                'tz_at_insert': 'Europe/Helsinki',
                'ts_without_tz': hel_tz.localize(NOW),
                'ts_with_tz': hel_tz.localize(NOW),
            },
            {
                'entry_id': 103,
                'tz_at_insert': 'Europe/Copenhagen',
                'ts_without_tz': cph_tz.localize(NOW),
                'ts_with_tz': cph_tz.localize(NOW),
            },
        ]
        KEYS = ['entry_id', 'tz_at_insert', 'ts_without_tz', 'ts_with_tz']
        for i in lod:
            self.assertEqual(
                i['ts_without_tz'], i['ts_with_tz'].astimezone(utc_tz))
        with factory(self.cfg) as conn:
            create_qry = """
            CREATE TABLE on_timezones (
            entry_id INT NOT NULL PRIMARY KEY,
            tz_at_insert VARCHAR(24) DEFAULT CURRENT_SETTING('TIMEZONE'),
            ts_without_tz timestamp without time zone,
            ts_with_tz timestamp with time zone
            );"""
            conn(create_qry)
        with factory(self.cfg) as conn:
            rows, cols = conn("SELECT COUNT(*) AS row_count FROM on_timezones")
            self.assertEqual(rows[0], (0,))
        with factory(self.cfg) as conn:
            _ = ['entry_id', 'tz_at_insert', 'ts_without_tz', 'ts_with_tz']
            conn.bulkload_lod(
                lod, _, 'on_timezones', self.cfg['tmp_dir'] + '/test_psql_')
        with factory(self.cfg) as conn:
            rows, cols = conn("SELECT * FROM on_timezones")
            self.assertEqual(len(rows), 3)
            _ = [dict(zip(cols, i)) for i in rows]
            for i in _:
                _ = i['ts_with_tz'].astimezone(timezone(i['tz_at_insert']))
                self.assertEqual(i['ts_without_tz'], _.replace(tzinfo=None))
        with factory(self.cfg) as conn:
            conn("DROP TABLE IF EXISTS on_timezones")
    def test_61_logging_at_load_lod(self):
        iostr = logger_output_for(logger_name='withdb', logging_level='DEBUG')
        self.test_60_load_lod()
        _ = iostr.getvalue()
        _ = [i for i in _.splitlines() if 'withdb' in i and 'bulkload' in i]
        self.assertRegex(_[0], 'COPY\\s\\w+\\sFROM')
    def test_70_logs_when_calling_nrow(self):
        """Check that the nrows() method logs messages"""
        iostr = logger_output_for(logger_name='withdb', logging_level='DEBUG')
        with factory(self.cfg) as conn:
            n = conn.nrows("information_schema.attributes")
        _ = iostr.getvalue()
        self.assertIn('withdb', _, 'Missing "withdb.base" logger')
        _ = [i for i in _.splitlines() if 'INFO' in i and 'nrows' in i]
        self.assertEqual(len(_), 1)
        __ = 'withdb\\s\\w+.py:\\d+ INFO:\\d+ nrows: Row count:\\s'
        self.assertRegex(_[0], __)
    def test_80_bad_sql(self):
        "Nicer report when bad SQL is used"
        iostr = logger_output_for(logger_name='withdb', logging_level='DEBUG')
        with factory(self.cfg) as conn:
            self.assertRaises(RuntimeError, conn, 'ASELECT * FROM foobar')
        _ = iostr.getvalue()
        _ = [i for i in _.splitlines() if 'ERROR' in i]
        self.assertTrue(len([i for i in _ if 'message_primary' in i]) > 0)
        self.assertTrue(len([i for i in _ if 'sqlstate' in i]) > 0)
# Allow running this test module directly (unittest.main was imported above).
if __name__ == '__main__':
    main()
| true |
ca63b7f5e89df991cd8f5f9c6c5676063bd01142 | Python | sekakpuC/hanoi_tower_simulator | /test_hanoi_tower_simulator_v1.py | UTF-8 | 434 | 2.53125 | 3 | [] | no_license | from unittest import TestCase
from hanoi_tower_simulator_v1 import get_extra_bar
class Test(TestCase):
    """Unit tests for get_extra_bar."""
    def test_get_extra_bar(self):
        # For any two distinct pegs from {0, 1, 2}, the result is the
        # remaining third peg, regardless of argument order.
        self.assertEqual(1, get_extra_bar(0, 2))
        self.assertEqual(1, get_extra_bar(2, 0))
        self.assertEqual(2, get_extra_bar(0, 1))
        self.assertEqual(2, get_extra_bar(1, 0))
        self.assertEqual(0, get_extra_bar(1, 2))
        self.assertEqual(0, get_extra_bar(2, 1))
| true |
848f8ef718cb86e2d522f202a26eed48c338bc2c | Python | dhruvsharma1999/data-structures-algorithms | /hakerrank-ds/array-ds.py | UTF-8 | 1,328 | 3.78125 | 4 | [
"MIT"
] | permissive | """
An array is a type of data structure that stores elements of the same type in a contiguous block of memory. In an array, , of size , each memory location has some unique index, (where ), that can be referenced as (you may also see it written as ).
Given an array, , of integers, print each element in reverse order as a single line of space-separated integers.
Note: If you've already solved our C++ domain's Arrays Introduction challenge, you may want to skip this.
Input Format
The first line contains an integer, (the number of integers in ).
The second line contains space-separated integers describing .
Constraints
Output Format
Print all integers in in reverse order as a single line of space-separated integers.
Sample Input 1
CopyDownload
Array: arr
1
4
3
2
4
1 4 3 2
Sample Output 1
2 3 4 1"""
import math
import os
import random
import re
import sys
# Complete the reverseArray function below.
# Complete the reverseArray function below.
def reverseArray(a):
    """Return a new list containing the elements of `a` in reverse order.

    The input list is left unmodified.
    """
    # Idiomatic slice reversal replaces the manual backwards-index loop.
    return a[::-1]
if __name__ == '__main__':
    # HackerRank harness: read the count and the array from stdin, write
    # the reversed array (space separated) to the OUTPUT_PATH file.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    arr_count = int(input())
    arr = list(map(int, input().rstrip().split()))
    res = reverseArray(arr)
    fptr.write(' '.join(map(str, res)))
    fptr.write('\n')
    fptr.close()
| true |
00a6c916c8a86f15aae2ccdb4aedf7877c0f9dbc | Python | onurozdem/udacity_data_engineer_project4 | /etl.py | UTF-8 | 6,357 | 2.625 | 3 | [] | no_license | import configparser
from datetime import datetime
import os

from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format
from pyspark.sql.functions import dayofweek, monotonically_increasing_id
# Read AWS credentials from dl.cfg and export them so the hadoop-aws
# connector can authenticate s3a:// reads and writes.
config = configparser.ConfigParser()
config.read('dl.cfg')
os.environ['AWS_ACCESS_KEY_ID']=config["aws"]['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY']=config["aws"]['AWS_SECRET_ACCESS_KEY']
#os.environ['AWS_REGION']=config["aws"]['AWS_REGION']
def create_spark_session():
    """Create (or fetch the already-running) Spark session.

    The session is configured with the hadoop-aws package so s3a:// paths
    can be read and written.

    Returns:
        SparkSession: session of the Spark environment connection.
    """
    builder = SparkSession.builder.config(
        "spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0")
    return builder.getOrCreate()
def process_song_data(spark, input_data, output_data):
    """Build the songs and artists dimension tables from the raw song JSON.

    Reads song_data from `input_data`, deduplicates, and writes both tables
    to `output_data` in parquet format (songs partitioned by year/artist).

    Parameters:
        spark (SparkSession): Session of Spark env. connection
        input_data (string): root URI of the raw data
        output_data (string): root URI for the parquet output
    """
    # get filepath to song data file
    song_data = "{}/song_data/*/*/*".format(input_data)

    # read song data file
    df = spark.read.json(song_data)

    # songs table (the duplicated dropDuplicates() call was removed)
    songs_table = df.select(["song_id", "title", "artist_id", "year", "duration"]).dropDuplicates()

    # write songs table to parquet files partitioned by year and artist
    songs_table.write.parquet(os.path.join(output_data, "songs_table/"),
                              mode="overwrite", partitionBy=["year", "artist_id"])

    # artists table: the raw song records carry artist fields prefixed with
    # "artist_" (the original selected non-existent bare column names such
    # as "name"); alias them to the dimensional column names, keeping the
    # original output spelling ("lattitude") for downstream compatibility.
    # NOTE(review): assumes the standard song-dataset schema
    # (artist_name/artist_location/artist_latitude/artist_longitude) --
    # confirm against the input JSON.
    artists_table = df.select(
        col("artist_id"),
        col("artist_name").alias("name"),
        col("artist_location").alias("location"),
        col("artist_latitude").alias("lattitude"),
        col("artist_longitude").alias("longitude"),
    ).dropDuplicates(["artist_id"])

    # write artists table to parquet files
    artists_table.write.parquet(os.path.join(output_data, "artists_table/"), mode="overwrite")
def process_log_data(spark, input_data, output_data):
    """Build the users and time dimensions and the songplays fact table.

    Reads the event logs, keeps NextSong events, derives timestamp columns,
    joins against the song data and writes all three tables to parquet.

    Bug fixes vs. the original: the time table referenced an undefined
    `actions_df`; the songplays section mixed inconsistent dataframe
    aliases ('logs'/'log'/'log_df', 'songs'/'song_df'); `dayofweek` and
    `monotonically_increasing_id` were used without being imported; the
    users table selected snake_case column names that do not exist in the
    camelCase log schema (the logs use userId/sessionId/userAgent, as the
    songplays selection below shows).

    Parameters:
        spark (SparkSession): Session of Spark env. connection
        input_data (string): root URI of the raw data
        output_data (string): root URI for the parquet output
    """
    # get filepath to log data file
    log_data = "{}/log-data/*".format(input_data)

    # read log data file and keep only song-play events
    df = spark.read.json(log_data)
    df = df.filter(df.page == 'NextSong')

    # users table -- alias camelCase log fields to dimensional names.
    # NOTE(review): firstName/lastName/gender assumed from the event-log
    # schema implied by the userId/sessionId/userAgent usage -- confirm.
    users_table = df.select(
        col("userId").alias("user_id"),
        col("firstName").alias("first_name"),
        col("lastName").alias("last_name"),
        col("gender"),
        col("level"),
    ).dropDuplicates(["user_id"])
    users_table.write.parquet(os.path.join(output_data, "users_table/"), mode="overwrite")

    # epoch-seconds string and human-readable datetime from the ms epoch ts
    get_timestamp = udf(lambda x: str(int(int(x) / 1000)))
    df = df.withColumn('timestamp', get_timestamp(df.ts))
    get_datetime = udf(lambda x: str(datetime.fromtimestamp(int(x) / 1000)))
    df = df.withColumn('datetime', get_datetime(df.ts))

    # time table (fixed: original used the undefined name `actions_df`)
    time_table = df.select('datetime').withColumn('start_time', col('datetime')) \
        .withColumn('hour', hour('datetime')) \
        .withColumn('day', dayofmonth('datetime')) \
        .withColumn('week', weekofyear('datetime')) \
        .withColumn('month', month('datetime')) \
        .withColumn('year', year('datetime')) \
        .withColumn('weekday', dayofweek('datetime'))
    time_table = time_table.dropDuplicates()
    time_table.write.parquet(os.path.join(output_data, "time_table/"),
                             mode="overwrite", partitionBy=["year", "month"])

    # read in song data to use for songplays table
    song_data = "{}/song_data/*/*/*".format(input_data)
    song_df = spark.read.json(song_data)

    # join logs to songs on artist name, with consistent aliases
    songplays_table = df.alias('logs').join(
        song_df.alias('songs'), col('logs.artist') == col('songs.artist_name'))
    # songplay_id, start_time, user_id, level, song_id, artist_id,
    # session_id, location, user_agent (+ year/month for partitioning)
    songplays_table = songplays_table.select(
        col('logs.datetime').alias('start_time'),
        col('logs.userId').alias('user_id'),
        col('logs.level').alias('level'),
        col('songs.song_id').alias('song_id'),
        col('songs.artist_id').alias('artist_id'),
        col('logs.sessionId').alias('session_id'),
        col('logs.location').alias('location'),
        col('logs.userAgent').alias('user_agent'),
        year('logs.datetime').alias('year'),
        month('logs.datetime').alias('month'),
    ).withColumn('songplay_id', monotonically_increasing_id())

    # write songplays table to parquet files partitioned by year and month
    songplays_table.write.parquet(os.path.join(output_data, "songplays_table/"),
                                  mode="overwrite", partitionBy=["year", "month"])
def main():
    """Run the ETL pipeline end to end.

    Creates the Spark session, then processes song data and log data in
    sequence.  Takes no arguments and returns nothing.
    """
    spark = create_spark_session()
    input_data = "s3a://udacity-dend/"
    output_data = "s3a://udacity-dend/project4-out/"
    process_song_data(spark, input_data, output_data)
    process_log_data(spark, input_data, output_data)
if __name__ == "__main__":
    main()
| true |
c242e6fe47aa9ea044db8346197da01c63bda05e | Python | ElliottYan/Dialog | /restore_from_seq.py | UTF-8 | 1,038 | 2.84375 | 3 | [] | no_license | import codecs
import sys
from collections import defaultdict
def restore(f1, f2, out_f):
    """Rebuild dialogue sessions from two parallel files.

    `f1` holds one utterance per line; the matching line of `f2` holds
    "role<TAB>session_id".  Utterances are grouped by session id, each one
    prefixed with its role, and every session is written to `out_f` as a
    single tab-joined line.  Also reports how many sessions do not start
    with role 'p'.
    """
    with codecs.open(f1, 'r', encoding='gb18030', errors='ignore') as fin:
        utterances = fin.readlines()
    with codecs.open(f2, 'r', encoding='gb18030', errors='ignore') as fin:
        meta = fin.readlines()
    sessions = defaultdict(list)
    bad_starts = 0
    for ix, utterance in enumerate(utterances):
        role, session_id = meta[ix].strip().split('\t')
        session_id = int(session_id)
        # A session's first utterance is expected to come from role 'p'.
        if not sessions[session_id] and role != 'p':
            bad_starts += 1
        sessions[session_id].append(role + ':' + utterance.strip())
    print("There are {} sessions don't start with p!".format(bad_starts))
    with codecs.open(out_f, 'w', encoding='gb18030', errors='ignore') as fout:
        for written, session in enumerate(sessions.values(), start=1):
            if written % 1000000 == 0:
                print('Processed {} lines.'.format(written))
            fout.write("\t".join(session) + '\n')
# Command line: restore_from_seq.py <utterance_file> <role_id_file> <output_file>
f1 = sys.argv[1]
f2 = sys.argv[2]
out_f = sys.argv[3]
restore(f1, f2, out_f)
| true |
048ad6a7121184716a1e8e375c1bc87734c90764 | Python | GRSEB9S/HyperSpectral-10 | /HyperSpectral/04_model_training.py | UTF-8 | 4,967 | 3.828125 | 4 | [] | no_license |
# coding: utf-8
# # Training a machine learning model with scikit-learn
# *From the video series: [Introduction to machine learning with scikit-learn](https://github.com/justmarkham/scikit-learn-videos)*
# ## Agenda
#
# - What is the **K-nearest neighbors** classification model?
# - What are the four steps for **model training and prediction** in scikit-learn?
# - How can I apply this pattern to **other machine learning models**?
# ## Reviewing the iris dataset
# In[9]:
from IPython.display import IFrame
from sklearn.utils.multiclass import check_classification_targets, type_of_target
IFrame('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', width=300, height=200)
# - 150 **observations**
# - 4 **features** (sepal length, sepal width, petal length, petal width)
# - **Response** variable is the iris species
# - **Classification** problem since response is categorical
# - More information in the [UCI Machine Learning Repository](http://archive.ics.uci.edu/ml/datasets/Iris)
# ## K-nearest neighbors (KNN) classification
# 1. Pick a value for K.
# 2. Search for the K observations in the training data that are "nearest" to the measurements of the unknown iris.
# 3. Use the most popular response value from the K nearest neighbors as the predicted response value for the unknown iris.
# ### Example training data
#
# 
# ### KNN classification map (K=1)
#
# 
# ### KNN classification map (K=5)
#
# 
# *Image Credits: [Data3classes](http://commons.wikimedia.org/wiki/File:Data3classes.png#/media/File:Data3classes.png), [Map1NN](http://commons.wikimedia.org/wiki/File:Map1NN.png#/media/File:Map1NN.png), [Map5NN](http://commons.wikimedia.org/wiki/File:Map5NN.png#/media/File:Map5NN.png) by Agor153. Licensed under CC BY-SA 3.0*
# ## Loading the data
# In[10]:
# import load_iris function from datasets module
from sklearn.datasets import load_iris
# save "bunch" object containing iris dataset and its attributes
iris = load_iris()
# store feature matrix in "X"
X = iris.data
# store response vector in "y"
y = iris.target
print(type(y))
# In[11]:
# print the shapes of X and y
print(X.shape)
print(y.shape)
# ## scikit-learn 4-step modeling pattern
# **Step 1:** Import the class you plan to use
# In[12]:
from sklearn.neighbors import KNeighborsClassifier
# **Step 2:** "Instantiate" the "estimator"
#
# - "Estimator" is scikit-learn's term for model
# - "Instantiate" means "make an instance of"
# In[13]:
knn = KNeighborsClassifier(n_neighbors=1)
# - Name of the object does not matter
# - Can specify tuning parameters (aka "hyperparameters") during this step
# - All parameters not specified are set to their defaults
# In[16]:
print(knn)
# **Step 3:** Fit the model with data (aka "model training")
#
# - Model is learning the relationship between X and y
# - Occurs in-place
# In[26]:
print(type(y))
print(y)
print(type_of_target(y))
knn.fit(X, y)
# In[18]:
knn.predict([[3, 5, 4, 2]])
# - Returns a NumPy array
# - Can predict for multiple observations at once
# In[19]:
X_new = [[3, 5, 4, 2], [5, 4, 3, 2]]
knn.predict(X_new)
# ## Using a different value for K
# In[11]:
# instantiate the model (using the value K=5)
knn = KNeighborsClassifier(n_neighbors=5)
# fit the model with data
knn.fit(X, y)
# predict the response for new observations
knn.predict(X_new)
# ## Using a different classification model
# In[12]:
# import the class
from sklearn.linear_model import LogisticRegression
# instantiate the model (using the default parameters)
logreg = LogisticRegression()
# fit the model with data
logreg.fit(X, y)
# predict the response for new observations
logreg.predict(X_new)
# ## Resources
#
# - [Nearest Neighbors](http://scikit-learn.org/stable/modules/neighbors.html) (user guide), [KNeighborsClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html) (class documentation)
# - [Logistic Regression](http://scikit-learn.org/stable/modules/linear_model.html#logistic-regression) (user guide), [LogisticRegression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) (class documentation)
# - [Videos from An Introduction to Statistical Learning](http://www.dataschool.io/15-hours-of-expert-machine-learning-videos/)
# - Classification Problems and K-Nearest Neighbors (Chapter 2)
# - Introduction to Classification (Chapter 4)
# - Logistic Regression and Maximum Likelihood (Chapter 4)
# ## Comments or Questions?
#
# - Email: <kevin@dataschool.io>
# - Website: http://dataschool.io
# - Twitter: [@justmarkham](https://twitter.com/justmarkham)
# In[1]:
# from IPython.core.display import HTML
# def css_styling():
# styles = open("styles/custom.css", "r").read()
# return HTML(styles)
# css_styling()
#
| true |
8a75389eac1acc15dfe8294a10e25826b8e76aab | Python | MameYongo/projet2-cartesbancaires | /banqu/AmericanCardVerificateur.py | UTF-8 | 425 | 2.953125 | 3 | [] | no_license | from banqu.AlgoDeLuhn import Luhn
from banqu.Verificateur import Verificateur
class AmericanCardVerificateur(Verificateur):
    """Verifier for "American" card numbers.

    Rewritten from a broken original: it nested a second `VerifierCarte`
    definition inside the first (so the check never ran), used bitwise
    `&`/`|` on comparisons, and called the non-existent `str.startwith`.
    """

    def VerifierCarte(self, cardNumber=None):
        """Validate cardNumber: length/prefix check, then the Luhn algorithm.

        `cardNumber` defaults to None so any existing caller of the original
        zero-argument signature keeps working (it reaches the invalid branch).

        NOTE(review): the accepted shape (17 characters, prefix 39/30) is
        kept from the original, but real American Express numbers are 15
        digits starting with 34/37 -- confirm the intended rule.
        """
        if cardNumber is not None and len(cardNumber) == 17 and (
                cardNumber.startswith("39") or cardNumber.startswith("30")):
            Luhn(cardNumber)
            print("C'est une carte American Express")
        else:
            print(" La Carte invalide")
| true |
f7d8aeb63190f98f4e2b268a900fdd0dda81a2d6 | Python | Tobijoe/LPTHW | /ex15.py | UTF-8 | 645 | 3.609375 | 4 | [
"MIT"
] | permissive |
#from sys import argv argument
from sys import argv
# argv supplies the script name (ex15.py) and the text file to read
script, filename = argv
# open the file named on the command line
txt = open(filename)
# BUG FIX: the f-string had no placeholder; per the original comment it
# should print the filename ("print formatted string, with filename").
print(f"Here is your file {filename}:")
# print contents of filename
print(txt.read())
# close the handle as soon as we are done (the original leaked it)
txt.close()
# prompt for user input
print("Type the filename again:")
# set var file_again to ask for user input of filename
file_again = input("->")
# open and print the file the user named
txt_again = open(file_again)
print(txt_again.read())
txt_again.close()
| true |
cfc312a40b2a43a38cf800bd1086fe11fb39d874 | Python | rafflypohan/Kantin-ITK | /Titipan.py | UTF-8 | 1,233 | 3.125 | 3 | [] | no_license | import datetime
class Titipan:
    """Consignment record: an item a supplier (penitip) leaves for sale.

    Holds the item id, supplier name, item name, unit price, consigned
    quantity and the consignment date (a datetime.date).
    """
    # Constructor
    def __init__(self, idBarang = None, namaPenitip = None, namaBarang = None, harga = None, jumlahTitipan = None, tanggal = None):
        self.__idBarang = idBarang  # item id
        self.__namaPenitip = namaPenitip  # supplier (consignor) name
        self.__namaBarang = namaBarang  # item name
        self.__harga = harga  # unit price
        self.__jumlahTitipan = jumlahTitipan  # consigned quantity
        self.__tanggal = tanggal  # consignment date (datetime.date)
    # Getters and setters
    def getIdBarang(self):
        return self.__idBarang
    def setIdBarang(self, x):
        self.__idBarang = x
    # NOTE: method name keeps the original (typo'd) spelling "getNamPenitip"
    # so existing callers continue to work.
    def getNamPenitip(self):
        return self.__namaPenitip
    def setNamaPenitip(self, x):
        self.__namaPenitip = x
    def getNamaBarang(self):
        return self.__namaBarang
    def setNamaBarang(self, x):
        self.__namaBarang = x
    def getHarga(self):
        return self.__harga
    def setHarga(self, x):
        self.__harga = x
    def getJumlahTitipan(self):
        return self.__jumlahTitipan
    def setJumlahTitipan(self, x):
        self.__jumlahTitipan = x
    def getTanggal(self):
        return self.__tanggal
    def setTanggal(self, y, m, d):
        # Stores the given year/month/day as a datetime.date.
        self.__tanggal = datetime.date(y, m, d)
| true |
c05c011bf0d542b2e23dcc46e5e9ecfb7cba02c2 | Python | Hvids/NumericalMethods | /kp/result_mean_test.py | UTF-8 | 991 | 2.546875 | 3 | [] | no_license | import numpy as np
def read_time(name):
times = []
with open(name, 'r') as f:
for line in f:
times.append(float(line))
return np.array(times)
def write_time(data,name):
with open(name, 'w') as f:
for value in data:
f.write(f'{value}\n')
PATH_IN = './global_test/'
PATH_OUT = './'
times_mean_simple_out_name = 'mean_simple'
times_mean_omp_out_name = 'mean_omp'
times_simple_names = [PATH_IN + f'test_simple_{i}' for i in range(0,9)]
times_omp_names = [PATH_IN + f'test_omp_{i}' for i in range(0,9)]
times_omps = []
times_simples = []
for to_name,ts_name in zip(times_omp_names, times_simple_names):
to = read_time(to_name)
ts = read_time(ts_name)
times_omps.append(to)
times_simples.append(ts)
times_omps = np.array(times_omps).T.mean(1)
times_simples = np.array(times_simples).T.mean(1)
write_time(times_omps, PATH_OUT+times_mean_omp_out_name)
write_time(times_simples,PATH_OUT+times_mean_simple_out_name)
| true |
9b393e2398ce002acbccbc14858ead9b443f8653 | Python | igorsubbotin/leetcode_python | /problem_067.py | UTF-8 | 735 | 3.515625 | 4 | [] | no_license | # Add Binary - https://leetcode.com/problems/add-binary/
class Solution(object):
def addBinary(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
"""
overflow = False
res = ""
i = 0
while i < max(len(a), len(b)):
v = getValue(i, a) + getValue(i, b)
if overflow:
v += 1
overflow = False
if v / 2 > 0:
overflow = True
v = v % 2
res = str(v) + res
i += 1
if overflow:
res = str(1) + res
return res
def getValue(i, s):
n = len(s)
ix = n - i - 1
if ix < 0:
return 0
return int(s[ix]) | true |
380afd33d360abe23cf1ce118203c3b9fbcaadee | Python | abid-sayyad/py_beginners | /RotateArrayRight.py | UTF-8 | 247 | 3.46875 | 3 | [] | no_license | def rightRotateArray(arr, d):
result = [0]*(len(arr))
for i in range(len(arr)):
result[(i + d) % len(arr)] = arr[i]
return result
if __name__=="__main__":
arr = [2,1,2,3,4]
arr = rightRotateArray(arr, 1)
print(arr) | true |
ce2d8ba1b4221841d0d7b8315fdc5a4d75a094d9 | Python | kaist-plrg/jstar | /tests/compile/basic/recent/Decode.spec | UTF-8 | 2,755 | 3.09375 | 3 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | 1. Let _strLen_ be the length of _string_.
1. Let _R_ be the empty String.
1. Let _k_ be 0.
1. Repeat,
1. If _k_ = _strLen_, return _R_.
1. Let _C_ be the code unit at index _k_ within _string_.
1. If _C_ is not the code unit 0x0025 (PERCENT SIGN), then
1. Let _S_ be the String value containing only the code unit _C_.
1. Else,
1. Let _start_ be _k_.
1. If _k_ + 2 ≥ _strLen_, throw a *URIError* exception.
1. If the code units at index (_k_ + 1) and (_k_ + 2) within _string_ do not represent hexadecimal digits, throw a *URIError* exception.
1. Let _B_ be the 8-bit value represented by the two hexadecimal digits at index (_k_ + 1) and (_k_ + 2).
1. Set _k_ to _k_ + 2.
1. Let _n_ be the number of leading 1 bits in _B_.
1. If _n_ = 0, then
1. Let _C_ be the code unit whose value is _B_.
1. If _C_ is not in _reservedSet_, then
1. Let _S_ be the String value containing only the code unit _C_.
1. Else,
1. Let _S_ be the substring of _string_ from _start_ to _k_ + 1.
1. Else,
1. If _n_ = 1 or _n_ > 4, throw a *URIError* exception.
1. If _k_ + (3 × (_n_ - 1)) ≥ _strLen_, throw a *URIError* exception.
1. Let _Octets_ be a List whose sole element is _B_.
1. Let _j_ be 1.
1. Repeat, while _j_ < _n_,
1. Set _k_ to _k_ + 1.
1. If the code unit at index _k_ within _string_ is not the code unit 0x0025 (PERCENT SIGN), throw a *URIError* exception.
1. If the code units at index (_k_ + 1) and (_k_ + 2) within _string_ do not represent hexadecimal digits, throw a *URIError* exception.
1. Let _B_ be the 8-bit value represented by the two hexadecimal digits at index (_k_ + 1) and (_k_ + 2).
1. Set _k_ to _k_ + 2.
1. Append _B_ to _Octets_.
1. Set _j_ to _j_ + 1.
1. Assert: The length of _Octets_ is _n_.
1. If _Octets_ does not contain a valid UTF-8 encoding of a Unicode code point, throw a *URIError* exception.
1. Let _V_ be the code point obtained by applying the UTF-8 transformation to _Octets_, that is, from a List of octets into a 21-bit value.
1. Let _S_ be UTF16EncodeCodePoint(_V_).
1. Set _R_ to the string-concatenation of _R_ and _S_.
1. Set _k_ to _k_ + 1. | true |
5e22e94e4e2cb94486ad9dcaa5bd27eb31b50ce8 | Python | LindnerLab/DenseSuspensions | /ParticleShape/Z_shape_determination.py | UTF-8 | 2,473 | 2.796875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 21 11:42:01 2020
@author: Lars Kool
"""
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy
import os
def cart2pol(x, y):
r = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(r, phi)
def pol2cart(r, phi):
x = r * np.cos(phi)
y = r * np.sin(phi)
return(x, y)
def radialAvg(r, Img, bins):
if len(Img.shape) == 3:
nFiles = np.shape(Img)[2]
else:
nFiles = 1
Avg_I = np.zeros([len(bins)-1, nFiles],dtype='float64')
for i in range(len(bins)-1):
idx = np.array([r > bins[i], r < bins[i+1]]).all(axis=0)
Avg_I [i,:] = np.mean(Img[idx], axis=0)
Avg_centered = Avg_I - np.mean(Avg_I,axis=0)
return Avg_centered
if __name__ == '__main__':
path = r'F:\Lars\Particle Shape\20201015 Size Determination\20x\RawData\\'
files = [file for file in os.listdir(path) if file.endswith('.tif')]
nFiles = len(files)
pixel_size = 1/(0.3974*8) #um/px
# offset = [1182.1, 1232.8]
offset = [[1176.505, 1148.349],[1158.660, 1424.716],[1143.457, 1191.207]]
# bins = np.arange(175, 250, 2)
bins = np.arange(150, 300, 0.5)
# Load example image (to determine size)
Img = plt.imread(path+files[0])
Img_size = np.shape(Img)
# Load the images and determine the radial distribution of the intensities
# around offset
Img = np.zeros([2304,2304,nFiles])
Avg_I = np.zeros([len(bins)-1,nFiles])
r_particles = np.zeros([nFiles])
for i in range(0,nFiles):
Img[:,:,i] = plt.imread(path+files[i])
# Create meshgrid for cart -> pol conversion, and subtract the centoid of the ROI
[xv, yv] = np.meshgrid(range(Img_size[0]),range(Img_size[1]))
xv = xv - offset[i][0]
yv = yv - offset[i][1]
# Convert the xy positions of the pixels to polar coordinates
[r, phi] = cart2pol(xv, yv)
Avg_I[:,i] = radialAvg(r, Img[:, :, i], bins).reshape(len(bins)-1)
r_particles[i] = np.argmin(Avg_I[:,i])
plt.figure(dpi=500)
plt.plot(bins[1:]*pixel_size,Avg_I[:,2])
plt.xlabel(r'r $(\mu m)$')
plt.ylabel('I (a.u.)')
| true |
5b57cec625ff6adb8d3872a00b3b5b289d9cf325 | Python | ananyamaurya/Python-and-Some-Python-Programming | /PalindromeSubset.py | UTF-8 | 1,516 | 3.1875 | 3 | [] | no_license | #!/bin/python3
import math
import os
import random
import re
import sys
def isPalindrome(s):
return s == s[::-1]
def distinctSubstring(str):
# Put all distinct substring in a HashSet
result = [];
# List All Substrings
for i in range(len(str)):
for j in range(i + 1, len(str) + 1):
# Add each substring in Set
result.append(str[i:j]);
# Return the HashSet
return result;
nq = input().split()
n = int(nq[0])
q = int(nq[1])
s = str(input())
for z in range(q):
abc=input().split()
a=int(abc[0])
b=int(abc[1])
c=int(abc[2])
t=0
if(a==1):
t=int(abc[3])
l = []
print(a,b,c,t)
for i in range(b,c):
if((ord(s[i])+t)>122):
l.append(chr(97+ (ord(s[i])+t)%123))
else:
l.append(chr(ord(s[i])+t))
if(b==0):
print(str(''.join(l))+s[c:])
else:
s= str(s[:b]+str(''.join(l))+s[c:])
print(s)
if(a==2):
if(b==0):
count=0
x= s[:c+1]
w= distinctSubstring(x)
for i in w:
print(i)
if(isPalindrome(i)):
count+=1
print(count)
else:
x= s[b:c+1]
# w= distinctSubstring(x)
# for i in w:
# if(isPalindrome(i)):
# count+=1
count = CountPS(x,len(x))
print(count)
| true |
ecb434b23fd238e2048274e33b817876deed0536 | Python | stavros11/Learning-DMRG | /dmrg.py | UTF-8 | 10,049 | 2.8125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 3 11:48:26 2018
@author: Admin
"""
import numpy as np
import tensorflow as tf
from graphs import Operations
from numpy.linalg import svd
## Using the general eigh function because eigh_tridiagonal does not support complex! ##
## Change in the future for better performance ##
from scipy.linalg import eigh
def diagonalize(alpha, beta):
## Takes diagonal (alpha) and off-diagonal (beta) elements of
## tridiagonal matrix and returns its eigenvalues and eigenvectors
d = len(alpha)
## Create matrix
A = np.diag(alpha)
A[np.arange(d-1), np.arange(1, d)] = beta
A[np.arange(1,d), np.arange(d-1)] = beta
return eigh(A)
###################################################################
### Only DMRG_Hlist is supported currently because tf.einsum ###
### requires placeholders of specific shape ###
###################################################################
class DMRG(object):
def __init__(self, D, d, H0, Hs, HN, lcz_k):
### DMRG parameters ###
## d: Physical dimension of each degree of freedom
## D: List with MPS matrices dimensions (the list ignores first and last site where d=D)
## lcz_k: k-number for Lanczos
self.d, self.D = d, D
## Initialize and normalize states to canonical form
self.initialize_states()
self.normalize_states()
print('\nStates succesfully initialized in right canonical form!\n')
## Create Ops object
self.ops = Operations(D, H0, Hs, HN, lcz_k)
## Open tensorflow session
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
## Initialize R, L matrices and calculate R
self.initialize_RL()
print('R succesfully initialized!')
#############################################
#### Functions that run basic operations ####
#############################################
def initialize_states(self):
## Initialize MPS: List of complex (D, D, d) tensors
self.state = [(np.random.random(size=(D1, D2, self.d)) + 1j * np.random.random(
size=(D1, D2, self.d))).astype(np.complex64) for (D1, D2) in zip(self.D[:-1], self.D[1:])]
## First and last states are (d, d) (for boundary D=d in normal form)
self.state = ([(np.random.random(size=(self.d, self.d)) + 1j * np.random.random(size=(self.d, self.d))).astype(np.complex64)] +
self.state +
[(np.random.random(size=(self.d, self.d)) + 1j * np.random.random(size=(self.d, self.d))).astype(np.complex64)])
def normalize_states(self):
## Start from right
U, S, self.state[-1] = svd(self.state[-1], full_matrices=False)
# (d x d)(d x d stored as (d,))(d x d)
self.state[-2] = np.einsum('abi,bc->aci', self.state[-2], U.dot(np.diag(S)))
# (D, D', d) = (D, D', d) x (D', D') (contraction of second index)
## Repeat for middle states
for i in range(len(self.D) - 1, 1, -1):
U, S, V = svd(self.state[i].reshape(self.D[i-1], self.D[i]*self.d), full_matrices=False)
self.state[i] = V.reshape(self.D[i-1], self.D[i], self.d)
self.state[i-1] = np.einsum('abi,bc->aci', self.state[i-1], U.dot(np.diag(S)))
## Normalize final state
U, S, V = svd(self.state[1].reshape(self.D[0], self.D[1]*self.d), full_matrices=False)
self.state[1] = V.reshape(self.D[0], self.D[1], self.d)
self.state[0] = np.einsum('bi,bc->ci', self.state[0], U.dot(np.diag(S)))
U, S, self.state[0] = svd(self.state[0], full_matrices=False)
def initialize_RL(self):
self.R, self.L = (self.ops.N - 2) * [None], (self.ops.N - 2) * [None]
self.update_R_boundary()
for i in range(self.ops.N - 4, -1, -1):
self.update_R(i)
def sweep(self):
## Update left boundary
energy_list = [self.apply_lanczos0()]
self.update_L_boundary()
## Sweep to right
energy_list.append(self.apply_lanczosM_to_right(1))
self.update_L(1)
for i in range(2, self.ops.N - 2):
energy_list.append(self.apply_lanczosM_to_right(i))
self.update_R(i-2)
self.update_L(i)
print('Site %d'%(i+1))
print('\nRight sweep completed!\n')
## Update right boundary
energy_list.append(self.apply_lanczosN())
self.update_R_boundary()
## Sweep to left
energy_list.append(self.apply_lanczosM_to_left(self.ops.N - 3))
self.update_R(self.ops.N - 4)
for i in range(self.ops.N - 4, 0, -1):
energy_list.append(self.apply_lanczosM_to_left(i))
self.update_R(i-1)
self.update_L(i+1)
print('Site %d'%(i+2))
print('\nLeft sweep completed!\n')
return energy_list
#################################
##### Functions that assist #####
#################################
def apply_lanczos0(self):
## Apply Lanczos
V_lz, alpha, beta = self.sess.run(self.ops.lanczos0, feed_dict={self.ops.plc.R[0] : self.R[0],
self.ops.plc.state[0] : self.state[0],
self.ops.plc.state[1] : self.state[1]})
#V: Lanczos vectors (see lanczos_algorithm functions in lanczos.py)
# alpha: diagonal elements of the tridiagonal matrix, beta: off-diagonal elements
## Diagonalize k x k matrix
eig_vals, eig_vec = diagonalize(alpha, beta)
## Transform the ground state eigenvector to B
B = np.einsum('a,abcd->bcd', eig_vec[0], V_lz)
## Update states by doing SVD on the updated B
self.energy = eig_vals[0]
self.state[0], S, V = svd(B.reshape(self.d, self.D[1]*self.d), full_matrices=False)
self.state[1] = np.einsum('ab,bcd->acd', np.diag(S), V.reshape(self.d, self.D[1], self.d))
return self.energy
def apply_lanczosN(self):
V_lz, alpha, beta = self.sess.run(self.ops.lanczosN, feed_dict={self.ops.plc.L[-1] : self.L[-1],
self.ops.plc.state[-1] : self.state[-2],
self.ops.plc.state[0] : self.state[-1]})
## Diagonalize k x k matrix
eig_vals, eig_vec = diagonalize(alpha, beta)
## Transform the ground state eigenvector to B
B = np.einsum('a,abcd->bcd', eig_vec[0], V_lz)
## Updates
self.energy = eig_vals[0]
U, S, self.state[-1] = svd(B.reshape(self.D[-2]*self.d, self.d), full_matrices=False)
self.state[-2] = np.einsum('abc,cd->adb', U.reshape(self.D[-2], self.d, self.d), np.diag(S))
return self.energy
def apply_lanczos_for_B(self, i):
V_lz, alpha, beta = self.sess.run(self.ops.lanczosM[i-1], feed_dict={self.ops.plc.L[i-1] : self.L[i-1],
self.ops.plc.R[i] : self.R[i],
self.ops.plc.state[i] : self.state[i],
self.ops.plc.state[i+1] : self.state[i+1]})
## Diagonalize k x k matrix
eig_vals, eig_vec = diagonalize(alpha, beta)
## Transform the ground state eigenvector to B
B = np.einsum('a,abcde->bcde', eig_vec[0], V_lz)
return B, eig_vals[0]
def apply_lanczosM_to_right(self, i):
## Here i is the index of the state to be updated: Hence 1 <= i <= N-3
## For i=0 use lanczos0, for i=N-1 use lanczosN
B, self.energy = self.apply_lanczos_for_B(i)
U, S, V = svd(B.reshape(self.D[i-1]*self.d, self.D[i+1]*self.d))
## Assume Di < d D_{i-1} and truncate
U, S, V = U[:, :self.D[i]], S[:self.D[i]], V[:self.D[i]]
## Updates
self.state[i] = np.transpose(U.reshape(self.D[i-1], self.d, self.D[i]), axes=(0, 2, 1))
self.state[i+1] = np.transpose((np.diag(S).dot(V)).reshape(self.D[i], self.d, self.D[i+1]),
axes=(0, 2, 1))
return self.energy
def apply_lanczosM_to_left(self, i):
## Here i+1 is the index of the state to be updated. 1 <= i <= N-3
B, self.energy = self.apply_lanczos_for_B(i)
U, S, V = svd(B.reshape(self.D[i-1]*self.d, self.D[i+1]*self.d))
## Assume Di < d D_{i-1} and truncate
U, S, V = U[:,:self.D[i]], S[:self.D[i]], V[:self.D[i]]
## Updates
self.state[i+1] = np.transpose(V.reshape(self.D[i], self.d, self.D[i+1]), axes=(0, 2, 1))
self.state[i] = np.transpose((U.dot(np.diag(S))).reshape(self.D[i-1], self.d, self.D[i]),
axes=(0, 2, 1))
return self.energy
def update_L(self, i):
## Here i is the index of L to be updated: 1 <= i <= N-3
## For i=0 use boundary function
self.L[i] = self.sess.run(self.ops.L[i], feed_dict={self.ops.plc.L[i-1] : self.L[i-1],
self.ops.plc.state[i] : self.state[i]})
def update_R(self, i):
## Here i is the index of L to be updated: 0 <= i <= N-4
## For i=N-3 use boundary function
self.R[i] = self.sess.run(self.ops.R[i], feed_dict={self.ops.plc.R[i+1] : self.R[i+1],
self.ops.plc.state[i+2] : self.state[i+2]})
def update_R_boundary(self):
self.R[-1] = self.sess.run(self.ops.R[-1], feed_dict={self.ops.plc.state[0] : self.state[-1]})
def update_L_boundary(self):
self.L[0] = self.sess.run(self.ops.L[0], feed_dict={self.ops.plc.state[0] : self.state[0]})
| true |
74f6f1bad8a4febf7c4e809343f71d20f9770418 | Python | nylakhalil/geosense | /process/Raster.py | UTF-8 | 1,316 | 2.609375 | 3 | [] | no_license | import logging
from osgeo import gdal
from model.GeoInfo import GeoInfo
from config.Settings import LOG_FORMAT
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
def process(out_filepath, src_filepath, process_type, color_file=None):
"""
Transform src raster file and output to new raster file
Args:
output_file: File path for output file
src_filepath: File path for source raster file
process_type: Type of process to apply
"""
options = gdal.DEMProcessingOptions(zeroForFlat=True, colorFilename=color_file)
dataset = gdal.DEMProcessing(out_filepath, src_filepath, process_type, options=options)
logging.info("Dataset Processed: {}".format(out_filepath))
dataset = None
def info(filepath):
"""
Get Raster metadata as String
Args:
filepath: String file path to raster
Returns:
String metadata output from GDAL Info
"""
metadata = gdal.Info(filepath)
logging.info("Dataset Metadata: {}".format(metadata))
return metadata
def read(filepath):
"""
Get Raster metadata and data as GeoInfo object
Args:
filepath: String file path to raster
Returns:
GeoInfo object with raster metadata and data
"""
dataset = gdal.Open(filepath)
geoinfo = GeoInfo(dataset=dataset, datatype='GEOTIFF')
logging.info("Dataset Loaded: {}".format(geoinfo))
dataset = None
return geoinfo | true |
303f81c749ca25fe2ccb42bcf18b3ad5ac254278 | Python | zeromtmu/practicaldatascience.github.io | /2016/tutorial_final/6/visualize.py | UTF-8 | 6,320 | 2.671875 | 3 | [
"MIT",
"Apache-2.0"
] | permissive | import matplotlib
import matplotlib.pyplot as plt
import seaborn # or alternatively plt.style.use('ggplot') for a similar look
import numpy as np
import math
from textwrap import fill
from espncricinfo.player import Player
import matplotlib
major_countries = set(["Australia", "Sri Lanka", "West Indies", "Zimbabwe", "New Zealand",
"India", "Pakistan", "Bangladesh", "South Africa", "England"])
#get which team a player belongs to
def get_team(player):
for team in player.major_teams:
for country in major_countries:
if country in team:
return country
return player.major_teams[0]
#get average of batsmen for relevant format of game
def get_bat_average(player):
avg = 0
debut = True
try: #account for API error
for data in player.batting_fielding_averages:
#get appropriate average if non-debutee
if match_class in data.keys()[0]:
debut = False
for level, average in data.iteritems():
avg=float(average[5][1]) #average is in the 6th index
return avg
#get estimate of other performances if debutee
if debut:
count = 0
for data in b.batting_fielding_averages:
for level, average in data.iteritems():
avg+=float(average[5][1]) #average is in the 6th index
count+=1
avg/= count if count > 0 else 1
return avg
except:
return 0
#get average of bowlers for relevant format of game
def get_bowl_average(player):
avg = 0
debut = True
try: #account for API error
for data in player.bowling_averages:
#get appropriate average if non-debutee
if match_class in data.keys()[0]:
debut = False
for level, average in data.iteritems():
avg=float(average[7][1]) #average is in the 8th index
return avg
#get estimate of other performances if debutee
if debut:
count = 0
for data in player.bowling_averages:
for level, average in data.iteritems():
avg+=float(average[7][1]) #average is in the 8th index
count+=1
avg/= count if count > 0 else 1
return avg
except:
return 0
#get strike rate of bowlers for relevant format of game
def get_bowl_sr(player):
sr = 0
debut = True
try: #account for API error
for data in player.bowling_averages:
#get appropriate average if non-debutee
if match_class in data.keys()[0]:
debut = False
for level, average in data.iteritems():
sr=float(average[9][1]) #average is in the 8th index
return sr
#get estimate of other performances if debutee
if debut:
count = 0
for data in player.bowling_averages:
for level, average in data.iteritems():
sr+=float(average[9][1]) #average is in the 8th index
count+=1
sr/= count if count > 0 else 1
return sr
except:
return 0
#displays top 5 players from both teams in bar chart
def display_top(bat, bowl, all_round, match_info):
bat_avgs = []
bowl_avgs = []
all_avgs = []
width = 0.25
d = dict()
#get batting info
for b in bat:
bat_avg = get_bat_average(b)
d[bat_avg] = b
bat_avgs.append(bat_avg)
bat_avgs = [avg for avg in bat_avgs if avg!=None]
#get top in match
num = min(len(bat_avgs),5)
top_bat_avg = sorted(bat_avgs)[-num:]
top_bat = [d[avg].name + " (" + get_team(d[avg]) + ")" for avg in top_bat_avg]
top_bat = [s.replace(" ", "\n") for s in top_bat]
#visualize
f, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(20,20))
plt.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=None, hspace=1)
ax1.bar(range(len(top_bat_avg)), top_bat_avg)
pos = np.arange(len(top_bat))
ax3.set_xlim(min(pos)-width, max(pos)+width*4)
ax1.set_xticks([x + 1.5*width for x in pos])
ax1.set_xticklabels(top_bat)
ax1.set_ylabel("Average")
ax1.set_xlabel("Batsmen (Higher averages are ranked higher)")
ax1.set_title(fill("Top Batsmen in " + match_info + "\n", 30))
#get bowling info
for b in bowl:
bowl_avg = get_bowl_average(b)
d[bowl_avg] = b
bowl_avgs.append(bowl_avg)
bowl_avgs = [avg for avg in bowl_avgs if avg!=None]
#get top in match
num = min(len(bowl_avgs),5)
top_bowl_avg = (sorted(bowl_avgs)[:num])
top_bowl_avg.reverse()
top_bowl = [d[avg].name + " (" + get_team(d[avg]) + ")" for avg in top_bowl_avg]
top_bowl = [s.replace(" ", "\n") for s in top_bowl]
#visualize
ax2.bar(range(len(top_bowl_avg)), top_bowl_avg)
pos = np.arange(len(top_bowl))
ax3.set_xlim(min(pos)-width, max(pos)+width*4)
ax2.set_xticks([x + 1.5*width for x in pos])
ax2.set_xticklabels(top_bowl)
ax2.set_ylabel("Average")
ax2.set_xlabel("Bowlers (Lower averages are ranked higher)")
ax2.set_title(fill("Top Bowlers in " + match_info + "\n",30))
#get all_round info
for a in all_round:
all_avg = get_bat_average(a) - get_bowl_average(a)
d[all_avg] = a
all_avgs.append(all_avg)
#get top in match
num = min(len(all_avgs),5)
top_all_avg = (sorted(all_avgs)[-num:])
top_all = [d[avg].name + " (" + get_team(d[avg]) + ")" for avg in top_all_avg]
top_all = [s.replace(" ", "\n") for s in top_all]
#visualize
ax3.bar(range(len(top_all_avg)), top_all_avg)
pos = np.arange(len(top_all))
ax3.set_xlim(min(pos)-4*width, max(pos)+width*8)
ax3.set_xticks([x + 1.5*width for x in pos])
ax3.set_xticklabels(top_all)
ax3.set_ylabel("Average")
ax3.set_xlabel("Allrounders (Higher averages are ranked higher)")
ax3.set_title(fill("Top Allrounders in " + match_info + "\n",30))
| true |
ffca60e55868a94a5914fd4e5f6d0f8da6f2fd73 | Python | Triomphi/Python | /tkinter/grid.py | UTF-8 | 436 | 3.671875 | 4 | [] | no_license | from tkinter import *
# everything is a widget in tkinter
root = Tk() #create the layout
my_label1 = Label(root, text = 'Hello world!') #create a label
my_label2 = Label(root, text = 'this is my second tkinter work') #create a label
my_label1.grid(row = 0, column = 0) #grid puts the label on the screen in aspecified row and column space
my_label2.grid(row = 1, column = 1)
root.mainloop() #an event loop to know cursor position | true |
a62247a94e776258beb7f903e28800ed08dfd244 | Python | kwakjeeyoon/AI-model | /Basic Computer Vision/transfer_learning/transer_learning.py | UTF-8 | 2,656 | 2.578125 | 3 | [] | no_license | from torchvision import datasets
import torchvision.transforms as transforms
import torch
import torch.optim as optim
dataset = datasets.ImageFolder(root="data/",
transform=transforms.Compose([
transforms.Scale(244),
transforms.CenterCrop(244),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5)),
]))
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=2,
shuffle=True,
num_workers=8)
from torch import nn
from torchvision import models
class MyNewNet(nn.Module):
def __init__(self):
super(MyNewNet, self).__init__()
self.vgg19 = models.vgg19(pretrained=True)
self.linear_layers = nn.Linear(1000, 1)
# Defining the forward pass
def forward(self, x):
x = self.vgg19(x)
return self.linear_layers(x)
EPOCHS = 100
BATCH_SIZE = 64
LEARNING_RATE = 0.001
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=BATCH_SIZE,
shuffle=True,
num_workers=8)
def binary_acc(y_pred, y_test):
y_pred_tag = torch.round(torch.sigmoid(y_pred))
correct_results_sum = (y_pred_tag == y_test).sum().float()
acc = correct_results_sum / y_test.shape[0]
acc = torch.round(acc * 100)
return acc
my_model = MyNewNet()
my_model = my_model.to(device)
for param in my_model.parameters():
param.requires_grad = False
for param in my_model.linear_layers.parameters():
param.requires_grad = True
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(my_model.parameters(), lr=LEARNING_RATE)
for e in range(1, EPOCHS + 1):
epoch_loss = 0
epoch_acc = 0
for X_batch, y_batch in dataloader:
X_batch, y_batch = X_batch.to(device), y_batch.to(device).type(torch.cuda.FloatTensor)
optimizer.zero_grad()
y_pred = my_model(X_batch)
loss = criterion(y_pred, y_batch.unsqueeze(1))
acc = binary_acc(y_pred, y_batch.unsqueeze(1))
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_acc += acc.item()
print(f'Epoch {e + 0:03}: | Loss: {epoch_loss / len(dataloader):.5f} | Acc: {epoch_acc / len(dataloader):.3f}') | true |
dbecdd529dd29315bc486e8dcd1708710ec1aded | Python | JONNY-ME/my-kattis-solution | /palindromicpassword.py | UTF-8 | 515 | 2.953125 | 3 | [] | no_license |
for _ in range(int(input())):
s = input()
a = int(s[:3])
s = int(s)
ds = float("Inf")
n = []
if a - 1 >= 100:
x = int(str(a-1)+str(a-1)[::-1])
k = abs(s-x)
if k == ds:
n.append(x)
elif k < ds:
ds = k
n = [x]
if a + 1 < 1000:
x = int(str(a+1)+str(a+1)[::-1])
k = abs(s-x)
if k == ds:
n.append(x)
elif k < ds:
ds = k
n = [x]
x = int(str(a)+str(a)[::-1])
k = abs(s-x)
if k == ds:
n.append(x)
elif k < ds:
ds = k
n = [x]
print(min(n))
| true |
a8262433e7340b54ef05db6162a1da0c359edadb | Python | liuzi/cs5242_deep_learning | /e0210497_assignment1/codes/nn/operations.py | UTF-8 | 18,507 | 3.078125 | 3 | [] | no_license | import numpy as np
# Attension:
# - Never change the value of input, which will change the result of backward
class operation(object):
    """Abstract base class for a differentiable operation.

    Concrete subclasses implement a `forward` pass mapping inputs to an
    output, and a `backward` pass mapping the gradient w.r.t. that output
    back to gradients w.r.t. the inputs.
    """

    def forward(self, input):
        """Run the forward pass; return the operation's output."""
        raise NotImplementedError

    def backward(self, out_grad, input):
        """Propagate `out_grad` through the op; return the input gradient."""
        raise NotImplementedError
class relu(operation):
    """Rectified linear unit: element-wise max(0, x)."""

    def __init__(self):
        super(relu, self).__init__()

    def forward(self, input):
        """Return `input` with every negative entry clamped to zero."""
        return np.maximum(input, 0)

    def backward(self, out_grad, input):
        """Pass gradient through only where the forward input was >= 0.

        Note: the subgradient at exactly 0 is taken as 1, matching the
        original implementation's `input >= 0` mask.
        """
        mask = input >= 0
        return out_grad * mask
class flatten(operation):
    """Collapse all non-batch dimensions into a single feature axis."""

    def __init__(self):
        super(flatten, self).__init__()

    def forward(self, input):
        """Reshape (batch, d1, d2, ...) into (batch, d1*d2*...).

        Returns a fresh array that does not share memory with `input`.
        """
        return input.reshape(input.shape[0], -1).copy()

    def backward(self, out_grad, input):
        """Restore the flattened gradient to the original input shape."""
        return out_grad.reshape(input.shape).copy()
class matmul(operation):
    """Matrix product of a batch of row vectors with a weight matrix."""

    def __init__(self):
        super(matmul, self).__init__()

    def forward(self, input, weights):
        """Compute input @ weights.

        # Arguments
            input: numpy array, shape (batch, in_features)
            weights: numpy array, shape (in_features, out_features)

        # Returns
            numpy array, shape (batch, out_features)
        """
        return input @ weights

    def backward(self, out_grad, input, weights):
        """Backpropagate through the matrix product.

        # Arguments
            out_grad: gradient w.r.t. the forward output, shape (batch, out_features)
            input: forward input, shape (batch, in_features)
            weights: shape (in_features, out_features)

        # Returns
            in_grad: gradient w.r.t. `input`, same shape as `input`
            w_grad: gradient w.r.t. `weights`, same shape as `weights`
        """
        in_grad = out_grad @ weights.T
        w_grad = input.T @ out_grad
        return in_grad, w_grad
class add_bias(operation):
    """Broadcast-add a per-feature bias vector to a batch of rows."""

    def __init__(self):
        super(add_bias, self).__init__()

    def forward(self, input, bias):
        """Add `bias` to every row of `input`.

        # Arguments
            input: numpy array, shape (batch, in_features)
            bias: numpy array, shape (in_features,)

        # Returns
            numpy array, shape (batch, in_features)
        """
        return input + bias[np.newaxis, :]

    def backward(self, out_grad, input, bias):
        """Backpropagate through the bias addition.

        # Arguments
            out_grad: gradient w.r.t. the forward output, shape (batch, out_features)
            input: forward input, shape (batch, in_features)
            bias: shape (out_features,)

        # Returns
            in_grad: gradient w.r.t. `input` (identical to `out_grad`)
            b_grad: gradient w.r.t. `bias` (sum over the batch axis)
        """
        b_grad = out_grad.sum(axis=0)
        return out_grad, b_grad
class fc(operation):
    """Fully-connected (affine) layer: input @ weights + bias.

    Composes the `matmul` and `add_bias` primitives for both passes.
    """

    def __init__(self):
        super(fc, self).__init__()
        self.matmul = matmul()
        self.add_bias = add_bias()

    def forward(self, input, weights, bias):
        """Apply the affine transform.

        # Arguments
            input: numpy array, shape (batch, in_features)
            weights: numpy array, shape (in_features, out_features)
            bias: numpy array, shape (out_features,)

        # Returns
            numpy array, shape (batch, out_features)
        """
        linear = self.matmul.forward(input, weights)
        return self.add_bias.forward(linear, bias)

    def backward(self, out_grad, input, weights, bias):
        """Backpropagate through bias-add, then through the matrix product.

        # Arguments
            out_grad: gradient w.r.t. the forward output, shape (batch, out_features)
            input: forward input, shape (batch, in_features)
            weights: shape (in_features, out_features)
            bias: shape (out_features,)

        # Returns
            in_grad: gradient w.r.t. `input`, same shape as `input`
            w_grad: gradient w.r.t. `weights`, same shape as `weights`
            b_grad: gradient w.r.t. `bias`, same shape as `bias`
        """
        grad_after_bias, b_grad = self.add_bias.backward(out_grad, input, bias)
        in_grad, w_grad = self.matmul.backward(grad_after_bias, input, weights)
        return in_grad, w_grad, b_grad
def im2col(input, kernel_h, kernel_w, stride, pad):
    """Rearrange sliding convolution patches into rows of a 2-D matrix.

    # Arguments
        input: numpy array, shape (N, C, H, W)
        kernel_h, kernel_w: receptive-field height and width
        stride: step between adjacent receptive fields
        pad: zero-padding applied to each spatial border

    # Returns
        col_input: numpy array, shape (N*out_h*out_w, C*kernel_h*kernel_w);
                   each row is one flattened receptive field
        N: batch size
        out_h, out_w: spatial size of the convolution output
    """
    N, C, H, W = input.shape
    out_h = (H + 2 * pad - kernel_h) // stride + 1
    out_w = (W + 2 * pad - kernel_w) // stride + 1

    padded = np.pad(input, [(0, 0), (0, 0), (pad, pad), (pad, pad)], 'constant')

    # patches[:, :, kh, kw, i, j] holds the (kh, kw) kernel offset of the
    # receptive field whose output position is (i, j).
    patches = np.zeros((N, C, kernel_h, kernel_w, out_h, out_w))
    for kh in range(kernel_h):
        for kw in range(kernel_w):
            patches[:, :, kh, kw, :, :] = padded[:, :,
                                                 kh:kh + stride * out_h:stride,
                                                 kw:kw + stride * out_w:stride]

    col_input = patches.transpose(0, 4, 5, 1, 2, 3).reshape(N * out_h * out_w, -1)
    return col_input, N, out_h, out_w
def col2im(d_col, input_shape, kernel_h, kernel_w, stride, pad):
    """Scatter-add column-matrix gradients back to image layout (inverse of im2col).

    Bug fix: the previous version accumulated patch gradients directly into an
    unpadded (N, C, H, W) array while indexing with *padded* coordinates, so for
    pad > 0 every contribution was shifted by `pad` and silently clipped by
    numpy slicing. We now accumulate into a padded canvas and crop the border.
    Behavior for pad == 0 is unchanged.

    # Arguments
        d_col: numpy array, shape (N*output_h*output_w, C*kernel_h*kernel_w);
               gradient w.r.t. the im2col output
        input_shape: tuple (N, C, H, W) of the original (unpadded) input
        kernel_h, kernel_w: receptive-field height and width
        stride: step between adjacent receptive fields
        pad: zero-padding that was applied in the forward im2col

    # Returns
        in_grad: numpy array, shape (N, C, H, W); gradient w.r.t. the input,
                 with overlapping receptive-field contributions summed
    """
    N, C, H, W = input_shape
    output_h = (H + 2 * pad - kernel_h) // stride + 1
    output_w = (W + 2 * pad - kernel_w) // stride + 1

    col = d_col.reshape(N, output_h, output_w, C, kernel_h, kernel_w).transpose(0, 3, 4, 5, 1, 2)

    # Accumulate in padded coordinates, then drop the padded border.
    padded = np.zeros((N, C, H + 2 * pad, W + 2 * pad))
    for h in range(kernel_h):
        h_max = h + stride * output_h
        for w in range(kernel_w):
            w_max = w + stride * output_w
            padded[:, :, h:h_max:stride, w:w_max:stride] += col[:, :, h, w, :, :]

    return padded[:, :, pad:pad + H, pad:pad + W]
class conv(operation):
    def __init__(self, conv_params):
        """
        # Arguments
            conv_params: dictionary, containing these parameters:
                'kernel_h': The height of kernel.
                'kernel_w': The width of kernel.
                'stride': The number of pixels between adjacent receptive fields in the horizontal and vertical directions.
                'pad': The number of pixels padded to the bottom, top, left and right of each feature map. Here, pad = 2 means a 2-pixel border of padded with zeros
                'in_channel': The number of input channels.
                'out_channel': The number of output channels.
        """
        super(conv, self).__init__()
        self.conv_params = conv_params
        # im2col views cached by forward() and reused by backward().
        self.col_input = None
        self.col_kernel = None

    def forward(self, input, weights, bias):
        """Convolve input with weights and add bias (im2col + matmul).

        # Arguments
            input: numpy array with shape (batch, in_channel, in_height, in_width)
            weights: numpy array with shape (out_channel, in_channel, kernel_h, kernel_w)
            bias: numpy array with shape (out_channel)
        # Returns
            output: numpy array with shape (batch, out_channel, out_height, out_width)
        """
        kernel_h = self.conv_params['kernel_h']
        kernel_w = self.conv_params['kernel_w']
        pad = self.conv_params['pad']
        stride = self.conv_params['stride']
        out_channel = self.conv_params['out_channel']
        # Lower convolution to a single matrix multiply: one row per output
        # position, one column per (in_channel, kh, kw) weight.
        col_input, N, output_h, output_w = im2col(input, kernel_h, kernel_w, stride, pad)
        col_kernel = weights.reshape(out_channel, -1).T
        self.col_input = col_input
        self.col_kernel = col_kernel
        output = np.matmul(col_input, col_kernel) + bias
        # (N*oh*ow, out_channel) -> (N, out_channel, oh, ow)
        output = output.reshape(N, output_h, output_w, -1).transpose(0, 3, 1, 2)
        return output

    def backward(self, out_grad, input, weights, bias):
        """Gradients of the forward pass.

        # Arguments
            out_grad: gradient to the forward output of conv layer, with shape (batch, out_channel, out_height, out_width)
            input: numpy array with shape (batch, in_channel, in_height, in_width)
            weights: numpy array with shape (out_channel, in_channel, kernel_h, kernel_w)
            bias: numpy array with shape (out_channel)
        # Returns
            in_grad: gradient to the forward input of conv layer, with same shape as input
            w_grad: gradient to weights, with same shape as weights
            b_grad: gradient to bias, with same shape as bias
        """
        kernel_h = self.conv_params['kernel_h']
        kernel_w = self.conv_params['kernel_w']
        pad = self.conv_params['pad']
        stride = self.conv_params['stride']
        in_channel = self.conv_params['in_channel']
        out_channel = self.conv_params['out_channel']
        # Flatten spatial/batch dims so each row matches a row of col_input.
        out_grad = out_grad.transpose(0, 2, 3, 1).reshape(-1, out_channel)
        # Bias gradient sums over every output position. The original used
        # keepdims=True, which produced shape (1, out_channel) and violated
        # the documented "same shape as bias" contract.
        b_grad = np.sum(out_grad, axis=0)
        w_grad = np.matmul(self.col_input.T, out_grad)
        w_grad = w_grad.transpose(1, 0).reshape(out_channel, in_channel, kernel_h, kernel_w)
        # Input gradient: spread column gradients back through im2col.
        d_col = np.matmul(out_grad, self.col_kernel.T)
        in_grad = col2im(d_col, input.shape, kernel_h, kernel_w, stride, pad)
        return in_grad, w_grad, b_grad
class pool(operation):
    def __init__(self, pool_params):
        """
        # Arguments
            pool_params: dictionary, containing these parameters:
                'pool_type': The type of pooling, 'max' or 'avg'
                'pool_height': The height of pooling kernel.
                'pool_width': The width of pooling kernel.
                'stride': The number of pixels between adjacent receptive fields in the horizontal and vertical directions.
                'pad': The number of pixels that will be used to zero-pad the input in each x-y direction. Here, pad = 2 means a 2-pixel border of padding with zeros.
        """
        super(pool, self).__init__()
        self.pool_params = pool_params
        # Flat indices of the max element per window, cached for backward.
        self.arg_max = None

    def forward(self, input):
        """
        # Arguments
            input: numpy array with shape (batch, in_channel, in_height, in_width)
        # Returns
            output: numpy array with shape (batch, in_channel, out_height, out_width)
        """
        pool_type = self.pool_params['pool_type']
        pool_height = self.pool_params['pool_height']
        pool_width = self.pool_params['pool_width']
        stride = self.pool_params['stride']
        pad = self.pool_params['pad']
        # One row per (n, oh, ow, channel) window of pool_height*pool_width values.
        col, N, output_h, output_w = im2col(input, pool_height, pool_width, stride, pad)
        col = col.reshape(-1, pool_height * pool_width)
        if pool_type == 'max':
            self.arg_max = np.argmax(col, axis=1)
            output = np.max(col, axis=1)
        elif pool_type == 'avg':
            output = np.average(col, axis=1)
        else:
            raise ValueError('Doesn\'t support \'%s\' pooling.' %
                             pool_type)
        output = output.reshape(N, output_h, output_w, input.shape[1]).transpose(0, 3, 1, 2)
        return output

    def backward(self, out_grad, input):
        """
        # Arguments
            out_grad: gradient to the forward output of pool layer, with shape (batch, in_channel, out_height, out_width)
            input: numpy array with shape (batch, in_channel, in_height, in_width)
        # Returns
            in_grad: gradient to the forward input of pool layer, with same shape as input
        """
        pool_type = self.pool_params['pool_type']
        pool_height = self.pool_params['pool_height']
        pool_width = self.pool_params['pool_width']
        stride = self.pool_params['stride']
        pad = self.pool_params['pad']
        out_grad = out_grad.transpose(0, 2, 3, 1)
        pool_size = pool_height * pool_width
        if pool_type == 'max':
            # Only the argmax element of each window receives the gradient.
            grad = np.zeros((out_grad.size, pool_size))
            grad[np.arange(self.arg_max.size), self.arg_max.flatten()] = out_grad.flatten()
        elif pool_type == 'avg':
            # Every element of a window contributed 1/pool_size to the mean.
            # Fixes the original, which divided the un-called `flatten` method
            # (TypeError), used the wrong divisor, and wrote into the wrong
            # variable (leaving in_grad = None).
            grad = np.zeros((out_grad.size, pool_size))
            grad[:] = (out_grad.flatten() / pool_size)[:, None]
        else:
            raise ValueError('Doesn\'t support \'%s\' pooling.' %
                             pool_type)
        grad = grad.reshape(out_grad.shape + (pool_size,))
        grad_col = grad.reshape(grad.shape[0] * grad.shape[1] * grad.shape[2], -1)
        in_grad = col2im(grad_col, input.shape, pool_height, pool_width, stride, pad)
        return in_grad
class dropout(operation):
    def __init__(self, rate, training=True, seed=None):
        """
        # Arguments
            rate: float[0, 1], the probability of setting a neuron to zero
            training: boolean, apply this layer for training or not. If for training, randomly drop neurons, else DO NOT drop any neurons
            seed: int, random seed to sample from input, so as to get mask, which is convenient to check gradients. But for real training, it should be None to make sure to randomly drop neurons
            mask: the mask with value 0 or 1, corresponding to drop neurons (0) or not (1). same shape as input
        """
        # Call the base-class initializer, consistent with the other
        # `operation` subclasses in this file.
        super(dropout, self).__init__()
        self.rate = rate
        self.seed = seed
        self.training = training
        self.mask = None

    def forward(self, input):
        """Apply inverted dropout (identity when not training).

        # Arguments
            input: numpy array with any shape
        # Returns
            output: same shape as input
        """
        if not self.training:
            return input
        np.random.seed(self.seed)
        p = np.random.random_sample(input.shape)
        # Keep each neuron with probability (1 - rate) and rescale survivors
        # by 1/(1 - rate) so the expected activation is unchanged. The
        # original divided elementwise by (1 - p), which applied a random,
        # rate-independent scale to every surviving neuron.
        self.mask = p > self.rate
        return input * self.mask / (1 - self.rate)

    def backward(self, out_grad, input):
        """
        # Arguments
            out_grad: gradient to forward output of dropout, same shape as input
            input: numpy array with any shape
        # Returns
            in_grad: gradient to forward input of dropout, same shape as input
        """
        if not self.training:
            return out_grad
        # Reuse the mask sampled in forward. Re-sampling here (as the
        # original did) produces a gradient inconsistent with the forward
        # pass whenever seed is None.
        return out_grad * self.mask / (1 - self.rate)
class softmax_cross_entropy(operation):
    def __init__(self):
        super(softmax_cross_entropy, self).__init__()

    def _log_softmax(self, input):
        # Numerically stable log-softmax: shift each row by its max before
        # exponentiating; eps guards the log against a zero partition.
        eps = 1e-12
        shifted = input - np.max(input, axis=1, keepdims=True)
        partition = np.sum(np.exp(shifted), axis=1, keepdims=True)
        return shifted - np.log(partition + eps)

    def forward(self, input, labels):
        """
        # Arguments
            input: numpy array with shape (batch, num_class)
            labels: numpy array with shape (batch,)
        # Returns
            output: scalar, average loss
            probs: the probability of each category
        """
        batch = len(labels)
        log_probs = self._log_softmax(input)
        probs = np.exp(log_probs)
        # Average negative log-likelihood of the true class of each sample.
        output = -1 * np.sum(log_probs[np.arange(batch), labels]) / batch
        return output, probs

    def backward(self, input, labels):
        """
        # Arguments
            input: numpy array with shape (batch, num_class)
            labels: numpy array with shape (batch,)
        # Returns
            in_grad: gradient to forward input of softmax cross entropy, with shape (batch, num_class)
        """
        batch = len(labels)
        probs = np.exp(self._log_softmax(input))
        # d(loss)/d(input) = (softmax - one_hot(labels)) / batch
        in_grad = probs.copy()
        in_grad[np.arange(batch), labels] -= 1
        in_grad /= batch
        return in_grad
| true |
42d0708630d29728ea350fec56fb4f2462414320 | Python | mitcaizi/Leetcode | /Easy/198. House Robber.py | UTF-8 | 591 | 3.46875 | 3 | [] | no_license | """Example 1:
Input: [1,2,3,1]
Output: 4
Explanation: Rob house 1 (money = 1) and then rob house 3 (money = 3).
Total amount you can rob = 1 + 3 = 4.
Example 2:
Input: [2,7,9,3,1]
Output: 12
Explanation: Rob house 1 (money = 2), rob house 3 (money = 9) and rob house 5 (money = 1).
Total amount you can rob = 2 + 9 + 1 = 12."""
class Solution:
    def rob(self, nums: list[int]) -> int:
        """Return the maximum sum obtainable from non-adjacent elements.

        Classic house-robber DP in O(n) time, O(1) space: `prev` is the best
        total up to house i-2, `curr` up to house i-1; each house is either
        robbed (extending `prev`) or skipped (keeping `curr`).

        Note: the original annotated nums as ``List[int]`` without importing
        ``typing.List``, which raised NameError on import; the builtin
        generic ``list[int]`` needs no import.
        """
        prev, curr = 0, 0
        for money in nums:
            prev, curr = curr, max(prev + money, curr)
        return curr
if __name__ == '__main__':
    # Smoke test: print the optimal loot for a sample street of houses.
    answer = Solution().rob([8, 4, 8, 5, 9, 6, 5, 4, 4, 10])
    print(answer)
| true |
e309dd69b79486e32ede72898f969285a2266755 | Python | pdhruv93/deep-learning-scripts | /pi-car/Car.py | UTF-8 | 1,384 | 3.484375 | 3 | [] | no_license | import RPi.GPIO as GPIO
import time
class Car():
def __init__(self):
GPIO.setmode(GPIO.BCM)
GPIO.setmode(GPIO.BCM)
GPIO.setup(17, GPIO.OUT)
GPIO.setup(22, GPIO.OUT)
GPIO.setup(23, GPIO.OUT)
GPIO.setup(24, GPIO.OUT)
print("Created Car Object. Ready To Accept Controls!")
def __exit__(self, exc_type, exc_value, traceback):
GPIO.cleanup()
print("Car Object Cleaned!!")
def stopCar(self):
print("Stopping Car...")
GPIO.output(17, False)
GPIO.output(22, False)
GPIO.output(23, False)
GPIO.output(24, False)
print("Car Stopped!!")
def forward(self):
print("Moving Forward...")
GPIO.output(17, True)
GPIO.output(22, False)
GPIO.output(23, True)
GPIO.output(24, False)
def reverse(self):
print("Moving Reverse...")
GPIO.output(17, False)
GPIO.output(22, True)
GPIO.output(23, False)
GPIO.output(24, True)
def turn_right(self):
print("Moving Right...")
GPIO.output(17, False)
GPIO.output(22, True)
GPIO.output(23, True)
GPIO.output(24, False)
def turn_left(self):
print("Moving Left...")
GPIO.output(17, True)
GPIO.output(22, False)
GPIO.output(23, False)
GPIO.output(24, True) | true |
1d3c8f2a5d664966b59ce0fdfb4f0df0e88fb014 | Python | andrewblim/advent-of-code-2020 | /py/advent_of_code_2020/day04.py | UTF-8 | 1,948 | 2.890625 | 3 | [
"MIT"
] | permissive | import sys
import re
def parse_passport_data(full_data):
    """Split the raw batch file into one dict of fields per passport.

    Passports are separated by blank lines; within a passport, key:value
    pairs are separated by spaces or newlines.
    """
    passports = []
    for record in full_data.split("\n\n"):
        pairs = (token.split(":")
                 for line in record.split("\n")
                 for token in line.split(" "))
        passports.append(dict(pairs))
    return passports
def validate_passport(fields):
    """Part 1 rule: every required field is present ('cid' is optional)."""
    required = ("byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid")
    return all(key in fields for key in required)
def validate_passport2(fields):
    """Part 2 rule: every required field is present AND its value is valid.

    Missing fields surface as KeyError and make the passport invalid.
    """
    try:
        byr = fields["byr"]
        if not re.match(r"^\d\d\d\d$", byr) or not 1920 <= int(byr) <= 2002:
            return False
        iyr = fields["iyr"]
        if not re.match(r"^\d\d\d\d$", iyr) or not 2010 <= int(iyr) <= 2020:
            return False
        eyr = fields["eyr"]
        if not re.match(r"^\d\d\d\d$", eyr) or not 2020 <= int(eyr) <= 2030:
            return False
        hgt = fields["hgt"]
        if not re.match(r"^\d+(cm|in)$", hgt):
            return False
        height, unit = int(hgt[:-2]), hgt[-2:]
        if unit == "cm" and not 150 <= height <= 193:
            return False
        if unit == "in" and not 59 <= height <= 76:
            return False
        if not re.match(r"^#([0-9]|[a-f]){6}$", fields["hcl"]):
            return False
        if fields["ecl"] not in ("amb", "blu", "brn", "gry", "grn", "hzl", "oth"):
            return False
        if not re.match(r"^([0-9]){9}$", fields["pid"]):
            return False
    except KeyError:
        return False
    return True
if __name__ == "__main__":
    # Usage: python day04.py <input-file>
    with open(sys.argv[1], "r") as fp:
        passport_data = fp.read().strip()
    # Parse once; the original parsed the identical input a second time
    # for part 2.
    data = parse_passport_data(passport_data)
    print("Part 1:")
    print(sum(validate_passport(x) for x in data))
    print("Part 2:")
    print(sum(validate_passport2(x) for x in data))
| true |
180caed8db8ae30957310109808ab0e8fad32aa2 | Python | 2021202025/Programming | /Binary.py | UTF-8 | 161 | 3.109375 | 3 | [] | no_license | N = int(input())
a = []
p = []
for _ in range(N):
n = int(input())
a.append(n)
print(a)
for i in range(1, N+1):
x = N//i
p.append(x)
print(p) | true |