blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
1758bddace21ecba08db34451cbdc1add9c3559b | Python | sakura-fly/learnpyqt | /src/子菜单.py | UTF-8 | 795 | 2.671875 | 3 | [] | no_license | import sys
from PyQt5.QtWidgets import QMainWindow, QMenu, QAction, QApplication
class Exmaple(QMainWindow):
    """Minimal PyQt5 window demonstrating a nested (sub-)menu.

    NOTE(review): the class name looks like a typo for "Example"; kept
    as-is because the module-level code below instantiates it by this name.
    """

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Build a "文件" (File) menu holding a "new" action and an
        "import" submenu containing a single "import mail" action."""
        menuBar = self.menuBar()
        fileNume = menuBar.addMenu("文件")
        # Create a new submenu.
        impMenu = QMenu("import", self)
        # addAction attaches an action (menu entry) to the submenu.
        impAct = QAction("import mail", self)
        impMenu.addAction(impAct)
        # Create a stand-alone "new" action.
        newAct = QAction("new", self)
        # Attach the action, then the submenu, to the File menu.
        fileNume.addAction(newAct)
        fileNume.addMenu(impMenu)
        self.resize(600, 600)
        self.setWindowTitle("子菜单")
        self.show()
# Script entry: create the Qt application, show the window, and run the
# event loop until the window is closed.
app = QApplication(sys.argv)
ex = Exmaple()
sys.exit(app.exec_())
| true |
e0fb50eaacad227d1ee77aec3c97f90543838fc9 | Python | Luqiqy/TF_Basis | /Learn1/L12_2Ndarry2tensor.py | UTF-8 | 206 | 2.71875 | 3 | [] | no_license | import tensorflow.compat.v1 as tf
import numpy as np

# Build a one-dimensional ndarray of float32 values.
array = np.array([0, 1, 2], np.float32)
# Convert the ndarray to a TensorFlow tensor (tf is imported above as
# tensorflow.compat.v1).
t = tf.convert_to_tensor(array, tf.float32, name='t')
print(t)
| true |
5bfb97a07e0a843820d0b340ea148c7cab5e1a49 | Python | brydenm/simple-github-user-search | /api_practice_github_user_search.py | UTF-8 | 677 | 3.4375 | 3 | [] | no_license | #import the necessary libraries
import requests
import json
import datetime
# NOTE(review): json and datetime are imported but unused in this script.

print('.......................')
print('..Github user search...')
print('.......................\n\n')

# Ask for a username and build the GitHub users API URL.
username = input("Enter username to search GITHUB: ")
searchurl = 'https://api.github.com/users/' + username

# Issue the GET request.
response = requests.get(searchurl)
# Print the status code showing if the API call was successful (200)
# or not (usually 4xx/5xx).
print("Response status code: {} \n".format(response.status_code))
if response.status_code == 200:
    print('Search success! \n Displaying github user details:')
    print(response.json())
else:
    print('Search not successful :(')
| true |
8e05f855ad8a99b7955586bdc2102de1c433759f | Python | lucaswu/WebScraping | /chapter1/link_crawler.py | UTF-8 | 1,887 | 3.140625 | 3 | [] | no_license | import urllib.request
from urllib.error import URLError,HTTPError,ContentTooShortError
import re
import itertools
from urllib.parse import urljoin
def download(url, num_retries=2, user_agent='lucas', charset='utf-8'):
    """Download *url* and return its body decoded to text, or None on failure.

    Sends a custom User-agent header.  On a 5xx server error the request is
    retried up to *num_retries* more times.

    Args:
        url: absolute URL to fetch.
        num_retries: remaining retry budget for 5xx responses.
        user_agent: value of the User-agent request header.
        charset: fallback encoding when the response declares none.
    """
    print('Downloading:', url)
    request = urllib.request.Request(url)
    request.add_header('User-agent', user_agent)
    try:
        resp = urllib.request.urlopen(request)
        cs = resp.headers.get_content_charset()
        if not cs:
            cs = charset
        html = resp.read().decode(cs)
    except (URLError, HTTPError, ContentTooShortError) as e:
        print('Download error:', e.reason)
        html = None
        if num_retries > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # BUG FIX: propagate user_agent and charset on retry
                # (the original silently reset them to their defaults).
                return download(url, num_retries - 1, user_agent, charset)
    return html
def get_links(html):
    """Return every href target found in anchor tags of *html*."""
    # Match <a ... href="..."> or <a ... href='...'>, case-insensitively,
    # capturing only the URL between the quotes (non-greedy).
    anchor_href = re.compile(r"""<a[^>]+href=["'](.*?)["']""", re.IGNORECASE)
    return [match.group(1) for match in anchor_href.finditer(html)]
def link_crawler(start_url, link_regex):
    """Crawl from *start_url*, following links whose URL matches *link_regex*.

    Uses a LIFO queue (depth-first order) and a ``seen`` set so each URL is
    downloaded at most once.
    """
    crawl_queue = [start_url]
    seen = set(crawl_queue)
    while crawl_queue:
        url = crawl_queue.pop()
        html = download(url)
        if html is None:
            continue
        for link in get_links(html):
            # BUG FIX: re.match takes (pattern, string) in that order; the
            # original passed (link, link_regex), so the filter matched the
            # wrong thing.  Debug prints were removed as well.
            if re.match(link_regex, link):
                abs_link = urljoin(start_url, link)
                if abs_link not in seen:
                    seen.add(abs_link)
                    crawl_queue.append(abs_link)


link_crawler('http://example.webscraping.com', '/(index|view)/')
| true |
32f5a86ac147233d7e26c417631b0ceb38571bc0 | Python | Lrizika/Algorithms | /eating_cookies/eating_cookies.py | UTF-8 | 2,962 | 3.921875 | 4 | [] | no_license | #!/usr/bin/env python
import sys
import json
from typing import Hashable, Any, Iterable
class Memoizer:
    '''
    Class to facilitate memoization of function returns.
    Attributes:
        functions (
            Dict[
                callable: Dict[
                    Tuple[frozenset, frozenset]: Object
                ]
            ]
        )
        A dictionary of functions: dictionary of args: results
    Methods:
        get_result: Gets the result of a function call, either
            by returning the stored result or by running the
            function if no stored results are found.
    '''
    def __init__(self):
        '''
        Inits a new Memoizer.
        '''
        self.functions = {}

    def get_result(self, function: callable, *args, assume_hashable_args=True, **kwargs) -> Any:
        '''
        Gets the result of a function call with specific arguments.
        If the function has been called through get_result before with these
        parameters in this Memoizer, this will return the memoized result.
        Otherwise, it will run the function and memoize the new result.
        Args:
            function (callable): The function to run.
                This should *always* be idempotent or nullipotent.
            *args: Variable length argument list. Passed to function.
            **kwargs: Arbitrary keyword arguments. Passed to function.
            assume_hashable_args: when True, positional args are used
                directly as part of the cache key (skipping the recursive
                make_hashable conversion for them).
        Returns:
            Object: The return value of function.
        '''
        if function not in self.functions:
            self.functions[function] = {}
        # Build a hashable cache key from the call arguments.
        if assume_hashable_args:
            params = (tuple(args), self.make_hashable(kwargs))
        else:
            params = (self.make_hashable(args), self.make_hashable(kwargs))
        if params in self.functions[function]:
            return self.functions[function][params]
        else:
            # Cache miss: run the function once and store its result.
            self.functions[function][params] = function(*args, **kwargs)
            return self.functions[function][params]

    @staticmethod
    def make_hashable(obj) -> Hashable:
        # Recursively convert an arbitrary (possibly nested) object into
        # something hashable so it can serve as part of a dict key.
        try:
            hash(obj)  # isinstance(obj, Hashable) fails on nested objects
            return obj
        except TypeError:
            if isinstance(obj, dict):
                return tuple(sorted((Memoizer.make_hashable((key, value)) for key, value in obj.items())))
            elif isinstance(obj, Iterable):
                return tuple((Memoizer.make_hashable(value) for value in obj))
            # NOTE(review): fallback for unhashable non-iterables;
            # json.dumps may itself raise for arbitrary objects.
            return json.dumps(obj)
# The cache parameter is here for if you want to implement
# a solution that is more efficient than the naive
# recursive solution
def eating_cookies_recursive(n, cache=None):
    # Number of ways Cookie Monster can eat n cookies taking bites of 1, 2
    # or 3 cookies at a time (i.e. ordered compositions of n into parts
    # from {1, 2, 3}), memoized via the Memoizer class above.
    if n < 0:
        # Overshot: this path is not a valid way to finish the cookies.
        return 0
    elif n == 0:
        # Exactly finished: one valid way.
        return 1
    if cache is None:
        cache = Memoizer()
        # Warm the cache bottom-up for all smaller n so the recursion
        # below never has to go deep.
        for i in range(n):
            cache.get_result(
                eating_cookies_recursive,
                i,
                cache=cache
            )
    permutations = 0
    can_eat = [1, 2, 3]
    # Sum the ways to finish after each possible first bite.
    for cookie_count in can_eat:
        permutations += cache.get_result(
            eating_cookies_recursive,
            n - cookie_count,
            cache=cache
        )
    return permutations


# Public alias used by the CLI entry point below.
eating_cookies = eating_cookies_recursive
if __name__ == "__main__":
    # CLI entry point: eating_cookies.py [num_cookies]
    if len(sys.argv) > 1:
        num_cookies = int(sys.argv[1])
        print("There are {ways} ways for Cookie Monster to eat {n} cookies.".format(ways=eating_cookies(num_cookies), n=num_cookies))
    else:
        print('Usage: eating_cookies.py [num_cookies]')
| true |
93b3956a630c69b53f0ba4a6b68635af9b980857 | Python | LearningPygame/LearningPygameBasics | /1st jogo/o crl.py | UTF-8 | 2,068 | 3.078125 | 3 | [] | no_license | import pygame, sys, random
from pygame.locals import *
# Window dimensions, spawn-cooldown counter (z), and the display surface
# shared as module-level state by the Ball class below.
w = 800
h = 480
z = 0
screen = pygame.display.set_mode((w, h))
pygame.display.update()
class Ball:
    # A bouncing ball: falls under crude "gravity", bounces off the bottom
    # edge with a damped bounce force, and disappears once its `life`
    # counter runs out.  Relies on module-level `screen`, `h` and `ball`.
    def __init__(self, radius, y, x, color, size, maxforce, force, life):
        self.y = y
        self.x = x
        self.size = size          # NOTE(review): stored but never used
        self.maxforce = maxforce  # cap on falling speed / bounce strength
        self.force = force        # current vertical speed (positive = down)
        self.radius = radius
        self.color = color
        self.life = life          # frames remaining before removal
        pygame.draw.circle(screen, self.color, (self.x, self.y), self.radius)

    def fall(self):
        # Advance one physics step and redraw the ball.
        if self.y < h - self.radius:
            self.y += self.force
            if self.force < self.maxforce:
                self.force += 1  # accelerate downwards
        elif self.y > h - self.radius or self.y == h - self.radius:
            # Hit the floor: clamp just above it and reverse direction
            # with a halved maximum force (damped bounce).
            self.y = h - self.radius - 1
            self.force = self.force * -1
            self.maxforce = self.maxforce / 2
        pygame.draw.circle(screen, self.color, (self.x, self.y), self.radius)
        self.life -= 1
        if self.life < 0:
            # NOTE(review): mutates the global `ball` list while the main
            # loop below is iterating over it, which can skip elements.
            ball.remove(self)
clock = pygame.time.Clock()
ball = []  # live Ball instances, drawn each frame
ball.append(Ball(25, 250, 250, (random.randint(1,255),random.randint(1,255),random.randint(1,255)),"L", 25, 1 ,100))
# Main loop: spawn a randomly-coloured ball at the cursor while the mouse
# button is held (with a 3-frame cooldown), then step and draw every ball.
while 1:
    clock.tick(60)  # cap at 60 FPS
    x, y = pygame.mouse.get_pos()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        elif event.type == MOUSEBUTTONDOWN:
            z = 1  # request spawning a ball at the cursor
        elif event.type == MOUSEBUTTONUP:
            z = 0
    if z == 1:
        ball.append(Ball(25, y, x, (random.randint(1,255),random.randint(1,255),random.randint(1,255)),"L", 25, 1 ,100))
        z = 3  # cooldown so a held click does not spawn every frame
    elif z > 1:
        z -= 1
    screen.fill((0, 0, 0))
    for i in ball:
        i.fall()
    pygame.display.update()
| true |
c7744d8125c09ed2ba150b7f07a3c7b56abb78f7 | Python | jiuzheyangyo/yiziton | /mongo_client.py | UTF-8 | 1,036 | 2.515625 | 3 | [] | no_license | import pymongo
import config
# client = config.get_mongo_client()
def getClient(env="prod"):
    # Thin wrapper around config.get_mongo_client; *env* selects the
    # target environment (e.g. "prod", "test", "localhost").
    return config.get_mongo_client(env)
def get_col_op(dbName, colName, check_db=True, check_col=True, env="localhost"):
    """Return the pymongo collection object for *env*/*dbName*/*colName*.

    When *check_db* / *check_col* is True, raise NameError if the database
    or collection does not already exist on the server.
    """
    client = getClient(env)
    # BUG FIX: the original tested `not db_flag & check_db`; because `&`
    # binds tighter than `not`, it raised whenever the existence check was
    # DISABLED (and also when the db existed with check_db=False).
    # Raise only when the check is enabled AND the name is missing.
    if check_db and dbName not in client.list_database_names():
        raise NameError("db of %s is no exists" % dbName)
    db = client[dbName]
    if check_col and colName not in db.list_collection_names():
        raise NameError("col of %s is no exists" % colName)
    col_op = db[colName]
    return col_op
def get_col_op_prod(dbName, colName, check_db=True, check_col=True):
    # Convenience wrapper: same as get_col_op but pinned to env "prod".
    return get_col_op(dbName, colName, check_db, check_col, "prod")

def get_col_op_test(dbName, colName, check_db=True, check_col=True):
    # Convenience wrapper: same as get_col_op but pinned to env "test".
    return get_col_op(dbName, colName, check_db, check_col, "test")
| true |
620fc74a374adc8bcdc89bbced0d109679db36eb | Python | hugobowne/noworkflow | /tests/test_func.py | UTF-8 | 143 | 2.859375 | 3 | [
"MIT"
] | permissive | def h(a):
return a
def f(a):
return h
g = f
a = 1
b = 2
c = g(3)(2)
d = (f(a) if a else g(a))(a)
d = [
h(a) + h(b),
h(a), h(c)
]
print(d) | true |
28656adeea2b0e79829383ce79c36950a1922aa2 | Python | developer69K/RealSense | /Arduino/libraries/RasPiBot202V2/pi/astar.py | UTF-8 | 9,631 | 2.515625 | 3 | [
"MIT"
] | permissive | # astar.py
# Source: https://github.com/DrGFreeman/RasPiBot202V2
#
# MIT License
#
# Copyright (c) 2017 Julien de la Bruere-Terreault <drgfreeman@tuta.io>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# This module defines the AStar class as a python interface to the Pololu
# A-Star 32U4 robot controller with Raspberry Pi bridge.
import time
import threading
import smbus
import struct
import sys
class AStar:
    """Python-side interface to the Pololu A-Star 32U4 robot controller
    over I2C (SMBus 1) via the Raspberry Pi bridge.

    A background thread (see run/_run) continuously mirrors state between
    this object's attributes and the controller's shared buffer: LED,
    servo, buzzer and motor commands are written out, while button,
    odometry and battery readings are read back.

    NOTE(review): the _lock* attributes are plain booleans used for
    busy-wait signalling between the I/O thread and callers; they are not
    real locks, so concurrent access is not strictly atomic.
    """
    def __init__(self):
        self._bus = smbus.SMBus(1)  # I2C bus 1 on the Raspberry Pi
        # struct/bytes handling differs between Python 2 and 3 (see
        # _read_unpack/_write_pack).
        self._version = sys.version_info.major
        # LED states (0 = off, 1 = on), pushed to the A-Star each cycle.
        self.ledYellow = 0
        self.ledGreen = 0
        self.ledRed = 0
        # Button states, refreshed from the A-Star each cycle.
        self._buttonA = 0
        self._buttonB = 0
        self._buttonC = 0
        # Motion command: forward speed (mm/s) and turn rate (rad/s).
        self._fwdSpeed = 0
        self._turnRate = 0
        self._lockSpeeds = False
        # Odometry: position (mm) and heading (rad), read from the A-Star.
        self._x = 0
        self._y = 0
        self._phi = 0
        self._lockOdometer = False
        self._batteryMV = 0  # battery level in millivolts
        self._lockBattery = False
        # Servo pulse widths in quarter-microseconds (0 = disabled).
        self._panServo = 0  # Servo is disabled by default
        self._tiltServo = 0  # Servo is disabled by default
        self._mastServo = 0  # Servo is disabled by default
        self._lockServos = False
        self._notes = ''  # pending buzzer note string
        self._resetOdometer = True  # request odometer reset on start-up
        self.run()
        # Wait to ensure we can read/write the buffer once before starting
        time.sleep(.05)
        # Print battery level
        print("RPB202")
        print("Battery level: " + str(round(self.getBatteryVolts(), 2)) + "V")

    def _read_unpack(self, address, size, format):
        """Reads data from the I2C bus."""
        # Select the buffer address, then read `size` bytes one at a time
        # and unpack them with the given struct format.
        self._bus.write_byte(20, address)
        time.sleep(0.0001)
        byte_list = [self._bus.read_byte(20) for _ in range(size)]
        if self._version == 3:
            # Python version 3
            return struct.unpack(format, bytes(byte_list))
        else:
            # Python version 2
            return struct.unpack(format, bytes(bytearray(byte_list)))

    def _write_pack(self, address, format, *data):
        """Writes data to the I2C bus."""
        if self._version == 3:
            # Python version 3
            data_array = list(struct.pack(format, *data))
        else:
            # Python version 2
            data_array = map(ord, list(struct.pack(format, *data)))
        self._bus.write_i2c_block_data(20, address, data_array)
        time.sleep(0.0001)

    def close(self):
        """Stops the I2C communication with the A-Star controller. This method
        also stops the motors and turns off the A-Star LEDs."""
        # Stop the running thread
        self._active = False
        # Stop the motors
        self.setSpeeds(0, 0)
        # Write the motors speeds directly to the I2C bus
        # NOTE(review): direct writes here may interleave with the worker
        # thread's final cycle before it observes _active == False.
        self._write_pack(6, 'hh', 0, 0)
        # Turn LEDs off
        self.setYellowLED(0)
        self.setGreenLED(0)
        self.setRedLED(0)
        # Write the LED values directly to the I2C bus
        self._write_pack(0, 'BBB', 0, 0, 0)

    def run(self):
        """Starts continuous I2C communication with A-Star controller in a
        dedicated thread."""
        self._active = True
        th = threading.Thread(target = self._run, args = [])
        th.start()

    def _run(self):
        """Runs continuous I2C communication with A-Star controller. Runs as
        long as AStar._active attribute is True. Call AStar.close() to stop the
        thread."""
        while self._active:
            try:
                # Read from buffer
                # Buttons
                self._buttonA, self._buttonB, self._buttonC = \
                    self._read_unpack(3, 3, '???')
                # Odometer
                self._lockOdometer = True
                self._x, self._y, phi = self._read_unpack(10, 6, 'hhh')
                # Convert phi reading from 1/1000 of radians to radians
                self._phi = phi / 1000.
                self._lockOdometer = False
                # Battery level
                self._lockBattery = True
                self._batteryMV = self._read_unpack(17, 2, 'H')[0]
                self._lockBattery = False
                # Write to buffer
                # Reset odometer on start-up
                if self._resetOdometer:
                    self._resetOdometer = False
                    self._write_pack(16, 'B', 1)
                    time.sleep(.02)
                else:
                    self._write_pack(16, 'B', 0)
                # LEDs
                self._write_pack(0, 'BBB', self.ledYellow, self.ledGreen, \
                    self.ledRed)
                # Servos
                self._lockServos = True
                self._write_pack(34, 'HHH', self._panServo, self._tiltServo, \
                    self._mastServo)
                self._lockServos = False
                # Notes (sent once, then cleared)
                if self._notes != "":
                    self._write_pack(19, 'B15s', 1, self._notes.encode('ascii'))
                    self._notes = ""
                # Motors (turn rate in 1/1000 of radians / s)
                self._lockSpeeds = True
                turnRate = int(self._turnRate * 1000)
                self._write_pack(6, 'hh', self._fwdSpeed, turnRate)
                self._lockSpeeds = False
            except IOError:
                # Handle I2C communication error
                raise IOError("IOError in AStar class")
        self.close()

    def buttonAIsPressed(self):
        """Returns True if the A-Star button A is pressed, False otherwise."""
        return self._buttonA

    def buttonBIsPressed(self):
        """Returns True if the A-Star button B is pressed, False otherwise."""
        return self._buttonB

    def buttonCIsPressed(self):
        """Returns True if the A-Star button C is pressed, False otherwise."""
        return self._buttonC

    def getBatteryVolts(self):
        """Returns the robot battery level in Volts."""
        while self._lockBattery:
            # Wait while battery attribute is locked
            pass
        return self._batteryMV / 1000.

    def getOdometerPhi(self):
        """Returns the phi angle of the robot from the odometer in radians
        (0 <= phi < 2*Pi). 0 corresponds to the robot pointing in the positive x
        direction. The angle increases turning in direction of the positive y
        axis (left turn).
        """
        while self._lockOdometer:
            # Wait while odometer attributes are locked
            pass
        return self._phi

    def getOdometerXY(self):
        """Returns the x and y position of the robot from the odometer in mm."""
        while self._lockOdometer:
            # Wait while odometer attributes are locked
            pass
        return self._x, self._y

    def setYellowLED(self, value = 0):
        """Sets the A-Star yellow led status (0 = Off, 1 = On)."""
        if value == 0:
            self.ledYellow = 0
        else:
            self.ledYellow = 1

    def setGreenLED(self, value = 0):
        """Sets the A-Star green led status (0 = Off, 1 = On)."""
        if value == 0:
            self.ledGreen = 0
        else:
            self.ledGreen = 1

    def setRedLED(self, value = 0):
        """Sets the A-Star red led status (0 = Off, 1 = On)."""
        if value == 0:
            self.ledRed = 0
        else:
            self.ledRed = 1

    def setPanServo(self, us_4 = 0):
        """Sets the pan servo pulse width value in quarter-microseconds."""
        while self._lockServos:
            # Wait while servos attributes are locked
            pass
        self._panServo = us_4

    def setTiltServo(self, us_4 = 0):
        """Sets the tilt servo pulse width value in quarter-microseconds."""
        while self._lockServos:
            # Wait while servos attributes are locked
            pass
        self._tiltServo = us_4

    def setMastServo(self, us_4 = 0):
        """Sets the mast servo pulse width value in quarter-microseconds."""
        while self._lockServos:
            # Wait while servos attributes are locked
            pass
        self._mastServo = us_4

    def playNotes(self, notes):
        """Play the specified notes on the A-Star buzzer. Refer to the Pololu
        Buzzer documentation for details on how to use the buzzer."""
        self._notes = notes

    def resetOdometer(self):
        """Resets the odometer on the A-Star."""
        self._resetOdometer = True

    def setSpeeds(self, fwdSpeed = 0, turnRate = 0):
        """Sets the robot speed in mm/s and turn rate in radians/s"""
        while self._lockSpeeds:
            # Wait while speds attributes are locked
            pass
        self._fwdSpeed = fwdSpeed
        self._turnRate = turnRate
| true |
da4c7ba99040e1d8e1da9b6d01cd78871dba8fd0 | Python | RogerZhangsc/PhotoRestoration | /python/sparse_representation/find_similar_faces.py | UTF-8 | 1,615 | 2.703125 | 3 | [] | no_license | import cv2 as cv
from scipy.sparse import csc_matrix, lil_matrix
from scipy.optimize import minimize
import numpy as np
import os
# Sparse-representation face search (Python 2 code: print statements and
# integer `/` division on shape indices below).
faces_path = '/home/ave/Documents/10kfaces/10k_US_Adult_Faces_Database/Face_Images_Grayscale/'
query_img_path = "/home/ave/Downloads/avery.jpg"

# Load the query image in grayscale and crop a 256x150 window around its
# centre so it matches the cropped database faces below.
query_img = cv.imread(query_img_path,0)
query_img = query_img[query_img.shape[0]/2-128:query_img.shape[0]/2+128,
                      query_img.shape[1]/2-75:query_img.shape[1]/2+75]

img_paths = [path for path in os.listdir(faces_path) if path.endswith(".jpg")]
num_faces = len(img_paths)
# 38400 is len of cropped image vector, original image is 256 x 150
vectorized_images = np.zeros((38400,num_faces))
# cannot get sparse representation to work with minimize for some reason
# coeffecient_vec = csc_matrix((num_faces),dtype = np.float64)
coeffecient_vec = np.zeros(num_faces)
min_func = lambda x: abs(x).sum(axis=0).sum()  # L1 norm of the coefficients
print min_func(coeffecient_vec)
# Build the dictionary matrix: one flattened 256x150 centre crop per column.
for i in range(len(img_paths)):
    face_img = cv.imread(faces_path + img_paths[i], 0)
    # Smallest pic only has 150 width, so lets try just grabbing that much to start
    center = face_img.shape[1]/2
    cropped_face = face_img[:, center-75:center+75].flatten()
    try:
        vectorized_images[:,i] = cropped_face
    except:
        # Image too small/large to fit the 38400-long column.
        print "Image Sizer Error"
        print "Image path:", img_paths[i]
        print "Image num:", i
        print "Uncropped Shape:", face_img.shape
        print "Cropped Shape:", cropped_face.shape
min_func = lambda x: abs(x).sum(axis=0).sum()
# NOTE(review): `cons` is built but never passed to minimize, and its
# constraint closes over the initial zero `coeffecient_vec` rather than
# the optimisation variable `x` — so the SLSQP call below minimises the
# L1 norm unconstrained and returns the trivial all-zero solution.
cons = ({'type': 'eq', 'fun': lambda x: vectorized_images * coeffecient_vec - query_img})
res = minimize(min_func, coeffecient_vec, method='SLSQP',options={'disp': True})
c75eaaf75ae9405234448a204c84e0fcd9095f56 | Python | surendrasah/python-send-stats | /3-stats-wavefront.py | UTF-8 | 2,018 | 2.96875 | 3 | [] | no_license | import time
import socket
import math
import random
import atexit
def format_measurement_data_wavefront(data):
    """Render *data* ({metric-name: value}) as Wavefront line protocol.

    One newline-terminated line per metric:
    ``prefix_metric_name.<key> <value> <epoch-secs> source=localhost
    format="wavefront"``.
    """
    return ''.join(
        f'prefix_metric_name.{name} {measurement} '
        f'{math.floor(time.time())} '
        f'source=localhost format="wavefront"\n'
        for name, measurement in data.items()
    )
class StatsReporter:
    """Pushes measurement dicts over a socket (e.g. to a local telegraf
    listener), re-creating the socket whenever a send fails."""
    def __init__(
        self,
        socket_type,
        socket_address,
        encoding='utf-8',
        formatter=None
    ):
        # socket_type is the (family, type) tuple passed to socket.socket();
        # socket_address is the address tuple passed to connect().
        self._socket_type = socket_type
        self._socket_address = socket_address
        self._encoding = encoding
        # formatter turns a measurement dict into the wire string;
        # defaults to plain str().
        self._formatter = formatter if formatter else lambda d: str(d)
        self.create_socket()

    def create_socket(self):
        """Open and connect the underlying socket, logging any error."""
        try:
            sock = socket.socket(*self._socket_type)
            sock.connect(self._socket_address)
            self._sock = sock
            print('Created socket')
        except socket.error as e:
            # NOTE(review): on failure self._sock stays unset, so the next
            # send_data()/close_socket() raises AttributeError (handled there).
            print(f'Got error while creating socket: {e}')

    def close_socket(self):
        """Close the socket, logging (but swallowing) errors."""
        try:
            self._sock.close()
            print('Closed socket')
        except (AttributeError, socket.error) as e:
            print(f'Got error while closing socket: {e}')

    def send_data(self, data):
        """Format *data* and send it; on failure recreate the socket."""
        try:
            sent = self._sock.send(
                self._formatter(data).encode(self._encoding)
            )
            print(f'Sending sample data... {sent}')
        except (AttributeError, socket.error) as e:
            print(f'Got error while sending data on socket: {e}')
            # attempt to recreate socket on error
            self.close_socket()
            self.create_socket()
# Wire a reporter to the local listener on TCP 8094 and stream a sample
# every second (runs forever until interrupted).
reporter = StatsReporter(
    (socket.AF_INET, socket.SOCK_STREAM),
    ('127.0.0.1', 8094),
    formatter=format_measurement_data_wavefront
)
atexit.register(reporter.close_socket)

while True:
    reporter.send_data({'value1': 10, 'value2': random.randint(1, 10)})
    time.sleep(1)
| true |
00c1d5f5967d071ba2bb09d8c9a59bc490782d07 | Python | leechuanfeng/annxor | /annxor.py | UTF-8 | 3,904 | 2.796875 | 3 | [] | no_license | # the structure of neural network:
# input layer with 2 inputs
# 1 hidden layer with 2 units, tanh()
# output layer with 1 unit, sigmoid()
import numpy as np
import scipy
from scipy.special import expit
import math
def run():
    # Train the 2-2-1 XOR network for each (iteration count, learning rate)
    # combination and dump error/output/weight histories to text files.
    X = loadData("XOR.txt")
    W = paraIni()
    # One demonstration pass and backprop step (results unused beyond this).
    intermRslt = feedforward(X, W)
    Y = X[:, len(X[0])-1:len(X[0])]   # target column
    Yhat = intermRslt[2]              # network output
    B = backpropagate(X, W, intermRslt, 0.5)
    numIter = [100, 1000, 5000, 10000]
    alp = [0.01, 0.5]
    for i in range(len(numIter)):
        for j in range(len(alp)):
            R = FFMain("XOR.txt", numIter[i], alp[j])
            # R = [error history, final outputs, [wh, wo]]
            np.savetxt('Error(numIter=' + repr(numIter[i]) + ',alp='+ repr(alp[j])+ ')' + '.txt', R[0], fmt="%.8f")
            np.savetxt('Output(numIter=' + repr(numIter[i]) + ',alp='+ repr(alp[j])+ ')' + '.txt', R[1], fmt="%.8f")
            np.savetxt('NewHidden(numIter=' + repr(numIter[i]) + ',alp='+ repr(alp[j])+ ')' + '.txt', R[2][0], fmt="%.8f")
            np.savetxt('NewOutput(numIter=' + repr(numIter[i]) + ',alp='+ repr(alp[j])+ ')' + '.txt', R[2][1], fmt="%.8f")
def loadData(Filename):
    """Load a space-separated numeric dataset and prepend a bias column.

    Each line of *Filename* holds space-separated numbers (features
    followed by the target in the last column).  Returns an (n, m+1)
    numpy array whose first column is all ones (the bias input).
    """
    X = []
    # BUG FIX: use a context manager so the file handle is closed
    # (the original opened the file and never closed it).
    with open(Filename, "r") as text_file:
        for line in text_file:
            words = line.split(' ')
            # Convert every field on the line to float.
            X.append([float(word) for word in words])
    X = np.asarray(X)
    n, m = X.shape  # for generality
    X0 = np.ones((n, 1))
    X = np.hstack((X0, X))
    return X
def paraIni():
    """Randomly initialise the fixed 2-2-1 network's weights.

    Returns [wh, wo]: hidden-layer weights (2x3, i.e. 2 units x
    [bias, x, y]) and output-layer weights (1x3), each drawn uniformly
    from (-1.0001, 1.0001) and clipped into [-1, 1].
    """
    def draw_clipped(shape):
        # One uniform sample per weight, clipped into [-1, 1].
        w = np.random.uniform(low=-1.0001, high=1.0001, size=shape)
        np.clip(w, -1.0, 1.0, out=w)
        return w

    hidden_weights = draw_clipped((2, 3))
    output_weights = draw_clipped((1, 3))
    return [hidden_weights, output_weights]
def feedforward(X, paras):
    """One forward pass through the 2-2-1 network.

    X rows are [bias, x, y, target]; paras = [wh, wo].  Returns
    [oh, ino, oo]: tanh hidden activations (2 x n), the same with a bias
    row prepended (3 x n), and the sigmoid output (1 x n).
    """
    weights_hidden, weights_out = paras
    inputs = X[:, :-1]                           # drop the target column
    hidden_out = np.tanh(weights_hidden @ inputs.T)
    n_samples = hidden_out.shape[1]
    bias_row = np.ones((1, n_samples))
    hidden_with_bias = np.vstack((bias_row, hidden_out))
    final_out = expit(weights_out @ hidden_with_bias)
    return [hidden_out, hidden_with_bias, final_out]
def errCompute(Y, Yhat):
    """Half mean squared error: J = sum((Y - Yhat)^2) / (2 * n_samples).

    Y is an (n, 1) column of targets; Yhat is the (1, n) network output
    (only its first row is used, matching the original implementation).
    """
    predictions = Yhat[0]
    # Vectorised replacement for the original O(n) Python loop; returns a
    # scalar instead of a length-1 array, which callers assign/print the
    # same way.
    return np.sum((Y[:, 0] - predictions) ** 2) / (2 * len(predictions))
def backpropagate(X, paras, intermRslt, alpha):
    """One gradient-descent step; returns the updated weights [wh, wo].

    The /4.0 averages the update over the 4 XOR training samples.
    NOTE(review): as in the original, the hidden-layer delta is propagated
    through the *already updated* output weights (textbook backprop would
    use the pre-update ones); preserved to keep behaviour identical.
    """
    targets = X[:, -1:]
    inputs = X[:, :-1]
    hidden_out, hidden_with_bias, oo_full = intermRslt
    output_out = oo_full[0]
    wh, wo = paras

    # Output-layer error term: (t - o) * o * (1 - o)  (sigmoid derivative).
    delta_out = (targets.T - output_out) * output_out * (1.0 - output_out)
    wo = wo + (alpha * (delta_out @ hidden_with_bias.T)) / 4.0

    # Hidden-layer error term through the (updated) output weights,
    # excluding the bias weight; tanh derivative is 1 - oh^2.
    back = wo[:, 1:].T @ delta_out
    delta_hidden = back * (1.0 - hidden_out * hidden_out)
    wh = wh + alpha * (delta_hidden @ inputs) / 4.0
    return [wh, wo]
def FFMain(filename, numIteration, alpha):
    # Full training driver: load the data, initialise weights, then run
    # numIteration epochs of feedforward + backprop with learning rate
    # alpha.  Returns [error history, final outputs, final weights].
    #data load
    X = loadData(filename)
    #
    W = paraIni()
    #number of features (columns); the last column is the target
    n = X.shape[1]
    #per-epoch error history
    errHistory = np.zeros((numIteration,1))
    for i in range(numIteration):
        #feedforward
        intermRslt = feedforward(X,W)
        #Cost function on the target column
        errHistory[i,0] = errCompute(X[:,n-1:n],intermRslt[2])
        #backpropagate
        W = backpropagate(X,W,intermRslt,alpha)
    # Rounded predictions; NOTE(review): computed but not returned/used.
    Yhat = np.around(intermRslt[2])
    return [errHistory,intermRslt[2],W]
7577909dadf95cdb1b2fb823b041fdf3488ea8e6 | Python | buildtesters/buildtest | /buildtest/exceptions.py | UTF-8 | 2,136 | 3.203125 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"OpenPBS-2.3"
] | permissive | import yaml
class BuildTestError(Exception):
    """Class responsible for error handling in buildtest. This is a sub-class
    of Exception class.

    The primary message and any extra arguments are joined into a single
    newline-separated message string stored on ``self.msg``.
    """

    def __init__(self, msg, *args):
        """Build the combined error message.

        Args:
            msg (str): message to print
            *args (list): extra arguments appended (one per line) to the message
        """
        parts = [msg]
        parts.extend(str(arg) for arg in args)
        self.msg = "\n".join(parts)
        # Initialise Exception so e.args and pickling behave normally
        # (the original never called super().__init__).
        super().__init__(self.msg)

    def __str__(self):
        return repr(self.msg)
class BuildspecError(Exception):
    """Exception if there is an issue with parsing a Buildspec or building test.

    When a *buildspec* path is supplied, the message is prefixed with it:
    ``[<buildspec>]: <msg>``.
    """

    def __init__(self, msg, buildspec=None):
        # Prefix the message with the buildspec path when one is given.
        self.msg = f"[{buildspec}]: {msg}" if buildspec else msg
        # Initialise Exception so str(e) and e.args behave normally
        # (the original never called super().__init__; the dead
        # commented-out __str__ variant was removed).
        super().__init__(self.msg)

    def get_exception(self):
        """Return the repr of the formatted message."""
        return repr(self.msg)
class InvalidBuildspec(BuildspecError):
    """Raised when a buildspec is invalid."""

class InvalidBuildspecSchemaType(BuildspecError):
    """Raised when a buildspec declares an invalid schema 'type'."""

class InvalidBuildspecExecutor(BuildspecError):
    """Raised when a buildspec references an invalid 'executor'."""

class ExecutorError(Exception):
    """Raised on errors in the Executor class and its operations."""

class RuntimeFailure(Exception):
    """Raised when there is an error running a test."""
class ConfigurationError(Exception):
    """ConfigurationError is raised when their is an issue with buildtest configuration file"""

    def __init__(self, config, settings_file, msg):
        self.config = config                # parsed configuration dict
        self.settings_file = settings_file  # path of the offending file
        self.msg = msg
        # Dump the offending configuration so the user sees what failed.
        # NOTE(review): printing from an exception constructor is a side
        # effect that fires even if the exception is caught and discarded.
        print(yaml.dump(self.config, default_flow_style=False, sort_keys=False))

    def __str__(self):
        return repr(f"[{self.settings_file}]: {self.msg}")
| true |
bf12560e8837d7fab23cfd96ae9a9c96b6eaee20 | Python | Utsav-Raut/ETL | /nike/utils/compare.py | UTF-8 | 1,790 | 2.609375 | 3 | [] | no_license | import findspark
findspark.init()
from pyspark.sql import SparkSession
# from pyspark.sql import functions as f
# from pyspark.sql import HiveContext
# sc = SparkSession.builder.appName('Compare').getOrCreate()
# hc = HiveContext(sc)
# df1 = sc.read.json('/home/boom/Desktop/main_proj_nike/demo_table.json')
# df1.show()
# df2 = hc.table('mock1.demo_table') # reading hive table
# df2.collect()
# df2.show()
# join_df = df1.join(df2,[df1.TransactionID == df2.TransactionID])
# res_df = join_df.filter(df1.salary != df2.salary).select(df1.TransactionID.alias('TransactionID'),df1.salary.alias('json_salary'),df2.salary.alias('table_salary'))
# res_df.show()
def get_file_details():
    # Read the local JSON export into a Spark DataFrame.
    spark = SparkSession.builder.appName('Read File').getOrCreate()
    df = spark.read.json('/home/boom/Desktop/main_proj_nike/demo_table.json')
    return df

def get_hive_tbl_details():
    # Read the Hive table mock1.demo_table into a Spark DataFrame.
    spark = SparkSession.builder.appName('Read Table').enableHiveSupport().getOrCreate()
    spark.sql('use mock1')
    df = spark.sql('select * from demo_table')
    return df

def compare_data():
    # Join the file-based and Hive-based DataFrames on TransactionID and
    # keep rows whose TransactionAmt differs between the two sources.
    df2 = get_hive_tbl_details()
    df1 = get_file_details()
    df1.show()
    df2.show()
    join_df = df1.join(df2,[df1.notifications.POSLog.Transaction.TransactionID == df2.notifications.POSLog.Transaction.TransactionID])
    res_df = join_df.filter(df1.notifications.POSLog.Transaction.TransactionAmt != df2.notifications.POSLog.Transaction.TransactionAmt).select(df1.notifications.POSLog.Transaction.TransactionID.alias('TransactionID'),df1.notifications.POSLog.Transaction.TransactionAmt.alias('json_TxnAmt'),df2.notifications.POSLog.Transaction.TransactionAmt.alias('table_TxnAmt'))
    # NOTE(review): res_df is computed but never shown or returned (its
    # .show() call is commented out upstream), so this function currently
    # only displays the two inputs.

compare_data()
f5908c74a542725b56b91f8c80ec1d71a4a423a1 | Python | swpshadow/independent-set | /source/gen_alg.py | UTF-8 | 4,406 | 2.953125 | 3 | [] | no_license | from independent_set import DataSet
import random
import sys
import time
def roulette_selection(pool, data_set):
    """Fitness-proportionate (roulette-wheel) parent selection.

    Returns len(pool) individuals sampled with replacement, each picked
    with probability fitness / total pool fitness.
    """
    total = data_set.sum_pool_fitness(pool)
    weights = [data_set.fitness(member) / total for member in pool]
    return list(random.choices(pool, k=len(pool), weights=weights))
def tournament_selection(pool, data_set, prob=0.75):
    """Binary tournament selection.

    For each of len(pool) slots, draw two candidates (with replacement)
    and keep the fitter one with probability *prob*, otherwise the loser.
    """
    parents = []
    for _ in range(len(pool)):
        contenders = random.choices(pool, k=2)
        keep_winner = random.random() < prob
        winner_idx = data_set.best_in_pool(contenders)[0]
        if keep_winner:
            parents.append(contenders[winner_idx])
        else:
            parents.append(contenders[(winner_idx + 1) % 2])
    assert len(parents) == len(pool)
    return parents
def cross_over(pool, chromosome_size, crossover=None):
    """Produce len(pool) children by crossing randomly chosen parent pairs.

    *crossover* is a callable (p1, p2, size) -> (child1, child2); it
    defaults to uniform_cross_over so the argument is optional.
    """
    if crossover is None:
        crossover = uniform_cross_over
    children = []
    for _ in range(len(pool) // 2):
        first = pool[random.randint(0, len(pool) - 1)]
        second = pool[random.randint(0, len(pool) - 1)]
        kid_a, kid_b = crossover(first, second, chromosome_size)
        children.append(kid_a)
        children.append(kid_b)
    return children
def single_point_crossover(p1, p2, size):
    """Classic one-point crossover: pick a cut point in [1, size] and
    swap the parents' tails to produce two children."""
    cut = random.randint(1, size)
    child_one = p1[:cut] + p2[cut:]
    child_two = p2[:cut] + p1[cut:]
    return (child_one, child_two)
def uniform_cross_over(p1, p2, size):
    """Uniform crossover: each gene is taken from a random parent, with
    the complementary choice going to the other child."""
    mask = [random.choice([0, 1]) for _ in p1]
    child_one = [p1[i] if mask[i] == 0 else p2[i] for i in range(size)]
    child_two = [p2[i] if mask[i] == 0 else p1[i] for i in range(size)]
    return (child_one, child_two)
def random_mutation(pool, mutation_rate=0.05):
    """With probability *mutation_rate* per individual, flip up to
    len(genome)//10 randomly chosen bits in place.  Returns the pool."""
    for genome in pool:
        if random.random() >= mutation_rate:
            continue
        flips = random.randint(0, len(genome) // 10)
        for _ in range(flips):
            spot = random.randint(0, len(genome) - 1)
            genome[spot] = (genome[spot] + 1) % 2  # flip the bit
    return pool
def mutation(pool, mutation_rate=.05):
    """With probability *mutation_rate* per individual, flip exactly one
    randomly chosen bit in place.  Returns the same pool object."""
    for genome in pool:
        if random.random() < mutation_rate:
            spot = random.randint(0, len(genome) - 1)
            genome[spot] = (genome[spot] + 1) % 2  # flip the bit
    return pool
# GA driver: five independent runs of an elitist generational GA for the
# maximum independent set instance in `file_name`.
set_size = 50  #####################
pool_size = int(.6 * set_size)  # make even number (pairs of children)
if pool_size % 2 == 1:
    pool_size += 1
file_name = '50'  ################################
optimal = 17  # known best fitness for this instance (stops a run early)
if len(sys.argv) > 1:
    # NOTE(review): the same CLI argument is used as both the data file
    # name and the set size, so instance files must be named by their size.
    file_name = sys.argv[1]
    set_size = int(sys.argv[1])
    pool_size = int(set_size * .6)
    if pool_size % 2 == 1:
        pool_size += 1
print(set_size, pool_size)
avg_fit = 0
best_fit = 0
avg_gens = 0
print(time.time())
for _ in range(0, 5):  # five independent GA runs
    data_set = DataSet(file_name, size=set_size)
    pool = data_set.random_pool(pool_size)
    best = (pool[0], data_set.fitness(pool[0]))
    iterations = 0
    max_iterations = 20000  # NOTE(review): unused; the loop uses time/stall limits
    since_change = 0
    max_not_changed = 5000
    start_time = time.time()
    # runs until the 7-minute wall clock, max_not_changed stagnant
    # generations, or the known optimum is reached
    while time.time() - start_time < 420 and since_change < max_not_changed and best[1] < optimal:
        elites = []
        for _ in range(0, 2):  # always just 2 elites; pulled out of the pool and put back into the next generation
            b = data_set.best_in_pool(pool)
            elites.append(pool.pop(b[0]))
        parent_pool = tournament_selection(pool, data_set)  # alternative: roulette_selection(pool, data_set)
        child_pool = cross_over(parent_pool, set_size, single_point_crossover)  # alternative crossover: uniform_cross_over
        child_pool = mutation(child_pool, mutation_rate=0.15)  # alternative operator: random_mutation
        for id in child_pool:  # repair infeasible individuals
            data_set.fix_up(id)
        pool = child_pool
        pool.extend(elites)  # re-add the elites to the population
        new_best = data_set.best_in_pool(pool)
        if new_best[1] > best[1]:  # found something fitter: replace best so far
            best = (pool[new_best[0]], new_best[1])
            since_change = 0
        iterations += 1
        since_change += 1
    if best[1] > best_fit:
        best_fit = best[1]
    avg_gens += iterations
    print("num gens: ", iterations, "total time: ", time.time() - start_time)
    print("best fit: ", best[1])
    avg_fit += best[1]
    print()
print("best fit: ", best_fit, "avg gens: ", avg_gens / 5, "avg fit: ", avg_fit / 5)
print("best fit: ", best_fit, "avg gens: ", avg_gens / 5, "avg fit: ", avg_fit / 5)
| true |
735dea99c9cefcc4864c7c151016ee90bbb4f835 | Python | lukaszkuczynski/scout | /test_integration/websimple_reader_test_integration.py | UTF-8 | 546 | 2.84375 | 3 | [] | no_license | from unittest import main, TestCase
from application.web_value_reader import WebSimpleReader
class WebSimpleReaderTestI(TestCase):
    """Integration test: WebSimpleReader must extract text from a live page."""

    def test_reader_givenWebResourceWithText_canReadIt(self):
        config = {
            "address": "https://www.nytimes.com",
            "xpath": "//li[contains(@class, 'todays-paper')]/a[1]/text()"
        }
        value = WebSimpleReader(config).read()
        text = str(value)
        self.assertIn('Today', text)
        self.assertIn('Paper', text)
# Allow running this integration-test module directly.
if __name__ == '__main__':
    main()
8783c4bdc6d7822e8dde55260e81cd6af860e7da | Python | wwhisme/MOI_Robot_Winter | /src/winter_globalplanner/src/MoveBase/python_movebase/PathFilter.py | UTF-8 | 7,001 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
from geometry_msgs.msg import Twist, Point, Quaternion,PoseStamped
from math import radians, copysign, sqrt, pow, pi,cos
from nav_msgs.msg import Path
import PyKDL
import tf
import math
# Slope between two points, passed as separate x/y coordinates.
def Slope(GX,CX,GY,CY):
    """Slope of the segment from (CX, CY) to (GX, GY).

    A vertical segment returns +/-1e7 as a sentinel (sign follows the
    y direction) instead of dividing by zero.
    """
    dx = GX - CX
    if dx == 0.0:
        return 10000000.0 if GY > CY else -10000000.0
    return float(GY - CY) / dx
# Slope between two PoseStamped-like points.
def SlopePose(pose1,pose2):
    """Slope of the segment from ``pose2``'s position to ``pose1``'s.

    Vertical segments return +/-1e7 as a sentinel, like Slope().
    """
    gx = pose1.pose.position.x
    gy = pose1.pose.position.y
    cx = pose2.pose.position.x
    cy = pose2.pose.position.y
    dx = gx - cx
    if dx == 0.0:
        return 10000000.0 if gy > cy else -10000000.0
    return float(gy - cy) / dx
# Intersection of two lines, each given by two poses on it.
def CrossPoint(pose1,pose2,pose3,pose4):
    """Return (x, y) where line (pose1, pose2) meets line (pose3, pose4).

    A vertical first or second line is handled as a special case; the
    lines are assumed not to be parallel (and not both vertical).
    """
    x1, y1 = pose1.pose.position.x, pose1.pose.position.y
    x2, y2 = pose2.pose.position.x, pose2.pose.position.y
    x3, y3 = pose3.pose.position.x, pose3.pose.position.y
    x4, y4 = pose4.pose.position.x, pose4.pose.position.y
    if x2 == x1:
        # First line vertical: evaluate the second line at x1.
        k2 = float(y4 - y3) / (x4 - x3)
        c2 = y3 - k2 * x3
        return x1, k2 * x1 + c2
    if x3 == x4:
        # Second line vertical: evaluate the first line at x3.
        k1 = float(y2 - y1) / (x2 - x1)
        c1 = y1 - k1 * x1
        return x3, k1 * x3 + c1
    # General case: solve the two slope/intercept forms.
    k2 = float(y4 - y3) / (x4 - x3)
    c2 = y3 - k2 * x3
    k1 = float(y2 - y1) / (x2 - x1)
    c1 = y1 - k1 * x1
    X = float(c1 - c2) / (k2 - k1)
    Y = float(k1 * c2 - c1 * k2) / (k1 - k2)
    return X, Y
# Heading from the current point (CX, CY) toward the goal point (GX, GY).
def canculateAngle(GX,GY,CX,CY):
    """Return the heading angle in radians, quadrant-corrected like atan2.

    NOTE(review): when the two points coincide this returns -pi/2, not 0;
    preserved as-is from the original implementation.
    """
    dx = GX - CX
    dy = GY - CY
    if dx > 0:
        return math.atan(dy / dx)
    if dx < 0:
        # Shift atan into the correct left-half-plane quadrant.
        if dy > 0:
            return math.atan(dy / dx) + pi
        return math.atan(dy / dx) - pi
    # Vertical direction: straight up or straight down.
    if dy > 0:
        return pi / 2
    return 0 - pi / 2
# Heading from the current pose toward the goal pose.
def canculate_G_C_Angle(gPose,cPose):
    """Angle (radians) of the vector from ``cPose`` to ``gPose``.

    Same quadrant handling as canculateAngle, but taking PoseStamped-like
    arguments instead of raw coordinates.
    """
    dx = gPose.pose.position.x - cPose.pose.position.x
    dy = gPose.pose.position.y - cPose.pose.position.y
    if dx > 0:
        return math.atan(dy / dx)
    if dx < 0:
        if dy > 0:
            return math.atan(dy / dx) + pi
        return math.atan(dy / dx) - pi
    if dy > 0:
        return pi / 2
    return 0 - pi / 2
# Euclidean distance between two poses.
def canculateDistance(pose1,pose2):
    """Straight-line distance between the positions of two poses."""
    dx = pose1.pose.position.x - pose2.pose.position.x
    dy = pose1.pose.position.y - pose2.pose.position.y
    return sqrt(pow(dx, 2) + pow(dy, 2))
# Convert a quaternion into a heading angle (yaw).
def quat_to_angle(quat):
    # Build a PyKDL rotation from the quaternion components and return the
    # yaw term (RPY[2], rotation about Z).
    rot = PyKDL.Rotation.Quaternion(quat.x, quat.y, quat.z, quat.w)
    return rot.GetRPY()[2]
# Pick new goal points out of the path produced by A*, reducing the number
# of A* points.
def newPathFromAStar(path):
    i=15
    # At least one goal point per metre, to guard against collisions on long
    # stretches where the heading change is very small.
    poses=[]
    length=len(path.poses)
    if length>15:
        lastj=0
        lastGD=quat_to_angle(path.poses[15].pose.orientation)
        poses.append(path.poses[15])
    while (i<length-20) and (length>15):
        GD=quat_to_angle(path.poses[i].pose.orientation)
        errDirection=GD-lastGD
        # Wrap the heading difference back into a +/-pi-ish range.
        if(errDirection>3.14):
            errDirection=2*3.1415-errDirection
        elif(errDirection<-3.14):
            errDirection=2*3.1415+errDirection
        # Thresholds: 0.175 rad ~ 10 deg, 0.35 ~ 20 deg, 0.524 ~ 30 deg.
        # At corners, push the goal point outwards based on the slopes of
        # the segments before and after the corner.
        if(abs(errDirection))>0.35:
            # Expand the goal point outwards: compare the slope to the point
            # 10 samples ahead (k1) with the slope from 10 samples behind (k2)
            # and snap the corner to the outer combination of coordinates.
            x=path.poses[i].pose.position.x
            y=path.poses[i].pose.position.y
            x1=path.poses[i+10].pose.position.x
            y1=path.poses[i+10].pose.position.y
            x2=path.poses[i-10].pose.position.x
            y2=path.poses[i-10].pose.position.y
            k1=Slope(x1,x,y1,y)
            k2=Slope(x,x2,y,y2)
            if y1>y2:
                if x1>x2:
                    if k1>k2:
                        path.poses[i].pose.position.x=x1
                        path.poses[i].pose.position.y=y2
                    else:
                        path.poses[i].pose.position.x=x2
                        path.poses[i].pose.position.y=y1
                else:
                    if k1>k2:
                        path.poses[i].pose.position.x=x2
                        path.poses[i].pose.position.y=y1
                    else:
                        path.poses[i].pose.position.x=x1
                        path.poses[i].pose.position.y=y2
            else:
                if x1<x2:
                    if k1>k2:
                        path.poses[i].pose.position.x=x1
                        path.poses[i].pose.position.y=y2
                    else:
                        path.poses[i].pose.position.x=x2
                        path.poses[i].pose.position.y=y1
                else:
                    if k1>k2:
                        path.poses[i].pose.position.x=x2
                        path.poses[i].pose.position.y=y1
                    else:
                        path.poses[i].pose.position.x=x1
                        path.poses[i].pose.position.y=y2
            poses.append(path.poses[i])
            lastGD=GD
            lastj=i
        # Emit a waypoint at least every 50 samples even on straight runs.
        if(i-lastj>50):
            lastj=i
            poses.append(path.poses[i])
        i+=10
    # Always keep the final goal pose.
    poses.append(path.poses[len(path.poses)-1])
    mPath=Path()
    mPath.header.frame_id=path.header.frame_id
    mPath.poses=poses[:]
    return mPath
# Second filtering pass: mainly fuses corners in the path into right angles
# and merges closely spaced goal points into longer-distance ones.
def Lvbo(path,D):
    mPath=Path()
    mPath.header.frame_id=path.header.frame_id
    poses=path.poses
    P=len(poses)
    newPoses=[]
    newPoses.append(poses[0])
    print '---------------------------'
    # With three points or fewer there is nothing to merge.
    if P<4:
        for i in range(1,P):
            newPoses.append(poses[i])
        mPath.poses=newPoses[:]
        return mPath
    # Start the computation from the third point.
    i=2
    FLAG=0
    while i<(P-1):
        d=canculateDistance(poses[i],poses[i-1])
        if (d<D) :
            # Very close, nearly axis-aligned neighbours: keep only one.
            if d<0.25 and ((abs(poses[i-1].pose.position.x-poses[i].pose.position.x)<0.05) or (abs(poses[i-1].pose.position.y-poses[i].pose.position.y)<0.05)):
                newPoses.append(poses[i-1])
                i+=2
                FLAG=3
            else:
                # If the two segment headings differ only slightly,
                # intersecting their lines could produce a sharp spike.
                k1=canculate_G_C_Angle(poses[i-2],poses[i-1])
                k2=canculate_G_C_Angle(poses[i],poses[i+1])
                # Within roughly 30 degrees: keep the point as-is.
                if abs(normalize_angle(k1-k2))<0.5:
                    newPoses.append(poses[i-1])
                    i+=2
                    FLAG=2
                else:
                    # Replace the corner point by the intersection of the
                    # segments before and after it.
                    result=CrossPoint(poses[i-2],poses[i-1],poses[i],poses[i+1])
                    poses[i-1].pose.position.x=result[0]
                    poses[i-1].pose.position.y=result[1]
                    newPoses.append(poses[i-1])
                    i+=2
                    FLAG=1
        else:
            newPoses.append(poses[i-1])
            i+=1
            FLAG=2
        # Termination: flush the remaining tail point(s) and return.
        if i==(P-1):
            newPoses.append(poses[P-2])
            newPoses.append(poses[P-1])
            mPath.poses=newPoses[:]
            return mPath
        if i==P :
            newPoses.append(poses[P-1])
            mPath.poses=newPoses[:]
            return mPath
# Final goal-point selection: turn short-distance points into
# longer-distance goal points, keeping only real heading changes.
def ChooseMainPath(path):
    mPath=Path()
    mPath.header.frame_id=path.header.frame_id
    poses=path.poses
    P=len(poses)
    if P<3:
        return path
    newPoses=[]
    newPoses.append(poses[0])
    lastAngle=canculate_G_C_Angle(poses[1],poses[0])
    for i in range(1,P-1):
        angle=canculate_G_C_Angle(poses[i+1],poses[i])
        #print 'agnle-'
        errA=abs(angle-lastAngle)
        # Wrap differences above ~6 rad back into range (2*pi ~ 6.283).
        if errA>6:
            errA=6.283-errA
        #print errA
        # Keep the point only when the heading changed by more than 0.1 rad.
        if errA>0.1:
            newPoses.append(poses[i])
        lastAngle=angle
    newPoses.append(poses[P-1])
    mPath.poses=newPoses[:]
    return mPath
def normalize_angle(angle):
    """Wrap ``angle`` (radians) into the interval [-pi, pi]."""
    wrapped = angle
    while wrapped > pi:
        wrapped -= 2.0 * pi
    while wrapped < -pi:
        wrapped += 2.0 * pi
    return wrapped
| true |
1fb50660e9f9ac4042e68406ddd9e7f56f162e79 | Python | christophejacques/flask-by-example | /db_create.py | UTF-8 | 488 | 2.578125 | 3 | [] | no_license | from models import BlogPost, BlogUser, db
# Create the database and the tables
print("Create Database & Tables : ", flush=True, end="")
db.create_all()
print("Ok")
# insert records
# NOTE(review): hard-coded demo credentials; confirm this seed script is
# never run against a production database.
print("Insert records in Tables : ", flush=True, end="")
db.session.add(BlogUser("admin", "cle", "password"))
db.session.add(BlogPost("Bon", "je suis bon !"))
db.session.add(BlogPost("Bien", "je vais bien !"))
print("Ok")
# commit
# Persist all pending inserts in a single transaction.
print("Commit : ", flush=True, end="")
db.session.commit()
print("Ok")
5522a3a81181e4250b1c352e29825ea44090711a | Python | cmlzaGk/sampleflask | /randomweb-app/randomwebapp_tests/tests_client.py | UTF-8 | 732 | 2.578125 | 3 | [] | no_license | import unittest
import json
from randomweb_app import create_app
class FlaskClientTestCase(unittest.TestCase):
    """Exercise the /betterrandomservice endpoint through the test client."""

    def setUp(self):
        # Build a fresh app in the 'testing' config and push its context.
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        self.client = self.app.test_client(use_cookies=True)

    def tearDown(self):
        self.app_context.pop()

    def test_home_page(self):
        response = self.client.post('/betterrandomservice')
        self.assertEqual(response.status_code, 200)
        json_output = json.loads(response.get_data(as_text=True))
        self.maxDiff = None
        value = json_output['random_int']
        self.assertTrue(0 <= value <= 10)
| true |
7e1b5724ed0a35c01fe0d189b89666551bb1820f | Python | toreyo/upgraded-happiness | /automate_the_boring/chapter6_manipulatingStringsEscapeChars.py | UTF-8 | 573 | 4.21875 | 4 | [] | no_license |
# Escape Characters
spam = 'hi this is Alice\'s cat'
print(spam)
print('Hi how are you doing today?\n I\'m feeling fantastic how about yourself? \n Great!')
print('\t this will be tabbed ')
# Raw strings
# place an r before quotation marks of a string to make it a raw string
# good for strings that contain many backslashes such as Windows file paths
print(r'this is Torey\'s raw string')
# F strings
name = 'Torey'
age = '27'
print(f"hi my name is {name} and I'm {age} years old. ")
spam = "hello world"
spam = spam.upper()
print(spam)
print(spam.isupper())
| true |
0884a425dcbfd84058bae82c905dbe388ee5fb74 | Python | andri27-ts/ClassicCartPole | /auxiliar.py | UTF-8 | 836 | 2.765625 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
import random
def discount_rewards(rewards, discount_factor):
    """Return discounted returns: G[t] = r[t] + discount_factor * G[t+1]."""
    running = 0
    discounted = []
    # Accumulate from the end of the episode backwards, then restore order.
    for reward in reversed(rewards):
        running = reward + running * discount_factor
        discounted.append(running)
    discounted.reverse()
    return discounted
def discount_and_normalize_rewards(all_rewards, discount_rate):
    """Discount each episode's rewards, then z-normalize them using the
    global mean and std across all episodes.  Returns a list of arrays."""
    all_discounted_rewards = [discount_rewards(ep, discount_rate) for ep in all_rewards]
    flat_rewards = np.concatenate(all_discounted_rewards)
    mean = flat_rewards.mean()
    std = flat_rewards.std()
    return [(ep - mean) / std for ep in all_discounted_rewards]
def get_batch(dataset, batch_size):
    """Return ``batch_size`` rows sampled without replacement, as an ndarray."""
    data = np.array(dataset)
    picks = random.sample(range(len(data)), batch_size)
    return data[picks]
bc0683cbd07b8da8535ec59cdcb49982acb8594f | Python | brentcas/gcd-django | /apps/stddata/models.py | UTF-8 | 4,280 | 2.734375 | 3 | [] | no_license | from django.db import models
class CountryManager(models.Manager):
    """Manager providing natural-key lookup for Country rows."""
    def get_by_natural_key(self, code):
        # The unique ``code`` column doubles as the natural key.
        return self.get(code=code)
class Country(models.Model):
    """A country, identified by a short unique code plus a display name."""
    class Meta:
        ordering = ('name',)
        verbose_name_plural = 'Countries'
    objects = CountryManager()
    # code: short unique identifier; name: indexed display name.
    code = models.CharField(max_length=10, unique=True)
    name = models.CharField(max_length=255, db_index=True)
    def natural_key(self):
        """
        Note that this natural key is not technically guaranteed to be unique.
        However, it probably is and our use of the natural key concept is
        sufficiently limited that this is acceptable.
        """
        return (self.code,)
    def __unicode__(self):
        # Human-readable representation (Python 2 style).
        return self.name
class CurrencyManager(models.Manager):
    """Manager providing natural-key lookup for Currency rows."""
    def get_by_natural_key(self, code):
        # The unique 3-letter ``code`` column doubles as the natural key.
        return self.get(code=code)
class Currency(models.Model):
    """Class representing currency for prices."""
    class Meta:
        ordering = ('name',)
        verbose_name_plural = 'Currencies'
    objects = CurrencyManager()
    # 3-character currency code, unique (presumably ISO 4217 — confirm).
    code = models.CharField(blank=False, null=False, max_length=3,
                            unique=True)
    name = models.CharField(blank=False, null=False, max_length=100,
                            db_index=True)
    # is_decimal: presumably whether amounts subdivide decimally (e.g.
    # cents) — TODO confirm against usage.
    is_decimal = models.BooleanField(default=True)
    def natural_key(self):
        return (self.code,)
    def __unicode__(self):
        # e.g. "USD - US Dollar" (Python 2 unicode repr).
        return unicode(self.code) + u" - " + unicode(self.name)
class Date(models.Model):
    """Class representing dates for gcd with the ability to store partial
    information. Blank field means that it's not important. Question marks mean
    that this part of a date is not known.
    Objects of this class should be deleted together with objects pointing to
    them."""
    class Meta:
        ordering = ('year','month','day',)
        verbose_name_plural = 'Dates'
    # Each part is stored as text so partial / uncertain values (e.g. "19?3")
    # can be preserved verbatim.
    year = models.CharField(blank=True, null=False, max_length=4,
                            db_index=True)
    month = models.CharField(blank=True, null=False, max_length=2,
                             db_index=True)
    day = models.CharField(blank=True, null=False, max_length=2,
                           db_index=True)
    year_uncertain = models.BooleanField(default=False)
    month_uncertain = models.BooleanField(default=False)
    day_uncertain = models.BooleanField(default=False)
    def set(self, year=None, month=None, day=None, year_uncertain=False,
            month_uncertain=False, day_uncertain=False, empty=False):
        """Populate all fields at once.

        A part is flagged uncertain when explicitly requested, when it is
        missing (unless ``empty`` says a blank date is intentional), or when
        the value itself contains a '?'.
        """
        self.year = year
        self.month = month
        self.day = day
        # BUG FIX: guard the "'?' in part" test — the original raised a
        # TypeError when a part was None and ``empty`` was True.
        self.year_uncertain = year_uncertain or (not year and not empty) \
                              or (year is not None and '?' in year)
        self.month_uncertain = month_uncertain or (not month and not empty) \
                               or (month is not None and '?' in month)
        self.day_uncertain = day_uncertain or (not day and not empty) \
                             or (day is not None and '?' in day)
    def __unicode__(self):
        # Render as "year-month-day", appending '?' to any uncertain part
        # that doesn't already carry one; empty dates render as ''.
        year = self.year or ''
        if self.year_uncertain and not '?' in year:
            year += '?'
        month = self.month or ''
        if self.month_uncertain and not '?' in month:
            month += '?'
        day = self.day or ''
        if self.day_uncertain and not '?' in day:
            day += '?'
        if year or month or day:
            return year+u'-'+month+u'-'+day
        else:
            return u''
class LanguageManager(models.Manager):
    """Manager providing natural-key lookup for Language rows."""
    def get_by_natural_key(self, code):
        # The unique ``code`` column doubles as the natural key.
        return self.get(code=code)
class Language(models.Model):
    """A language with a unique short code, a name and an optional native name."""
    class Meta:
        ordering = ('name',)
    objects = LanguageManager()
    code = models.CharField(max_length=10, unique=True)
    name = models.CharField(max_length=255, db_index=True)
    native_name = models.CharField(max_length=255, blank=True)
    def natural_key(self):
        """
        Note that this natural key is not technically guaranteed to be unique.
        However, it probably is and our use of the natural key concept is
        sufficiently limited that this is acceptable.
        """
        return (self.code,)
    def get_native_name(self):
        # Prefer the native spelling when present; fall back to ``name``.
        if self.native_name:
            return self.native_name
        else:
            return self.name
    def __unicode__(self):
        return self.name
| true |
6deac13dca4bd1f24e6dc0c575a9f66c831209b3 | Python | tahniyat-nisar/while-loop-meraki | /print string 5 times using while loop.py | UTF-8 | 180 | 2.78125 | 3 | [] | no_license | count=1
# Print the string five times; ``count`` (initialised to 1 above) is the
# loop counter and is incremented each pass.
while count<=5:
    print("navgurukul")
    count=count+1
print("agar navgurukul ka print yahan par dalo ge\n count toh 5 times karega lekin print baas ek hi baar karega ")
ddf59ea149d446c5d550240b041966c8416de80c | Python | ohcoolitssam/binfdatamining_a2 | /a2.py | UTF-8 | 3,932 | 3.140625 | 3 | [] | no_license | #created and edited by Samuel Phillips
#imports for data, classes and more
from pandas import DataFrame
import numpy as np
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn import tree, metrics
from matplotlib import pyplot as plt
#-- a2p1 starts here --
# Load the iris dataset and keep only the first two features
# (sepal length and sepal width) for the 2-D scatter plot.
iris = load_iris()
iData = iris.data
X = iris.data[:, :2]
y = iris.target
print()
# Train/test split (80/20) and a default k-NN classifier.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
n = KNeighborsClassifier()
n.fit(X_train, y_train)
p1 = n.predict(X_test)
# Row layout of the iris data:
#0 to 49 -> Setosa
#50 to 99 -> Versicolor
#100 to 149 -> Virginica
#scatterplot size is set
plt.figure(figsize=(8,8))
#first points of each type of flower are plotted so the legend can show correctly
plt.scatter(X[:, :1][0], X[:, 1:][0], facecolors='none', edgecolors='red', label='setosa')
plt.scatter(X[:, :1][50], X[:, 1:][50], facecolors='none', edgecolors='green', label='versicolor')
plt.scatter(X[:, :1][100], X[:, 1:][100], facecolors='none', edgecolors='blue', label='virginica')
#for loop that plots all the points for sepal length and width
for i in range(0, len(X)):
    if i < 50:
        plt.scatter(X[:, :1][i], X[:, 1:][i], facecolors='none', edgecolors='red')
    elif i < 100 and i > 49:
        plt.scatter(X[:, :1][i], X[:, 1:][i], facecolors='none', edgecolors='green')
    else:
        plt.scatter(X[:, :1][i], X[:, 1:][i], facecolors='none', edgecolors='blue')
#lists to hold x and y values for correct and incorrect predictions
corrX, corrY = [], []
incorX, incorY = [], []
#for loop that collects the x and y values of the correct and incorrect predictions
for i in range(0, len(p1)):
    if p1[i] == y_test[i]:
        corrX.append(X_test[:, :1][i])
        corrY.append(X_test[:, 1:][i])
    elif p1[i] != y_test[i]:
        incorX.append(X_test[:, :1][i])
        incorY.append(X_test[:, 1:][i])
# NOTE(review): if every prediction is correct, incorX is empty and the
# next line raises IndexError — confirm whether that case can occur here.
plt.scatter(corrX[0], corrY[0], color='black', marker=(5, 1), label='correct prediction')
plt.scatter(incorX[0], incorY[0], color='hotpink', marker=(5, 1), label='incorrect prediction')
#collection of all the correct points
# NOTE(review): the loop re-plots only incorX[0]/incorY[0] each pass —
# looks like it was meant to iterate the incorrect points too.
for i in range(0, len(corrX)):
    plt.scatter(corrX[i], corrY[i], color='black', marker=(5, 1))
    plt.scatter(incorX[0], incorY[0], color='hotpink', marker=(5, 1))
#scatterplot legend is made along with x and y axis names
plt.legend()
plt.xlabel('Sepal Length')
plt.ylabel('Sepal Width')
#plot is showed and saved to pdf
plt.show()
plt.savefig('a2p1_scatter.pdf')
#-- a2p1 ends here --
#-- a2p2 starts here --
# Reload the iris data (all four features this time) and fit one
# decision tree for visualization.
iris = load_iris()
X, y = load_iris(return_X_y=True)
#train-test-split is created from the iris data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
n = tree.DecisionTreeClassifier()
n = n.fit(X_train, y_train)
#tree is created, rendered and saved to pdf
plt.figure(figsize=(8,8))
tree.plot_tree(n, feature_names=iris.feature_names, class_names=iris.target_names, filled=True)
plt.show()
plt.savefig('a2p2_dtree.pdf')
#list for prediction accuracy is made
prediction_accuracy = []
#for loop that makes a train-test-split ten times and stores each prediction accuracy into the pa list
for i in range(0, 10):
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    n = tree.DecisionTreeClassifier()
    n.fit(X_train, y_train)
    yp = n.predict(X_test)
    prediction_accuracy.append(metrics.accuracy_score(y_test, yp))
#prediction accuracy list is printed
print(prediction_accuracy)
#mean of all the prediction accuracy values is printed out
print(np.mean(prediction_accuracy))
#-- a2p2 ends here --
0278d23940dd9933f710db708eb50f6c130346b3 | Python | AmeyaKotibhaskar/sparkWork | /movieRecommender.py | UTF-8 | 3,508 | 2.890625 | 3 | [] | no_license | from pyspark.sql import SparkSession
from pyspark.sql import functions as func
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, LongType
import sys
def calculateSimilarity(spark, moviePairs):
    """Cosine similarity per (movie1, movie2) pair from their co-ratings.

    Returns a DataFrame with columns movie1, movie2, score, numPairs.
    """
    # Per-row products needed for the cosine formula.
    squared = moviePairs \
        .withColumn('xx', func.col('rating1') * func.col('rating1')) \
        .withColumn('yy', func.col('rating2') * func.col('rating2')) \
        .withColumn('xy', func.col('rating1') * func.col('rating2'))
    # Aggregate dot product, norm product and co-occurrence count per pair.
    aggregated = squared.groupBy('movie1', 'movie2').agg(
        func.sum(func.col('xy')).alias('numerator'),
        (func.sqrt(func.sum(func.col('xx'))) * func.sqrt(func.sum(func.col('yy')))).alias('denominator'),
        func.count(func.col('xy')).alias('numPairs'))
    # Guard against zero denominators before dividing.
    return aggregated.withColumn(
        'score',
        func.when(func.col('denominator') != 0,
                  func.col('numerator') / func.col('denominator')).otherwise(0)
    ).select('movie1', 'movie2', 'score', 'numPairs')
def getMovieName(movieName, movieId):
    """Look up the title for ``movieId`` in the movie-name DataFrame."""
    row = movieName.filter(func.col("movie_id") == movieId).select("movie_title").collect()[0]
    return row[0]
#Start of the program
spark = SparkSession.builder.master('local[*]').appName('MovieRecommenderSystem').getOrCreate()
#schema for u.item file (movie_id | movie_title)
movieNameSchema = StructType([StructField('movie_id', IntegerType(),True),
                             StructField('movie_title', StringType(), True)])
#Schema for u.data file (user_id, movie_id, rating, timestamp)
ratingSchema = StructType([StructField('user_id', IntegerType(), True),
                           StructField('movie_id', StringType(), True),
                           StructField('rating', IntegerType(), True),
                           StructField('timestamp', LongType(), True)])
# NOTE(review): the input paths are hard-coded to a local Windows drive.
movieName = spark.read.option('sep','|').option('charset','ISO-8859-1').schema(movieNameSchema).csv('C:/pyspark_prac/ml-100k/u.item')
ratings = spark.read.option('sep', '\t').schema(ratingSchema).csv('C:/pyspark_prac/ml-100k/u.data')
ratings = ratings.select('user_id', 'movie_id', 'rating')
# Self-join on user_id (movie1 < movie2 avoids duplicate/self pairs) to get
# every pair of movies rated by the same user.
moviePairs = ratings.alias('rating1').join(ratings.alias('rating2'),
                                           (func.col('rating1.user_id') == func.col('rating2.user_id'))
                                           & (func.col('rating1.movie_id') < func.col('rating2.movie_id'))).select(func.col('rating1.movie_id').alias('movie1'),func.col('rating2.movie_id').alias('movie2'),
                                                                                                                   func.col('rating1.rating').alias('rating1'),
                                                                                                                   func.col('rating2.rating').alias('rating2'))
moviePairSimilarity = calculateSimilarity(spark,moviePairs)
if (len(sys.argv) > 1):
    scoreThreshold = 0.97
    coOccurrenceThreshold = 50.0
    movieID = int(sys.argv[1])
    # Filter for movies with this sim that are "good" as defined by
    # our quality thresholds above
    filteredResults = moviePairSimilarity.filter(((func.col("movie1") == movieID) | (func.col("movie2") == movieID)) &(func.col("score") > scoreThreshold) & (func.col("numPairs") > coOccurrenceThreshold))
    # Sort by quality score.
    results = filteredResults.sort(func.col("score").desc()).take(10)
    print ("Top 10 similar movies for " + getMovieName(movieName, movieID))
    for result in results:
        # Display the similarity result that isn't the movie we're looking at
        similarMovieID = result.movie1
        if (similarMovieID == movieID):
            similarMovieID = result.movie2
        print(getMovieName(movieName, similarMovieID) + "\tscore: " \
            + str(result.score) + "\tstrength: " + str(result.numPairs))
spark.stop()
39ac4f77aa54944369d4baf1d4df9fc8a0485edf | Python | Tatsuya-yng/Python_BePROUD_study | /1-10-1.py | UTF-8 | 117 | 3.484375 | 3 | [] | no_license | # -*- coding: UTF-8
#数値と文字列を変換する
teika = input('定価を入力せよ')
print( teika * 1.08 )
| true |
5a6416011c89c9adaa15462566b233d267b7c34e | Python | li-poltorak/code_guild_labs | /adventure/world.py | UTF-8 | 944 | 3.640625 | 4 | [] | no_license | from creature import Creature
from item import Weapon, Potion
class Room:
    """A single game location holding items, creatures and one exit."""

    def __init__(self, door, info_text):
        self.exit = door
        self.info_text = info_text
        self.items = []
        self.creatures = []

    def inspect_items(self):
        """Comma-separated names of every item in the room."""
        names = [item.name for item in self.items]
        return ', '.join(names)

    def inspect_creatures(self):
        """Comma-separated descriptions of the non-player creatures."""
        described = []
        for creature in self.creatures:
            if not creature.is_player:
                described.append('a ' + creature.species)
        return ', '.join(described)

    def describe(self):
        """One-line summary of the room and its exit."""
        return 'You are standing in {}. {}.'.format(self.info_text, self.exit)
# Demo: build a room with a player, a monster and two items, then print
# its description and contents.
if __name__ == '__main__':
    start = Room('a door', 'a dark room')
    player = Creature('Li', 'human')
    monster = Creature('Klarg', 'hugbear')
    p = Potion()
    w = Weapon()
    start.creatures.append(player)
    start.creatures.append(monster)
    start.items.append(p)
    start.items.append(w)
    print(start.describe())
    print(start.inspect_creatures())
    print(start.inspect_items())
| true |
8037b8609c93483e95afa34a21ac66d76a62f6ba | Python | sherifsameh/Basic-Data-Structures | /Trees/heap.py | UTF-8 | 6,499 | 3.578125 | 4 | [] | no_license | from abc import ABC, abstractmethod
from binary_tree import TreeNode, BinaryTree
class Heap(ABC):
    """Array-backed binary heap base class.

    ``self._heap`` holds the elements in heap order; the ``min_heap`` flag
    on ``insert``/``remove`` selects min- or max-ordering, so the concrete
    MinHeap/MaxHeap subclasses share all of the machinery here.
    """

    @abstractmethod
    def __init__(self):
        self._heap = []
        pass

    def __len__(self):
        return len(self._heap)

    def __iter__(self):
        for node in self._heap:
            yield node

    def __repr__(self):
        # BUG FIX: heapify() already returns a BinaryTree; the original
        # wrapped that result in BinaryTree(...) a second time, handing the
        # tree where a node was expected.
        return str(self.heapify(self._heap))

    ############################## HEAPIFY ##############################
    def heapify(self, lst):
        """Build a complete binary tree from ``lst`` in level order and
        return it wrapped in a BinaryTree.

        Assumes ``lst`` is non-empty (lst[0] is used unconditionally).
        """
        root = TreeNode(lst[0])
        q = [root]
        idx = 1
        length = len(lst)
        while (idx < length):
            parent_node = q.pop(0)
            parent_node.left = TreeNode(lst[idx])
            q.append(parent_node.left)
            idx += 1
            if idx < length:
                parent_node.right = TreeNode(lst[idx])
                q.append(parent_node.right)
                idx += 1
        return BinaryTree(root)

    ############################## INSERTION ##############################
    def insert(self, value, min_heap):
        """Append ``value`` and sift it up until heap order is restored.

        ``min_heap`` is True for min-ordering, False for max-ordering.
        """
        self._heap.append(value)
        # Sift up: swap with the parent while the ordering is violated.
        idx = len(self._heap) - 1
        while (idx != 0):
            parent_idx = (idx - 1) // 2
            current = self._heap[idx]
            parent = self._heap[parent_idx]
            if (min_heap and parent > current) or (not min_heap and parent < current):
                self._heap[parent_idx], self._heap[idx] = \
                    self._heap[idx], self._heap[parent_idx]
                idx = parent_idx
            else:
                break

    ############################## REMOVAL ##############################
    def __rebalance(self, parent_idx, min_heap):
        """Sift the element at ``parent_idx`` down into place.

        Only used by remove(): at that point the last slot holds a +/-inf
        sentinel, so children at index ``last_idx`` are deliberately
        excluded from consideration.
        """
        last_idx = len(self._heap) - 1
        while (parent_idx < last_idx):
            parent = self._heap[parent_idx]
            left_child_idx, right_child_idx = (parent_idx * 2) + 1, (parent_idx * 2) + 2
            # Pick the child to compare against (smaller for a min-heap,
            # larger for a max-heap); stop when no usable child exists.
            if right_child_idx >= last_idx:
                if left_child_idx >= last_idx:
                    break
                child_idx = left_child_idx
            elif min_heap:
                if self._heap[left_child_idx] < self._heap[right_child_idx]:
                    child_idx = left_child_idx
                else:
                    child_idx = right_child_idx
            else:
                if self._heap[left_child_idx] > self._heap[right_child_idx]:
                    child_idx = left_child_idx
                else:
                    child_idx = right_child_idx
            child = self._heap[child_idx]
            if (min_heap and parent > child) or (not min_heap and parent < child):
                self._heap[parent_idx], self._heap[child_idx] = \
                    self._heap[child_idx], self._heap[parent_idx]
                parent_idx = child_idx
            else:
                break

    def remove(self, del_val, min_heap):
        """Removes first utterence of given value.

        Raises ValueError (from list.index) when the value is absent.
        """
        del_idx = self._heap.index(del_val)
        last_idx = len(self._heap) - 1
        # Move the victim to the end and overwrite it with a sentinel that
        # can never win a comparison, then sift the swapped element down.
        self._heap[last_idx], self._heap[del_idx] = \
            self._heap[del_idx], self._heap[last_idx]
        if min_heap:
            self._heap[last_idx] = float('-inf')
        else:
            self._heap[last_idx] = float('inf')
        self.__rebalance(del_idx, min_heap)
        # Drop the sentinel.
        self._heap.pop()
class MinHeap(Heap):
    """Min-ordered heap: the smallest element is always at the root."""

    def __init__(self, value):
        # Accept either an iterable of values or a single number.
        if hasattr(value, '__iter__'):
            self._heap = sorted(value)
            return
        if type(value) in {int, float}:
            self._heap = [value]
            return
        raise ValueError("Unsupported datatype!!")

    def get_min(self):
        """O(1): the root holds the minimum."""
        return self._heap[0]

    def get_max(self):
        # TODO: optimize as you don't have to iterate over the whole list
        return max(self._heap)

    def insert(self, value):
        super().insert(value, min_heap=True)

    def remove(self, del_value):
        super().remove(del_value, min_heap=True)
class MaxHeap(Heap):
    """Max-ordered heap: the largest element is always at the root."""

    def __init__(self, value):
        # Accept either an iterable of values or a single number.
        if hasattr(value, '__iter__'):
            self._heap = sorted(value, reverse=True)
            return
        if type(value) in {int, float}:
            self._heap = [value]
            return
        raise ValueError("Unsupported datatype!!")

    def get_min(self):
        # TODO: optimize as you don't have to iterate over the whole list
        return min(self._heap)

    def get_max(self):
        """O(1): the root holds the maximum."""
        return self._heap[0]

    def insert(self, value):
        super().insert(value, min_heap=False)

    def remove(self, del_value):
        super().remove(del_value, min_heap=False)
# Smoke tests for the heap classes, run when the module is executed directly.
if __name__ == "__main__":
    # h = Heap()
    # test iteration (and __repr__ via print)
    heap = MinHeap([6, 2, 7, 1])
    print(heap, '\n')
    for node in heap:
        print(node)
    print('='*50)
    #####################################################
    # test MinHeap removal and accessors
    heap = MinHeap([1, 3, 5, 4, 6, 13, 10, 9, 8, 15, 17, 90, 100, 102, 190])
    heap.remove(102)
    heap.remove(4)
    heap.remove(1)
    print(heap)
    print("Min value:", heap.get_min())
    print("Max value:", heap.get_max())
    print("Heap length:", len(heap))
    print('='*50)
    #####################################################
    # test MaxHeap removal and accessors
    heap = MaxHeap([1, 3, 5, 4, 6, 13, 10, 9, 8, 15, 17, 90, 100, 102, 190])
    heap.remove(102)
    heap.remove(100)
    heap.remove(190)
    print(heap)
    print("Min value:", heap.get_min())
    print("Max value:", heap.get_max())
    print("Heap length:", len(heap))
    print('='*50)
    #####################################################
    # test insert starting from a single-value heap
    heap = MinHeap(35)
    heap.insert(33)
    heap.insert(42)
    heap.insert(10)
    heap.insert(14)
    heap.insert(19)
    heap.insert(27)
    heap.insert(44)
    heap.insert(26)
    heap.insert(31)
    print(heap)
    print('='*50)
    heap = MaxHeap(35)
    heap.insert(33)
    heap.insert(42)
    heap.insert(10)
    heap.insert(14)
    heap.insert(19)
    heap.insert(27)
    heap.insert(44)
    heap.insert(26)
    heap.insert(31)
    print(heap)
| true |
95ebf1b03cecf67dcd5e4d6d965fd6071615574a | Python | msieder/RecSys2020 | /src/RecSys_RFbaseline.py | UTF-8 | 6,921 | 2.53125 | 3 | [] | no_license | from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
# Create Spark Context
sc = SparkContext.getOrCreate()
# Create Spark Session
spark = SparkSession(sc)
# Save path in variable
path = 'hdfs:///user/pknees/RSC20/training.tsv'
val = 'hdfs:///user/pknees/RSC20/test.tsv'
#Reading in the file
df = spark.read \
.load(path,
format="csv",delimiter="\x01")
df_val = spark.read \
.load(val,
format="csv",delimiter="\x01")
# Changing Column names
df = df.toDF("text_tokens", "hashtags", "tweet_id", "present_media", "present_links", "present_domains",\
"tweet_type", "language", "tweet_timestamp", "engaged_with_user_id", "engaged_with_user_follower_count",\
"engaged_with_user_following_count", "engaged_with_user_is_verified", "engaged_with_user_account_creation",\
"engaging_user_id", "engaging_user_follower_count", "engaging_user_following_count", "engaging_user_is_verified",\
"engaging_user_account_creation", "engaged_follows_engaging", "reply_timestamp", "retweet_timestamp", "retweet_with_comment_timestamp", "like_timestamp")
df_val = df_val.toDF("text_tokens", "hashtags", "tweet_id", "present_media", "present_links", "present_domains",\
"tweet_type", "language", "tweet_timestamp", "engaged_with_user_id", "engaged_with_user_follower_count",\
"engaged_with_user_following_count", "engaged_with_user_is_verified", "engaged_with_user_account_creation",\
"engaging_user_id", "engaging_user_follower_count", "engaging_user_following_count", "engaging_user_is_verified",\
"engaging_user_account_creation", "engaged_follows_engaging")
id_features = ["tweet_id","engaging_user_id","engaged_with_user_id"]
numeric_features = ["tweet_timestamp",
"engaged_with_user_follower_count", "engaged_with_user_following_count", "engaged_with_user_account_creation",
"engaging_user_follower_count", "engaging_user_following_count", "engaging_user_account_creation"
]
categorical_features = ["tweet_type", "language",
"engaged_with_user_is_verified", "engaging_user_is_verified", "engaged_follows_engaging"
]
text_features = ["text_tokens", "hashtags", "present_media", "present_links", "present_domains"]
label_columns = ["reply_timestamp", "retweet_timestamp", "retweet_with_comment_timestamp", "like_timestamp"]
from pyspark.sql import functions as f
# Text features stay as raw tab-separated strings; the split-to-array step
# below was disabled and is kept only for reference.
#for feature in text_features:
# text_feature_split = f.split(df[feature], '\t')
# df = df.withColumn(feature, f.when(f.col(feature).isNotNull(), text_feature_split).otherwise(f.array().cast("array<string>")))
from pyspark.sql.types import IntegerType
# Cast every numeric feature from string to integer.
for feature in numeric_features:
    df = df.withColumn(feature,f.col(feature).cast(IntegerType()))
# Hash the high-cardinality id columns into 50 buckets; negative hashes are
# sign-flipped first so the bucket index always lands in [0, 50).
for feature in id_features:
    output_col = feature + "_hashed"
    df = df.withColumn(output_col, (f.hash(f.col(feature))))
    df = df.withColumn(output_col, f.when(f.col(output_col) < 0, f.col(output_col)*-1%50).otherwise(f.col(output_col)%50))
# Binarise the labels: a non-null engagement timestamp means the event happened.
for col in label_columns:
    df = df.withColumn(col, f.when(f.col(col).isNotNull(), 1).otherwise(0))
##### Same preprocessing for validation (without label_columns transformation)
#for feature in text_features:
# text_feature_split = f.split(df_val[feature], '\t')
# df_val = df_val.withColumn(feature, f.when(f.col(feature).isNotNull(), text_feature_split).otherwise(f.array().cast("array<string>")))
for feature in numeric_features:
    df_val = df_val.withColumn(feature,f.col(feature).cast(IntegerType()))
for feature in id_features:
    output_col = feature + "_hashed"
    df_val = df_val.withColumn(output_col, (f.hash(f.col(feature))))
    df_val = df_val.withColumn(output_col, f.when(f.col(output_col) < 0, f.col(output_col)*-1%50).otherwise(f.col(output_col)%50))
# Set the numbers of quantiles/buckets for the baseline approach
nq = 50
from pyspark.ml.feature import QuantileDiscretizer, StringIndexer, FeatureHasher, HashingTF, OneHotEncoderEstimator, VectorAssembler
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# Bucketize every numeric feature into nq quantiles ("keep" sends nulls to an
# extra bucket) and index every categorical feature.
AllQuantileDiscretizers = [QuantileDiscretizer(numBuckets=nq,
inputCol=col,
outputCol=(col + "_bucketized"),
handleInvalid="keep") for col in numeric_features]
AllStringIndexers = [StringIndexer(inputCol=col,
outputCol=(col + "_indexed")) for col in categorical_features]
### FeatureHasher has been adapted to a hardcoded feature hashing + bucketing in the preprocessing step
#AllFeatureHashers = [FeatureHasher(numFeatures=nq,
# inputCols=[col],
# outputCol=(col + "_hashed")) for col in id_features]
#AllHashingTF = [HashingTF(inputCol=col,
# outputCol=(col + "_vectorized")) for col in text_features]
# One-hot encode all bucketized/indexed/hashed columns, then assemble them
# into the single feature vector the classifiers consume.
to_onehot_features = [col + "_bucketized" for col in numeric_features]
to_onehot_features.extend(col + "_indexed" for col in categorical_features)
to_onehot_features.extend(col + "_hashed" for col in id_features)
onehot_features = [col + "_oneHot" for col in numeric_features]
onehot_features.extend(col + "_oneHot" for col in categorical_features)
onehot_features.extend(col + "_oneHot" for col in id_features)
encoder = OneHotEncoderEstimator(inputCols=to_onehot_features,
outputCols=onehot_features)
assembler_features = VectorAssembler(
inputCols=onehot_features,
outputCol="features_oneHot")
#assembler_labels = VectorAssembler(
# inputCols=label_columns,
# outputCol="label")
# One independent random forest per engagement target (multi-label setup).
AllRFModels = [RandomForestClassifier(labelCol=col, featuresCol="features_oneHot",predictionCol=(col+"_prediction"),probabilityCol=(col+"_probability"),rawPredictionCol=(col+"_raw_prediction"), numTrees=10) for col in label_columns]
from pyspark.ml import Pipeline
# Chain the preprocessing stages and the four classifiers into one pipeline.
AllStages = list()
AllStages.extend(AllQuantileDiscretizers)
AllStages.extend(AllStringIndexers)
#AllStages.extend(AllFeatureHashers) #depreciated
#AllStages.extend(AllHashingTF)
AllStages.append(encoder)
AllStages.append(assembler_features)
AllStages.extend(AllRFModels)
pipeline = Pipeline(stages=AllStages)
pipeline_model = pipeline.fit(df)
pipeline_model.write().overwrite().save("pipeline_model_twitter_group13")
# Score the validation set and persist the per-target predictions.
new_train = pipeline_model.transform(df_val).select(["tweet_id", "engaging_user_id", "reply_timestamp_prediction", "retweet_timestamp_prediction", "retweet_with_comment_timestamp_prediction", "like_timestamp_prediction"])
new_train.withColumnRenamed("engaging_user_id","user_id")
# NOTE(review): withColumnRenamed returns a NEW DataFrame; the result above is
# discarded, so the CSV written below still uses the "engaging_user_id" name.
new_train.write.csv('prediction_like_timestamp_twitter_Group13.csv')
094be6834b2a0adc6bdefac14a17fe4f841c3fef | Python | Soare-Robert-Daniel/Project-Sarus | /Digital Menu/Servant/app.py | UTF-8 | 3,938 | 2.859375 | 3 | [
"MIT"
] | permissive | from flask import *
import time
import record
import products
import json
app = Flask(__name__)
app.config['DEBUG'] = True
# Address/port the development server binds to (see app.run at the bottom).
HOST = "127.0.0.1"
PORT = 5000
records = [] # keeps every record created since the server started
rec_stack = [] # queue of records not yet fetched via /lastrecord
starting_server_time = (time.asctime(time.localtime(time.time()))).split(" ")
# The ID contains the date when the server started and the number of the last
# order since the start of the server, separated by "___".
last_ID = "_".join(starting_server_time) + "___0"
def generate_ID():
    # Build the next order ID: the server start date plus the current order
    # number, separated by "___" (increments the module-level last_ID).
    global last_ID
    print(last_ID)
    creation_date, last_order_nr = last_ID.split("___")
    new_order_nr = int(last_order_nr) + 1
    last_ID = creation_date + "___" + str(new_order_nr)
    return last_ID
@app.route('/')
def home():
    # Landing page: renders the table-generator template with the server
    # address so per-table links can be produced for this host/port.
    return render_template("table_generator.html", ip = "IP: %s" % HOST, port = "PORT: %s" % PORT)
@app.route('/table/<string:table_name>/')
def get_table(table_name):
    # Menu page for one table, listing every available product.
    return render_template("test.html", products=products.products_list)
@app.route('/table/<string:table_name>/sendOrder', methods=['GET', 'POST'])
def take_order(table_name):
    """
    Register a new order (POST only).

    Expects a JSON body with "bill_info" (the ordered items) and
    "total_value"; creates a Record with a fresh ID and status "Pending",
    stores it in both `records` and `rec_stack`, and returns "ok <ID>".
    """
    if request.method == "POST":
        # Get the data from the request
        raw_data = request.get_json()
        print(raw_data)
        # Extract the items and make a record for this request
        new_record = record.Record(generate_ID(), table_name, raw_data["bill_info"], "Pending", raw_data["total_value"])
        print(new_record)
        # Save the record
        records.append(new_record)
        rec_stack.append(new_record)
        # Everything is fine
        return "ok " + new_record.ID
    else:
        return "The method is not available"
@app.route('/table/<string:table_name>/<string:ID>/status', methods=['GET', 'POST'])
def get_table_status(table_name, ID):
    """
    Return the status of the order record identified by ID (POST only).
    The table name appears in the URL but is not used for the lookup.
    """
    if request.method == "POST":
        for rec in records:
            if rec.ID == ID:
                return rec.status
        return "Record do not exist!"
    else:
        return "The method is not available"
@app.route('/table/<string:ID>/getOrder')
def get_order(ID):
    """
    Return the ordered items (as JSON) of the record identified by ID,
    or the string "None" when no such record exists.
    """
    for rec in records:
        if rec.ID == ID:
            return jsonify(products=rec.request_data)
    return "None"
@app.route('/records')
def get_records():
    """
    Return the history view of every record as JSON.
    """
    return jsonify(records_list=[rec.get_record_for_history_json() for rec in records])
@app.route('/lastrecord')
def get_last_record():
    """
    Pop and return the oldest unprocessed record as JSON, or "None".
    (Despite the name, `rec_stack` is consumed front-first, i.e. FIFO.)
    """
    if len(rec_stack):
        rec = rec_stack[0].get_record_info_json()
        rec_stack.pop(0)
        return rec
    return "None"
@app.route('/status/accepted/<string:ID>')
def set_status_accepted_record(ID):
    """
    Set status "Accepted" on the record identified by ID.
    """
    return set_status(ID, "Accepted")
@app.route('/status/canceled/<ID>')
def set_status_canceled_record(ID):
    """
    Set status "Canceled" on the record identified by ID.
    """
    return set_status(ID, "Canceled")
def set_status(ID, status):
    """
    Set `status` on the record identified by ID, but only while it is still
    "Pending". The updated record is pushed back onto `rec_stack` so the
    /lastrecord poller picks up the change. Returns a short log message.
    """
    log_rec = "Request was not validated!"
    for rec in records:
        if rec.ID == ID:
            if rec.status == "Pending":
                rec.status = status
                log_rec = "Request validated!"
                rec_stack.append(rec) # Push the record again in the stack
    print("Status -> ID: %s \n" % ID + log_rec)
    return log_rec
if __name__ == '__main__':
    # Run the Flask development server on the configured host/port.
    app.run(host=HOST, port=PORT)
| true |
a41e563e7e6e3535c049cc74239a7c255c5fd130 | Python | kthcorp/openapi.pudding.to_samples | /source/python/3-photos/16-get-emotions.py | UTF-8 | 920 | 2.734375 | 3 | [] | no_license | # -*- coding: utf8 -*-
import simplejson, urllib
import urllib2
"""
16 get emotion infomation
format : https://api.pudding.to/v1/emotions/ko?access_key=TEST_ACCESS_KEY&token=TEST_TOKEN
sample : https://api.pudding.to/v1/emotions/ko?access_key=TEST_ACCESS_KEY&token=TEST_TOKEN
"""
# Demo credentials and endpoint for the Pudding.to open API (Python 2 sample).
ACCESS_KEY = "96474e57-cb16-11e1-91b7-12313f062e84"
API_BASE = "http://openapi.pudding.to/api/v1/emotions/"
def get_emotions(lang_id, **args):
    """
    Get emotions for the given language id (e.g. "ko", "en", "ja").

    Extra keyword args are forwarded as query parameters; pass format='xml'
    to receive the raw XML body instead of a parsed JSON object.
    """
    args.update({
        'access_key': ACCESS_KEY
    })
    url = API_BASE + lang_id + "?" + urllib.urlencode(args)
    if('format' in args and args['format'] == 'xml'):
        # XML responses are returned as the raw response body.
        result = urllib2.urlopen(url).read()
    else:
        # JSON responses are parsed into Python objects.
        result = simplejson.load(urllib.urlopen(url))
    return result
if __name__ == "__main__" :
    # Demo: fetch the emotion list once as JSON and once as XML (Python 2).
    langid = "en" # ko, en, ja
    json = get_emotions(langid)
    print json
    xml = get_emotions(langid, format='xml')
    print xml
| true |
fc90fc6a45cd8bad12b0a0bed620a79c3c34b337 | Python | jason-osajima/ml-nanodegree | /capstone_project_exploration/lr_4.py | UTF-8 | 4,029 | 2.84375 | 3 | [] | no_license | # Modules to import for project
import tensorflow as tf
import h5py
import numpy as np
import time
#Load the train/validation/test splits of the SAT-6 dataset from HDF5.
h5f = h5py.File('sat-6.h5','r')
X_train = h5f['X_train'][:]
y_train = h5f['y-train'][:]
X_test = h5f['X_test'][:]
y_test = h5f['y_test'][:]
X_valid = h5f['X_valid'][:]
y_valid = h5f['y_valid'][:]
h5f.close()
#number of land_cover labels
land_cover = ['buildings', 'barren_land', 'trees', 'grassland', 'roads', 'water_bodies']
num_labels = len(land_cover)
# Each image is image_size x image_size with `layers` values per pixel;
# inputs are flattened to image_size * image_size * layers below.
image_size = 28
layers = 4
num_steps = 20001
batch_size = 128
# Function that we can use to measure accuracy
def accuracy(predictions, labels):
    """Percentage of rows whose arg-max class matches the one-hot label."""
    predicted_classes = np.argmax(predictions, 1)
    true_classes = np.argmax(labels, 1)
    n_correct = np.sum(predicted_classes == true_classes)
    return 100.0 * n_correct / predictions.shape[0]
#Initiate an array for every 50 steps
steps = np.arange(0,num_steps,50)
#Initiate a loss array for every 50 steps.
ada_loss1 = np.zeros((num_steps-1)/50+1)
graph = tf.Graph()
with graph.as_default():
    # Input the data into constants that are attached to the graph.
    tf_X_train = tf.placeholder(tf.float32,
        shape=(batch_size, image_size * image_size * layers))
    tf_y_train = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_X_valid = tf.constant(X_valid)
    tf_X_test = tf.constant(X_test)
    # These are the parameters that we are going to be training. We initialize the
    # weight matrix using random values following a truncated normal distribution.
    # The biases get initialized to zero.
    weights = tf.Variable(
        tf.truncated_normal([image_size * image_size * layers, num_labels]))
    biases = tf.Variable(tf.zeros([num_labels]))
    # We multiply the inputs with the weight matrix, and add biases. We compute
    # the softmax and cross-entropy. We take the average of the cross-entropy
    # across all training examples which is our loss.
    logits = tf.matmul(tf_X_train, weights) + biases
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits, tf_y_train))
    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
    # Predictions for the training, validation, and test data.
    train_prediction = tf.nn.softmax(logits)
    valid_prediction = tf.nn.softmax(
        tf.matmul(tf_X_valid, weights) + biases)
    test_prediction = tf.nn.softmax(tf.matmul(tf_X_test, weights) + biases)
    #Initiate adagrad optimizer (this is the optimizer run in the session below)
    ada_optimizer = tf.train.AdagradOptimizer(0.2).minimize(loss)
with tf.Session(graph=graph) as session:
    tf.initialize_all_variables().run()
    print ("Initialized")
    start = time.time()
    for step in xrange(num_steps):
        # Pick an offset within the training data, which has been randomized.
        offset = (step * batch_size) % (X_train.shape[0] - batch_size)
        # Generate a minibatch.
        batch_data = X_train[offset:(offset + batch_size), :]
        batch_labels = y_train[offset:(offset + batch_size), :]
        # Prepare a dictionary telling the session where to feed the minibatch.
        # The key of the dictionary is the placeholder node of the graph to be fed,
        # and the value is the numpy array to feed to it.
        feed_dict = {tf_X_train : batch_data, tf_y_train : batch_labels}
        _, l, predictions = session.run(
            [ada_optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 500 == 0):
            print ("Minibatch loss at step", step, ":", l)
            print ("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
            print ("Validation accuracy: %.1f%%" % accuracy(
                valid_prediction.eval(), y_valid))
    end = time.time()
    # Report wall-clock training time, then evaluate once on the test set.
    print ("Training time (secs): {:.5f}".format(end - start))
    start = time.time()
    print ("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), y_test))
    end = time.time()
    print ("Prediction time (secs): {:.5f}".format(end - start))
| true |
e64b910564112af4e0de6604c280b399da39d7a8 | Python | sebpy/labyrinth | /display.py | UTF-8 | 3,379 | 3.3125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf8 -*-
""" Display class """
import pygame as pg
class Display:
    """ show interface graphic with pygame """
    # Board geometry: SPRITE_N x SPRITE_N tiles of SPRITE_SIZE pixels each.
    SPRITE_SIZE = 30
    SPRITE_N = 15
    BOARD_SIZE = SPRITE_SIZE * SPRITE_N
    def __init__(self):
        # Initialise pygame, the window, and preload every sprite, font and
        # pre-rendered message used by the draw methods below.
        pg.init()
        pg.display.set_caption("Help Mac Gyver to escape!")
        pg.key.set_repeat(400, 30)
        self.window = pg.display.set_mode((self.BOARD_SIZE, self.BOARD_SIZE))
        self.image = {"ground": pg.image.load("ressources/sol.png").convert(),
                      "wall": pg.image.load("ressources/wall.png").convert(),
                      "guardian": pg.image.load("ressources/Gardien.png").convert(),
                      "item_one": pg.image.load("ressources/ether.png").convert(),
                      "item_two": pg.image.load("ressources/seringue.png").convert(),
                      "item_three": pg.image.load("ressources/aiguille.png").convert(),
                      "mac_gyver": pg.image.load("ressources/MacGyver.png").convert()}
        self.text_load = pg.font.SysFont("cosmicsansms", 32)
        self.msg_load = self.text_load.render("To play, press ENTER or Q for quit",
                                              True, (255, 255, 255))
        self.msg_free = self.text_load.render("Great!! You are free!", True, (255, 255, 255))
        self.msg_dead = self.text_load.render("You are dead!", True, (255, 255, 255))
    def display_map(self, mapping):
        """ Show map: blit one sprite per map character ('#' wall, 'G'
        guardian, ' ' ground, '1'/'2'/'3' the three items). """
        for y_pos, line in enumerate(mapping):
            for x_pos, sprite in enumerate(line):
                y_sprite = y_pos * 30
                x_sprite = x_pos * 30
                if sprite == "#":
                    self.window.blit(self.image["wall"], (x_sprite, y_sprite))
                elif sprite == "G":
                    self.window.blit(self.image["guardian"], (x_sprite, y_sprite))
                elif sprite == " ":
                    self.window.blit(self.image["ground"], (x_sprite, y_sprite))
                elif sprite == "1":
                    self.window.blit(self.image["item_one"], (x_sprite, y_sprite))
                elif sprite == "2":
                    self.window.blit(self.image["item_two"], (x_sprite, y_sprite))
                elif sprite == "3":
                    self.window.blit(self.image["item_three"], (x_sprite, y_sprite))
    def counter(self, items):
        """ Show items counter in the top-right corner (tiles 12-14). """
        counter_items = self.text_load.render("Items: " + str(items), True, (255, 255, 255))
        i = 12
        while i < 15:
            self.window.blit(self.image["wall"], ((i * 30), (0 * 30)))
            i += 1
        self.window.blit(counter_items, (360, 4))
    def mac_gyver(self, y_pos, x_pos):
        """ Draw the Mac Gyver sprite at the given tile coordinates """
        self.window.blit(self.image["mac_gyver"], ((x_pos * 30), (y_pos * 30)))
    def message(self, msg):
        """ Print the pre-rendered win/lose message on screen """
        if msg == self.msg_free:
            self.window.blit(msg, (140, 220))
        elif msg == self.msg_dead:
            self.window.blit(msg, (160, 220))
    @classmethod
    def display_flip(cls):
        """ Push the back buffer to the screen (pygame display flip) """
        pg.display.flip()
    def clean_box(self, y_pos, x_pos):
        """ change old position of MG by ground sprite """
        self.window.blit(self.image["ground"], ((x_pos * 30), (y_pos * 30)))
| true |
281b9e03024ea2d4fe608019a0d03c79355be1a2 | Python | adibalcan/crawlingbot | /utils/listutils.py | UTF-8 | 181 | 3.34375 | 3 | [
"MIT"
] | permissive | # Make unique list and preserve items order
def unique(list):
seen = set()
uniqueList = [item for item in list if not (item in seen or seen.add(item))]
return uniqueList | true |
95bd323269bd2f3f5b5ed9dea6268d9f04d5fed5 | Python | gvauter/cymon-python | /cymon/cymon.py | UTF-8 | 3,710 | 2.9375 | 3 | [
"MIT"
] | permissive | import json
import requests
from urllib import quote_plus
class Cymon(object):
    """Thin client for the Cymon threat-intelligence REST API (Python 2)."""
    def __init__(self, auth_token=None, endpoint='https://cymon.io/api/nexus/v1'):
        # A shared requests.Session carries the JSON headers and, when an
        # auth token is supplied, the Authorization header for every call.
        self.endpoint = endpoint
        self.session = requests.Session()
        self.session.headers = {
            'content-type': 'application/json',
            'accept': 'application/json',
        }
        if auth_token:
            self.session.headers.update({'Authorization': 'Token {0}'.format(auth_token)})
    def get(self, method, params=None):
        # Issue a GET against the API; raises requests.HTTPError on 4xx/5xx.
        r = self.session.get(self.endpoint + method, params=params)
        r.raise_for_status()
        return r
    def post(self, method, params, headers=None):
        # Issue a POST with a JSON-encoded body; raises on HTTP error status.
        r = self.session.post(self.endpoint + method, data=json.dumps(params), headers=headers)
        r.raise_for_status()
        return r
    def get_paginator(self, method):
        """
        Returns a Paginator instance to use for handling API pagination.
        """
        method = method.lower()
        if self._can_paginate(method):
            return Paginator(self, method)
        else:
            raise NoPaginatorError('Cannot paginate {} method'.format(method))
    def _can_paginate(self, method):
        """
        Basic check so get_paginator can reject non-paginatable methods.
        """
        if method in ['ip_blacklist', 'domain_blacklist']:
            return True
        else:
            return False
    def ip_lookup(self, ip_addr):
        # Reputation summary for a single IP address.
        r = self.get('/ip/' + ip_addr)
        return json.loads(r.text)
    def ip_events(self, ip_addr):
        # Security events recorded for the IP.
        r = self.get('/ip/' + ip_addr + '/events')
        return json.loads(r.text)
    def ip_domains(self, ip_addr):
        # Domains associated with the IP.
        r = self.get('/ip/' + ip_addr + '/domains')
        return json.loads(r.text)
    def ip_urls(self, ip_addr):
        # URLs associated with the IP.
        r = self.get('/ip/' + ip_addr + '/urls')
        return json.loads(r.text)
    def domain_lookup(self, name):
        # Reputation summary for a domain name.
        r = self.get('/domain/' + name)
        return json.loads(r.text)
    def url_lookup(self, location):
        # The URL is embedded in the path, so it must be percent-encoded.
        r = self.get('/url/' + quote_plus(location))
        return json.loads(r.text)
    def ip_blacklist(self, tag, days=1, limit=10, offset=0):
        ''' supported tags: malware, botnet, spam, phishing, dnsbl, blacklist '''
        r = self.get('/blacklist/ip/' + tag + '/?days=%d&limit=%d&offset=%d' %(days,limit,offset))
        return json.loads(r.text)
    def domain_blacklist(self, tag, days=1, limit=10, offset=0):
        ''' supported tags: malware, botnet, spam, phishing, dnsbl, blacklist '''
        r = self.get('/blacklist/domain/' + tag + '/?days=%d&limit=%d&offset=%d' %(days,limit,offset))
        return json.loads(r.text)
class Paginator(object):
    """
    This class uses generators to provide an iterable object for performing
    recursive API calls when a result has been paginated.
    """
    def __init__(self, cymon, method):
        # `cymon` is the API client; `method` names one of its paginatable
        # methods (see Cymon._can_paginate).
        self.cymon = cymon
        self.method = method
    def paginate(self, *args, **kwargs):
        """
        Use the Cymon client object to make successive API calls, yielding
        one page of results per iteration until 'next' is exhausted.
        """
        has_next = False
        method_to_call = getattr(self.cymon, self.method)
        result = method_to_call(limit=100, *args, **kwargs)
        if result['next'] is not None:
            print result['next']
            has_next = True
        yield result['results'] # initial API call; later pages follow 'next'
        while has_next:
            resp = requests.get(result['next'])
            result = json.loads(resp.text)
            if result['next'] is None:
                has_next = False
            yield result['results']
class NoPaginatorError(Exception):
    """Raised when pagination is requested for an unsupported API method."""
| true |
c547034b05b025ce3754c748561ce214ce02fce0 | Python | ehababdelghany/Easy-Recharge | /Server Side (API +Image Processing Module)/processing.py | UTF-8 | 3,461 | 2.734375 | 3 | [] | no_license | import cv2
import numpy as np
import pytesseract
import matplotlib.pyplot as plt
from PIL import Image
import string
import re
# Recognize text with tesseract for python
# this line for windows only
# we use linux as a server ,so will comment it
# pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
# Path of working folder on Disk
src_path = r"D:/Faculty" # source of images
def get_string(img_path):
    """
    OCR the image at `img_path` with Tesseract (eng+ara) and return the text.

    NOTE(review): the OpenCV preprocessing below (grayscale, erosion,
    threshold) is computed but never fed to Tesseract -- image_to_string
    reads the original file path directly.
    """
    # Read image with opencv
    # print("soka1")
    img = cv2.imread(img_path)
    # print("soka")
    # Convert to gray
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # print("sssss")
    # Apply dilation and erosion to remove some noise
    #kernel = np.ones((2, 2), np.uint8)
    kernel =cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
    #img = cv2.dilate(img, kernel, iterations=1)
    img_erode = cv2.erode(img, kernel, iterations=1)
    # Write image after removed noise
    #plt.imshow(img1)
    #cv2.imshow('removed_noise.png',img_erode)
    #cv2.waitKey(0)
    # Apply threshold to get image with only black and white
    (thresh,img_threshold) = cv2.threshold(img, 117, 255, cv2.THRESH_BINARY)
    #img_threshold = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 33, 2)
    #cv2.imshow('image_threshold.png',img_threshold)
    #cv2.waitKey(0)
    # Write the image after apply opencv to do some ...
    #cv2.imwrite(src_path + "thres.png", img)
    # Recognize text with tesseract for python
    result = pytesseract.image_to_string(img_path,lang='eng+ara')
    return result
    # print ("------ Done -------")
# Function to extract the recharge-card number from the given string
def Number(str):
    """Extract the first recharge-card number found in *str*.

    Matches either 15+ consecutive digits, or four groups of four digits
    separated by '-'/'|'/'"'/' ' followed by three or more trailing digits.
    Returns the first match, or '' when nothing matches.

    The parameter keeps its original name ``str`` for backward compatibility
    even though it shadows the builtin.
    """
    array = re.findall(
        r'[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]+|[0-9][0-9][0-9][0-9][-|" "][0-9][0-9][0-9][0-9][-|" "][0-9][0-9][0-9][0-9][-|" "][0-9][0-9][0-9]+',
        str)
    # The old branching (equal/unequal first two matches, dead else branch)
    # always reduced to "first match, or empty string when there is none".
    return array[0] if array else ''
def getCardNO(imageName):
    """
    OCR the image, extract the recharge-card number and return it as int.
    Strips '-' and ' ' separators first. Raises ValueError when no number
    is found (int('') fails).
    """
    # imagePath = "../" + imageName
    str = get_string(imageName)
    array = Number(str)
    array = array.replace("-", "").replace(" ", "")
    x = int(array)
    # print(x)
    return (x)
def NameC(imageName):
    """
    Guess the telecom operator from the OCR'd text of the card image.
    Returns "etisalat", "vodafone", "orange" or "we", or a fallback message
    when no known operator keyword (Latin or Arabic) is recognised.
    """
    # imagePath = "../" + imageName
    imagePath = get_string(imageName)
    ss = "i can't detect the sim card :("
    if (re.findall(r'etisalat|اتصالات|Etisalat|"*إتصالات|"*556', imagePath)):
        ss = "etisalat"
    elif (re.findall(r'فودافون|VODAFONE|vodafone|كارت لفرحة|فونلون|"*585*"|Vodafone', imagePath)):
        ss = "vodafone"
    elif (re.findall(r'اورانج|orange|Orange|اوراتج|اورائج', imagePath)):
        ss = "orange"
    elif (re.findall(r'we|WE', imagePath)):
        ss = "we"
    return ss
# getCardNO("media/26.jpg")
# ss = NameC("media/26.jpg")
#ss = ss.translate({ord(c): None for c in string.whitespace})
#ss= ss.replace("-", "")
# print("my company is " + (ss))
# found = True # Not necessary
#print ('--- Start recognize text from image ---')
# print (get_string("D:/Faculty/26.jpg"))
#print (Name(get_string("C:/Program Files/Tesseract-OCR/Temp/8.png")))
#print ("------ Done -------") | true |
4fd45fa9ec406e0a3451d3dde945bd00c1994c11 | Python | blasio99/pac-man-search | /search.py | UTF-8 | 8,439 | 3.484375 | 3 | [] | no_license | # search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
class CustomNode:
    """Minimal named node carrying a cost, used for frontier experiments."""

    def __init__(self, name, cost):
        # Store the constructor arguments verbatim.
        self.name = name
        self.cost = cost

    def getName(self):
        """Return the node's name."""
        return self.name

    def getCost(self):
        """Return the node's cost."""
        return self.cost
class Node:
    """Search-tree node: a state plus how it was reached and at what cost."""

    def __init__(self, state, parent, action, path_cost):
        self.state = state
        self.parent = parent
        self.action = action
        self.path_cost = path_cost

    def getState(self):
        """Return the problem state stored in this node."""
        return self.state

    def getParent(self):
        """Return the parent state/node this node was expanded from."""
        return self.parent

    def getAction(self):
        """Return the action that produced this node."""
        return self.action

    def getPathCost(self):
        """Return the cost associated with reaching this node."""
        return self.path_cost
class SearchProblem:
    """
    This class outlines the structure of a search problem, but doesn't implement
    any of the methods (in object-oriented terminology: an abstract class).

    You do not need to change anything in this class, ever.
    """
    # Every method delegates to util.raiseNotDefined(), so instantiating and
    # calling this base class directly always raises.
    def getStartState(self):
        """
        Returns the start state for the search problem.
        """
        util.raiseNotDefined()
    def isGoalState(self, state):
        """
        state: Search state
        Returns True if and only if the state is a valid goal state.
        """
        util.raiseNotDefined()
    def getSuccessors(self, state):
        """
        state: Search state
        For a given state, this should return a list of triples, (successor,
        action, stepCost), where 'successor' is a successor to the current
        state, 'action' is the action required to get there, and 'stepCost' is
        the incremental cost of expanding to that successor.
        """
        util.raiseNotDefined()
    def getCostOfActions(self, actions):
        """
        actions: A list of actions to take
        This method returns the total cost of a particular sequence of actions.
        The sequence must be composed of legal moves.
        """
        util.raiseNotDefined()
def tinyMazeSearch(problem):
    """
    Returns a sequence of moves that solves tinyMaze. For any other maze, the
    sequence of moves will be incorrect, so only use this for tinyMaze.
    """
    from game import Directions
    south, west = Directions.SOUTH, Directions.WEST
    # Hard-coded route through the tinyMaze layout.
    return [south, south, west, south, west, west, south, west]
def depthFirstSearch(problem):
    """
    Search the deepest nodes in the search tree first (graph search).

    Returns a list of actions that leads from the start state to a goal
    state, or [] if no goal can be reached.
    """
    from util import Stack

    frontier = Stack()
    visited = []
    start = problem.getStartState()
    # Each frontier entry pairs a Node with the action list that reaches it.
    frontier.push((Node(start, None, None, None), []))
    while not frontier.isEmpty():
        current_node, solution = frontier.pop()
        state = current_node.getState()
        # A state can be pushed several times before it is first expanded;
        # skip it if it has already been expanded (the original re-expanded
        # such duplicates).
        if state in visited:
            continue
        visited.append(state)
        if problem.isGoalState(state):
            return solution
        for next_state, action, cost in problem.getSuccessors(state):
            if next_state not in visited:
                frontier.push((Node(next_state, state, action, cost), solution + [action]))
    return []
def breadthFirstSearch(problem):
    """
    Search the shallowest nodes in the search tree first (graph search).

    Returns the list of actions reaching the closest goal, or [] when the
    goal is unreachable (the original returned None implicitly in that case).
    """
    start = problem.getStartState()
    if problem.isGoalState(start):
        return []
    frontier = util.Queue()
    visited = []
    # Each frontier entry pairs a Node with the action list that reaches it.
    frontier.push((Node(start, None, None, None), []))
    while not frontier.isEmpty():
        queued_node, solution = frontier.pop()
        state = queued_node.getState()
        if state not in visited:
            visited.append(state)
            if problem.isGoalState(state):
                return solution
            for next_state, action, cost in problem.getSuccessors(state):
                frontier.push((Node(next_state, state, action, cost), solution + [action]))
    # Frontier exhausted without reaching a goal.
    return []
def uniformCostSearch(problem):
    """
    Search the node of least total path cost first.

    Returns the cheapest action sequence to a goal, or [] when no goal is
    reachable (the original returned None implicitly in that case).
    """
    start = problem.getStartState()
    frontier = util.PriorityQueue()
    visited = []
    # The Node's action slot holds the whole action list; path_cost holds g(n).
    frontier.push(Node(start, None, [], 0), 0)
    while not frontier.isEmpty():
        current = frontier.pop()
        state = current.getState()
        if state not in visited:
            visited.append(state)
            if problem.isGoalState(state):
                return current.getAction()
            for next_state, action, step_cost in problem.getSuccessors(state):
                new_cost = current.getPathCost() + step_cost
                frontier.push(Node(next_state, state, current.getAction() + [action], new_cost), new_cost)
    return []
def nullHeuristic(state, problem=None):
    """
    A heuristic function estimates the cost from the current state to the nearest
    goal in the provided SearchProblem. This heuristic is trivial.
    """
    # Zero everywhere: with this heuristic A* behaves like uniform cost search.
    return 0
def aStarSearch(problem, heuristic=nullHeuristic):
    """
    Search the node that has the lowest combined cost and heuristic first.

    Returns the action sequence to a goal, or [] when no goal is reachable
    (the original fell through to a leftover util.raiseNotDefined() there).
    """
    start = problem.getStartState()
    frontier = util.PriorityQueue()
    visited = []
    # The Node's action slot holds the whole action list; path_cost holds g(n).
    frontier.push(Node(start, None, [], 0), 0)
    while not frontier.isEmpty():
        current = frontier.pop()
        state = current.getState()
        if state not in visited:
            visited.append(state)
            if problem.isGoalState(state):
                return current.getAction()
            for next_state, action, step_cost in problem.getSuccessors(state):
                new_cost = current.getPathCost() + step_cost
                # f(n) = g(n) + h(n): order the frontier by cost-so-far plus
                # the heuristic estimate to the goal.
                priority = new_cost + heuristic(next_state, problem)
                frontier.push(Node(next_state, state, current.getAction() + [action], new_cost), priority)
    return []
def randomSearch(problem):
    """
    Walk randomly from the start state until a goal state is reached.
    Returns (and prints) the list of actions taken.
    """
    import random
    solution = []
    # Bug fix: the original initialised `current` to
    # problem.getSuccessors(problem.getStartState()), i.e. a successor LIST,
    # and then passed that list to isGoalState. Start from the start state.
    current = problem.getStartState()
    while not problem.isGoalState(current):
        successors = problem.getSuccessors(current)
        next_state = successors[random.randint(0, len(successors) - 1)]
        solution.append(next_state[1])
        current = next_state[0]
    print("Solution: ", solution)
    return solution
# Abbreviations: short aliases so the search functions can be selected by name.
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
| true |
c41e3a43a5ccf62143c4bb4d34488d3e446a308f | Python | niphadkarneha/SummerCamp | /Python Scripts/Strings.py | UTF-8 | 1,591 | 4.75 | 5 | [] | no_license | # Strings
'spam eggs' # single quotes
'doesn\'t' # use \' to escape the single quote
"doesn't" # ...or use double quotes instead
'"Yes," he said.'
"\"Yes,\" he said."
'"Isn\'t," she said.'
s = 'First line.\nSecond line.' # \n means newline
s # without print(), \n is included in the output
print(s) # with print(), \n produces a new line
# If you don’t want characters prefaced by \ to be interpreted as special characters,
# you can use raw strings by adding an r before the first quote
print('C:\some\name') # here \n means newline!
print(r'C:\some\name') # note the r before the quote
# Triple-quotes:
""" to span multiple lines
stuff
more stuff
even more stuff
"""
# Repeat using *
# Concatenation using +
3 * 'un' + 'ium'
# Indexing strings
word = 'Python'
word[0] # character in position 0
word[5] # character in position 5
# Negative index starts counting from the right
word[-1] # last character
word[-2] # second-last character
word[-6]
# Slicing gives a substring
word[0:2] # characters from position 0 (included) to 2 (excluded) 'Py'
word[2:5] # characters from position 2 (included) to 5 (excluded) 'tho'
#An omitted first index defaults to zero
#An omitted second index defaults to the size of the string
word[:2] # character from the beginning to position 2 (excluded)
word[4:] # characters from position 4 (included) to the end
word[-2:] # characters from the second-last (included) to the end
| true |
5069b83ec1ce44e98afb38f091ca903a969f1a75 | Python | fgabel/novelty_detection | /models/example_model.py | UTF-8 | 1,531 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | from base.base_model import BaseModel
import tensorflow as tf
class ExampleModel(BaseModel):
    """Dense softmax classifier over 10 classes, built with the TF1 graph API."""
    def __init__(self, config):
        super(ExampleModel, self).__init__(config)
        self.build_model()
        self.init_saver()
    def build_model(self):
        # Placeholders for the training flag, the input batch and the
        # one-hot labels (10 classes).
        self.is_training = tf.placeholder(tf.bool)
        self.x = tf.placeholder(tf.float32, shape=[None] + self.config.state_size)
        self.y = tf.placeholder(tf.float32, shape=[None, 10])
        # network architecture
        d1 = tf.layers.dense(self.x, 512, activation=tf.nn.relu, name="dense1")
        d2 = tf.layers.dense(d1, 10, name="dense2")
        with tf.name_scope("loss"):
            self.cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=d2))
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                # Run pending UPDATE_OPS before each optimizer step.
                self.train_step = tf.train.AdamOptimizer(self.config.learning_rate).minimize(self.cross_entropy,
                                                                                             global_step=self.global_step_tensor)
            correct_prediction = tf.equal(tf.argmax(d2, 1), tf.argmax(self.y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    def init_saver(self):
        # here you initialize the tensorflow saver that will be used in saving the checkpoints.
        self.saver = tf.train.Saver(max_to_keep=self.config.max_to_keep)
| true |
4960f848d423d34502bb11e40baf8b3d57e905cb | Python | YaleDHLab/lab-workshops | /machine-learning/helpers.py | UTF-8 | 7,948 | 3.390625 | 3 | [
"MIT"
] | permissive | from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import ListedColormap
import numpy as np
import matplotlib.pyplot as plt
import warnings
# quiet scipy future warnings
warnings.filterwarnings('ignore')
# decision boundary grid colors (light fill, one per class region)
grid_colors = ListedColormap([
  '#ff8585',
  '#6db4f3',
])
# decision boundary point colors (saturated, one per class)
point_colors = ListedColormap([
  '#ff0000',
  '#0000ff',
])
def plot_decision_boundary(clf, X, labels, margin=0.2, mesh_unit=0.01, proba=False):
'''
Plot the classification decision for each point in a quantized grid
From: http://scikit-learn.org/stable/auto_examples/neighbors/plot_classification.html
@args:
{class} clf: a class that has a method .predict() that takes as input
an array of k dimensional values and returns an array with shape n,1
where n = the number of observations in the input array. This returned
array of values should contain class predictions--one per input element.
nb: if proba=True, the class should contain a method `.decision_function()`
that should return an array with shape n,1 that contains probability
values for a given class prediction. See scikit classifiers for examples
of both methods.
@returns:
void
'''
# find the min value in the first column and subtract `margin`
x_min = X[:, 0].min() - margin
# find the max value in the first column and add `margin`
x_max = X[:, 0].max() + margin
# find the minimum value in the second column and subtract `margin`
y_min = X[:, 1].min() - margin
# find the minimum value in the second column and add `margin`
y_max = X[:, 1].max() + margin
# get a list of values from min to max, counting by `mesh_unit`
x_range = np.arange(x_min, x_max, mesh_unit)
y_range = np.arange(y_min, y_max, mesh_unit)
# create a dense grid with one row for each value in x_range and
# one column for each value in y_range
xx, yy = np.meshgrid(x_range, y_range)
# `np.ravel` flattens a multidimensional array to a single dimension.
# `np.c_` makes its first and second args the first and second columns in a 2D
# array, so np.c_[xx.ravel(), yy.ravel()] has one 2D observation per grid unit
grid_vals = np.c_[xx.ravel(), yy.ravel()]
# plot continuous predictions if proba == True, else discrete classifications
if proba:
# some classifiers use decision_function to return continuous probabilities
# while others use predict_proba
if hasattr(clf, 'decision_function'):
Z = clf.decision_function(grid_vals)
else:
Z = clf.predict_proba(grid_vals)[:,1]
else:
Z = clf.predict(grid_vals)
# reshape Z (a 1D array of classification decisions) to a 2D x by y grid
Z = Z.reshape(xx.shape)
# plot the background decision boundary
cmap = plt.cm.RdBu if proba else grid_colors
plt.contourf(xx, yy, Z, cmap=cmap, alpha=0.8)
# plot the observations
plt.scatter(X[:,0], X[:,1], s=30, c=labels, cmap=point_colors, edgecolors='#000000')
def plot_distance(arr):
  '''
  Given `arr` with two arrays, each of two or three elements,
  plot the points at positions `arr[0]` and `arr[1]`
  and plot lines between those two points
  @args:
    arr [arr]: an array composed of 2d or 3d arrays
  @returns:
    void
  @raises:
    ValueError: if the points are not 2 or 3 dimensional
  '''
  dims = len(arr[0])
  if dims == 2:
    plot_distance_2d(arr)
  elif dims == 3:
    plot_distance_3d(arr)
  else:
    # Previously any other dimensionality was silently ignored; fail loudly
    # instead so callers notice unsupported input.
    raise ValueError('plot_distance supports 2d or 3d points, got {}d'.format(dims))
def plot_distance_2d(arr):
'''
Given `arr` with two 2-element arrays, plot the points
at positions `arr[0]` and `arr[1]` and plot lines between
those two points
@args:
arr [arr]: an array composed of 2d arrays
@returns:
void
'''
a, b = arr
df = np.array([a, b])
# point data: pattern for drawing points is:
# ax.scatter(x_vals, y_vals, z_vals)
plt.scatter(df[:,0], df[:,1], s=100, c=['blue', 'orange'], alpha=1.0, edgecolors='#000000')
# add point labels
plt.text(0.05, 0.05, 'a', fontsize=20, horizontalalignment='center')
plt.text(0.95, 0.95, 'b', fontsize=20, horizontalalignment='center')
# line data: pattern for drawing lines is:
# ax.plot([x_start, x_end], [y_start, y_end], zs=[z_start, z_end])
plt.plot( [a[0], b[0]], [a[1], a[1]], c='red' ) # x-line
plt.plot( [b[0], b[0]], [a[1], b[1]], c='purple' ) # y-line
plt.plot( [a[0], b[0]], [a[1], b[1]], c='gray', linestyle=':' ) # direct line
# add axis labels
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
def plot_distance_3d(arr):
'''
Given `arr` with two 3-element arrays, plot the points
at positions `arr[0]` and `arr[1]` and plot lines between
those two points.
@args:
arr [arr]: an array composed of 3d arrays
@returns:
void
'''
a, b = arr
df = np.array([a, b])
fig = plt.figure()
ax = fig.gca(projection='3d')
# point data: pattern for drawing points is:
# ax.scatter(x_vals, y_vals, z_vals)
ax.scatter(df[:,0], df[:,1], df[:,2], s=100, c=['blue', 'orange'], alpha=1.0)
# label points
ax.text(0.1, 0.1, 0, 'a', fontsize=20, horizontalalignment='center')
ax.text(0.9, 0.9, 1.0, 'b', fontsize=20, horizontalalignment='center')
# line data: pattern for drawing lines is:
# ax.plot([x_start, x_end], [y_start, y_end], zs=[z_start, z_end])
ax.plot( [a[0], b[0]], [a[0], a[0]], zs=[a[0], a[0]], c='red' ) # x-line
ax.plot( [b[0], b[0]], [a[0], b[0]], zs=[a[0], a[0]], c='purple' ) # y-line
ax.plot( [b[0], b[0]], [b[0], b[0]], zs=[a[0], b[0]], c='green' ) # z-line
ax.plot( [a[0], b[0]], [a[0], b[0]], zs=[a[0], b[0]], c='gray', linestyle=':' ) # direct line
# add axis labels
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
def plot_iforest_decision_boundary(*args, **kwargs):
'''
Create and display the decision boundary for an isolation forest.
'''
clf = args[0] # the isolation forest classifier
X = args[1] # the input array of observations used to train the classifier
new_vals = args[2] # the array of observations classified by the classifier
result = args[3] # the classification results from the classifier
margin = kwargs.get('margin', 6) # margin around the plot
mesh = kwargs.get('grid_x', 0.5) # the size of each colormesh grid unit
x_lims = kwargs.get('x_lims', (-13, 12)) # the min max x values to display
y_lims = kwargs.get('y_lims', (-13, 5)) # the min max y values to display
# get the x and y grid domains
x_domain = [ X[:, 0].min() - margin, X[:, 0].max() + margin ]
y_domain = [ X[:, 1].min() - margin, X[:, 1].max() + margin ]
# get a list of values from min to max, counting by `mesh`
x_range = np.arange(x_domain[0], x_domain[1], mesh)
y_range = np.arange(y_domain[0], y_domain[1], mesh)
# create the data with which to color the background grid
xx, yy = np.meshgrid(x_range, y_range)
# classify each unit of the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
# reshape Z into a 2D grid
Z = Z.reshape(xx.shape)
# fill in the grid values
plt.contourf(xx, yy, Z, cmap=plt.cm.YlGn)
# add the training points; edgecolors='k' is short for 'edgecolors'='black'
train_p = plt.scatter(X[:,0], X[:,1], c='green', edgecolors='k', alpha=0.4)
# separate new_vals into outliers and "inliers" based on result
outliers = []
inliers = []
for idx, i in enumerate(result):
if result[idx] == 1:
inliers.append(new_vals[idx])
else:
outliers.append(new_vals[idx])
outliers = np.array(outliers)
inliers = np.array(inliers)
# plot the inliers and outliers
in_p = plt.scatter(inliers[:,0], inliers[:,1], c='white', edgecolors='k')
out_p = plt.scatter(outliers[:,0], outliers[:,1], c='red', edgecolors='k')
# limit the axis ranges
plt.xlim(x_lims)
plt.ylim(y_lims)
# add a title to the plot
plt.title('Isolation Forests Decision Boundary')
# add a legend to the plot
plt.legend([train_p, in_p, out_p], [
'training observation',
'classified as non-outlier',
'classified as outlier',
], loc=[0.025, 0.05], framealpha=0.97)
plt.show() | true |
a9b5b9407acb601bced073886275a68e712fa2bd | Python | furioustushar93/Face_Recognition | /recogniotion_root.py | UTF-8 | 2,119 | 2.640625 | 3 | [] | no_license | import numpy as np
import cv2
import os
def distance(x1, x2):
    """Euclidean distance between two equal-length numeric vectors."""
    diff = x1 - x2
    return np.sqrt(sum(diff ** 2))
def knn(train, test, k=5):
    """Classify *test* with a k-nearest-neighbour majority vote.

    *train* is a 2-D array whose rows are [feature..., label]; *test* is a
    1-D feature vector.  Returns the label occurring most often among the k
    closest rows (np.unique breaks count ties toward the smallest label).
    """
    # Pair each training row's distance-to-test with that row's label.
    scored = [
        (distance(test, train[idx, :-1]), train[idx, -1])
        for idx in range(train.shape[0])
    ]
    # Keep only the k closest rows, then extract their labels.
    nearest = sorted(scored, key=lambda pair: pair[0])[:k]
    labels = np.array(nearest)[:, -1]
    # Majority vote over the neighbour labels.
    values, counts = np.unique(labels, return_counts=True)
    return values[np.argmax(counts)]
# --- Real-time face recognition demo: webcam + Haar cascade + custom kNN ---
cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
skip = 0
data_path = './data/'
face_data = []      # per-person arrays of flattened face crops
face_section = 0
class_id = 0        # numeric label assigned to each training file
label = []
names = {}          # class_id -> person name (file name without .npy)
# Data Preparation: one .npy file of face crops per person under ./data/
for fx in os.listdir(data_path):
    if fx.endswith('.npy'):
        names[class_id] = fx[:-4]
        dataitem = np.load(data_path+fx)
        face_data.append(dataitem)
        target = class_id*np.ones((dataitem.shape[0],))
        label.append(target)
        # Bug fix: class_id was never advanced, so every person received
        # label 0 and names{} only ever mapped 0 to the first file --
        # recognition could only ever predict that one name.
        class_id += 1
face_dataset = np.concatenate(face_data, axis = 0)
face_label = np.concatenate(label, axis = 0).reshape((-1,1))
# Rows are [flattened pixels..., label] -- the layout knn() expects.
train_set = np.concatenate((face_dataset,face_label),axis = 1)
while True:
    ret,frame = cap.read()
    if ret == False:
        continue
    faces = face_cascade.detectMultiScale(frame,1.3,5)
    for face in faces:
        x,y,w,h = face
        # Pad the detected box a little before cropping and resizing.
        offset = 10
        face_section = frame[y-offset:y+h+offset, x-offset:x+w+offset]
        face_section = cv2.resize(face_section,(100,100))
        out = knn(train_set,face_section.flatten())
        pred_name = names[int(out)]
        cv2.putText(frame,pred_name,(x,y-10),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),2,cv2.LINE_AA)
        cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,255),2)
    cv2.imshow("faces",frame)
    # Press 'c' to quit.
    key_pressed = cv2.waitKey(1) & 0xFF
    if key_pressed == ord('c'):
        break
cap.release()
cv2.destroyAllWindows() | true |
72df85ffecec103d4d1eae9d3764b0a904e886d9 | Python | SuZhuo20/basic | /stu/day18_02_08/python_basic/SetStu.py | UTF-8 | 5,166 | 4.21875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# @File : SetStu.py
# @Author: Zhuozhuo.Geng
# @Date : 2018/2/8
# @Desc  : Common Python collections: list, tuple, dict, set
# Difference between mutable and immutable objects:
# For an immutable object, calling any of its own methods never changes the object's content; instead those methods create and return new objects, which guarantees the immutable object itself stays immutable forever.
class SetStu(object):
    # list []:
    # Traits:
    #     1. mutable, ordered sequence
    # Pros:
    #     1.
    # Cons:
    #     1. slow lookups
    # Typical use cases:
    #     1.
    def listDemo(self):
        """Demonstrate basic list operations: len, indexing, append, insert, pop."""
        print('list 下表从0开始....................................................................................')
        listDemo = [1, 'A', True, [1, 'B'], 12.3]
        # len(list) returns the number of elements in the list
        print('len(list):', len(listDemo))
        # fetch the element at a given index (negative indices count from the end)
        print('listDemo[1]:', listDemo[1])
        print('listDemo[-1]:', listDemo[-1])
        # append an element at the end of the list
        print('list.append():')
        listDemo.append('appendEle')
        # insert an element at the given position
        print('list.insert(index, value):')
        listDemo.insert(2, 'new2')
        print(listDemo)
        # remove an element; with no argument pop() removes the last element
        print('list.pop([index]):')
        listDemo.pop()
        listDemo.pop(3)
        # list.sort() sorts the list in place (and returns None)
        # print(listDemo.sort())
    # tuple ()
    # Traits:
    #     1. immutable ordered sequence; the elements are fixed once the tuple is defined
    # Pros:
    #     1.
    # Cons:
    #     1.
    # Typical use cases:
    #     1.
    # Differences from list:
    #     1. a tuple's elements must be known when it is defined and cannot be
    #        changed afterwards; tuple has none of list's append/insert methods
    def tupleDemo(self):
        """Demonstrate tuple immutability (a mutable element such as a nested
        list can still be mutated in place)."""
        print('tuple demo....................................................................................')
        classmates = ('zhangsan', 'lisi', 'wangwu')
        print('classmates[0]:', classmates[0])
        classList = ('zhqng', ['wangwu', 'lisi'])
        classList[1].append(True)
        print(classList)
    # dict {}
    # Traits:
    #     0. unordered
    #     1. key:value pairs, like Java's Map; each entry's slot is computed
    #        from the key with a hash function, so keys must be immutable
    #        objects (e.g. str, int)
    #     2. each key maps to exactly one value; storing several values under
    #        the same key keeps only the last one
    # Pros:
    #     1. extremely fast lookups
    # Cons:
    #     1. consumes a lot of memory
    # Typical use cases:
    #     1.
    # Compared with list:
    #     1. dict lookup/insert stays fast no matter how many keys there are
    #     2. dict needs a lot of memory, wasting space
    #     1. list lookup/insert gets slower as the list grows
    #     2. list uses little memory, wasting less space
    def dictDemo(self):
        """Demonstrate dict access, get() with a default, membership, pop and isinstance."""
        print('dict demo....................................................................................')
        dictDemo = {'name':'suzhuo', 'age':24}
        print(dictDemo)
        # look up a value by key
        print('dictDemo[\'name\']:', dictDemo['name'])
        # get() returns the default value when the key is missing
        print('dictDemo.get(\'key\', [defValue]):', dictDemo.get('age', -1))
        # test whether a key exists in the dict
        print('\'name\' in dictDemo:', 'name' in dictDemo)
        # pop(key) removes the key together with its value and returns that value
        print('dictDemo.pop(\'name\'):', dictDemo.pop('name'))
        # check whether a variable is of type dict
        print('isinstance(dictDemo, dict):', isinstance(dictDemo, dict))
    # set ([])
    # Traits:
    #     0. unordered
    #     1. like a dict, but only the keys are stored
    #     2. keys cannot repeat; hashing is used to detect duplicates, so the
    #        stored values must be immutable objects
    # Pros:
    #     1.
    # Cons:
    #     1.
    # Typical use cases:
    #     1. behaves like the mathematical notion of an unordered collection
    #        of distinct elements
    def setDemo(self):
        """Demonstrate set construction, add/remove (both return None) and the & / | operators."""
        print('set demo....................................................................................')
        # a set is initialised from a list of elements
        setDemo = set([1, 'A'])
        setDemo1 = set([2, 'b'])
        # set.add(ele) inserts an element (in place; the printed value is None)
        print('setDemo.add(\'b\'):', setDemo.add('b'))
        # set.remove(key) deletes an element (in place; the printed value is None)
        print('setDemo.remove(key):', setDemo.remove(1))
        # intersection: elements common to both sets
        print('setDemo & setDemo1:', (setDemo & setDemo1))
        # union: all distinct elements of both sets
        print('setDemo | setDemo1:', (setDemo | setDemo1))
        print(setDemo)
if __name__ == '__main__':
    # Demo driver: run each collection walkthrough in turn.
    setStu = SetStu()
    setStu.listDemo()
    setStu.tupleDemo()
    setStu.dictDemo()
setStu.setDemo() | true |
1590ca605e7ba4e47e3cc01e76714f34a1a271f6 | Python | daniel-reich/ubiquitous-fiesta | /JBkfqYW4iYwmgvwTf_22.py | UTF-8 | 78 | 3.15625 | 3 | [] | no_license |
def is_prime(num):
    """Return True when *num* is a prime number.

    Trial division only needs to test divisors up to sqrt(num): any factor
    above the square root pairs with one below it.  This reduces the
    original O(n) scan to O(sqrt(n)).
    """
    if num < 2:
        return False
    return all(num % i for i in range(2, int(num ** 0.5) + 1))
| true |
34100602b2f012fdd5afaf713ffe9fbba5dd6190 | Python | cljacoby/leetcode | /src/pacific-atlantic-water-flow/pacific-atlantic-water-flow.py | UTF-8 | 2,169 | 3.375 | 3 | [
"MIT"
] | permissive | # https://leetcode.com/problems/pacific-atlantic-water-flow
class Solution(object):
    def pacificAtlantic(self, heights):
        """Return the [row, col] cells from which rain water can reach BOTH
        the Pacific (top/left edges) and the Atlantic (bottom/right edges).

        Water flows from a cell to any 4-neighbour of equal or lower height,
        so we search in reverse: climb uphill from each ocean's border cells
        and intersect the two reachable sets.  This replaces the original
        ``step()`` memoisation, which cached results that were only valid for
        one particular visited-set/last-height context (path-dependent) and
        could therefore return wrong answers; the debug ``print`` per cell is
        also removed.

        :type heights: List[List[int]]
        :rtype: List[List[int]]
        """
        if not heights or not heights[0]:
            return []
        # Kept as instance attributes for backward compatibility with the
        # original implementation, which exposed them on ``self``.
        self.heights = heights
        self.rows = len(heights)
        self.cols = len(heights[0])
        pacific = self._reachable(
            [(i, 0) for i in range(self.rows)] + [(0, j) for j in range(self.cols)]
        )
        atlantic = self._reachable(
            [(i, self.cols - 1) for i in range(self.rows)]
            + [(self.rows - 1, j) for j in range(self.cols)]
        )
        # Row-major order of the qualifying cells.
        return [[i, j] for (i, j) in sorted(pacific & atlantic)]

    def _reachable(self, border):
        """Set of cells that can drain to the given ocean border (iterative
        DFS moving only to neighbours of equal or greater height)."""
        seen = set(border)
        stack = list(border)
        while stack:
            i, j = stack.pop()
            for ni, nj in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):
                if (0 <= ni < self.rows and 0 <= nj < self.cols
                        and (ni, nj) not in seen
                        and self.heights[ni][nj] >= self.heights[i][j]):
                    seen.add((ni, nj))
                    stack.append((ni, nj))
        return seen
if __name__ == "__main__":
    # Self-check: each entry pairs an input grid with its expected cell list.
    sol = Solution()
    tests = [
        (
            [[1,2,2,3,5],[3,2,3,4,4],[2,4,5,3,1],[6,7,1,4,5],[5,1,1,2,4]],
            [[0,4],[1,3],[1,4],[2,2],[3,0],[3,1],[4,0]],
        ),
        (
            [[10,10,10],[10,1,10],[10,10,10]],
            [[0,0],[0,1],[0,2],[1,0],[1,2],[2,0],[2,1],[2,2]],
        )
    ]
    for (heights, solution) in tests:
        result = sol.pacificAtlantic(heights)
        # Sort both sides so the comparison is order-insensitive.
        solution, result = sorted(solution), sorted(result)
        # print(f"result {result} != solution {solution}")
        assert solution == result, \
            f"result {result} != solution {solution}"
    print("✅ All tests passed")
| true |
7985afced40aaf7db370c007184656c0ba21a9fd | Python | fengzhongzhu1621/xTool | /tests/decorators/test_signal.py | UTF-8 | 1,456 | 2.53125 | 3 | [
"MIT",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause",
"Python-2.0"
] | permissive | # -*- coding: utf-8 -*-
import pytest
from xTool.decorators.signal import Signal, receiver
@pytest.fixture
def signal():
    """Provide a fresh Signal instance for each test."""
    return Signal()
def test_connect_to_frozen_signal(signal):
    """Connecting a receiver to a frozen signal must raise RuntimeError."""
    signal.freeze()
    async def cb():
        ...
    with pytest.raises(RuntimeError):
        signal.connect(cb)
@pytest.mark.parametrize(
    "callback", [
        None,
        max,
        lambda x: x,
    ],
)
def test_wrong_callback(signal, callback):
    """connect() must reject callbacks that are not coroutine functions."""
    with pytest.raises(RuntimeError):
        signal.connect(callback)
async def test_receiver_decorator(signal):
    """@receiver connects the coroutine so that signal.call() invokes it."""
    called = False
    @receiver(signal)
    async def foo():
        nonlocal called
        called = True
    await signal.call()
    assert called
async def test_call_arguments(signal):
    """Positional and keyword arguments of call() reach the receiver unchanged."""
    received_args, received_kwargs = None, None
    @receiver(signal)
    async def foo(*args, **kwargs):
        nonlocal received_args, received_kwargs
        received_args, received_kwargs = args, kwargs
    await signal.call("foo", "bar", spam="spam")
    assert received_args == ("foo", "bar")
    assert received_kwargs == {"spam": "spam"}
async def multiple_receivers(signal):
    """Both connected receivers should run when the signal fires.

    NOTE(review): this coroutine lacks the ``test_`` prefix, so pytest does
    not collect it; rename to ``test_multiple_receivers`` to activate it.
    """
    foo_called, bar_called = False, False
    @receiver(signal)
    async def foo():
        nonlocal foo_called
        foo_called = True
    @receiver(signal)
    async def bar():
        nonlocal bar_called
        bar_called = True
    await signal.call()
    # Bug fix: ``all(foo_called, bar_called)`` raises TypeError because all()
    # takes a single iterable; assert the flags directly instead.
    assert foo_called and bar_called
| true |
21eeb9d4ebd687110bae98310f3bce44969d46d9 | Python | 01coders/100-Days-Of-Code | /python_parikshith21/Day20.py | UTF-8 | 2,892 | 4.25 | 4 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Mon May 27 14:58:14 2019
@author: Parikshith.H
"""
# Program: count the occurrences of the word 'day' in a string.
# Fixes: the original bound the sentence to the name `str`, shadowing the
# built-in str type (renamed to `sentence`), and counted with a manual loop
# (replaced by list.count).
sentence = "have a nice day enjoy the day manoj"
l = sentence.split()
print(l)
count = l.count('day')
print(count)
# =============================================================================
# #output:
# ['have', 'a', 'nice', 'day', 'enjoy', 'the', 'day', 'manoj']
# 2
# =============================================================================
# Program: print every word of a string together with its length.
str1 = "This is python class"
count = 0  # kept from the original; unused in this example
l1 = str1.split()
for word in l1:
    print(word, ":", len(word))
# =============================================================================
# #output:
# This : 4
# is : 2
# python : 6
# class : 5
# =============================================================================
# Program: print the longest word in a string.
# Fixes: the unused `length` variable is dropped, and the manual scan with the
# non-idiomatic `longest == None` test becomes max(key=len), which -- like the
# original strict `<` comparison -- keeps the first word on length ties.
str1 = "This is python class"
l1 = str1.split()
longest = max(l1, key=len) if l1 else None
print("The longest word is:", longest)
print("The length is:", len(longest))
# =============================================================================
# #output:
# The longest word is: python
# The length is: 6
# =============================================================================
#Program to create a list with user entered values
# Asks for a count first, then reads that many lines from stdin.
l = list()
n=int(input("Enter the number of elements:"))
for i in range(n):
    #l[i]=input() #error -- assigning by index into an empty list raises IndexError
    elem=input()
    l.append(elem)
# Note: input() returns str, so every element is stored as a string.
print(l)
# =============================================================================
# #output:
# Enter the number of elements:3
# 1
# 2
# 3
# ['1', '2', '3']
# =============================================================================
#Program to create a list with user entered values without for loop
# Reads lines until the sentinel word "exit" is typed.
l1=list()
while(True):
    elem=input()
    if elem=="exit":
        break
    else:
        l1.append(elem)
print(l1)
# =============================================================================
# #output:
# 1
# 2
# 3
# 4
# 5
# exit
# ['1', '2', '3', '4', '5']
# =============================================================================
#Program to find sum and average of elements in a list
# Reads numbers (stored as float) until the sentinel word "exit" is typed.
l=list()
while(True):
    elem=(input("Enter values:"))
    if elem=="exit":
        break
    else:
        l.append(float(elem))
print(l)
print("The sum is:",sum(l))
# NOTE(review): typing "exit" first leaves l empty and this line divides by zero.
print("The average is:",sum(l)/len(l))
# =============================================================================
# #output:
# Enter values:2
# Enter values:3
# Enter values:4
# Enter values:3
# Enter values:2
# Enter values:exit
# [2.0, 3.0, 4.0, 3.0, 2.0]
# The sum is: 14.0
# The average is: 2.8
# ============================================================================= | true |
683b77b5f40eea69f0f0d7e84955f580ad769d1e | Python | junyi1997/TQC_Python | /1.第一類/PYD105.py | UTF-8 | 235 | 3.6875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# Rectangle area calculation (original comment: 矩形面積計算)
# Reads height and width from stdin, then prints perimeter and area to 2 decimals.
# SECURITY NOTE: eval() on raw input executes arbitrary code; float(input())
# would be the safe choice here.
a=eval(input())
b=eval(input())
c=a*2+b*2
d=a*b
print("Height = {:.2f}".format(a))
print("Width = {:.2f}".format(b))
print("Perimeter = {:.2f}".format(c))
print("Area = {:.2f}".format(d))
| true |
7a13762f0e4aa266164b152f3410e4753a7b620c | Python | scc-usc/LAcrime | /viterbi work/RelationsWithCrime/parkingMetersDTLA.py | UTF-8 | 1,132 | 2.8125 | 3 | [] | no_license | """
LA atm locations
Author : Omkar Damle
Date : 22nd June 2017
"""
import pandas as pd
import math
from pyproj import Proj, transform
import numpy
def isInsideBox(ll_lon,ll_lat,ur_lon,ur_lat,lon,lat):
    """Return True when (lon, lat) lies inside the closed bounding box
    spanned by the lower-left (ll_*) and upper-right (ur_*) corners;
    points exactly on the boundary count as inside."""
    return ll_lon <= lon <= ur_lon and ll_lat <= lat <= ur_lat
# Load every parking-meter record; each row's 'the_geom' column holds a
# WKT-style "POINT (lon lat)" string.  (This file is Python 2: note the
# bare `print count` statement below.)
streetData = pd.read_csv('LADOT_Meters_Citywide_Dec_2015.csv')
# for speed purposes
MAX_RECORDS = 10000  # NOTE(review): unused below
LA_COORDINATES = (34.05, -118.24)  # NOTE(review): unused below
#Downtown LA
# Lower Left -> 34.038811, -118.273534
# Upper Right -> 34.053781, -118.237727
#horizontal length = 3.27km
#vertical length = 1.74km
ll_lat = 34.038811
ll_lon = -118.273534
ur_lat = 34.053781
ur_lon = -118.237727
count = 0
downTownMeters = []
for each in streetData.iterrows():
    count += 1
    if count%10000 == 0:
        # progress indicator every 10k rows
        print count
    # Strip the leading "POINT (" and the trailing ")" to get "lon lat".
    arr1 = each[1]['the_geom'][7:-1].split()
    lon1,lat1 = float(arr1[0]), float(arr1[1])
    if isInsideBox(ll_lon,ll_lat,ur_lon,ur_lat,lon1,lat1) == True:
        # NaN coordinates already fail the box comparison above, so this
        # guard is presumably redundant -- verify before removing.
        if math.isnan(lat1) or math.isnan(lon1):
            pass
        else:
            downTownMeters.append((lon1,lat1))
# Persist the downtown meter coordinates for later use.
numpy.save('downTownMeters',downTownMeters)
print(len(downTownMeters))
| true |
9fa0b28a5aabf305af28ae2edb0c9f3d74be9988 | Python | Aasthaengg/IBMdataset | /Python_codes/p03576/s613164837.py | UTF-8 | 713 | 2.9375 | 3 | [] | no_license | from itertools import combinations, product
import bisect as bs
def tuple_int(iterable):
    """Split a whitespace-separated string and return its fields as an int tuple."""
    return tuple(int(token) for token in iterable.split())
def S_with_K_plots(plots, K):
    """Candidate rectangle areas that enclose at least K of the given points.

    For every pair of x-coordinates (xs is sorted, so the pair is ordered)
    and every candidate lower y-bound, take the y-values of the points that
    fit horizontally and lie at or above that bound; if at least K remain,
    the K-th smallest of them is the tightest upper edge and the resulting
    rectangle's area is recorded.
    """
    xs = sorted(x for x, y in plots)
    ys = sorted(y for x, y in plots)
    areas = []
    for x_lo, x_hi in combinations(xs, 2):
        for y_lo in ys:
            covered = sorted(y for x, y in plots if x_lo <= x <= x_hi and y >= y_lo)
            if len(covered) >= K:
                areas.append((x_hi - x_lo) * (covered[K - 1] - y_lo))
    return areas
if __name__ == '__main__':
    # File descriptor 0 is stdin: the first line holds N and K, the
    # remaining lines hold one point per line.
    with open(0) as f:
        N, K = map(int, f.readline().split())
        plots = list(map(tuple_int, f.readlines()))
print(min(S_with_K_plots(plots, K))) | true |
78eed8981bb7105ada605b83712b156a2d313b37 | Python | kankshamasrani/Big-Data | /BigData_3/shortest_path.py | UTF-8 | 4,350 | 2.796875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from pyspark import SparkConf, SparkContext
import sys
from pyspark.sql import SQLContext
from pyspark.sql.types import StructType, StructField, StringType, IntegerType, ArrayType
def main(argv=None):
if argv is None:
inputs = sys.argv[1]
output = sys.argv[2]
source = sys.argv[3]
target = sys.argv[4]
conf = SparkConf().setAppName('shortest-path')
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
#read text input and store graph edges in a dataframe
text = sc.textFile(inputs)
edges = text.map(lambda line: line.split(':')) \
.filter(lambda lineSplit: len(lineSplit) > 1) \
.map(lambda lineSplit: (lineSplit[0],lineSplit[1].split())) \
.flatMapValues(lambda x: x)
schema = StructType([
StructField('from', StringType(), False),
StructField('to', StringType(), False)
])
edges = sqlContext.createDataFrame(edges,schema)
edges.cache()
# Check if source node exists
if edges[edges['from']==source].count() == 0:
outdata = ["Source not found"]
else:
#initialize known path dataframe
schema2 = StructType([
StructField('node', StringType(), False),
StructField('source', StringType(), False),
StructField('distance', IntegerType(), False)
])
knownpaths = sc.parallelize([[source,'No source',0]])
knownpaths = sqlContext.createDataFrame(knownpaths,schema2)
knownpaths.cache()
schema3 = StructType([
StructField('node', StringType(), False)
])
tovisit = sc.parallelize([[source]])
tovisit = sqlContext.createDataFrame(tovisit,schema3)
#iterate through edges with limit of length 6
for i in range(6):
# get neighbours of node in tovisit
neighbours = edges.join(tovisit, edges['from']==tovisit['node'], 'inner') \
.drop('node')
neighbours.cache()
# get new paths for each neighbours
newpaths = knownpaths.join(neighbours, knownpaths['node']==neighbours['from'], 'inner')
newpaths = newpaths.withColumn('new_dist', newpaths['distance']+1) \
.select('to','from','new_dist')
# union new paths and known paths
knownpaths = knownpaths.unionAll(newpaths).dropDuplicates()
knownpaths.cache()
# keep only path with minimum distance
minpaths = knownpaths.groupby('node').min('distance') \
.withColumnRenamed('node','min_node')
knownpaths2 = minpaths.join(knownpaths, (minpaths['min_node']==knownpaths['node']) \
& (minpaths['min(distance)']==knownpaths['distance']), 'inner') \
.drop('min_node').drop('min(distance)')
knownpaths = knownpaths2
knownpaths.cache()
#the list of nodes to be visited next iteration
tovisit = neighbours.select('to').withColumnRenamed('to','node')
# output result in each iteration by simply writing the Row objects
# Since the no. of entry is roughly the number of nodes, it is small enough to do coalesce
outdata = knownpaths.rdd.map(lambda x: "node %s: source %s, distance %i" \
% (x[0], x[1], x[2]))
outdata.coalesce(1).saveAsTextFile(output + '/iter-' + str(i))
# Stop finding path if found the target
if tovisit[tovisit['node']==target].count() > 0:
break
# Check if target is found
if knownpaths[knownpaths['node']==target].count() == 0:
outdata = ["Target not found"]
else:
# Trace path from target back to source
outdata=[]
path = target
outdata.insert(0,path)
while (path <> source):
path = knownpaths[knownpaths['node']==path].select('source').first()[0]
outdata.insert(0,path)
# Since the no. of entry is roughly the number of nodes, it is small enough to do coalesce
outdata = sc.parallelize(outdata)
outdata.coalesce(1).saveAsTextFile(output + '/path')
if __name__ == "__main__":
main() | true |
bd3f4ebf0851f8e60e14dae8ed1bb58fa007f04d | Python | chokozainer/chokozainerrl | /chokozainerrl/envs/test.py | UTF-8 | 4,824 | 2.890625 | 3 | [
"BSD-2-Clause"
] | permissive | import sys
import gym
import numpy as np
import gym.spaces
class MyEnv(gym.Env):
metadata = {'render.modes': ['human', 'ansi']}
FIELD_TYPES = [
'S', # 0: スタート
'G', # 1: ゴール
'~', # 2: 芝生(敵の現れる確率1/10)
'w', # 3: 森(敵の現れる確率1/2)
'=', # 4: 毒沼(1step毎に1のダメージ, 敵の現れる確率1/2)
'A', # 5: 山(歩けない)
'Y', # 6: 勇者
]
MAP = np.array([
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5], # "AAAAAAAAAAAA"
[5, 5, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2], # "AA~~~~~~~~~~"
[5, 5, 2, 0, 2, 2, 5, 2, 2, 4, 2, 2], # "AA~S~~A~~=~~"
[5, 2, 2, 2, 2, 2, 5, 5, 4, 4, 2, 2], # "A~~~~~AA==~~"
[2, 2, 3, 3, 3, 3, 5, 5, 2, 2, 3, 3], # "~~wwwwAA~~ww"
[2, 3, 3, 3, 3, 5, 2, 2, 1, 2, 2, 3], # "~wwwwA~~G~~w"
[2, 2, 2, 2, 2, 2, 4, 4, 2, 2, 2, 2], # "~~~~~~==~~~~"
])
MAX_STEPS = 100
def __init__(self):
super().__init__()
# action_space, observation_space, reward_range を設定する
self.action_space = gym.spaces.Discrete(4) # 東西南北
self.observation_space = gym.spaces.Box(
low=0,
high=len(self.FIELD_TYPES),
shape=self.MAP.shape
)
self.reward_range = [-1., 100.]
self._reset()
def _reset(self):
# 諸々の変数を初期化する
self.pos = self._find_pos('S')[0]
self.goal = self._find_pos('G')[0]
self.done = False
self.damage = 0
self.steps = 0
return self._observe()
def _step(self, action):
# 1ステップ進める処理を記述。戻り値は observation, reward, done(ゲーム終了したか), info(追加の情報の辞書)
if action == 0:
next_pos = self.pos + [0, 1]
elif action == 1:
next_pos = self.pos + [0, -1]
elif action == 2:
next_pos = self.pos + [1, 0]
elif action == 3:
next_pos = self.pos + [-1, 0]
if self._is_movable(next_pos):
self.pos = next_pos
moved = True
else:
moved = False
observation = self._observe()
reward = self._get_reward(self.pos, moved)
self.damage += self._get_damage(self.pos)
self.done = self._is_done()
return observation, reward, self.done, {}
def _render(self, mode='human', close=False):
# human の場合はコンソールに出力。ansiの場合は StringIO を返す
outfile = StringIO() if mode == 'ansi' else sys.stdout
outfile.write('\n'.join(' '.join(
self.FIELD_TYPES[elem] for elem in row
) for row in self._observe()
) + '\n'
)
return outfile
def _close(self):
pass
def _seed(self, seed=None):
pass
def _get_reward(self, pos, moved):
# 報酬を返す。報酬の与え方が難しいが、ここでは
# - ゴールにたどり着くと 100 ポイント
# - ダメージはゴール時にまとめて計算
# - 1ステップごとに-1ポイント(できるだけ短いステップでゴールにたどり着きたい)
# とした
if moved and (self.goal == pos).all():
return max(100 - self.damage, 0)
else:
return -1
def _get_damage(self, pos):
# ダメージの計算
field_type = self.FIELD_TYPES[self.MAP[tuple(pos)]]
if field_type == 'S':
return 0
elif field_type == 'G':
return 0
elif field_type == '~':
return 10 if np.random.random() < 1/10. else 0
elif field_type == 'w':
return 10 if np.random.random() < 1/2. else 0
elif field_type == '=':
return 11 if np.random.random() < 1/2. else 1
def _is_movable(self, pos):
# マップの中にいるか、歩けない場所にいないか
return (
0 <= pos[0] < self.MAP.shape[0]
and 0 <= pos[1] < self.MAP.shape[1]
and self.FIELD_TYPES[self.MAP[tuple(pos)]] != 'A'
)
def _observe(self):
# マップに勇者の位置を重ねて返す
observation = self.MAP.copy()
observation[tuple(self.pos)] = self.FIELD_TYPES.index('Y')
return observation
def _is_done(self):
# 今回は最大で self.MAX_STEPS までとした
if (self.pos == self.goal).all():
return True
elif self.steps > self.MAX_STEPS:
return True
else:
return False
def _find_pos(self, field_type):
return np.array(list(zip(*np.where(
self.MAP == self.FIELD_TYPES.index(field_type)
))))
| true |
0991e7376326858aa29484fb8e1f86c7334226d4 | Python | SpaceZZ/AutoScrapyProject2 | /AutoScrapy/scripts/ehi_org.py | UTF-8 | 5,008 | 3.03125 | 3 | [] | no_license | """
v1 contact: fit.meal.planner@gmail.com
Script uses Selenium to scrap the following webpage
"""
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
import pandas as pd
import csv
import time
import random
############################################## START CONFIGURATION ####################################################
# webpage to scrape
webpage_target = r"https://www.ehi.org/de/das-institut/unsere-mitglieder?page_id=194&posts_page="
# Google Chrome profile to use
profile_uri = r"C:\Users\Admin\AppData\Local\Google\Chrome\User Data\Default"
# Google webdriver location (location of the exe)
webdriver_location = r"C:\Users\Admin\PycharmProjects\seleniumScraper\driver\chromedriver.exe"
############################################## END CONFIGURATION ####################################################
headers = ['name', 'url', 'phone', 'email', 'address', 'website']
class Item:
    """
    Container for a single scraped company record.
    """
    def __init__(self):
        """
        Initialise every scraped field to the empty string; the scraper
        fills them in afterwards.  (Creation order is preserved so that
        __str__'s vars() dump matches the original layout.)
        """
        for field in ('name', 'url', 'phone', 'email', 'address', 'website'):
            setattr(self, field, "")

    def __str__(self):
        """
        Render the record as the dict of its instance attributes.
        """
        return str(vars(self))
def get_driver():
"""
Create chrome driver for scraping
:return:
:rtype:
"""
opts = Options()
opts.add_argument(
"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36")
opts.add_argument("--user-data-dir={}".format(profile_uri))
opts.add_argument("--disable-extensions")
return webdriver.Chrome(executable_path=webdriver_location, options=opts)
def write_to_file(item):
"""
Methods writes completed object to the file
:param item:
:return:
"""
dict_to_print = {'name': item.name,
'url': item.url,
'phone': item.phone,
'email': item.email,
'address': item.address,
'website': item.website
}
with open("results_ehi_org.csv", 'a', encoding='utf-8', newline='') as file:
writer = csv.DictWriter(file, delimiter='\t', fieldnames=headers)
writer.writerow(dict_to_print)
print(dict_to_print)
def scrape_card(card, current_url):
"""
Function scrapes premium website from the webpage
:param card:
:type card:
:return:
:rtype:
"""
# initialize object to be returned later
item = Item()
# get name
name = card.find_element_by_xpath('.//h4')
if name:
item.name = name.text
# get url
item.url = current_url
# get address
address = card.find_elements_by_xpath('.//div[@class="col-md-4"]//p[position()>1]')
if address:
for _ in address:
item.address += " " + _.text.strip()
item.address.strip()
phone = card.find_element_by_xpath('.//div[@class="col-md-8 border-none"]/p[2]')
if phone:
item.phone = phone.text
email = card.find_element_by_xpath('.//div[@class="col-md-8 border-none"]/p[4]')
if email:
item.email = email.text
# webpage
webpage = card.find_element_by_xpath('.//div[@class="col-md-8 border-none"]/p[last()]/a')
if webpage:
item.website = webpage.get_attribute("href")
if item.website == "http:":
item.website = ""
print("Scraped {} \n {}".format(item.url, item))
return item
def write_header():
"""
Function writes header one time
:return:
:rtype:
"""
with open("results_ehi_org.csv", 'a', encoding='utf-8', newline='') as file:
writer = csv.DictWriter(file, delimiter='\t', fieldnames=headers)
writer.writeheader()
print("Wrote header to file")
def main():
    """
    Entry point for the script: page through the 28 listing pages, scrape
    every result card, then write all items to results_ehi_org.csv.
    """
    import time  # local import: `time` is not imported at module level

    items = []
    driver = get_driver()
    for index in range(1, 29):
        wait_time = random.randint(2, 5)
        target = webpage_target + str(index)
        print("Waiting for loading the page {}".format(wait_time))
        # BUG FIX: the original built WebDriverWait(driver, wait_time) and
        # discarded it without calling .until(), so the script never actually
        # waited between pages. Sleep for the intended random delay instead.
        time.sleep(wait_time)
        driver.get(target)
        results_cards = driver.find_elements_by_xpath('*//div[@class="media-body col-md-9"]')
        for result in results_cards:
            items.append(scrape_card(result, driver.current_url))
    write_header()
    for item in items:
        print(item)
        write_to_file(item)
    print("Total results {}".format(len(items)))
if __name__ == "__main__":
    # Run the scraper only when executed as a script, not on import.
    main()
| true |
fd925d2542166be2de5bc16be2de198617b35dec | Python | lucaspicoli7/Curso_Python_Neri | /11-Estruturas de Controle de Fluxo 'FOR' - Copia.py | UTF-8 | 2,986 | 4.1875 | 4 | [] | no_license | print("<<< ESTRUTURAS DE CONTROLE 'FOR' >>>",chr(10))
# Example 1: simplest form of `for` -- iterate over a string.
print("Exemplo 1 - Forma Simples do 'FOR' -->")
print('-'*50,chr(13))
# Visits every character of the string (digits, letters or symbols).
for contador in '01234567890abcdefg!@#$%':
    print("Posição..:",contador)
print('-'*50,chr(10))

# Example 2: repetition driven by range().
print("Exemplo 2 - Repetição por Range -->")
print('-'*50,chr(13))
# Runs n times starting from 0 (zero); the for/else branch runs after a
# loop that finishes without `break`.
for contador in range(50):
    print("Posição..:",contador)
else:
    print("Fim das Repetições.")
print('-'*50,chr(10))

# Counts from 30 up to (but not including) 50, stepping by 2.
for contador in range(30,50,2):
    print("Posição..:",contador)
else:
    print("Fim das Repetições.")
print('-'*50,chr(10))

# Counts DOWN from 50 to 41, one at a time (the original comment here
# wrongly described an ascending range).
for contador in range(50,40, -1):
    print("Posição..:",contador)
else:
    print("Fim das Repetições.")
print('-'*50,chr(10))

# Ranges may span negative values too: -6 up to 5.
for contador in range(-6,6):
    print("Posição..:",contador)
else:
    print("Fim das Repetições.")
print('-'*50,chr(10))

# Multiplication table for a number typed by the user.
# NOTE(review): range(1,10) stops at 9; a full table probably wants
# range(1, 11) -- confirm the intended behaviour.
numTabuada = int(input("Digite um número para tabuada:"))
for tabuada in range(1,10):
    print("%d * %d = %d" % (numTabuada,tabuada,(numTabuada * tabuada)))
print('-'*50,chr(10))

# Counts from 30 to 49 stepping by 1; when the counter reaches 35,
# `continue` skips the rest of that iteration, so 35 is never printed.
for contador in range(30,50,1):
    if contador == 35:
        continue
    print("Posição..:",contador)
else:
    print("Fim das Repetições.")
print('-'*50,chr(10))

# Example 3: iterate over the items of a list.
print("Exemplo 3 - Repetição Utilizando uma Lista -->")
print('-'*50,chr(13))
# One iteration per list element.
for planeta in ["Mercúrio","Venus","Terra","Marte","Jupiter","Saturno","Urano","Netuno","Plutão"]:
    print("Planeta..:",planeta)
else:
    print("Fim dos Planetas.")
print('-'*50,chr(10))

# Example 4: iterate over a list of tuples.
print("Exemplo 4 - Repetição com Tuplas -->")
print('-'*50,chr(13))
# Each element is a whole (colour, colour) tuple.
for cores in [("Preto","Branco"),("Verde","Amarelo"),("Azul","Vermelho"),("Prata","Ouro")]:
    print("Cor..:",cores)
print('-'*50,chr(10))

# Example 5: interrupting a loop with `break`.
print("Exemplo 5 - Repetição com Interrupção (break) -->")
print('-'*50,chr(13))
# Iterates until the break fires.
for planeta in ["Mercúrio","Venus","Terra","Marte","Jupiter","Saturno","Urano","Netuno","Plutão"]:
    print("Planeta..:",planeta)
    if planeta == "Urano":
        break
else: # this else never runs: break interrupts the loop before completion
    print("Fim dos Planetas.")
print('-'*50,chr(10))

# Example 6: summing by increment -- note the `+=` operator.
# NOTE(review): the banner text below says "Exemplo 5" although this is
# example 6 (runtime string left unchanged).
print("Exemplo 5 - Soma (por incremento) (o interessante é como é feito a soma '+=' ) -->")
print('-'*50,chr(13))
print("Soma dos números de 1 a 10) -->")
valorTotal = 0
for numero in range(1,11):
    valorTotal += numero
print("Valor Total",valorTotal)
| true |
4e6b7f99c6761b71bfe72dec16178c90615f6854 | Python | sweettuse/lifxlan3 | /lifxlan3/utils.py | UTF-8 | 4,625 | 2.6875 | 3 | [
"MIT"
] | permissive | import logging
import time
from collections import deque
from concurrent.futures import wait
from concurrent.futures.thread import ThreadPoolExecutor
from contextlib import contextmanager
from functools import wraps
from itertools import cycle
from socket import AF_INET, SOCK_DGRAM, SOL_SOCKET, SO_BROADCAST, SO_REUSEADDR, socket
from threading import local
from typing import Optional, List, Any, TypeVar, Union, Iterable
# Generic type variable used by the OneOrMore alias below.
T = TypeVar('T')
# A single value of type T, or a list of them.
OneOrMore = Union[T, List[T]]
def init_log(name, level=logging.INFO):
    """Create (or fetch) a logger configured with consistent settings.

    :param name: logger name, usually the caller's ``__name__``
    :param level: logging level applied to the logger (default ``INFO``)
    :return: the configured :class:`logging.Logger`

    BUG FIX: the original unconditionally attached a new StreamHandler on
    every call, so calling init_log twice for the same name duplicated every
    log line. A handler is now attached only on the first call.
    """
    log = logging.getLogger(name)
    log.setLevel(level)
    if not log.handlers:
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        log.addHandler(handler)
    return log


# Module-level logger shared by the helpers below.
log = init_log(__name__)
def timer(func):
    """Decorator: log how long each call to *func* took, even on exceptions."""
    @wraps(func)
    def timed(*args, **kwargs):
        begin = time.perf_counter()
        try:
            return func(*args, **kwargs)
        finally:
            # runs whether func returned or raised
            log.info(f'func {func.__name__!r} took {time.perf_counter() - begin} seconds')
    return timed
@contextmanager
def localtimer():
    """Context manager that prints the wall-clock time spent in its block."""
    began = time.time()
    try:
        yield
    finally:
        # printed even when the block raises
        print(f'localtimer took {time.time() - began}')
class WaitPool:
    """
    allow jobs to be submitted to either an existing pool or a dynamically-created one,
    wait for it to complete, and have access to the futures outside the `with` block

    The futures list is thread-local, so separate threads sharing one
    WaitPool instance track their own submissions independently.
    """
    # default worker count when no pool / thread count is supplied
    threads_per_pool = 8

    def __init__(self, pool: Optional[Union[int, ThreadPoolExecutor]] = None):
        # pool: an existing executor, a thread count, or None (default size)
        self._pool = self._init_pool(pool)
        self._local = local()

    @staticmethod
    def _init_pool(pool: Optional[Union[int, ThreadPoolExecutor]]):
        # normalize the `pool` argument into a ThreadPoolExecutor
        if isinstance(pool, ThreadPoolExecutor):
            return pool
        if isinstance(pool, int):
            num_threads = pool
        elif pool is None:
            num_threads = WaitPool.threads_per_pool
        else:
            raise ValueError(f'invalid value for `pool`: {pool!r}')
        return ThreadPoolExecutor(num_threads)

    @property
    def futures(self):
        # lazily create the per-thread futures list on first access
        try:
            f = self._local.futures
        except AttributeError:
            f = self._local.futures = []
        return f

    @property
    def results(self) -> List[Any]:
        # blocks until each tracked future completes; re-raises any exception
        return [f.result() for f in self.futures]

    def wait(self):
        # this calls module-level concurrent.futures.wait, not recursion
        wait(self.futures)

    def __getattr__(self, item):
        """proxy for underlying pool object"""
        # only invoked for attributes not found normally; class descriptors
        # take precedence, everything else falls through to the executor
        desc = type(self).__dict__.get(item)
        if hasattr(desc, '__get__'):
            return desc.__get__(self)
        return getattr(self._pool, item)

    def submit(self, fn, *args, **kwargs):
        # submit and remember the future so wait()/results can see it
        fut = self._pool.submit(fn, *args, **kwargs)
        self.futures.append(fut)
        return fut

    def map(self, fn, *iterables):
        # fan out fn over zipped iterables; futures are tracked, not returned
        self.futures.extend(self._pool.submit(fn, *args) for args in zip(*iterables))

    def dispatch(self, fn, *args, **kwargs):
        """run on thread pool but don't wait for completion"""
        # deliberately NOT tracked in self.futures
        return self._pool.submit(fn, *args, **kwargs)

    def __enter__(self):
        # fresh batch: drop futures left over from any previous `with` block
        self.futures.clear()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # block until everything submitted inside the `with` block finishes
        self.wait()
def exhaust(iterable):
    """
    immediately consume an iterable and discard results
    should be used for side effects (printing, updating, submitting to job pool, etc)
    """
    # deque with maxlen=0 drains the iterator at C speed without storing anything
    deque(iterable, maxlen=0)
@contextmanager
def init_socket(timeout):
    """Yield a broadcast-capable UDP socket bound to an ephemeral port;
    the socket is always closed when the block exits."""
    udp = socket(AF_INET, SOCK_DGRAM)
    try:
        for option in (SO_REUSEADDR, SO_BROADCAST):
            udp.setsockopt(SOL_SOCKET, option, 1)
        udp.settimeout(timeout)
        try:
            # port 0: let the OS assign the next available source port
            udp.bind(('', 0))
        except Exception as err:
            raise ConnectionError(
                f'WorkflowException: error {str(err)} while trying to open socket'
            )
        yield udp
    finally:
        udp.close()
def even_split(array: Iterable, n_splits: int) -> List[List]:
    """
    Distribute *array* round-robin over *n_splits* buckets.

    Bucket sizes differ by at most one; flattening the result does not
    necessarily preserve the input order (similar to np.array_split,
    only for 1d arrays).
    """
    buckets: List[List] = [[] for _ in range(n_splits)]
    targets = cycle(buckets)
    for value in array:
        next(targets).append(value)
    return buckets
class classproperty:
    """Descriptor: a read-only property computed on the class, not an instance."""

    def __init__(self, f) -> None:
        # f receives the class (not an instance) when the attribute is read
        self.f = f

    def __get__(self, instance, cls):
        # invoked for both Class.attr and instance.attr; always passes the class
        return self.f(cls)
| true |
3c4b4c45d967363b18706a37867a6a7e26f33de3 | Python | diofant/diofant | /diofant/matrices/expressions/hadamard.py | UTF-8 | 2,193 | 3.078125 | 3 | [] | permissive | from ...core import Mul
from ...core.strategies import condition, do_one, exhaust, flatten, unpack
from ...core.sympify import sympify
from ..matrices import ShapeError
from .matexpr import MatrixExpr
def hadamard_product(*matrices):
    """
    Return the elementwise (aka Hadamard) product of matrices.

    A single argument is returned unchanged; two or more are wrapped in a
    HadamardProduct and simplified via ``doit()``. Raises TypeError when
    called with no arguments or with a non-matrix operand, and ShapeError
    when the operands' shapes differ (see ``validate``).

    Examples
    ========

    >>> A = MatrixSymbol('A', 2, 3)
    >>> B = MatrixSymbol('B', 2, 3)
    >>> hadamard_product(A)
    A
    >>> hadamard_product(A, B)
    A.*B
    >>> hadamard_product(A, B)[0, 1]
    A[0, 1]*B[0, 1]
    """
    if not matrices:
        raise TypeError('Empty Hadamard product is undefined')
    validate(*matrices)
    if len(matrices) == 1:
        return matrices[0]
    return HadamardProduct(*matrices).doit()
class HadamardProduct(MatrixExpr):
    """
    Elementwise product of matrix expressions

    This is a symbolic object that simply stores its argument without
    evaluating it. To actually compute the product, use the function
    ``hadamard_product()``.

    >>> A = MatrixSymbol('A', 5, 5)
    >>> B = MatrixSymbol('B', 5, 5)
    >>> isinstance(hadamard_product(A, B), HadamardProduct)
    True
    """

    is_HadamardProduct = True

    def __new__(cls, *args, **kwargs):
        # Sympify all operands; pass check=False to skip shape validation
        # (useful when the operands are already known to be valid).
        args = list(map(sympify, args))
        check = kwargs.get('check', True)
        if check:
            validate(*args)
        return super().__new__(cls, *args)

    @property
    def shape(self):
        # All operands share one shape (enforced by validate); use the first.
        return self.args[0].shape

    def _entry(self, i, j):
        # Entry (i, j) is the scalar product of the operands' (i, j) entries.
        return Mul(*[arg._entry(i, j) for arg in self.args])

    def _eval_transpose(self):
        # Transpose distributes over the elementwise product.
        from .transpose import transpose
        return HadamardProduct(*list(map(transpose, self.args)))

    def doit(self, **ignored):
        # Apply the canonicalization rules (unpack singletons, flatten nesting).
        return canonicalize(self)
def validate(*args):
    """Ensure every operand is a matrix and all shapes are identical."""
    if any(not operand.is_Matrix for operand in args):
        raise TypeError('Mix of Matrix and Scalar symbols')
    first = args[0]
    for other in args[1:]:
        if first.shape != other.shape:
            raise ShapeError(f'Matrices {first} and {other} are not aligned')
# Simplification rules used by doit(): unwrap single-argument products and
# flatten nested HadamardProducts.
rules = (unpack, flatten)

# Repeatedly apply one of ``rules`` while the expression is still a
# HadamardProduct, until nothing changes.
canonicalize = exhaust(condition(lambda x: isinstance(x, HadamardProduct),
                                 do_one(rules)))
| true |
d80d6396304ad47f59d4166061f1052d1b865d95 | Python | SohaSiddiqui6/NLP_Depression_Detection | /depression_detection.py | UTF-8 | 3,577 | 2.890625 | 3 | [] | no_license | import pandas as pd
import re
import gensim
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import SGDClassifier
import warnings
from sklearn.metrics import accuracy_score
from sklearn import preprocessing
warnings.filterwarnings("ignore")
data = pd.read_csv("Depressiondata.csv")
# The CSV's unnamed index column is imported as "Unnamed: 0"; give it a
# temporary name and drop it so only the text and label columns remain.
data.rename({"Unnamed: 0":"a"}, axis="columns", inplace=True)
data.drop(["a"], axis=1, inplace=True)
def array_cleaner(array):
    """Normalise each entry: replace every non-letter with a space,
    lower-case the text, and prefix a single leading space (an artefact
    of the original word-by-word concatenation, preserved for parity)."""
    return [' ' + re.sub('[^a-zA-Z]', ' ', str(entry)).lower() for entry in array]
import math  # kept for backward compatibility; no longer used below

# BUG FIX: the original loop compared each label to ``math.isnan(i)`` -- a
# boolean about the *loop index* -- so missing labels were never detected,
# and the row count (36513) was hard-coded. Use pandas' missing-value
# handling over the whole column instead.
data["Depression"] = data["Depression"].fillna("True")

# First column: text, second column: label (coerced to string).
X = data.iloc[:, 0]
Y = data.iloc[:, 1].astype("str")

train_X, X_test, train_Y, y_test = train_test_split(X, Y, test_size=0.20, random_state=0)

# Normalise the raw text on both splits.
train_X = array_cleaner(train_X)
X_test = array_cleaner(X_test)
num_features = 300      # word vector dimensionality
min_word_count = 1      # minimum token frequency to keep a word
num_workers = 4         # training threads
context = 10            # context window size
downsampling = 1e-3     # downsampling of very frequent words

# Initializing the train model
from gensim.models import word2vec
print("Training model....")
model = word2vec.Word2Vec(train_X,\
                          workers=num_workers,\
                          size=num_features,\
                          min_count=min_word_count,\
                          window=context,
                          sample=downsampling)

# Precompute L2-normalised vectors; the model cannot be trained further
# after this call (replace=True discards the raw vectors).
model.init_sims(replace=True)

model_name = "Depression_Analysis"
model.save(model_name)
def featureVecMethod(words, model, num_features):
    """Average the word vectors of every in-vocabulary word in *words*.

    Returns NaNs when no word is in the vocabulary (division by zero),
    exactly as the original behaved.
    """
    vocabulary = set(model.wv.index2word)
    total = np.zeros(num_features, dtype="float32")
    hits = 0
    for word in words:
        if word in vocabulary:
            hits += 1
            total = np.add(total, model[word])
    return np.divide(total, hits)
# Average feature vector for every review, stacked into one 2-D array.
def getAvgFeatureVecs(reviews, model, num_features):
    """Return a (len(reviews), num_features) float32 array of review vectors."""
    vecs = np.zeros((len(reviews), num_features), dtype="float32")
    for idx, review in enumerate(reviews):
        # Progress message on every 1000th review.
        if idx % 1000 == 0:
            print("Review %d of %d"%(idx,len(reviews)))
        # .T is a no-op on a 1-D vector; kept for parity with the original.
        vecs[idx] = featureVecMethod(review, model, num_features).T
    return vecs
trainDataVecs = getAvgFeatureVecs(train_X, model, num_features)
testDataVecs = getAvgFeatureVecs(X_test, model, num_features)

# Fit a small random forest on the averaged word vectors.
forest = RandomForestClassifier(n_estimators = 5)
forest = forest.fit(trainDataVecs, train_Y)

result = forest.predict(testDataVecs)
print(result)

from sklearn.metrics import confusion_matrix
# BUG FIX: the original evaluated ``y_pred``, which was never assigned (the
# SGDClassifier lines that produced it were commented out), so this crashed
# with NameError. Evaluate the random-forest predictions instead.
cm = confusion_matrix(result, y_test)
accuracy = accuracy_score(result, y_test)
print(accuracy*100)
| true |
cbbfaa700557258b7b6019ce969f797fe5e887b5 | Python | Grae-Drake/euler-redux | /problem_17.py | UTF-8 | 2,214 | 4.125 | 4 | [] | no_license | """Solve Project Euler Problem 17.
Problem statement: If the numbers 1 to 5 are written out in words: one, two,
three, four, five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used total.
If all the numbers from 1 to 1000 (one thousand) inclusive were written out
in words, how many letters would be used?
Note: Do not count spaces or hyphens. For example, 342 (three hundred and
forty-two) contains 23 letters and 115 (one hundred and fifteen) contains 20
letters. The use of "and" when writing out numbers is in compliance with
British usage.
URL
"""
import argparse
from datetime import datetime
def solution(limit: int) -> int:
    """Spell out 1..limit in British English (no spaces/hyphens, with 'and')
    and return the total number of letters used."""
    units = ['', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',
             'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen',
             'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen',
             'nineteen']
    tens_words = ['', 'ten', 'twenty', 'thirty', 'forty', 'fifty', 'sixty',
                  'seventy', 'eighty', 'ninety']

    def spell(value: int) -> str:
        """British English spelling of *value* with spaces/hyphens removed."""
        if value == 1000:
            return 'onethousand'
        pieces = []
        hundreds, remainder = divmod(value, 100)
        if hundreds:
            pieces.append(units[hundreds] + 'hundred')
            # British usage: 'and' joins hundreds with the rest (e.g. 342).
            if remainder:
                pieces.append('and')
        if remainder < 20:
            pieces.append(units[remainder])
        else:
            pieces.append(tens_words[remainder // 10] + units[value % 10])
        return ''.join(pieces)

    return sum(len(spell(value)) for value in range(1, limit + 1))
if __name__ == "__main__":
    # Parse the optional positional limit, time the computation, and report.
    parser = argparse.ArgumentParser()
    parser.add_argument("limit", help="limit, default to 5", default=5,
                        type=int, nargs='?')
    cli_args = parser.parse_args()

    started = datetime.now()
    answer = solution(cli_args.limit)
    finished = datetime.now()

    print("The answer is {} for input {}.".format(answer, cli_args.limit))
    print("Execution time was {}.".format(finished - started))
| true |
d545d77e16fb6398a2cdb676095bba16fb056bed | Python | pastmax4/pyGraphTry20190104_01 | /Grafico03.py | UTF-8 | 990 | 3.390625 | 3 | [] | no_license | '''
Created on 14 dic 2017
@author: mpasteri
linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None)[source]
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless endpoint is set to False. In that case, the sequence consists of all but the last of num + 1 evenly spaced samples, so that stop is excluded. Note that the step size changes when endpoint is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
Su NumPy
http://www.physics.nyu.edu/pine/pymanual/html/chap3/chap3_arrays.html
'''
import pylab as myplt
import numpy

# One x grid, three lines with slopes 0.5, 1.0 and 1.5.
x = numpy.linspace(0, 15, 100)
slopes = (0.5, 1.0, 1.5)
y_series = [slope * x for slope in slopes]

# Echo the first line's sample points, one (x, y) pair per row.
print('x, y')
for x_value, y_value in zip(x, y_series[0]):
    print(x_value, y_value)

# Plot every line on the same axes with a grid.
for y in y_series:
    myplt.plot(x, y)
myplt.grid(True)
myplt.show()
| true |
3554237813a66a8e5665996f7472159ff1f2d037 | Python | optionalg/programming-introduction | /Aula 07/Aula7-Lab-08.py | UTF-8 | 1,876 | 4.125 | 4 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Aula 7
# Laboratório
# Exercício 8
# Autor: Lucien Constantino
def get_ball_diameter():
    """Prompt until the user enters an integer diameter in [1, 10000]."""
    while True:
        try:
            diameter = int(input("Digite o diâmetro da bola de boliche: "))
        # BUG FIX: int() raises ValueError on non-numeric text; the original
        # only caught NameError (a Python 2 leftover), so bad input crashed.
        except (NameError, ValueError):
            print("Valor inválido.")
            continue
        if diameter < 1 or diameter > 10000:
            print("Diâmetro deve ser maior ou igual a 1, menor ou igual a 10.000")
            continue
        else:
            break
    return diameter
def get_box_dimensions():
    """Prompt until the user enters three integer dimensions, each in [1, 10000].

    Returns a list of three ints (height, width, depth).
    """
    while True:
        measures_input = input("Digite a altura, largura e profundidade - separados por espaço: ").split()
        if len(measures_input) != 3:
            print("É preciso digitar apenas Altura, largura e profundidade.")
            continue
        try:
            # BUG FIX: the original used a lazy map(int, ...) and returned the
            # exhausted map object, so fits_inside_box() later saw an empty
            # sequence and conversion errors surfaced uncaught mid-loop.
            measures = [int(part) for part in measures_input]
        except ValueError:
            print("Valor inválido.")
            continue
        # BUG FIX: the original 'continue' inside the inner for-loop only
        # skipped to the next measure, so out-of-range values were accepted.
        if any(measure < 1 or measure > 10000 for measure in measures):
            print("Altura, largura e profundidade devem ser maiores ou igual a 1, menores ou igual a 10.000")
            continue
        return measures
def fits_inside_box(box_measures, ball_diameter):
    """Return True when the ball's diameter fits within every box dimension."""
    return all(ball_diameter <= measure for measure in box_measures)
# Sanity checks for fits_inside_box before asking the user anything.
assert fits_inside_box([3, 2, 5], 3) is False
assert fits_inside_box([5, 5, 5], 5) is True
assert fits_inside_box([15, 9, 10], 9) is True
assert fits_inside_box([10, 20, 30], 100) is False

ball_diameter = get_ball_diameter()
box_measures = get_box_dimensions()
# "S" (sim) when the ball fits in the box, "N" (não) otherwise.
result = "S" if fits_inside_box(box_measures, ball_diameter) else "N"
print(result)
| true |
17e2f492dde53a8094ba6c57b4905bf3768b3f5c | Python | pavelov2013/dbParser | /main.py | UTF-8 | 922 | 2.53125 | 3 | [] | no_license | import openpyxl as opx
import openpyxl as opx
import os.path
import docx
from datetime import datetime
import time

start_time = datetime.now()

workbook = opx.load_workbook("res.xlsx")
sheet = workbook.active

directory = 'referats/'
files = os.listdir(directory)

# Column A: one row per document file name (rows are 1-based).
for row, filename in enumerate(files, start=1):
    sheet["A" + str(row)].value = filename

# One keyword per line in keys.txt.
with open("keys.txt", encoding="utf-8") as file:
    keys = [l.strip() for l in file]

for row, filename in enumerate(files, start=1):
    document = docx.Document("referats/" + filename)
    # Concatenate every paragraph and lower-case for case-insensitive search.
    text = ""
    for paragraph in document.paragraphs:
        text += paragraph.text
    text = text.lower()
    matched = ""
    hits = 0
    for key in keys:
        if key in text:
            matched += key + ","
            hits += 1
    # Column Z: comma-joined matches; column AD: match count.
    sheet["Z" + str(row)].value = matched
    sheet["AD" + str(row)].value = hits

workbook.save("res.xlsx")
print(datetime.now() - start_time)
| true |
493fa6a14805de5a0b84c2d033677f4de3a2f2bb | Python | cheapthrillandwine/python_research | /Filters/mosaic.py | UTF-8 | 1,498 | 2.953125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import cv2
import numpy as np
# Mosaic (pixelation) filter.
def mosaic(img, alpha):
    """Return *img* pixelated: shrink by factor *alpha*, then enlarge back
    with nearest-neighbour interpolation."""
    width = img.shape[1]
    height = img.shape[0]
    small = cv2.resize(img, (int(width * alpha), int(height * alpha)))
    return cv2.resize(small, (width, height), interpolation=cv2.INTER_NEAREST)
def main():
    """Read a video, pixelate every detected face, and display each frame."""
    # Load the Haar cascade face detector (hard-coded local OpenCV path).
    cascade = cv2.CascadeClassifier(r"C:/Users/NSW00_906882/Desktop/exercise/opencv-3.3.0/data/haarcascades/haarcascade_frontalface_default.xml")
    # Open the video source (use VideoCapture(0) for the webcam instead).
    # capture = cv2.VideoCapture(0)
    capture = cv2.VideoCapture("IMG1.MOV", 0)
    # Loop until the video ends or the window is closed.
    while(capture.isOpened()):
        # Grab the next frame.
        # NOTE(review): when read() fails, frame is None and cvtColor below
        # would raise -- consider checking `ret` before processing.
        ret, frame = capture.read()
        # Convert to greyscale for the detector.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Search for face regions.
        face = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3, minSize=(30, 30))
        # Pixelate each detected face region in place.
        for (x,y,w,h) in face:
            # Crop the face and apply the mosaic filter.
            frame[y:y+h, x:x+w] = mosaic(frame[y:y+h, x:x+w], 0.05)
        # Show the processed frame.
        cv2.imshow("Frame", frame)
        # Quit early when the 'q' key is pressed.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    capture.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Run the demo only when executed directly, not when imported.
    main()
| true |
b90de18e012eda255456aaf55eeb1fa73a7dcfd5 | Python | leesohyeon/Python_ac | /1028/ifex/IfTest04.py | UTF-8 | 647 | 4.40625 | 4 | [] | no_license | """
사용자로부터 정수를 입력받아
정수의 부호에 따라 거북이를 움직여보자!
양수(100, 100)
0(100, 0)
음수(100, -100)
"""
import turtle as t

# Target height and on-screen label for each sign of the input number.
LABELS = (
    (100, "거북이가 여기로 오면, '양수' 입니다."),
    (0, "거북이가 여기로 오면, '0' 입니다."),
    (-100, "거북이가 여기로 오면, '음수' 입니다."),
)

t.shape("turtle")
t.up()
for height, label in LABELS:
    t.goto(100, height)
    t.write(label)
t.goto(0, 0)
t.down()

# Move the turtle according to the sign of the number entered by the user.
num = int(input("숫자를 입력하세요 : "))
if num > 0:
    destination = 100
elif num == 0:
    destination = 0
else:
    destination = -100
t.goto(100, destination)

t.exitonclick()
| true |
58cfcc7bdb90c7e7fc98bed30101c9f0b464d520 | Python | hdobrovo/RSV_aging | /Cotton Rat Fitting.py | UTF-8 | 5,603 | 2.84375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 26 16:20:28 2017
@author: sams club
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 3 17:37:11 2017
@author: sams club
"""
import numpy as np
from scipy.integrate import odeint
from scipy.optimize import minimize
import matplotlib.pyplot as plt
params = np.ones(5)
# Global parameter array [B, k, d, p, c]; overwritten by SSR() on every
# optimiser evaluation and read by F().

# Pick exactly one data set to fit; comment/uncomment as needed.
# data = np.loadtxt("Lung 3 Day Old.csv", delimiter = ",")
# data = np.loadtxt("Lungs_14_Day_Old.csv", delimiter = ",")
data = np.loadtxt("Lung 28 Day Old.csv", delimiter = ",")
# data = np.loadtxt("Lung Adult .csv", delimiter = ",")
# data = np.loadtxt("Trachea 3 Day Old.csv", delimiter = ",")
# data = np.loadtxt("Trachea 14 Day Old.csv", delimiter = ",")
# data = np.loadtxt("Trachea 28 Day Old.csv", delimiter = ",")
# data = np.loadtxt("Trachea Adult Infection.csv", delimiter = ",")
# data = np.loadtxt("Nose 3 Day Old Infection.csv", delimiter = ",")
# data = np.loadtxt("Nose 14 Day Old.csv", delimiter = ",")
# data = np.loadtxt("Nose 28 Day Old.csv", delimiter = ",")
# data = np.loadtxt("Nose Adult.csv", delimiter = ",")
x_data = data[:,][:,0]
# 1st column: sample times in days. (The leading [:,] slice is a no-op.)
y_data = data[:,][:,1]
# 2nd column: measured viral counts.
#model for reference
# y = T,E,I,V
# dydt[0] = dT
# dydt[1] = dE
# dydt[2] = dI
# dydt[3] = dV
# y[0] = T
# y[1] = E
# y[2] = I
# y[3] = V
# z[0] = B
# z[1] = k
# z[2] = d
# z[3] = p
# z[4] = c
def F(y, t):
    """Right-hand side of the simple viral model, for use with odeint:

        dT/dt = -B*T*V
        dE/dt =  B*T*V - k*E
        dI/dt =  k*E - d*I
        dV/dt =  p*I - c*V

    *y* holds the state [T, E, I, V]; the rates [B, k, d, p, c] are read
    from the module-global ``params`` (written by SSR()).
    """
    beta, k, d, p, c = params
    T, E, I, V = y
    dydt = np.zeros_like(y)
    dydt[0] = -beta * T * V
    dydt[1] = beta * T * V - k * E
    dydt[2] = k * E - d * I
    dydt[3] = p * I - c * V
    return dydt
t = np.linspace(0, 7, 600)
# Dense time grid used to draw the fitted curve (600 points over 0-7 days).
# NOTE(review): the original comment claimed "20 days beginning on day 1",
# which does not match linspace(0, 7, 600) -- confirm the intended span.

guess = np.array( [1e-5, 4, 4, 4e6, 5, 1e4] )
# Initial optimiser guesses (from the simple-viral-model handout),
# in order: B, k, d, p, c, and v0.

def SSR(guess):
    """
    SSR (sum of squared residuals) is the objective function that will
    later be minimized. The log of the integrated model and the original
    data set are taken to account for the logarithmic nature of the
    data and the optimizer's tendency to overcompensate for large
    differences.

    SSR formula = sum(log10(y_predicted) - log10(y_data))^2

    Side effect: writes the first five entries of *guess* into the
    module-global ``params`` that F() reads.
    """
    global params
    params = guess[0:5]
    y0 = [1, 0, 0, guess[5]]
    # Initial conditions, in order T, E, I, V: one (normalised) target-cell
    # pool, no infected cells, and the guessed initial virus load.
    y_prediction = odeint(F,y0,x_data)
    virus_prediction = (y_prediction[:,][:,3])
    # Compare model and data on a log10 scale.
    virus_prediction_log = np.log10(virus_prediction)
    virus_data_log = np.log10(y_data)
    # Uncomment to watch the fit evolve in real time:
    #plt.plot(virus_prediction_log)
    #plt.plot(virus_data_log,'r')
    #plt.show(True)
    diff = virus_prediction_log - virus_data_log
    return sum((diff)**2)
# Alternative optimisers that were tried; L-BFGS-B is the one in use.
# result = minimize(SSR,guess, method = 'Nelder-Mead')
# result = minimize(SSR,guess, method = 'Powell')
# result = minimize(SSR,guess, method = 'CG')
# result = minimize(SSR,guess, method = 'BFGS')
# result = minimize(SSR,guess, method = 'Newton-CG')
result = minimize(SSR,guess, method = 'L-BFGS-B')
# result = minimize(SSR,guess, method = 'TNC')
# result = minimize(SSR,guess, method = 'COBYLA')
# result = minimize(SSR,guess, method = 'SLSQP')
# result = minimize(SSR,guess, method = 'dogleg')
# result = minimize(SSR,guess, method = 'trust-ncg')
# Report the full optimiser result and whether it converged.
print (result)
print (result.success)

# Fitted parameters, in order: B, k, d, p, c (last entry of result.x is v0).
params = result.x[0:5]
# Re-integrate the model on the dense grid with the fitted parameters.
y0 = [1, 0, 0, result.x[5]]
y_fitted = odeint(F,y0,t)
# Column 3 is V, the virus count (state order is T, E, I, V).
virus_best_fit = y_fitted[:,][:,3]

optimal_B_value = result.x[0]
optimal_k_value = result.x[1]
optimal_d_value = result.x[2]
optimal_p_value = result.x[3]
optimal_c_value = result.x[4]
print ("Optimal B value = ", optimal_B_value)
print ("Optimal k value = ", optimal_k_value)
print ("Optimal d value = ", optimal_d_value)
print ("Optimal p value = ", optimal_p_value)
print ("Optimal c value = ", optimal_c_value)

# Plot data points against the fitted curve on a log y-axis.
plt.plot(x_data,y_data, "o", label = "Dataset")
plt.plot(t,virus_best_fit, label = "Line of Best Fit")
plt.legend()
plt.xlabel("Time (days)")
ax = plt.gca()
ax.set_yscale('log')
plt.ylabel("Virus")
plt.title("RSV Count in Lungs of 28 Day Old Rats")
| true |
b545f00ca11459c59fa36c7ffb5a947175d8471a | Python | PengChen11/math-series | /math_series/series.py | UTF-8 | 1,337 | 3.46875 | 3 | [] | no_license | # function to calculate the nth fibonacci number. I hate using recursion for this task cause the big O is 2*n and when n goes above 30, it eats up all my computer's resources.
# The following solution's big O is only n-2. much faster.
# n starts with 0.
def fibonacci(n):
    """Return the nth Fibonacci number (0, 1, 1, 2, 3, ...); n starts at 0.

    Iterative O(n) implementation -- deliberately not recursive, since naive
    recursion is exponential.
    """
    if n == 0:
        return 0
    a, b = 0, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return b
# this is the calculation based on fibonacci's fomular.
# (1+5**0.5)/2 is called golden mean
# I don't know how python calculate floats, but these two method starts to give different value when n=72. feels like something wrong with acturacy for float calculation when it's really a big number.
# n starts with 0
def fibonacci_1(n):
    """Return the nth Fibonacci number via Binet's closed form.

    Accurate only while floating point holds (diverges from the iterative
    version around n = 72); n starts at 0.
    """
    golden = (1 + 5 ** 0.5) / 2  # the golden mean
    return int((golden ** n - (1 - golden) ** n) / (5 ** 0.5))
# function to calculate the nth lucas number.
# n starts with 0
def lucas(n):
    """Return the nth Lucas number (2, 1, 3, 4, 7, 11, ...); n starts at 0."""
    if n == 0:
        return 2
    a, b = 2, 1
    for _ in range(n - 1):
        a, b = b, a + b
    return b
# function to calculate the nth number, based on 2 optional prams.
# if no optional prams input, then using fibonacci.
def sum_series(n, prev=0, nex=1):
    """Return the nth element of the additive series seeded by (prev, nex).

    The defaults give Fibonacci; sum_series(n, 2, 1) gives Lucas numbers.
    n starts at 0.
    """
    if n == 0:
        return prev
    a, b = prev, nex
    for _ in range(n - 1):
        a, b = b, a + b
    return b
| true |
11189b978a9dbc57e0f4a6c2d128750e8f2d0906 | Python | eirvandelden/2II25 | /rdflib/database.py | UTF-8 | 7,000 | 2.578125 | 3 | [] | no_license | # functies die we nog nodig hebben:
# - Laatste X pages
##########################################################
##
## Imports
##
##########################################################
# Dependencies imports
import sha, re, os, globals
# Class imports
import usermethods
import documentmethods
import searchmethods
import tagmethods
#import commentmethods
##########################################################
##
## Class: Database
##
##########################################################
class database():
"""
Generally {type}methods-functions return a succes-boolean, and nothing else;
'get'-functions and search-functions are exceptions to this.
For the creators of {type}methods-modules:
Complementary properties are:
{user1} follows {user2 of tag} - {user2 of tag} followed_by {user1}
{user} interest {document} - {document} interests {user}
{user} owner {document} - {document} owned_by {user}
{user} contributor {document} - {document} contributed_to_by {user}
"""
##########################################################
## Initialisation
##########################################################
#def __init__(self):
#moet hier nog iets staan?
##########################################################
## Termination
##########################################################
def exitdb(self):
globals.writedb()
##########################################################
##
## Methods: Document
##
## (For methods to get the comments on a document, see Comment methods)
##########################################################
""" Used shorthands:
pid: person identifier
did: document identifier
"""
def new_document(self, pid, name, content):
return documentmethods.new(pid, name, content)
def edit_document(self, did, name, content):
return documentmethods.edit(did, name, content)
def get_document(self, did):
return documentmethods.get(did)
def get_documents_from_user(self, pid):
return documentmethods.getalluser(pid)
def get_last_x_dox_from_user(self, pid, number):
if len(get_documents_from_user(pid)) < number:
return get_documents_from_user(pid)
else:
return get_documents_from_user(pid)[:number]
def get_last_x_dox_general(self, number):
return documentmethods.getlastxgeneral(number)
def get_document_property(self, did, property):
return documentmethods.get_property_value(did, property)
def delete_document(self, did):
return documentmethods.delete(did)
##########################################################
##
## Methods: User
##
## (For methods to get the comments on a document, see Comment methods)
##########################################################
def new_user(self, name, password, email):
return usermethods.new(name, password, email)
def delete_user(self, id):
return usermethods.delete(id)
# for properties with only 1 value (for ex. name)
def set_user_property_value(self, id, property, value):
return usermethods.setpropertyvalue(id, property, value)
# for properties with multiple values (for ex. diplomas, although these aren't in here now)
def add_user_property_value(self, id, property, value):
return usermethods.addpropertyvalue(id, property, value)
def delete_user_property_value(self, id, property):
return usermethods.deletepropertyvalue(id, property)
def delete_specific_user_property_value(self, id, property, value):
return usermethods.deletespecific(id, property, value)
def get_all_user_info(self, id):
return usermethods.getallinfo(id)
def get_user_property_value(self, id, property):
return usermethods.getpropertyvalue(id, property)
def follow_user(self, follower_id, followed_id):
return usermethods.follow(follower_id, followed_id)
def get_userid_from_email(self, email):
return usermethods.fromemail(email)
def get_userid_from_other_property(self, property, value):
return usermethods.fromproperty(property, value)
##########################################################
##
## Methods: Search
##
## (For methods to get the comments on a document, see Comment methods)
##########################################################
def search_document(self, arg):
# geef arg van de vorm [eigenschap, waarde]
# returnt een array [document]
return searchmethods.documents(arg)
# nog te doen?
def search_user(self, arg):
# geef arg van de vorm [eigenschap, waarde]
# returnt een array [user]
return searchmethods.users(arg)
##########################################################
##
## Methods: Tags
##
## (Add a tag to a document by calling set_{type}_property)
##########################################################
def create_tag(self, tag):
return tagmethods.add(tag)
def tag_document(self, tag, documentid):
return tagmethods.tag(tag, documentid)
def remove_tag_document(self, tag, documentid):
return tagmethods.remove(tag, documentid)
def delete_tag(self, tag):
return tagmethods.delete(tag)
def follow_tag(self, id, tag):
    """Record that user *id* follows *tag*.

    Delegates to the tagmethods backend.
    """
    relation = tagmethods.follow(id, tag)
    return relation
def get_tagged(self, tag):
    """Return everything currently tagged with *tag*.

    Delegates to the tagmethods backend.
    """
    tagged = tagmethods.get(tag)
    return tagged
##########################################################
##
## Methods: Comments
##
##
##########################################################
#def add_comment_user(self, comment, id, poster_id):
# return commentmethods.add(comment, id, poster_id)
#def add_comment_document(self, comment, document, poster_id):
# return commentmethods.add(comment, document, poster_id)
#def remove_comment(self, id):
# return commentmethods.remove(id)
#def get_comments_on_user(self, id):
# returnt een array [comment]
# return commentmethods.commentsonuser(id)
#def get_comments_on_document(self, id):
# returnt een array [comment]
# return commentmethods.commentsondocument(id)
#def get_comments_by_user(self, id):
# returnt een array [comment]
#    return commentmethods.commentsbyuser(id)
### Termination
#
#
def exitdb(self):
    """Shut down: write the database state out via globals.writedb()."""
    globals.writedb()
# NOTE(review): Python 2 script block (print statements) — ad-hoc manual
# smoke test of the database wrapper, run only when executed directly.
if __name__== '__main__':
    db = database()
    #print db.new_user('jippie', 'asdf', 'geb@aren.taal')
    # Set, read back, delete, then re-set a user property to exercise the API.
    print db.set_user_property_value('1', 'email', 'asdf@asdf.asdf')
    print 'getvalue email:'
    print db.get_user_property_value('1', 'email')
    print 'delete:'
    print db.delete_specific_user_property_value('1', 'email', 'asdf@asdf.asdf')
    print 'setvalue email what:'
    print db.set_user_property_value('1', 'asdf', 'what@asdf.asdf')
    print 'getvalue asdf:'
    print db.get_user_property_value('1', 'asdf')
    # NOTE(review): exits without calling db.exitdb(), so nothing is written
    # back to disk — presumably intentional for a scratch test; confirm.
    exit()
| true |
0e57f971ec1142453ba1f7ba0e2d52db19a45f66 | Python | ecekalem/past-courses | /BIN515_Structural_Bioinformatics/takeHomeFinal/q2/.ipynb_checkpoints/chainSeparator-checkpoint.py | UTF-8 | 709 | 2.78125 | 3 | [] | no_license | import sys
def chainSeparator(pdbFileName):
"""
Inputs the pdbFile and returns
the chains as separated
"""
chains = {}
with open(pdbFileName) as pdbFile:
for line in pdbFile:
if line.startswith('ATOM'):
currentChain = line[21]
if currentChain in chains:
chains[currentChain].append(line)
else:
chains[currentChain] = [line]
return chains
chainDict = chainSeparator(sys.argv[1])
for chain in chainDict:
with open(sys.argv[1]+'_chain'+chain+'.pdb', "w") as pdbFile:
for line in chainDict[chain]:
pdbFile.write(line)
pdbFile.write("END\n")
| true |
934789a0cb3a6254a6d6e65c5ed20422324fe2c6 | Python | noe-d/synthetic_test | /synthetic/net.py | UTF-8 | 363 | 3.03125 | 3 | [
"MIT"
] | permissive | import igraph
def load_net(file_path, directed):
net = igraph.Graph.Load(file_path)
# force directed / undirected
if net.is_directed() and not directed:
net = net.as_undirected()
if not net.is_directed() and directed:
net = net.as_directed()
net = net.simplify()
assert (net.is_directed() == directed)
return net
| true |
bf9005162393d20b4169f9b854a0431d7099b5ac | Python | ozelentok/ProjectEulerSolutions | /pe5.py | UTF-8 | 693 | 3.390625 | 3 | [] | no_license | #!/usr/bin/python
import ozLib
inc = 1
num = 0
found = 0
for prime in ozLib.primesTo(20):
inc *= prime
while not found:
num += inc
for divider in range(2, 21):
if(num%divider != 0):
found = 0
break
found = 1
if found:
print num
else:
print "No number like that exists"
""" less efficent way
i = 0
found = 0
while not found:
i += 20
for divider in range(2, 16):
if(i % divider != 0):
found = 0
break
found = 1
found = 0
inc = i
while not found:
i += inc # found by using same program on smaller range
for divider in range(2, 21):
if(i%divider != 0):
found = 0
break
found = 1
if found:
print i
else:
print "No number like that exists"
"""
| true |
074b56f1a5826f1cc260b70317abd0dc3ffa265f | Python | Uttam1982/PythonTutorial | /08-Python-DataTypes/Sets/12-Iterating-set.py | UTF-8 | 149 | 4.1875 | 4 | [] | no_license | # Iterating Through a Set
# We can iterate through each item in a set using a for loop.
my_set = {12,23,34,45,56,67,78}
for i in my_set:
print(i) | true |
198cc4db8f2ae71fa539eb1f5590e3e671f782bd | Python | felipecuetor/coursera_downloader_backend | /populate_coursera_db.py | UTF-8 | 5,881 | 2.78125 | 3 | [] | no_license | import os
import requests
import datetime
import json
#Allows you to manually execute the data generation algorithm that analyzes all courses within the data folder in the root of the directory.
#This algorithm recursivly goes deeper and deeper within the folder system searching for the files.
def course_static_directory_analyzer_remote(course_path, recursize_directory_path, recursive_location_path, course_id, existing_lessons, existing_lessons_files ):
course_directory = os.listdir(course_path)
existing_lessons_in_directory={}
existing_lessons_in_directory_files={}
for element in course_directory:
element=str(element)
dir2 = course_path+str(element)
if os.path.isdir(dir2):
folder_name_division=""
split1 = element.split("_")
if split1[0].replace('.','',1).isdigit():
folder_name_division=split1[0]+">>>"+element[3:]
else:
folder_name_division=element
existing_lessons = course_static_directory_analyzer_remote(dir2+"/", recursize_directory_path+">>>"+str(element), recursive_location_path+">>>"+folder_name_division, course_id, existing_lessons, existing_lessons_files)[0]
else:
file_name_division=""
file_name=""
split1 = element.split("_")
if split1[0].replace('.','',1).isdigit():
file_name_division=split1[0]+">>>"+element[3:]
file_name=element[2:]
lesson_name=element[3:]
lesson_identifier=split1[0]
else:
file_name_division=element
file_name=element
lesson_name=file_name_division
lesson_identifier=file_name
file_name = element
file_directory_path = recursize_directory_path+">>>"+str(element)
file_location_path = recursive_location_path+">>>"+file_name_division
#url = 'http://localhost:8080/files/'
payload = {
'file_name': file_name,
'file_course_location': file_location_path,
'file_directory': file_directory_path,
'course_id':course_id
}
#r = requests.post(url, data=payload)
if lesson_identifier not in existing_lessons_in_directory:
existing_lessons_in_directory_files[lesson_identifier]=[]
existing_lessons_in_directory_files[lesson_identifier].append(payload)
existing_lessons_in_directory[lesson_identifier]={"lesson_name":lesson_name,"lesson_identifier":lesson_identifier}
existing_lessons.append(existing_lessons_in_directory)
existing_lessons_files.append(existing_lessons_in_directory_files)
return (existing_lessons, existing_lessons_files)
def course_element_generator(course_name, course_download_date, course_revised, course_download_available, course_error):
url = 'http://localhost:8080/courses/'
payload = {
'course_name': course_name,
'course_download_date': course_download_date,
'course_revised': course_revised,
'course_download_available':course_download_available,
'course_error':course_error
}
r = requests.post(url, data=payload)
return r.content
def detect_language_in_name(text, course_id):
period_split = text.split(".")
if(len(period_split)==3):
url = 'http://localhost:8080/courselanguage/'
payload = {
'course_id': course_id,
'language': period_split[1]
}
r = requests.post(url, data=payload)
def find_course_languages(course_info):
url = 'http://localhost:8080/course_files/?course_id='+str(course_info["id"])
r = requests.get(url)
course_files = r.content
for file in json.loads(course_files):
detect_language_in_name(file["file_name"], course_info["id"])
def dump_file_list(file_list, course_id, lesson_id):
for file in file_list:
url = 'http://localhost:8080/files/'
payload = file
payload["lesson_id"] = lesson_id
r = requests.post(url, data=payload)
def dump_lesson_list(existing_lessons,course_id, existing_lessons_files):
current_list = list(reversed(existing_lessons))
directory_lesson_file_list = list(reversed(existing_lessons_files))
previous_element_id = 0
for id, current in enumerate(current_list):
existing_lessons_in_directory_keys=list(reversed(sorted(current.keys())))
lesson_file_list = directory_lesson_file_list[id]
for key in existing_lessons_in_directory_keys:
current_lesson = current[key]
current_lesson_file_list = lesson_file_list[key]
url = 'http://localhost:8080/lesson/'
payload = current_lesson
payload["course_id"]=course_id
payload["next_lesson_id"]=previous_element_id
r = requests.post(url, data=payload)
previous_element = json.loads(r.content)
previous_element_id = previous_element["id"]
dump_file_list(current_lesson_file_list, course_id, previous_element_id)
dir1 = "./data/"
course_directory = os.listdir(dir1)
for element in course_directory:
dir2 = dir1+str(element)
existing_lessons=[]
existing_lessons_files=[]
if os.path.isdir(dir2):
course_info = json.loads(course_element_generator(element, datetime.datetime.now(), False, False, False))
print("Course post:"+str(course_info))
lessons_files_tuple = course_static_directory_analyzer_remote(dir2+"/", element, element, course_info["id"],existing_lessons,existing_lessons_files)
print("File post")
dump_lesson_list(existing_lessons,course_info["id"],existing_lessons_files)
print("Lesson post")
find_course_languages(course_info)
print("language post")
| true |
0a297a6541df27c0136e015664e791bfa63b787e | Python | MattRooke/Python-Sandbox | /CP1404/in_class_examples/my_own_exceptions.py | UTF-8 | 207 | 3.015625 | 3 | [] | no_license | class MyEception (Exception):
"""My custom exeption"""
pass
class BlankInput (Exception):
"""Blank Input Exception"""
pass
user_input = input(">")
if not user_input:
raise BlankInput | true |
08dd8fb665c119afef6fce10d351a00ea585da18 | Python | Trinity-Armstrong/ICS3U-Assignment7-Python | /largest_element.py | UTF-8 | 1,158 | 4.5625 | 5 | [] | no_license | #!/usr/bin/env python3
# Created by: Trinity Armstrong
# Created on: December 2019
# This program identifies the largest number in a list
def identify(array_of_numbers):
# This function identifies the largest number in a list
largest_number = 0
# Process
for counter in range(0, len(array_of_numbers)):
if largest_number < array_of_numbers[counter]:
largest_number = array_of_numbers[counter]
return largest_number
def main():
# This function gets a list of 5 numbers from user and prints the largest
# Instructions
print("I will help you identify the largest number in a list of 5 numbers.\
")
print("")
# Array declaration
number_list = []
# Process
try:
for loop_counter in range(5):
number = int(input("Enter an integer: "))
number_list.append(number)
# Call function
largest = identify(number_list)
# Output
print("")
print("The largest number in this list is", largest)
except Exception:
print("")
print("This is not an integer, try again.")
if __name__ == "__main__":
main()
| true |
9d2c54f70939d5662353de464e4018c0bd58c231 | Python | Resetand/opencv-labs | /lib/utils.py | UTF-8 | 1,120 | 2.734375 | 3 | [] | no_license | from urllib.request import urlopen
import cv2
import numpy as np
import matplotlib.pyplot as plt
class Utils:
@staticmethod
def fetch_image(url, flag=cv2.IMREAD_COLOR):
req = urlopen(url)
image = np.asarray(bytearray(req.read()), dtype="uint8")
return cv2.imdecode(image, flag)
@staticmethod
def capture_webcam(img_handler=(lambda frame: frame), winname='Open-cv'):
""" @param img_hwandler (Mat) => Mat: """
cap = cv2.VideoCapture(0)
while (True):
_, frame = cap.read()
frame = img_handler(frame)
pressed_key = cv2.waitKey(5) & 0xFF
cv2.imshow(winname, frame)
if pressed_key == 27:
break
cv2.destroyAllWindows()
cap.release()
@staticmethod
def show_image_compare(orig, result, title=''):
figure = plt.figure()
figure.canvas.set_window_title(title)
figure.add_subplot(1, 2, 1)
plt.imshow(orig)
figure.add_subplot(1, 2, 2)
plt.imshow(result)
plt.show(block=True)
plt.waitforbuttonpress()
| true |
c32cf0eeb4d5231512cb5db9caf78dec19b86f7e | Python | IngBiancoRoberto/PressReview | /webread_test.py | UTF-8 | 4,476 | 2.53125 | 3 | [] | no_license | #
#import requests
#import sys
import webread
import unittest
class TestWebread(unittest.TestCase):
def test_GenericReadFailed(self):
page = webread.generic_read(website_url='http://xxx')
# assert
self.assertEqual(page,[])
def test_RepubblicaRead(self):
media = 2
no_media = 1
websites,titles,links = webread.repubblica_read(media=media,no_media=no_media)
self.assertEqual(len(websites),media+no_media)
self.assertEqual(websites[0],'Repubblica')
self.assertEqual(len(titles),media+no_media)
self.assertEqual(len(links),media+no_media)
def test_RepubblicaManyRead(self):
media = 100
no_media = 0
websites,titles,links = webread.repubblica_read(media=media,no_media=no_media)
self.assertEqual(len(websites)>0,True)
self.assertEqual(websites[0],'Repubblica')
self.assertEqual(len(titles)>0,True)
self.assertEqual(len(links)>0,True)
def test_RepubblicaFailedRead(self):
websites, titles, links= webread.repubblica_read(website_url='http://xxx')
#
self.assertEqual(websites,[],'websites should be empty')
self.assertEqual(titles,[],'Titles should be empty')
self.assertEqual(links,[],'Links should be empty')
def test_CorriereRead(self):
websites, titles,links = webread.corriere_read(xmedium=2,medium=2)
self.assertEqual(len(websites),4)
self.assertEqual(websites[0],'Corriere')
self.assertEqual(len(titles),4)
self.assertEqual(len(links),4)
def test_CorriereManyRead(self):
# to test extreme values for article numbers
websites, titles,links = webread.corriere_read(xmedium=100,medium=0)
self.assertEqual(len(websites)>0,True)
self.assertEqual(websites[0],'Corriere')
self.assertEqual(len(titles)>0,True)
self.assertEqual(len(links)>0,True)
def test_CorriereFailedRead(self):
websites, titles, links= webread.corriere_read(website_url='http://xxx')
#assert
self.assertEqual(websites,[],'websites should be empty')
self.assertEqual(titles,[],'Titles should be empty')
self.assertEqual(links,[],'Links should be empty')
def test_BBCNewsRead(self):
websites, titles,links = webread.bbcnews_read()
self.assertEqual(len(websites),2)
self.assertEqual(websites[0],'BBC News')
self.assertEqual(len(titles),2)
self.assertEqual(len(links),2)
def test_BBCNewsFailedRead(self):
websites, titles, links= webread.bbcnews_read(website_url='http://xxx')
#assert
self.assertEqual(websites,[],'websites should be empty')
self.assertEqual(titles,[],'Titles should be empty')
self.assertEqual(links,[],'Links should be empty')
def test_Sole24OreRead(self):
websites, titles,links = webread.sole24ore_read(narts=2)
self.assertEqual(len(websites),2)
self.assertEqual(websites[0],'Sole 24 Ore')
self.assertEqual(len(titles),2)
self.assertEqual(len(links),2)
def test_Sole24OreManyRead(self):
websites, titles,links = webread.sole24ore_read(narts=100)
self.assertEqual(len(websites)>0,True)
self.assertEqual(websites[0],'Sole 24 Ore')
self.assertEqual(len(titles)>0,True)
self.assertEqual(len(links)>0,True)
def test_Sole24OreFailedRead(self):
websites, titles, links= webread.sole24ore_read(website_url='http://xxx')
#assert
self.assertEqual(websites,[],'websites should be empty')
self.assertEqual(titles,[],'Titles should be empty')
self.assertEqual(links,[],'Links should be empty')
def test_NYTimesRead(self):
websites, titles,links = webread.nytimes_read(narts=2)
self.assertEqual(len(websites),2)
self.assertEqual(websites[0],'NY Times')
self.assertEqual(len(titles),2)
self.assertEqual(len(links),2)
def test_NYTimesFailedRead(self):
websites, titles, links= webread.nytimes_read(website_url='http://xxx')
#assert
self.assertEqual(websites,[],'websites should be empty')
self.assertEqual(titles,[],'Titles should be empty')
self.assertEqual(links,[],'Links should be empty')
if __name__ == '__main__':
unittest.main() | true |
cecb65318c6f77b30a92c77d40091ce3dbeb35e1 | Python | NinaWie/pitch_type | /1_Pose_Estimation/time_probe.py | UTF-8 | 2,319 | 2.59375 | 3 | [] | no_license | import time
from config_reader import config_reader
param_, model_ = config_reader()
TIME_PRINT = param_['print_tictoc'] is '1'
TIME_PROBE_ID = 0
TIME_STACK = []
TEXT_PADDING = 24
FIRST_STAMP = None
PREV_STAMP = None
class TimeStamp:
def __init__(self, label):
self.children = []
self.elapsed = -1
self.begun = time.time()
self.label = label
def pretty(self, level=0, percentage=100.0):
tabbing = ''.join(level * [' '])
equal_padding = ''.join((TEXT_PADDING - len(self.label)) * [' '])
result = '| %s|__%s%s: %.2f%% (%.6fs)' % (tabbing, self.label, equal_padding, percentage, self.elapsed)
return result
def time_printout(stamp, level=0):
accounted_percentage = 0.0
for child in stamp.children:
elapsed_percentage = child.elapsed / stamp.elapsed* 100
if TIME_PRINT: print child.pretty(level, elapsed_percentage)
time_printout(child, level + 1)
accounted_percentage += elapsed_percentage
# TODO: percentage unaccounted for
if len(stamp.children):
tabbing = ''.join(level * [' '])
if TIME_PRINT: print '| %s---(%.2f%% unaccounted)' % (tabbing, 100 - accounted_percentage)
def time_summary():
global FIRST_STAMP, PREV_STAMP, TIME_STACK
if TIME_PRINT: print '| TICTOC RESULTS:'
if TIME_PRINT: print '|'
if TIME_PRINT: print FIRST_STAMP.pretty()
time_printout(FIRST_STAMP, 1)
FIRST_STAMP = None
PREV_STAMP = None
TIME_STACK = []
def tic(label):
global FIRST_STAMP, PREV_STAMP
stamp = TimeStamp(label)
if FIRST_STAMP is None:
FIRST_STAMP = stamp
PREV_STAMP = stamp
else:
# Stamp becomes child of immediate parent; child takes over current parent
PREV_STAMP.children.append(stamp)
PREV_STAMP = stamp
TIME_STACK.append(stamp)
def toc(label):
global PREV_STAMP, TIME_STACK
last_label = TIME_STACK[len(TIME_STACK) - 1].label
if last_label != label:
raise Exception('Inconsistent tic tocs: "%s" -> "%s"' % (label, last_label))
stamp = TIME_STACK.pop()
stamp.elapsed = time.time() - stamp.begun
# Relinquish current parent from its role; its parent becomes the new parent
if len(TIME_STACK):
PREV_STAMP = TIME_STACK[len(TIME_STACK) - 1]
| true |
082f6ca1fd647be942474006704be400236686ee | Python | gustavla/vision-research | /detector/histogram_of_detections.py | UTF-8 | 3,592 | 2.578125 | 3 | [] | no_license |
# Let's pad a little bit, so that we get the features correctly at the edges
import argparse
import matplotlib.pylab as plt
import amitgroup as ag
import gv
import numpy as np
def main():
parser = argparse.ArgumentParser(description='Train mixture model on edge data')
parser.add_argument('model', metavar='<model file>', type=argparse.FileType('rb'), help='Filename of the model file')
parser.add_argument('mixcomp', metavar='<mixture component>', type=int, help='mix comp')
parser.add_argument('--negatives', action='store_true', help='Analyze n')
args = parser.parse_args()
model_file = args.model
mixcomp = args.mixcomp
negatives = args.negatives
# Load detector
detector = gv.Detector.load(model_file)
llhs = calc_llhs(detector, not negatives, mixcomp)
plt.hist(llhs, 10)
plt.show()
def calc_llhs(detector, positives, mixcomp):
padding = 0
if not positives:
np.random.seed(0)
originals, bbs = gv.voc.load_negative_images_of_size('bicycle', detector.kernel_size, count=50, padding=padding)
else:
profiles = map(int, open('profiles.txt').readlines())
originals, bbs = gv.voc.load_object_images_of_size_from_list('bicycle', detector.unpooled_kernel_size, profiles, padding=padding)
print "NUMBER OF IMAGES", len(originals)
limit = None
reses = []
llhs = []
# Extract features
for i in xrange(len(originals)):
im = originals[i]
grayscale_img = im.mean(axis=-1)
bb = bbs[i]
#edges = detector.extract_pooled_features(im)
# Now remove the padding
#edges = edges[padding:-padding,padding:-padding]
#edgemaps.append(edges)
#plt.imshow(im)
#plt.show()
# Check response map
print "calling response_map", im.shape, mixcomp
res, small = detector.response_map(grayscale_img, mixcomp)
# Normalize
xx = (res - res.mean()) / res.std()
print 'small', small.shape
# Check max at the center of the bounding box (bb)
ps = detector.settings['pooling_size']
m = int((bb[0]+bb[2])/ps[0]//2), int((bb[1]+bb[3])/ps[1]//2)
#m = res.shape[0]//2, res.shape[1]//2
s = 2
#print 'factor', self.factor(
#print 'ps', ps
#print 'im', im.shape
#print 'res', res.shape
#print m
top = xx[max(0, m[0]-s):min(m[0]+s, res.shape[0]), max(0, m[1]-s):min(m[1]+s, res.shape[1])].max()
llhs.append(top)
if 1:
if limit is not None:
plt.subplot(3, 6, 1+2*i)
plt.imshow(im, interpolation='nearest')
plt.subplot(3, 6, 2+2*i)
plt.imshow(res, interpolation='nearest')
plt.colorbar()
plt.title("Top: {0:.2f} ({1:.2f})".format(top, res.max()))
elif False:#top < -5000:
#plt.subplot(3, 6, 1+2*i)
plt.subplot(1, 2, 1)
plt.imshow(im, interpolation='nearest')
#plt.subplot(3, 6, 2+2*i)
plt.subplot(1, 2, 2)
plt.imshow(res, interpolation='nearest')
plt.colorbar()
#plt.title("{0}".format(i))
plt.title("Top: {0:.2f} ({1:.2f})".format(top, res.max()))
plt.show()
#print llhs
if 0:
if limit is not None:
plt.show()
else:
plt.hist(llhs, 10)
plt.show()
return np.asarray(llhs)
if __name__ == '__main__':
main()
| true |
f3a7da9891214b3a84bf6d9c85a8b06ea9dc08a4 | Python | tongwang/django-sugar | /sugar/cache/middleware.py | UTF-8 | 1,543 | 2.5625 | 3 | [] | no_license | from django.conf import settings
from django.utils.cache import patch_cache_control
class HTTPCacheControlMiddleware(object):
"""
Simple middleware which sets HTTP Cache-Control headers without all of the
other overhead of django.middleware.cache.UpdateCacheMiddleware. This is
intended for use with a front-end accelerator such as Varnish when you
want Django to set cache policy but not actually cache responses.
Basic rules:
1. To avoid accidental leaks of private information only anonymous
requests will be updated
2. We only set headers for successful GET requests
3. We don't touch requests which already have a Cache-Control header
Usage:
1. Add "sugar.cache.middleware.HTTPCacheControlMiddleware" to your
MIDDLEWARE_CLASSES
2. Add a dictionary to settings.py which has the values you want::
DEFAULT_HTTP_CACHE_CONTROL = dict(public=True, max_age=300)
"""
def __init__(self):
self.cache_control_args = getattr(settings, "DEFAULT_HTTP_CACHE_CONTROL", {})
def process_response(self, request, response):
if hasattr(request, "user") and not request.user.is_anonymous():
return response
if request.method != 'GET':
return response
if not response.status_code == 200:
return response
if response.has_header("Cache-Control"):
return response
patch_cache_control(response, **self.cache_control_args)
return response
| true |
ab09ee82aa8ae0a8b59fc18c05f63529b69a3c2a | Python | daniel-reich/ubiquitous-fiesta | /yiEHCxMC9byCqEPNX_20.py | UTF-8 | 185 | 3.25 | 3 | [] | no_license |
def is_palindrome(p):
p = "".join([let.lower() for let in p if let.isalpha()])
if len(p) < 2:
return True
if p[0] != p[-1]:
return False
return is_palindrome(p[1:-1])
| true |
6e31e41e51cf406bf240423ed7cff5b6cb5199b0 | Python | brainmentorspvtltd/RDE_Python_2021 | /PythonSectionA/dict_exercise_2.py | UTF-8 | 734 | 3.28125 | 3 | [] | no_license | data = [
{"name":"Ram","branch":"IT","marks":{"math":67,"phy":76}},
{"name":"Shyam","branch":"CS","marks":{"chem":91,"bio":92}},
{"name":"Mohan","branch":"IT","marks":{"math":55,"c++":60}},
{"name":"Aman","branch":"CS","marks":{"java":60,"math":59}},
{"name":"Kunal","branch":"IT","marks":{"math":37,"phy":84}},
]
name = input("Enter Student Name : ")
marks = 0
for i in range(len(data)):
if data[i]["name"] == name:
# s1 = data[i]["marks"]["math"]
# s2 = data[i]["marks"]["phy"]
# print("Total Marks",s1 + s2)
# break
for key in data[i]["marks"]:
marks += data[i]["marks"][key]
print("Total Marks of {} is {}".format(name, marks))
| true |
c9a1d9a3ca02b015a952fc8cd403a3a47cc611f9 | Python | MinghuiGao/py_cpu_load_info | /CPUAnylasis.py | UTF-8 | 544 | 3.171875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
# https://blog.csdn.net/qq_29721419/article/details/71638912
name_list = ['Monday', 'Tuesday', 'Friday', 'Sunday']
num_list = [1.5, 0.6, 7.8, 6]
num_list1 = [1, 2, 3, 1]
plt.bar(range(len(num_list)), num_list, label='boy', fc='y')
plt.bar(range(len(num_list)), num_list1, bottom=num_list, label='girl', tick_label=name_list,
fc='r')
plt.bar(range(len(num_list)), num_list1, bottom=num_list, label='robot', tick_label=name_list,
fc='r')
plt.bar()
plt.legend()
plt.show()
| true |
62f4f1137c19139be320bade2aa4281fdb81dada | Python | TinyHandsome/BookStudy | /1-books/book2_TensorFlow实战Google深度学习框架(第二版)/tensorboard_test/tensorboard_test8.py | UTF-8 | 6,121 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# coding=utf-8
"""
@author: Li Tian
@contact: 694317828@qq.com
@software: pycharm
@file: tensorboard_test8.py
@time: 2019/5/13 9:34
@desc: 在生成好辅助数据之后,以下代码展示了如何使用TensorFlow代码生成PROJECTOR所需要的日志文件来可视化MNIST测试数据在最后的输出层向量。
"""
import tensorflow as tf
from BookStudy.book2 import mnist_inference
import os
# 加载用于生成PROJECTOR日志的帮助函数。
from tensorflow.contrib.tensorboard.plugins import projector
from tensorflow.examples.tutorials.mnist import input_data
# 和前面中类似地定义训练模型需要的参数。这里我们同样是复用mnist_inference过程。
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARAZTION_RATE = 0.0001
# 可以通过调整这个参数来控制训练迭代轮数。
TRAINING_STEPS = 10000
MOVING_AVERAGE_DECAY = 0.99
# 和日志文件相关的文件名及目录地址。
LOG_DIR = './log3'
SPRITE_FILE = 'D:/Python3Space/BookStudy/book2/tensorboard_test/log2/mnist_sprite.jpg'
META_FILE = 'D:/Python3Space/BookStudy/book2/tensorboard_test/log2/mnist_meta.tsv'
TENSOR_NAME = 'FINAL_LOGITS'
# 训练过程和前面给出的基本一致,唯一不同的是这里还需要返回最后测试数据经过整个
# 神经网络得到的输出矩阵(因为有很多张测试图片,每张图片对应了一个输出层向量,
# 所以返回的结果是这些向量组成的矩阵。
def train(mnist):
# 输入数据的命名空间。
with tf.name_scope('input'):
x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
regularizer = tf.contrib.layers.l2_regularizer(REGULARAZTION_RATE)
y = mnist_inference.inference(x, regularizer)
global_step = tf.Variable(0, trainable=False)
# 处理滑动平均的命名空间。
with tf.name_scope("moving_average"):
variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
variable_averages_op = variable_averages.apply(tf.trainable_variables())
# 计算损失函数的命名空间。
with tf.name_scope("loss_function"):
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
cross_entropy_mean = tf.reduce_mean(cross_entropy)
loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
# 定义学习率、优化方法及每一轮执行训练操作的命名空间。
with tf.name_scope("train_step"):
learning_rate = tf.train.exponential_decay(
LEARNING_RATE_BASE,
global_step,
mnist.train.num_examples / BATCH_SIZE,
LEARNING_RATE_DECAY,
staircase=True
)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
with tf.control_dependencies([train_step, variable_averages_op]):
train_op = tf.no_op(name='train')
# 训练模型
with tf.Session() as sess:
tf.global_variables_initializer().run()
for i in range(TRAINING_STEPS):
xs, ys = mnist.train.next_batch(BATCH_SIZE)
_, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
if i % 1000 == 0:
print("After %d training step(s), loss on training batch is %g." % (i, loss_value))
# 计算MNIST测试数据对应的输出层矩阵。
final_result = sess.run(y, feed_dict={x: mnist.test.images})
# 返回输出层矩阵的值。
return final_result
# 生成可视化最终输出层向量所需要的日志文件。
def visualisation(final_result):
# 使用一个新的变量来保存最终输出层向量的结果。因为embedding是通过TensorFlow中
# 变量完成的,所以PROJECTOR可视化的都是TensorFlow中的变量。于是这里需要新定义
# 一个变量来保存输出层向量的取值。
y = tf.Variable(final_result, name=TENSOR_NAME)
summary_writer = tf.summary.FileWriter(LOG_DIR)
# 通过projector.ProjectorConfig类来帮助生成日志文件。
config = projector.ProjectorConfig()
# 增加一个需要可视化的embedding结果。
embedding = config.embeddings.add()
# 指定这个embedding结果对应的TensorFlow变量名称。
embedding.tensor_name = y.name
# 指定embedding结果所对应的原始数据信息。比如这里指定的就是每一张MNIST测试图片
# 对应的真实类别。在单词向量中可以是单词ID对应的单词。这个文件是可选的,如果没有指定
# 那么向量就没有标签。
embedding.metadata_path = META_FILE
# 指定sprite图像。这个也是可选的,如果没有提供sprite图像,那么可视化的结果
# 每一个点就是一个小圆点,而不是具体的图片。
embedding.sprite.image_path = SPRITE_FILE
# 在提供sprite图像时,通过single_image_dim可以指定单张图片的大小。
# 这将用于从sprite图像中截取正确的原始图片。
embedding.sprite.single_image_dim.extend([28, 28])
# 将PROJECTOR所需要的内容写入日志文件。
projector.visualize_embeddings(summary_writer, config)
# 生成会话,初始化新声明的变量并将需要的日志信息写入文件。
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.save(sess, os.path.join(LOG_DIR, "model"), TRAINING_STEPS)
summary_writer.close()
# 主函数先调用模型训练的过程,再使用训练好的模型来处理MNIST测试数据,
# 最后将得到的输出层矩阵输出到PROJECTOR需要的日志文件中。
def main(argc=None):
mnist = input_data.read_data_sets('D:/Python3Space/BookStudy/book2/MNIST_data', one_hot=True)
final_result = train(mnist)
visualisation(final_result)
if __name__ == '__main__':
main() | true |
3f138c0e40eea242f31aa7eb8bf11722f4550e97 | Python | alexeyche/alexeyche-junk | /bm/activation.py | UTF-8 | 2,070 | 3.0625 | 3 | [] | no_license | import numpy as np
from common_check import grad_check
class Activation(object):
def __call__(self, x):
raise NotImplementedError
def grad(self, x):
raise NotImplementedError
def approx_grad(self, x, epsilon=1e-05):
dx = np.zeros(x.shape)
for i in xrange(dx.shape[0]):
for j in xrange(dx.shape[1]):
de = np.zeros(dx.shape)
de[i, j] = epsilon
lo = self(x - de)
ro = self(x + de)
dx[i, j] = np.sum((ro - lo)/(2.0*epsilon))
return dx
class ClipActivation(Activation):
def __call__(self, x):
return np.clip(x, 0.0, 1.0)
# return np.clip(x + 0.1*np.random.randn(*x.shape), 0.0, 1.0)
def grad(self, x):
dx = np.ones(x.shape)
dx[np.where(x < 0.0)] = 0.0
dx[np.where(x > 1.0)] = 0.0
return dx
class ExpClipActivation(Activation):
def __call__(self, x):
return np.exp(np.clip(x, 0.0, 1.0))
def grad(self, x):
dx = np.exp(x)
dx[np.where(x < 0.0)] = 0.0
dx[np.where(x > 1.0)] = 0.0
return dx
class ExpActivation(Activation):
def __call__(self, x):
return np.exp(x)
def grad(self, x):
return np.exp(x)
class SoftplusActivation(Activation):
def __call__(self, x):
return np.log(1.0 + np.exp(x-1.0))
def grad(self, x):
return 1.0/(1.0 + np.exp(-(x-1.0)))
class SigmoidActivation(Activation):
def __call__(self, x):
v = 1.0/(1.0 + np.exp(-x))
# return np.floor(np.random.random(v.shape) + v)
return v
def grad(self, x):
v = 1.0/(1.0 + np.exp(-x))
return v * (1.0 - v)
def test_act_grad(act, x, epsilon=1e-05, tol=1e-05, fail=True):
dy = act.grad(x)
dy_approx = act.approx_grad(x, epsilon=epsilon)
grad_check(dy, dy_approx, "act", fail, tol)
return dy, dy_approx
if __name__ == '__main__':
test_act_grad(
ExpClipActivation(),
np.random.random((10, 10))
) | true |
1c9e3c90e0eb3de81b9d1403d5c5f2219bcf6ec0 | Python | christopherwebb/sainsbury_test | /tests/test_parser.py | UTF-8 | 1,081 | 2.796875 | 3 | [] | no_license | import unittest
from pyquery import PyQuery as pq
from SiteCrawler.Parser import MasterPageParser, ProductPageParser
class TestMasterPageParser(unittest.TestCase):
def setUp(self):
self.parser = MasterPageParser('hello')
def test_find_product_page_links(self):
self.parser.py_query = pq('<ul class="productLister"><li><h3 class="productInfo"><a href="blah"></h3></li></ul>')
results = self.parser.GetResults()
self.assertEqual(results, ['blah'])
class TestProductPageParser(unittest.TestCase):
def setUp(self):
self.parser = ProductPageParser('hello')
def test_description(self):
self.parser.py_query = pq('<div><h3 class="productDataItemHeader">Description</h3><div><p>Apricots</p></div></div>')
results = self.parser.GetResults()
self.assertEqual(results['description'], 'Apricots')
def test_finds_unit_price(self):
self.parser.py_query = pq('<div class="pricing"><div class="pricePerUnit">$3.45</div></div>')
results = self.parser.GetResults()
self.assertEqual(results['unit_price'], 3.45)
| true |
90dd8e9237bee343f75f70fabe2feaa91e4959ff | Python | Acrylami/ABYSS | /rooms.py | UTF-8 | 3,606 | 3.34375 | 3 | [] | no_license | # all room details are in this file
# room details have to be filled in
import items
# The front door: the game's exit.  NOTE(review): unlike the other rooms this
# dict has no 'items'/'items_not'/'exits' keys but adds 'opened' - confirm the
# engine treats it as a special case.
room_main_door = {
    'name': 'main door',
    'description': 'The only exit. You have to get out.',
    'door': False,
    'opened': False,
}
# Ground floor: entrance hall connecting kitchen, bathroom, stairs and exit.
room_lobby = {
    'name': 'main lobby',
    'description': """The lobby of your home. The lights are bright and unforgiving, hurting your eyes.
An ornate coatrack stands in one corner of the room, at the foot of a winding staircase.
A photo is mounted on the wall. """,
    'items': [],                        # items the player can pick up
    'items_not': [items.item_rack],     # fixed scenery the player can examine
    'exits': {'west': 'kitchen', 'east': 'bathroom', 'up': 'stairs', 'south': 'main door'},
    'door': True,
}
room_kitchen = {
    'name': 'kitchen',
    'description': """The room is spacious yet cluttered, dirty plates sit on the kitchen counter,
pots and pans hang above the stove and an array of empty bottles litter the floor and worktop.
You drank too much last night. There is an oven, fridge and a sink here.""",
    'items': [items.item_riddle_clock, ],
    'items_not': [items.item_oven, items.item_fridge, items.item_sink,],
    'exits': {'east': 'lobby',},
    'door': False,
}
room_bathroom = {
    'name': 'bathroom',
    'description': """An ornate bath is the centrepiece of this room, made of marble.
The toilet and sink sit towards the right of the room. The basin of the sink has a faint pink tinge,
and the floor is damp as though recently cleaned. Above the sink is a cabinet.
The smell of bleach is thick in this room..""",
    'items': [items.item_paper,],
    'items_not': [items.item_bath, items.item_sink, items.item_toilet, items.item_cabinet,
                  items.item_mirror],
    'exits': {'west': 'lobby'},
    'door': False,
}
# NOTE(review): item_rack here duplicates the lobby's coatrack and the nursery
# description never mentions a rack - confirm it belongs in this room.
room_nursery = {
    'name': 'nursery',
    'description': """The walls of this room are covered in a galaxy print,
with a small cot in the corner of the room. Toys take up most of the floor space.
You feel uncomfortable here.""",
    'items': [items.item_building_block, items.item_light_switch,],
    'items_not': [items.item_rack, items.item_cot,],
    'exits': {'east': 'landing floor', },
    'door': False,
}
room_bedroom = {
    'name': 'bedroom',
    'description': """You open the door to the bedroom. A four poster canopy bed sits in the middle of the
room with clothes strewn over it haphazardly. A wooden wardrobe stands to your left open.
To your right is a desk and a vanity.""",
    'items': [items.item_riddle_candle,
              items.item_matchsticks,
              items.item_pendulum],
    'items_not': [items.item_wardrobe, items.item_desk, items.item_bed, items.item_picture,],
    'exits': {'west': 'landing floor', },
    'door': True,
}
room_stairs = {
    'name': 'stairs',
    'description': """A grand ornate staircase.
When I walk on the floorboards it squeaks""",
    'items': [],
    'items_not': [],
    'exits': {'down': 'lobby', 'up': 'landing floor', },
    'door': True,
}
room_landing_floor_1 = {
    'name': 'landing floor',
    'description': """A giant ominous hallway.
Looking directly towards me is a strange looking photo""",
    'items': [],
    'items_not': [items.item_photo_frame],
    'exits': {'west': 'nursery', 'east': 'bedroom', 'down': 'stairs' },
    'door': True,
}
# Lookup table mapping the names used in 'exits' to the room dicts above.
rooms_id = {
    'lobby': room_lobby,
    'kitchen': room_kitchen,
    'bathroom': room_bathroom,
    'nursery': room_nursery,
    'bedroom': room_bedroom,
    'stairs': room_stairs,
    'landing floor': room_landing_floor_1,
    'main door': room_main_door
}
| true |
72fc40a70cfdad5a11b0f952fc6db7dff784e368 | Python | jw3329/leetcode-problem-solving | /501. Find Mode in Binary Search Tree/solution.py | UTF-8 | 840 | 3.1875 | 3 | [] | no_license | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def findMode(self, root: "TreeNode") -> "List[int]":
        """Return the most frequent value(s) in a binary tree (LeetCode 501).

        Counts every value in one traversal, then keeps the values whose count
        equals the maximum.  This is O(n) instead of the previous version's
        O(n log n) sort of the whole frequency table.  The annotations are
        quoted because TreeNode/List are not imported in this file.
        """
        if not root:
            return []
        counts = {}
        self.findModeHelper(root, counts)
        best = max(counts.values())
        return [val for val, cnt in counts.items() if cnt == best]

    def findModeHelper(self, root, dic):
        """Recursively accumulate value frequencies of the subtree into dic."""
        if not root:
            return
        self.findModeHelper(root.left, dic)
        self.findModeHelper(root.right, dic)
        # dict.get avoids the separate membership test of the old version.
        dic[root.val] = dic.get(root.val, 0) + 1
| true |
fad3174e83437904e27d8966bf6c64575fa8c265 | Python | xiaogaogaoxiao/FP_D2D | /generate_Ray.py | UTF-8 | 4,877 | 2.5625 | 3 | [] | no_license | import os
import torch
import numpy as np
# import the global Hyperparameters
from global_var import *
# save the generated dataset, defalut in ./datasets
def save_dataset(dataset, path='./datasets', name='train.pth.tar'):
    """Persist ``dataset`` as ``path/name`` via torch.save, creating ``path``
    on demand (default location: ./datasets)."""
    if not os.path.exists(path):
        os.makedirs(path)
    # Keep the original '/'-concatenation so the produced filename is
    # byte-identical to the previous implementation.
    target = path + '/' + name
    torch.save(dataset, target)
def _sample_channel_split(n, Ksize, channel_type, label):
    """Draw one split (train or val) of ``n`` channel realisations.

    :param n: number of samples to draw
    :param Ksize: (K users, L IRSs, Ml IRS elements, M rx antennas, N tx antennas)
    :param channel_type: 'Ray' (Rayleigh with distance path loss) or 'Gau'
    :param label: split name used in the error message ('Train' / 'Val')
    :return: (Hd, Tx, Rx) as complex64 arrays

    Random numbers are drawn in exactly the same order as the original
    duplicated code, so results for a fixed numpy seed are unchanged.
    """
    # K: users; L: IRSs; Ml: elements of IRS; M: receiver antennas; N: transmitter antennas
    K, L, Ml, M, N = Ksize
    # path loss factors
    Cd = 1e-3               # path loss at the reference distance
    CI = 1e-3*10**(10/3)
    aTR, aIT, aIR = 2.8, 2, 2
    Hd = np.zeros((n, K, K, M, N))      # transmitter-receiver
    Tx = np.zeros((n, L, K, Ml, N))     # transmitter-IRS
    Rx = np.zeros((n, K, L, M, Ml))     # IRS-receiver
    # random transmitter/receiver locations; the IRS position is fixed
    LocTx = np.dstack((-np.random.rand(n, K)*20 + 20, -np.random.rand(n, K)*20 + 20))
    LocRx = np.dstack((-np.random.rand(n, K)*20 + 50, -np.random.rand(n, K)*20 + 20))
    LocIRS = np.array([25, 20])
    if channel_type == 'Ray':
        # transmitter-receiver: distance-dependent path loss times CN(0, 1)
        coeff1 = np.sqrt(Cd*np.linalg.norm(LocTx[:, np.newaxis, :, :] - LocRx[:, :, np.newaxis, :], axis=3)**(-aTR))
        tmpHd = 1/np.sqrt(2)*(np.random.randn(*Hd.shape) + 1j*np.random.randn(*Hd.shape))
        Hd = coeff1[:, :, :, np.newaxis, np.newaxis]*tmpHd
        # transmitter-IRS
        coeff2 = np.sqrt(CI*np.linalg.norm(LocTx - LocIRS, axis=2)**(-aIT))
        tmpTx = 1/np.sqrt(2)*(np.random.randn(*Tx.shape) + 1j*np.random.randn(*Tx.shape))
        Tx = coeff2[:, :, np.newaxis, np.newaxis]*tmpTx
        # IRS-receiver
        coeff3 = np.sqrt(CI*np.linalg.norm(LocIRS - LocRx, axis=2)**(-aIR))
        tmpRx = 1/np.sqrt(2)*(np.random.randn(*Rx.shape) + 1j*np.random.randn(*Rx.shape))
        Rx = coeff3[:, :, np.newaxis, np.newaxis]*tmpRx
    elif channel_type == 'Gau':
        # plain circularly-symmetric complex Gaussian channels
        Hd = 1/np.sqrt(2)*(np.random.randn(*Hd.shape) + 1j*np.random.randn(*Hd.shape))
        Tx = 1/np.sqrt(2)*(np.random.randn(*Tx.shape) + 1j*np.random.randn(*Tx.shape))
        Rx = 1/np.sqrt(2)*(np.random.randn(*Rx.shape) + 1j*np.random.randn(*Rx.shape))
    else:
        print(label + ': Generation does not work')
    return Hd.astype(np.complex64), Tx.astype(np.complex64), Rx.astype(np.complex64)


def generate_channel(Ksize = [3, 1, 2, 1, 1], channel_type = 'Gau', seed = 2020):
    """Generate train/val channel datasets and save them under ./datasets.

    The train and validation splits were previously produced by two nearly
    identical ~30-line copies of the same sampling code; both now go through
    the shared helper ``_sample_channel_split``.

    :param Ksize: (K users, L IRSs, Ml IRS elements, M rx antennas, N tx antennas);
                  the default list is only read, never mutated
    :param channel_type: 'Ray' or 'Gau'
    :param seed: numpy RNG seed for reproducibility
    """
    np.random.seed(seed)
    # Sample counts come from global_var (imported with *): train/val_sample_num.
    train_Hd, train_Tx, train_Rx = _sample_channel_split(
        train_sample_num, Ksize, channel_type, 'Train')
    val_Hd, val_Tx, val_Rx = _sample_channel_split(
        val_sample_num, Ksize, channel_type, 'Val')
    # NOTE: as in the original code, the direct transmitter-receiver channels
    # (Hd) are generated but intentionally not persisted.
    save_dataset({'train_Tx': train_Tx, 'train_Rx': train_Rx}, path = './datasets', name = channel_type+'_train.pth.tar')
    save_dataset({'val_Tx': val_Tx, 'val_Rx': val_Rx}, path = './datasets', name = channel_type+'_val.pth.tar')
    print('Datasets are generated successfully!')
if __name__ == "__main__":
    # Generate the default Gaussian-channel datasets; Ksize comes from global_var.
    generate_channel(Ksize = Ksize, channel_type = 'Gau')
11649da23b78d8ffb2c35ab96509422b7d58fab1 | Python | prashant-bande/NLP | /Lemmatization.py | UTF-8 | 1,352 | 3.546875 | 4 | [] | no_license |
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
paragraph = """Words aren’t things that computers naturally understand. By
encoding them in a numeric form, we can apply mathematical rules and do matrix
operations to them. This makes them amazing in the world of machine learning,
especially. Take deep learning for example. By encoding words in a numerical
form, we can take many deep learning architectures and apply them to words.
Convolutional neural networks have been applied to NLP tasks using word
embeddings and have set the state-of-the-art performance for many tasks. Even
better, what we have found is that we can actually pre-train word embeddings
that are applicable to many tasks. That’s the focus of many of the types we
will address in this article. So one doesn’t have to learn a new set of
embeddings per task, per corpora. Instead, we can learn general representation
which can then be used across tasks."""
# Split the paragraph (defined above) into sentences once.
sentences = nltk.sent_tokenize(paragraph)
lemmatizer = WordNetLemmatizer()

# Build the stop-word set once.  The old code evaluated
# set(stopwords.words('english')) inside the comprehension condition, i.e.
# rebuilt the whole set for every single word.
stop_words = set(stopwords.words('english'))

# Lemmatization: replace each sentence with its lemmatized, stop-word-free form.
for i in range(len(sentences)):
    words = nltk.word_tokenize(sentences[i])
    words = [lemmatizer.lemmatize(word) for word in words if word not in stop_words]
    sentences[i] = ' '.join(words)
6677349f51341ab628792fb4c2a3f3ba63aac8cc | Python | tomergreenwald/tracer | /find_placeholders.py | UTF-8 | 1,682 | 3.34375 | 3 | [] | no_license | import re
# According to printf format string specification from Wikipedia, the format is:
# %[parameter][flags][width][.precision][length]type
# see http://en.wikipedia.org/wiki/Printf_format_string#Format_placeholders
PLACEHOLDERS_REGEX = """
(                                        # Capture everything in group 1
%                                        # Look for a literal '%'
(?:                                      # Match an actual placeholder (and not '%%')
(?:\d+\$)?                               # Look for an optional paramter - number and then '$'
[-+ 0#]{0,5}                             # 0 to 5 optional flags
(?:\d+|\*)?                              # Optional width
(?:\.(?:\d+|\*))?                        # Optional precision
(?:hh|h|l|ll|L|z|j|t|q|I|I32|I64|w)?     # Optional length
[diufFeEgGxXoscqaAn]                     # Type
|                                        # OR
%))                                      # literal "%%"
"""

# Compile once at import time instead of on every call.
_PLACEHOLDERS_PATTERN = re.compile(PLACEHOLDERS_REGEX, flags=re.X)


def find_placeholders_in_format_string(format_string):
    """
    Find inside a C-style format string all the placeholders for parameters,
    i.e. for "Worker name is %s and id is %d" return the indexes and the
    placeholders: [(15, "%s"), (28, "%d")].  Escaped percent signs ("%%")
    are literal '%' characters and are excluded.
    :param format_string: The string to find placeholders in
    :return: List of (start index, placeholder text) tuples
    """
    return [(m.start(0), m.group(0))
            for m in _PLACEHOLDERS_PATTERN.finditer(format_string)
            if m.group(0) != "%%"]
7fa2427546863122ac4ad851939fbd45f20a7a2b | Python | bugsz/minisql | /src/BufferManager/BufferManager.py | UTF-8 | 10,284 | 2.890625 | 3 | [] | no_license | import os
from BufferManager.bufferDS import PageData, PageHeader
from BufferManager.bufferDS import BufferBlock
from utils import utils
from collections import deque
import random
MAX_BUFFER_BLOCKS = 100
PAGE_SIZE = 8192
class BufferManager:
    """Singleton-style page buffer manager with an LRU eviction policy.

    All state lives on the class itself: ``buffer_blocks`` caches pages in
    memory, ``LRU_replacer`` queues the *unpinned* blocks in least-recently-used
    order (leftmost evicted first), and ``replacer_len`` mirrors its length.
    """
    # TODO: consider maintaining a table of open files as well
    def __init__(self) -> None:
        pass
    # Pages currently held in memory.
    buffer_blocks = []
    # Unpinned (evictable) blocks, oldest on the left.
    LRU_replacer = deque()
    # Cached length of LRU_replacer.
    replacer_len = 0
    # LRUReplacer(MAX_BUFFER_BLOCKS)
    @classmethod
    def _search_buffer_block(cls, file_name, page_id) -> BufferBlock:
        """
        Internal helper: linearly scan the buffer for the block identified by
        (file_name, page_id).
        :param file_name: file name
        :param page_id : page offset within the file
        :return BufferBlock, or None if the block is not cached
        """
        for buffer_block in cls.buffer_blocks:
            if buffer_block.file_name == file_name and buffer_block.page_id == page_id:
                return buffer_block
        return None
    @classmethod
    def mark_dirty(cls, file_name, page_id):
        """Flag the cached block as modified so it is written back on eviction."""
        block = cls._search_buffer_block(file_name, page_id)
        if block != None:
            block.dirty = True
    @classmethod
    def pin(cls, file_name, page_id):
        """
        Increase the pin count once; called when an operation on a block starts.
        A pinned block is removed from the eviction queue.
        :param file_name: string
        :param page_id : int
        """
        block = cls._search_buffer_block(file_name, page_id)
        if block is None:
            return
        if block.pin_count == 0:
            cls.LRU_replacer.remove(block)
            cls.replacer_len -= 1
        block.pin_count += 1
    @classmethod
    def unpin(cls, file_name, page_id):
        """
        Decrease the pin count once; called when an operation on a block ends.
        A block whose count is already zero is (re-)queued for eviction.
        """
        block = cls._search_buffer_block(file_name, page_id)
        if block is None:
            return
        if block.pin_count > 0:
            block.pin_count -= 1
        else:
            cls.LRU_replacer.append(block)
            # BUG FIX: this line used to read `cls.replacer_len == 1` - a
            # no-op comparison that left replacer_len permanently out of sync
            # with LRU_replacer.
            cls.replacer_len += 1
    @classmethod
    def flush_buffer(cls):
        """
        Write every dirty block back to disk without evicting anything.
        """
        for block in cls.buffer_blocks:
            if block.dirty == True:
                cls.write_back_to_file(block.file_name, block.page_id)
                block.dirty = False
    @classmethod
    def kick_out_victim_LRU(cls):
        """
        Evict one block chosen by the LRU policy; if nothing is evictable,
        evict a random buffered block instead.
        :return bool, False when the LRU queue was empty (random fallback used)
        """
        flag = True
        if cls.replacer_len == 0:
            print("No block can be replaced!")
            rand_idx = random.randint(0, MAX_BUFFER_BLOCKS-1)
            victim_block = cls.buffer_blocks[rand_idx]
            # No evictable block: fall back to a random victim.
            # NOTE(review): the random victim may still be pinned - confirm
            # callers tolerate a pinned block being dropped here.
            flag = False
        else:
            victim_block = cls.LRU_replacer.popleft()
            cls.replacer_len -= 1
        # print("kick out victim page_id = {}".format(victim_block.page_id))
        if victim_block.dirty == True:
            cls.write_back_to_file(
                victim_block.file_name, victim_block.page_id)
        cls.buffer_blocks.remove(victim_block)
        return flag
    @classmethod
    def set_page(cls, file_name, page_id, new_page):
        """Replace a cached page's content and mark the block dirty."""
        block = cls._search_buffer_block(file_name, page_id)
        if block is None:
            return
        block.page = new_page
        block.dirty = True
    @classmethod
    def fetch_page(cls, file_name, page_id) -> PageData:
        """
        Public entry point for page access:
        1. if the page is already buffered, return it directly;
        2. if it is not buffered and the buffer has room, load it from disk;
        3. if the buffer is full, evict a victim first and retry.
        :return : PageData
        """
        block_from_buffer = cls._search_buffer_block(file_name, page_id)
        if block_from_buffer is not None:
            return block_from_buffer.page
        elif len(cls.buffer_blocks) < MAX_BUFFER_BLOCKS:
            # Page not buffered and the buffer is not full yet.
            # print(file_name, page_id)
            page_data = cls._fetch_page_from_file(file_name, page_id)
            block = BufferBlock(page_data, file_name, page_id)
            cls.buffer_blocks.append(block)
            cls.LRU_replacer.append(block)
            cls.replacer_len += 1
            return page_data
        else:
            # Buffer full: evict a victim, then retry the fetch.
            status = cls.kick_out_victim_LRU()
            return (cls.fetch_page(file_name, page_id))
    @classmethod
    def _fetch_page_from_file(cls, file_name, page_id):
        """
        Internal helper: read the requested page from disk.
        :return : PageData
        """
        page_offset = page_id * PAGE_SIZE
        with open(os.path.join(utils.DB_FILE_FOLDER, file_name), "rb+") as f:
            f.seek(page_offset)
            page_data = f.read(PAGE_SIZE)
            # First 4 bytes encode the next free page pointer.
            next_free_page = utils.byte_to_int(page_data[0:4])
            page_bytearray = bytearray(page_data[4:8192])
            return PageData(next_free_page, page_bytearray)
    # TODO: needs further thought
    @classmethod
    def write_back_to_file(cls, file_name, page_id):
        """
        Flush the cached block back to its file and clear the dirty flag.
        :param file_name: string
        :param page_id : int
        """
        block = cls._search_buffer_block(file_name, page_id)
        page_offset = page_id * PAGE_SIZE
        page_data = bytearray(
            b'\x00' * 8188) if block.page.data is None else block.page.data
        page_data = utils.int_to_byte(block.page.next_free_page) + page_data
        with open(os.path.join(utils.DB_FILE_FOLDER, file_name), "rb+") as f:
            f.seek(page_offset, 0)
            f.write(page_data)
        block.dirty = False
    @classmethod
    def _read_file_header(cls, file_name):
        """
        Internal helper: read the first PAGE_SIZE bytes of a file as its header.
        :return : PageHeader
        """
        with open(os.path.join(utils.DB_FILE_FOLDER, file_name), "rb+") as f:
            if f is None:
                return None
            header_data = f.read(PAGE_SIZE)
            page_header = PageHeader(utils.byte_to_int(header_data[0:4]),
                                     utils.byte_to_int(header_data[4:8]),
                                     header_data[8:PAGE_SIZE]
                                     )
            return page_header
    @classmethod
    def remove_block(cls, file_name, page_id, force=False):
        """
        Evict the given block; it must be buffered and unpinned unless
        ``force`` is True.  Dirty blocks are written back first.
        Returns False when the block was not found (or was pinned).
        :param force : bool, evict even when the block is pinned
        """
        block = cls._search_buffer_block(file_name, page_id)
        if block is None or (block.pin_count > 0 and force == False):
            return False
        if block.dirty == True:
            cls.write_back_to_file(file_name, page_id)
        cls.buffer_blocks.remove(block)
        return True
    @classmethod
    def remove_file(cls, file_name):
        """
        Delete a file from disk, force-evicting all of its buffered blocks first.
        """
        page_header = cls._read_file_header(file_name)
        for i in range(page_header.size):
            cls.remove_block(file_name, i+1, force=True)
        os.remove(os.path.join(utils.DB_FILE_FOLDER, file_name))
    @classmethod
    def force_clear_buffer(cls):
        """
        Force-empty the whole buffer regardless of pin counts; dirty blocks are
        written back.  Normally used on program shutdown.
        """
        for block_idx in range(len(cls.buffer_blocks)-1, -1, -1):
            block = cls.buffer_blocks[block_idx]
            if block.dirty == True:
                cls.write_back_to_file(block.file_name, block.page_id)
            cls.buffer_blocks.remove(block)
    @classmethod
    def create_page(cls, file_name) -> PageData:
        """
        Append a fresh page to a full file and return it via the buffer.
        :param file_name : string
        :return PageData
        """
        file_header = cls._read_file_header(file_name)
        page_id = file_header.size + 1
        # file_header.size += 1
        # file_header.first_free_page = page_id
        data = bytearray(b"\xff\xff\xff\xff" + b"\x00" * 8188)
        # page_data = PageData(0, data)
        with open(os.path.join(utils.DB_FILE_FOLDER, file_name), 'rb+') as f:
            f.seek(PAGE_SIZE*page_id, 0)
            f.write(data)
        return cls.fetch_page(file_name, page_id)
    @classmethod
    def set_header(cls, file_name, header):
        """
        Overwrite a file's header page.
        :param header : PageHeader
        """
        data = utils.int_to_byte(header.first_free_page) \
            + utils.int_to_byte(header.size) \
            + header.data
        with open(os.path.join(utils.DB_FILE_FOLDER, file_name), "rb+") as f:
            f.seek(0, 0)
            f.write(data)
    @classmethod
    def get_header(cls, file_name):
        """
        Read a file's header page.
        :param file_name : string
        :return PageHeader
        """
        return cls._read_file_header(file_name)
    @classmethod
    def create_file(cls, file_name):
        """
        Create a new file containing only a header (no data pages yet).
        Returns 0 on success, -1 when the file already exists.
        """
        if os.path.exists(os.path.join(utils.DB_FILE_FOLDER, file_name)):
            print("File {} exists!".format(file_name))
            return -1
        with open(os.path.join(utils.DB_FILE_FOLDER, file_name), "w") as f:
            pass
        file_header = PageHeader(0, 0, bytearray(b'\x00'*8184))
        cls.set_header(file_name, file_header)
        return 0
| true |
1876153348acf8696ec61aa686c4878e2996d523 | Python | Piyush123-grumpy/Class_acitvity | /Lab_1/passed.py | UTF-8 | 173 | 3.671875 | 4 | [] | no_license | N=int(input('enter the time in minutes:'))
# Convert N minutes (read from input() above) into an H:MM clock reading.
hours = N // 60
minutes = N % 60
print(f'the hours is {hours}')
print(f'the minutes is {minutes}')
# Zero-pad the minutes so e.g. 65 minutes prints "1:05" instead of "1:5".
print(f'Its {hours}:{minutes:02d} now')
26b732824402af811acb54e4c5fdc9992b73b598 | Python | dynizhnik/python_home_work | /ua/univer/lesson03/task01.py | UTF-8 | 378 | 3.59375 | 4 | [] | no_license | number_of_count = int(input('Enter number of count: '))
# Print the first `number_of_count` odd numbers (number_of_count is read
# from input() above) on one comma-separated line.
my_list = list(range(number_of_count * 2))
for x in my_list:
    if x % 2 == 1:
        print(x, end=', ')
def task01_mylist():
    """Build the odd numbers 1..19 with range(), print them, and return them.

    The previous version returned None, forcing callers to reach for the
    module-level global; the global assignment is kept for backward
    compatibility, but the list is now also returned.
    """
    global mylist
    mylist = list(range(1, 20, 2))
    print(mylist)
    return mylist
task01_mylist()
# Build the same odd-number list a third way: filtering 0..19 with append().
mylist2 = list()
for i in range(20):
    if i % 2 == 1:
        mylist2.append(i)
print(mylist2)
| true |
a909586af8a4d921c174de7ed041fec5c0bd67fb | Python | LadyGracie/testpractice | /DisplayingText.py | UTF-8 | 199 | 3.828125 | 4 | [] | no_license | print ('My name is Grace\nI am a girl')
# Demonstrates escape sequences (\n, \t), triple-quoted strings, input(),
# and %-style string interpolation.
print ('My name is Peter\tI am a boy')
print ("""My name is John!
I am a boy""")
name = input ('What is your name?\n')
print ('Hello %s' %name)
5618b3f9efddd926628dbfd53f183dd4f6040d39 | Python | Sankareswaran-egrove/lung-disease-detection-from-chest-xray | /chest_xray/train.py | UTF-8 | 8,103 | 2.765625 | 3 | [
"MIT"
] | permissive | # import the necessary packages
import os

import numpy as np
import pandas as pd
import tensorflow as tf
# BUG FIX: roc_auc_score lives in sklearn.metrics, not sklearn.model_selection;
# the old import raised ImportError at module load.
from sklearn.metrics import roc_auc_score
from tensorflow.keras.applications.densenet import DenseNet121
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, TensorBoard, EarlyStopping
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import multi_gpu_model

from helper import utils, config
class Train():
    def __init__(self):
        """[train the model with the given train configurations.]
        """
        self.train_df = pd.read_csv(config.TRAIN_METADATA_PATH)
        self.val_df = pd.read_csv(config.VAL_METADATA_PATH)
        self.train_steps = int(len(self.train_df) // config.BATCH_SIZE)
        self.val_steps = int(len(self.val_df) // config.BATCH_SIZE)

    def build_model(self, show_summary=False):
        """[Finetune a pre-trained densenet model]
        Keyword Arguments:
            show_summary {bool} -- [show model summary] (default: {False})
        """
        img_input = Input(shape=(224, 224, 3))
        base_model = DenseNet121(include_top=False,
                                 weights="imagenet",
                                 input_tensor=img_input,
                                 input_shape=(224, 224, 3),
                                 pooling="avg"
                                 )
        # TODO: add additional dense layers.
        output = Dense(len(config.CLASS_NAMES),
                       activation="sigmoid", name="output")(base_model.output)
        model = Model(inputs=img_input, outputs=output)
        if show_summary:
            print(model.summary())
        return model

    def data_generator(self):
        """[Generate train and val data generators]
        Returns:
            [tuple(ImageDataGenerator)] -- [train and val image datagenerator]
        """
        train_aug = ImageDataGenerator(rescale=1./255,
                                       shear_range=0.2,
                                       zoom_range=0.2,
                                       horizontal_flip=False)  # TODO: do we need apply imagenet mean and std.
        val_aug = ImageDataGenerator(rescale=1./255)
        # BUG FIX: Keras' flow_from_dataframe expects x_col to be the *name*
        # of the dataframe column holding the image paths, not the column
        # (Series) itself; the old code also pointed the validation generator
        # at self.train_df's column by mistake.
        train_datagen = train_aug.flow_from_dataframe(self.train_df,
                                                      directory=None,  # can be none if x_col is full image path
                                                      x_col="Image Path",
                                                      y_col=config.CLASS_NAMES,
                                                      target_size=(
                                                          224, 224),
                                                      class_mode='categorical',
                                                      batch_size=config.BATCH_SIZE,
                                                      shuffle=True)
        val_datagen = val_aug.flow_from_dataframe(self.val_df,
                                                  directory=None,
                                                  x_col="Image Path",
                                                  y_col=config.CLASS_NAMES,
                                                  target_size=(
                                                      224, 224),
                                                  class_mode='categorical',
                                                  batch_size=config.BATCH_SIZE,
                                                  shuffle=False)
        return (train_datagen, val_datagen)

    def callbacks(self):
        """[Configure training callbacks]
        Returns:
            [List] -- [list of callbacks]
        """
        checkpoint = ModelCheckpoint(config.MODEL_PATH,
                                     monitor='val_loss',
                                     verbose=1,
                                     save_best_only=True,
                                     mode='min',
                                     save_weights_only=False)
        reduceLR = ReduceLROnPlateau(monitor='val_loss',
                                     factor=0.1,
                                     patience=1,
                                     verbose=1,
                                     mode="min",
                                     min_lr=config.MIN_LR)
        tensorboard = TensorBoard(log_dir=config.LOG_DIR)
        earlyStop = EarlyStopping(monitor='val_loss',
                                  min_delta=0,
                                  patience=3,
                                  verbose=1,
                                  restore_best_weights=True)
        callbacks = [checkpoint, reduceLR, tensorboard, earlyStop]
        return callbacks

    def train(self, model, train_datagen, val_datagen, callbacks):
        """[Train with the given train configurations.
        If a previously trained model exists, resume training from it;
        otherwise compile the freshly built model.
        Class weights are computed to counter the data imbalance, then the
        train and val generators are fitted to the model.]
        Arguments:
            model {[Model]} -- [keras functional model]
            train_datagen {[ImageDatagenerator]} -- [train data generator]
            val_datagen {[ImageDatagenerator]} -- [val data generator]
            callbacks {[List]} -- [list of callbacks]
        """
        # resume training if previously trained model exists
        if os.path.exists(config.MODEL_PATH):
            # load trained model
            print("[INFO] load trained model...")
            model = load_model(config.MODEL_PATH)
        else:
            print("[INFO] create new model...")
            # make directories to store the training outputs,.
            # output_paths = [config.MODEL_PATH, config.LOG_DIR]
            # for ouput_path in output_paths:
            #     os.makedirs(ouput_path)
            # model = self.build_model()
        print("[INFO] compile the model")
        model.compile(optimizer=Adam(lr=config.INTIAL_LR),
                      loss="binary_crossentropy",
                      metrics=["accuracy"])
        # compute class weights
        class_weight = utils.compute_class_weight(
            self.train_df, config.CLASS_NAMES)
        # check multiple gpu availability
        # TODO: how to train model on multiple gpu?
        # gpus = os.getenv("CUDA_VISIBLE_DEVICES", "1").split(",")
        gpus = tf.config.experimental.list_physical_devices("GPU")
        if len(gpus) > 1:
            print(f"[INFO] multi_gpu_model is used! gpus={gpus}")
            model = multi_gpu_model(model, gpus)
        else:
            print("[INFO] there is no gpu in this device")
        # fit the train and validation datagen to the model
        print("[INFO] training the model..")
        model.fit(train_datagen,
                  epochs=config.EPOCHS,
                  verbose=1,
                  callbacks=callbacks,
                  # TODO: need to be tuple (x_val, y_vall)
                  validation_data=val_datagen,
                  shuffle=True,
                  class_weight=class_weight,
                  steps_per_epoch=self.train_steps,
                  validation_steps=self.val_steps
                  )
        # save trained model explicitly
        print("[INFO] save the trained model")
        model.save(config.MODEL_PATH)
if __name__ == "__main__":
    # create and initialize Train object
    train = Train()
    # build the model
    model = train.build_model(show_summary=True)
    # train and val data generators
    (train_datagen, val_datagen) = train.data_generator()
    # training callbacks
    callbacks = train.callbacks()
    # train the model
    train.train(model, train_datagen, val_datagen, callbacks)
| true |
d022a9dc2bdf8d42c4c54d03f0f8da682a01e9ce | Python | ironboundsoftware/ironboundsoftware | /misc/src/mpgmaker.py | UTF-8 | 1,184 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python
#
# Creates an MPG from a bunch of jpgs
#
# Based off of a perl version by Stephen B. Jenkins (pg 65 Dr. Dobbs Journal, April 2004)
# Python version:Nick Loadholtes
# May 5, 2004
#
import time, urllib, os
WAITINTERVAL= 61
LOCATION = 'http://www.jwz.org/webcollage/collage.jpg'
WIN32PATH ="C:\\Program Files\\ImageMagick-6.0.0-Q16\\convert"
LINUXPATH = " "
PATH = WIN32PATH
def gather():
    # Poll the webcollage image every WAITINTERVAL seconds, saving each
    # snapshot under a timestamped filename; runs until interrupted.
    # NOTE(review): `min` shadows the builtin of the same name.
    print "Gathering pictures..."
    while(1):
        (year, month, day, hour, min) = time.localtime()[0:5]
        filename = "img%d_%d_%d_%d_%d.jpg" % (year,month,day,hour,min)
        print "Getting ", filename
        urllib.urlretrieve(LOCATION, filename)
        time.sleep(WAITINTERVAL)
def generate():
    # Stitch the downloaded JPEGs into one MPEG via ImageMagick's convert.
    # NOTE(review): os.execv performs no shell globbing, so " *.jpg " will be
    # passed literally; also, argv[0] is conventionally the program name, not
    # the first option.  Confirm this ever worked as intended.
    print "Generating the MPG!"
    (year, month, day, hour, min) = time.localtime()[0:5]
    filename = "%d_%d_%d_%d_%d.mpg" % (year,month,day,hour,min)
    mpgargs = ("-adjoin", " *.jpg ", filename)
    os.execv(WIN32PATH, mpgargs)
if __name__ == "__main__":
    # Download frames until Ctrl-C, then assemble them into a movie.
    print "Starting!"
    try:
        gather()
    except KeyboardInterrupt:
        generate()
77190dd74856d657f6e2a6bd874ed68b2a091df0 | Python | Utkarsh-Deshmukh/Fingerprint-Enhancement-Python | /src/example.py | UTF-8 | 895 | 2.703125 | 3 | [
"BSD-2-Clause"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 18 11:42:58 2016
@author: utkarsh
"""
import sys
from src.FingerprintImageEnhancer import FingerprintImageEnhancer
import cv2
if __name__ == '__main__':
    # Entry point: enhance one fingerprint image and save it under ../enhanced/.
    image_enhancer = FingerprintImageEnhancer()         # Create object called image_enhancer
    if(len(sys.argv)<2):                                # load input image
        # No filename on the command line: fall back to the bundled sample.
        print('loading sample image');
        img_name = '2.jpg'
        img = cv2.imread('../images/' + img_name)
    elif(len(sys.argv) >= 2):
        # Treat the first CLI argument as a filename under ../images/.
        img_name = sys.argv[1];
        img = cv2.imread('../images/' + img_name)
    if(len(img.shape)>2):                               # convert image into gray if necessary
        img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    out = image_enhancer.enhance(img)                   # run image enhancer
    image_enhancer.save_enhanced_image('../enhanced/' + img_name)  # save output
| true |
7c8e9ad5e137d6a9df2a828692b48a1a2d80b4cf | Python | ShanXiY/meiduo | /mall/apps/verifications/serializers.py | UTF-8 | 1,372 | 2.546875 | 3 | [] | no_license | from django_redis import get_redis_connection
from rest_framework import serializers
class RegisterSmscodeSerializer(serializers.Serializer):
    # Validates the image captcha submitted during registration before an SMS
    # verification code may be requested.
    text = serializers.CharField(label='图片验证码',min_length=4,max_length=4,required=True)
    image_code_id = serializers.UUIDField(label='uuid',required=True)
    """
    Validation layers DRF applies here:
    1. field types
    2. field options
    3. single-field validators
    4. multi-field validation (validate() below)
    """
    def validate(self, attrs):
        """Cross-field validation: compare the submitted captcha text against
        the value stored in redis under the submitted image_code_id.

        Steps:
        1. read the user-submitted captcha text and uuid
        2. fetch the expected text from redis (connection alias 'code')
        3. reject if the key expired or the text mismatches (case-insensitive)
        4. return attrs unchanged on success
        """
        # 1. user-submitted captcha text and its uuid
        text = attrs.get('text')
        image_code_id = attrs.get('image_code_id')
        # 2. fetch the expected captcha text from redis
        # 2.1 connect to the 'code' redis alias
        redis_conn = get_redis_connection('code')
        # 2.2 look up the stored text by uuid
        redis_text = redis_conn.get('img_%s'%image_code_id)
        # 2.3 a missing key means the captcha has expired
        if redis_text is None:
            raise serializers.ValidationError('图片验证码已过期')
        # 3. case-insensitive comparison (redis returns bytes, hence decode())
        if redis_text.decode().lower() != text.lower():
            raise serializers.ValidationError('图片验证码不一致')
        # 4. validation passed - hand attrs back unchanged
        return attrs
74df57a500d2777656d594ce3bc41a8c5e035cd9 | Python | tomz12321/BackToFundamentalLearning | /PythonFlaskPractice/sayHi.py | UTF-8 | 286 | 2.609375 | 3 | [] | no_license | from flask import Flask, request
app = Flask(__name__)
@app.route('/')
def sayHi():
    """Greet the caller by the optional ?name= query parameter (default: Tom)."""
    who = request.args.get("name", "Tom")
    return f'Hello, {who}!'
# $ env FLASK_APP=sayHi.py flask run
# * Serving Flask app "hello"
# * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit) | true |
686f0d81e1bfaf94dc84cfe27661d3b9e480ea7e | Python | dean2000/codewars-solutions | /kyu_7/Mumbling.py | UTF-8 | 496 | 3.4375 | 3 | [] | no_license | #1. Option 1 - not done
inp = "ZpglnRxqenU"
def accum(s):
    """Mumble ``s``: 'abcd' -> 'A-Bb-Ccc-Dddd' (codewars kata).

    The original draft used s.find(x), which yields the *first* occurrence's
    index (wrong for duplicate letters), never capitalised, and left a
    trailing '-'.
    """
    return '-'.join(ch.upper() + ch.lower() * i for i, ch in enumerate(s))
#2. Option 2 - working solution
def accum(s):
    """Mumble ``s``: letter at position i becomes upper(c) + lower(c) * i,
    chunks joined with '-'."""
    chunks = []
    for pos, ch in enumerate(s):
        chunks.append(ch.upper() + ch.lower() * pos)
    return '-'.join(chunks)
print(accum(inp))
#3. Option 3 - clever solution
def accum(s):
    """One-pass mumble: position i yields upper(c) + lower(c) * i."""
    pieces = (ch.upper() + ch.lower() * idx for idx, ch in enumerate(s))
    return '-'.join(pieces)
print(accum(inp))
| true |
9d06ba6da2a8473126d074a4f4e2724857702633 | Python | jeffb4real/SLHPA-Web-App | /mysite/slhpa/tables.py | UTF-8 | 3,244 | 2.765625 | 3 | [
"MIT"
] | permissive | import django_tables2 as tables
from django.utils.html import format_html
from .models import PhotoRecord
from .templatetags.photodir import getdir
class PhotoTable(tables.Table):
    """
    While simple, passing a QuerySet directly to {% render_table %} does not allow for any customization. For that, you must define a custom Table class (this one)
    """
    # Customize column names
    url_for_file = tables.Column(verbose_name="Photo")
    resource_name = tables.Column(verbose_name="Photo Identifier")
    # https://django-tables2.readthedocs.io/en/latest/pages/custom-data.html?highlight=bound_column
    # # Table.render_foo methods
    # To change how a column is rendered, define a render_foo method on the table for example: render_row_number() for a column named row_number. This approach is suitable if you have a one-off change that you do not want to use in multiple tables.
    # record – the entire record for the row from the table data
    # value – the value for the cell retrieved from the table data
    # column – the Column object
    # bound_column – the BoundColumn object
    # bound_row – the BoundRow object
    # table – alias for self
    #
    # https://django-tables2.readthedocs.io/en/latest/pages/custom-data.html?highlight=ImageColumn
    def render_url_for_file(self, record):
        # Render the "Photo" cell as a thumbnail linking to the full-size image.
        # NOTE(review): interpolating with an f-string *before* format_html
        # bypasses format_html's escaping of resource_name - confirm
        # resource_name is always trusted.
        subdir = getdir(record.resource_name)
        photo_filename = subdir + '/' + record.resource_name
        return format_html(f'<a href="/static/slhpa/images/photos/{photo_filename}.jpg" target="_blank">'
                           f'    <div style="text-align:right">'
                           f'        <img id="main_img" src="/static/slhpa/images/photos/{photo_filename}.jpg" style="max-width:200px"'
                           f'            data-toggle="tooltip" title="Click for larger image." >'
                           f'        <img id="overlay_img" src="/static/slhpa/images/photos/finger.png" width="20%"'
                           f'            data-toggle="tooltip" title="Click for larger image." >'
                           f'    </div>'
                           f'</a>'
                           )
    def render_resource_name(self, record):
        # Render the identifier cell as a link to the photo's detail page.
        return format_html(f'<a href="/slhpa/detail/{record.resource_name}" '
                           f'data-toggle="tooltip" title="Click for more detail." '
                           f'target="_blank">{record.resource_name}</a>'
                           )
    class Meta:
        # https://django-tables2.readthedocs.io/en/latest/pages/table-data.html?highlight=exclude
        # customize what fields to show or hide:
        # sequence – reorder columns
        # fields – specify model fields to include
        # exclude – specify model fields to exclude
        model = PhotoRecord
        sequence = ('resource_name', 'title',
                    'description', 'subject', 'year', 'url_for_file', '...')
        exclude = ('address', 'contributor', 'geo_coord_original',
                   'geo_coord_UTM', 'period_date', 'verified_gps_coords',
                   'gps_latitude', 'gps_longitude', 'document')
        template_name = 'django_tables2/bootstrap.html'
        attrs = {"class": "table table-striped"}
| true |
10034c8fdef2070196115b4c9c682ea7b7629e52 | Python | jameshughes89/intracranialPressureNonlinearModelling | /scripts/12-MakeTables.py | UTF-8 | 2,540 | 2.546875 | 3 | [] | no_license | '''
Creates the accuracy when models were applied to the data fit to
does nonlinear all, nonoliear 250, nonlinear 1s, linear all, linear 250, linear 1s, nonlinear all less, etc. etc. etc.
'''
import numpy as np
import csv
import matplotlib.pylab as plt
import scipy.stats
subjects = [3029993, 3033031, 3083337, 3094054, 3096171, 3105502, 3169632, 3262086, 3269261, 3289177, 3367596, 3379471, 3460047, 3463681, 3505904, 3516004, 3555523, 3562822, 3582988, 3599360, 3600995, 3607634, 3623238, 3645431, 3646209, 3662063, 3721988, 3738640, 3779174, 3781713]
times = ['all','250','1s']
linNonlin = ['', '_Linear']
feat = ['', '_lessFeatures']
def calcCI(a):
return scipy.stats.norm.interval(0.95, loc=np.nanmean(a), scale=(np.nanstd(a)/np.sqrt(len(a))))[1] - np.nanmean(a) # return 1 because 0 is the negative
def calcIQR(a):
# kill nans
a = a[np.logical_not(np.isnan(a))]
q75, q25 = np.percentile(a, [75 ,25])
return q75-q25
nlData = []
lData = []
for tme in times:
nlData.append(np.array(list(csv.reader(open('6-abEerrOfAllModels_' + tme + '.csv','r')))).astype(float))
lData.append(np.diag(np.array(list(csv.reader(open('3-abEmat_Linear_' + tme + '.csv','r')))).astype(float)))
nlData = np.array(nlData)
lData = np.array(lData)
for i, sub in enumerate(subjects):
print '\\hline', sub, '&', '%.4f'% np.nanmedian(nlData[0,i]), '(%.4f)'% calcIQR(nlData[0,i]), '&', '%.4f'% np.nanmin(nlData[0,i]), '&', '%.4f'% lData[0,i], '&', '%.4f'% np.nanmedian(nlData[1,i]), '(%.4f)'% calcIQR(nlData[1,i]), '&', '%.4f'% np.nanmin(nlData[1,i]), '&', '%.4f'% lData[1,i], '&', '%.4f'% np.nanmedian(nlData[2,i]), '(%.4f)'% calcIQR(nlData[2,i]), '&', '%.4f'% np.nanmin(nlData[2,i]), '&', '%.4f'% lData[2,i], '\\\\'
print '\n\n\n'
nlData = []
lData = []
for tme in times:
nlData.append(np.array(list(csv.reader(open('6-abEerrOfAllModels_' + tme + '_lessFeatures.csv','r')))).astype(float))
lData.append(np.diag(np.array(list(csv.reader(open('3-abEmat_Linear_' + tme + '_lessFeatures.csv','r')))).astype(float)))
nlData = np.array(nlData)
lData = np.array(lData)
for i, sub in enumerate(subjects):
print '\\hline', sub, '&', '%.4f'% np.nanmedian(nlData[0,i]), '(%.4f)'% calcIQR(nlData[0,i]), '&', '%.4f'% np.nanmin(nlData[0,i]), '&', '%.4f'% lData[0,i], '&', '%.4f'% np.nanmedian(nlData[1,i]), '(%.4f)'% calcIQR(nlData[1,i]), '&', '%.4f'% np.nanmin(nlData[1,i]), '&', '%.4f'% lData[1,i], '&', '%.4f'% np.nanmedian(nlData[2,i]), '(%.4f)'% calcIQR(nlData[2,i]), '&', '%.4f'% np.nanmin(nlData[2,i]), '&', '%.4f'% lData[2,i], '\\\\'
| true |