blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
f6aa96443ea33ef4cae9a68d45a4d02587d8633c | Python | DiegoMactire/python | /hello_world.py | UTF-8 | 407 | 3.8125 | 4 | [] | no_license | #exibe uma saudação simples
# Print a greeting directly from single- and double-quoted string literals.
print('\nHello world!\n')
print("Hello world!\n")
# Same greeting, stored in a variable first, then printed.
hello_world_0 = 'Hello world!\n'
print(hello_world_0)
hello_world_1 = "Hello world!\n"
print(hello_world_1)
# Build the greeting by concatenating two variables at print time.
hello_0 = 'Hello '
world_0 = 'world!\n'
print(hello_0 + world_0)
hello_1 = "Hello "
world_1 = "world!\n"
print(hello_1 + world_1)
# Concatenate into an intermediate variable, then print that.
hello_2 = 'Hello '
world_2 = 'world!\n'
message = hello_2 + world_2
print(message)
| true |
def solution(d):
    """Return the maximum top-to-bottom path sum of triangle *d*.

    ``d`` is a list of rows, row ``i`` having ``i + 1`` numbers. The triangle
    is accumulated in place: after the call, ``d[i][j]`` holds the best path
    sum ending at that cell (same side effect as the original).
    """
    for row in range(1, len(d)):
        for col in range(len(d[row])):
            if col == 0:
                # Left edge: only one parent above.
                best_above = d[row - 1][0]
            elif col == row:
                # Right edge: only one parent above.
                best_above = d[row - 1][col - 1]
            else:
                best_above = max(d[row - 1][col - 1], d[row - 1][col])
            d[row][col] += best_above
    return max(d[-1])
afdc9a7aa88ce99dacb5a71a30c0c48500eab33e | Python | Dabronxx/Employee-Database | /SalariedEmployee.py | UTF-8 | 711 | 3.578125 | 4 | [] | no_license | #Import Employees class
from Employee import*
#Class is derived from Employee class
class SalariedEmployee(Employee):
    """An Employee paid a fixed monthly salary.

    Stores the monthly salary and the number of months worked so that the
    yearly pay can be derived on demand.
    """

    def __init__(self, id, fname, lname, monthsWorked, monthlySalary):
        """Initialise the base Employee fields plus salary and months worked."""
        Employee.__init__(self, id, fname, lname)
        self.salary = monthlySalary
        self.months = monthsWorked

    def yearlyPay(self):
        """Yearly pay: monthly salary multiplied by months worked."""
        return self.months * self.salary

    def monthlySalary(self):
        """Return the monthly salary."""
        return self.salary

    def monthsWorked(self):
        """Return the number of months worked."""
        return self.months
| true |
f1a9c3522fbe798082b51256c93e9b84fbade512 | Python | Neuromorphs18/foosball2018 | /table_api/game.py | UTF-8 | 455 | 2.703125 | 3 | [] | no_license | import redis
import time
from roboplayer import RoboPlayer
# Setup RoboCop
# NOTE(review): these look like macOS USB-serial device paths for the two
# player motor controllers — confirm they match the deployed hardware.
sp1 = "/dev/cu.usbmodem142121"
sp2 = "/dev/cu.usbmodem142131"
rp = RoboPlayer(sp1, sp2)
# Setup Redis
# Connect to the table's Redis server and reset ball position/velocity
# to "x;y" string encodings of (0, 0).
database = redis.StrictRedis("192.168.0.150")
database.set("pos", "0;0")
database.set("vel", "0;0")
# Control loop: poll the ball position from Redis (~20 times per second)
# and drive the robot player accordingly. Runs until interrupted.
while True:
    xpos, ypos = database.get('pos').decode('utf-8').split(";")
    xpos = float(xpos)
    ypos = float(ypos)
    rp.rule_player(xpos, ypos)
    time.sleep(0.05)
| true |
f8d790b5ddb30723ffda8679f3980f4f3d959c20 | Python | Nathanator/networkzero | /networkzero/sockets.py | UTF-8 | 6,076 | 2.65625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import marshal
import time
import zmq
from . import config
from . import core
_logger = core.get_logger(__name__)
def _serialise(message):
    """Serialise *message* to bytes using the stdlib marshal format."""
    return marshal.dumps(message)
def _unserialise(message_bytes):
    """Inverse of _serialise: rebuild the original object from bytes."""
    return marshal.loads(message_bytes)
def _serialise_for_pubsub(topic, data):
    """Build the two-part [topic, payload] message used for PUB sockets."""
    topic_bytes = topic.encode(config.ENCODING)
    data_bytes = _serialise(data)
    return [topic_bytes, data_bytes]
def _unserialise_for_pubsub(message_bytes):
    """Split a two-part PUB/SUB message into (topic string, payload object)."""
    topic_bytes, data_bytes = message_bytes
    return topic_bytes.decode(config.ENCODING), _unserialise(data_bytes)
class Socket(zmq.Socket):
    """zmq.Socket subclass with an ``address`` property.

    Assigning ``socket.address = "ip:port"`` connects (REQ/SUB) or binds
    (REP/PUB) the socket to ``tcp://ip:port`` as a side effect.
    """
    def __repr__(self):
        return "<Socket %x on %s>" % (id(self), getattr(self, "address", "<No address>"))
    def _get_address(self):
        return self._address
    def _set_address(self, address):
        # Store via __dict__ to avoid recursing through the property setter.
        self.__dict__['_address'] = address
        tcp_address = "tcp://%s" % address
        # Client-style socket types connect; server-style types bind.
        if self.type in (zmq.REQ, zmq.SUB):
            self.connect(tcp_address)
        elif self.type in (zmq.REP, zmq.PUB):
            self.bind(tcp_address)
        #
        # ZeroMQ has a well-documented feature whereby the
        # newly-added subscriber will always miss the first
        # few posts by a publisher. Just to avoid the surprise,
        # we hackily avoid this here by having each socket
        # wait a short while once it's bound/connected.
        #
        if self.type in (zmq.SUB, zmq.PUB):
            time.sleep(0.5)
    address = property(_get_address, _set_address)
class Context(zmq.Context):
    # Make context.socket() produce our Socket subclass instead of zmq.Socket.
    _socket_class = Socket
# Single shared zmq context for the whole process.
context = Context()
#
# Global mapping from address to socket. When a socket
# is needed, its address (ip:port) is looked up here. If
# a mapping exists, that socket is returned. If not, a new
# one is created of the right type (REQ / SUB etc.) and
# returned
#
class Sockets:
    """Cache of zmq sockets keyed by (address, socket type).

    Provides request/reply and pub/sub helpers with interruptible timeouts.
    Polling is done in short slices so Ctrl-C is handled promptly.
    """
    try_length_ms = 500 # wait for .5 second at a time
    def __init__(self):
        """Start with an empty socket cache and a fresh poller."""
        self._sockets = {}
        self._poller = zmq.Poller()
    def get_socket(self, address, type):
        """Create or retrieve a socket of the right type, already connected
        to the address. Address (ip:port) must be fully specified at this
        point. core.address can be used to generate an address.
        """
        caddress = core.address(address)
        if (caddress, type) not in self._sockets:
            socket = context.socket(type)
            socket.address = caddress
            self._poller.register(socket)
            #
            # Do this last so that an exception earlier will result
            # in the socket not being cached
            #
            self._sockets[(caddress, type)] = socket
        return self._sockets[(caddress, type)]
    def intervals_ms(self, timeout_ms):
        """Generate a series of interval lengths, in ms, which
        will add up to the number of ms in timeout_ms. If timeout_ms
        is None, keep returning intervals forever.
        """
        if timeout_ms is config.FOREVER:
            while True:
                yield self.try_length_ms
        else:
            whole_intervals, part_interval = divmod(timeout_ms, self.try_length_ms)
            for _ in range(whole_intervals):
                yield self.try_length_ms
            yield part_interval
    def _receive_with_timeout(self, socket, timeout_s, use_multipart=False):
        """Check for socket activity and either return what's
        received on the socket or time out if timeout_s expires
        without anything on the socket.
        This is implemented in loops of self.try_length_ms milliseconds
        to allow Ctrl-C handling to take place.
        """
        if timeout_s is config.FOREVER:
            timeout_ms = config.FOREVER
        else:
            timeout_ms = int(1000 * timeout_s)
        ms_so_far = 0
        try:
            for interval_ms in self.intervals_ms(timeout_ms):
                sockets = dict(self._poller.poll(interval_ms))
                ms_so_far += interval_ms
                if socket in sockets:
                    if use_multipart:
                        return socket.recv_multipart()
                    else:
                        return socket.recv()
            # for/else: runs only when every interval elapsed without data.
            else:
                raise core.SocketTimedOutError(timeout_s)
        except KeyboardInterrupt:
            raise core.SocketInterruptedError(ms_so_far / 1000.0)
    def wait_for_message(self, address, wait_for_s):
        """Wait up to wait_for_s for a request on the REP socket at address.

        Returns the unserialised request, or None on timeout/interrupt.
        """
        socket = self.get_socket(address, zmq.REP)
        _logger.debug("socket %s waiting for request", socket)
        try:
            return _unserialise(self._receive_with_timeout(socket, wait_for_s))
        except (core.SocketTimedOutError, core.SocketInterruptedError):
            return None
    def send_message(self, address, request, wait_for_reply_s):
        """Send a request to address and return the unserialised reply.

        Raises SocketTimedOutError / SocketInterruptedError if no reply
        arrives within wait_for_reply_s.
        """
        socket = self.get_socket(address, zmq.REQ)
        socket.send(_serialise(request))
        return _unserialise(self._receive_with_timeout(socket, wait_for_reply_s))
    def send_reply(self, address, reply):
        """Send a serialised reply on the REP socket at address."""
        socket = self.get_socket(address, zmq.REP)
        return socket.send(_serialise(reply))
    def send_notification(self, address, topic, data):
        """Publish (topic, data) as a two-part message on the PUB socket."""
        socket = self.get_socket(address, zmq.PUB)
        return socket.send_multipart(_serialise_for_pubsub(topic, data))
    def wait_for_notification(self, address, topic, wait_for_s):
        """Subscribe to one topic or a list of topics and wait for a post.

        Returns (topic, data), or (None, None) on timeout/interrupt.
        """
        socket = self.get_socket(address, zmq.SUB)
        if isinstance(topic, str):
            topics = [topic]
        else:
            topics = topic
        for t in topics:
            socket.set(zmq.SUBSCRIBE, t.encode(config.ENCODING))
        try:
            result = self._receive_with_timeout(socket, wait_for_s, use_multipart=True)
            unserialised_result = _unserialise_for_pubsub(result)
            return unserialised_result
        except (core.SocketTimedOutError, core.SocketInterruptedError):
            return None, None
# Module-level singleton cache used by the convenience function below.
_sockets = Sockets()
def get_socket(address, type):
    """Return a cached (or newly created) socket for (address, type)."""
    return _sockets.get_socket(address, type)
| true |
4abf2439dcc3a36e7bd16d86659d2d8467e80fbf | Python | dspec12/100daysofcode-with-python-course | /days/01-03-datetimes/stopwatch.py | UTF-8 | 657 | 3.640625 | 4 | [
"MIT"
] | permissive | from datetime import datetime
def stopwatch():
    """Measure and return the timedelta between call time and the user
    pressing enter."""
    started = datetime.now()
    input('Stopwatch started press "enter" to stop..')
    return datetime.now() - started
def main():
    """Run the stopwatch command loop until the user enters 'quit'."""
    while True:
        print("::Stopwatch App::\nCommands: Start - Quit")
        command = input("Enter a command: \n").lower()
        if command == "start":
            elapsed = stopwatch()
            print("Stopwatch stopped with a total time of {}\n".format(elapsed))
        elif command == "quit":
            break
        else:
            print("Not a command\n")
if __name__ == "__main__":
main()
| true |
4cd0d9c6378d001d05a10d261d061c23bbd5078f | Python | gahan9/DS_lab | /practical_5/linear_hashing.py | UTF-8 | 5,753 | 3.5 | 4 | [] | no_license | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Author: Gahan Saraiya
GiT: https://github.com/gahan9
StackOverflow: https://stackoverflow.com/users/story/7664524
Implementation of linear hashing
"""
from collections import OrderedDict
from math import log2, log
class LinearHashing(object):
    """Hash table implementing the linear hashing scheme.

    Buckets are split one at a time, round-robin within a phase, whenever an
    insert would push the load factor (records / capacity) above ``threshold``.
    Records that overflow a bucket's nominal capacity are chained in the same
    list (shown after ``=>`` by ``__repr__``).
    """
    def __init__(self, *args, **kwargs):
        self.threshold = kwargs.get('threshold', 0.7)
        self.data_capacity_per_bucket = kwargs.get('data_capacity_per_bucket', 2)  # capacity to store data per bucket
        self.total_data = 0  # count of inserted records
        self.buffer = {key: [] for key in range(2)}  # initial two buckets
        self.index_counter = 0  # index of the next bucket to split in this phase
        self.previous_phase = 1  # phase number, used to reset index_counter
        self.has_title = None  # set once pretty_print has emitted its header row
    @property
    def current_phase(self):
        """Phase number: floor(log2(number of buckets))."""
        return int(log2(len(self.buffer)))
    @property
    def buffer_capacity(self):
        """Total nominal record capacity across all buckets."""
        return self.data_capacity_per_bucket * len(self.buffer)
    @property
    def threshold_outbound(self):
        """True when inserting one more record would exceed the load threshold."""
        return ((self.total_data + 1) / self.buffer_capacity) > self.threshold
    def hash_function(self, value, flag=0):
        """
        :param value: value on which hash function to be applied
        :param flag: set flag to 1 if splitting the bucket
        :return: bucket index for *value*
        """
        if not flag:
            # Normal insert: use the previous phase's modulus.
            return value % (2 ** self.previous_phase)
        else:
            # Redistribution during a split: use the next phase's modulus.
            return value % (2 ** (self.current_phase + 1))
    def set_index_counter_if(self):
        """Reset the split pointer to bucket 0 whenever a new phase begins."""
        if self.current_phase != self.previous_phase:
            self.index_counter = 0
            self.previous_phase = self.current_phase
    def insert(self, value, print_status=0):
        """Insert *value*, splitting one bucket first if the load threshold
        would be exceeded.

        :param value: integer value to be inserted
        :param print_status: set to 1 to print a status row after the insert
        :return: True
        """
        self.set_index_counter_if()
        buffer_capacity_before_insert = self.buffer_capacity
        if self.threshold_outbound:
            # Extend the table with one new (empty) bucket.
            self.buffer[len(self.buffer)] = []
            buffer_index = self.hash_function(value)
            self.buffer[buffer_index] = self.buffer.setdefault(buffer_index, []) + [value]
            # Split the bucket at the round-robin pointer, rehashing its
            # contents with the next phase's hash function.
            bucket_to_split = self.buffer[self.index_counter]
            self.buffer[self.index_counter] = []
            for data in bucket_to_split:
                buffer_idx = self.hash_function(data, flag=1)
                self.buffer[buffer_idx] = self.buffer.setdefault(buffer_idx, []) + [data]
            self.index_counter += 1
        else:
            buffer_index = self.hash_function(value)
            self.buffer[buffer_index] = self.buffer.setdefault(buffer_index, []) + [value]
        self.total_data += 1
        if print_status:
            self.pretty_print(value, buffer_capacity_before_insert)
        return True
    def pretty_print(self, value, buffer_capacity_before_insert):
        """Print one status row (with a header on the first call)."""
        data_dict = OrderedDict()
        data_dict["Sr No."] = self.total_data
        data_dict["Element"] = value
        data_dict["SplitIndex"] = self.index_counter
        data_dict["Phase"] = self.current_phase
        data_dict["Ratio"] = round(self.total_data / buffer_capacity_before_insert, 2)
        data_dict["Threshold"] = self.threshold
        if not self.has_title:
            print(" ".join(data_dict.keys()) + " " + "RESULT")
            self.has_title = True
        print(" ".join("{:^{}s}".format(str(v), len(k)) for k, v in data_dict.items()), end=" ")
        print(self.buffer)
    def delete(self):
        """Deletion is not supported by this implementation."""
        # Fixed: the original *returned* the NotImplementedError class
        # instead of raising it, so callers got a class object back.
        raise NotImplementedError
    def __repr__(self):
        return "\n".join(
            "{:>03d} -> {}".format(i, self.buffer[i]) if len(self.buffer[i]) <= self.data_capacity_per_bucket
            else "{:>03d} -> {} => {}".format(i, self.buffer[i][:self.data_capacity_per_bucket], self.buffer[i][self.data_capacity_per_bucket:])
            for i in sorted(self.buffer))
    # __str__ was a byte-for-byte copy of __repr__; alias it instead.
    __str__ = __repr__
def test(flag=None):
    """Demo driver exercising LinearHashing with random and fixed data.

    ``flag`` mimics ``sys.argv``: flag[1] is the per-bucket capacity and
    flag[2] (optional) the number of random elements to insert.

    Fixed bug: the original left ``total_elements`` undefined whenever
    ``flag`` was None or had fewer than three entries, raising NameError.
    """
    capacity = 3
    total_elements = 20  # default random-element count
    if flag and len(flag) == 2:
        capacity = int(flag[1])
    elif flag and len(flag) > 2:
        capacity, total_elements = map(int, flag[1:3])
    print("Capacity per bucket (without chaining): {}".format(capacity))
    hash_bucket = LinearHashing(data_capacity_per_bucket=capacity, threshold=0.7)
    import random
    input_lis = list(random.randint(0, 500) for i in range(total_elements))
    for i in input_lis:
        hash_bucket.insert(i, print_status=1)
    print("Capacity per bucket (without chaining): {}".format(capacity))
    hash_bucket = LinearHashing(data_capacity_per_bucket=capacity, threshold=0.7)
    input_lis = [3, 2, 4, 1, 8, 14, 5, 10, 7, 24, 17, 13, 15]
    for i in input_lis:
        hash_bucket.insert(i, print_status=1)
    print("Final Bucket Status")
    print(hash_bucket)
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
test(sys.argv)
else:
test()
| true |
bab6ca90978de19c53f57cd9fcd70044fb2f0a3d | Python | snqutest/Demo_AT | /lib/doexcel.py | UTF-8 | 745 | 3.421875 | 3 | [] | no_license | #-*-coding:utf-8-*-
from xlrd import open_workbook
wb = open_workbook('./config/api_test.xlsx')
# Get a worksheet by its name
ws = wb.sheet_by_name('test_sheet')
# List the names of all worksheets
print (wb.sheet_names())
# Access a worksheet by index
print (wb.sheet_by_index(0))
# Print the worksheet name, row count and column count
print (ws.name)
print (ws.nrows,ws.ncols)
# Contents of the first row, as a list
row1 = ws.row_values(0)
# Contents of the first column, as a list
col1 = ws.col_values(0)
# Iterate over every row of the sheet, printing each as a list
for i in range(0,ws.nrows):
    print (ws.row_values(i))
# Values of individual cells, addressed by (row, column)
print (ws.cell(0,0).value)
print (ws.cell(1,1).value)
| true |
9ce160042aa4d013e0ab34613c04a0f610e3c6de | Python | hvd/ghost | /ghost.py | UTF-8 | 3,164 | 3.546875 | 4 | [
"MIT"
] | permissive | import sys
import random
wordbuffer = ''
#Flag to indicate if there is a game winner.
gamewinner = False
#These are actually words with odd number of letters so optimal for computer
optimalwords = {}
evenwords = {}
wordset = set()
player1 = 'human'
player2 = 'computer'
def isloser(word):
    """End the game (sys.exit) if *word* is a complete dictionary word.

    Words of even length were completed by the human's letter, so the human
    (player1) wins; odd length means the computer (player2) completed it.
    Otherwise announce that play continues.
    """
    if word in wordset and len(word)%2==0:
        print player1 + " wins."
        sys.exit()
    elif word in wordset and len(word)%2!=0:
        print player2 + " wins."
        sys.exit()
    else:
        print "gameplay continues:"
def playermove(currentword):
    """Prompt the human for one letter, append it, test for a loss, then
    hand the turn to the computer.

    NOTE(review): ``gamewinner`` is never set to True anywhere in this file,
    so the guard below always passes; the game only ends via sys.exit().
    """
    humanletter = raw_input('Enter your letter human:')
    currentword = currentword + str(humanletter).strip()
    currentword = currentword.lower()
    print "human plays " + currentword
    isloser(currentword)
    if not gamewinner:
        computermove(currentword)
def optimalmove(currentword):
    """Choose the computer's next letter for the ghost word fragment.

    Picks a random odd-length ("winning") word extending *currentword* that
    is not a prefix-trap for the opponent; if none remains, plays along the
    longest even-length word to prolong the game.
    """
    firstletter=currentword[0]
    listofwords = optimalwords.get(firstletter)
    listofopponentwords = evenwords.get(firstletter)
    subsetopponent = [w for w in listofopponentwords if currentword in w]
    #subset of winning words.
    subset = [s for s in listofwords if currentword in s]
    # Remove all losing plays
    # NOTE(review): removing from ``subset`` while iterating over it skips
    # the element after each removal, so some losing plays may survive.
    for i in subsetopponent:
        for j in subset:
            if i in j:
                subset.remove(j)
    #strategy to lengthen gameplay in case no winning word exists
    if not subset:
        max_length,longest_element = max([(len(x),x) for x in subsetopponent])
        wordtoplay=longest_element
        partword = currentword + wordtoplay[len(currentword)]
        return partword
    index = random.randint(0,len(subset)-1)
    wordtoplay = subset[index]
    partword = currentword + wordtoplay[len(currentword)]
    print "Computer Plays " + currentword + wordtoplay[len(currentword)]
    return partword
def computermove(currentword):
    """Play the computer's letter, test for a loss, then pass back to the human."""
    partword = optimalmove(currentword)
    isloser(partword)
    if not gamewinner:
        playermove(partword)
def letsplayghost():
    """Start a game of ghost with an empty word fragment; human moves first."""
    print "Spooky Times human, lets play ghost"
    currentword = ''
    playermove(currentword)
#Read the words and store them in a dict for later comprehension
def init():
    """Load WORD.LST into the module-level word dictionaries, then start play.

    Words longer than 4 letters are indexed by first letter: odd lengths go
    to ``optimalwords`` (good for the computer), even lengths to
    ``evenwords``; all go into ``wordset`` for end-of-word detection.

    NOTE(review): the file handle is never closed.
    """
    f = open('WORD.LST', 'r')
    for line in f:
        word = str(line)
        word = word.strip()
        wordlength = len(word)
        letter = word[0]
        # print(wordlength)
        if wordlength%2!=0 and wordlength>4:
            wordset.add(word)
            if letter in optimalwords:
                wordlist = optimalwords[letter]
                wordlist.append(word)
                optimalwords[letter] = wordlist
            else:
                optimalwords[letter] = [word]
        elif wordlength%2==0 and wordlength>4:
            wordset.add(word)
            if letter in evenwords:
                wordlist = evenwords[letter]
                wordlist.append(word)
                evenwords[letter] = wordlist
            else:
                evenwords[letter] = [word]
    letsplayghost()
init()
#for k,v in optimalwords.items():
# print(k,v)
# print('\n')
#for k,v in evenwords.items():
# print(k,v)
# print('\n') | true |
4ab4a3d99ebd411506b23b8da3096c1ee80e56c9 | Python | hb918902/Python_easypractices | /0011/filter.py | UTF-8 | 465 | 3.5 | 4 | [] | no_license | #! /usr/bin/env python3
# coding = utf-8
import re
import os
def filter_word(a):
    """Print 'Freedom' if *a* matches any filter entry, else 'Human Rights'.

    Each line of filtered_word.txt is treated as a regular expression and
    searched against *a*; the count of matching lines decides the output.
    """
    flag = 0
    with open('filtered_word.txt', encoding='utf-8') as f:
        for line in f.readlines():
            # Strip the trailing newline so it doesn't leak into the pattern.
            line = line.strip(os.linesep)
            if re.search(line, a):
                flag += 1
    if flag:
        print('Freedom')
    else:
        print('Human Rights')
if __name__ == '__main__':
str1 = input("Input some words: ")
filter_word(str1)
| true |
0f15d49f13a4ae29efb121e2df4105ac38f81e9e | Python | johng42/sense | /Raspberry_Pi/simple_read_T_H.py | UTF-8 | 3,531 | 3.53125 | 4 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license"
] | permissive | # simple_read_T_H.py
# Example code for using the Sense board to measure humidity
# and temperature.
# This example is designed to run with Python 3 on a Raspberry Pi.
# Measures and displays the humidity and temperature, demonstrating
# the decoding of the signed temperature value. The data are also
# read out and displayed a second time, using a "data category read"
# of all Air measurement data.
# Copyright 2020 Metriful Ltd.
# Licensed under the MIT License - for further details see LICENSE.txt
# For code examples, datasheet and user guide, visit https://github.com/metriful/sense
from Sense_functions import *
# Set up the GPIO and I2C communications bus
(GPIO, I2C_bus) = SenseHardwareSetup()
# Tell the Pi to monitor READY for a falling edge event (high-to-low voltage change)
GPIO.add_event_detect(READY_pin, GPIO.FALLING)
# Initiate an on-demand data measurement
I2C_bus.write_byte(i2c_7bit_address, ON_DEMAND_MEASURE_CMD)
# Now wait for the ready signal (falling edge) before continuing
# NOTE(review): this is a busy-wait that spins the CPU at 100%;
# GPIO.wait_for_edge would block without spinning.
while (not GPIO.event_detected(READY_pin)):
    pass
# We now know that newly measured data are ready to read.
#########################################################
# HUMIDITY
# Read the humidity value from the Sense board
raw_data = I2C_bus.read_i2c_block_data(i2c_7bit_address, H_READ, H_BYTES)
# Decode the humidity: the first byte is the integer part, the
# second byte is the fractional part to one decimal place.
humidity_integer = raw_data[0]
humidity_fraction = raw_data[1]
# Print it: the units are percentage relative humidity.
print("Humidity = " + str(humidity_integer) + "." + str(humidity_fraction) + " %")
#########################################################
# TEMPERATURE
# Read the temperature value from the Sense board
raw_data = I2C_bus.read_i2c_block_data(i2c_7bit_address, T_READ, T_BYTES)
# Decode and print the temperature:
# Find the positive magnitude of the integer part of the temperature by
# doing a bitwise AND of the first byte with TEMPERATURE_VALUE_MASK
temperature_positive_integer = raw_data[0] & TEMPERATURE_VALUE_MASK
# The second byte is the fractional part to one decimal place
temperature_fraction = raw_data[1]
# If the most-significant bit is set, the temperature is negative (below 0 C)
if ((raw_data[0] & TEMPERATURE_SIGN_MASK) == 0):
# Bit not set: temperature is positive
sign_string = "+"
else:
# Bit is set: temperature is negative
sign_string = "-"
# Print the temperature: the units are degrees Celsius.
print("Temperature = " + sign_string + str(temperature_positive_integer) + "."
+ str(temperature_fraction) + " C")
#########################################################
# AIR DATA
# Rather than reading individual data values as shown above, whole
# categories of data can be read in one I2C transaction
# Read all Air data in one transaction
raw_data = I2C_bus.read_i2c_block_data(i2c_7bit_address, AIR_DATA_READ, AIR_DATA_BYTES)
# Use the example function to decode the values and return then as a Python dictionary
air_data = extractAirData(raw_data)
# Print the values obtained
print("Temperature = {:.1f} C".format(air_data['T_C']))
print("Pressure = " + str(air_data['P_Pa']) + " Pa")
print("Humidity = {:.1f} %".format(air_data['H_pc']))
print("Gas sensor resistance = " + str(air_data['G_ohm']) + " ohm")
# Or just use the following function for printing:
# writeAirData(None, air_data, False)
#########################################################
GPIO.cleanup()
| true |
cc15e90859ad3bf75afe1ff07a2b6a65a299d871 | Python | jigerjain/eYantra-2k14_15 | /mask_hsv.py | UTF-8 | 1,565 | 2.75 | 3 | [] | no_license | import numpy as np
import cv2
# Initialize camera
img = cv2.imread('1.jpg')
############################################
# Sample Pixel --> A - [213,154,150] , B - [113,154,150]
# Lower/upper HSV bounds for the colour mask: [hue, saturation, value].
# OpenCV hue range is 0-179, so 15-30 presumably targets orange/yellow
# tones — confirm against the sample pixels noted above.
param1 = [15,180,180]
param2 = [30,255,200]
############################################
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
## Do the processing
lower = np.array(param1) ## Convert the parameters into a form that OpenCV can understand
upper = np.array(param2)
# Binary mask of pixels within [lower, upper], then keep only those pixels.
mask = cv2.inRange(hsv, lower, upper)
res = cv2.bitwise_and(img, img, mask= mask)
green = np.uint8([[[0,150,200 ]]])
hsv_green = cv2.cvtColor(green,cv2.COLOR_BGR2HSV)
print hsv_green
###processing####################
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#gray = cv2.cvtColor(mask,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray,100,255,0)
mask_inv = cv2.bitwise_not(mask)
contours,hierarchy = cv2.findContours(mask_inv,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
print len(contours)
#img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
#ret, mask1 = cv2.threshold(mask, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
cv2.drawContours(img,contours,-1,(0,150,200),5)
##image display
print mask.shape
print gray.shape
cv2.imshow('image2',img)
#cv2.imshow('jiggy2',mask)
#cv2.imshow('image',gray)
#cv2.imshow('jiggy',mask_inv)
#######################################3
############################################
## Close and exit
cv2.waitKey(0)
cv2.destroyAllWindows()
############################################
| true |
8c52e63207c165c635f174c3af453ba6b0e383f8 | Python | jplusplus/uc_scraper | /uc/utils.py | UTF-8 | 781 | 2.96875 | 3 | [
"MIT"
] | permissive | # encoding:utf-8
import pandas as pd
def parse_int(val):
    """Parse an int from a string, tolerating decimal notation like '3.0'.

    The fractional part, if any, is truncated towards zero.
    """
    as_float = float(val)
    return int(as_float)
def parse_result_page(html):
    """Parse every HTML table in *html* into long-format records.

    Each table is melted so that every (group, column, value) cell becomes
    one record; the original first-column header is kept as 'grouper' and
    percentage strings are converted to float ratios. Returns a list of
    dicts (duplicates dropped).
    """
    dfs = []
    for df in pd.read_html(html, thousands=' '):
        grouper = df.columns[0]
        value_cols = df.columns[1:].tolist()
        # Normalise the first column name so melt can use it as the id var.
        df.columns = ["group"] + value_cols
        df_long = pd.melt(df, id_vars="group", value_vars=df.columns[1:],
                          var_name="column")
        df_long["value"] = df_long["value"].apply(pct_to_float)
        df_long["grouper"] = grouper
        dfs.append(df_long)
    df = pd.concat(dfs, sort=False).drop_duplicates().to_dict("records")
    return df
def pct_to_float(x):
    """Convert a percentage string like '45%' to a float ratio (0.45).

    Values that cannot be interpreted as a percentage string are returned
    unchanged (the function is used with DataFrame.apply on mixed columns).
    """
    try:
        return float(x.strip('%'))/100
    except (AttributeError, ValueError, TypeError):
        # Not a string (no .strip), or not numeric once '%' is stripped:
        # pass the value through untouched. Narrowed from a bare except,
        # which also swallowed KeyboardInterrupt and genuine bugs.
        return x
| true |
80d6776eae25e63fe91dda256b993c0ddb6da215 | Python | Haavi97/ITI8565-ML-FinalProject | /data_loading.py | UTF-8 | 434 | 2.796875 | 3 | [] | no_license | import pandas
def load_data(csv_file='water_potability.csv'):
    """Load the potability dataset as a DataFrame.

    If the file is missing, print a notice explaining where to get the data
    and return 0 (legacy sentinel kept for compatibility with callers).
    """
    try:
        return pandas.read_csv(csv_file)
    except FileNotFoundError:
        banner = '**************'
        print(banner)
        print('MISSING DATA FILE')
        print('The data must be downloaded from the Kagle link in the README file')
        print(banner)
        return 0
if __name__ == '__main__':
df = pandas.read_csv('water_potability.csv')
print(df) | true |
63f701673f57b985aeaa9fea925d074093254a11 | Python | morojenoe/Kaggle-Digit_Recognizer | /main.py | UTF-8 | 618 | 2.546875 | 3 | [] | no_license | from pandas import read_csv
from prepare_data import *
from draw_digit import show_digit_from_df
import time
import pickle
if __name__ == "__main__":
time.clock()
# train_data = read_csv("train.csv")
# pickle.dump(train_data, open("train_data.pkl", 'wb'))
train_data = pickle.load(open("train_data.pkl", 'rb'))
# train_data = reduce_df_to_01(train_data)
# pickle.dump(train_data, open("train_data.pkl", 'wb'))
pattern = get_patterns(train_data)
train_data = apply_pattern(train_data, pattern)
for i in range(0, 10):
show_digit_from_df(train_data, i)
| true |
97a9fdcf17c82fe3c4ca4809947c53accb46c9e7 | Python | PrannoyNamala/Stereo-Vision_Implementation | /functions.py | UTF-8 | 7,888 | 2.546875 | 3 | [] | no_license | import cv2
import glob
import numpy as np
import docx
import scipy.optimize as opt
def array_from_doc(str_to_convert):
    """Parse a matrix written as '[a b c; d e f]' inside a string.

    Everything before the last '[' and after the first following ']' is
    ignored; rows are separated by ';' and entries by whitespace. Returns
    a float ndarray.
    """
    inner = str_to_convert.split('[')[-1].split(']')[0]
    rows = [[float(token) for token in row.split()]
            for row in inner.split(';')]
    return np.array(rows)
def getText(number):
    """Read 'Dataset <number>/Groundtruth.docx' into a properties dict.

    Each paragraph is expected to be a 'key=value' pair; the cam0/cam1
    entries are additionally parsed into camera-matrix ndarrays.
    """
    doc = docx.Document('Dataset ' + number + '/Groundtruth.docx')
    fulltext = []
    for para in doc.paragraphs:
        fulltext.append(para.text)
    properties_dict = {}
    for entry in fulltext:
        split_pair = entry.split('=')
        properties_dict[split_pair[0]] = split_pair[1]
    properties_dict['cam0'] = array_from_doc(properties_dict['cam0'])
    properties_dict['cam1'] = array_from_doc(properties_dict['cam1'])
    return properties_dict
def image_loader(number):
    """Load every PNG under 'Dataset <number>/' as a grayscale 1920x1080 image.

    Returns the list of images in glob order (which is not guaranteed sorted).
    """
    image_list = []
    for filename in glob.glob('Dataset ' + number + '/*.png'):
        im = cv2.imread(filename)
        im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        im = cv2.resize(im, (1920, 1080), interpolation=cv2.INTER_AREA)
        image_list.append(im)
    return image_list
def feature_matching(img_list):
    """Match ORB features between the first two images of *img_list*.

    Returns two parallel lists of homogeneous points [x, y, 1] — one per
    image — ordered by ascending match distance.
    """
    # Initiate ORB detector
    orb = cv2.ORB_create()
    # find the keypoints and descriptors with ORB
    kp1, des1 = orb.detectAndCompute(img_list[0], None)
    kp2, des2 = orb.detectAndCompute(img_list[1], None)
    # create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # Match descriptors.
    matches = bf.match(des1, des2)
    # Sort them in the order of their distance.
    matches = sorted(matches, key=lambda x: x.distance)
    # Draw first 10 matches.
    # NOTE(review): img3 is computed but never used or returned — it looks
    # like a leftover debugging visualisation.
    img3 = cv2.drawMatches(img_list[0], kp1, img_list[1], kp2, matches, None,
                           flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
    pointslist_img1 = []
    pointslist_img2 = []
    for match in matches:
        pointslist_img1.append(np.array(((kp1[match.queryIdx].pt[0]), (kp1[match.queryIdx].pt[1]), (1))))
        pointslist_img2.append(np.array(((kp2[match.trainIdx].pt[0]), (kp2[match.trainIdx].pt[1]), (1))))
    return pointslist_img1, pointslist_img2
def matrix_bulider(img_1points, img_2points):
    """Estimate a fundamental matrix from point correspondences.

    Classic (unnormalised) 8-point method: stack one epipolar-constraint
    row per correspondence and take the SVD null vector of the stacked
    system. Points are homogeneous [x, y, 1] arrays.
    """
    rows = [
        [p1[0] * p2[0], p1[1] * p2[0], p2[0],
         p1[0] * p2[1], p1[1] * p2[1], p2[1],
         p1[0], p1[1], 1]
        for p1, p2 in zip(img_1points, img_2points)
    ]
    # The singular vector for the smallest singular value is the
    # least-squares solution of rows . f = 0.
    u, l, vt = np.linalg.svd(np.array(rows))
    return np.reshape(vt[-1, :], (3, 3)).transpose()
def matrix_estimate(pointslist_img1, pointslist_img2):
    """RANSAC estimate of the fundamental matrix from correspondences.

    Repeatedly fits an 8-point model on random samples, keeps the model
    with the most inliers (|x2' F x1| < 5e-4), then enforces rank 2 by
    zeroing the smallest singular value and normalises so F[2,2] == 1.
    Uses np.random, so results vary between runs unless seeded.
    """
    n = 0
    N = len(pointslist_img1)
    iterations = 0
    # NOTE(review): with p = 0.5 in both terms this evaluates to ~177
    # iterations; the usual RANSAC formula uses distinct success/inlier
    # probabilities — confirm the intended values.
    k = np.log(1 - 0.5) / np.log(1 - (0.5 ** 8))
    f_best = matrix_bulider(pointslist_img1[0:8], pointslist_img2[0:8])
    while iterations < k:
        int_list = np.random.randint(0, N, size=8)
        img1_selection = []
        img2_selection = []
        for ii in int_list:
            img1_selection.append(pointslist_img1[ii])
            img2_selection.append(pointslist_img2[ii])
        f = matrix_bulider(img1_selection, img2_selection)
        s = 0
        for j in range(0, N):
            prod = np.abs(np.transpose(pointslist_img2[j]).dot(f.dot(pointslist_img1[j])))
            if prod < 0.0005:
                s += 1
        if n < s:
            n = s
            f_best = f
        iterations += 1
    # Enforce the rank-2 constraint on the best candidate.
    u, l, v = np.linalg.svd(f_best)
    l = np.array([[l[0], 0, 0], [0, l[1], 0], [0, 0, 0]])
    F = np.dot(u, l)
    f_best = np.dot(F, v)
    return f_best / f_best[2, 2]
def get_solutions(e):
    """Decompose an essential matrix into its four (R, C) pose candidates.

    Standard construction: SVD of *e*, translation from the third left
    singular vector (both signs), and the two rotations U W V^T / U W^T V^T.
    """
    u, d, vt = np.linalg.svd(e)
    w = np.array(((0, -1, 0), (1, 0, 0), (0, 0, 1)))
    translation = u[:, 2].reshape(3, 1)
    rotation_a = u.dot(w.dot(vt))
    rotation_b = u.dot(np.transpose(w).dot(vt))
    candidates = []
    for rotation in (rotation_a, rotation_b):
        candidates.append((rotation, translation))
        candidates.append((rotation, -translation))
    return candidates
def skew(x):
    """Return the 3x3 skew-symmetric (cross-product) matrix of 3-vector *x*.

    Satisfies skew(x).dot(y) == np.cross(x, y). The original version had
    wrong signs/entries in the last row and column (it was not even
    skew-symmetric), which silently corrupted the triangulation that
    builds its linear system from this matrix.
    """
    return np.array([[0, -x[2], x[1]],
                     [x[2], 0, -x[0]],
                     [-x[1], x[0], 0]])
def lin_triangulation(K, C1, R1, C2, R2, X1, X2):
    """Linearly triangulate 3D points from two calibrated views.

    K: 3x3 intrinsics; (R1, C1), (R2, C2): rotations and camera centres
    (C as 3x1 columns); X1, X2: Nx3 homogeneous image points. Builds the
    projection matrices P = K R [I | -C], stacks the skew-symmetric
    cross-product constraints for each correspondence, and solves each
    4-point system by SVD. Returns an Nx3 array of 3D points.
    """
    i3c = np.hstack((np.eye(3), -C1))
    P1 = K.dot(R1).dot(i3c)
    i3c = np.hstack((np.eye(3), -C2))
    P2 = K.dot(R2).dot(i3c)
    sz = X1.shape[0]
    X = np.zeros((sz, 3))
    for i in range(sz):
        skew1 = skew(X1[i, :])
        skew2 = skew(X2[i, :])
        A = np.vstack((np.dot(skew1, P1), np.dot(skew2, P2)))
        _, _, v = np.linalg.svd(A)
        # De-homogenise the null vector and keep the (x, y, z) part.
        x = v[-1] / v[-1, -1]
        x = np.reshape(x, (len(x), -1))
        X[i, :] = x[0:3].T
    return X
def disambiguate_pose(pairs, positions):
    """Pick the pose candidate with the most points passing the cheirality check.

    pairs: list of (R, C) candidates; positions: matching list of Nx3 point
    arrays. A point counts when it lies in front of the camera
    (r3 . (X - C) > 0). Returns (X, R, C) for the winning candidate.

    Raises ValueError when no candidate places any point in front of the
    camera — the original code crashed with UnboundLocalError in that case.
    """
    best = 0
    X = R = C = None
    for i in range(len(pairs)):
        N = positions[i].shape[0]
        n = 0
        for j in range(N):
            if (np.dot(pairs[i][0][2, :], (positions[i][j].reshape(3, 1) - pairs[i][1])) > 0) and \
                    positions[0][0][-1] >= 0:
                n = n + 1
        if n > best:
            C = pairs[i][1]
            R = pairs[i][0]
            X = positions[i]
            best = n
    if R is None:
        raise ValueError("no pose candidate passed the cheirality check")
    return X, R, C
def minimizeFunction(init, K, x1, x2, R1, C1, R2, C2):
    """Reprojection-error objective for the nonlinear triangulation.

    init: flattened Nx3 array of 3D point estimates (the optimiser's
    parameter vector). Projects each homogenised point through both
    cameras and accumulates the signed (u, v) residuals against the
    observed image points x1 and x2. Returns a scalar.
    """
    sz = len(x1)
    X = np.reshape(init, (sz, 3))
    # Homogenise the points for projection.
    X = np.hstack((X, np.ones((sz, 1))))
    i3c = np.hstack((np.eye(3), -C1))
    P1 = K.dot(R1).dot(i3c)
    i3c = np.hstack((np.eye(3), -C2))
    P2 = K.dot(R2).dot(i3c)
    # Perspective division: pixel coordinates in each view.
    u1 = np.divide((np.dot(P1[0, :], X.T).T), (np.dot(P1[2, :], X.T).T))
    v1 = np.divide((np.dot(P1[1, :], X.T).T), (np.dot(P1[2, :], X.T).T))
    u2 = np.divide((np.dot(P2[0, :], X.T).T), (np.dot(P2[2, :], X.T).T))
    v2 = np.divide((np.dot(P2[1, :], X.T).T), (np.dot(P2[2, :], X.T).T))
    # print(u1.shape,x1.shape)
    # assert u1.shape[0] == x1.shape[0], "shape not matched"
    # NOTE(review): these are signed sums, not squared errors, so positive
    # and negative residuals cancel — confirm this is the intended cost.
    error1 = ((x1[:, 0] - u1) + (x1[:, 1] - v1))
    error2 = ((x2[:, 0] - u2) + (x2[:, 1] - v2))
    # print(error1.shape)
    # sum(error1, error2) adds the elements of error1 onto error2 (start
    # value), producing an array; the outer sum collapses it to a scalar.
    error = sum(error1, error2)
    return sum(error)
def nonlin_tirangulation(K, x1, x2, X_init, R1, C1, R2, C2):
    """Refine triangulated 3D points by least-squares reprojection error.

    Starts from the linear estimate X_init (Nx3) and minimises
    minimizeFunction with scipy's dogbox least_squares. Returns the
    refined Nx3 point array.
    """
    sz = len(x1)
    init = X_init.flatten()
    # Tracer()()
    optimized_params = opt.least_squares(
        fun=minimizeFunction,
        x0=init,
        method="dogbox",
        args=[K, x1, x2, R1, C1, R2, C2])
    X = np.reshape(optimized_params.x, (sz, 3))
    return X
def drawlines(img1, img2, lines, pts1, pts2):
    """Draw epipolar lines on img1 and the matching points on both images.

    img1/img2: grayscale images (converted to BGR for colour drawing);
    lines: epilines as (a, b, c) coefficients for img1; pts1/pts2: matched
    points. Each line/point pair gets a random colour. Returns the two
    annotated images.
    """
    r, c = img1.shape
    img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
    img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
    for r, pt1, pt2 in zip(lines, pts1, pts2):
        color = tuple(np.random.randint(0, 255, 3).tolist())
        # Intersections of the line ax + by + c = 0 with the left and
        # right image borders (x = 0 and x = c).
        x0, y0 = map(int, [0, -r[2] / r[1]])
        x1, y1 = map(int, [c, -(r[2] + r[0] * c) / r[1]])
        img1 = cv2.line(img1, (x0, y0), (x1, y1), color, 1)
        img1 = cv2.circle(img1, tuple((int(pt1[0]), int(pt1[1]))), 5, color, -1)
        img2 = cv2.circle(img2, tuple((int(pt2[0]), int(pt2[1]))), 5, color, -1)
    return img1, img2
def feature_binary(img, points_list):
    """Return a black image of img's shape with a 255 pixel at each point.

    Points are (x, y) pairs (column, row); the input image is not modified.
    """
    canvas = np.zeros_like(img)
    for pt in points_list:
        canvas[pt[1], pt[0]] = 255
    return canvas
def template_match(template, img):
    """Return the top-left (x, y) of the best match of *template* in *img*.

    Uses squared-difference matching, so the minimum response location is
    the best match.
    """
    res = cv2.matchTemplate(img, template, cv2.TM_SQDIFF)
    _, _, min_loc, _ = cv2.minMaxLoc(res)
    return min_loc
def get_windows(template_length, img):
    """Slide a square window over *img* and index every view by its centre.

    Returns a dict mapping (x, y) centre coordinates (floats, since the
    centre of an even-sized window falls between pixels) to the
    template_length x template_length sub-array starting there.
    """
    height, width = img.shape
    centre_offset = (template_length + 1) / 2
    windows = {}
    for row in range(height - template_length):
        for col in range(width - template_length):
            key = (col + centre_offset, row + centre_offset)
            windows[key] = img[row:row + template_length, col:col + template_length]
    return windows
def compute_disparity(point_img1, point_img2, f, b):
    """Return (depth, disparity) for a matched point pair.

    Disparity is the difference of the x-coordinates; depth follows the
    stereo relation f * b / d for focal length f and baseline b. A zero
    disparity is replaced by 0.001 to avoid division by zero.
    """
    disparity = point_img1[0] - point_img2[0]
    if not disparity:
        disparity = 0.001
    return f * b / disparity, disparity
| true |
8387972178c88cfb7162fdf8b68a25a742945dad | Python | shahp7575/coding-with-friends | /Parth/LeetCode/Medium/combination_sum.py | UTF-8 | 1,354 | 3.953125 | 4 | [] | no_license | """
DFS Backtracking
Runtime: 108 ms
Memory: 14.1 MB
"""
from typing import List
class Solution:
    """
    Problem #39:
    Combination Sum
    Given an array of distinct integers candidates and a target integer target, return a list of all unique combinations of candidates where the chosen numbers sum to target.
    You may return the combinations in any order
    The same number may be chosen from candidates an unlimited number of times.
    Two combinations are unique if the frequency of at least one of the chosen numbers is different.
    It is guaranteed that the number of unique combinations that sum up to target is less than 150 combinations for the given input.
    """
    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
        """Return all combinations (with repetition) of candidates summing to target."""
        res: List[List[int]] = []
        # Sort a copy: the caller's list is no longer mutated in place, and
        # sorted order allows dfs to prune branches early.
        candidates = sorted(candidates)
        self.target = target
        self.dfs(candidates, [], res)
        return res
    def dfs(self, candidates, path, res):
        """Extend ``path`` with suffixes of ``candidates``, collecting hits in ``res``.

        Passing candidates[i:] (not [i+1:]) lets the same number be reused
        while still avoiding duplicate combinations.
        """
        total = sum(path)  # compute once per node instead of per comparison
        if total > self.target:
            return
        if total == self.target:
            res.append(path)
            return
        for i in range(len(candidates)):
            # candidates are sorted, so once one overshoots, all later ones do.
            if total + candidates[i] > self.target:
                break
            self.dfs(candidates[i:], path + [candidates[i]], res)
if __name__ == "__main__":
result = Solution()
candidates = [2,3,6,7]
target = 7
print(result.combinationSum(candidates, target)) | true |
d5aaf4c1428c61f4b04f7119a6f7569e75124d27 | Python | simvisage/mxn | /mxn/tests/test05_cross_section_I_mixed.py | UTF-8 | 2,280 | 2.546875 | 3 | [] | no_license | '''
Created on 15. 2. 2014
@author: Vancikv
'''
from mxn.cross_section import \
CrossSection
from mxn.matrix_cross_section import \
MatrixCrossSection, MCSGeoRect, MCSGeoI
from mxn.reinf_layout import \
RLCBar, RLCTexLayer
import numpy as np
def test_cross_section_mn():
    '''Test the moment and normal force calculated for a cross section.
    I - shaped cross section with mixed reinforcement. Change of geometry
    to rectangular also tested.
    '''
    # I-shaped geometry: 0.4 m tall with 0.05 m flanges and a 0.05 m web.
    ge = MCSGeoI(height=0.4, height_up=0.05, width_up=0.25, height_lo=0.05, width_lo=0.35, width_st=0.05)
    mcs = MatrixCrossSection(geo=ge, n_cj=20, material='default_mixture',
                             material_law='constant')
    '''Cross section geometry + matrix
    '''
    bar1 = RLCBar(x=0.025, z=0.025, material='bar_d10')
    bar2 = RLCBar(x=0.125, z=0.025, material='bar_d10')
    bar3 = RLCBar(x=0.225, z=0.025, material='bar_d10')
    bar4 = RLCBar(x=0.325, z=0.025, material='bar_d10')
    # bar1 initially gets a deliberately small area; enlarged further below.
    bar1.material_.area = 0.00005
    '''Four steel reinforcement bars in lower flange
    '''
    tl1 = RLCTexLayer(z_coord=0.01, material='default_fabric', material_law='fbm')
    tl2 = RLCTexLayer(z_coord=0.39, material='default_fabric', material_law='fbm')
    '''Two layers of textile reinforcement
    '''
    # Combine matrix and reinforcement with prescribed strains at the
    # lower (eps_lo) and upper (eps_up) fibers.
    cs = CrossSection(reinf=[tl1, tl2, bar1, bar2, bar3, bar4],
                      matrix_cs=mcs,
                      eps_lo=0.002,
                      eps_up=-0.0033,
                      )
    tl1.material_law_.set(sig_tex_u=1216., eps_u=0.014, m=0.5)
    tl1.material_.set(s_0=0.02, A_roving=0.461)
    # Baseline moment (M) and normal force (N) against reference values.
    assert np.allclose([cs.M, cs.N], [201.35521782599423, -1152.7647363907902])
    # Each mutation below must propagate through the model; M/N are
    # re-evaluated against the corresponding reference values.
    bar1.material_.area = 0.0004
    assert np.allclose([cs.M, cs.N], [274.03855115932754, -685.51473639079018])
    ge.height_lo = 0.06
    assert np.allclose([cs.M, cs.N], [279.41793763239417, -685.51473639079018])
    cs.eps_lo = 0.004
    assert np.allclose([cs.M, cs.N], [324.31926663193633, -323.07965392270125])
    # Finally swap the I-shaped geometry for a plain rectangle.
    mcs.geo = MCSGeoRect(height=0.4, width=0.4)
    assert np.allclose([cs.M, cs.N], [613.20118671511852, -2927.1270652809549])
if __name__ == '__main__':
    # Allow running this test module directly, without a test runner.
    test_cross_section_mn()
| true |
c95fb2b1b45ead06e37b6432b36668d445f3d8d1 | Python | nicorellius/fiblist | /source/functional_tests/test_list_item_validation.py | UTF-8 | 1,657 | 2.734375 | 3 | [
"Unlicense"
] | permissive | # file: test_list_item_validation.py
# date: 2014-0811
# description: functional tests for fiblist project
from .base import FunctionalTest
from unittest import skip
class ItemValidationTest(FunctionalTest):
    """Functional tests for list-item validation on the home page."""

    EMPTY_ITEM_ERROR = "You cannot submit an empty list item."

    def _submit_new_item(self, text):
        """Type ``text`` into the new-item input and press enter."""
        self.browser.find_element_by_id('id_new_item').send_keys(text + '\n')

    def _get_error_element(self):
        """Return the validation-error element currently on the page."""
        return self.browser.find_element_by_css_selector('.has-error')

    def test_cannot_add_empty_list_items(self):
        # Open the home page and submit an empty item.
        self.browser.get(self.server_url)
        self._submit_new_item('')

        # The page re-renders with a validation error.
        error = self._get_error_element()
        self.assertEqual(error.text, self.EMPTY_ITEM_ERROR)

        # A real item is accepted and shows up in the table.
        self._submit_new_item('Flash light')
        self.check_for_row_in_list_table('1: Flash light')

        # Submitting empty again fails and keeps the existing item.
        self._submit_new_item('')
        self.check_for_row_in_list_table('1: Flash light')
        error = self._get_error_element()
        self.assertEqual(error.text, self.EMPTY_ITEM_ERROR)

        # The user recovers by entering another valid item.
        self._submit_new_item('Batteries')
        self.check_for_row_in_list_table('1: Flash light')
        self.check_for_row_in_list_table('2: Batteries')

        self.fail('write me!')
| true |
d4af279785696c7c9aa22e34266e2fd3b05cf0d8 | Python | pjz987/pdx_code_intro_class | /wacky_functions.py | UTF-8 | 293 | 3.359375 | 3 | [] | no_license | '''
filename : wacky_functions.py
'''
def noisy_add(num1, num2):
    """Return the sum of two numbers, announcing the operation on stdout."""
    print(f"ADDING {num1} AND {num2}!!:D")
    total = num1 + num2
    return total
def bad_add(num1, num2):
    """Print the sum of two numbers; returns None (hence "bad")."""
    total = num1 + num2
    print(total)
def return4(in_thing):
    """Print the argument, then ignore it and return the constant 4."""
    print(in_thing)
    answer = 4
    return answer
def print4(in_thing):
    """Print the constant 4 and hand the argument straight back."""
    print(4)
    passthrough = in_thing
    return passthrough
| true |
58dc02c3cd1db4ec0609eb617a37f92bcd0cee1b | Python | parkerhancock/requests-cache | /requests_cache/cache_control.py | UTF-8 | 7,747 | 2.625 | 3 | [
"BSD-2-Clause"
] | permissive | """Utilities for determining cache expiration and other cache actions"""
from datetime import datetime, timedelta, timezone
from email.utils import parsedate_to_datetime
from fnmatch import fnmatch
from logging import getLogger
from typing import Any, Dict, Mapping, Optional, Tuple, Union
from requests import PreparedRequest, Response
# Value that may be set by either Cache-Control headers or CachedSession params to disable caching
DO_NOT_CACHE = 0
# Currently supported Cache-Control directives
CACHE_DIRECTIVES = ['max-age', 'no-cache', 'no-store']
# All cache-related headers, for logging/reference; not all are supported
REQUEST_CACHE_HEADERS = [
    'Cache-Control',
    'If-Unmodified-Since',
    'If-Modified-Since',
    'If-Match',
    'If-None-Match',
]
RESPONSE_CACHE_HEADERS = ['Cache-Control', 'ETag', 'Expires', 'Age']
# Type aliases:
# CacheDirective -- one parsed Cache-Control directive as a (name, value) pair
CacheDirective = Tuple[str, Union[None, int, bool]]
# ExpirationTime -- any user-supplied expiration format accepted by this module
ExpirationTime = Union[None, int, float, str, datetime, timedelta]
# ExpirationPatterns -- glob URL pattern mapped to an expiration value
ExpirationPatterns = Dict[str, ExpirationTime]
# Module-level logger
logger = getLogger(__name__)
class CacheActions:
    """A dataclass that contains info on specific actions to take for a given cache item.
    This is determined by a combination of cache settings and request + response headers.
    If multiple sources are provided, they will be used in the following order of precedence:
    1. Cache-Control request headers (if enabled)
    2. Cache-Control response headers (if enabled)
    3. Per-request expiration
    4. Per-URL expiration
    5. Per-session expiration
    """
    def __init__(
        self,
        key: str,
        request: PreparedRequest,
        cache_control: bool = False,
        **kwargs,
    ):
        """Initialize from request info and cache settings"""
        self.key = key
        # Request headers only take effect when Cache-Control support is
        # enabled AND the request actually carries supported cache headers;
        # otherwise fall back to session/URL/request settings.
        if cache_control and has_cache_headers(request.headers):
            self._init_from_headers(request.headers)
        else:
            self._init_from_settings(url=request.url, **kwargs)
    def _init_from_headers(self, headers: Mapping):
        """Initialize from request headers"""
        directives = get_cache_directives(headers)
        # 'max-age=0' (== DO_NOT_CACHE) disables caching entirely.
        do_not_cache = directives.get('max-age') == DO_NOT_CACHE
        self.expire_after = directives.get('max-age')
        # 'no-cache' only skips reading from the cache; 'no-store' skips both
        # reading and writing.
        self.skip_read = do_not_cache or 'no-store' in directives or 'no-cache' in directives
        self.skip_write = do_not_cache or 'no-store' in directives
    def _init_from_settings(
        self,
        url: str = None,
        request_expire_after: ExpirationTime = None,
        session_expire_after: ExpirationTime = None,
        urls_expire_after: ExpirationPatterns = None,
        **kwargs,
    ):
        """Initialize from cache settings"""
        # Check expire_after values in order of precedence
        expire_after = coalesce(
            request_expire_after,
            get_url_expiration(url, urls_expire_after),
            session_expire_after,
        )
        do_not_cache = expire_after == DO_NOT_CACHE
        self.expire_after = expire_after
        self.skip_read = do_not_cache
        self.skip_write = do_not_cache
    @property
    def expires(self) -> Optional[datetime]:
        """Convert the user/header-provided expiration value to a datetime"""
        return get_expiration_datetime(self.expire_after)
    def update_from_response(self, response: Response):
        """Update expiration + actions based on response headers, if not previously set by request"""
        directives = get_cache_directives(response.headers)
        do_not_cache = directives.get('max-age') == DO_NOT_CACHE
        # A previously-set expiration (self.expires) wins over response headers.
        self.expire_after = coalesce(self.expires, directives.get('max-age'), directives.get('expires'))
        self.skip_write = self.skip_write or do_not_cache or 'no-store' in directives
def coalesce(*values: Any, default=None) -> Any:
    """Return the first value that is not ``None``, or ``default`` if all are."""
    for value in values:
        if value is not None:
            return value
    return default
def get_expiration_datetime(expire_after: ExpirationTime) -> Optional[datetime]:
    """Convert an expiration value in any supported format to an absolute datetime"""
    # None / -1 are sentinels meaning "never expire"
    if expire_after is None or expire_after == -1:
        return None
    # DO_NOT_CACHE (max-age=0) means "expire immediately"
    if expire_after == DO_NOT_CACHE:
        return datetime.utcnow()
    # Already an absolute datetime, or an HTTP-date string
    if isinstance(expire_after, datetime):
        return to_utc(expire_after)
    if isinstance(expire_after, str):
        return parse_http_date(expire_after)

    # Anything left is a number of seconds or a timedelta: normalize and offset
    if isinstance(expire_after, timedelta):
        delta = expire_after
    else:
        delta = timedelta(seconds=expire_after)
    return datetime.utcnow() + delta
def get_cache_directives(headers: Mapping) -> Dict:
    """Get all Cache-Control directives, and handle multiple headers and comma-separated lists"""
    if not headers:
        return {}
    raw_parts = headers.get('Cache-Control', '').split(',')
    directives = dict(split_kv_directive(part) for part in raw_parts)
    # An Expires header is folded in as its own pseudo-directive
    if 'Expires' in headers:
        directives['expires'] = headers['Expires']
    return directives
def get_url_expiration(
    url: Optional[str], urls_expire_after: ExpirationPatterns = None
) -> ExpirationTime:
    """Check for a matching per-URL expiration, if any"""
    if not url:
        return None

    patterns = urls_expire_after or {}
    for pattern, expire_after in patterns.items():
        if url_match(url, pattern):
            logger.debug(f'URL {url} matched pattern "{pattern}": {expire_after}')
            return expire_after
    return None
def has_cache_headers(headers: Mapping) -> bool:
    """Determine if headers contain cache directives **that we currently support**"""
    cache_control = headers.get('Cache-Control', '')
    if any(directive in cache_control for directive in CACHE_DIRECTIVES):
        return True
    return bool(headers.get('Expires'))
def parse_http_date(value: str) -> Optional[datetime]:
    """Attempt to parse an HTTP (RFC 5322-compatible) timestamp"""
    try:
        parsed = parsedate_to_datetime(value)
    except (TypeError, ValueError):
        logger.debug(f'Failed to parse timestamp: {value}')
        return None
    return parsed
def split_kv_directive(header_value: str) -> CacheDirective:
    """Split a cache directive into a ``(header_value, int)`` key-value pair, if possible;
    otherwise just ``(header_value, True)``.
    """
    header_value = header_value.strip()
    if '=' not in header_value:
        # Value-less directive, e.g. 'no-store'
        return header_value, True
    key, _, raw_value = header_value.partition('=')
    return key, try_int(raw_value)
def to_utc(dt: datetime) -> datetime:
    """All internal datetimes are UTC and timezone-naive. Convert any user/header-provided
    datetimes to the same format.

    Args:
        dt: A naive or timezone-aware datetime. Naive input is assumed to
            already be UTC and is returned unchanged.

    Returns:
        The equivalent UTC wall-clock time as a timezone-naive datetime.
    """
    if dt.tzinfo:
        # Bug fix: astimezone() returns a new object; previously its result
        # was discarded, so aware datetimes were stripped of tzinfo without
        # actually being converted to UTC.
        dt = dt.astimezone(timezone.utc).replace(tzinfo=None)
    return dt
def try_int(value: Optional[str]) -> Optional[int]:
    """Convert a string value to an int, if possible, otherwise ``None``"""
    as_str = str(value)
    if not as_str.isnumeric():
        return None
    return int(as_str)
def url_match(url: str, pattern: str) -> bool:
    """Determine if a URL matches a pattern

    Args:
        url: URL to test. Its base URL (without protocol) will be used.
        pattern: Glob pattern to match against. A recursive wildcard will be added if not present

    Example:
        >>> url_match('https://httpbin.org/delay/1', 'httpbin.org/delay')
        True
        >>> url_match('https://httpbin.org/stream/1', 'httpbin.org/*/1')
        True
        >>> url_match('https://httpbin.org/stream/2', 'httpbin.org/*/1')
        False
    """
    if not url:
        return False
    # Compare without the protocol prefix; normalize the pattern so it
    # always ends in a recursive wildcard.
    bare_url = url.split('://')[-1]
    glob = pattern.split('://')[-1].rstrip('*') + '**'
    return fnmatch(bare_url, glob)
| true |
3db760dac59ad8f9d6ac22cb57289f8e2fdae3ac | Python | a123priya/hackerrank-python | /10_Days_of_Statistics/Day_1_Quartiles.py | UTF-8 | 326 | 2.96875 | 3 | [] | no_license | from statistics import median
N = int(input())
X = sorted(list(map(int, input().split())))
if N % 2 == 0:
lower = X[:N//2]
upper = X[N//2:]
else:
lower = X[:N//2]
upper = X[N//2+1:]
print(int(median(lower)), int(median(X)), int(median(upper)), sep = "\n")
# Caner Dabakoğlu
# GitHub: https://github.com/cdabakoglu
| true |
4ce52beca96e2dfc39d1350e1af72e6f985e1423 | Python | SungYuJeong/TYoung | /pyWordCount.py | UTF-8 | 6,078 | 2.609375 | 3 | [] | no_license | # Word counting with pymongo
# -*- coding: utf8 -*-
import sys, logging, glob, json
import pyUtilsClass, pyDAOClass
dao = pyDAOClass.DAO()
def setDBConnection(dbConfig):
    """Configure the module-level DAO from a config dict and return it.

    Expects keys: host, port, id, pw, database and collections.wordcount.
    """
    host, port = dbConfig["host"], dbConfig["port"]
    user, password = dbConfig["id"], dbConfig["pw"]
    dao.setClient(host, port, user, password)
    dao.setDB(dbConfig["database"])
    dao.setCollection(dbConfig["collections"]["wordcount"])
    return dao
'''
{ "book": {
"postag": ["", ...],
"count": xxx
},
"have": {
"postag": ["", ...],
"count": xxx
}, ...
}
'''
def countWord(wordList):
    """Aggregate morpheme tokens ("word/POSTAG") into per-word counts.

    Args:
        wordList: iterable of tokens shaped like "word/TAG"; the tag is
            taken after the LAST '/' so words containing '/' still parse.

    Returns:
        dict mapping each word to {"postag": [unique tags, in first-seen
        order], "count": total occurrences}.
    """
    wordCountList = {}
    for morph in wordList:
        token = str(morph)
        sep = token.rfind('/')
        word, postag = token[:sep], token[sep + 1:]
        # Single lookup via get() instead of `!= None` + double indexing.
        entry = wordCountList.get(word)
        if entry is not None:
            if postag not in entry["postag"]:
                entry["postag"].append(postag)
            entry["count"] += 1
        else:
            wordCountList[word] = {"postag": [postag], "count": 1}
    return wordCountList
def getCountArray(month, count):
    """Return a 12-slot list with ``count`` placed at ``month`` (1-based)."""
    counts = [0 for _ in range(12)]
    counts[int(month) - 1] = count
    return counts
def getPostagList(dbInfo, newData):
    """Merge new POS tags into the stored tag list and return it as JSON.

    Mutates ``dbInfo`` (the tag list already stored in the DB) in place,
    appending any tag from ``newData`` not yet present, preserving order.
    """
    for tag in newData:
        # Idiomatic membership test instead of calling __contains__ directly.
        if tag not in dbInfo:
            dbInfo.append(tag)
    return json.dumps(dbInfo)
def getDailyCount(dailyCount, day, count):
    """Add ``count`` to the slot for ``day`` (1-based) and return the list."""
    index = int(day) - 1
    dailyCount[index] += count
    return dailyCount
def getMonthlyCount(monthlyCount, month, count):
    """Add ``count`` to the slot for ``month`` (1-based) and return the list."""
    index = int(month) - 1
    monthlyCount[index] += count
    return monthlyCount
def getSubjectCount(subjectCount, subject, count):
    """Add ``count`` at index ``subject`` and return the updated list."""
    subjectCount[subject] = subjectCount[subject] + count
    return subjectCount
'''
{
"word": "south",
"postag": [ "RB", "JJ", "VBP", "NN", "VB", "NNP"],
"count": {
"totalCount": 114,
"headerCount": 3,
"yearly": {
"2020": xxxx,
},
"monthly": {
"2020": [0,0,0,0,114,0,0,0,0,0]
},
"daily": {
"202005": [0,0,0,0,0,0,0,0,0, ..... ],
"202006": [0,0,0,0,0,0,0,0,0, ..... ]
},
"bySubject": {
"Nat'l/Politics": xxx,
"Sports": xxx,
}
}
}
'''
# word, postag, count, subject, date,isHeader
def getInsertData(word, postag, count, subject, date, isHeader, subjectJson):
    """Build the JSON document string for a word's first insertion.

    ``date`` is "yyyy-mm-dd". All counters are initialized from this single
    observation: monthly has 12 slots, daily 31 slots, bySubject 8 slots.
    """
    year, month, day = date.split('-')[0], date.split('-')[1], date.split('-')[2]
    monthly = getMonthlyCount([0] * 12, month, count)
    daily = getDailyCount([0] * 31, day, count)
    by_subject = getSubjectCount([0] * 8, subjectJson[subject], count)
    header_count = count if isHeader else 0
    return '{"word": "%s", "postag": %s, "count": { "totalCount": %s, "headerCount": %s, "yearly": { "%s": %s }, "monthly": { "%s": %s }, "daily": { "%s": %s }, "bySubject": %s }}' \
        % (word, json.dumps(postag), count, header_count, year, count, year, monthly, year+month, daily, by_subject)
def getUpdateData(dbInfo, postag, count, subject, date, isHeader, subjectJson):
    """Build the MongoDB '$set' update string merging this batch into dbInfo.

    NOTE(review): assumes dbInfo already has yearly/monthly/daily buckets
    for this date's year and year+month -- a word first seen in a new
    year or month would raise KeyError here; confirm upstream guarantees.
    """
    year = date.split('-')[0]
    month = date.split('-')[1]
    day = date.split('-')[2]
    # headerCount only grows when the word came from a headline (isHeader);
    # getPostagList/getMonthlyCount/getDailyCount/getSubjectCount all mutate
    # the corresponding dbInfo sub-structures in place as a side effect.
    result = ('{"$set": {"postag": %s, "count": {"totalCount": %s, "headerCount": %s, "yearly": {"%s": %s}, "monthly": {"%s": %s}, "daily": {"%s": %s}, "bySubject": %s }}}' % \
             (getPostagList(dbInfo["postag"], postag), dbInfo["count"]["totalCount"] + count, dbInfo["count"]["headerCount"] + count if isHeader else dbInfo["count"]["headerCount"], \
             year, dbInfo["count"]["yearly"][year] + count, \
             year, getMonthlyCount(dbInfo["count"]["monthly"][year], month, count), \
             year+month, getDailyCount(dbInfo["count"]["daily"][year+month], day, count), \
             getSubjectCount(dbInfo["count"]["bySubject"], subjectJson[subject], count)))
    return result
def countWordinDB(date, subject, isHeader, countWordList, subjectJson):
    """Upsert per-word counters into MongoDB for one article's word counts."""
    for word in countWordList:
        info = countWordList[word]
        existing = dao.select('{"word": "%s"}' % word)
        if existing is not None:
            # Word already tracked: merge tags and bump counters.
            dao.update('{"word": "%s"}' % word,
                       getUpdateData(existing, info['postag'], int(info['count']), subject, date, isHeader, subjectJson))
        else:
            # First sighting: create the document from scratch.
            dao.insert(getInsertData(word, info['postag'], int(info['count']), subject, date, isHeader, subjectJson))
if __name__ == '__main__':
    # Usage: pyWordCount.py SERVER DATE DB_CONFIG_FILE
    if len(sys.argv) < 3:
        logging.error("Argument error")
        logging.error("	Allowed argument :: (SERVER) (DATE) (DB_CONFIG_FILE)")
        logging.error("	DATE: yyyy-mm-dd ex> 2020-06-18")
        exit()
    sysEnv = sys.argv[1]
    date = sys.argv[2]
    dbConfigFile = sys.argv[3]
    utils = pyUtilsClass.Utils()
    pyUtilsClass.setLogging2Console()
    # config file setting
    config = utils.readJsonFile(utils.getLocalPath() + "/../config/config.json")
    # Collect every NLP-analysis result file produced for the given date.
    NLPFileList = glob.glob(utils.getLocalPath() + "/.." + str(config["DEFAULT"]["NLPAnalysis"]["Target"]).replace("%date", date) + '/*')
    # db connection setting
    dbConfig = utils.readJsonFile(utils.getLocalPath() + "/../config/" + dbConfigFile)
    dao = setDBConnection(dbConfig[sysEnv])
    # subject json
    subjectJson = utils.readJsonFile(utils.getLocalPath() + "/../config/subject.json")
    # Count words per article; headline words are flagged (isHeader=True)
    # so they also contribute to headerCount in the DB documents.
    for targetFile in NLPFileList:
        jsonTarget = utils.readJsonFile(targetFile)
        contextWordCount = countWord(jsonTarget["context"])
        headlineWordCount = countWord(jsonTarget["headline"])
        crawlingDate = jsonTarget["crawlingDate"]
        subject = jsonTarget["subject"]
        countWordinDB(crawlingDate, subject, False, contextWordCount, subjectJson)
        countWordinDB(crawlingDate, subject, True, headlineWordCount, subjectJson)
    dao.setClose()
| true |
80d5928e28e11fe75f1d6085f93af19ede6dd174 | Python | ksks1986/CarND_Final | /ros/src/tl_detector/light_classification/tl_make_model.py | UTF-8 | 9,063 | 2.625 | 3 | [
"MIT"
] | permissive | import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers import flatten
import cv2
import csv
import random
from collections import Counter
def AffineTransImage(image):
    """Apply a random small rotation, scale and shift to ``image``.

    The random draws happen in a fixed order (angle, scale, shift-x,
    shift-y) so results are reproducible under a seeded RNG.
    """
    # Augmentation ranges
    angle_range = 5
    scale_range_min = 0.5
    scale_range = 1
    shift_range = 5

    rotation_angle = (np.random.rand() - 0.5) * angle_range
    scale = (np.random.rand() * scale_range) + scale_range_min
    shiftx = (np.random.rand() - 0.5) * shift_range
    shifty = (np.random.rand() - 0.5) * shift_range

    # Output size is (width, height); rotate about the image center.
    size = (image.shape[1], image.shape[0])
    center = (int(size[0] / 2), int(size[1] / 2))

    # Rotation+scale matrix, then add the extra translation to its last column.
    affine = cv2.getRotationMatrix2D(center, rotation_angle, scale)
    affine = affine + np.array([[0, 0, shiftx], [0, 0, shifty]])

    # Warp with bilinear interpolation (no padding adjustment).
    return cv2.warpAffine(image, affine, size, flags=cv2.INTER_LINEAR)
## Model Architecture
# Training hyperparameters. Training may stop before EPOCHS is reached:
# the loop below breaks once validation accuracy exceeds 0.95.
EPOCHS = 35
BATCH_SIZE = 32
def tl_Model(x, train_flag):
    """Traffic-light classifier CNN: 2 conv+pool layers, then 3 FC layers.

    Args:
        x: input image batch tensor, 400x300x3 (matches the placeholder
            defined in the main sequence below).
        train_flag: boolean tensor; when True, dropout (keep_prob=0.9) is
            applied after the flatten and after both hidden FC layers.

    Returns:
        Unscaled class logits of shape (batch, 3).
    """
    #parameter
    mu = 0 #parameter of initial value
    sigma = 0.05 #parameter of initial value
    # Layer 1: Convolutional. Input = 400x300x3. Output = 390x290x6.
    W1 = tf.Variable(tf.truncated_normal([11, 11, 3, 6], mean=mu, stddev=sigma), name='W1', trainable=True)
    b1 = tf.Variable(tf.truncated_normal([6], mean=mu, stddev=sigma), name='b1', trainable=True)
    strides = [1, 1, 1, 1]
    conv1 = tf.nn.conv2d(x, W1, strides=strides, padding='VALID')
    conv1 = tf.nn.bias_add(conv1, b1)
    # ReLU
    conv1 = tf.nn.relu(conv1)
    # Pooling. Input = 390x290x6. Output = 78x58x6.
    ksize = [1, 5, 5, 1]
    strides = [1, 5, 5, 1]
    pool1 = tf.nn.max_pool(conv1, ksize, strides=strides, padding='SAME')
    # Layer 2: Convolutional. Output = 58x38x12.
    W2 = tf.Variable(tf.truncated_normal([21, 21, 6, 12], mean=mu, stddev=sigma), name='W2', trainable=True)
    b2 = tf.Variable(tf.truncated_normal([12], mean=mu, stddev=sigma), name='b2', trainable=True)
    strides = [1, 1, 1, 1]
    conv2 = tf.nn.conv2d(pool1, W2, strides=strides, padding='VALID')
    conv2 = tf.nn.bias_add(conv2, b2)
    # ReLU
    conv2 = tf.nn.relu(conv2)
    # Pooling. Input = 58x38x12. Output = 29x19x12.
    ksize = [1, 2, 2, 1]
    strides = [1, 2, 2, 1]
    pool2 = tf.nn.max_pool(conv2, ksize, strides=strides, padding='SAME')
    # Flatten. Input = 29x19x12. Output = 6612.
    flat1 = flatten(pool2)
    # dropout (gated by train_flag via tf.cond: active only while training)
    flat1_drop = tf.nn.dropout(flat1, keep_prob=0.9)
    flat1 = tf.cond(train_flag, lambda: flat1_drop, lambda: flat1)
    # Layer 4: Fully Connected. Input = 6612. Output = 120.
    W4 = tf.Variable(tf.truncated_normal([6612, 120], mean=mu, stddev=sigma), name='W4', trainable=True)
    b4 = tf.Variable(tf.truncated_normal([120], mean=mu, stddev=sigma), name='b4', trainable=True)
    fcon1 = tf.add(tf.matmul(flat1, W4), b4)
    # ReLU
    fcon1 = tf.nn.relu(fcon1)
    # dropout
    fcon1_drop = tf.nn.dropout(fcon1, keep_prob=0.9)
    fcon1 = tf.cond(train_flag, lambda: fcon1_drop, lambda: fcon1)
    # Layer 5: Fully Connected. Input = 120. Output = 84.
    W5 = tf.Variable(tf.truncated_normal([120, 84], mean=mu, stddev=sigma), name='W5', trainable=True)
    b5 = tf.Variable(tf.truncated_normal([84], mean=mu, stddev=sigma), name='b5', trainable=True)
    fcon2 = tf.add(tf.matmul(fcon1, W5), b5)
    # ReLU
    fcon2 = tf.nn.relu(fcon2)
    # dropout
    fcon2_drop = tf.nn.dropout(fcon2, keep_prob=0.9)
    fcon2 = tf.cond(train_flag, lambda: fcon2_drop, lambda: fcon2)
    # Layer 6: Fully Connected. Input = 84. Output = 3.
    W6 = tf.Variable(tf.truncated_normal([84, 3], mean=mu, stddev=sigma), name='W6', trainable=True)
    b6 = tf.Variable(tf.truncated_normal([3], mean=mu, stddev=sigma), name='b6', trainable=True)
    logits = tf.add(tf.matmul(fcon2, W6), b6)
    return logits
def evaluate(X_data, y_data):
    """Return accuracy of the current session's model over (X_data, y_data).

    Iterates in BATCH_SIZE chunks and weights each batch's accuracy by the
    batch size. Relies on the module-level graph nodes accuracy_operation,
    x, y and train_flag defined in the main sequence below.
    """
    n_total = len(X_data)
    weighted_sum = 0
    sess = tf.get_default_session()
    for start in range(0, n_total, BATCH_SIZE):
        stop = start + BATCH_SIZE
        batch_x, batch_y = X_data[start:stop], y_data[start:stop]
        batch_acc = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, train_flag: False})
        weighted_sum += (batch_acc * len(batch_x))
    return weighted_sum / n_total
################### Main Sequence ###################
#def make_model():
# Load labels and images listed in result.csv (one "fileID,color" row each).
images_list = []
colors_list = []
# Bug fix: the CSV handle was opened and never closed; use a context manager.
with open('./result.csv', 'r') as f:
    reader = csv.reader(f)
    for row in reader:
        fileID = row[0]
        colors_list.append(row[1])
        img = cv2.imread('./img/' + str(fileID) + '.jpg')
        images_list.append(img)
# Shuffle labels and images with the SAME seed so (image, label) pairs stay
# aligned after shuffling. (shuffle <---Time Series Data??)
random.seed(0)
random.shuffle(colors_list)
random.seed(0)
random.shuffle(images_list)
colors = np.array(colors_list)
images = np.array(images_list)
# Split into train (80%), validation (10%) and test (remainder) sets.
data_size = len(colors_list)
train_rate = 0.8
validation_rate = 0.1
train_size = int(data_size * train_rate)
validation_size = int(data_size * validation_rate)
train_x = images[0:train_size]
train_y = colors[0:train_size]
validation_x = images[train_size:train_size+validation_size]
validation_y = colors[train_size:train_size+validation_size]
test_x = images[train_size+validation_size:]
test_y = colors[train_size+validation_size:]
# Report split sizes and the per-class ('0'/'1'/'2') label counts.
print(images.shape)
print(len(colors))
print(train_x.shape)
print(len(train_y))
print(np.sum(train_y=='0'))
print(np.sum(train_y=='1'))
print(np.sum(train_y=='2'))
print(validation_x.shape)
print(len(validation_y))
print(np.sum(validation_y=='0'))
print(np.sum(validation_y=='1'))
print(np.sum(validation_y=='2'))
print(test_x.shape)
print(len(test_y))
print(np.sum(test_y=='0'))
print(np.sum(test_y=='1'))
print(np.sum(test_y=='2'))
##Flattening the number of examples per label
# Oversample minority classes by duplicating their samples so each label
# approaches max_sample occurrences. Note: range(len(train_y)) is evaluated
# before any append, so only the ORIGINAL samples are iterated/duplicated.
counter = Counter(train_y)
max_sample = max(counter.values())
for i in range(len(train_y)):
    if i % 50 == 0:
        print(i)  # coarse progress indicator
    if counter[train_y[i]] < max_sample:
        adding = int((max_sample - counter[train_y[i]]) / counter[train_y[i]]) + 1
        # NOTE(review): np.append copies the whole array on every call,
        # making this loop quadratic -- slow for large training sets.
        for j in range(adding):
            train_x = np.append( train_x, np.array([train_x[i]]), axis=0 )
            train_y = np.append( train_y, train_y[i] )
print(train_x.shape)
print(len(train_y))
print(np.sum(train_y=='0'))
print(np.sum(train_y=='1'))
print(np.sum(train_y=='2'))
#Pre-process the data set(Global Contrast Normalization)
#for i in range(len(train_y)):
# train_x[i] = (train_x[i] - np.mean(train_x[i])) / np.std(train_x[i])
#for i in range(len(validation_y)):
# validation_x[i] = (validation_x[i] - np.mean(validation_x[i])) / np.std(validation_x[i])
#for i in range(len(test_y)):
# test_x[i] = (test_x[i] - np.mean(test_x[i])) / np.std(test_x[i])
print('\n------------------------------------------------')
print('-----------------Model Learning-----------------')
# Build a fresh graph: placeholders, the model, the Adam training op, and
# the accuracy op used by evaluate() above.
tf.reset_default_graph()
x = tf.placeholder(tf.float32, (None, 400, 300, 3))
y = tf.placeholder(tf.int32, (None))
train_flag = tf.placeholder(tf.bool)
one_hot_y = tf.one_hot(y, 3)
#learning rate
learning_rate = 0.0005
logits = tl_Model(x, train_flag)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)
training_operation = optimizer.minimize(loss_operation)
# Accuracy = fraction of samples whose argmax(logits) matches the label.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(train_x)
    print("Training...\n")
    for i in range(EPOCHS):
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = train_x[offset:end], train_y[offset:end]
            #Data Augmentation
            # Apply a random affine transform to every image in the batch
            # before feeding it to the training op (train_flag=True enables
            # dropout inside the model).
            batch_x_mod = np.zeros_like(batch_x)
            for j in range(len(batch_x)):
                batch_x_mod[j] = AffineTransImage( batch_x[j] )
            sess.run(training_operation, feed_dict={x: batch_x_mod, y: batch_y, train_flag: True})
        train_accuracy = evaluate(train_x, train_y)
        validation_accuracy = evaluate(validation_x, validation_y)
        print("EPOCH {} ...".format(i+1))
        print("Train Accuracy = {:.3f}".format(train_accuracy))
        print("Validation Accuracy = {:.3f}\n".format(validation_accuracy))
        #stop at good validation accuracy model
        if validation_accuracy > 0.95:
            break
    # Persist only the trainable variables under ./tl_classifier_model.
    modelname = './tl_classifier_model'
    vars_to_train = tf.trainable_variables()
    saver = tf.train.Saver(vars_to_train)
    saver.save(sess, modelname)
    print("Model saved")
    print()
print(one_hot_y)
| true |
3185df25189d22ab50940048133f595dacb7c0d9 | Python | thiago-allue/portfolio | /12_back_dev/FastAPI/FastAPI_Udemy/PythonRefresher/Functions Assignment/FunctionAssignment.py | UTF-8 | 470 | 4.15625 | 4 | [
"MIT"
] | permissive | """
Function Assignment
- Create a function that takes in 3 parameters(firstname, lastname, age) and
returns a dictionary based on those values
"""
def user_dictionary(firstname, lastname, age):
    """Bundle a user's first name, last name and age into a dictionary."""
    return {
        "firstname": firstname,
        "lastname": lastname,
        "age": age,
    }
# Demo: build a dictionary for one user and print it.
solution_dictionary = user_dictionary(firstname="Eric", lastname="Roby", age=32)
print(solution_dictionary)
| true |
ac425ba741e7e6434de79ec40decbe424d850033 | Python | HankDaly/arch | /town-design/20181104_geometry/test.py | UTF-8 | 4,855 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python
#-*- coding: utf-8 -*-
from geometry import Point2D, PointVec, Vector, VectorTwoPts, RectangleCornerPoint, RectangleEdgePoint, Phrase, Polyline, Rectangle, RectangleRelation, ReverseVector
from rectangleShortestPath import findShortestPath
def constructNormalRec(start_pt_lst, length, width):
    """Build an axis-aligned Rectangle from a start point and dimensions.

    Edge vectors are +x by ``length``, +y by ``width``, then their
    reverses back to the start point.
    """
    origin = Point2D(start_pt_lst)
    along = Vector(length, 0.0)
    across = Vector(0.0, width)
    edges = [along, across, ReverseVector(along), ReverseVector(across)]
    return Rectangle(edges, origin)
def constructNotNormalRecsSample1():
    """Return two hard-coded, non-axis-aligned sample rectangles."""
    rec1 = Rectangle(
        [Vector(4.523321,-1.382918), Vector(3.008505,9.840376),
         Vector(-4.523321,1.382918), Vector(-3.008505,-9.840376)],
        Point2D([-2.989484, 2.030411]))
    rec2 = Rectangle(
        [Vector(8.19364, 4.667886), Vector(-2.098816, 3.684097),
         Vector(-8.19364, -4.667886), Vector(2.098816, -3.684097)],
        Point2D([-0.040854, -9.81334]))
    return rec1, rec2
def constructRelation(rec1, rec2):
    """Build and return the RectangleRelation between two rectangles.

    (Debug printing of corner visibility / shortest-path / gap attributes
    was previously kept here as inert comment blocks.)
    """
    relation = RectangleRelation(rec1, rec2)
    return relation
def testFindShortestPath(relation):
    """Run findShortestPath from edge start points and print the result."""
    path = findShortestPath(relation, edge_index1=0, length1=0.0, edge_index2=0, length2=0.0)
    print('path.keys: ')
    print(path.__dict__.keys())
    print('path.start_pt.x,y:')
    print(path.start_pt.x, path.start_pt.y)
    print('path.vec_lst: ')
    for segment in path.vec_lst:
        print(segment)
    print('len = ' + str(path.length))
    return path
def testRectangleCornerPoint():
    # NOTE(review): this zero-argument definition is shadowed by the
    # three-argument testRectangleCornerPoint defined just below, so it is
    # unreachable dead code -- confirm and remove if it is no longer needed.
    # getRectangleCornerPath(): check that the vector directions correspond
    # (original comment was in Chinese)
    rec1 = constructNormalRec([0.0, 4.0], 4.0, 3.0)
    pt = RectangleCornerPoint(rec1, 0)
def testRectangleCornerPoint(rec, index, length):
    """Print index bookkeeping for a corner point and an edge point of rec."""
    corner_pt = RectangleCornerPoint(rec, index)
    print('index = ' + str(index))
    print(corner_pt.index_this, corner_pt.index_before, corner_pt.index_after, corner_pt.index_cross)
    edge_pt = RectangleEdgePoint(rec, index, length)
    print('pt_edge: ')
    print(edge_pt.index_this, edge_pt.index_before, edge_pt.index_after, edge_pt.index_cross)
def arrangeRectangleWithEdgePoly():
    """Chain shortest paths between consecutive sample rectangles.

    Intended to arrange buildings along a plot-boundary polyline: builds
    four axis-aligned rectangles and, for each consecutive pair, computes
    the shortest path starting at edge point 0 of each rectangle. The
    boundary polyline itself is a placeholder and not used yet.
    """
    rects = [
        constructNormalRec([0.0, 4.0], 4.0, 3.0),
        constructNormalRec([10.0, 0.0], 3.0, 5.0),
        constructNormalRec([0.0, -3.0], 3.3, 2.33),
        constructNormalRec([-1.5, -8.0], 4.4, 2.2),
    ]
    edge_poly = None  # plot boundary placeholder (unused so far)
    paths = []
    for first, second in zip(rects, rects[1:]):
        relation = constructRelation(first, second)
        paths.append(findShortestPath(relation, edge_index1=0, length1=0.0, edge_index2=0, length2=0.0))
    return paths
def testArrangeRectangleWithEdgePoly():
    """Run arrangeRectangleWithEdgePoly and print the resulting path list."""
    paths = arrangeRectangleWithEdgePoly()
    print('path_lst: ')
    print(paths)
def main():
    """Smoke test: build two axis-aligned rectangles and their relation."""
    # NOTE kept from the original: a ZeroDivisionError (float division by
    # zero) was observed in this flow at some point.
    first = constructNormalRec([0.0, 4.0], 4.0, 3.0)
    second = constructNormalRec([10.0, 0.0], 3.0, 5.0)
    relation = constructRelation(first, second)
    return
if __name__ == '__main__':
    main()
    # Alternative entry points kept for manual experimentation:
    #testRectangleCornerPoint()
    #testArrangeRectangleWithEdgePoly()
| true |
5128d06ca06265ef7844cff55ce9f8124df8c4ba | Python | mathieuprn/Conception-of-a-risk-score-model-of-traffic-accidents-in-Chicago | /Data preparation/map_grid.py | UTF-8 | 3,934 | 3.046875 | 3 | [] | no_license | import numpy as np
from config import Config
class MapGrid:
    """
    This class stores the rotation logic.
    If ll, ul, lr and ur are input the city boundaries dict is not used for construction.

    The four corners (ll / lr / ul / ur) are (lon, lat) row vectors describing a
    possibly rotated rectangle; the grid axes are aligned with its edges.
    """
    def __init__(self, city, city_boundaries, x_grid_count, y_grid_count, ll=None, ul=None, lr=None, ur=None):
        self.city = city
        self.x_grid_count = x_grid_count
        self.y_grid_count = y_grid_count
        # Corner points as 1x2 matrices (lon, lat).
        if city:
            self.ll = np.matrix(city_boundaries[city]['ll'])
            self.lr = np.matrix(city_boundaries[city]['lr'])
            self.ul = np.matrix(city_boundaries[city]['ul'])
            self.ur = np.matrix(city_boundaries[city]['ur'])
        else:
            self.ll = np.matrix(ll)
            self.lr = np.matrix(lr)
            self.ul = np.matrix(ul)
            self.ur = np.matrix(ur)
        # Centre of the rectangle: midpoint of the ul–lr diagonal.
        self.centerx = (self.lr[0,0] + self.ul[0,0]) / 2
        self.centery = (self.lr[0,1] + self.ul[0,1]) / 2
        # Corners translated so that the centre becomes the origin.
        self.ll_c = self.ll - np.matrix([[self.centerx, self.centery]])
        self.ul_c = self.ul - np.matrix([[self.centerx, self.centery]])
        self.ur_c = self.ur - np.matrix([[self.centerx, self.centery]])
        self.lr_c = self.lr - np.matrix([[self.centerx, self.centery]])
        # Rotation angle of the bottom edge (ll -> lr) against the lon axis.
        self.theta = np.arctan(abs(self.ll_c[0,1] - self.lr_c[0,1]) / abs(self.ll_c[0,0] - self.lr_c[0,0]))
        self.cos_theta = np.cos(self.theta)
        self.sin_theta = np.sin(self.theta)
        # rot_matrix maps map coordinates into the grid-aligned frame;
        # reverse_rot_matrix is its inverse (the transpose of a rotation).
        self.rot_matrix = np.matrix([[self.cos_theta, -self.sin_theta],
                                     [self.sin_theta, self.cos_theta]])
        self.reverse_rot_matrix = np.matrix([[self.cos_theta, self.sin_theta],
                                             [-self.sin_theta, self.cos_theta]])
        # Corners expressed in the rotated (grid-aligned) frame, as column vectors.
        self.ur_norm = self.rot_matrix @ self.ur_c.T
        self.ul_norm = self.rot_matrix @ self.ul_c.T
        self.ll_norm = self.rot_matrix @ self.ll_c.T
        self.lr_norm = self.rot_matrix @ self.lr_c.T
        #self.ul_norm[0,0], self.ll_norm[0,0] = -0.034, -0.034
        #self.lr_norm[0,0], self.ur_norm[0,0] = 0.034, 0.034
        # Width and height of the rectangle in the rotated frame
        # (corners are symmetric around the origin, hence the factor 2).
        self.x_dist = 2 * (self.lr_norm[0, 0])
        self.y_dist = 2 * (self.ul_norm[1, 0])

    def get_grid(self, lon, lat):
        """
        :param lon: longitude - float
        :param lat: latitude - float
        :return: locations in the grid for the lon lat pair as non-negative integers. (xgrid, ygrid), if grid not within
        scope returns None.
        """
        # Rotate the centred point into the grid-aligned frame
        # (manual 2x2 matrix-vector multiply).
        lon_norm = (lon - self.centerx) * self.rot_matrix[0, 0] + (lat - self.centery) * self.rot_matrix[0, 1]
        lat_norm = (lon - self.centerx) * self.rot_matrix[1, 0] + (lat - self.centery) * self.rot_matrix[1, 1]
        # Fraction of the rectangle covered, scaled to cell counts.
        gridx = np.floor(((lon_norm - self.ll_norm[0,0]) / self.x_dist) * self.x_grid_count)
        gridy = np.floor(((lat_norm - self.ll_norm[1,0]) / self.y_dist) * self.y_grid_count)
        # Outside the rectangle -> no grid cell.
        if gridx < 0 or gridx >= self.x_grid_count or gridy < 0 or gridy >= self.y_grid_count:
            return None
        return (gridx, gridy)

    def get_grid_center(self, gridx, gridy):
        """
        :param gridx: (0 - (x_grid_count -1))
        :param gridy: (0 - (y_grid_count -1))
        :return: (lat, lon)
        """
        # Cell-centre coordinates in the rotated frame, relative to the centre.
        lon_norm = (gridx + 0.5) * self.x_dist / self.x_grid_count - self.x_dist / 2
        lat_norm = (gridy + 0.5) * self.y_dist / self.y_grid_count - self.y_dist / 2
        # Row-vector times rot_matrix applies the inverse rotation
        # (equivalent to reverse_rot_matrix on a column vector).
        coord = np.matrix([[lon_norm, lat_norm]]) @ self.rot_matrix + np.matrix([[ self.centerx, self.centery]])
        return (coord[0, 0], coord[0, 1])
def main():
    """Small demo: build the configured city grid and print two lookups."""
    grid = MapGrid(Config.city, Config.city_boundaries, Config.mapsize, Config.mapsize)
    print(grid.get_grid_center(0, 0))
    print(grid.get_grid(-87.732081736, 41.9536468999999))
if __name__ == '__main__':
    main()  # run the small demo when executed as a script
61624a24956b0b7dfdcfec5d60189163d278b438 | Python | jacksparling1/JackSparling-Python | /Unit2ALab/Unit2ALab.py | UTF-8 | 216 | 3.171875 | 3 | [] | no_license | #print (int(5) + 4.5)
#print (int(5) + 5)
#answer1 = input ( )
#print (int(answer1) + 5)
# Boolean-operator practice with fixed sample values.
i = 100
j = 200
x = 300
y = 100
# BUG FIX: the original wrote e.g. `print (i<j) and (i>=y)`, which prints
# only the first comparison and throws the rest of the expression away
# (print() returns None, so `None and ...` short-circuits outside the call).
# The whole boolean expression now sits inside print().
print(i == j or i > x)            # False or False -> False
print((i < j) and (i >= y))       # True and True  -> True
print((i == x) and (i == y))      # False and True -> False
print(i < j and i < x and i < y)  # 100 < 100 is False -> False
| true |
88921e695cf46a7fddde8483d87fac76de987403 | Python | SangeCoder/MyMediaLite_Python | /Evaluate.py | UTF-8 | 3,807 | 2.890625 | 3 | [] | no_license | __author__ = 'jake221'
import numpy as np
import CalcPreRec
import CalcAuc
import CalcNdcg
import CalcMrr
import time
class Evaluate():
'''
evaluate the effectiveness of the recommendation model using eight different information retrieval metrics
'''
def __init__(self,user_vecs,item_vecs,train_matrix,test_matrix,test_users,candidate_items):
self.user_vecs = user_vecs
self.item_vecs = item_vecs
self.train_matrix = train_matrix
self.test_matrix = test_matrix
self.test_users = test_users
self.candidate_items = candidate_items
def CalcMetrics(self):
'''
:return: eight metrics
'''
num_users = 0
ret = np.zeros((8,1))
user_num = len(self.test_users)
precision = np.zeros((user_num,2))
recall = np.zeros((user_num,2))
map = np.zeros((user_num,1))
auc = np.zeros((user_num,1))
ndcg = np.zeros((user_num,1))
mrr = np.zeros((user_num,1))
AtN = [5,10]
t0 = time.time()
print 'Start evaluating...'
for i in xrange(user_num):
user_id = self.test_users[i]
# print 'user_id',user_id
# find items that user has rated in the test set
test_nonzero_idx = self.test_matrix[user_id,:].nonzero()
test_items_idx = test_nonzero_idx[0]
correct_items = np.intersect1d(test_items_idx,self.candidate_items)
# find items that user has rated in the train set
train_nonzero_idx = self.train_matrix[user_id,:].nonzero()
# print 'train_nonzero_idx',train_nonzero_idx
train_items_idx = train_nonzero_idx[0]
# print 'self.candidate_items',self.candidate_items
candidate_items_in_train = np.intersect1d(train_items_idx,self.candidate_items)
num_eval_items = self.candidate_items.size - candidate_items_in_train.size
# if user has not rated any items in test set or all items in test set are relevant then continue
if correct_items.size == 0 | num_eval_items - correct_items.size == 0:
continue
# generate a item recommendation list for user_id
recommendation_list = self.GenerateLists(self.user_vecs,self.item_vecs,user_id,self.candidate_items)
ignore_items = train_items_idx
precision[i,:],recall[i,:],map[i] = CalcPreRec.PrecisionAndRecall(recommendation_list, correct_items, ignore_items, AtN)
auc[i] = CalcAuc.AUC(recommendation_list, correct_items, ignore_items)
ndcg[i] = CalcNdcg.NDCG(recommendation_list, correct_items, ignore_items)
mrr[i] = CalcMrr.MRR(recommendation_list, correct_items, ignore_items)
num_users = num_users + 1
t1 = time.time()
print 'Evaluation finished in %f seconds' % (t1 - t0)
ret[0] = sum(auc) / (num_users * 1.0)
ret[1] = sum(precision[:,0]) / (num_users * 1.0)
ret[2] = sum(precision[:,1]) / (num_users * 1.0)
ret[3] = sum(map) / (num_users * 1.0)
ret[4] = sum(recall[:,0]) / (num_users * 1.0)
ret[5] = sum(recall[:,1]) / (num_users * 1.0)
ret[6] = sum(ndcg) / (num_users * 1.0)
ret[7] = sum(mrr) / (num_users * 1.0)
return ret
def GenerateLists(self,user_vecs,item_vecs,user_id,candidate_items):
predict_list = np.zeros((candidate_items.size,1))
for i in range(candidate_items.size):
predict_list[i] = np.dot(user_vecs[user_id,:], item_vecs[candidate_items[i],:])
list_asc = np.argsort(predict_list,axis=0)
sorted_list = list_asc[::-1]
return sorted_list | true |
a90702f464ed154d0bbb6e0fb5febe6d22b0a0df | Python | saurabhthesuperhero/Python_Dicegame | /Dicegame.py | UTF-8 | 999 | 3.609375 | 4 | [] | no_license | from Dicegame_Functions import *
init = 0      # 0 -> (re)ask the player whether they want to roll at all
initDice = 0  # 0 -> dice count/sides not chosen yet

while True:
    # Initialisation: ask whether the player wants to roll.
    if init == 0:
        Qroll = Initialisatie()
        init = 1

    # Quit double-check.
    if Qroll == "n" or Qroll == "N":
        Qroll = QuitCheck()
        if Qroll == "y":
            break
        # BUG FIX: the original tested `Qroll == "n" or "N"`, which is always
        # true because the bare string "N" is truthy; now only an explicit
        # 'n'/'N' restarts the initialisation prompt.
        elif Qroll in ("n", "N"):
            init = 0
    # Playing the 'game'.
    elif Qroll == "y" or Qroll == "Y":
        if initDice == 0:
            DiceNumber = initiateDice()
            DiceSides = initiateDiceSides()
            initDice = 1
        DiceRoll = roll(DiceNumber, DiceSides)  # rolls the dice and saves the roll
        DiceDupes = dupelist(DiceRoll, DiceNumber, DiceSides)  # times each face was rolled
        print ("your Diceroll is:" + str(DiceRoll))
        print ("Number of times you got a number(in order):" + str(DiceDupes))
        Qroll = input("Play again? (y/n): ")
    else:
        print ("please enter either 'y' or 'n' (or 'Y'/'N')")
        init = 0
c0191ae2bdffe92d552e2f8bac957daafe92ca8d | Python | sacko87/h4x0r | /exploit-exercises.com/protostar/net3.py | UTF-8 | 717 | 2.890625 | 3 | [] | no_license | #!/usr/bin/python
import sys
import struct
import socket
# create a TCP socket to connect to the service
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# connect to the service (target host from argv[1], fixed port 2996)
client.connect((sys.argv[1], 2996))

# send the attack
# > 2 bytes is the length of the packet
#   (0x001f = 31 = 1 command byte + 6 + 14 + 10 payload bytes below)
client.send('\x00\x1f')
# > 1 byte to dictate the command
client.send('\x17')
# > three null-terminated elements, each preceded
#   by its length (including the NUL) as one byte
# > resource: 'net3' (length 0x05)
client.send('\x05net3\x00')
# > username: 'awesomesauce' (length 0x0d)
client.send('\x0dawesomesauce\x00')
# > password: 'password' (length 0x09)
client.send('\x09password\x00')

# get the response and strip the 2-byte length prefix
result = client.recv(256)
print result[2:]

# finally close
client.close()
| true |
77fa1460407a604abaeb50cc0adace70500f3f39 | Python | MIT-6819-team/TF_colorization | /preprocessing/create_dataset_saturation_index.py | UTF-8 | 1,496 | 2.796875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
import glob2, itertools, cv2, sys, ujson, gzip
import numpy as np
from joblib import Parallel, delayed
# Winter Guerra <winterg@mit.edu> Nov. 27 2016
# Make sure that the user gave us enough arguments
assert len(sys.argv) == 3, "Not enough arguments. EX. ./<script>.py /path/to/trainingset/ .JPEG"
dataset_location = sys.argv[1]  # root directory of the image dataset
file_extension = sys.argv[2]    # file extension to index, e.g. ".JPEG"
num_jobs=4  # number of parallel joblib workers
print("Looking for files of type", file_extension, "from location", dataset_location)
def get_saturation(f):
    """Return the mean saturation of image file *f*, scaled to [0, 1]."""
    bgr = cv2.imread(f)
    # OpenCV loads images as BGR; saturation is channel 1 of HSV.
    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    # 8-bit channel, so divide by 255 (the bit depth) to normalise.
    return np.average(hsv[:, :, 1]) / 255.0
# Lazily collect every file under the dataset root with the given extension.
filenames = glob2.iglob(dataset_location + "**/*" + file_extension)

# DEBUG: limit to the first few files while testing
#filenames = itertools.islice(filenames, 5)
def worker_func(f):
    """Return (path relative to the dataset root, mean saturation) for *f*."""
    relative_filename = f[len(dataset_location):]
    return (relative_filename, get_saturation(f))
# Compute the saturation of every file in parallel.
# (NOTE(review): the original comment said "remove said files", but nothing
# is deleted here — this only builds the saturation index.)
results = Parallel(n_jobs=num_jobs, verbose=5, backend='threading')(delayed(worker_func)(f) for f in filenames)
# Turn the (path, saturation) pairs into a dictionary
file_dict = dict(results)
# Write the index as gzipped JSON with 4-digit float precision
with gzip.open('./saturation_index.json.gz', 'wt') as f:
    ujson.dump(file_dict, f, double_precision=4)
#print(list(itertools.islice(filenames, 5)))
| true |
e15a636db94c9976c4e4ce5aa6c39c9eb6b762d7 | Python | FalconMadhab/deit_recommender | /database.py | UTF-8 | 3,900 | 2.734375 | 3 | [] | no_license | '''
Functions related to querying food database sr_legacy.db
'''
import os
import sys
import sqlite3 as sql
from constants import SEARCH_RESTRICT
def resource_path(relative_path):
    """Get absolute path to resource, works for dev and PyInstaller.

    When running from a PyInstaller bundle, sys._MEIPASS points at the
    unpacked resources; otherwise fall back to this file's directory.
    """
    if hasattr(sys, '_MEIPASS'):
        base_path = sys._MEIPASS
    else:
        base_path = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(base_path, relative_path)
def search_food(food_name, type_res, fd_res):
    """Search the food_des table for *food_name*.

    food_name -- space-separated search terms; every term must occur in the
                 food's long description.  An empty string lists all foods.
    type_res / fd_res -- restriction objects consumed by get_fd_grps(),
                 used to exclude whole food groups from the results.
    Returns a list of (food_group_id, long_desc) rows.
    """
    con = sql.connect(resource_path('sr_legacy/sr_legacy.db'))
    cur = con.cursor()
    # Split search term for multi-word searches eg, 'chocolate milk'
    split_food_names = food_name.split()
    wildcard_padded_split_food_names = ['%' + name + '%' for name in split_food_names]
    # Return entire list of foods if no search term supplied
    # NOTE(review): this branch does not apply the food-group restrictions —
    # confirm whether the unfiltered listing is intentional.
    if len(split_food_names) == 0:
        sql_stmt = (
            'SELECT food_group_id, long_desc FROM food_des '
            'ORDER BY food_group_id '
        )
        parameters = []
    fd_grps, fd_grps_tuple = get_fd_grps(type_res, fd_res)
    # Otherwise, return filtered search result
    # We need to call UPPER because INSTR is not case-sensitive.
    # Order by the sum of how early each term appears in the search result strings.
    if len(split_food_names) > 0:
        sql_stmt = (
            'SELECT food_group_id, long_desc FROM food_des '
            'WHERE long_desc LIKE ? '
            + (len(wildcard_padded_split_food_names)-1) * ' AND long_desc LIKE ?' +
            'AND food_group_id not in ' + fd_grps_tuple +
            ' ORDER BY INSTR(UPPER(long_desc), UPPER(?))'
            + (len(split_food_names)-1) * ' + INSTR(UPPER(long_desc), UPPER(?))' + ' ASC'
        )
        # Parameter order mirrors the placeholders: LIKE terms, then the
        # restricted group ids, then the INSTR ordering terms.
        parameters = wildcard_padded_split_food_names + fd_grps + split_food_names
    cur.execute(sql_stmt, parameters)
    return cur.fetchall()
# Create string of the form (?,?,?) where number of ? == number of restricted food groups
def get_fd_grps(type_res, fd_res):
    """Return (restricted food-group ids, SQL placeholder string).

    The second element has the form '(?, ?, ?)' with one '?' per restricted
    food group, for use after 'NOT IN' in an SQL statement.  When no
    restriction applies — or the restriction set is empty — the pair
    ([], '()') / (fd_grps, '()') is returned so the caller's SQL stays valid.
    """
    if SEARCH_RESTRICT not in type_res.res:
        return [], '()'
    fd_grps = list(fd_res.res)
    # BUG FIX: the original left fd_grps_tuple unbound when fd_res.res was
    # empty, raising UnboundLocalError on return.  Building the placeholder
    # string with join also handles every length uniformly (the original
    # special-cased 1 vs. >1 via str(tuple(...)).replace).
    placeholders = ', '.join('?' for _ in fd_grps)
    return fd_grps, '(' + placeholders + ')'
def get_nutrition_units(nut_ids):
    """Look up the unit row in nutr_def for each nutrient id in *nut_ids*."""
    con = sql.connect(resource_path('sr_legacy/sr_legacy.db'))
    cur = con.cursor()
    query = (
        'SELECT units '
        'FROM nutr_def '
        'WHERE id = ?'
    )
    # One (units,) row per requested nutrient id.
    units = [cur.execute(query, [nut_id]).fetchall()[0] for nut_id in nut_ids]
    cur.close()
    con.commit()
    return units
def describe_food(food_id):
    """Return the list of nutrient amounts recorded for *food_id*."""
    con = sql.connect(resource_path('sr_legacy/sr_legacy.db'))
    cur = con.cursor()
    cur.execute("SELECT amount FROM nut_data where food_id = ?", [food_id])
    # Each fetched row is a one-element tuple; unwrap to plain values.
    return [row[0] for row in cur.fetchall()]
def get_food_id(food_name):
    """Return the id of the food whose long description equals *food_name*.

    Returns the first match; with no match the empty result list is
    returned unchanged (mirroring the original's fall-through behaviour).
    """
    con = sql.connect(resource_path('sr_legacy/sr_legacy.db'))
    cur = con.cursor()
    cur.execute(
        "SELECT id FROM food_des where long_desc = ?", [food_name])
    rows = cur.fetchall()
    # BUG FIX: the original re-bound the result variable inside
    # `for i in range(len(...))`, which raised TypeError on the second
    # iteration whenever more than one row matched.
    if rows:
        return rows[0][0]
    return rows
def get_food_name(food_id):
    """Return the long description of the food with id *food_id*.

    Returns the first match; with no match the empty result list is
    returned unchanged (mirroring the original's fall-through behaviour).
    """
    con = sql.connect(resource_path('sr_legacy/sr_legacy.db'))
    cur = con.cursor()
    cur.execute(
        "SELECT long_desc FROM food_des where id = ?", [food_id])
    rows = cur.fetchall()
    # BUG FIX: the original re-bound the result variable inside
    # `for i in range(len(...))`, which raised TypeError on the second
    # iteration whenever more than one row matched.
    if rows:
        return rows[0][0]
    return rows
if __name__ == '__main__':
    # Ad-hoc smoke test: requires the project's spartan module and the
    # restriction CSV files next to this script.
    import spartan
    type_res = spartan.Restriction('type_res.csv')
    fd_res = spartan.Restriction('fd_res.csv')
    result = search_food('', type_res, fd_res)
| true |
8ccc8347fe445c237dad562b18aa1bdddcf4a137 | Python | miniguiti/basic-python | /Dicionarios.py | UTF-8 | 501 | 3.78125 | 4 | [] | no_license | #Dicionarios não possuem indice, ou seja, é idetificado por sua chave
# A dict has no positional index; each entry is identified by its key.
meu_dicionario = {"A":"AMEIXA", "B": "BOLA", "C": "CACHORRO"}

# Look up a single value by key.
print(meu_dicionario["A"])

# Iterating a dict yields its keys.
for chave in meu_dicionario:
    print(chave + ":" + meu_dicionario[chave])

# Iterating with the dict view methods.
# BUG FIX: the three loops below printed the undefined name 'i'
# (NameError); they now print the loop variable.

# keys() yields the keys
for chave in meu_dicionario.keys():
    print(chave)

# items() yields (key, value) tuples
for item in meu_dicionario.items():
    print(item)

# values() yields the values
for valor in meu_dicionario.values():
    print(valor)
p = 0     # level-1 score
q = 0     # level-2 score
r = 0     # level-3 score
flag = 0  # set to 1 once the score board has been shown

def score(a, b):
    """Update the global score for level *a*.

    b == 1 records a correct answer (+2/+6/+10 for levels 1/2/3);
    any other value applies that level's penalty (-1/-3/-5).
    """
    # FIX: the first line of this block carried non-Python metadata residue
    # before `p=0`; the repeated nested if/else is also collapsed here.
    global p, q, r
    if a == 1:
        p += 2 if b == 1 else -1
    elif a == 2:
        q += 6 if b == 1 else -3
    elif a == 3:
        r += 10 if b == 1 else -5

def display():
    """Print the final score board and mark the quiz finished (flag = 1)."""
    global flag
    flag = 1
    divider = '-' * 45
    print(divider)
    print(divider)
    print('                 SCORE BOARD')
    print(divider)
    print('YOUR SCORE IN LEVEL 1:', p)
    print('YOUR SCORE IN LEVEL 2:', q)
    print('YOUR SCORE IN LEVEL 3:', r)
    print(divider)
    print('TOTAL SCORE :', p + q + r)
    print(divider)
    print(divider)
def sports():
    """Run the three-level sports quiz.

    Each level asks five multiple-choice questions and records the result
    through the global score(level, correct) helper.  After levels 1 and 2
    the player may quit, which prints the score board via display(); the
    board is always printed once the quiz ends.

    FIXES vs. the original: the per-question ask/check logic was duplicated
    fifteen times with inconsistent accepted-answer spellings; it is now
    data-driven and every question uniformly accepts the option letter, the
    answer text, or 'letter.answer' (case- and space-insensitive).  The
    'LEVEl' banner typos were also corrected.
    """

    def ask(level, number, question, options, letter, text, prompt):
        # Print one question, read the answer, and score it.
        print('\nQUIZ :' + str(number))
        print(question)
        print(options)
        ans = input(prompt).lower().replace(' ', '')
        key = text.lower().replace(' ', '')
        if ans in (letter, key, letter + '.' + key):
            print("Correct answer!!")
            score(level, 1)
        else:
            print("Wrong answer!!")
            print("The correct answer is: " + letter.upper() + ". " + text)
            score(level, 0)

    # Per level: (question, options, correct letter, correct answer, prompt)
    levels = {
        1: [
            ("How many number of rings are present in the olympic flag?",
             "A. 4 \nB. 6 \nC. 5 \nD. 7", 'c', '5', "Enter your answer:"),
            ("Who holds the record of being the first Indian in history to win an Asian gold medal in javelin throw(men's event)?",
             "A. Neeraj Chopra \nB. Shivpal Singh \nC. Davinder singh Kang \nD. Gurtej Singh",
             'a', 'Neeraj chopra', "Enter your answer with space:"),
            (" What is the nickname of Sachin Tendulkar?",
             "A. The Little Genius \nB. The Little Master\nC. The God of Cricket\nD. Super Star of Cricket",
             'b', 'The Little Master', "Enter your answer with space:"),
            ("How many players are on a baseball team?",
             "A. 10 \nB. 8 \nC. 9 \nD. 11", 'c', '9', "Enter your answer:"),
            ("What does NBA stands for?",
             "A. National Basketball Assembly \nB. Nation's Basketball Association \nC. Nation's Basketball Assembly \nD. National Basketball Association",
             'd', 'National Basketball Association', "Enter your answer:"),
        ],
        2: [
            ("Who is the first batsman to cross 10,000 runs in tests ?",
             "A. Sunil Gavaskar \nB. Sachin Tendulkar\nC. Allan Border\nD. Brian Lara",
             'a', 'Sunil Gavaskar', "Enter your answer with space:"),
            ("What sport is dubbed the \u2018king of sports\u2019 ?",
             "A. Kabadi \nB. Soccer\nC. Hockey \nD. Badmitton",
             'b', 'Soccer', "Enter your answer:"),
            ("Who won the first ever Cricket World Cup in 1975 ?",
             "A. Australia\nB. England \nC. India\nD.West Indies",
             'd', 'West Indies', "Enter your answer with space:"),
            (" Who set a new record for most number of sixes by an individual in an ODI innings?",
             "A. Virat Kholi\nB. Sachin Tendulkar\nC. Eoin Morgan\nD. Chris Gayle",
             'c', 'Eoin Morgan', "Enter your answer with space:"),
            (" In Kho-Kho , the players occupying the squares are known as?",
             "A. Lobby\nB. Raiders\nC. Chasers\nD. Chukke",
             'c', 'Chasers', "Enter your answer with space:"),
        ],
        3: [
            ("Who bowled the fastest delivery ever of 100.2mph?",
             "A. Brett Lee\nB. Shoaib Akhtar\nC. Shaun Tait\nD. Jeffrey Thompson",
             'b', 'Shoaib Akhtar', "Enter your answer with space:"),
            ("What country has competed the most times in the summer Olympics without winning any medal at all?",
             "A. Liechtenstein\nB. South Africa\nC. Philipiness\n D. Srilanka",
             'a', 'Liechtenstein', "Enter your answer:"),
            ("Which of the following Indian Sports Team is also known as \u201cThe Bhangra Boys?",
             "A. Volley Ball\nB. Basket ball \nC. Cricket\nD. Foot Ball",
             'd', 'Foot Ball', "Enter your answer with space:"),
            ("The World Military Cup organized by the International Military Sports Council (CISM) involves which among the following sports?",
             "A. Volley Ball \nB. Cricket\nC. Foot Ball\nD. Basket Ball",
             'c', 'Foot Ball', "Enter your answer with space:"),
            ("The \u201dMarquess of Queensberry rules\u201d is a code of generally accepted rules in which of the following sports?",
             " A. Chess\nB. Boxing\n C. Hockey\n D. Tennis",
             'b', 'Boxing', "Enter your answer with space:"),
        ],
    }

    def run_level(level):
        # Level banner followed by its five questions.
        print('\nLEVEL ' + str(level) + '!!')
        for number, entry in enumerate(levels[level], 1):
            ask(level, number, *entry)

    run_level(1)
    print("\nDo you want to go next level!!")
    print("\nEnter 'YES' to go next level or 'NO' to exit:")
    if input("Enter your choice:").lower() == 'yes':
        run_level(2)
    else:
        display()

    # display() sets the global flag, so a player who already quit above is
    # not asked about level 3 again.
    if flag == 0:
        print("\nDo you want to go next level!")
        print("\nEnter 'YES' to go next level or 'NO' to exit:")
        if input("Enter your choice:").lower() == 'yes':
            run_level(3)
        print("Congratulations !! You completed all the questions!!")
        display()
def general():
    """Run the three-level general-knowledge quiz.

    Same flow as sports(): five multiple-choice questions per level, scored
    through the global score(level, correct) helper; quitting between
    levels (or finishing) shows the score board via display().

    FIXES vs. the original: the duplicated ask/check logic is data-driven,
    every question uniformly accepts the option letter, the answer text,
    or 'letter.answer' (case- and space-insensitive), and the 'LEVEl'
    banner typos were corrected.
    """

    def ask(level, number, question, options, letter, text, reveal):
        # Print one question, read the answer, and score it.
        print("\nQuiz:" + str(number))
        print(question)
        print(options)
        print("Enter your Answer")
        answer = input().lower().replace(' ', '')
        key = text.lower().replace(' ', '')
        if answer in (letter, key, letter + '.' + key):
            print("correct answer")
            score(level, 1)
        else:
            print("wrong answer")
            print("Correct answer: " + reveal)
            score(level, 0)

    # Per level: (question, options, correct letter, correct answer, reveal)
    levels = {
        1: [
            ("Who was the first Indian Women in space?",
             "A. Kalpana Chawla\nB. Sunitha Williams\nC. Koneru Humpy\nD. None of the above",
             'a', 'Kalpana Chawla', "A. Kalpana Chawla"),
            ("Who was the first Man to Climb Mount Everest Without Oxygen?",
             "A. Junko Tabei\nB. Reinhold Messner\nC. Peter Habeler\nD. Phu Dorji",
             'd', 'Phu Dorji', "D. Phu Dorji"),
            ("Who wrote the Indian National Anthem?",
             "A. Bakim Chandra Chatterji\nB. Rabindranath Tagore\nC. Swami Vivekanand\nD. None of the above",
             'b', 'Rabindranath Tagore', "B. Rabindranath Tagore"),
            ("Who was the first Indian Scientist to win a Nobel Prize?",
             "A. CV Raman\nB. Amartya Sen\nC. Hargobind Khorana\nD. Subramanian Chrandrashekar",
             'a', 'CV Raman', "A. CV Raman"),
            ("Who was the first Indian to win the Booker Prize?",
             "A. Dhan Gopal Mukerji\nB. Nirad C. Chaudhuri\nC. Arundhati Roy\nD. Aravind Adiga",
             'c', 'Arundhati Roy', "C. Arundhati Roy"),
        ],
        2: [
            ("How many string does a bass guitar usually have?",
             "A. four\nB. five\nC. three\nD. six",
             'a', 'four', "A. four"),
            ("Which continent has the biggest population?",
             "A. Africa\nB. Europe\nC. Asia\nD. Antarctica",
             'c', 'Asia', "C. Asia"),
            ("Who developed the theory of relativity?",
             "A. Issac Newton\nB. Albert Einstein\nC. Charles Darwin\nD. Marie Curie",
             'b', 'Albert Einstein', "B. Albert Einstein"),
            ("Blood is filtered by which pair of organs?",
             "A. Liver\nB. Kidneys\nC. Heart\nD. Lungs",
             'b', 'Kidneys', "B. Kidneys"),
            ("In which year World Trade Organisation came into existence?",
             "A. 1992\nB. 1993\nC. 1994\nD. 1995",
             'd', '1995', "D. 1995"),
        ],
        3: [
            ("Which of the following personalities gave \u2018The Laws of Heredity\u2019?",
             "(A) Robert Hook\n(B) G.J. Mendel\n(C) Charles Darwin\n(D) William Harvey",
             'b', 'G.J.Mendel', "(B) G.J. Mendel"),
            ("Who created a famous Geet Govind?",
             "(A) Bana Bhatt\n(B) Kalidas\n(C) Jayadev\n(D) Bharat Muni",
             'c', 'Jayadev', "(C) Jayadev"),
            ("Which of the following represents the Finance Commissions that have been set-up so far?",
             "(A) 10\n(B) 11\n(C) 12\n(D) 13",
             'd', '13', "(D) 13"),
            ("Which of the following is the largest and the deepest ocean of the world?",
             "(A) Arctic\n(B) Atlantic\n(C) Pacific\n(D) Indian",
             'c', 'Pacific', "(C) Pacific"),
            ("Which Mughal ruler was called 'Alamgir'?",
             "(A) Aurangzeb\n(B) Jahangir\n(C) Akbar\n(D) Shah Jahan",
             'a', 'Aurangzeb', "(A) Aurangzeb"),
        ],
    }

    def run_level(level):
        # Level banner followed by its five questions.
        print('\nLEVEL ' + str(level) + '!!')
        for number, entry in enumerate(levels[level], 1):
            ask(level, number, *entry)

    run_level(1)
    print("\nDo you want to go next level!!")
    print("\nEnter 'YES' to go next level or 'NO' to exit:")
    if input("Enter your choice:").lower() == 'yes':
        run_level(2)
    else:
        display()

    # display() sets the global flag, so a player who already quit above is
    # not asked about level 3 again.
    if flag == 0:
        print("\nDo you want to go next level!")
        print("\nEnter 'YES' to go next level or 'NO' to exit:")
        if input("Enter your choice:").lower() == 'yes':
            run_level(3)
            print("Congratulations !! You completed all the questions!!")
        else:
            print("\nCongratulations !! You completed all the questions!!")
        display()
def technology():
    """Run the three-level technology quiz.

    Level 1 always runs; levels 2 and 3 are each offered afterwards and the
    player may stop between levels.  Every question prints its text, reads one
    line from stdin and records the outcome through the external
    score(level, correct) helper; display() (defined elsewhere) shows the
    final result.

    BUG FIX vs. the original: the typed answer is lower-cased before
    comparison, but most accepted full-text alternatives were written with
    capital letters (e.g. 'c.Operating System') and therefore could never
    match -- only the bare option letter worked.  The accepted alternatives
    are now stored lower-cased.
    """
    global flag  # presumably set by score()/display() elsewhere -- TODO confirm

    def _ask(level, number, question, options, prompt, answers, reveal):
        # Print one question, read a (case-insensitive) answer and score it.
        print('\nQUIZ :%d' % number)
        print(question)
        print(options)
        ans = input(prompt).lower()
        if ans in answers:
            print("Correct answer!!")
            score(level, 1)
        else:
            print("Wrong answer!!")
            print("correct answer: " + reveal)
            score(level, 0)

    # Each entry: (question, options, input prompt, accepted lower-case
    # answers, text revealed after a wrong answer).
    level1 = [
        ("OS computer abbreviation usually means ?",
         "A.Order of Significance \nB. Open Software \nC. Operating System \nD. Optical Sensor",
         "Enter your answer:",
         ("c", "c.operating system", "c. operating system", "operating system"),
         "c.Operating System"),
        ("MOV extension refers usually to what kind of file?",
         "A. Image file \nB. Animation/movie file \nC. Audio file \nD. MS Office document ",
         "Enter your answer with space:",
         ("b", "b.animation/movie file", "b. animation/movie file", "animation/movie file"),
         "b.Animation/movie file"),
        ("What is part of a database that holds only one type of information?",
         "A. Report \nB. Field \nC. Record \nD. File",
         "Enter your answer with space:",
         ("b", "b.field", "b. field", "field"),
         "b. Field"),
        ("Most modern TV's draw power even if turned off. The circuit the power is used in does what function?",
         "A. Sound \nB. Remote control \nC. Color balance \nD. High voltage",
         "Enter your answer:",
         ("b", "b.remote control", "b. remote control", "remote control"),
         "b. Remote control"),
        ("Which is a type of Electrically-Erasable Programmable Read-Only Memory?",
         "A. Flash \nB. Flange \nC. Fury \nD. FRAM",
         "Enter your answer:",
         ("a", "a.flash", "a. flash", "flash"),
         "a.Flash"),
    ]
    level2 = [
        ("In which decade was the SPICE simulator introduced?",
         "A. 1950s \nB. 1960s \nC. 1970s\nD. 1980s",
         "Enter your answer with space:",
         ("c", "c.1970s", "c. 1970s", "1970s"),
         "c.1970s"),
        ("The purpose of choke in tube light is ?",
         "A. To decrease the current \nB. To increase the current \nC. To decrease the voltage momentarily \nD. To increase the voltage momentarily",
         "Enter your answer:",
         ("d", "d. to increase the voltage momentarily", "to increase the voltage momentarily"),
         "d. To increase the voltage momentarily"),
        ("In the United States the television broadcast standard is...?",
         "A. PAL \nB. NTSC \nC. SECAM \nD. RGB",
         "Enter your answer with space:",
         ("b", "b.ntsc", "b. ntsc", "ntsc"),
         "b.NTSC"),
        ("What do we call a collection of two or more computers that are located within a limited distance of each other and that are connected to each other directly or indirectly?",
         "A. Inernet \nB. Interanet \nC. Local Area Network \nD. Wide Area Network",
         "Enter your answer with space:",
         ("c", "c.local area network", "c. local area network", "local area network"),
         "c.Local Area Network"),
        ("Sometimes computers and cache registers in a foodmart are connected to a UPS system. What does UPS mean?",
         "A. United Parcel Service \nB. Uniform Product Support \nC. Under Paneling Storage \nD. Uninterruptable Power Supply",
         "Enter your answer with space:",
         ("d", "d.uninterruptable power supply", "d. uninterruptable power supply", "uninterruptable power supply"),
         "d.Uninterruptable Power Supply"),
    ]
    level3 = [
        ("Who co-founded Hotmail in 1996 and then sold the company to Microsoft?",
         "A. Shawn Fanning \nB. Ada Byron Lovelace \nC. Sabeer Bhatia \nD. Ray Tomlinson",
         "Enter your answer with space:",
         ("c", "c.sabeer bhatia", "c. sabeer bhatia", "sabeer bhatia"),
         "c.Sabeer Bhatia "),
        ("'.TMP' extension refers usually to what kind of file?",
         "A.Compressed Archive file\nB. Image file \nC. Temporary file \nD. Audio file ",
         "Enter your answer:",
         ("c", "c.temporary file", "c. temporary file", "temporary file"),
         "c.Temporary file "),
        ("In the UK, what type of installation requires a fireman's switch?",
         "A. Neon Lighting \nB. High Pressure Sodium Lighting \nC. Water Features \nD. Hotel Rooms",
         "Enter your answer with space:",
         ("a", "a.neon lighting", "a. neon lighting", "neon lighting"),
         "a.Neon Lighting "),
        ("Who created Pretty Good Privacy (PGP)?",
         "A. Phil Zimmermann\nB. Tim Berners-Lee \nC. Marc Andreessen \nD. Ken Thompson ",
         "Enter your answer with space:",
         ("a", "a.phil zimmermann", "a. phil zimmermann", "phil zimmermann"),
         "a.Phil Zimmermann "),
        ("What do we call a network whose elements may be separated by some distance? It usually involves two or more small networks and dedicated high-speed telephone lines.",
         "A. URL (Universal Resource Locator) \nB. LAN (Local Area Network) \nC. WAN (Wide Area Network) \nD. World Wide Web",
         "Enter your answer with space:",
         ("a", "a.url (universal resource locator)", "a. url (universal resource locator)", "url (universal resource locator)"),
         "a.Universal Resource Locator "),
    ]

    # Level 1 always runs.
    print('\nLEVEL 1!!')
    for number, q in enumerate(level1, 1):
        _ask(1, number, *q)

    # Offer level 2; declining shows the score straight away.
    print("\nDo you want to go next level!!")
    print("\nEnter 'YES' to go next level or 'NO' to exit:")
    opt = input("Enter your choice:").lower()
    if opt == 'yes':
        print('\nLEVEl 2!!')
        for number, q in enumerate(level2, 1):
            _ask(2, number, *q)
    else:
        display()

    # As in the original flow, level 3 is offered whenever flag == 0,
    # even if the player declined level 2 above.
    if flag == 0:
        print("\nDo you want to go next level!")
        print("\nEnter 'YES' to go next level or 'NO' to exit:")
        opt = input("Enter your choice:").lower()
        if opt == 'yes':
            print('\nLEVEl 3!!')
            for number, q in enumerate(level3, 1):
                _ask(3, number, *q)
        print("\nCongratulations !! You completed all the questions!!")
        display()
# Entry point: show the category menu, read a number and run that quiz.
print("Welcome to QUIZ MASTER ! ")
print("press 1 for GENERAL questions\npress 2 for SPORTS questions\npress 3 for TECHNOLOGY questions")
print("Let's start the Quiz!!")
choice = int(input("Enter your choice"))
quizzes = {1: general, 2: sports, 3: technology}
if choice in quizzes:
    quizzes[choice]()
else:
    print("INVALID INPUT ! PLEASE ENTER THE CORRECT INPUT...")
| true |
1256ed0af9c0b289df8ad22a232c5d3f0c1e64a1 | Python | Pshypher/recursive-programming | /chapter_01/exercise_01_06.py | UTF-8 | 309 | 3.9375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 10 07:09:39 2019
@author: Pshypher
"""
# Decomposition: f(n) => n, f(n-1)
def factorial(n):
    """Return n!, computed recursively: n! = n * (n-1)!, with 0! == 1."""
    return 1 if n == 0 else n * factorial(n - 1)
# Test: expected output is 1, 1, 120 (and 362880 from the final print below).
print(factorial(0))
print(factorial(1))
print(factorial(5))
print(factorial(9)) | true |
83d3611e29a1fac2e1c2b1dcb66d2e315ab37fb9 | Python | 5Bears/Wayfarer-Backend | /NearbyAirports.py | UTF-8 | 636 | 2.828125 | 3 | [] | no_license | from APIFunctions import build_url, get_result
from Location import get_latitude, get_longitude
base_url = 'https://api.sandbox.amadeus.com/v1.2/airports/nearest-relevant?'

# NOTE(review): coordinates are hard-coded; the inline comments say they
# should come from get_latitude()/get_longitude() -- confirm before shipping.
latitude = 37.7749 #Replace with function call get_latitude
longitude = -122.4194 #Replace with function call get_longitude

# SECURITY NOTE(review): the API key is committed in source; move it to
# configuration or an environment variable.
query_parameters = [ ('apikey', 't7q7kkgRGApdJFDr40W2pGVVAdk7mt7V'),
                ('latitude', latitude),
                ('longitude', longitude)]

# Module-level network call: this request runs at import time.
airports = get_result(build_url(base_url, query_parameters))
def nearby_airports(airports, max_distance=80):
    """Return the identifiers of airports closer than *max_distance*.

    Args:
        airports: iterable of dicts with at least "distance" and "airport"
            keys (the shape returned by the nearest-relevant endpoint above).
        max_distance: exclusive distance threshold; defaults to the original
            hard-coded 80 so existing callers behave identically.

    Returns:
        list of the "airport" values whose "distance" is below the threshold.
    """
    return [entry["airport"] for entry in airports
            if entry["distance"] < max_distance]
05ad74b9a9d7b92bd1956e87c234eeacd66a05e4 | Python | xuexi321er/NewStock | /test/t_numpy.py | UTF-8 | 222 | 2.625 | 3 | [] | no_license | # -*- coding:utf-8-*-
import numpy as np
def t_create_numpy():
    """Build a small 2x3 integer ndarray and print it (returns None)."""
    matrix = np.array([[1, 2, 3], [4, 5, 6]], dtype='int')
    print(matrix)
if __name__ == '__main__':
    # Demo entry point: print the sample array when run as a script.
    t_create_numpy()
608e066e67e54c0c92f10d0afd629aa896920c1e | Python | juancassioo/python-sistemas | /Atividades_Faculdade/Questao02.py | UTF-8 | 105 | 3.0625 | 3 | [] | no_license | numero1 = 3
# BUG FIX: the original tested (numero1 + 1) % 2 and therefore reported the
# parity of numero1 + 1, printing "par" for odd numbers (e.g. numero1 = 3).
# Test numero1 itself.
if numero1 % 2 == 0:
    print("numero1 par")
else:
    print("numero1 ímpar")
bf179a7b620122a349e3af9201280c56a41fbfd6 | Python | KZH001/huihui | /KPython/wenjian/open1.py | UTF-8 | 350 | 2.53125 | 3 | [] | no_license | # coding=utf-8
import sys
with open('file1','r+') as f:
    # Python 2 syntax: "print >> fileobj, ..." redirects the print output
    # into the given file object.
    print >>f,"光芒万丈,魅力演说"
# Standard input (commented-out example)
# print >>sys.stdin,"光芒万丈,魅力演说"
# Standard output: print the text to the terminal
# print >>sys.stdout,"光芒万丈,魅力演说"
# Standard error
# print >>sys.stderr,"光芒万丈,魅力演说"
| true |
8e2cab858cbce32fa34e1d74d2a378505efc55b0 | Python | tucpy/basic_python | /Chap 5/Bai_5.5.py | UTF-8 | 924 | 3.453125 | 3 | [] | no_license | n = int(input('Nhập n: '))
# Accumulators, each paired with a display string that keeps the trailing
# separator exactly like the original output:
#   A = sum of odd i, B = sum of even i, C = product of all i,
#   D = product of multiples of 3, E = sum of primes (exactly 2 divisors).
A = 0
chuoi_A = ''
B = 0
chuoi_B = ''
C = 1
chuoi_C = ''
D = 1
chuoi_D = ''
E = 0
chuoi_E = ''
for i in range(1, n + 1):
    if i % 2 != 0:
        A += i
        chuoi_A += str(i) + ' + '
    else:
        B += i
        chuoi_B += str(i) + ' + '
    C *= i
    chuoi_C += str(i) + ' * '
    if i % 3 == 0:
        D *= i
        chuoi_D += str(i) + ' * '
    # i is prime exactly when it has two divisors (so 1 is excluded).
    so_uoc = sum(1 for j in range(1, i + 1) if i % j == 0)
    if so_uoc == 2:
        E += i
        chuoi_E += str(i) + ' + '
# Print the results.
print('A =', chuoi_A, '=', A)
print('B =', chuoi_B, '=', B)
print('C =', chuoi_C, '=', C)
print('D =', chuoi_D, '=', D)
print('E =', chuoi_E, '=', E)
712f02aff7c718b6a708b868e60213341878bdd3 | Python | kakuLeaner/HiloGame | /HiLo.Game1.py | UTF-8 | 1,232 | 3.921875 | 4 | [] | no_license |
# Kakungulu, P
# HiLoGame
# introduce game
print('You will give the program the max number then you will guess the number')
print('*'*50 + '\n')
print('Give a maximum number then guess my number')
print('*'*50 + '\n')
import random
choiceMade = "y"
while choiceMade.lower() == 'y':
# Ask user for Max number
maxNum = int(input('What should the maximum number for this game be? '))
print('\n')
# computer guess
randomNum = random.randint(1, maxNum)
guessNum = int(input('Guess my number: '))
# user guesses wrong
while guessNum != randomNum:
if guessNum < randomNum:
print('Your guess is too low.')
print('\n')
if guessNum > randomNum:
print('You guess is too high.')
print('\n')
guessNum = int(input('Guess my number: '))
#if user guesses right
if guessNum == randomNum:
print('You guessed my number!')
print('\n')
# ask user to play again.
choiceMade = input('Do you wish to play again?'' ''(Y/N): ')
print('\n')
#user ends playing
choiceMade.lower() == 'n'
print('Thank you for playing!')
print(input('\n\nHit Enter to Close\n'))
| true |
92abf485ce49219ec6a3e9af9a624412a3597418 | Python | Aasthaengg/IBMdataset | /Python_codes/p04030/s576914749.py | UTF-8 | 139 | 2.984375 | 3 | [] | no_license | s = input()
# Build the final string with a stack: 'B' deletes the most recently kept
# character (if any); every other character is kept.
stack = []
for ch in s:
    if ch == 'B':
        if stack:
            stack.pop()
    else:
        stack.append(ch)
print(''.join(stack))
609c8864556faada8a05670c43468a4cae8c38ae | Python | ruipgil/fullstack-coding-challenge | /test_shinarnews.py | UTF-8 | 864 | 2.609375 | 3 | [] | no_license | import os
import unittest
import tempfile
import shinarnews.db as db
from shinarnews.Story import Story
from shinarnews.shinarnews import get_top_stories, retrieve_stories, STORIES_PER_PAGE
class TestShinarnews(unittest.TestCase):
    """Integration tests: these exercise the live story API through
    shinarnews and the local db module."""

    def test_get_top_stories(self):
        # Fetching must leave exactly one page of integer story ids in the DB.
        get_top_stories()
        ids = db.previous_top_stories()
        self.assertEqual(STORIES_PER_PAGE, len(ids))
        for story_id in ids:
            self.assertTrue(isinstance(story_id, int))

    def test_retrieve_stories(self):
        # NOTE(review): relies on ids already stored in the DB (e.g. by the
        # fetch test or a previous run) -- confirm test ordering assumptions.
        ids = db.previous_top_stories()
        stories = retrieve_stories(ids)
        self.assertTrue(isinstance(stories, list))
        self.assertEqual(len(ids), len(stories))
        for story in stories:
            self.assertTrue(isinstance(story, Story))
# Allow running this test module directly with `python test_shinarnews.py`.
if __name__ == '__main__':
    unittest.main()
| true |
412f4657194ce1eb20125cf910649c802dd918e9 | Python | medabkasm/autoserre | /python/multi_client/drive_deployement.py | UTF-8 | 2,061 | 2.8125 | 3 | [] | no_license | from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from datetime import datetime
import random
import time
from colors import *
# NOTE(review): module-level scratch globals; nothing in this module reads
# them -- confirm they are not imported elsewhere before removing.
i = 0
data = ""
class Deployement:  # class responsible for google drive api
    """Thin wrapper around PyDrive used to authenticate and upload files.

    Every method reports progress with colored console output (color
    constants come from the project's `colors` module) and returns -1 on
    failure instead of raising.
    """

    def __init__(self):
        self.data = ''
        self.drive = None      # GoogleDrive handle, set by drive_auth()
        self.dataFile = None   # GoogleDriveFile handle, set by set_data()

    def drive_auth(self):
        """Authenticate via the settings.yaml / local-webserver flow.

        Returns the GoogleDrive handle on success, -1 on failure.
        """
        try:
            gauth = GoogleAuth()
            # Create local webserver and auto handles authentication.
            gauth.LocalWebserverAuth()
            self.drive = GoogleDrive(gauth)
            # BUG FIX: this success message used to sit after the return
            # statement and was therefore unreachable.
            print(CGREEN+"Authentication with api is done successfully"+CEND)
            return self.drive
        except Exception as err:
            print(CRED+"Error :: api authentication failed :: {}".format(str(err))+CEND)
            return -1

    def set_data(self, filePath, title=''):
        """Attach a local file (and optional Drive title) for uploading.

        `title` convention from the original: e.g. HUM%_date.txt or
        TEMP_date.csv.  Returns 1 on success, -1 on failure.
        """
        if filePath:
            self.filePath = filePath
        else:
            print(CRED+"Error :: invalid file path {}".format(filePath)+CEND)
            return -1
        try:
            self.dataFile = self.drive.CreateFile()
        except Exception as err:
            print(CRED+"Error :: cannot create file :: {}".format(str(err))+CEND)
            return -1
        if title:
            self.dataFile['title'] = title
        try:
            self.dataFile.SetContentFile(self.filePath)
            print(CGREEN+"data file setted successfully for drive uploading"+CEND)
            return 1
        except Exception as err:
            print(CRED+"Error :: cannot set data properly :: {}".format(str(err))+CEND)
            return -1

    def upload_file(self):
        """Upload the previously attached file. Returns 1 or -1."""
        try:
            self.dataFile.Upload()
            print(CGREEN+"File {} uploaded successfully".format(self.filePath)+CEND)
            return 1
        except Exception as err:
            # BUG FIX: the failure branch referenced the undefined name CGRED
            # (a NameError masking the real upload error); use CRED like the
            # other error paths.
            print(CRED+"Error :: file {} cannot be uploaded :: {}".format(self.filePath, str(err))+CEND)
            return -1
| true |
6d3828988c60ed1e83099e5d320bade3c5ec9ff6 | Python | bnpy/bnpy | /bnpy/datasets/zzz_unsupported/HashtagK9.py | UTF-8 | 3,646 | 3.0625 | 3 | [
"BSD-3-Clause"
] | permissive | '''
HashtagK9.py
Simple toy dataset of 9 Gaussian components with diagonal covariance structure.
Generated data form a "hashtag"-like shapes when plotted in 2D.
'''
import scipy.linalg
import numpy as np
from bnpy.data import XData
# User-facing fcns
###########################################################
def get_data(seed=8675309, nObsTotal=25000, **kwargs):
    '''Build the toy "hashtag" dataset as a bnpy XData object.

    Args
    -------
    seed : integer seed for the random number generator used to
        actually *generate* the data.
    nObsTotal : total number of observations in the dataset.

    Returns
    -------
    Data : bnpy XData object with nObsTotal observations.
    '''
    X, TrueZ = generate_data(seed, nObsTotal)
    dataset = XData(X=X, TrueZ=TrueZ)
    dataset.name = get_short_name()
    dataset.summary = get_data_info()
    return dataset
def get_data_info():
    """One-line human-readable dataset summary."""
    return 'Hashtag Toy Data. Ktrue={}. D={}.'.format(K, D)
def get_short_name():
    """Short identifier used as the dataset's name."""
    return 'HashtagK9'
# Create weights w
###########################################################
K = 9   # number of mixture components
D = 2   # data dimension
wExtra = 0.05                    # mass of the wide background component
wH = 3. / 5 * (1.0 - wExtra)     # total mass of the 4 horizontal bars
wV = 2. / 5 * (1.0 - wExtra)     # total mass of the 4 vertical bars
# Component weights: four horizontal bars, four vertical bars, one background.
w = np.asarray([wH / 4] * 4 + [wV / 4] * 4 + [wExtra])
assert np.allclose(np.sum(w), 1.0)
# Create means Mu
###########################################################
# Component means: 4 horizontal-bar centers, 4 vertical-bar centers, origin.
_rawMu = [(-4, -1), (-4, 1), (4, -1), (4, 1),
          (-5, 0), (-3, 0), (3, 0), (5, 0), (0, 0)]
Mu = np.asarray(_rawMu, dtype=np.float64)
# Break the symmetry: shift left-side means down and right-side means up.
Mu[Mu[:, 0] > 0, 1] += 0.5
Mu[Mu[:, 0] < 0, 1] -= 0.5
# Create covars Sigma
###########################################################
# Diagonal covariances: long-thin horizontal bars (k < 4), long-thin vertical
# bars (4 <= k < 8) and one very wide background component (k == 8); each
# covariance is pre-factored with its lower Cholesky for fast sampling.
Vmajor = 2.0
Vminor = Vmajor / 100
SigmaHoriz = np.asarray([[Vmajor, 0],
                         [0, Vminor]])
SigmaVert = np.asarray([[Vminor, 0],
                        [0, Vmajor]])
SigmaExtra = np.asarray([[25 * Vmajor, 0],
                         [0, Vmajor]])
Sigma = np.zeros((K, D, D))
cholSigma = np.zeros((K, D, D))
for k in range(K):
    if k < 4:
        Sigma[k] = SigmaHoriz
    elif k < 8:
        Sigma[k] = SigmaVert
    else:
        Sigma[k] = SigmaExtra
    cholSigma[k] = scipy.linalg.cholesky(Sigma[k], lower=True)
# Generate Raw Data
###########################################################
def generate_data(seed, nObsTotal):
    """Draw nObsTotal points from the mixture.

    Returns (X, TrueZ) with the rows shuffled so examples from different
    components are interleaved.
    """
    PRNG = np.random.RandomState(seed)
    Npercomp = PRNG.multinomial(nObsTotal, w)
    # Draw every component's block first (same PRNG call order as before),
    # then build the matching labels, then shuffle both arrays jointly.
    X = np.vstack([sample_data_from_comp(k, Npercomp[k], PRNG)
                   for k in range(K)])
    TrueZ = np.hstack([k * np.ones(Npercomp[k]) for k in range(K)])
    permIDs = PRNG.permutation(X.shape[0])
    return X[permIDs], TrueZ[permIDs]
def sample_data_from_comp(k, Nk, PRNG):
    """Draw Nk points from component k: Mu[k] + L_k^T-transformed white noise."""
    whiteNoise = PRNG.randn(D, Nk)
    return Mu[k, :] + np.dot(cholSigma[k].T, whiteNoise).T
# Visualize clusters
###########################################################
def plot_true_clusters():
    """Overlay one 2D Gaussian contour per true component, cycling colors."""
    from bnpy.viz import GaussViz
    nColors = len(GaussViz.Colors)
    for k in range(K):
        GaussViz.plotGauss2DContour(Mu[k], Sigma[k],
                                    color=GaussViz.Colors[k % nColors])
# Main
if __name__ == "__main__":
from matplotlib import pylab
pylab.figure()
X, TrueZ = generate_data(42, 10000)
pylab.plot(X[:, 0], X[:, 1], 'k.')
plot_true_clusters()
pylab.axis('image')
pylab.show(block=True)
| true |
6ab5ef4b3ae02f1ba582820887b8fdc5bcb3589f | Python | ToninALV/Curso-em-video-Python | /ex040.py | UTF-8 | 582 | 3.75 | 4 | [] | no_license | print('Digite as notas de sua prova Bimestral e sua Avaliação Somativa.')
# Read the two grades and average them.
somativa = float(input('Avaliação Somativa: '))
bimestral = float(input('Prova Bimestral: '))
media = (somativa + bimestral) / 2
# Classify: >= 7 approved, < 5 failed, otherwise make-up exam.
if media >= 7:
    print('\033[1;32mAPROVADO\033[1;32m')
    print('Sua média foi de {:.1f}'.format(media))
elif media < 5:
    print('\033[1;31mREPROVADO\033[1;31m')
    print('Sua média foi de {:.1f}'.format(media))
elif 5 <= media < 7:
    print('\033[1;36mRECUPERAÇÃO\033[1;36m')
    print('Sua média foi de {:.1f}'.format(media))
| true |
2685f79e84b22d902c6c46222e6b06d6b8077767 | Python | AFFL-AI/data_operations | /Data-preprocessing/File_extension_conversion/train_test_split_for_img_classification/split_train_valid.py | UTF-8 | 1,794 | 2.796875 | 3 | [] | no_license | # Import modules
import os
import random
import shutil
#from shutil import movefile
# Set up the output folder structure if it does not exist yet.
# BUG FIX: the original only created data/train and data/validation inside the
# `else` branch, i.e. when 'data' already existed; on a fresh run only 'data'
# itself was created.  Create all three unconditionally.
for _folder in ('data', 'data/train', 'data/validation'):
    if not os.path.exists(_folder):
        os.makedirs(_folder)

# Get the class subdirectories in the main image folder.
img_source_dir = './images'
subdirs = [subdir for subdir in os.listdir(img_source_dir) if os.path.isdir(os.path.join(img_source_dir, subdir))]

train_size = 0.90  # fraction of images assigned to the training split

for subdir in subdirs:
    subdir_fullpath = os.path.join(img_source_dir, subdir)
    train_subdir = os.path.join('data/train', subdir)
    validation_subdir = os.path.join('data/validation', subdir)

    # Mirror the class subdirectory inside both splits.
    if not os.path.exists(train_subdir):
        os.makedirs(train_subdir)
    if not os.path.exists(validation_subdir):
        os.makedirs(validation_subdir)

    train_counter = 0
    validation_counter = 0

    # Randomly assign each image to the train or validation folder,
    # renaming it to a running counter while keeping its extension.
    for filename in os.listdir(subdir_fullpath):
        if filename.endswith(".jpg") or filename.endswith(".png"):
            # BUG FIX: use rsplit so names containing extra dots keep the real
            # extension (the original split('.')[1] picked the wrong part).
            extension = filename.rsplit('.', 1)[1]
            if random.uniform(0, 1) <= train_size:
                shutil.move(os.path.join(subdir_fullpath, filename), os.path.join(train_subdir, str(train_counter) + '.' + extension))
                train_counter += 1
            else:
                shutil.move(os.path.join(subdir_fullpath, filename), os.path.join(validation_subdir, str(validation_counter) + '.' + extension))
                validation_counter += 1
0f3af3603d9ad8c08e2c966ea8d07e66e22408b7 | Python | SIGISLV/HedgeTools | /HedgeTools/Script/ACaract_Obj/CNature_fonc/RoleHydro_v3.py | UTF-8 | 6,926 | 2.53125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: RoleHydro
# Purpose: A travers une bibliothèque de foctions cherche les candidats de l'entite de proximité
# qui suivent l'entité de référence.
# Attention il se base sur le FID ou OBJECTID.
#
# Author: Villierme
#
# Created: 15/07/2013
# Copyright: (c) Villierme 2013
# Licence: <your licence>
#-------------------------------------------------------------------------------
import arcpy, math, os
from arcpy import env
# bibliothèque de fonctions :
# *********************************************************************
def ChercheChampID(InFeature):
    """Return the name of the identifier field of a feature class.

    Prefers a field literally named FID or OBJECTID; otherwise falls back to
    the first field whose name contains "ID".  Returns None when no candidate
    field exists.
    """
    listField = arcpy.ListFields(InFeature)
    # BUG FIX: champId was only assigned inside the loop, so the
    # `if not champId` test below raised UnboundLocalError whenever neither
    # FID nor OBJECTID was present.  Initialize it first.
    champId = None
    # Walk the field list looking for the canonical id fields.
    for field in listField:
        if field.name == "FID":
            champId = field.name
        elif field.name == "OBJECTID":
            champId = field.name
    # Fallback: any field whose name contains "ID".
    if not champId:
        for field in listField:
            if "ID" in field.name:
                champId = field.name
    return champId
def FeatureToDictionaire(InFeature, InIdField):
    """ Convert a feature class into a dictionary keyed by InIdField; each
    value holds the feature geometry and the angle of the segment joining
    its first and last points. """
    scur, dFeature = arcpy.SearchCursor(InFeature), {}
    for row in scur:
        g=arcpy.Geometry()
        g=row.Shape
        dFeature[row.getValue(InIdField)]={"geometrie": g,"angle":CalculerAngle(g.firstPoint,g.lastPoint)}
    # NOTE(review): `del row` raises NameError if the cursor is empty -- confirm
    # inputs always contain at least one feature.
    del row, scur
    # Return the dictionary.
    return dFeature
def Proximite(InFeature,ProxiFeature, inametable):
    """ Generate the proximity (near) table between two feature classes and
    return its rows as a dictionary keyed by the near-table OBJECTID. """
    # GenerateNearTable_analysis (in_features, near_features, out_table, {search_radius}, {location}, {angle}, {closest}, {closest_count})
    # Search radius 15, keep up to 3 nearest candidates per input feature.
    arcpy.GenerateNearTable_analysis(InFeature, ProxiFeature, inametable, 15,"","","ALL",3 )
    # Walk the generated table:
    scur, dProxi = arcpy.SearchCursor(inametable), {}
    # Copy each table row into the dictionary:
    for row in scur:
        InFid=row.getValue("IN_FID")
        NearFid=row.getValue("NEAR_FID")
        NearDist=row.getValue("NEAR_DIST")
        IdNearTable=row.getValue("OBJECTID")
        dProxi[IdNearTable]={"IN_FID":InFid, "NEAR_FID":NearFid, "NEAR_DIST":NearDist}
    del row, scur
    # Return the dictionary:
    return dProxi
def ChercherSuiveur(InFeatureRef, InFeatureFollow, InDictTableProxi, inAngle):
    """ Find the reference features that "follow" nearby features, i.e. whose
    segment angle is within +/- inAngle degrees of the nearby feature's angle.
    Returns the list of matching reference feature ids. """
    # Get the id field of the reference feature class and build a dictionary.
    inRefId=ChercheChampID(InFeatureRef)
    dFeatRef=FeatureToDictionaire(InFeatureRef, inRefId)
    # Get the id field of the followed feature class and build a dictionary.
    inFollowId=ChercheChampID(InFeatureFollow)
    dFeatFollow=FeatureToDictionaire(InFeatureFollow,inFollowId)
    # Dictionary built from the proximity table:
    dProche=InDictTableProxi
    # Walk the proximity table.
    lfeatRefFollow=[]
    for proche in dProche:
        # Get the identifiers to compare:
        IdRef=dProche[proche]["IN_FID"]
        IdProche=dProche[proche]["NEAR_FID"]
        RefAngle=dFeatRef[IdRef]["angle"]
        FollowAngle=dFeatFollow[IdProche]["angle"]
        # Compare the two angles within the tolerance:
        if RefAngle-inAngle<FollowAngle<RefAngle+inAngle:
            lfeatRefFollow.append(IdRef)
    # Return the list of "follower" feature ids.
    return lfeatRefFollow
def UpdateCopyRef(inFeature,inliste,inFeatureOut):
    """ Copy the reference feature class to inFeatureOut, add a text field
    "RHydro" and set it to "oui"/"non" depending on whether each feature id
    is in inliste. Returns inFeatureOut. """
    # Make a copy of the reference input table:
    arcpy.Copy_management(inFeature,inFeatureOut)
    # Add the result field:
    arcpy.AddField_management(inFeatureOut,"RHydro", "TEXT")
    idChamp=ChercheChampID(inFeatureOut)
    # Update the copied table from the input id list:
    ucur=arcpy.UpdateCursor(inFeatureOut)
    for row in ucur:
        if row.getValue(idChamp) in inliste:
            row.setValue("RHydro","oui")
            ucur.updateRow(row)
        else:
            row.setValue("RHydro","non")
            ucur.updateRow(row)
    return inFeatureOut
def CalculerAngle(InfirstPoint, InlastPoint):
    """Return the bearing (degrees) of the segment from InfirstPoint to
    InlastPoint, folded into [0, 180) so opposite directions compare equal.

    The points only need .X and .Y attributes.
    """
    angle = math.atan2(InlastPoint.X - InfirstPoint.X,
                       InlastPoint.Y - InfirstPoint.Y) * (180 / math.pi)
    # atan2 yields values in (-180, 180]; shift negatives up by 180 degrees
    # to fold everything into the positive half-range for comparison.
    return angle + 180 if angle < 0 else angle
def HydroRole(polyhaid,Hydro,axehaie,output, geodata):
    """ Flag hedgerow axes that follow the hydrological network: copies the
    axis feature class to `output` with a "RHydro" field set to oui/non.
    polyhaid = hedgerow footprint polygons, Hydro = hydro network lines,
    axehaie = hedgerow median axis lines, geodata = workspace path. """
    # Environment settings:
    arcpy.env.workspace = geodata
    arcpy.env.overwriteOutput = True
    # Build in-memory layers:
    arcpy.MakeFeatureLayer_management(polyhaid,"LayPoly")
    arcpy.MakeFeatureLayer_management(Hydro, "LayHydro")
    arcpy.MakeFeatureLayer_management(axehaie, "LayAxehaie")
    # Select hedgerow footprints intersecting the hydro network.
    arcpy.SelectLayerByLocation_management("LayPoly","INTERSECT","LayHydro")
    # Select the hedgerow axes intersecting those footprints.
    arcpy.SelectLayerByLocation_management("LayAxehaie", "INTERSECT", "LayPoly")
    # Split the hydro network into segments to get per-segment orientation.
    arcpy.SplitLine_management(Hydro, "splitHydro")
    # Build the proximity dictionary between axes and hydro segments.
    dProxi=Proximite("LayAxehaie", "splitHydro", "dProxi")
    # ChercherSuiveur returns the ids of axes following a hydro segment
    # (angle tolerance of 45 degrees).
    lFollow=ChercherSuiveur("LayAxehaie","splitHydro",dProxi,45)
    # Copy the axis class, add the "RHydro" column and set oui/non per id.
    UpdateCopyRef(axehaie,lFollow,output)
    # Return the result:
    return output
if __name__ == "__main__":
    # ***************************************************************************************************
    # Script-tool entry point: inputs come from the ArcGIS tool parameters.
    polyhaid = arcpy.GetParameterAsText(0)
    Hydro = arcpy.GetParameterAsText(1)
    axehaie = arcpy.GetParameterAsText(2)
    output = arcpy.GetParameterAsText(3)
    # The workspace is the folder containing the output feature class:
    workspace, nom = os.path.split(output)
    # Run the tool:
    HydroRole(polyhaid,Hydro, axehaie, output, workspace)
| true |
a0008a0628dbdb132d8baca655000669eb8d3413 | Python | kimxoals/Algorithm_Practice | /10/namu_1006.py | UTF-8 | 123 | 3.453125 | 3 | [] | no_license | def seq(n):
    # Recurrence: seq(n) = seq(n // 2) + seq(n - 1), with seq(0) = seq(1) = 1.
    if n >= 2:
        return seq(n//2) + seq(n-1)
    else:
        return 1
# Read n from stdin and print seq(n).
n = int(input())
print(seq(n))
| true |
a702f672d6c7af95296a8c4dfa713dfc92b05a14 | Python | chrismomdjian/Simple-Scraper | /scrape.py | UTF-8 | 413 | 3.8125 | 4 | [] | no_license | # Practicing using Beautiful Soup!
import bs4 as bs
import urllib.request
# Fetch the page and list every anchor's href, numbered from 1.
specified_url = input("Enter a url to check for links: ")
sauce = urllib.request.urlopen(specified_url).read()
soup = bs.BeautifulSoup(sauce, "lxml")
anchors = soup.find_all("a")
for number, anchor in enumerate(anchors, start=1):
    print(number, ": ", anchor.get("href"), sep="")
print("Total links: ", len(anchors), sep="")
| true |
c2c4c6548e94d366baf3d1b79d13028ae7fc22f0 | Python | kangnamQ/I_Do | /code/Montyhall4.py | UTF-8 | 885 | 3.46875 | 3 | [] | no_license | from random import randint
def monty_hall():
    """Simulate one Monty Hall round with doors 0-2.

    Returns a pair of booleans: (first choice won, switched choice won).
    """
    car = randint(0, 2)
    first_choice = randint(0, 2)
    # The host opens a door that hides neither the car nor the first pick.
    opened_door = car
    while opened_door in (car, first_choice):
        opened_door = randint(0, 2)
    # The switched pick is the remaining closed door.
    second_choice = first_choice
    while second_choice in (first_choice, opened_door):
        second_choice = randint(0, 2)
    return car == first_choice, car == second_choice
hits_of_first = hits_of_second = 0
total = 10000
# Tally wins over `total` simulated rounds; a round credits the switching
# strategy only when the staying strategy did not win.
for _ in range(total):
    stay_won, switch_won = monty_hall()
    if stay_won:
        hits_of_first += 1
    elif switch_won:
        hits_of_second += 1
print(f"first: {hits_of_first}/{total} = {hits_of_first / total}")
print(f"second: {hits_of_second}/{total} = {hits_of_second / total}")
#출처: https: // comdoc.tistory.com / entry / Monty - Hall - problem[ComDoc]
| true |
e87357437dde38d3e7d198a0987b1da27024a1f9 | Python | weiHelloWorld/IE598_project | /plot_score_vs_step.py | UTF-8 | 1,034 | 2.828125 | 3 | [] | no_license | import argparse
import matplotlib.pyplot as plt
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument("file", type=str)
parser.add_argument("--fig_name", type=str, default='plot.png')
parser.add_argument("--data_file", type=str, default = None)
args = parser.parse_args()

running_reward = []
num_of_steps = []
# Extract "step #N, ... running_reward = R," pairs from the log file.
with open(args.file, 'r') as in_f:
    for line in in_f:
        if 'running_reward' in line:
            num_of_steps.append(int(line.split('step #')[1].split(',')[0]))
            running_reward.append(float(line.split('running_reward = ')[1].split(',')[0]))
assert (len(running_reward) == len(num_of_steps))

# Scatter the running reward against the step count.
fig, ax = plt.subplots()
ax.scatter(num_of_steps, running_reward)
# Default the data path to the log name with .out swapped for .txt.
data_file = args.data_file if args.data_file is not None else args.file.replace('.out', '.txt')
np.savetxt(data_file, np.vstack([num_of_steps, running_reward]))
ax.set_xlabel('number of steps')
ax.set_ylabel('running average of score')
fig.savefig(args.fig_name)
| true |
8d8a8d67883cc32fda14631634fc7bd5700314da | Python | sujata-c/Evaluation-Criteria-Assignment-2 | /tests/test_Employee.py | UTF-8 | 687 | 2.953125 | 3 | [] | no_license | import pytest
from datetime import datetime
from unittest.mock import Mock
from modules.Employee import Employee
# NOTE(review): `mock` is never used in this module -- confirm before removing.
mock = Mock()
# Shared Employee instance exercised by every test below.
employee = Employee()
@pytest.fixture
def records():
    """One sample employee row: [id, first name, last name, join date, years]."""
    return [101, "Rahul", "Kumar", datetime(2020, 9, 8), 5]
# NOTE(review): the update/delete tests assume the insert test (or a previous
# run) already created row 101 -- confirm test ordering.  insert is expected
# to echo the first name; update/delete to report one affected row.
def test_insert_employee(records):
    assert employee.insert_table_values(records[0], records[1], records[2], records[3], records[4]) == records[1]

def test_update_employee():
    assert employee.update_record(101, 5) == 1

def test_delete_employee():
    assert employee.delete_record(101) == 1
| true |
da5850ac756600299d28b19c4b9458a780073eb5 | Python | deepaksaini0908/project | /djangoproject/calc/views.py | UTF-8 | 840 | 2.53125 | 3 | [] | no_license | from django.shortcuts import render
from django.http import HttpResponse
def home(request):
    # Landing page; passes the author's name into the template context.
    return render(request,'home.html',{'name':'Deepak Saini'})
def add(request):
    """Apply the operation selected by POST num3 to POST num1 and num2.

    Choices: 1 add, 2 subtract, 3 divide, 4 multiply, 5 power; any other
    value renders an error message.  Renders result.html with the rounded
    result and the operation label.
    """
    a = int(request.POST['num1'])
    b = int(request.POST['num2'])
    choice = int(request.POST['num3'])
    if choice == 1:
        result = a + b
        re1 = "addition"
    elif choice == 2:
        result = a - b
        re1 = 'Subtraction'
    elif choice == 3:
        # NOTE(review): b == 0 still raises ZeroDivisionError -- confirm the
        # form prevents it or add handling.
        result = a / b
        re1 = 'Divide'
    elif choice == 4:
        result = a * b
        re1 = 'Multiply'
    elif choice == 5:
        result = a ** b
        re1 = 'Power'
    else:
        # BUG FIX: the original left `re1` unbound here and then crashed on
        # round('Invalid choice', 2); render the error message directly.
        return render(request, 'result.html',
                      {'result': 'Invalid choice', 'choice': 'Invalid choice'})
    return render(request, 'result.html', {'result': round(result, 2), 'choice': re1})
def image(request):
    # Demo page rendered with a fixed value in the template context.
    demo_value = 23
    return render(request, 'image.html', {'result': demo_value})
| true |
2cf216cac5697e60f27e4f24bbdf5932f1970cd9 | Python | seongjin571/BigData_source | /python_source/facebook/numerisID요청.py | UTF-8 | 667 | 2.703125 | 3 | [] | no_license | import sys
import urllib.request
import json
# Resolve a Facebook page's numeric ID from its page name via the Graph API.
if __name__ == '__main__':
    page_name="sejongbamboo"
    # NOTE(review): app credentials are hard-coded in source — they should be
    # moved to configuration/environment and the secret rotated.
    app_id="174720823286694"
    app_secret="88c7a1323cbe3ebb2d470c24c0d75cab"
    # An "app access token" is simply "<app_id>|<app_secret>".
    access_token=app_id+"|"+app_secret
    base="http://graph.facebook.com/v2.11"
    node="/"+page_name
    parameters="/?access_token=%s"%access_token
    url=base+node+parameters
    req=urllib.request.Request(url)
    try:
        response=urllib.request.urlopen(req)
        if response.getcode()==200:
            # The Graph API answers with a JSON object that includes the id.
            data=json.loads(response.read().decode('utf-8'))
            page_id=data['id']
            print("%s Facebook Numeric ID : %s"%(page_name,page_id))
    except Exception as e:
        # Network/HTTP/JSON failures are reported, not re-raised.
        print(e)
| true |
63f9582bee6b77673ec3c8c02100c52d8280b5d8 | Python | tylercrosse/DATASCI400 | /lab/06/L06-A-2-JSONObjectinPython.py | UTF-8 | 684 | 3.78125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
# UW Data Science
# Please run code snippets one at a time to understand what is happening.
# Snippet blocks are sectioned off with a line of ####################
"""
"""
Example script for JSON assessor methods
"""
import json
# create JSON object
json_data = '{"name":"Steven", "city":"Seattle"}'
#notice the new variable in the explorer.
####################
# convert JSON object to python dictionary with json.loads()
python_obj = json.loads(json_data)
# notice the new variable in the explorer
####################
# print dictionary values by keys
print(python_obj["name"])  # -> Steven
print(python_obj["city"])  # -> Seattle
################## | true |
636674239a22c3e38d924e98b950ee849eb6034e | Python | bbig3831/growth-curves | /final_scripts/plotters.py | UTF-8 | 4,355 | 3.15625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
def makeCountrySubplots(artDf, countries, model_dict, output_path, show_plot=False):
    """
    Function to make Figure 2, a 3x2 subplot of country-level regressions
    :param artDf: Pandas dataframe with ART coverage data
    :param countries: List of 6 countries to plot
    :param model_dict: Dictionary of lmfit.Model.ModelResult objects
    :param output_path: Output path for PNG of subplots
    :param show_plot: Show plot if true (useful for debugging)
    :return:
    """
    sns.set()
    f, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2, sharex='none', figsize=(13, 15))
    axes = [ax1, ax2, ax3, ax4, ax5, ax6]
    sns.set_style('darkgrid')
    # Prediction grid: years 2000..2025 expressed as offsets from 2000.
    t2 = np.asarray(range(0, 26))
    for ax, country in zip(axes, countries):
        # ART coverage data
        df = artDf[artDf['Country Code'] == country]
        # Get time values
        # Year columns ('2000', '2001', ...) become integer offsets from 2000.
        t = [int(col)-2000 for col in df if col.startswith('20')]
        tStr = [col for col in df if col.startswith('20')]
        y = df[tStr].values.tolist()[0]
        # Observed data as blue squares; fitted curves in red (Gompertz)
        # and dashed green (logistic).
        ax.plot(t, y, 'bs')
        ax.plot(t2, model_dict[country]['gompertz'].eval(t=t2), 'r-')
        ax.plot(t2, model_dict[country]['logistic'].eval(t=t2), 'g--')
        ax.set_xlim([0, 20])
        start, end = ax.get_xlim()
        ax.xaxis.set_ticks(np.arange(start, end + 1, 5))
        ax.xaxis.set_ticklabels(['2000', '2005', '2010', '2015', '2020'], fontsize=12)
        ax.set_xlabel(df['Country Name'].iloc[0], fontsize=13)
        # Only the left-hand column of subplots gets a y-axis label.
        if ax in [ax1, ax3, ax5]:
            ax.set_ylabel('% ART Coverage', fontsize=13)
        plt.rc('ytick', labelsize=12)
    ax4.legend(['Data', 'Gompertz', 'Logistic'], loc='center left', bbox_to_anchor=(1, 0.5), fontsize=13)
    if output_path:
        plt.savefig(output_path, bbox_inches='tight', pad_inches=0.5)
    if show_plot:
        plt.show()
    # Clear figure
    plt.clf()
# Add function for making caterpillar plots
def makeCaterpillarPlot(df, metric_dict, xlabel, output_path, sort_list=None, show_plot=False):
    """
    Make caterpillar plot of estimated parameter values.
    :param df: Melted dataframe for single group of metrics
    :param metric_dict: Dict of metric names/labels to use for melting/plotting
    :param xlabel: Label for x-axis
    :param output_path: Path to store output graph
    :param sort_list: List of booleans for ascending sort in strip plot
                      (defaults to [True, True])
    :param show_plot: (Optional) Show output graph prior to storing
    :return:
    """
    # TODO: add error bars around estimates
    # TODO: jitter values by category
    # Fix: the default used to be the mutable literal [True, True], which is
    # created once and shared across all calls; use a None sentinel instead.
    if sort_list is None:
        sort_list = [True, True]
    meltedDf = pd.melt(df, id_vars='Country Name', value_vars=metric_dict.keys(), var_name='Metric', value_name='Value')
    meltedDf.replace({'Metric':metric_dict}, inplace=True)
    sns.set()
    ax = sns.stripplot(x='Value', y='Country Name',
                       data=meltedDf.sort_values(by=['Metric', 'Value'], ascending=sort_list), hue='Metric', size=9)
    fig = ax.get_figure()
    ax.set_ylabel('')
    ax.set_xlabel(xlabel, fontsize=16)
    fig.set_size_inches((10, 12))
    ax.tick_params(axis='y', labelsize=13)
    ax.tick_params(axis='x', labelsize=16)
    ax.legend(fontsize=14, loc=7, frameon=True, facecolor='white')
    if show_plot:
        plt.show()
    if output_path:
        fig.savefig(output_path, dpi=300, bbox_inches='tight')
    # Clear figure
    fig.clf()
def makeBICPlot(df, output_path, show_plot=False):
    """
    Makes Figure 5, a sorted graph of the difference in BIC values between the Gompertz and logistic regressions
    :param df: Dataframe with delta_BIC data
    :param output_path: Path to store output graph
    :param show_plot: (Optional) Show output graph prior to storing
    :return:
    """
    sns.set()
    # One point per country, sorted so the largest delta_BIC sits at the top.
    ax = sns.stripplot(x='delta_BIC', y='Country Name', data=df.sort_values(by='delta_BIC', ascending=False),
                       color='dodgerblue', size=9)
    fig = ax.get_figure()
    ax.set_ylabel('')
    ax.set_xlabel('$\Delta$BIC', fontsize=16)
    fig.set_size_inches((10, 12))
    ax.tick_params(axis='y', labelsize=13)
    ax.tick_params(axis='x', labelsize=16)
    # Dashed reference line at zero: points either side favour different models.
    plt.axvline(x=0, ls='--')
    if show_plot:
        plt.show()
    if output_path:
        fig.savefig(output_path, dpi=300, bbox_inches='tight')
    # Clear figure
    fig.clf()
| true |
cb7157d7c1761269dbaeaea4cb60ea7cba5af8fd | Python | pereirfe/getSuSyStatistics | /overtime.py | UTF-8 | 892 | 2.96875 | 3 | [] | no_license | #!/usr/bin/python
import turmas
import time
import json
import sys
def main():
try:
interval = int(sys.argv[1])
#int(raw_input('Select time interval in seconds:\n'))
lab = sys.argv[2] #raw_input('Select laboratory:\n')
except IndexError:
print "Use: overtime TIMEINSECONDS LAB"
return 0
print 'Kill this program with Ctrl-D'
while True:
try:
print 'Coletando dados de', time.ctime()
filename = './data/turmas/' + lab + '/' + str(int(time.time())) + '.json'
with open(filename, 'w') as f:
js = turmas.getConsolidateJson(lab)
json.dump(js,f)
print ""
time.sleep(interval)
except:
print "Unable to get data.. waiting.."
time.sleep(interval/5)
pass
if __name__ == '__main__':
main()
| true |
19f467ee03bcc1127e14c48b4fe8de686bf7c2cd | Python | UCLAX1/Guardian-V2 | /src/laptop/backend/dbConnection.py | UTF-8 | 2,735 | 2.625 | 3 | [] | no_license | import datetime
import mysql.connector
def create_db():
    """Create the Guardian_Data table (link/laser coordinates, offsets and
    a timestamp) in the x1_guardian database if it does not already exist.

    CREATE TABLE is DDL and autocommits in MySQL, so no commit is needed.
    """
    create_db_query = (
        "CREATE TABLE IF NOT EXISTS Guardian_Data "
        "(ID INT NOT NULL AUTO_INCREMENT, "
        "Link_X INT NOT NULL, Link_Y INT NOT NULL, "
        "Laser_X INT NOT NULL, Laser_Y INT NOT NULL, "
        "Distance_Offset DECIMAL(8,2) NOT NULL, Angle_Offset DECIMAL(8,2) NOT NULL, "
        "Time VARCHAR(255) NOT NULL, "
        "PRIMARY KEY (ID))"
    )
    my_db = mysql.connector.connect(
        host="localhost",
        user="x1",
        passwd="asme",
        database="x1_guardian"
    )
    try:
        my_cursor = my_db.cursor()
        my_cursor.execute(create_db_query)
        my_cursor.close()
    finally:
        # Fix: the connection used to be leaked on every call.
        my_db.close()
def insert_data(laser_coords, link_coords, link_pos):
    """Insert one tracking sample into Guardian_Data.

    :param laser_coords: (x, y) pixel position of the laser
    :param link_coords: (x, y) pixel position of the link target
    :param link_pos: (distance_offset, angle_offset) of the link
    The row is timestamped with the current local time.
    """
    laser_x, laser_y = laser_coords
    link_x, link_y = link_coords
    distance_offset, angle_offset = link_pos
    timestamp = datetime.datetime.now()
    data = (link_x, link_y, laser_x, laser_y, distance_offset, angle_offset, str(timestamp))
    # Parameterized query — values are never interpolated into the SQL text.
    insert_query = (
        "INSERT INTO Guardian_Data "
        "(Link_X, Link_Y, "
        "Laser_X, Laser_Y, "
        "Distance_Offset, Angle_Offset, "
        "Time) "
        "VALUES(%s,%s,%s,%s,%s,%s,%s)"
    )
    my_db = mysql.connector.connect(
        host="localhost",
        user="x1",
        passwd="asme",
        database="x1_guardian"
    )
    try:
        my_cursor = my_db.cursor()
        my_cursor.execute(insert_query, data)
        my_db.commit()
        my_cursor.close()
    finally:
        # Fix: the connection used to be leaked on every call.
        my_db.close()
def retrieve_specific_data():
    """Return the 5 most recent Guardian_Data rows as text: one
    comma-joined row per line, newest first."""
    retrieve_query = "SELECT * FROM Guardian_Data ORDER BY ID DESC LIMIT 5"
    my_db = mysql.connector.connect(
        host="localhost",
        user="x1",
        passwd="asme",
        database="x1_guardian"
    )
    try:
        my_cursor = my_db.cursor()
        my_cursor.execute(retrieve_query)
        rows = my_cursor.fetchall()
        my_cursor.close()
    finally:
        # Fix: the connection used to be leaked on every call.
        my_db.close()
    return "\n".join(",".join(map(str, row)) for row in rows)
def retrieve_all_data():
    """Return every Guardian_Data row as text: one comma-joined row per
    line, in primary-key order."""
    retrieve_query = "SELECT * FROM Guardian_Data"
    my_db = mysql.connector.connect(
        host="localhost",
        user="x1",
        passwd="asme",
        database="x1_guardian"
    )
    try:
        my_cursor = my_db.cursor()
        my_cursor.execute(retrieve_query)
        rows = my_cursor.fetchall()
        my_cursor.close()
    finally:
        # Fix: the connection used to be leaked on every call.
        my_db.close()
    return "\n".join(",".join(map(str, row)) for row in rows)
def truncate_table():
    """Delete every row from Guardian_Data.

    TRUNCATE is DDL in MySQL and commits implicitly, so no explicit
    commit is needed.
    """
    truncate_query = "TRUNCATE TABLE Guardian_Data"
    my_db = mysql.connector.connect(
        host="localhost",
        user="x1",
        passwd="asme",
        database="x1_guardian"
    )
    try:
        my_cursor = my_db.cursor()
        my_cursor.execute(truncate_query)
        my_cursor.close()
    finally:
        # Fix: the connection used to be leaked on every call.
        my_db.close()
| true |
dfe7f2e13f1f626eb7f948360d994de26aee8102 | Python | LarsenRidder/steppy | /steppy/server/server.py | UTF-8 | 4,462 | 2.578125 | 3 | [
"BSD-3-Clause"
] | permissive | # -*- coding: utf-8 -*-
"""
StepPy
:copyright: (c) 2016-2017 by Yann Gravrand.
:license: BSD, see LICENSE for more details.
"""
import gevent
import redis
from flask import Flask, render_template
from flask_sockets import Sockets
from gevent.wsgi import WSGIServer
from geventwebsocket.handler import WebSocketHandler
# Inspired from https://github.com/heroku-examples/python-websockets-chat
class ServerBackend(object):
    """Interface for registering and updating WebSocket clients.

    Subscribes to a single Redis pub/sub channel and fans every published
    message out to all registered WebSocket clients via greenlets.
    """
    def __init__(self, redis, redis_chan):
        self.clients = list()
        self.pubsub = redis.pubsub()
        self.pubsub.subscribe(redis_chan)
    def __iter_data(self):
        # Generator over payloads of real 'message' events; the initial
        # 'subscribe' confirmation is filtered out by the type check.
        for message in self.pubsub.listen():
            data = message.get('data')
            if message['type'] == 'message':
                yield data
    def register(self, client):
        """Register a WebSocket connection for Redis updates."""
        self.clients.append(client)
    def send(self, client, data):
        """Send given data to the registered client.
        Automatically discards invalid connections."""
        try:
            client.send(data)
        except Exception:
            # Any send failure is treated as a dead socket and the client
            # is dropped from the broadcast list.
            self.clients.remove(client)
    def run(self):
        """Listens for new messages in Redis, and sends them to clients."""
        for data in self.__iter_data():
            for client in self.clients:
                # One greenlet per delivery so a slow client cannot block others.
                gevent.spawn(self.send, client, data)
    def start(self):
        """Maintains Redis subscription in the background."""
        gevent.spawn(self.run)
class PushingConsole(object):
    """Console that forwards messages to remote WebSocket clients by
    publishing them on a Redis channel.

    In terse mode only "big" (important) messages are forwarded.
    """
    def __init__(self, redis, redis_chan, terse):
        self.redis = redis
        self.redis_chan = redis_chan
        self.terse = terse
    def start(self):
        # Nothing to initialise; present to match the console interface.
        pass
    def big_print(self, msg):
        """Publish msg unconditionally (important messages)."""
        self._publish(msg)
    def print_(self, msg):
        """Publish msg only when not running in terse mode."""
        if self.terse:
            return
        self._publish(msg)
    def _publish(self, msg):
        # Single choke point for the actual Redis publish call.
        self.redis.publish(self.redis_chan, msg)
class Server(object):
    # configobj-style validation spec for the [server] section.
    configspec = {
        'server': {
            'host': 'string(default="0.0.0.0")',
            'port': 'integer(default=8080)',
            'redis_url': 'string(default="")',
            'redis_chan': 'string(default="steppy")',
            'terse': 'boolean(default=True)' # if True, show on the browser BIG messages only
        }
    }
    def __init__(self, config):
        self.redis = None
        self.backend = None
        if config['server'].get('redis_url'):
            self.redis = redis.from_url(config['server']['redis_url'])
            self.redis_chan = config['server']['redis_chan']
            self.backend = ServerBackend(self.redis, self.redis_chan)
        else:
            print('No redis configured, disabling Websockets and remote web console')
        self.flask_host = config['server']['host']
        self.flask_port = config['server']['port']
        self.flask_app = Flask(__name__)
        self.flask_app.add_url_rule('/', 'index', self._index)
        sockets = Sockets(self.flask_app)
        # sockets.add_url_rule('/submit', 'submit', self._inbox)
        # NOTE(review): the /status websocket route is registered even when no
        # redis is configured, yet _status dereferences self.backend/self.redis
        # (None in that case) — confirm clients cannot hit it without redis.
        sockets.add_url_rule('/status', 'status', self._status)
        self.console = PushingConsole(self.redis, self.redis_chan, config['server']['terse']) if self.redis else None
    def start(self):
        # Run the backend fan-out and the HTTP server in background greenlets.
        if self.backend:
            self.backend.start()
        gevent.spawn(self.run)
    def run(self):
        print('Remote StepPy console available on http://%s:%s/' % (self.flask_host, self.flask_port))
        http_server = WSGIServer((self.flask_host, self.flask_port), self.flask_app, handler_class=WebSocketHandler)
        http_server.serve_forever()
    def _index(self):
        # Serves the remote console page.
        return render_template('index.html')
    # Not used for now
    def _inbox(self, ws):
        """Receives incoming messages, inserts them into Redis."""
        while not ws.closed:
            # Sleep to prevent *constant* context-switches.
            gevent.sleep(0.1)
            message = ws.receive()
            if message:
                self.redis.publish(self.redis_chan, message)
    def _status(self, ws):
        # Register the socket and keep it open; ServerBackend pushes updates.
        print('Client connected:', ws.origin)
        self.backend.register(ws)
        self.redis.publish(self.redis_chan, 'Connected')
        while not ws.closed:
            # Context switch while `ChatBackend.start` is running in the background.
            gevent.sleep(0.1)
| true |
c690983314f1704f84834249c4423f2f07adf751 | Python | fuxi5788/Avazu_CTR | /LR-FeiXie/code/CTR-LR.py | UTF-8 | 5,433 | 2.59375 | 3 | [] | no_license | import pandas as pd
from sklearn.linear_model import LogisticRegression
import numpy as np
import pandas as pd
import time
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import GridSearchCV
#保留的特征:目前去掉了Hour, 因为下面对hour重新进行的了特征提取
feature_list = ['C1', 'banner_pos', 'site_id', 'site_domain',
'site_category', 'app_id', 'app_domain', 'app_category', 'device_id',
'device_ip', 'device_model', 'device_type', 'device_conn_type', 'C14',
'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21']
#去掉原来会导致merge值减少的feature : site_id, site_domain, app_id,
# device_id, device_ip, device_model,device_conn_type,c14
# feature_list = ['C1', 'banner_pos',
# 'site_category', 'app_domain', 'app_category',
# 'device_type', 'device_conn_type', 'C14',
# 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21']
### 将所有特征的后验概率加入到训练数据集中,并替换掉原来的特征
def insert_click_rate(train_file, rate_file_path, feature_list, output):
    """Replace each categorical feature with its empirical click rate.

    For every feature in `feature_list`, a lookup table named
    'clickVS<feature>.csv' under `rate_file_path` (columns: the feature and
    'avg(click)') is left-joined onto the training data; the raw column is
    dropped and the rate column renamed to '<feature>_rate'.  The result is
    written to `output` and returned as a DataFrame.
    """
    merged = pd.read_csv(train_file)
    print('before merge shape:', merged.shape)
    for feature in feature_list:
        rate_csv = rate_file_path + 'clickVS' + feature + '.csv'
        rate_table = pd.read_csv(rate_csv, usecols=[feature, 'avg(click)'])
        merged = pd.merge(merged, rate_table, how='left')
        print('after {} merge ,shape:{}'.format(feature, merged.shape))
        merged = merged.rename(columns={'avg(click)': feature + '_rate'})
        merged = merged.drop([feature], axis=1)
    merged.to_csv(output, index=False)
    print('inset rate result shape:', merged.shape)
    return merged
def ss_feature(df):
    """Standardize each column of df to zero mean and unit variance."""
    return StandardScaler().fit_transform(df)
def mm_feature(df):
    """Rescale each column of df into the [0, 1] range."""
    return MinMaxScaler().fit_transform(df)
def fit_LR_model(X_train, y_train):
    """Fit a cross-validated L1 logistic regression (3-fold, neg-log-loss)
    and print the per-fold CV scores."""
    Cs = [0.01]
    # Large sample (60k+) and high dimensionality (93 features); with L2
    # regularisation the default lbfgs solver would be used instead.
    # LogisticRegressionCV is faster than GridSearchCV.
    lr = LogisticRegressionCV(Cs= Cs, cv = 3, scoring='neg_log_loss',
                              penalty='l1',solver='liblinear',n_jobs=-1)
    lr.fit(X_train, y_train)
    print("LR scores:",lr.scores_)
def find_LR_params(x_train, y_train):
    """Grid-search the inverse regularisation strength C for an L1 logistic
    regression (3-fold CV) and print the best score and parameters."""
    param_LR= {'C':[0.1,1,2]}
    gsearch_LR = GridSearchCV(estimator = LogisticRegression(penalty='l1',solver='liblinear'), param_grid=param_LR,cv=3)
    print("Performing grid search...")
    # Fix: this module imports the `time` *module*, so the bare time() call
    # raised "TypeError: 'module' object is not callable"; use time.time().
    t0 = time.time()
    gsearch_LR.fit(x_train,y_train)
    print("done in %0.3fs" % (time.time() - t0))
    print()
    print("Best score: %0.3f" % gsearch_LR.best_score_)
    print("Best params: ")
    print(gsearch_LR.best_params_)
    #return gsearch_LR.grid_scores_, gsearch_LR.best_params_, gsearch_LR.best_score_
from sklearn.externals import joblib
1
模型保存
>>> os.chdir("workspace/model_save")
>>> from sklearn import svm
>>> X = [[0, 0], [1, 1]]
>>> y = [0, 1]
>>> clf = svm.SVC()
>>> clf.fit(X, y)
>>> clf.fit(train_X,train_y)
>>> joblib.dump(clf, "train_model.m")
# train_file = 'data/train_1_time_encode.csv'
#
# path = 'data/train_info/'
# train_raw = insert_click_rate(train_file = train_file,
# rate_file_path = path, feature_list = feature_list, output = 'data/train_1_rate.csv')
# print('train raw shape:',train_raw.shape)
# train_rate_file = pd.read_csv('data/train_1_rate.csv')
# y_train = train_rate_file['click']
# print('y train shape:',y_train.shape)
# X_train = train_rate_file.drop(['click','id'], axis=1)
# print(X_train.head())
# # X_train = ss_feature(X_train)
# X_train = mm_feature(X_train)
# print('train shape:',X_train.shape)
# fit_LR_model(X_train = X_train, y_train = y_train)
## 对50数据集进行验证
# train_file = 'data/train_50w_time_encode.csv'
# path = 'data/train_info/'
# train_raw = insert_click_rate(train_file = train_file,
# rate_file_path = path, feature_list = feature_list, output = 'data/train_50w_rate.csv')
# print('train raw shape:',train_raw.shape)
# train_rate_file = pd.read_csv('data/train_50w_rate.csv')
# y_train = train_rate_file['click']
# print('y train shape:',y_train.shape)
# X_train = train_rate_file.drop(['click','id'], axis=1)
# print(X_train.head())
# print('train shape:',X_train.shape)
# test = pd.read_csv('data/test_rate.csv')
# test.drop(['id'], axis=1, inplace=True)
# predict_y = predict(X_train = X_train, y_train = y_train, test = test)
#生成Test文件的编码文件
# train_file = 'data/test_time_encode.csv'
# path = 'data/train_info/'
# insert_click_rate(train_file = train_file,rate_file_path = path, feature_list = feature_list, output = 'data/test_rate.csv')
#生成Test文件的编码文件
# train_file = 'data/test_time_encode.csv'
# path = 'data/train_info/'
# insert_click_rate(train_file = train_file,rate_file_path = path,
# feature_list = feature_list, output = 'data/test_rate.csv')
# X_train = pd.read_csv('/Users/feixi/Documents/Study/CSDN/Projects/CTR/data/train_1_poly.csv')
# Quick sanity check: load only the label column and print it as a flat array.
# NOTE(review): absolute developer-machine path — this will not run elsewhere.
y_train = pd.read_csv('/Users/feixi/Documents/Study/CSDN/Projects/CTR/data/train_1_rate.csv',usecols=['click'])
# y_train.reshape(-1)
print(y_train.values.reshape(-1))
# print('start predict cv, time:')
# fit_LR_model(X_train=X_train,y_train=y_train)
# print('finish time:') | true |
d10efd3e812e6b42f0b46a5185558c1b65e9523c | Python | alex-belonozhko/Projects-Python | /Gosha Dudar/OOP.py | UTF-8 | 2,029 | 3.765625 | 4 | [] | no_license | #Example 1
# class car:
# color = "color"
# engine = "engine"
# weight = "weight"
#
# def set(self, color, engine, weight):
# self.color = color
# self.engine = engine
# self.weight = weight
#
# class Mark (car):
# mark = "mark"
#
# def __init__(self, mark, weight):
# self.mark = mark
# self.weight = weight
#
# class mark (car):
# mark = "mark"
#
# def mk(self, mark):
# self.mark = mark
#
# Sens = Mark("Sens", "2499kg")
# Sens.set("grey", 2, "2500kg")
#
# Audi = mark()
# Audi.mk("Audi")
# Audi.set("white", 3, "3000kg")
#
# BMW = mark()
# BMW.set("black", 4, "3500kg")
# BMW.mk("BMW")
#
# print (BMW .mark)
#Example 2
# class Car:
# def __init__(self, mark, engine):
# self.mark = mark
# self.engine = engine
# print("Создан обьект класа Car с параметрами: Марка: {0} и Двигатель: {1}".format(self.mark, self.engine))
#
# def tell(self):
# print("Марка: {0}, Двигатель: {1}, ".format(self.mark, self.engine), end=" ")
#
# class Purchase:
# def __init__(self, mark, engine, profit):
# Car.__init__(self, mark, engine)
# self.profit = profit
# print("Вы продали машину Марки {0} с движком {1} по заработали {2}".format(self.mark, self.engine, self.profit))
# def tell(self):
# Car.tell(self)
# print("Стоимость: {0}".format(self.profit))
#
# class Sale:
# def __init__(self, mark, engine, cost):
# Car.__init__(self, mark, engine)
# self.cost = cost
# print("Вы купили машину Марки {0} с движком {1} по стоимости {2}".format(self.mark, self.engine, self.cost))
# def tell(self):
# Car.tell(self)
# print("Стоимость: {0}".format(self.cost))
#
# S = Sale("Sens", 2, 2000)
# P = Purchase("BMW", 4, 6000)
# Cars = [S, P]
# for car in Cars:
# car.tell()
| true |
ead96fbf8cf4bbe4957b71d87d1a149af67b58d4 | Python | ammarion/Complete_Python_Developer | /oop/class-att.py | UTF-8 | 274 | 3.265625 | 3 | [] | no_license | class Pearson:
def set_name(self, new_name, age):
self.name = new_name
self.age = age
# Pearson.do_workd = lambda self: f"do_workd called from {self}"
p = Pearson()
p.set_name('Ammar', 99)
Pearson.do_work = lambda self: f"do_work called from {self}"
| true |
ae37b18a772e0081a7c74606a22acf432097ff5e | Python | jmueric/Weather | /Weather scale.py | UTF-8 | 222 | 3.84375 | 4 | [] | no_license | weather = int(input("Hows your weather? "))
if weather >85:
print("It's too hot")
elif weather >80:
print("Looking pretty")
elif weather >65:
print("little chilly")
else:
print("It's too hot") | true |
9ec7498ae0c88c346cfd2d3df295e6488d318bca | Python | oTree-org/otree-docs | /source/_static/otree_python.py | UTF-8 | 4,361 | 4.71875 | 5 | [
"MIT"
] | permissive | # Comments start with a # symbol.
####################################################
## 1. Basics
####################################################
# integer
3
# float (floating-point number)
3.14
# Math is what you would expect
1 + 1 # => 2
8 - 1 # => 7
10 * 2 # => 20
35 / 5 # => 7.0
# Enforce precedence with parentheses
(1 + 3) * 2 # => 8
# Boolean Operators
# Note they are
True and False # => False
False or True # => True
# negate with not
not True # => False
not False # => True
# Equality is ==
1 == 1 # => True
2 == 1 # => False
# Inequality is !=
1 != 1 # => False
2 != 1 # => True
# More comparisons
1 < 10 # => True
1 > 10 # => False
2 <= 2 # => True
2 >= 2 # => True
# A string (text) is created with " or '
"This is a string."
'This is also a string.'
# Strings can be added too!
"Hello " + "world!" # => "Hello world!"
# None means an empty/nonexistent value
None # => None
####################################################
## 2. Variables, lists, and dicts
####################################################
# print() displays the value in your command prompt window
print("I'm Python. Nice to meet you!") # => I'm Python. Nice to meet you!
# Variables
some_var = 5
some_var # => 5
# Lists store sequences
li = []
# Add stuff to the end of a list with append
li.append(1) # li is now [1]
li.append(2) # li is now [1, 2]
li.append(3) # li is now [1, 2, 3]
# Access a list like you would any array
# in Python, the first list index is 0, not 1.
li[0] # => 1
# Assign new values to indexes that have already been initialized with =
li[0] = 42
li # => [42, 2, 3]
# You can add lists
other_li = [4, 5, 6]
li + other_li # => [42, 2, 3, 4, 5, 6]
# Get the length with "len()"
# (note: the addition above built a NEW list; li itself is still [42, 2, 3])
len(li) # => 3
# Here is a prefilled dictionary
filled_dict = dict(name='Lancelot', quest="To find the holy grail", favorite_color="Blue")
# Look up values with []
filled_dict['name'] # => 'Lancelot'
# Check for existence of keys in a dictionary with "in"
'name' in filled_dict # => True
'age' in filled_dict # => False
# set the value of a key with a syntax similar to lists
filled_dict["age"] = 30 # now, filled_dict["age"] => 30
####################################################
## 3. Control Flow
####################################################
# Let's just make a variable
some_var = 5
# Here is an if statement.
# prints "some_var is smaller than 10"
if some_var > 10:
print("some_var is totally bigger than 10.")
elif some_var < 10: # This elif clause is optional.
print("some_var is smaller than 10.")
else: # This is optional too.
print("some_var is indeed 10.")
"""
SPECIAL NOTE ABOUT INDENTING
In Python, you must indent your code correctly, or it will not work.
All lines in a block of code must be aligned along the left edge.
When you're inside a code block (e.g. "if", "for", "def"; see below),
you need to indent by 4 spaces.
Examples of wrong indentation:
if some_var > 10:
print("bigger than 10." # error, this line needs to be indented by 4 spaces
if some_var > 10:
print("bigger than 10.")
else: # error, this line needs to be unindented by 1 space
print("less than 10")
"""
"""
For loops iterate over lists
prints:
1
4
9
"""
for x in [1, 2, 3]:
print(x*x)
"""
"range(number)" returns a list of numbers
from zero to the given number MINUS ONE
the following code prints:
0
1
2
3
"""
for i in range(4):
print(i)
####################################################
## 4. Functions
####################################################
# Use "def" to create new functions
def add(x, y):
    """Print each operand on its own line, then return their sum."""
    for label, value in (('x is', x), ('y is', y)):
        print(label, value)
    return x + y
# Calling functions with parameters
add(5, 6) # => prints out "x is 5 and y is 6" and returns 11
####################################################
## 5. List comprehensions
####################################################
# We can use list comprehensions to loop or filter
numbers = [3,4,5,6,7]
[x*x for x in numbers] # => [9, 16, 25, 36, 49]
numbers = [3, 4, 5, 6, 7]
[x for x in numbers if x > 5] # => [6, 7]
####################################################
## 6. Modules
####################################################
# You can import modules
import random
print(random.random()) # random real between 0 and 1
| true |
e0215a0b7b11a430dda46c92fc4febdf88dc391f | Python | jennyjohns/Diabetes-Dataset-DMT-Project | /Code/Preprocessing/csv2arff/csv2arff.py | UTF-8 | 2,463 | 3.078125 | 3 | [] | no_license | import csv
import sys
import os
#def read_data(numeric_col, data, nominal_dictionary, cols_size):
def read_data():
for row in content:
# replacing empty data with '?'
row_content = []
for index, cell in enumerate(row):
# ignoring the weight column, index: 5
if index == 5:
row_content.append('?')
elif cell == '':
row_content.append('?')
else:
row_content.append(cell)
# row = ['?' if cell == '' else cell for cell in row]
# adding the row to a data list
data.append(row_content)
for col in range(cols_size):
cell_data = row[col]
# identifying the attribute value set
if col not in numeric_col and cell_data != '?':
if col not in nominal_dictionary.keys():
nominal_dictionary[col] = set()
nominal_dictionary[col].add(cell_data)
def write_data_to_console():
# Printing @relation
print('@relation', name)
# Printing @attributes
for col in range(len(header)):
if col in numeric_col:
print('@attribute', header[col], 'numeric')
else:
print('@attribute', header[col], '{' + ','.join(sorted(nominal_dictionary[col])) + '}')
# Printing @data
print('@data')
for row in data:
print(','.join(row))
if len(sys.argv) != 2:
print('Usage: csv2arff.py <path to a csv file>')
sys.exit(0)
file_path = sys.argv[1]
try:
with open(file_path, 'r') as csvfile:
content = csv.reader(csvfile, delimiter = ',')
file_name = os.path.basename(file_path)
# name of the csv file.
name = os.path.splitext(file_name)[0]
# Numeric columns:
# 0 - Encounter ID, 1 - Patient number, 5 - Weight, 9 - Time in hospital, 12 - Number of lab procedures
# 13 - Number of procedures, 14 - Number of medications, 15 - Number of outpatient visits
# 16 - Number of emergency visits, 17 - Number of inpatient visits, 21 - Number of diagnoses
numeric_col = {0, 1, 5, 9, 12, 13, 14, 15, 16, 17, 21}
data = []
nominal_dictionary = {}
header = next(content);
cols_size = len(header)
read_data()
write_data_to_console()
except IOError:
print('File not found')
sys.exit(0)
| true |
6b5e7675ec5ffc5a2ff3c842aca915e18cae327b | Python | borismattijssen/mushrooms | /src/data/make_dataset.py | UTF-8 | 2,300 | 2.75 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import pickle
import click
import logging
from pathlib import Path
import os.path
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
project_dir = Path(__file__).resolve().parents[2]
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
@click.option('--pca-var', default=0.95)
@click.option('--pca-output', type=click.Path(), default=os.path.join(project_dir, 'models/pca.p'))
@click.option('--colnames-output', type=click.Path(), default=os.path.join(project_dir, 'models/colnames.p'))
def main(input_filepath, output_filepath, pca_var, pca_output, colnames_output):
    """ Runs data processing scripts to turn raw data from (../raw) into
        cleaned data ready to be analyzed (saved in ../processed).

        The 'class' column is split off as the label (e/p mapped to 0/1),
        the remaining columns are one-hot encoded and PCA-reduced keeping
        `pca_var` of the variance; the fitted PCA and the one-hot column
        names are pickled for reuse at prediction time.
    """
    logger = logging.getLogger(__name__)
    logger.info('making final data set from raw data')
    # read raw dataset
    logger.info('reading dataset at {}'.format(input_filepath))
    df = pd.read_csv(input_filepath)
    y = df['class']
    df = df.drop(columns=['class'])
    # convert to one-hot encoding
    logger.info('converting to one-hot encoded dataset')
    dum = pd.get_dummies(df)
    # save column names for later use
    logger.info('saving one-hot encoded column names to disk')
    # Fix: the handle from open(...) was never closed; a context manager
    # guarantees the pickle is flushed and the file released.
    with open(colnames_output, "wb") as colnames_file:
        pickle.dump(dum.columns, colnames_file)
    # reduce dimensionality
    logger.info('reducing dimensionality with {} variance retained'.format(pca_var))
    pca = PCA(n_components=pca_var)
    trans = pca.fit_transform(dum)
    logger.info('resulting in a dataset with shape {}'.format(trans.shape))
    # save PCA model to cache location
    logger.info('saving PCA to disk')
    with open(pca_output, "wb") as pca_file:
        pickle.dump(pca, pca_file)
    # create output dataframe: PCA features joined with the numeric label
    logger.info('creating output dataframe')
    headers = ['feat_{}'.format(i) for i in range(trans.shape[1])]
    output_df = pd.DataFrame(data=trans, columns=headers)
    output_df = output_df.join(y.replace('e', 0).replace('p', 1))
    # write output
    output_df.to_csv(output_filepath, index=False)
if __name__ == '__main__':
    # Configure root logging before click hands control to main().
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)
    main()
| true |
c94bb4feb7dbaa884456764adeb589c2f220c537 | Python | Liusihan-1/python | /1~100的累加.py | UTF-8 | 76 | 3.375 | 3 | [] | no_license | sum,n=0,1
while n<=100:
sum=sum+n
n=n+1
print("1+2+3+...+100=",sum)
| true |
f2eee6bdde8856c70c8121e35b797dc82c388cfa | Python | paramv/hrrnk-sol | /Sorting and Searching Tutorials/insertionsort.py | UTF-8 | 1,714 | 3.3125 | 3 | [
"MIT"
] | permissive | # def insertion_sort(arr):
# for j in range(1,len(arr)):
# key = arr[j]
# i = j-1
# print('Outside while => j: %s,key %s,arr[%s] %s' %(j,key,i,arr[i]))
# while i >= 0 and arr[i] > key:
# # print('\t setting arr[%s] as arr[%s]' % (i,i+1))
# arr[i+1]=arr[i]
# i-=1
# # print('\t %s ,i = %s' % (arr,i))
# print arr
# # print('setting arr[%s] as key(%s or arr[%s])' % (i+1,key,j))
# arr[i+1] = key
# # print('%s' % arr)
# return arr
# def insertion_sort_rev(arr):
# l = len(arr)
# for j in range(l-1,-1,-1):
# key = arr[j]
# i= j - 1
# # print('Outside while => j: %s,key %s,arr[%s] %s' %(j,key,i,arr[i]))
# while i >= 0 and arr[i] > key:
# # print('\t setting arr[%s] as arr[%s]' % (i-1,i))
# arr[i+1] = arr[i]
# i-=1
# # print('\t %s ,i = %s' % (arr,i))
# print arr
# arr[i+1] = key
# # print('%s' % arr)
# print arr
# # n = int(raw_input())
# # arr = map(lambda v:int(v),raw_input().split())
# print insertion_sort([4,2,6,8,3])
# insertion_sort_rev([2,4,6,8,3])
# def insertion_sort(arr):
# l = len(arr)
# for j in range(l-1,-1,-1):
# key = arr[j]
# i= j - 1
# while i >= 0 and arr[i] > key:
# arr[i+1] = arr[i]
# i-=1
# arr[i+1] = key
# print ' '.join(str(x) for x in arr)
# print ' '.join(str(x) for x in arr)
# return arr
def insertion_sort(arr):
    """Sort arr in place with insertion sort, print the number of element
    shifts performed, and return the (same) list.

    Fix: `print shifts` was Python-2-only syntax and a SyntaxError on
    Python 3; print(shifts) behaves identically on both for one argument.
    """
    shifts = 0
    for j in range(1, len(arr)):
        key = arr[j]
        i = j - 1
        # Walk left, shifting every larger element one slot to the right.
        while i >= 0 and arr[i] > key:
            shifts += 1
            arr[i + 1] = arr[i]
            i -= 1
        arr[i + 1] = key
    print(shifts)
    return arr
# Python 2 driver: read the element count and the list from stdin, then sort.
# NOTE(review): n is read but never used — presumably the judge supplies it.
n = int(raw_input())
arr = map(lambda v:int(v),raw_input().split())
insertion_sort(arr)
# insertion_sort([2,1,3,1,2])
| true |
dba093d06d644681ff4ab0fc9fa7a758cd05900c | Python | bhavyakh/decrypto | /decrypto/cipher/vigenereBreaker.py | UTF-8 | 1,138 | 2.984375 | 3 | [
"MIT"
] | permissive | from decrypto.cipher.detectEnglish import isEnglish
from decrypto.cipher.vigcep import _decryptMessage
class VigenereBreaker:
    @staticmethod
    def _hackVigenere(ciphertext):
        """Dictionary attack: try every word in the dictionary file as the
        key and keep those whose decryption looks like English.

        Returns a dict mapping candidate key -> first 100 chars of the
        decrypted text, or the string "N/A" when no key matched.
        """
        # Fix: the file handle was previously never closed.
        with open('decrypto/static/dictionary.txt') as fo:
            words = fo.readlines()
        data = {}
        for word in words:
            word = word.strip()  # remove the newline at the end
            decryptedText = _decryptMessage(word, ciphertext)
            if isEnglish(decryptedText, wordPercentage=40):
                data[str(word)] = decryptedText[:100]
        # Fix: this used to test `data is None`, which never fires because
        # data is always a dict; an empty result silently returned {}.
        if not data:
            return "N/A"
        return data
    @classmethod
    def decrypt(cls, message):
        """Decrypts Vigenere cipher without key
        Uses dictionary attack on each key to get
        a sentence on which English words %age can
        be tested
        Args:
            message (str): Encrypted text
        Returns:
            dict : {"Vigenere" : [output]}
        """
        try:
            data = cls._hackVigenere(message)
        except Exception:
            # Narrowed from a bare `except:`; any failure (missing dictionary
            # file, decrypt error) degrades to "N/A" instead of propagating.
            data = "N/A"
        return {"Vigenere": data}
| true |
9452633f1b308acb196695470313ebf9ad21356a | Python | SureshKrishnanSrinivasan/Python-Practise | /py4eb.py | UTF-8 | 1,328 | 3.765625 | 4 | [] | no_license | import random
user_score = 0
comp_score = 0
print("Game Rules are as follows:\n 1) Rock wins Scissor\n2) Scissor wins Paper\n 3) Paper wins Rock\n\n\n\n Choose: \n a) 1 - for Rock\n b) 2 - for Paper\n c) 3 - for Scissors\n d) quit - To get results")
while True:
user_input = input("Enter input: ")
if user_input == "quit":
if user_score>comp_score:
print("You scored",user_score)
print("Computer scored",comp_score)
print("You beat the computer by",user_score - comp_score,"points")
elif user_score < comp_score:
print("You scored",user_score)
print("Computer scored",comp_score)
print("Computer beat you by",comp_score - user_score,"points")
else:
print("You scored",user_score)
print("Computer scored",comp_score)
print("It was a draw")
break
values = ["rock","paper","scissor"]
comp = values[random.randint(0,2)]
user = values[int(user_input)-1]
if user == "rock" and comp == "scissor" or user =="scissor" and comp =="paper" or user == "paper" and comp == "rock" :
user_score +=1
if comp == "rock" and user == "scissor" or comp =="scissor" and user =="paper" or comp == "paper" and user == "rock" :
comp_score +=1
# rock scissor paper
# rock scissor, scissor paper, rock paper,
# rock rock, paper paper, scissor scissor | true |
dbea996f24281d0b752188e0e9dc27bd57f5b36b | Python | NKalu/vending_machine | /src/vending_features/coin_accept.py | UTF-8 | 2,090 | 3.421875 | 3 | [] | no_license | class CoinAcceptor(object):
def __init__(self):
# keep track of value of coins inserted
self.coin_value = 0
# number of coins inserted not in coin return
self.number_of_coins = 0
# keeping track of the coins the user inserted
self.coins_inserted = list()
super().__init__()
def accept_coins(self, coin_inserted: str):
# this functions will determine the value to assign
# the coin based on the length first (size)
# the string is then lowered and sorted and compared (weight)
# if the coin is invalid, it is sent to the coin_return function
if len(coin_inserted) == 4:
if self.weigh_coin(coin_inserted) == 'deim':
self.coin_value += .10
self.number_of_coins += 1
self.coins_inserted.append(coin_inserted)
elif len(coin_inserted) == 6:
if self.weigh_coin(coin_inserted) == 'ceikln':
self.coin_value += .05
self.number_of_coins += 1
self.coins_inserted.append(coin_inserted)
else:
self.coin_return(coin_inserted)
elif len(coin_inserted) == 7:
if self.weigh_coin(coin_inserted) == 'aeqrrtu':
self.coin_value += .25
self.number_of_coins += 1
self.coins_inserted.append(coin_inserted)
else:
self.coin_return(coin_inserted)
else:
self.coin_return(coin_inserted)
def clear_coins(self):
self.coin_value = 0
self.number_of_coins = 0
def return_coins_to_customer(self) -> str:
return '\n'.join([self.coin_return(coin) for coin in self.coins_inserted])
def weigh_coin(self, coin_to_weigh:str) -> str:
# returns the "weight" of the coin
return ''.join(sorted(coin_to_weigh.lower()))
def coin_return(self, coin_to_return: str) -> str:
# just prints that a coin has been sent to Coin Return
return f'{coin_to_return} sent to Coin Return'
| true |
480f3f2aa659de41e11a1d22b04baa49b02bdb22 | Python | german-mesa/wordCloud | /src/Example_SuccessFactor_1_Data_Extraction.py | UTF-8 | 3,244 | 2.59375 | 3 | [] | no_license | # https://www.scrapingbee.com/blog/selenium-python/
#
import io
import os
import time
import datetime
from os import listdir
from os.path import isfile, join
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
DRIVER_PATH = os.path.join(os.getcwd(), 'drivers', 'chromedriver')
url_navigation_list = [
# SSO at SAP
'https://accounts.sap.com/saml2/idp/usso/sap?sp=www.successfactors.com',
# Success Factors - Find internal Jobs
'https://performancemanager5.successfactors.eu/sf/careers/jobsearch?bplte_company=SAP',
# Success Factors - Search positions button
'https://performancemanager5.successfactors.eu/acme?bplte_company=SAP&fbacme_n=recruiting&recruiting%5fns=joblisting%20summary&itrModule=rcm',
]
def chrome_head_full_mode():
page_counter = 0
driver = webdriver.Chrome(executable_path=DRIVER_PATH)
for url in url_navigation_list:
driver.get(url)
time.sleep(10)
try:
# Select number of records per page
select = Select(driver.find_element_by_xpath("//select[@id='37:']"))
select.select_by_visible_text("150")
time.sleep(2)
while True:
# Endless loop until next button is disabled
element = driver.find_elements_by_id("36:_next")
if len(element) < 2:
break
# Export page for later use
page_counter = page_counter + 1
export_page_source(driver.page_source, page_counter)
time.sleep(15)
# Click on next button
WebDriverWait(driver, 15).until(EC.element_to_be_clickable((By.ID, '36:_next'))).click()
time.sleep(15)
except NoSuchElementException:
print('Could not find the element')
print("Done")
def get_file_name(page_counter):
current_day = datetime.date.today()
year, week_num, day_of_week = current_day.isocalendar()
return os.path.join(os.getcwd(), 'output',
'response-{week}-{counter}.html'.format(week=str(week_num), counter=page_counter))
def export_page_source(page_source, page_counter):
page_name = get_file_name(page_counter)
try:
with io.open(page_name, 'w') as file:
file.write(page_source)
except IOError:
print("I/O error")
def move_output_files():
print("Moving files to input directory...")
source_dir = os.path.join(os.getcwd(), 'output')
destination_dir = os.path.join(os.getcwd(), 'input')
for file in [f for f in listdir(source_dir) if isfile(join(source_dir, f))]:
source_file = os.path.join(source_dir, file)
destination_file = os.path.join(destination_dir, file)
print(f"Moving...{source_file} to {destination_file}")
os.rename(source_file, destination_file)
def main():
# Run chrome at head full mode
chrome_head_full_mode()
# Moving files to directory for next step in the process
move_output_files()
if __name__ == '__main__':
main()
| true |
170a86405e1ac2b20d0626e7e4726bcb24ba7281 | Python | justinclark-dev/CSC110 | /code/Chapter-6/file_write.py | UTF-8 | 383 | 3.8125 | 4 | [
"MIT"
] | permissive | # This program writes three lines of data
# to a file.
def main():
# Open a file named philosophers.txt.
outfile = open('philosophers.txt', 'w')
# Write the names of three philosphers
# to the file.
outfile.write('John Locke\n')
outfile.write('David Hume\n')
outfile.write('Edmund Burke\n')
# Close the file.
outfile.close()
# Call the main function.
main()
| true |
1f0547d9536ff7d5358de50c5a807ad1b78c3e71 | Python | sdeangelis/mtuq | /mtuq/dataset.py | UTF-8 | 7,071 | 2.859375 | 3 | [
"BSD-2-Clause"
] | permissive |
import obspy
import numpy as np
import warnings
from copy import copy
from obspy import Stream
from obspy.geodetics import gps2dist_azimuth
class Dataset(list):
""" Seismic data container
A list of ObsPy streams in which each stream corresponds to a single
seismic station
.. note::
Each supported file format has a corresponding reader that creates a
Dataset (see ``mtuq.io.readers``).
"""
def __init__(self, streams=[], id=None, tags=[]):
""" Constructor method
"""
self.id = id
for stream in streams:
self.append(stream)
for tag in copy(tags):
self.tag_add(tag)
def append(self, stream):
""" Appends stream to Dataset
"""
assert issubclass(type(stream), Stream),\
ValueError("Only Streams can be appended to a Dataset")
# create unique identifier
try:
stream.id = '.'.join([
stream.station.network,
stream.station.station,
stream.station.location])
except:
stream.id = '.'.join([
stream[0].stats.network,
stream[0].stats.station,
stream[0].stats.location])
if not hasattr(stream, 'tags'):
stream.tags = list()
if not hasattr(stream, 'station'):
warnings.warn("Stream lacks station metadata")
elif not hasattr(stream, 'origin'):
warnings.warn("Stream lacks origin metadata")
else:
(stream.distance_in_m, stream.azimuth, _) =\
gps2dist_azimuth(
stream.origin.latitude,
stream.origin.longitude,
stream.station.latitude,
stream.station.longitude)
super(Dataset, self).append(stream)
def select(self, origin=None, station=None, ids=None):
""" Selects streams that match the given station or origin
"""
selected = self
if station:
selected = self.__class__(id=self.id, streams=filter(
lambda stream: stream.station==station, selected))
if origin:
selected = self.__class__(id=self.id, streams=filter(
lambda stream: stream.origin==origin, selected))
if ids:
selected = self.__class__(id=self.id, streams=filter(
lambda stream: stream.id in ids, selected))
return selected
def apply(self, function, *args, **kwargs):
""" Applies function to all streams
Applies a function to each stream in the Dataset, identical to the
Python built-in ``apply``.
.. warning ::
Although ``map`` returns a new Dataset, it is possible, depending
on the behavior of the given function, that the streams or traces
of the original Dataset are overwitten.
See also ``mtuq.process_data.ProcessData``, which has an
`overwrite` keyword argument that is `False` by default.
"""
processed = []
for stream in self:
processed += [function(stream, *args, **kwargs)]
return self.__class__(
processed, id=self.id)
def map(self, function, *sequences):
""" Maps function to all streams
Maps a function to each stream in the Dataset. If one or more optional
sequences are given, the function is called with an argument list
consisting of corresponding items of each sequence, identical to the
Python built-in ``map``.
.. warning ::
Although ``map`` returns a new Dataset, it is possible, depending
on the behavior of the given function, that the streams or traces
of the original Dataset are overwitten.
See also ``mtuq.process_data.ProcessData``, which has an
`overwrite` keyword argument that is `False` by default.
"""
processed = []
for _i, stream in enumerate(self):
args = [sequence[_i] for sequence in sequences]
processed += [function(stream, *args)]
return self.__class__(
processed, id=self.id)
def max(self):
""" Returns maximum absolute amplitude over all traces
"""
max_all = -np.inf
for stream in self:
for trace in stream:
if not getattr(trace, 'weight', 1.):
continue
if trace.data.max() > max_all:
max_all = abs(trace.data).max()
return max_all
def sort_by_distance(self, reverse=False):
""" Sorts in-place by hypocentral distance
"""
self.sort_by_function(lambda data: data.distance_in_m,
reverse=reverse)
def sort_by_azimuth(self, reverse=False):
""" Sorts in-place by source-receiver azimuth
"""
self.sort_by_function(lambda data: data.azimuth,
reverse=reverse)
def sort_by_function(self, function, reverse=False):
""" Sorts in-place by user-supplied function
"""
self.sort(key=function, reverse=reverse)
def get_stations(self):
""" Returns station metadata from all streams
For Datasets created using ``mtuq.io.readers``, SAC headers or
other file metadata are used to populate the Station attributes
"""
stations = []
for stream in self:
stations += [stream.station]
return stations
def get_origins(self):
""" Returns origin metadata from all streams
What do these metadata represent?
- For Datasets created using ``mtuq.io.readers.sac``, origin metadata
represent catalog information read from SAC headers
- For Datasets created using ``GreensTensor.get_synthetics``, origin
metadata are inherited from the GreensTensor
"""
origins = []
for stream in self:
origins += [stream.origin]
if getattr(self, '_warnings', True):
if stream.origin!=self[0].origin:
warnings.warn(
"Different streams in the Dataset correpond to "
"different events.\n\n"
"This may be intentional. Feel free to disable this "
"warning by setting Dataset._warnings=False")
return origins
def tag_add(self, tag):
""" Appends string to tags list
Tags can be used to support customized uses, such as storing metdata not
included in ``Station`` or ``Origin`` objects
"""
if type(tag)!=str:
raise TypeError
for stream in self:
if tag not in stream.tags:
stream.tags.append(tag)
def tag_remove(self, tag):
""" Removes string from tags list
"""
for stream in self:
if tag in stream.tags:
stream.tags.remove(tag)
| true |
ef00ab3768ad6a484e9333afba48852c48433d77 | Python | guptabhis/Lin-Log-Reg-from-Scratch | /assign1.py | UTF-8 | 1,513 | 3.421875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 11 11:42:24 2019
@author: abhishek
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from numpy import zeros
from numpy import array
def compute_totalerror (b,x,y,n):
total_error = 0
for i in range(n):
total_error += (y[i]-(x[i,:]*b))**2
return total_error/float(n)
def step_gradient(x,y,b,a):
b_gradient = zeros(8)
b_gradient = np.transpose(b_gradient)
N= len(y)
for i in range(N):
for j in range(len(b_gradient)):
b_gradient[j] += (-2/N)*x[i,j]*(y[i]-(x[i,:]*b))
b = b -(a*b_gradient)
return b
def gradient_descent_runner(x,y,b,a,num_iterations,n):
for i in range(num_iterations):
b = step_gradient(x,y,b,a)
error = compute_totalerror(b,x,y,n)
print(error)
return b
def run ():
df = pd.read_csv("Book1.csv")
x= df.iloc[:,[0,1,2,3,4,5,6,7]].values
y = df.iloc[:,[8]].values
print(x[0,0])
n = len(x)
for i in range(n):
if x[i,0] == 'M':
x[i,0] = 1
if x[i,0] == 'F':
x[i,0] = 2
if x[i,0] == 'I':
x[i,0]= 3
print(x[:,0])
# hyperparameter alpha
a = 0.01
b = zeros(8)
num_iterations = 1000
b = gradient_descent_runner(x,y,b,a,num_iterations,n)
error = compute_totalerror(b,x,y,n)
print(b)
print(error)
if __name__ == '__main__':
run()
| true |
a3b4a5346007a868dfceab1ccf718c9d8d89f594 | Python | rawlini/STGAutomationProject | /challenges/Challenge4/Fibonnaci.py | UTF-8 | 116 | 2.59375 | 3 | [] | no_license | def Fibonnaci(Number):
a=0
b=1
i=1
while (i<= Number):
a,b= b+a,a
i=i+1
return b | true |
2c93bfe88c6de913a05e1299722404b14e7e7cab | Python | RajKGupta/awesome-python-codes | /alogrithms/sorting/InsertionSort.py | UTF-8 | 287 | 3.578125 | 4 | [] | no_license | def InsertionSort(l,len):
for i in range(1,len):
temp = l[i]
for j in range(i-1,-1,-1):
if temp<l[j]:
l[j+1]=l[j]
else:
break
l[j]=temp
l = [i for i in range(10,1,-1)]
InsertionSort(l,len(l))
print(l)
| true |
295cd193af2cd3e4d5d2c3bab31c895bfd474271 | Python | Langzzx/OMOOC2py | /_src/om2py3w/3wex0/client.py | UTF-8 | 236 | 2.75 | 3 | [
"MIT"
] | permissive | # Echo client program
import socket
HOST = 'daring.cwi.nl'
PORT = 50007
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
s.sendall('Hello World')
data = s.recv(1024)
s.close()
print 'Received', repr(data)
| true |
cbeb7c52b69ccb08063d53ce62a2cf21c57256d2 | Python | DragonofDwest/Tic-Tac-Toe | /coffee.py | UTF-8 | 6,085 | 3.59375 | 4 | [] | no_license | # water_tank = 400
# milk_tank = 540
# coffee_bean_bag = 120
# disposable_cups = 9
# cash = 550
#
#
# def stock():
# print(f"The coffee machine has: \n{water_tank} of water \n{milk_tank} of milk")
# print(f"{coffee_bean_bag} of coffee beans \n{disposable_cups} of disposable cups")
# print(f"{cash} of money")
#
#
# stock()
#
#
# operation = input('\nWrite action (buy, fill, take):\n>')
# if operation == "buy":
# coffee_maker = input('What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino: \n >')
# if coffee_maker == "1":
# disposable_cups -= 1
# water_tank -= 250
# milk_tank -= 0
# coffee_bean_bag -= 16
# cash += 4
# stock()
# elif coffee_maker == "2":
# disposable_cups -= 1
# water_tank -= 350
# milk_tank -= 75
# coffee_bean_bag -= 20
# cash += 7
# stock()
# elif coffee_maker == "3":
# disposable_cups -= 1
# water_tank -= 200
# milk_tank -= 100
# coffee_bean_bag -= 12
# cash += 6
# stock()
#
#
# elif operation == "take":
# print('i gave you $' + str(cash) + '\n')
# cash -= cash
# stock()
# elif operation == "fill":
# extra_water = int(input('Write how many ml of water do you want to add: \n>'))
# extra_milk = int(input('Write how many ml of milk do you want to add: \n>'))
# extra_coffee = int(input('Write how many grams of coffee beans do you want to add: \n>'))
# extra_cup = int(input('Write how many disposable cups of coffee do you want to add: \n>'))
#
# water_tank += extra_water
# milk_tank += extra_milk
# coffee_bean_bag += extra_coffee
# disposable_cups += extra_cup
# print('\n')
# stock()
class Coffee:
def __init__(self, water_tank=400, milk_tank=540, coffee_bean_bag=120, disposable_cups=9, cash=550):
self.water_tank = water_tank
self.milk_tank = milk_tank
self.coffee_bean_bag = coffee_bean_bag
self.disposable_cups = disposable_cups
self.cash = cash
def stock(self):
print(f"The coffee machine has: \n{self.water_tank} of water \n{self.milk_tank} of milk")
print(f"{self.coffee_bean_bag} of coffee beans \n{self.disposable_cups} of disposable cups")
print(f"{self.cash} of money")
def operate(self):
while True:
operation = input('\nWrite action (buy, fill, take, remaining, exit):\n>')
if operation == "buy":
coffee_maker = input('What do you want to buy? 1 - espresso, 2 - latte, 3 - cappuccino: \n >')
if coffee_maker == "1":
if self.water_tank < 250:
print("Sorry, not enough water!")
elif self.coffee_bean_bag < 16:
print("Sorry, not enough coffee beans!")
elif self.disposable_cups < 1:
print("Sorry, not enough disposable cups!")
else:
print("I have enough resources, making you a coffee!")
self.disposable_cups -= 1
self.water_tank -= 250
self.milk_tank -= 0
self.coffee_bean_bag -= 16
self.cash += 4
elif coffee_maker == "2":
if self.water_tank < 350:
print("Sorry, not enough water!")
elif self.milk_tank < 75:
print("Sorry, not enough milk!")
elif self.coffee_bean_bag < 20:
print("Sorry, not enough coffee beans!")
elif self.disposable_cups < 1:
print("Sorry, disposable cups")
else:
print("I have enough resources, making you a coffee!")
self.disposable_cups -= 1
self.water_tank -= 350
self.milk_tank -= 75
self.coffee_bean_bag -= 20
self.cash += 7
elif coffee_maker == "3":
if self.water_tank < 200:
print("Sorry, not enough water!")
elif self.milk_tank < 100:
print("Sorry, not enough milk!")
elif self.coffee_bean_bag < 12:
print("Sorry, not enough coffee beans!")
elif self.disposable_cups < 1:
print("Sorry, disposable cups")
else:
print("I have enough resources, making you a coffee!")
self.disposable_cups -= 1
self.water_tank -= 200
self.milk_tank -= 100
self.coffee_bean_bag -= 12
self.cash += 6
elif operation == "take":
print('i gave you $' + str(self.cash) + '\n')
self.cash -= self.cash
elif operation == "fill":
extra_water = int(input('Write how many ml of water do you want to add: \n>'))
extra_milk = int(input('Write how many ml of milk do you want to add: \n>'))
extra_coffee = int(input('Write how many grams of coffee beans do you want to add: \n>'))
extra_cup = int(input('Write how many disposable cups of coffee do you want to add: \n>'))
self.water_tank += extra_water
self.milk_tank += extra_milk
self.coffee_bean_bag += extra_coffee
self.disposable_cups += extra_cup
print('\n')
elif operation == "remaining":
self.stock()
elif operation == "exit":
break
coffee_machine = Coffee()
coffee_machine.operate()
| true |
f19c01f1d4a46e1493c21e7ba0c183accf9cf84c | Python | lthoangg/FlappyBird | /main.py | UTF-8 | 1,821 | 3.09375 | 3 | [] | no_license | import pygame
import os
import bird, background
import Pipe
import Base
pygame.font.init()
STAT_FONT = pygame.font.SysFont("comicsans", 50)
def draw(win, bird, bg, pipe, base, score):
bg.draw(win)
for p in pipe:
p.draw(win)
bird.draw(win)
base.draw(win)
text = STAT_FONT.render("Score: " + str(score), 1, (255, 255, 255))
win.blit(text, (500 - 10 - text.get_width(), 10))
pygame.display.update()
def move(win, bird, pipe, base):
bird.move()
bird.keyListener()
for p in pipe:
p.move()
base.move()
def main():
pygame.init()
width = 500
height = 800
WIN = pygame.display.set_mode((width,height))
pygame.display.set_icon(pygame.image.load(os.path.join("imgs","bird1.png")))
pygame.display.set_caption("Flappy Bird")
run = True
clock = pygame.time.Clock()
b = bird.Bird()
bg = background.bg()
pipe = Pipe.Pipe()
pipes = [pipe]
base = Base.Base()
score = 0
while run:
clock.tick(30)
for event in pygame.event.get():
if event.type == pygame.QUIT:
print("Your score: " + str(score))
run = False
pygame.quit()
quit()
add_pipe = False
rem = []
for p in pipes:
if p.collide(b):
print("Your score: " + str(score))
run = False
if not p.passed and p.x < b.x:
p.passed = True
add_pipe = True
if p.x + p.pipe_top.get_width() < 0:
rem.append(p)
if add_pipe:
score += 1
pipes.append(Pipe.Pipe())
for r in rem:
pipes.remove(r)
move(WIN, b, pipes, base)
draw(WIN, b, bg, pipes, base, score)
main() | true |
99ab38236ff9b2cd4d64122fdea945a13421ee45 | Python | underseatravel/AlgorithmQIUZHAO | /Week_03/529_update_minesweeper.py | UTF-8 | 1,094 | 3.203125 | 3 | [] | no_license | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/7/29 18:47
# @Author : weiyu
# @File : 529_update_minesweeper.py
class Solution:
def updateBoard(self, board, click):
if not board: return []
i, j = click[0], click[1]
if board[i][j] == "M":
board[i][j] = "X"
return board
self.dfs(board, i, j)
return board
def dfs(self, board, i, j):
if board[i][j] != "E":
return
m, n = len(board), len(board[0])
directions = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]
mine_count = 0
for d in directions:
ni, nj = i + d[0], j + d[1]
if 0 <= ni < m and 0 <= nj < n and board[ni][nj] == "M":
mine_count += 1
if mine_count == 0:
board[i][j] = "B"
else:
board[i][j] = str(mine_count)
return
for d in directions:
ni, nj = i + d[0], j + d[1]
if 0 <= ni < m and 0 <= nj < n :
self.dfs(board, ni, nj)
| true |
679870777cc23dec7b0e40ffc3bdbaa9c682bf24 | Python | wheirman/PinComm | /dicts.py | UTF-8 | 1,925 | 2.984375 | 3 | [] | no_license | # $Id: dicts.py 6232 2010-03-01 17:16:57Z wheirman $
import time
try:
from collections import defaultdict
# Try to derive DDict from collections.defaultdict (only in Python 2.5+)
# If defaultdict exists, we just need to override __missing__ to implement childArgs, childKwds, init_with_key,
# while the most time-consuming part (the normal __getitem__ for non-missing keys) is done in C
class DDict(defaultdict):
"""Directory with defaults: get of non-existing item initializes item with default value and returns it."""
def __init__(self, childObject, *childArgs, **childKwds):
self.childObject = childObject
self.childArgs = childArgs
self.childKwds = childKwds
if 'init_with_key' in childKwds:
self.init_with_key = childKwds['init_with_key']
del childKwds['init_with_key']
else:
self.init_with_key = False
def __missing__(self, name):
if self.init_with_key:
self[name] = self.childObject(name, *self.childArgs, **self.childKwds)
else:
self[name] = self.childObject(*self.childArgs, **self.childKwds)
return self[name]
except:
class DDict(dict):
"""Directory with defaults: get of non-existing item initializes item with default value and returns it."""
def __init__(self, childObject, *childArgs, **childKwds):
self.childObject = childObject
self.childArgs = childArgs
self.childKwds = childKwds
if 'init_with_key' in childKwds:
self.init_with_key = childKwds['init_with_key']
del childKwds['init_with_key']
else:
self.init_with_key = False
def __getitem__(self, name):
if not name in self:
if self.init_with_key:
self[name] = self.childObject(name, *self.childArgs, **self.childKwds)
else:
self[name] = self.childObject(*self.childArgs, **self.childKwds)
return dict.__getitem__(self, name)
| true |
62addfe60c5a4e389c6475fac8c7b21c8fca26e6 | Python | Divan009/Machine-Learning-Projects | /house_prices.py | UTF-8 | 4,176 | 2.71875 | 3 | [] | no_license | <<<<<<< HEAD
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 8 16:43:26 2018
@author: lenovo
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
dataset = pd.read_csv('train.csv')
y = dataset.SalePrice
iowa_predictors = ['LotArea','YearBuilt','1stFlrSF', '2ndFlrSF','BedroomAbvGr','TotRmsAbvGrd','FullBath']
X = dataset[iowa_predictors]
#splitting data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 0)
# =============================================================================
# from sklearn.tree import DecisionTreeRegressor
# #define model
# iowa_model = DecisionTreeRegressor()
# #fit model
# iowa_model.fit(X_train,y_train)
#
# print("Making predictions for 5 houses:")
# print(X.head())
# print("The prices for each house are")
# print(iowa_model.predict(X.head()))
#
# =============================================================================
#MAE predicting averaage error
from sklearn.metrics import mean_absolute_error
# =============================================================================
# def get_mae(max_leaf_nodes, predictors_train, predictors_val, targ_train, targ_val):
# model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
# model.fit(predictors_train, targ_train)
# preds_val = model.predict(predictors_val)
# mae = mean_absolute_error(targ_val,preds_val)
# return(mae)
#
# for max_leafs_nodes in [5,50,500,5000]:
# my_mae = get_mae(max_leafs_nodes, X_train, X_test, y_train, y_test)
# print("Max leaf nodes:%d \t\t MAE:%d" %(max_leafs_nodes, my_mae))
# =============================================================================
#the output we got is called "In-Sample" score
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
forest_model = RandomForestRegressor()
forest_model.fit(X_train, y_train)
iowa_preds = forest_model.predict(X_test)
print(mean_absolute_error(y_test, iowa_preds))
=======
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 8 16:43:26 2018
@author: lenovo
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
dataset = pd.read_csv('train.csv')
y = dataset.SalePrice
iowa_predictors = ['LotArea','YearBuilt','1stFlrSF', '2ndFlrSF','BedroomAbvGr','TotRmsAbvGrd','FullBath']
X = dataset[iowa_predictors]
#splitting data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 0)
# =============================================================================
# from sklearn.tree import DecisionTreeRegressor
# #define model
# iowa_model = DecisionTreeRegressor()
# #fit model
# iowa_model.fit(X_train,y_train)
#
# print("Making predictions for 5 houses:")
# print(X.head())
# print("The prices for each house are")
# print(iowa_model.predict(X.head()))
#
# =============================================================================
#MAE predicting averaage error
from sklearn.metrics import mean_absolute_error
# =============================================================================
# def get_mae(max_leaf_nodes, predictors_train, predictors_val, targ_train, targ_val):
# model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
# model.fit(predictors_train, targ_train)
# preds_val = model.predict(predictors_val)
# mae = mean_absolute_error(targ_val,preds_val)
# return(mae)
#
# for max_leafs_nodes in [5,50,500,5000]:
# my_mae = get_mae(max_leafs_nodes, X_train, X_test, y_train, y_test)
# print("Max leaf nodes:%d \t\t MAE:%d" %(max_leafs_nodes, my_mae))
# =============================================================================
#the output we got is called "In-Sample" score
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
forest_model = RandomForestRegressor()
forest_model.fit(X_train, y_train)
iowa_preds = forest_model.predict(X_test)
print(mean_absolute_error(y_test, iowa_preds))
>>>>>>> 845e05a278f14e402e537c23ca31b35f235caf25
| true |
b760e9893113d60cdc0fae118f506114a6eed7fb | Python | francoisverges/semfio-mist | /Learn Mist API with Mini Scripts/Claim AP/claim-ap.py | UTF-8 | 2,110 | 3.125 | 3 | [] | no_license | #!/usr/bin/env python3
"""
Written by Francois Verges (@VergesFrancois)
Created on: May 12, 2020
This script claims an AP within your Mist Organization of choice
All the configuration details are coming from the 'config.json' file
"""
import argparse
import time
import json
import requests
def claim_ap(configs):
"""
This function claims an AP to an organization
API Call Used: POST https://api.mist.com/api/v1/orgs/:org_id/inventory
Parameters:
- configs: Dictionary containing all configurations information
Returns:
- ID of the AP
"""
data_post = f"[\"{configs['ap']['claim-code']}\"]"
api_url = f"{configs['api']['mist_url']}orgs/{configs['api']['org_id']}/inventory"
headers = {'Content-Type': 'application/json',
'Authorization': f"Token {configs['api']['token']}"}
response = requests.post(api_url, data=data_post, headers=headers)
claim_response = json.loads(response.content.decode('utf-8'))
# print(json.dumps(claim_response, indent=4, sort_keys=True))
if claim_response['error']:
print(f"ERROR: The AP was NOT claimed.\t\t Reason: {claim_response['reason'][0]}")
elif claim_response['inventory_added']:
print(f"{configs['ap']['mac']} AP has been claimed to organization {configs['api']['org_id']}")
elif claim_response['duplicated']:
print(f"{configs['ap']['mac']} AP has already been claimed to this organization.")
return()
def main():
"""
This function claims a Mist AP to a specific Organization
"""
parser = argparse.ArgumentParser(description='Creates a Mist site within your organization')
parser.add_argument('config', metavar='config_file', type=argparse.FileType(
'r'), help='file containing all the configuration information')
args = parser.parse_args()
configs = json.load(args.config)
claim_ap(configs)
if __name__ == '__main__':
start_time = time.time()
print('** Claiming Mist AP...\n')
main()
run_time = time.time() - start_time
print("")
print("** Time to run: %s sec" % round(run_time, 2))
| true |
265da68c3d1d77776024dedf7c658786501bbf03 | Python | kchendil/mypython | /casting.py | UTF-8 | 171 | 3.0625 | 3 | [] | no_license | print(str("2"))
print(str("s2"))
print(str(3.0))
print(float(1))
print(float(2.8))
print(float("3"))
print(float("4.2"))
print(int(1))
print(int(2.8))
print(int("3"))
| true |
d01feaa59db42d99d05f2f9fccee30e118be0b2c | Python | wontaechoi/algorithm | /BOJ/청소년 상어.py | UTF-8 | 1,909 | 2.890625 | 3 | [] | no_license | import copy
def sol(new_loc, fishes, ocean, fish_dic, count):
global answer
N = 4
x, y = new_loc
fish_n, direction = ocean[x][y]
new_fishes = []
for f_n, d in fishes:
if f_n != fish_n:
new_fishes.append((f_n, d))
fishes = new_fishes
count += fish_n
ocean[x][y] = (-1, direction)
for i, (f_n, d) in enumerate(fishes):
x, y = fish_dic[f_n]
while x + dx[d] < 0 or x + dx[d] >= N or y + dy[d] < 0 or y + dy[d] >= N or ocean[x + dx[d]][y + dy[d]][0] == -1:
d = 0 if d == 7 else d + 1
new_x = x + dx[d]
new_y = y + dy[d]
temp = ocean[new_x][new_y]
ocean[new_x][new_y] = (f_n, d)
fish_dic[f_n] = (new_x, new_y)
fishes[i] = (f_n, d)
ocean[x][y] = temp
fish_dic[temp[0]] = (x, y)
x, y = new_loc
possible = []
while x + dx[direction] >= 0 and x + dx[direction] < N and y + dy[direction] >= 0 and y + dy[direction] < N:
if ocean[x+dx[direction]][y+dy[direction]] != (0,0):
possible.append((x + dx[direction], y + dy[direction]))
x = x + dx[direction]
y = y + dy[direction]
if not possible:
answer = max(answer, count)
return
x, y = new_loc
ocean[x][y] = (0,0)
for pos in possible:
sol(pos, copy.deepcopy(fishes), copy.deepcopy(ocean), copy.deepcopy(fish_dic), count)
fishes = []
ocean =[]
dx = [-1, -1, 0, 1,1,1, 0, -1]
dy = [0, -1, -1, -1, 0, 1,1,1]
fish_dic = {}
for i in range(1, 17):
fish_dic[i] = ''
for i in range(4):
row = list(map(int, input().split()))
temp = []
for j in range(0, len(row), 2):
fishes.append((row[j], row[j+1]-1))
temp.append((row[j], row[j+1]-1))
fish_dic[row[j]] = (i, j//2)
ocean.append(temp)
fishes.sort(key = lambda x : x[0])
answer = 0
sol((0,0), fishes, ocean, fish_dic, 0)
print(answer) | true |
63fdeaa67d7aba6bf2fe98536b7ad8c00952bc62 | Python | JDiogoBSouza/MQTTIoT | /Publisher/MQTTPublisher.py | UTF-8 | 1,534 | 3 | 3 | [] | no_license | import paho.mqtt.client as mqtt
import time
import Adafruit_DHT
# Sensor should be set to Adafruit_DHT.DHT11,
# Adafruit_DHT.DHT22, or Adafruit_DHT.AM2302.
sensor = Adafruit_DHT.DHT11
# Example using a Raspberry Pi with DHT sensor
# connected to GPIO23.
pin = 23
# MQTT topics for temperature and humidity, and the public broker host.
temperatura_topic = "/i0t1md/temperatura"
umidade_topic = "/i0t1md/umidade"
broker_url = "iot.eclipse.org"
def mqtt_client_connect():
    """Connect the module-level `client` to `broker_url` and start the
    paho-mqtt background network loop thread."""
    print("conectando ao broker: ", broker_url)
    client.connect(broker_url) # Connect to the message broker
    client.loop_start() # Start the background network loop
# Create the client, connect to the broker, then publish a sensor
# reading every 10 seconds forever.
client = mqtt.Client("Raspberry")  # Create a new client instance
mqtt_client_connect()  # Connect to the broker and start the network loop

while True:
    # read_retry retries up to 15 times (waiting 2 seconds between
    # attempts) to get a reading from the DHT sensor.
    humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
    # A reading can fail (Linux cannot guarantee the timing of the
    # sensor protocol); both values come back as None in that case.
    if humidity is not None and temperature is not None:
        # BUG FIX: .format() was called on print()'s return value (None),
        # which raised AttributeError at runtime; it must be called on
        # the string before printing.
        print("Publicando no Topico {} : {}*C".format(temperatura_topic, temperature))
        print("Publicando no Topico {} : {}%".format(umidade_topic, humidity))
        client.publish(temperatura_topic, temperature)  # publish to topic 1
        client.publish(umidade_topic, humidity)  # publish to topic 2
    else:
        print('Failed to get reading. Try again!')
    time.sleep(10)
eab32a9c46162f54b900e714cf40e7f9ed7448c8 | Python | csu-hmc/inverted-pendulum-sys-id-paper | /src/indirect_shooting.py | UTF-8 | 4,876 | 3.109375 | 3 | [
"CC-BY-4.0"
] | permissive | """
1. Build a function for non-linear closed loop ODEs and cache it to disk in
binary form. Maybe use joblib and/or Bjorn's stuff. The model constants
can be hard coded. This function should evaluate as fast as possible.
2. Choose and initial guess for the gains.
3. Create an objective function: minimize difference in angles (and angular
rates?). The args are the gains (and the initial state?), the function
then simulates the system and computes the objective value.
4. Use scipy.optimize.minimize and try out different methods.
"""
import multiprocessing as mp
import numpy as np
from scipy.integrate import odeint
from scipy.optimize import minimize
import cma
# TODO : Make sure that we are simulating with the MEASURED platform
# acceleration. The identification simluations should be using the measured
# values not the actual values.
def sum_of_squares(measured_states, simulated_states, interval=1.0):
    """Sum of squared differences between measured and simulated states.

    Parameters
    ----------
    measured_states : array_like, shape(n, 4)
        The measured state trajectories.
    simulated_states : array_like, shape(n, 4)
        The simulated state trajectories.
    interval : float, optional
        Scale factor applied to the total (e.g. the sample interval).

    Returns
    -------
    float
        ``interval`` times the sum of the squared element-wise
        differences between the two trajectories.

    """
    residual = measured_states - simulated_states
    return interval * np.sum(residual ** 2)
def objective(gain_matrix, model, rhs, initial_conditions, time_vector,
              rhs_args, measured_state_trajectory):
    """Simulate the closed-loop system with the given gains and return the
    sum-of-squares error against the measured trajectory.

    Parameters
    ==========
    gain_matrix : array_like, shape(2, 4) or shape(8,)
        K = [k_00, k_01, k_02, k_03]
            [k_10, k_11, k_12, k_13]
        A flat vector is reshaped to (2, 4) before use.

    Returns
    =======
    float
        The shooting objective value (lower is a better fit).
    """
    print('Shooting...')
    print('Trying gains: {}'.format(gain_matrix))
    # Optimizers pass a flat vector; restore the (2, 4) gain matrix.
    if len(gain_matrix.shape) == 1:
        gain_matrix = gain_matrix.reshape(2, 4)
    # Side effect: the candidate gains are installed on the model so the
    # rhs function picks them up during integration.
    model.scaled_gains = gain_matrix
    model_state_trajectory = odeint(rhs,
                                    initial_conditions,
                                    time_vector,
                                    args=rhs_args)
    s = sum_of_squares(measured_state_trajectory, model_state_trajectory)
    print('Objective = {}'.format(s))
    return s
def identify(time, measured_states, rhs, rhs_args, model, method='SLSQP',
             initial_guess=None, tol=1e-8):
    """Identify controller gains by shooting against measured data.

    Parameters
    ==========
    time : ndarray, shape(n,)
        The monotonically increasing time vector.
    measured_states : ndarray, shape(n, 4)
        The measured state variables.
    rhs : function
        A function, f(x, t, r, p), that evaluates the right hand side of the
        ordinary differential equations describing the closed loop system.
    rhs_args : tuple
        The specified input and the constants.
    model : QuietStandingModel
    method : string, optional
        Any method available in scipy.optimize.minimize or 'CMA'.
    initial_guess : ndarray, shape(8,), optional
        The initial guess for the gains.

    Returns
    =======
    gains : ndarray, shape(8,)
        The flattend gain matrix (unscaled via model.gain_scale_factors).

    """

    # Simulations always start from the zero state.
    x0 = np.zeros(4)

    if initial_guess is None:
        initial_guess = np.zeros_like(model.scaled_gains.copy())
        #initial_guess = model.scaled_gains.copy()

    if method == 'CMA':
        # Initial standard deviation for the CMA-ES search distribution.
        sigma = 0.125
        # NOTE : The objective function needs to be importable from this
        # module to work with multiprocessing. Making it a global allows it
        # to inherit all the variables from inside the identify function and
        # be importable. This shows a more elegant solution than making the
        # function a global: http://stackoverflow.com/a/16071616/467314
        global obj

        def obj(gains):
            return objective(gains, model, rhs, x0, time, rhs_args,
                             measured_states)

        # This method of parallelization is taken from the cma.py docstring
        # for CMAEvolutionStrategy.
        es = cma.CMAEvolutionStrategy(initial_guess.flatten(), sigma,
                                      {'tolx': tol})
        # One worker per population member; each candidate is evaluated
        # in parallel.
        pool = mp.Pool(es.popsize)
        while not es.stop():
            # TODO : This gains is a group of gains for each iteration.
            gains = es.ask()
            f_values = pool.map_async(obj, gains).get()
            es.tell(gains, f_values)
            es.disp()
            es.logger.add()
        # NOTE(review): after the loop, `gains` is the last *population*
        # (a list of candidate vectors), not the CMA best solution
        # (es.result); the final scaling below looks wrong for this
        # branch -- confirm against the cma API.
    else:
        result = minimize(objective,
                          initial_guess,
                          method=method,
                          args=(model, rhs, x0, time, rhs_args,
                                measured_states),
                          tol=tol,
                          options={'disp': True})
        gains = result.x.flatten()

    # Undo the model's gain scaling before returning.
    return model.gain_scale_factors.flatten() * gains
| true |
9e42fb156a1fd0586747c593da18b06033039e47 | Python | AndrewBatty/Variables | /VIE_development_exercise3.py | UTF-8 | 482 | 4.40625 | 4 | [] | no_license | # Andrew Batty
# 23/09/14
# Development Exercise 3
# Prompt for imperial height/weight and print the metric equivalents.
print("This program will convert your height from inches into centimetres, and your weight from stone into kilograms")
height = float(input("Please enter your Height in inches: "))
weight = float(input("please enter your Weight in stone: "))
# 1 inch = 2.54 cm (exact); 1 stone = 14 lb = 6.35029 kg.
# BUG FIX: the original multiplied by 6.364, which is not the
# stone-to-kilogram conversion factor.
cmHeight = height * 2.54
kgWeight = weight * 6.35029
print("Your Height in centimetres is: {0}".format(cmHeight))
print("Your Weight in kilograms is: {0}".format(kgWeight))
| true |
2ea31af1281a08b6847ade95c401ee48f4f1b1a8 | Python | gitlearn212/My-Python-Lab | /Ud/finding_types.py | UTF-8 | 341 | 3.53125 | 4 | [] | no_license | #print(type(2 + 2)) # prints <class 'int'>
#print(type(2 + 0.2)) # prints <class 'float'>
#print(type(2 ** .2)) # prints <class 'int'>
#print(abs(-20)) # abs means not a negative number
#print(bin(5)) # get 0b101 also check for bin number for 5 in google
#print(int('0b101', 2)) converts binnary in to hexdecimal
#iq = 190
#print(bin(iq)) | true |
c451820f4b88a88cc20e8e08699b43b4f455858f | Python | qeedquan/misc_utilities | /snippets/python/numpy/cov.py | UTF-8 | 760 | 3.203125 | 3 | [
"MIT"
] | permissive | import numpy as np
def cov(x, y, xm, ym, n):
    """Sample covariance of sequences *x* and *y*.

    *xm* and *ym* are the precomputed means of the two sequences and
    *n* the sample count; the unbiased (n - 1) denominator is used.
    """
    total = 0.0
    for i, xi in enumerate(x):
        total += (xi - xm) * (y[i] - ym)
    return total / (n - 1)
# Three measurement series whose 3x3 covariance matrix is printed below.
X = np.array([4, 4.2, 3.9, 4.3, 4.1])
Y = np.array([2, 2.1, 2.0, 2.1, 2.2])
Z = np.array([0.6, 0.59, 0.58, 0.62, 0.63])
xm = np.average(X)
ym = np.average(Y)
zm = np.average(Z)
n = 5
# Covariance matrix entries.  Each call must pass the means of the two
# series being compared; diagonal entries use the same mean twice.
# BUG FIX: xx previously passed ym (Y's mean) for the second X series,
# producing a wrong variance on the diagonal.
xx = cov(X, X, xm, xm, n)
xy = cov(X, Y, xm, ym, n)
xz = cov(X, Z, xm, zm, n)
yx = cov(Y, X, ym, xm, n)
yy = cov(Y, Y, ym, ym, n)
yz = cov(Y, Z, ym, zm, n)
zx = cov(Z, X, zm, xm, n)
zy = cov(Z, Y, zm, ym, n)
zz = cov(Z, Z, zm, zm, n)
print("Mean {:.5f} {:.5f} {:.5f}".format(xm, ym, zm))
print("{:.5f} {:.5f} {:.5f}".format(xx, xy, xz))
print("{:.5f} {:.5f} {:.5f}".format(yx, yy, yz))
print("{:.5f} {:.5f} {:.5f}".format(zx, zy, zz))
| true |
9ca60da42961cb153f16fb9b33daeda22b400fb3 | Python | gabriellaec/desoft-analise-exercicios | /backup/user_242/ch171_2020_06_15_19_57_42_949153.py | UTF-8 | 576 | 3.859375 | 4 | [] | no_license | class Carrinho:
def __init__(self):
self.produtos = {}
def adiciona(self,produto, valor):
if produto in self.produtos:
self.produtos[produto] += valor
else:
self.produtos[produto] = valor
def total_do_produto(self, produto):
return self.produtos[produto]
# Demo: exercise the cart exactly as described in the exercise statement.
c = Carrinho()
c.adiciona('banana', 5)
total_banana = c.total_do_produto('banana')
print(total_banana) # Will print 5
c.adiciona('abacate', 7)
c.adiciona('banana', 4)
total_banana = c.total_do_produto('banana')
print(total_banana) # Will print 9
7be05a91b456f2c57805c67c2f76654e8087f539 | Python | martin284/restaurant_app | /main.py | UTF-8 | 505 | 2.921875 | 3 | [] | no_license | import model
if __name__ == "__main__":
# hamburger = model.FoodItem('Hamburger', 'main', 'beef', 12)
# cheeseburger = model.FoodItem('Cheeseburger', 'main', 'beef', 13)
# veggie_burger = model.FoodItem('Veggie-Burger', 'main', 'vegan', 10)
# veggie_burger.add_special_wishes('with double beef')
# order = model.Order()
# order.add_food_item(hamburger)
# order.add_food_item(cheeseburger)
# order.add_food_item(veggie_burger)
menu = model.Menu()
menu.show_menu()
| true |
5d5bf0e8b0e4d06fba83f183ca60ebf8195b743d | Python | AtomicWasp/PiWars2015 | /Code/speed.py | UTF-8 | 2,467 | 3.453125 | 3 | [] | no_license | #!/usr/bin/python
# speed.py (Hand-controlled Speed. Use the forward of the left trigger to move robot forwards at full velocity. Use the left and right of the right trigger to correct) by George P Tuli of Positronic for Pi Wars 2015.
# Import the required libraries.
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor
import xbox
import time
import atexit
# Define the motor HAT object.
mh = Adafruit_MotorHAT(addr=0x60)
# Define the xbox controller object.
pad = xbox.Joystick()
# Auto-disable motors.
def turnOffMotors():
    """Release all four motor channels and close the gamepad.

    Registered with atexit below so the robot always stops cleanly.
    """
    mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
    mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
    mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
    mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
    pad.close()
# Set the exit function to disable motors.
# Ensure the motors are released even if the program crashes or exits.
atexit.register(turnOffMotors)
# Setup the 4 motors.
M1 = mh.getMotor(1) # Front-left (A).
M2 = mh.getMotor(2) # Front-right (B).
M3 = mh.getMotor(3) # Back-right (C).
M4 = mh.getMotor(4) # Back-left (D).
### Motors 1 and 3 must be wired in reverse for the standard setup to function correctly (swap + and - wires). ###
# Clamp motor values to within the range minn to maxn.
def clamp(n, minn, maxn):
    """Clamp *n* into the closed interval [minn, maxn]."""
    capped = min(maxn, n)
    return max(capped, minn)
def run_motor(motor, speed):
    """Drive *motor* at the signed *speed* in [-255, 255]: positive runs
    forwards; zero or negative runs backwards at abs(speed)."""
    if speed > 0:
        motor.run(Adafruit_MotorHAT.FORWARD)
        motor.setSpeed(speed)
    else:
        motor.run(Adafruit_MotorHAT.BACKWARD)
        motor.setSpeed(speed * -1)

# Start reading the controller input.
while True:
    # Stick axes scaled to the motor range [-255, 255].
    xVelocity = pad.rightX(deadzone = 12000) * 255
    yVelocity = pad.leftY(deadzone = 12000) * 255
    rotation = pad.leftX(deadzone = 12000) * 255
    # Any forward push on the left stick means full speed ahead.
    if yVelocity > 0:
        yVelocity = 255
    # Mecanum-style mixing: combine the axes per wheel, then clamp.
    A = int(clamp((xVelocity + yVelocity + rotation) * 2, -255, 255)) # Front-left.
    B = int(clamp((xVelocity + (yVelocity * -1) + rotation) * 2, -255, 255)) # Front-right.
    C = int(clamp(((xVelocity * -1) + (yVelocity * -1) + rotation) * 2, -255, 255)) # Back-right.
    D = int(clamp(((xVelocity * -1) + yVelocity + rotation) * 2, -255, 255)) # Back-left.
    # Drive each wheel (the four copy-pasted branches are now one helper).
    run_motor(M1, A)
    run_motor(M2, B)
    run_motor(M3, C)
    run_motor(M4, D)
| true |
9f7fc989de308a4f4d06e506627a5ab29ce756eb | Python | Aasthaengg/IBMdataset | /Python_codes/p02665/s347533666.py | UTF-8 | 1,151 | 2.96875 | 3 | [] | no_license | import math
def main():
    """AtCoder ABC 169 F-style tree check: given the leaf counts per depth,
    print the maximum possible total number of nodes, or -1 if no binary
    tree matches (reads n and A from stdin, prints the answer)."""
    n = int(input())
    A = list(map(int, input().split()))
    # max_node[i]/min_node[i]: most/fewest nodes allowed at depth i,
    # computed bottom-up from the leaf counts.
    max_node = [0 for i in range(n+1)]
    min_node = [0 for i in range(n+1)]
    res = 0
    for i in range(n, -1, -1):
        if i == n:
            # The deepest level consists only of its leaves.
            max_node[i] = A[i]
            min_node[i] = A[i]
        elif i == 0:
            # The root level is always exactly one node.
            max_node[i] = 1
            min_node[i] = 1
        elif i == 1 and n != 1:
            # Directly below the root there can be at most two children.
            max_node[i] = 2
            min_node[i] = math.ceil(min_node[i+1] /2) + A[i]
        else:
            # Internal level: leaves here plus parents of the next level.
            max_node[i] = max_node[i+1] + A[i]
            min_node[i] = math.ceil(min_node[i+1] / 2) + A[i]
    # Top-down pass: cap each level by twice the internal nodes above it
    # and detect infeasibility.
    for i in range(n):
        if i == 0:
            if n != 0 and A[i] != 0:
                res = -1
                break
        else:
            if max_node[i] > 2 * (max_node[i-1] - A[i-1]):
                max_node[i] = 2 * (max_node[i-1] - A[i-1])
            if max_node[i] < min_node[i]:
                res = -1
                break
    if res == -1:
        print(res)
    else:
        # NOTE(review): this reuses the loop variable `i` after the loop;
        # when n == 0 the first (bottom-up) loop leaves i == 0, so A[i] is
        # A[0] -- confirm this is the intended depth-0 leaf check.
        if n == 0 and A[i] != 1:
            print(-1)
        else:
            print(sum(max_node))

if __name__ == '__main__':
    main()
75e0000eadb0be59c70624a7f18d39381d95c87a | Python | yunyusha/xunxibiji | /month1/te_week3/test/DBtest.py | UTF-8 | 1,445 | 2.671875 | 3 | [] | no_license | import pymysql
class DB(object):
    """Thin wrapper around a pymysql connection/cursor pair with
    insert/select/delete/update helpers that commit automatically."""

    def __init__(self, **kw):
        """Open the connection.

        Expected keys in ``kw``: host, port, user, password, database,
        and ``dict`` (True -> rows are returned as dicts, otherwise
        tuples).
        """
        self.__conn = pymysql.connect(
            host = kw['host'],
            port = kw['port'],
            user = kw['user'],
            password = kw['password'],
            database = kw['database'],
            charset = 'utf8'
        )
        if kw['dict'] is True:
            # Cursor whose rows come back as dicts keyed by column name.
            self.__cursor = self.__conn.cursor(pymysql.cursors.DictCursor)
        else:
            # Default cursor: rows come back as tuples.
            self.__cursor = self.__conn.cursor()

    def insert(self, sql, data):
        """Insert many rows at once; returns the affected-row count."""
        result = self.__cursor.executemany(sql, data)
        # Commit so the change is actually persisted.
        self.__conn.commit()
        return result

    def select(self, sql, data=()):
        """Run a query and return all matching rows."""
        self.__cursor.execute(sql, args=data)
        # BUG FIX: was self.__cursor.fatchall(), a typo for fetchall()
        # that raised AttributeError on every select.
        return self.__cursor.fetchall()

    def delete(self, sql, data):
        """Delete rows; returns the affected-row count."""
        result = self.__cursor.execute(sql, args=data)
        self.__conn.commit()
        return result

    def update(self, sql, data):
        """Update rows; returns the affected-row count."""
        result = self.__cursor.execute(sql, args=data)
        self.__conn.commit()
        # Return the count for consistency with insert()/delete()
        # (the original silently dropped it).
        return result

    def commom(self, sql):
        """Execute a raw statement (misspelled name kept for callers)."""
        result = self.__cursor.execute(sql)
        return result
3e0d46b69a65678c3233a7231ed3fa898c9b7ed5 | Python | simrit1/TenBagger | /tenbagger/src/portfolio/core.py | UTF-8 | 2,714 | 3.015625 | 3 | [
"Apache-2.0"
] | permissive | from tenbagger.src.utils.utilities import read_yaml, Ticker, make_percentage
from tenbagger.src.portfolio.crypto import PortfolioCrypto
from currency_converter import CurrencyConverter
import yfinance as yf
import datetime
import pandas as pd
from tqdm import tqdm
class Portfolio(PortfolioCrypto):
    """Holds one named portfolio and builds a per-ticker DataFrame of
    prices, values, yields and passive income via yfinance."""

    def __init__(self, name_port):
        # name_port may be a portfolio name (looked up in the YAML file)
        # or an already-built {ticker: amount} dict.
        super().__init__()
        self.name_port = name_port
        self.portfolio = self._select()
        self.env = read_yaml('user_data/env/environment.yaml')

    def _select(self):
        """Resolve name_port to a {ticker: amount} mapping."""
        if isinstance(self.name_port, dict):
            portfolio = self.name_port
        else:
            portfolio = read_yaml('user_data/portfolio/portfolio.yaml')[self.name_port]
        return portfolio

    def get_portfolio(self):
        """Fetch one row of market data per ticker into self.df."""
        res = []
        day = datetime.date.today()
        for ticker in tqdm(self.portfolio):
            t = yf.Ticker(ticker)
            df = pd.DataFrame()
            df['date'] = [day]
            df['ticker'] = [ticker]
            # Network call: yfinance info dict for the ticker.
            info = t.info
            df['price'] = Ticker(ticker).last_price()
            df['amount'] = [self.portfolio[ticker]]
            df["currency"] = info["currency"]
            df['circulatingSupply'] = info['circulatingSupply']
            df['type'] = info['quoteType']
            if info["dividendYield"]:
                df["yield"] = info["dividendYield"]
            else:
                df["yield"] = None
            # NOTE(review): bare except -- presumably meant to catch a
            # missing 'sector' key for crypto assets, but it swallows
            # every error; confirm and narrow to KeyError.
            try:
                df['sector'] = info['sector']
            except:
                df['sector'] = 'Crypto'
            res.append(df)
        self.df = pd.concat(res)

    def unification(self):
        """Normalize currencies and derive value/percentage/income columns."""
        self.get_portfolio()
        df = self.df
        # Convert every price to the configured target currency.
        c = CurrencyConverter()
        df['price'] = df.apply(lambda x: c.convert(x.price, x.currency, self.env["CURRENCY"]), axis=1)
        df['value'] = df.price * df.amount
        # Get staking rewards (adds a 'staking_rewards' column upstream).
        df = self.staking_rewards(self.df)
        # Calculate each position's share of the total value.
        df['percentage'] = df.value / df.value.sum()
        # Formatting as a percent string.
        df['percentage'] = df.percentage.apply(lambda x: "{:.2%}".format(x))
        df = df.sort_values('value', ascending=False)
        df["dividends"] = df["yield"] * df.price * df.amount
        # NOTE(review): fills 'dividends' and renames 'staking_rewards'
        # into 'passive_income' via mismatched column lists -- looks
        # intentional but fragile; confirm against PortfolioCrypto.
        df[['dividends', 'passive_income']] = df[['dividends', 'staking_rewards']].fillna(0)
        # Passive income is the larger of dividends and staking rewards.
        df['passive_income'] = df[['dividends', 'passive_income']].max(axis=1)
        self.df = df
# Manual smoke test: build the test portfolio and print the unified table.
if __name__ == "__main__":
    # Show full DataFrame rows instead of wrapping columns.
    pd.set_option("expand_frame_repr", False)
    d = Portfolio('test_calculator')
    d.unification()
    print(d.df)
    make_percentage(df=d.df, value='value', groupby='sector')
ffc8b093943711304f7d212a78e33395524d3ecc | Python | alando93/Xlsx-Python | /newCombatants.py | UTF-8 | 2,796 | 2.890625 | 3 | [] | no_license |
# coding: utf-8
# In[99]:
import openpyxl
import pandas as pd

# Load the workbook of combatant sheets and the character roster CSV.
#filename = input('Enter workbook filename: ')
filename = 'Marvel v2 ProgressionCombatant.xlsx'
wb = openpyxl.load_workbook(filename)
characterlist = 'characterlist.csv'
df = pd.read_csv(characterlist, sep=',',header=0)
# In[100]:

# Group characters by rig (males first, then females), sort by id, and
# drop rows with missing data.  NOTE: DataFrame.append is deprecated in
# modern pandas (use pd.concat).
dfmale = df[df['Rig'] == 'Male']
dffemale = df[df['Rig'] == 'Female']
dfsorted = dfmale.append(dffemale)
dfsorted = dfsorted.sort_values('character_id')
print(dfsorted.info())
print(dfsorted.head(5))

# In[101]:

dfsorted = dfsorted.dropna()
dfsorted.info()
# In[102]:
# Report the current sheets, then clone one template sheet per character:
# the 'agent13Cbt' template for Female rigs, 'antManCbt' for Male rigs.
# NOTE: get_sheet_names()/get_sheet_by_name() are deprecated openpyxl
# APIs (use wb.sheetnames / wb[name]).
sheet_count = len(wb.get_sheet_names())
print('First ten sheets: ' + str(wb.get_sheet_names()[:10]))
print('Number of sheets: ' + str(sheet_count))

# In[103]:

#target = wb.copy_worksheet(antManCbt)
#newsheetname = wb.get_sheet_names()[-1]
#wb.get_sheet_by_name(newsheetname).title = 'newSheet'
#newsheet = wb.get_sheet_by_name('newSheet')
antManCbt = wb.get_sheet_by_name('antManCbt')
agent13Cbt = wb.get_sheet_by_name('agent13Cbt')
for i in range(0,len(dfsorted)):
    if dfsorted.values[i][1] == 'Female':
        # copy_worksheet appends the copy as the last sheet; rename it
        # after the character (column 0 of the sorted roster).
        target = wb.copy_worksheet(agent13Cbt)
        newsheetname = wb.get_sheet_names()[-1]
        wb.get_sheet_by_name(newsheetname).title = str(dfsorted.values[i][0]) + 'Cbt'
    else:
        target = wb.copy_worksheet(antManCbt)
        newsheetname = wb.get_sheet_names()[-1]
        wb.get_sheet_by_name(newsheetname).title = str(dfsorted.values[i][0]) + 'Cbt'
print('New sheets copied: ' + str(wb.get_sheet_names()[-len(dfsorted):]))
# In[104]:
#print('New sheets copied: ' + str(wb.get_sheet_names()[-len(characters):]))
# In[105]:
def replace_cells(character):
    """Replace every occurrence of the template name *character* in the
    string cells of the global `currentsheet` with the name of the
    character at global index `k` in `dfsorted`.

    NOTE: relies on the module-level globals `currentsheet`, `dfsorted`
    and `k` set by the driver loop below; the scanned range (rows 1-274,
    cols 1-19) is hard-coded to the template sheet size.
    """
    for i in range(1,275):
        for j in range(1,20):
            if type(currentsheet.cell(row = i, column = j).value) == str:
                #print(currentsheet)
                currentsheet.cell(row = i, column = j).value = currentsheet.cell(row = i, column = j).value.replace(character, str(dfsorted.values[k][0]))
    #print('copy: ',str(character))
    print('new char copied:' ,str(dfsorted.values[k][0]),'...')

# Driver: walk the freshly copied sheets (the last len(dfsorted) sheets)
# and substitute the matching template name in each.
k = 0
for i in range(0, len(dfsorted)):
    currentsheet = wb.get_sheet_by_name(str(wb.get_sheet_names()[-len(dfsorted) + i]))
    if dfsorted.values[i][1] == 'Female':
        replace_cells('agent13')
    else:
        replace_cells('antMan')
    k += 1
    #print(k)
# In[106]:
#crossbonesCbt = wb.get_sheet_by_name('crossbonesCbt')
#crossbonesCbt['C7'].value
# In[107]:
#claireTempleCbt = wb.get_sheet_by_name('claireTempleCbt1')
#claireTempleCbt['E7'].value
# In[110]:
# Save under a derived name: append '_updated' and replace spaces with
# underscores so the original workbook is left untouched.
newfilename = filename.replace('.xlsx','_updated.xlsx').replace(' ','_')
print('new file name: ',newfilename)

# In[109]:

wb.save(newfilename)

# In[ ]:
| true |