blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bbb7f192887284384b59ca8cf45766e0e9110783 | Python | MrSkl1f/DataBase | /lab6/lab6.py | UTF-8 | 5,206 | 2.953125 | 3 | [] | no_license | import psycopg2
conn = psycopg2.connect(
dbname='basketball',
user='postgres',
password='1337',
host='localhost'
)
cursor = conn.cursor()
# 1. Scalar query: one player's name, team id and country.
# select player_name, team, player_country from players where players.id = 100;
def f1():
    """Run a scalar query and print the single matching player."""
    query = 'select player_name, team, player_country from players where players.id = 100'
    cursor.execute(query)
    row = cursor.fetchone()
    print(f'Name: {row[0]}, team id: {row[1]}, country: {row[2]}')
# 2. Query with several JOINs: player -> team -> management.
def f2():
    """Print a player's name, his team and the team's general director."""
    query = 'select players.player_name, teams.team_name, management.general_director\
 from players join teams on players.team = teams.id\
 join management on teams.management = management.id\
 where players.id = 100'
    cursor.execute(query)
    row = cursor.fetchone()
    print(f'Name: {row[0]}, team: {row[1]}, general director: {row[2]}')
# 3. Query with derived tables (CTE-like) and window functions:
# average player height per country (Canada / United States), computed with
# avg(...) over (partition by player_country) and combined with UNION.
def f3():
    request = '''
    select cur.player_country as "country", avg(cur.player_height) over (partition by player_country) as "height"
    from (
        select player_country, player_height
        from players
        where player_country = 'Canada'
    ) as cur
    union
    select cur.player_country as "country", avg(cur.player_height) over (partition by player_country) as "height"
    from (
        select player_country, player_height
        from players
        where player_country = 'United States'
    ) as cur;
    '''
    cursor.execute(request)
    # psycopg2 cursors are iterable over the result rows
    for row in cursor:
        print('Country: ' + row[0] + ', average height: ' + str(row[1]))
# 4. Metadata query: look up in the system catalogs the name of the
# function fired by the trigger named 'draft'.
def f4():
    request = '''
    select pp.proname
    from pg_catalog.pg_proc pp
    where pp."oid" = (
        select pt.tgfoid
        from pg_catalog.pg_trigger pt
        where pt.tgname = 'draft'
    );
    '''
    cursor.execute(request)
    # single row / single column: the trigger function's name
    print(cursor.fetchall()[0][0])
# 5. Call a scalar (user-defined) function.
# select * from skalar_func();
def f5():
    """Call skalar_func() and print its single scalar result."""
    cursor.execute('select * from skalar_func()')
    result = cursor.fetchone()[0]
    print('Result: ', result)
# 6. Call a table-valued function and print every returned player.
# select * from get_players(219);
def f6():
    """Print all rows returned by get_players(219).

    Bug fix: the original print had a stray comma after ', name: ', which made
    print() insert an extra separator space before the player's name. Every
    label is now followed by exactly one space.
    """
    cursor.execute('select * from get_players(219)')
    for row in cursor:
        print('Id: ' + str(row[0]) + ', team: ' + str(row[1]) + ', name: ' + row[2]
              + ', position: ' + str(row[3]) + ', height: ' + str(row[4])
              + ', weight: ' + str(row[5]) + ', number: ' + str(row[6])
              + ', age: ' + str(row[7]) + ', country: ' + row[8])
# 7. Call a stored procedure (written in lab 3); it reports via notices.
def f7():
    """Call get_trigger_and_func and print the last server notice it raised."""
    cursor.execute("call get_trigger_and_func(cast('draft' as name));")
    print(conn.notices[-1])
# 8. Call a system function.
def f8():
    """Print the PostgreSQL server version string."""
    cursor.execute('select version()')
    print(cursor.fetchone()[0])
# 9. Create a table matching the database's subject area (player drafts).
def f9():
    # Recreate the table from scratch on every call.
    request = '''
    drop table if exists drafts;
    create table if not exists drafts (
        id serial not null,
        player_id int,
        last_team_id int,
        new_team_id int
    );
    '''
    cursor.execute(request)
    conn.commit()  # persist the DDL
# 10. Insert one row into the drafts table.
def f10(id=101, last_id=2, new_id=3):
    """Insert a draft record (player_id, last_team_id, new_team_id).

    Security fix: values are passed as query parameters instead of being
    interpolated into the SQL with an f-string, which was vulnerable to SQL
    injection and quoting errors.
    NOTE: the parameter name ``id`` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    request = '''
    insert into drafts(player_id, last_team_id, new_team_id)
    values(%s, %s, %s)
    '''
    cursor.execute(request, (id, last_id, new_id))
    conn.commit()
# Interactive menu loop: each numeric choice dispatches to the matching demo.
menu = '''0 - Выход
1. Скалярный запрос
2. Запрос с несколькими соединениями
3. Запрос с ОТВ(CTE) и оконными функциями
4. Запрос к метаданным
5. Скалярная функция
6. Многооператорная функция
7. Хранимая процедура
8. Системная функция
9. Создать таблицу
10. Вставить в таблицу
'''
print(menu)
choice = 1
while choice != 0:
    # NOTE(review): int(input()) raises ValueError on non-numeric input.
    choice = int(input('Выбор:'))
    if choice == 1:
        f1()
    elif choice == 2:
        f2()
    elif choice == 3:
        f3()
    elif choice == 4:
        f4()
    elif choice == 5:
        f5()
    elif choice == 6:
        f6()
    elif choice == 7:
        f7()
    elif choice == 8:
        f8()
    elif choice == 9:
        f9()
    elif choice == 10:
        f10()
cursor.close()
conn.close() | true |
45f32000bdd8be67d548fb487cf05545afac09bb | Python | alimg/insight | /agent/src/HwController.py | UTF-8 | 3,051 | 2.71875 | 3 | [] | no_license | from threading import Thread, Timer
import Queue
from sensors.IRController import IRController
from sensors.LedController import LedController
from sensors.PIRSensor import PIRSensor
from sensors import Camera
from sensors import SpiAdcController
from sensors.ButtonController import ButtonController
class HwController(Thread):
    """Worker thread driving the board's sensors and capture hardware.

    Commands (dicts with an 'action' key) are queued via process_command()
    and executed sequentially by run(); capture results are delivered through
    the callbacks supplied at construction time.  Python 2 code (print
    statements, Queue module).
    """
    def __init__(self, camera_event_handler, video_event_handler, audio_event_handler, setup_button_handler):
        super(HwController, self).__init__()
        # Result callbacks supplied by the owner.
        self.camera_event_handler = camera_event_handler
        self.video_event_handler = video_event_handler
        self.audio_event_handler = audio_event_handler
        self.setup_button_handler = setup_button_handler
        # Hardware handles (module-level factories).
        self.camera = Camera.get_camera()
        self.adc_controller = SpiAdcController.get_adc_controller()
        self.command_queue = Queue.Queue()
        self._STOP = object()  # sentinel pushed by stop() to unblock run()
        self.running = True
        self.pir_sensor = PIRSensor(lambda: self.on_pir_trigger())
        self.led_controller = LedController()
        self.IR_controller = IRController()
        self.button_controller = ButtonController(lambda: self.setup_button_handler())
        self.start_ldr_timer()
        self.pir_enabled = True

    def run(self):
        # Main command loop: block on the queue until stop() is called.
        while self.running:
            command = self.command_queue.get()
            if command == self._STOP:
                break
            if command['action'] == "cap_photo":
                image = self.camera.take_picture()
                self.camera_event_handler(image)
            elif command['action'] == "cap_audio":
                self.adc_controller.capture_audio(lambda captured_file: self.on_audio_captured(captured_file))
            elif command['action'] == "cap_video":
                self.camera.capture_video(lambda captured_file: self.on_video_captured(captured_file))
            elif command['action'] == "cap_temperature":
                temp = self.adc_controller.read_temperature_sensor()
                print temp

    def process_command(self, command):
        # Thread-safe entry point: enqueue only; run() executes it.
        print "processCommand ", command
        self.command_queue.put(command)

    def stop(self):
        self.running = False
        self.command_queue.put(self._STOP)  # wake the blocking queue.get()

    def on_pir_trigger(self):
        # Motion detected: snap a photo unless the user disabled the PIR.
        if not self.pir_enabled:
            print "pir disabled by user conf"
            return
        image = self.camera.take_picture()
        self.camera_event_handler(image)

    def on_audio_captured(self, captured_file):
        self.audio_event_handler(captured_file)

    def set_led_status(self, status):
        self.led_controller.set_status(status)

    def on_video_captured(self, captured_file):
        self.video_event_handler(captured_file)

    def start_ldr_timer(self):
        # Poll the light sensor every 5 s and toggle the IR illuminator.
        # NOTE(review): threshold 2500 appears to be raw ADC units - confirm.
        ldr_val = self.adc_controller.read_ldr_sensor()
        print "LDR: ", ldr_val
        if ldr_val > 2500:
            self.IR_controller.enable()
        else:
            self.IR_controller.disable()
        self.ldr_timer = Timer(5.0, lambda: self.start_ldr_timer())
        self.ldr_timer.start()
| true |
6f4edfa8113fab1ae5f84caf6e901829000781e6 | Python | RoveAllOverTheWorld512/stock_pandas | /misc/badnews.py | UTF-8 | 3,114 | 3.265625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on 2019-10-27 11:21:35
author: huangyunbin
email: huangyunbin@sina.com
QQ: 592440193
bad news 利空走势分析
分析方法:利空点前三个月的高点,后三个月的低点,低点后一个月反弹的高点
"""
import pandas as pd
from numpy import NaN as npNaN
from math import isnan
import dateutil.parser
def idxmax(ds, i, l):
    """Return a one-element Series with the maximum of ``ds`` over the ``l``
    positions ending at position ``i`` (inclusive).

    ``ds`` must be positionally indexed (index equals 0..len-1) and ``l`` must
    be positive; returns None when the arguments are invalid.
    """
    if isnan(i):
        return None
    if l <= 0 or i - l + 1 < 0 or i > len(ds):
        return None
    window = ds.iloc[i - l + 1: i + 1]
    return window.loc[[window.idxmax()]]
def idxmin(ds, i, l):
    """Return a one-element Series with the minimum of ``ds`` over the ``l``
    positions ending at position ``i`` (inclusive).

    ``ds`` must be positionally indexed (index equals 0..len-1) and ``l`` must
    be positive; returns None when the arguments are invalid.
    """
    if isnan(i):
        return None
    if l <= 0 or i - l + 1 < 0 or i > len(ds):
        return None
    window = ds.iloc[i - l + 1: i + 1]
    return window.loc[[window.idxmin()]]
def badnews(df, date, j=60, k=60, l=30, m=120):
    '''
    Bad-news price analysis around a given date:
    - the high within the ``j`` trading days up to and including the news day,
    - the low within the ``k`` trading days after the news day,
    - the high within the ``l`` trading days after that low (rebound),
    - the low within the ``m`` trading days after the news day.
    Window sizes are shrunk when the data does not reach far enough.
    Returns a flat list: [high date, days-to-high, high, drop, news date,
    news close, low date/days/price/change, rebound date/days/price/change,
    m-day low date/days/price/change].
    '''
    df = df.sort_index()   # sort by the date index
    dt = dateutil.parser.parse(date)
    df = df.reset_index()  # turn the date index into an ordinary column
    # Positional index of the last row before the news date; ">=" is needed
    # because the exact date may be absent (non-trading day).
    i = df.loc[(df['date'] >= dt)].index[0] - 1
    p = df.loc[i, 'close']  # close on the reference day
    dt = df['date'].iloc[i].strftime('%Y%m%d')
    ds = df['close']
    if i < j - 1:  # not enough history before the news day
        j = i + 1
    p0 = idxmax(ds, i, j)   # pre-news high
    p0i = p0.index[0]       # positional index of the high
    p0v = p0.loc[p0i]       # price at the high
    p0d = df['date'].iloc[p0i].strftime('%Y%m%d')
    p0ds = i - p0i          # trading days since the high
    p0zf = p / p0v - 1      # change from the high to the news-day close
    if i + k > len(ds):     # not enough data after the news day
        k = len(ds) - 1 - i
    p1 = idxmin(ds, i + k, k)  # post-news low
    p1i = p1.index[0]
    p1v = p1.loc[p1i]
    p1d = df['date'].iloc[p1i].strftime('%Y%m%d')
    p1ds = p1i - i
    p1zf = p1v / p - 1
    if p1i + l > len(ds):   # not enough data after the low
        l = len(ds) - 1 - p1i
    p2 = idxmax(ds, p1i + l, l)  # rebound high after the low (may be None)
    if p2 is not None:
        p2i = p2.index[0]
        p2v = p2.loc[p2i]
        p2d = df['date'].iloc[p2i].strftime('%Y%m%d')
        p2ds = p2i - i
        p2zf = p2v / p1v - 1
    else:
        p2i = None
        p2v = None
        p2d = None
        p2ds = None
        p2zf = None
    if i + m > len(ds):     # not enough data after the news day
        m = len(ds) - 1 - i
    p3 = idxmin(ds, i + m, m)  # low within m trading days of the news
    p3i = p3.index[0]
    p3v = p3.loc[p3i]
    p3d = df['date'].iloc[p3i].strftime('%Y%m%d')
    p3ds = p3i - i
    p3zf = p3v / p - 1
    return [p0d, p0ds, p0v, p0zf, dt, p, p1d, p1ds, p1v, p1zf, p2d, p2ds, p2v, p2zf, p3d, p3ds, p3v, p3zf]
| true |
513b277b563e288082818cdde51008c136e9904c | Python | ChristianSchneeweiss/kata-python | /dice-rolling/solution-1.py | UTF-8 | 288 | 2.65625 | 3 | [] | no_license | import numpy as np
from time_benchmark import benchmark
def get_dice_roll():
    """Return one fair die roll: a random integer in [1, 6]."""
    return np.random.randint(low=1, high=7)
@benchmark  # project decorator: times the call (see time_benchmark module)
def main():
    """Roll the die four times, printing each result."""
    print(get_dice_roll())
    print(get_dice_roll())
    print(get_dice_roll())
    print(get_dice_roll())


if __name__ == '__main__':
    main()
| true |
000bef19f261b2c605548a2b6e106464a83c7ba8 | Python | turbo-moniak/battle_ships | /client.py | UTF-8 | 332 | 2.75 | 3 | [] | no_license | import socket
# Simple TCP echo client: send one line of user input, print the reply.
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    soc.connect(("127.0.0.1", 12345))
    clients_input = input("Some input from the client: ")
    soc.send(clients_input.encode("utf-8"))
    result_bytes = soc.recv(4096)
    result_string = result_bytes.decode("utf-8")
    print("result from server is {}".format(result_string))
finally:
    soc.close()  # fix: the socket was never closed in the original
| true |
a50cb9220d2962b81b3641ad3e81e57127136f76 | Python | udhayprakash/PythonMaterial | /python3/12_Logging/c_loguru/e_log_formatting.py | UTF-8 | 697 | 2.921875 | 3 | [] | no_license | import sys
from loguru import logger
# Add a stderr sink with a custom log-line format.
logger.add(sys.stderr, format="{time} {level} {message}")
logger.info("This message will be logged with a custom format")
print()
# adding color to log (markup tags in the message are honoured)
logger.add(sys.stderr, colorize=True)
logger.info("<green>This message</green> will be logged in green")
print()
# time formatting via the {time:...} token
logger.add(sys.stderr, format="{time:YYYY-MM-DD HH:mm:ss} {level} {message}")
logger.info("This message will include a timestamp")
print()
# different timezone
# NOTE(review): bind() only attaches a "time" field to the record's extras;
# confirm this actually changes the displayed timezone of the {time} token.
import pytz
tz = pytz.timezone("Europe/Paris")
logger.add(sys.stderr, format="{time:YYYY-MM-DD HH:mm:ss} {message}", serialize=False)
logger.bind(time=tz).info("This message will include the Paris timezone")
| true |
4ff416830a5a6271509ca87ecf1b2ee6ef37fd0e | Python | doer001/Programs66 | /3数组中重复的数字.py | UTF-8 | 2,774 | 4 | 4 | [] | no_license | # -*- coding:utf-8 -*-
''' ********************************************************************
题目描述
在一个长度为n的数组里的所有数字都在0到n-1的范围内。
数组中某些数字是重复的,但不知道有几个数字是重复的。
也不知道每个数字重复几次。请找出数组中任意一个重复的数字。
例如,如果输入长度为7的数组{2,3,1,0,2,5,3},
那么对应的输出是第一个重复的数字2。
# -*- coding:utf-8 -*-
class Solution:
# 这里要特别注意~找到任意重复的一个值并赋值到duplication[0]
# 函数返回True/False
def duplicate(self, numbers, duplication):
# write code here
******************************************************************** '''
# -*- coding:utf-8 -*-
class Solution:
    """Find any duplicated number in an array (hash-based, O(n) time/space)."""
    def duplicate(self, numbers, duplication):
        """Scan left to right; store the first value seen twice in
        duplication[0] and return True, or return False if all are unique.

        Improvement: uses a set instead of a dict (only membership is
        needed), and no longer shadows the builtin ``hash``.
        """
        seen = set()
        for num in numbers:
            if num in seen:
                duplication[0] = num
                return True
            seen.add(num)
        return False
''' ********************************************************************
解题思路1
从头到尾按顺序扫描数组,每扫描一个数,判断是否在哈希表中,如果是,就找到了这个重
复的数字,否则,就把这个数加入到哈希表中
时间复杂度=O(n),空间复杂度=O(n)
******************************************************************** '''
# -*- coding:utf-8 -*-
class Solution:
def duplicate(self, numbers, duplication):
n = len(numbers)
for i in range(n):
for j in range(i+1,n):
if(numbers[i]==numbers[j]):
duplication[0]=numbers[i]
return True
return False
''' ********************************************************************
解题思路2
从第一个数开始与后面的数进行比较,如果后面有相同的数,则找到这个重复的数字。
时间复杂度=O(n^2),空间复杂度=O(1)
******************************************************************** '''
# -*- coding:utf-8 -*-
class Solution:
    """Find any duplicated number in an array (sort-based, O(n log n))."""
    def duplicate(self, numbers, duplication):
        """Sort a copy, then look for two equal neighbours; store the first
        one found in duplication[0] and return True, else False.

        Fix: the original sorted ``numbers`` in place, silently mutating the
        caller's list; a sorted copy is used instead.
        """
        ordered = sorted(numbers)
        for i in range(len(ordered) - 1):
            if ordered[i] == ordered[i + 1]:
                duplication[0] = ordered[i]
                return True
        return False
''' ********************************************************************
解题思路3
先给数组排序,然后从头到尾检查连续的两个数是否相同,如果相同则找到重复数字。
时间复杂度=,空间复杂度=
******************************************************************** '''
| true |
a1670285dece96993e0a61010da323fb0a0f5033 | Python | valletidinesh/HackerRank | /ExtraLongFactorial.py | UTF-8 | 763 | 3.203125 | 3 | [] | no_license | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the extraLongFactorials function below.
def extraLongFactorials(n):
    """Print n! (no trailing newline) using manual big-number arithmetic.

    Digits are kept least-significant-first in a Python list.  Improvement:
    the original preallocated a fixed 500-slot buffer, which overflows for
    factorials longer than 500 digits; the list now grows on demand, so any
    n works.
    """
    res = [1]       # 1! == 1
    res_size = 1
    for x in range(2, n + 1):
        res_size = multiply(x, res, res_size)
    # Digits are stored reversed; print most-significant first.
    print(''.join(str(res[i]) for i in range(res_size - 1, -1, -1)), end='')


def multiply(x, res, res_size):
    """Multiply the number held in res[0:res_size] (LSD first) by x in place.

    Returns the new digit count.  Appends to ``res`` when the carry needs
    more room, so the buffer can no longer overflow.
    """
    carry = 0
    for i in range(res_size):
        prod = res[i] * x + carry
        res[i] = prod % 10
        carry = prod // 10
    while carry:
        digit = carry % 10
        if res_size < len(res):
            res[res_size] = digit   # reuse preallocated slot if present
        else:
            res.append(digit)
        carry = carry // 10
        res_size += 1
    return res_size
if __name__ == '__main__':
    # HackerRank harness: read n from stdin and print n! to stdout.
    n = int(input())
    extraLongFactorials(n)
| true |
a9436202b4d1839010eedb0a16e8a7e18ebac4fe | Python | Phyks/Blissify | /mpd/client.py | UTF-8 | 12,005 | 2.96875 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env python3
"""
This is a client for MPD to generate a random playlist starting from the last
song of the current playlist and iterating using values computed using Bliss.
MPD connection settings are taken from environment variables, following MPD_HOST
and MPD_PORT scheme described in `mpc` man.
You can pass an integer argument to the script to change the length of the
generated playlist (default is to add 20 songs).
"""
import argparse
import logging
import math
import os
import random
import sqlite3
import socket
import sys
import enum
import mpd
import random
class PersistentMPDClient(mpd.MPDClient):
    """
    MPD client that transparently reconnects when the connection drops.
    From
    https://github.com/schamp/PersistentMPDClient/blob/master/PersistentMPDClient.py
    """
    def __init__(self, socket=None, host=None, port=None):
        super().__init__()
        # Either a Unix socket path or a host/port pair.
        self.socket = socket
        self.host = host
        self.port = port
        self.do_connect()
        # get list of available commands from client
        self.command_list = self.commands()
        # commands not to intercept
        self.command_blacklist = ['ping']
        # wrap all valid MPDClient functions
        # in a ping-connection-retry wrapper
        for cmd in self.command_list:
            if cmd not in self.command_blacklist:
                if hasattr(super(PersistentMPDClient, self), cmd):
                    super_fun = super(PersistentMPDClient, self).__getattribute__(cmd)
                    new_fun = self.try_cmd(super_fun)
                    # Shadow the inherited method on the instance.
                    setattr(self, cmd, new_fun)

    # create a wrapper for a function (such as an MPDClient
    # member function) that will verify a connection (and
    # reconnect if necessary) before executing that function.
    # functions wrapped in this way should always succeed
    # (if the server is up)
    # we ping first because we don't want to retry the same
    # function if there's a failure, we want to use the noop
    # to check connectivity
    def try_cmd(self, cmd_fun):
        def fun(*pargs, **kwargs):
            try:
                self.ping()
            except (mpd.ConnectionError, OSError):
                self.do_connect()
            return cmd_fun(*pargs, **kwargs)
        return fun

    # needs a name that does not collide with parent connect() function
    def do_connect(self):
        """(Re)connect, first dropping any stale connection."""
        try:
            try:
                self.disconnect()
            # if it's a TCP connection, we'll get a socket error
            # if we try to disconnect when the connection is lost
            except mpd.ConnectionError:
                pass
            # if it's a socket connection, we'll get a BrokenPipeError
            # if we try to disconnect when the connection is lost
            # but we have to retry the disconnect, because we'll get
            # an "Already connected" error if we don't.
            # the second one should succeed.
            except BrokenPipeError:
                try:
                    self.disconnect()
                except:
                    print("Second disconnect failed, yikes.")
            if self.socket:
                self.connect(self.socket, None)
            else:
                self.connect(self.host, self.port)
        except socket.error:
            print("Connection refused.")
# Module-level setup: logging, default playlist length, and the Blissify
# data directory (honours the XDG base-directory spec when set).
logging.basicConfig(level=logging.INFO)
_QUEUE_LENGTH = 20
if "XDG_DATA_HOME" in os.environ:
    _BLISSIFY_DATA_HOME = os.path.expandvars("$XDG_DATA_HOME/blissify")
else:
    _BLISSIFY_DATA_HOME = os.path.expanduser("~/.local/share/blissify")
def distance(x, y):
    """
    Compute the distance between two songs.
    Params:
        - x: First song dict
        - y: Second song dict
    Returns: The cartesian distance between the two songs.
    """
    features = ("tempo", "amplitude", "frequency", "attack")
    return math.sqrt(sum((x[f] - y[f]) ** 2 for f in features))
def mean_song(X):
    """
    Compute a "mean" song for a given iterable of song dicts.
    Params:
        - X: A sized iterable of song dicts.
    Returns: A "mean" song, whose features are the mean features of the songs
    in the iterable.
    """
    count = len(X)
    features = ("tempo", "amplitude", "frequency", "attack")
    return {f: sum(song[f] for song in X) / count for f in features}
def distance_sets(X, Y):
    """
    Compute the distance between two iterables of song dicts, defined as the
    distance between the two mean songs of the iterables.
    Params:
        - X: First iterable of song dicts.
        - Y: Second iterable of song dicts.
    Returns: The distance between the two iterables.
    """
    mean_x = mean_song(X)
    mean_y = mean_song(Y)
    return distance(mean_x, mean_y)
def _init():
    """Connect to MPD and the Bliss SQLite db; pick the seed song.

    Returns (client, conn, cur, current_song_coords) where
    current_song_coords is the sqlite3.Row of the seed song.
    Exits the process if the seed song is missing from the db.
    """
    # Get MPD connection settings (MPD_HOST may be "password@host")
    try:
        mpd_host = os.environ["MPD_HOST"]
        try:
            mpd_password, mpd_host = mpd_host.split("@")
        except ValueError:
            mpd_password = None
    except KeyError:
        mpd_host = "localhost"
        mpd_password = None
    try:
        mpd_port = os.environ["MPD_PORT"]
    except KeyError:
        mpd_port = 6600
    # Connect to MPD
    client = PersistentMPDClient(host=mpd_host, port=mpd_port)
    if mpd_password is not None:
        client.password(mpd_password)
    # Connect to db
    db_path = os.path.join(_BLISSIFY_DATA_HOME, "db.sqlite3")
    logging.debug("Using DB path: %s." % (db_path,))
    conn = sqlite3.connect(db_path)
    conn.row_factory = sqlite3.Row
    conn.execute('pragma foreign_keys=ON')
    cur = conn.cursor()
    # Ensure random is not enabled (it would defeat the generated order)
    status = client.status()
    if int(status["random"]) != 0:
        logging.warning("Random mode is enabled. Are you sure you want it?")
    # Take the last song from current playlist and iterate from it
    playlist = client.playlist()
    if len(playlist) > 0:
        current_song = playlist[-1].replace("file: ", "").rstrip()
    # If current playlist is empty
    else:
        # Add a random song to start with TODO add a random album
        all_songs = [x["file"] for x in client.listall() if "file" in x]
        current_song = random.choice(all_songs)
        client.add(current_song)
    logging.info("Currently played song is %s." % (current_song,))
    # Get current song coordinates
    cur.execute("SELECT id, tempo, amplitude, frequency, attack, filename, album FROM songs WHERE filename=?", (current_song,))
    current_song_coords = cur.fetchone()
    if current_song_coords is None:
        logging.error("Current song %s is not in db. You should update the db." %
                      (current_song,))
        client.close()
        client.disconnect()
        sys.exit(1)
    return client, conn, cur, current_song_coords
def main_album(queue_length, option_best=True):
    """Append `queue_length` whole albums close to the current album.

    NOTE(review): unlike main_single, the reference album is never advanced
    inside the loop; successive iterations pick the next-closest albums to
    the SAME seed album (already-queued albums are skipped).
    """
    client, conn, cur, current_song_coords = _init()
    # Get 'queue_length' random albums
    for i in range(queue_length):
        # No cache management
        # Get all songs from the current album
        distance_array = []
        # Get album name and all of this album's songs coordinates
        album_name = current_song_coords["album"]
        cur.execute("SELECT id, tempo, amplitude, frequency, attack, filename, album FROM songs WHERE album=?", (album_name,))
        target_album_set = cur.fetchall()
        # Get all albums
        cur.execute("SELECT DISTINCT album FROM songs")
        albums = cur.fetchall();
        # Compute the distance between current album and all other albums
        for tmp_album in albums:
            # Get all songs in the album
            cur.execute("SELECT id, tempo, amplitude, frequency, attack, filename, album FROM songs WHERE album=?", (tmp_album["album"],))
            tmp_songs = cur.fetchall()
            # Don't compute distance for the current album and albums already in the playlist
            if(tmp_album["album"] == target_album_set[0]["album"] or
               ("file: %s" % (tmp_songs[0]["filename"],)) in client.playlist()):
                # Skip current song and already processed songs
                logging.debug("Skipping %s." % (tmp_album["album"]))
                continue
            tmp_distance = distance_sets(tmp_songs, target_album_set)
            distance_array.append({'Distance': tmp_distance, 'Album': tmp_songs})
            logging.debug("Distance between %s and %s is %f." %
                          (target_album_set[0]["album"],
                           tmp_album["album"], tmp_distance))
        # Ascending sort by distance (the lower the closer)
        distance_array.sort(key=lambda x: x["Distance"])
        # Chose between best album and one of the top 10 at random
        indice = 0 if option_best else random.randrange(10)
        logging.info("Closest album found is \"%s\". Distance is %f." %
                     (distance_array[indice]["Album"][0]["album"], distance_array[indice]["Distance"]))
        for song in distance_array[indice]["Album"]:
            client.add(song["filename"])
    conn.close()
    client.close()
    client.disconnect()
def main_single(queue_length, option_best=True):
    """Append `queue_length` songs, each chosen close to the previous one."""
    client, conn, cur, current_song_coords = _init()
    # Get 'queue_length' random songs
    for i in range(queue_length):
        distance_array = []
        # Get all other songs coordinates and iterate on them
        cur.execute("SELECT id, tempo, amplitude, frequency, attack, filename FROM songs")
        for tmp_song_data in cur.fetchall():
            # Skip current song and already processed songs
            if(tmp_song_data["filename"] == current_song_coords["filename"] or
               ("file: %s" % (tmp_song_data["filename"],)) in client.playlist()):
                logging.debug("Skipping %s." % (tmp_song_data["filename"]))
                continue
            # Compute distance between current song and songs in the loop
            tmp_distance = distance(tmp_song_data, current_song_coords)
            distance_array.append({'Distance': tmp_distance, 'Song': tmp_song_data})
            logging.debug("Distance between %s and %s is %f." %
                          (current_song_coords["filename"],
                           tmp_song_data["filename"], tmp_distance))
        # Ascending sort by distance (the lower the closer)
        distance_array.sort(key=lambda x: x['Distance'])
        # Chose between best song and one of the top 10 at random
        indice = 0 if option_best else random.randrange(10)
        current_song_coords = distance_array[indice]['Song']
        client.add(current_song_coords["filename"])
        # NOTE(review): the logged distance is always distance_array[0]'s,
        # even when a random (indice != 0) song was picked.
        logging.info("Found a close song: %s. Distance is %f." %
                     (current_song_coords["filename"], distance_array[0]['Distance']))
    conn.close()
    client.close()
    client.disconnect()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--queue-length", help="The number of items to add to the MPD playlist.", type=int)
    # NOTE(review): store_true with default=True means this flag can never be
    # turned off from the command line; the "top-10 random" branch is
    # unreachable via the CLI.
    parser.add_argument("--best-playlist", help="Makes the best possible playlist, always the same for a fixed song/album",
                        action='store_true', default=True)
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--song-based", help="Make a playlist based on single songs.",
                       action="store_true", default=False)
    group.add_argument("--album-based", help="Make a playlist based on whole albums.",
                       action="store_true", default=False)
    args = parser.parse_args()
    if args.queue_length:
        queue_length = args.queue_length
    else:
        queue_length = _QUEUE_LENGTH
    if args.song_based:
        main_single(queue_length, args.best_playlist)
    elif args.album_based:
        main_album(queue_length, args.best_playlist)
| true |
a66b9a3916ccb1a772d40fa22cbdb481ff511c66 | Python | ChrisVergis/testML | /testscanMLE_forStephany.py | UTF-8 | 2,026 | 2.671875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import poisson
np.random.seed(1234)
N=100000
lambda_true=20.1
binning = range(0,61)
lam_min = 19.0
lam_max = 21.0
lam_step = 0.01
n_trades = np.random.poisson(lambda_true,N)
plt.hist(n_trades,bins=binning, density=True)
plt.show()
actual_data, bins = np.histogram(n_trades, bins=binning)
actual_data=actual_data/N
best = []
bestL =-10
lam = lam_min
CHI2 = []
LAMS = []
while lam<=lam_max:
pois_i = np.zeros(len(binning)-1)
for k in range(len(binning)-1):
pois_i[k] = poisson.pmf(k,lam)
if len(best)==0:
best = np.array(pois_i,'d')
else:
COMPARE = np.sum((pois_i-actual_data)**2) < np.sum((best-actual_data)**2)
LAMS.append(lam)
CHI2.append(np.sum((pois_i-actual_data)**2))
best = np.array(pois_i,'d') if COMPARE else best
bestL= lam if COMPARE else bestL
lam += lam_step
plt.plot(LAMS,CHI2)
plt.show()
plt.plot(best)
plt.hist(n_trades,bins=bins,density=True)
plt.show()
print("True lambda =",lambda_true)
print("Average = ",n_trades.mean())
print("Best Lambda (Scan):",bestL)
best_Lambdas =[]
for itoy in range(100):
print(itoy,"/",10)
n_trades = np.random.poisson(lambda_true,N)
actual_data, bins = np.histogram(n_trades, bins=binning)
actual_data=actual_data/N
best =[]
bestL =-10
lam = lam_min
while lam<=lam_max:
pois_i = np.zeros(len(binning)-1)
for k in range(len(binning)-1):
pois_i[k] = poisson.pmf(k,lam)
if len(best)==0:
best = np.array(pois_i,'d')
else:
COMPARE = np.sum((pois_i-actual_data)**2) < np.sum((best-actual_data)**2)
best = np.array(pois_i,'d') if COMPARE else best
bestL= lam if COMPARE else bestL
lam += lam_step
best_Lambdas.append(bestL)
best_Lambdas=np.array(best_Lambdas)
print(best_Lambdas)
print(best_Lambdas.mean())
plt.hist(best_Lambdas,bins=np.linspace(20,20.25,25))
| true |
65ab9a96b453d9c5ded7cd90b08ec3f174a2ac7b | Python | guiyom-e/discord_escape_bot | /game_models/listener_collection.py | UTF-8 | 4,141 | 2.75 | 3 | [] | no_license | import functools
from typing import Type, List, Optional, Union
from discord import Guild
from game_models import AbstractListener
from helpers import TranslationDict
from logger import logger
from models import GuildWrapper, CustomEnum
from models.abstract_models import DiscordObjectDict, SpecifiedDict, SpecifiedDictCollection
def warn_if_no_guild(method):
    """Decorator: log an error when the wrapped method runs while the
    instance's guild has not been set, then call the method anyway."""
    @functools.wraps(method)
    def _wrapped(self, *args, **kwargs):
        if self._guild is None:
            logger.error(f"Guild is not set for method {method} ({_wrapped.__name__}) for {self}!")
        return method(self, *args, **kwargs)
    return _wrapped
class ListenerDescription(SpecifiedDict):
    """Serializable description of a listener: the listener class (by enum
    key), an ordering hint and the kwargs used to instantiate it."""
    _listener_enum = None  # Enum of all possible listener classes
    _updatable_keys = ["_game_type", "_order", "_init_kwargs"]

    def _update_description(self, kwargs):
        # Pop the structural keys; everything left over becomes init kwargs.
        self._game_type = kwargs.pop("game_type", self._game_type)
        self._order = kwargs.pop("order", self._order)
        # Normalise the 'messages' value to a TranslationDict.
        messages = kwargs.pop("messages", None)
        if isinstance(messages, TranslationDict):
            pass
        elif isinstance(messages, dict):  # messages dictionary
            messages = TranslationDict.from_dict(messages)
        elif isinstance(messages, str):  # path. the default version is used
            messages = TranslationDict(path=messages)
        elif isinstance(messages, (list, tuple)):  # list of versions. The path must be included in versions.
            messages = TranslationDict(versions=messages)
        self._init_kwargs.update(kwargs)
        if messages:
            self._init_kwargs['messages'] = messages

    def __init__(self, game_type, name="", description="", auto_start=False, show_in_listener_manager=True,
                 messages=None, key=None, order=None, **kwargs):
        self._game_type = ""
        self._order = -1
        self._init_kwargs = {}
        kwargs.update({"game_type": game_type, "order": order, "name": name, "description": description,
                       "auto_start": auto_start, "show_in_listener_manager": show_in_listener_manager,
                       "messages": messages})
        self._update_description(kwargs)
        super().__init__(key=key)

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} ref={getattr(self.object_reference, 'name', None)}>"

    @classmethod
    def set_listener_enum(cls, listener_enum):  # todo: better way ?
        cls._listener_enum = listener_enum

    # Properties
    @property
    def object_reference(self) -> Optional[AbstractListener]:
        return super().object_reference

    @object_reference.setter
    def object_reference(self, object_reference: AbstractListener):
        self._object_reference = object_reference

    @property
    def order(self) -> int:
        # NOTE(review): _order_auto is not defined in this class; presumably
        # provided by SpecifiedDict — confirm, otherwise this raises when
        # _order is falsy (None, 0 or unset).
        return self._order or self._order_auto

    # Factories
    @classmethod
    def from_dict(cls, dico):
        """Alternate constructor from a plain description dict."""
        return cls(**dico)

    def generate_object(self, guild: Union[GuildWrapper, Guild]):
        """Returns a new object defined by the description of this class instance"""
        if not self._listener_enum:
            logger.error(f"No listener enum defined for {self}!")
            return None
        return self._listener_enum[self._game_type].value(**self._init_kwargs).set(guild)

    def get_instance(self, guild):
        """Get a new object defined by the description of this class instance"""
        self.object_reference = self.generate_object(guild)  # todo: handle multiple guilds
        return self.object_reference
class ListenerCollection(SpecifiedDictCollection):
    """Collection of ListenerDescription objects for a guild."""
    _base_class: Type[DiscordObjectDict] = ListenerDescription

    def __init__(self, versions: Optional[Union[str, List[str]]] = None, path: str = ""):
        super().__init__(versions=versions, path=path)
        self._guild = None  # set later; checked by warn_if_no_guild

    @staticmethod
    def set_listener_enum(listener_enum: CustomEnum):
        # Delegates to the description class (class-wide setting).
        ListenerDescription.set_listener_enum(listener_enum)

    def get_guild_instances(self, guild: GuildWrapper):
        """Instantiate every described listener for the given guild."""
        # TODO
        return [listener_description.get_instance(guild) for listener_description in self.to_list()]
758d63cb7071050619a314f0adfbc759fe9bf681 | Python | dakaza98/colorful-platform | /colorfulplatform/single_menu.py | UTF-8 | 2,534 | 3.90625 | 4 | [] | no_license | import curses
from curses.textpad import Textbox
class SingleMenu():
    """Curses menu asking for the two players' names."""
    def __init__(self):
        self.screen = curses.initscr()
        self.player1_name = ""
        self.player2_name = ""
        # KEY_ENTER plus LF (10) and CR (13), which terminals commonly send.
        self.enter_keys = [curses.KEY_ENTER, 10, 13]

    def get_player_names(self):
        """Run the menu and return the two entered names, stripped of the
        trailing newline and spaces added by the curses Textbox."""
        curses.wrapper(self.run_single_menu)
        return self.player1_name.rstrip("\n").rstrip(" "), self.player2_name.rstrip("\n").rstrip(" ")

    def validate_key_input(self, key_input):
        """
        Callback function used by curses when a user types input.
        The function checks if the pressed key is one of the enter keys and
        signals curses to stop asking for input. Otherwise it lets the character through
        """
        if key_input in self.enter_keys:
            # 7 is a magic number that tells curses to stop asking for input
            return 7
        else:
            return key_input

    def get_user_input(self, text):
        """
        Prints and centers text on screen.
        Creates a new text input where the user enters the player name and returns it.
        Keyword arguments:
        self.screen -- the curses self.screen.
        text -- Text that appears before the input.
        Example:
        text = "Insert player 1's name" => Insert player 1's name: (user types here)
        Returns:
        The name of the player that the user entered.
        """
        # Centers the text
        num_rows, num_cols = self.screen.getmaxyx()
        x = int(num_cols / 2) - int(len(text) / 2)
        y = int(num_rows / 2)
        self.screen.addstr(y, x, text)
        self.screen.refresh()
        # We must create a new window becuase the edit function will return
        # everything that has been printed on the self.screen and not just the entered name
        win = curses.newwin(5, 10, y, x + len(text))
        textbox = Textbox(win)
        user_input = textbox.edit(self.validate_key_input)
        return user_input

    def ask_for_player_names(self):
        """Prompt for both names, toggling the blinking cursor around input."""
        player1_text = "Insert player 1's name: "
        player2_text = "Insert player 2's name: "
        # Enable blinking cursor when typing in names
        curses.curs_set(1)
        self.player1_name = self.get_user_input(player1_text)
        self.player2_name = self.get_user_input(player2_text)
        curses.curs_set(0)
        self.screen.refresh()

    def run_single_menu(self, screen):
        # Entry point handed to curses.wrapper().
        self.screen.clear()
        self.ask_for_player_names()
aeda64dbf8adb7d84b60c8d2cd1afc852cc518e0 | Python | nilp0inter/ryter | /scripts/getspells.py | UTF-8 | 2,864 | 3.046875 | 3 | [] | no_license | """
Gets the list of spells of Harry Potter from Wikipedia and writes
`behave` feature files.
"""
import os
import re
import sys
import warnings
from jinja2 import Template
import wikipedia
# Parses a wiki section title such as "Expelliarmus (Disarming Charm) ==="
# into the named groups 'incantation' and 'vernacular' (the latter optional).
TITLE_RE = re.compile(
    r"^(?P<incantation>.*?)\s?(?:\((?P<vernacular>.*)\))?\s===$")
# Parses "Key: value" field lines inside a spell section.
KV_RE = re.compile(r"^(\S+):\s(.*)$")
FEATURE_TEMPLATE = Template("""Feature: {% if incantation %}{{ incantation }}{% endif %}{% if incantation and vernacular %} {% endif %}{% if vernacular %}({{ vernacular }}){% endif %}
{% if description %}{{ description|wordwrap(75, False)|indent(4) }}{% endif %}
Pronunciation:{% if pronunciation %}
{{ pronunciation|wordwrap(71, False)|indent(8) }}{% else %} - {% endif %}
Seen/mentioned:{% if seen_mentioned %}
{{ seen_mentioned|wordwrap(71, False)|indent(8) }}{% else %} - {% endif %}
Suggested etymology:{% if suggested_etymology %}
{{ suggested_etymology|wordwrap(71, False)|indent(8) }}{% else %} - {% endif %}
Notes:{% if notes %}
{{ notes|wordwrap(71, False)|indent(8) }}{% else %} - {% endif %}
""")
def parse_spell(raw_spell):
    """Parse one '=== Title ===' wiki section into a dict of spell fields.

    The first line is matched against TITLE_RE for the incantation and
    vernacular name; remaining lines are "Key: value" pairs (KV_RE),
    with non-matching lines appended to the most recent value.
    """
    spell = {}
    lines = raw_spell.split('\n')
    title = TITLE_RE.search(lines[0])
    if title:
        # Keep only the title groups that actually matched.
        spell.update({k: v for k, v in title.groupdict().items() if v})
    key = value = None
    for line in lines:
        if line.startswith('='):
            # Title / heading line, skip.
            continue
        match = KV_RE.search(line)
        if match is None and not value:
            # Nothing matched and no field is open yet: ignore the line.
            continue
        if match:
            key, value = match.groups()
            key = normalize_str(key).lower()
        else:
            # Continuation of the previous field's value.
            value += line
        spell[key] = value
    return spell
def render_feature(spell):
    """Render the parsed spell dict as Gherkin feature-file text via FEATURE_TEMPLATE."""
    return FEATURE_TEMPLATE.render(**spell)
def normalize_str(s):
    """Return *s* with every non-word character replaced by '_'.

    Used to turn spell names into filesystem-safe file names.
    """
    # Raw string: the original '\W' is an invalid escape sequence in a
    # plain literal (DeprecationWarning since 3.6, SyntaxWarning in 3.12+).
    return re.sub(r'\W', '_', s)
if __name__ == '__main__':
    # First CLI argument is the output directory for the .feature files.
    if len(sys.argv) < 2 or not os.path.isdir(sys.argv[1]):
        print("argument must be the output directory")
        sys.exit(2)
    else:
        os.chdir(sys.argv[1])
    # Fetch the article and split it into individual "=== Spell ===" sections.
    reference = wikipedia.page("List_of_spells_in_Harry_Potter")
    raw_spells = reference.content.split("\n=== ")[1:]
    for raw_spell in raw_spells:
        spell = parse_spell(raw_spell)
        if spell:
            # Prefer the incantation as the file name, fall back to the
            # vernacular name; skip spells with neither.
            if 'incantation' in spell:
                filename = normalize_str(spell['incantation'])
            elif 'vernacular' in spell:
                filename = normalize_str(spell['vernacular'])
            else:
                warnings.warn("Unknown spell name %r" % spell)
                continue
            filename += '.feature'
            # Never overwrite an existing feature file.
            if os.path.exists(filename):
                continue
            else:
                with open(filename, 'w') as f:
                    f.write(render_feature(spell))
| true |
8090f088a6c8e9b79c78d9cb792da17309ca9edd | Python | gnils/vp6500 | /probe_sensor.py | UTF-8 | 422 | 3 | 3 | [] | no_license |
# Enumerate candidate frame dimensions (x * y * bytes-per-pixel byte counts)
# up to the configured sensor resolution and announce each one.
size = (320, 240)
#~ size = (640, 480)
#~ size = (6, 4)
# NOTE(review): both loop bounds use size[0]; the y axis presumably should
# be capped by size[1] -- confirm intent before relying on the output.
for x in range(1, size[0] + 1):
    for y in range(x, size[0] + 1):
        for bytes_per_pixel in range(1, 4):
            count = x * y * bytes_per_pixel
            # Parenthesized call form: the original Python-2-only
            # "print" statement is a syntax error under Python 3, while
            # print("...") behaves identically on both.
            print("do it %d" % count)
| true |
3d95daadebd947b4d3a75b352600d8329150009d | Python | moon4311/sp | /1_Variable/_set.py | UTF-8 | 2,644 | 3.984375 | 4 | [] | no_license | # sets 집합 자료형
# Comparable to Java's Set
# Unordered
# Holds no duplicate values.
# To deduplicate another collection, convert it to a set and back again.
a_set = {1}
print('a_set : ', a_set, type(a_set))
a_list = ['a', 'b', 'mpilgrim', True, False, 42]
a_set = set(a_list)
print(a_set)
a_set = set() # -> class 'set'
not_sure = {} # -> class 'dict' (empty braces always build a dict, never a set)
# #### list = [] / tuple = () / set = {}
# --- Adding items (duplicates are silently ignored) ---
print("\n *** 항목 추가 (중복 X ) *** ")
a_set = {1, 2}
a_set.add(4)
print("\n 1개 추가\n a_set.add(4) : ", a_set)
a_set.add(1)
print("a_set.add(1) : ", a_set, "* sets are bags of unique values")
a_set.update({2, 4, 6})
print("\n 여러개 추가\na_set.update({a, b}) : ", a_set, "* duplicate values are ignored")
a_set.update({3, 6, 9}, {1, 2, 3, 5, 8, 13})
print("a_set.update({a, b}, {c, d}) : ", a_set, "* set possible update() method with any number of arguments")
a_set.update([10, 20, 30])
print("a_set.update([a, b, c]) : ", a_set, "* list 객체를 더할수도 있다.")
# --- Removing items: discard() ignores missing values, remove() raises ---
print("\n *** 항목 삭제 *** ")
a_set.discard(10)
print("a_set.discard(10) : ", a_set , " * 항목 없는 경우 넘어감 ")
a_set.remove(13)
print("a_set.remove(13) : ", a_set, " * 항목이 없는 경우 에러 ")
# NOTE(review): pop() removes an *arbitrary* element, not the "front" one
# as the printed Korean text below claims.
print("a_set.pop() : ", a_set.pop(), " * the pop() method removes a single value from a set and returns the value.")
print("a_set.pop() : ", a_set.pop(), " * there is no “last” value in a set")
print("a_set.pop() : ", a_set.pop(), " * 앞에서부터 하나씩 추출")
a_set.clear()
print("a_set.clear() : ", a_set)
# --- Membership tests and set algebra ---
print("\n *** 항목 읽기 / 비교 *** ")
print("9 in a_set : ", 9 in a_set , " * 포함 여부")
b_set = {1, 2, 3, 5, 6, 8, 9, 12, 15, 17, 18, 21}
print("\n a∪b\n a_set | b-set : ", a_set | b_set )
print(" a_set.union(b_set) : ", a_set.union(b_set), " * a_set.union(b_set) == b_set.union(a_set)" )
print("\n a∩b\n a_set & b-set : ", a_set & b_set )
print(" a_set.intersection(b_set) : ", a_set.intersection(b_set), " * a_set.intersection(b_set) == b_set.intersection(a_set)")
print("\n a-b\n a_set - b-set : ", a_set - b_set )
print(" a_set.difference(b_set) : ", a_set.difference(b_set), " * a_set.difference(b_set) != b_set.difference(a_set)")
print("\n (a∩b)\na_set.symmetric_difference(b_set) ", a_set.symmetric_difference(b_set), " * b_set.symmetric_difference(a_set) == a_set.symmetric_difference(b_set)")
a_set={1, 2, 3}
b_set={1, 2, 3, 4}
print("# a⊆b : a_set.issubset(b_set) ", a_set.issubset(b_set), "* Is Sub Set" )
print("# b⊇a : b_set.issuperset(a_set) ", b_set.issuperset(a_set), " * Is Super Set")
| true |
730fa70a67343b585b376973985aa2128e553cc0 | Python | Brebeuf-Code/brebeuf-code.github.io | /Turtles/spiral.py | UTF-8 | 315 | 3.796875 | 4 | [] | no_license | import turtle
t = turtle.Turtle()
t.pensize(0.1)
# Draw 80 shrinking near-triangles; turning 122 degrees (instead of 120)
# rotates the figure slightly each pass, producing a spiral.
for c in range(80):
    for i in range(3):
        t.speed(10)
        # Cycle the pen colour in blocks of 10 triangles (period of 30).
        if c%30<10:
            t.color('red')
        elif c%30<20:
            t.color('purple')
        else:
            t.color('blue')
        t.forward((80-c))
        t.left(122)
90feed58d3bdf9dd4310d3857303ffea3ddfa325 | Python | iasip/qazar | /cloudtestenvironment/test/models/customer_tests.py | UTF-8 | 426 | 2.6875 | 3 | [] | no_license | import unittest
from cloudtestenvironment import db
from cloudtestenvironment.models import Customer
class CustomerTests(unittest.TestCase):
    """Unit test for the Customer model."""
    def testCustomerCreation(self):
        """A Customer stores its name/email and can be committed to the DB."""
        name = "My Name"
        email = "me@example.com"
        customer = Customer(name, email)
        self.assertEqual(customer.name, name)
        self.assertEqual(customer.email, email)
        # NOTE(review): this commits to the real session with no teardown,
        # so repeated runs accumulate rows -- consider a rollback fixture.
        db.session.add(customer)
        db.session.commit()
if __name__ == '__main__':
    unittest.main()
fafe6b8461176470ffedb28098b11528bf6bc75c | Python | joao-vitor-vlr/flask_proj | /projeto1/init_db.py | UTF-8 | 401 | 2.671875 | 3 | [] | no_license | import sqlite3
# Seed the 'tabelas' table with two sample rows.
connection = sqlite3.connect('database.db')
try:
    cur = connection.cursor()
    # Parameterized statements keep the values properly escaped.
    cur.execute("INSERT INTO tabelas (nome, content) VALUES (?, ?)",
                ('tird Post', 'Content for the first post')
                )
    cur.execute("INSERT INTO tabelas (nome, content) VALUES (?, ?)",
                ('fourth Post', 'Content for the second post')
                )
    connection.commit()
finally:
    # Close the connection even if an INSERT fails part-way; the original
    # leaked the handle on any exception.
    connection.close()
71bb438638527476c09f14d988a3c94a07d8a45d | Python | chenghuiren/galaxy | /util.py | UTF-8 | 1,053 | 2.703125 | 3 | [] | no_license | #! /usr/bin/env python
import subprocess
def getCPUUsage(hostname, interval, count):
    """Sample per-CPU utilisation on *hostname* via ssh + mpstat.

    Runs ``mpstat -P ALL <interval> <count>`` remotely and returns the
    list of per-CPU '%usr' values taken from the final 'Average:' section
    (one float per CPU row), or None when ssh/mpstat wrote to stderr.
    """
    cmd = 'mpstat -P ALL {} {}'.format(interval, count)
    print(cmd)
    # universal_newlines=True makes communicate() return str rather than
    # bytes, so the text parsing below works on both Python 2 and 3.
    p = subprocess.Popen(['ssh', hostname, cmd], stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, universal_newlines=True)
    out, err = p.communicate()
    if err is not None and err != '':
        print(err)
        return None
    lines = out.split('\n')
    averageCount = 0
    usage = []
    for line in lines:
        columns = line.split()
        if len(columns) == 0:
            continue
        if columns[0] == 'Average:':
            averageCount += 1
            # Skip the first 'Average:' row (the column-header line);
            # every following row carries %usr in column 2.
            if averageCount > 1:
                usage.append(float(columns[2]))
    return usage
def getWho(hostname):
    """Return the login names of users currently logged in on *hostname*
    (queried with ``who`` over ssh), or None when nothing was read.

    Uses a 5-second ssh connect timeout so unreachable hosts fail fast.
    """
    cmd = 'who'
    # universal_newlines=True: communicate() yields str on Python 3 as
    # well; without it `out == ""` below could never match (bytes).
    p = subprocess.Popen(['ssh', '-t', '-o ConnectTimeout=5', hostname, cmd],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         universal_newlines=True)
    out, err = p.communicate()
    if out == "":
        return None
    users = []
    for line in out.split('\n'):
        columns = line.split()
        if len(columns) == 0:
            continue
        # First column of each `who` row is the user name.
        users.append(columns[0])
    return users
| true |
9826e5de78fdbb6cd595bd7d8c72ae92541c8e82 | Python | kamyu104/python-cheatsheet | /web/convert_table.py | UTF-8 | 1,360 | 3.125 | 3 | [] | no_license | #!/usr/bin/env python3
def convert_table(lines):
    """Convert an ASCII-art table to box-drawing characters, or back.

    Direction is decided by the first character of the first row: a
    leading '+' means ASCII input (converted to Unicode, with 'yes'
    cells rendered as ' ✓ '); anything else is treated as Unicode and
    converted back. Returns a single newline-joined string.
    """
    def to_unicode():
        top, header, sep, *body, bottom = lines
        heavy = str.maketrans({'-': '━', '+': '┯'})
        light = str.maketrans({'-': '─', '+': '┼'})
        pipes = str.maketrans({'|': '│'})
        rows = ['┏' + top.translate(heavy)[1:-1] + '┓',
                '┃' + header.translate(pipes)[1:-1] + '┃',
                '┠' + sep.translate(light)[1:-1] + '┨']
        for row in body:
            rows.append('┃' + row.translate(pipes).replace('yes', ' ✓ ')[1:-1] + '┃')
        rows.append('┗' + bottom.translate(str.maketrans({'-': '━', '+': '┷'}))[1:-1] + '┛')
        return '\n'.join(rows)

    def to_ascii():
        table = str.maketrans('┏┓┗┛┠┼┨┯┷━─┃│', '+++++++++--||')
        return '\n'.join(row.translate(table).replace(' ✓ ', 'yes')
                         for row in lines)

    return to_unicode() if lines[0][0] == '+' else to_ascii()
if __name__ == '__main__':
    # Read the table from stdin until EOF, then print the converted form.
    input_lines = []
    try:
        while True:
            input_lines.append(input())
    except EOFError:
        pass
    print(convert_table(input_lines))
| true |
f3c9a2d979927729bac4ab0751651258f0b64cf4 | Python | ohsean93/algo | /08월/08_07/random25.py | UTF-8 | 698 | 3.21875 | 3 | [] | no_license | from random import shuffle
# Build a shuffled 5x5 board from the numbers 1..25, padded with a
# mirrored border so every interior cell has four orthogonal neighbours.
a = list(range(1,26))
shuffle(a)
matrix = [[]]*7
for line_num in range(1,6):
    # Each row is padded with copies of its first and last values.
    line = [a[0]] + a[:5] +[a[4]]
    matrix[line_num] = line.copy()
    a = a[5:]
matrix[0] = matrix[1].copy()   # mirror the top border
matrix[6] = matrix[5].copy()   # mirror the bottom border
print(matrix)
# Sum of absolute differences between every interior cell and its four
# orthogonal neighbours (the padded border supplies edge neighbours).
vector = [(1,0),(0,1),(-1,0),(0,-1)]
all_sum = 0
for x in range(1,6):
    for y in range(1,6):
        num1 = matrix[x][y]
        # abs() replaces the original's manual sign check.
        all_sum += sum(abs(num1 - matrix[x+dx][y+dy]) for dx, dy in vector)
print(all_sum)
| true |
9f229329a3812ec9e65a6bf44a6764a50e8793b4 | Python | RealDense/PythonFun | /Hello_World.py | UTF-8 | 2,295 | 3.90625 | 4 | [] | no_license | import random
def addition(score):
    """Addition drill (Python 2): keep posing random sums until the user
    enters 0. *score* is [wrong, right]; the matching counter is
    incremented in place and the list is also returned."""
    usr = 1
    print('\n\nCan you add?')
    while usr != 0:
        fir = random.randint(1,101)
        sec = random.randint(1,101)
        summ = fir + sec
        print '\n   ', fir, '\n','+  ', sec, '\n','------'
        usr = input('> ')
        if(usr == 0):
            # 0 is the sentinel to leave the drill, not an answer.
            continue
        elif(usr == summ):
            print('\nWell done comrade!!')
            score[1] += 1
        else:
            print('\nCome on. Its simple addition.')
            score[0] += 1
    return score
def subtraction(score):
    """Subtraction drill (Python 2): shows sum - addend and expects the
    other addend; 0 exits. *score* is [wrong, right], updated in place."""
    usr = 1
    print('\n\nCan you subtraction?')
    while usr != 0:
        fir = random.randint(1,101)
        sec = random.randint(1,101)
        summ = fir + sec
        print '\n   ', summ, '\n','-  ', sec, '\n','------'
        usr = input('> ')
        if(usr == 0):
            continue
        elif(usr == fir):
            print('\nWell done comrade!!')
            score[1] += 1
        else:
            print('\nCome on. Its simple subtraction.')
            score[0] += 1
    return score
def mult(score):
    """Multiplication drill (Python 2) over the 1..12 times tables;
    0 exits. *score* is [wrong, right], updated in place."""
    usr = 1
    nums = [1,2,3,4,5,6,7,8,9,10,11,12]
    print('\n\nCan you multiply?')
    while usr != 0:
        fir = random.choice(nums)
        sec = random.choice(nums)
        prod = fir * sec
        print '\n   ', fir, '\n','x  ', sec, '\n','------'
        usr = input('> ')
        if(usr == 0):
            continue
        elif(usr == prod):
            print('\nWell done comrade!!')
            score[1] += 1
        else:
            print('\nCome on. Its simple multiplication.')
            score[0] += 1
    return score
def divide(score):
    """Division drill (Python 2): shows product / factor and expects the
    other factor, so answers are always whole; 0 exits. *score* is
    [wrong, right], updated in place."""
    usr = 1
    nums = [1,2,3,4,5,6,7,8,9,10,11,12]
    print('\n\nCan you divide?')
    while usr != 0:
        fir = random.choice(nums)
        sec = random.choice(nums)
        prod = fir * sec
        print '\n   ', prod, '\n','/  ', sec, '\n','------'
        usr = input('> ')
        if(usr == 0):
            continue
        elif(usr == fir):
            print('\nWell done comrade!!')
            score[1] += 1
        else:
            print('\nCome on. Its simple division.')
            score[0] += 1
    return score
score = [0,0]
while True:
print '\n Please choose something to practice:'
print ' 1: Addition'
print ' 2: Subtraction'
print ' 3: Multiply'
print ' 4: Division\n'
choice = input(' > ')
if (choice == 1):
score = addition(score)
elif (choice == 2):
score = subtraction(score)
elif (choice == 3):
score = mult(score)
elif (choice == 4):
score = divide(score)
else:
break
print '\n\nYou got ', score[0], ' right out of ', score[1]
print('Thanks for playing!!\n---------------------\n\n')
| true |
9ee7158da70f0f22b613749dce86ab4ba783111f | Python | dummy3k/eternalhelper | /gui/world_map.py | UTF-8 | 8,172 | 2.6875 | 3 | [] | no_license | import logging
import logging.config
import wx
import Image, ImageDraw, ImageFont
import os
from lxml import etree
from pprint import pprint
from location import Location
from map_service import MapService
from distance_service import DistanceService
if __name__ == '__main__':
    # Only configure logging when run as a script, not when imported.
    logging.config.fileConfig("logging.conf")
log = logging.getLogger(__name__)
# Scale factor from Eternal Lands map units to world-bitmap pixels.
# Only the last assignment is effective; the first two are kept as
# calibration history.
el_to_bmp = 50. / 192
el_to_bmp = 94. / 384
el_to_bmp = 294. / (384 * 3)
# Pixel offsets applied when projecting map coordinates onto the bitmap.
MAP_OFFSET = [318, 234]
# Extra offset for the Isla Prima map, adjustable at runtime via arrow keys.
IP_OFFSET = [0,0]
def el_to_dc(el_loc):
    """Project a Location (map name + in-map coords) to world-bitmap pixels.

    Looks up the map's world position and size in map.xml, then flips the
    in-map y axis (EL maps have their origin at the bottom) and applies
    MAP_OFFSET and the global scale el_to_bmp.

    NOTE(review): map.xml is re-parsed on every call; caching the parsed
    tree would avoid repeated disk I/O during redraws.
    """
    doc = etree.ElementTree(file='map.xml')
    map_xml = doc.xpath('//map[@name="%s"]' % el_loc.map_name)[0]
    map_loc = map_xml.get('loc').split(',')
    map_loc = (int(map_loc[0]), int(map_loc[1]))
    map_size = map_xml.get('size').split(',')
    map_size = (int(map_size[0]), int(map_size[1]))
    #~ dc_loc = el_to_dc((map_loc[0] + el_loc.loc[0],
        #~ map_loc[1] + map_size[1] - el_loc.loc[1]))
    #~
    #~ return ((loc[0] + MAP_OFFSET[0]) * el_to_bmp,
        #~ (loc[1] + MAP_OFFSET[1]) * el_to_bmp)
    loc = el_loc.loc
    return ((map_loc[0] + loc[0] + MAP_OFFSET[0]) * el_to_bmp,
            (map_loc[1] + map_size[1] - loc[1] + MAP_OFFSET[1]) * el_to_bmp)
def DrawMarker(dc, x, y, size):
    """Draw a marker centred on (x, y): a radius-4 circle crossed by an
    X whose arms extend *size* pixels in each direction."""
    dc.DrawCircle(x, y, 4)
    dc.DrawLine(x - size, y - size, x + size, y + size)
    dc.DrawLine(x + size, y - size, x - size, y + size)
def DrawElLocation(dc, el_loc):
    """Draw a marker on *dc* at the bitmap position of Location *el_loc*."""
    dc_loc = el_to_dc(el_loc)
    #~ log.debug(dc_loc)
    DrawMarker(dc, dc_loc[0], dc_loc[1], 3)
class MapSprite():
    """Clickable rectangle on the world map representing one game map.

    Coordinates are stored in map units; drawing and hit-testing convert
    to bitmap pixels via MAP_OFFSET and el_to_bmp.
    """
    def __init__(self, loc, size, map_name):
        self.__loc__ = loc
        self.__size__ = size
        self.map_name = map_name
    def __loc_size__(self):
        """Return ((x, y) with MAP_OFFSET applied, (width, height))."""
        loc = self.__loc__
        loc = (loc[0] + MAP_OFFSET[0], loc[1] + MAP_OFFSET[1])
        return (loc, self.__size__)
    def Draw(self, dc):
        """Outline this map's rectangle on *dc* (transparent fill)."""
        loc, size = self.__loc_size__()
        dc.SetBrush(wx.Brush('red', wx.TRANSPARENT))
        # Isla Prima gets the extra runtime-adjustable IP_OFFSET.
        if self.map_name == 'Isla Prima':
            dc.DrawRectangle((loc[0] + IP_OFFSET[0]) * el_to_bmp,
                (loc[1] + IP_OFFSET[1]) * el_to_bmp,
                size[0] * el_to_bmp + 1,
                size[1] * el_to_bmp + 1)
        else:
            dc.DrawRectangle(loc[0] * el_to_bmp,
                loc[1] * el_to_bmp,
                size[0] * el_to_bmp + 1,
                size[1] * el_to_bmp + 1)
    def HitTest(self, x, y):
        """Map a bitmap pixel (x, y) to an in-map Location.

        Returns False when the point lies outside this sprite's rectangle;
        otherwise a Location whose y axis is flipped back to map
        coordinates (origin at the bottom).
        """
        loc, size = self.__loc_size__()
        if x < loc[0] * el_to_bmp or\
            x > (loc[0] + size[0]) * el_to_bmp or\
            y < loc[1] * el_to_bmp or\
            y > (loc[1] + size[1]) * el_to_bmp:
            return False
        el_loc = (int(x / el_to_bmp), int(y / el_to_bmp))
        el_loc = (el_loc[0] - MAP_OFFSET[0] - self.__loc__[0],
                  self.__size__[1] - (el_loc[1] - MAP_OFFSET[1] - self.__loc__[1]))
        return Location(self.map_name, el_loc)
class WorldMapWindow(wx.Window):
    """Window that renders the Seridia world map, its map rectangles,
    and the current navigation route.

    Left-click selects a map and raises its local-map window;
    right-click sets the navigation start point; arrow keys nudge
    IP_OFFSET for calibrating the Isla Prima rectangle.
    """
    def __init__(self, parent):
        wx.Window.__init__(self, parent, id=wx.ID_ANY,
            style=wx.WANTS_CHARS
            #| wx.RAISED_BORDER
            #| wx.SUNKEN_BORDER
            , name="sink")
        # Background bitmap of the whole continent.
        bg_image_path = os.path.expanduser('~/bin/el_linux/maps/seridia.bmp')
        self.image = wx.Image(bg_image_path, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        self.Bind(wx.EVT_PAINT, self.OnPaint)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        self.Bind(wx.EVT_MOUSE_EVENTS, self.OnMouse)
        self.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
        # Build one clickable MapSprite per <map> entry that has both a
        # location and a size.
        doc = etree.ElementTree(file='map.xml')
        self.sprites = []
        self.current_map = None
        for item in doc.xpath('//map'):
            if not item.get('loc'):
                continue
            if not item.get('size'):
                continue
            loc = item.get('loc').split(',')
            loc = (int(loc[0]), int(loc[1]))
            size = item.get('size').split(',')
            size = (int(size[0]), int(size[1]))
            self.sprites.append(MapSprite(loc, size, item.get('name')))
    def OnSize(self, event):
        """Recreate the off-screen buffer at the new client size and redraw."""
        log.debug("OnSize!")
        self.Width, self.Height = self.GetClientSizeTuple()
        self._Buffer = wx.EmptyBitmap(self.Width, self.Height)
        self.Draw()
    def OnPaint(self, event):
        log.debug("OnPaint()")
        self.Draw()
    def OnKeyDown(self, event):
        """Arrow keys shift IP_OFFSET by one unit (calibration aid)."""
        log.debug("OnKeyDown()")
        #~ log.debug(event.GetKeyCode())
        if event.GetKeyCode() == wx.WXK_RIGHT:
            IP_OFFSET[0] += 1
        elif event.GetKeyCode() == wx.WXK_LEFT:
            IP_OFFSET[0] -= 1
        elif event.GetKeyCode() == wx.WXK_UP:
            IP_OFFSET[1] -= 1
        elif event.GetKeyCode() == wx.WXK_DOWN:
            IP_OFFSET[1] += 1
        self.Draw()
        log.debug("MAP_OFFSET: %s" % IP_OFFSET)
    def OnMouse(self, event):
        """Left-click: select/raise the clicked map. Right-click: set the
        navigation start point to the clicked in-map location."""
        if event.LeftDown():
            for item in self.sprites:
                if item.HitTest(event.GetX(), event.GetY()):
                    self.current_map = item
                    wx.GetApp().GetLocalMapWindow(item.map_name).Raise()
            self.Draw()
        # Ignore everything except button-down events from here on.
        if not event.LeftDown() and not event.RightDown():
            return
        log.debug("OnMouse(%s, %s)" % (event.GetX(), event.GetY()))
        self.SetFocus()
        for item in self.sprites:
            loc = item.HitTest(event.GetX(), event.GetY())
            if loc:
                #~ log.debug(loc)
                if event.RightDown():
                    wx.GetApp().SetNavFrom(loc)
                #~ else:
                    #~ self.nav_to = loc
                self.Draw()
    def Draw(self):
        """Repaint the buffer: background bitmap, map rectangles, the
        nav-from/nav-to markers with their doors, and the route polyline.

        NOTE(review): wx.BufferedPaintDC is also created here when called
        outside a paint event (mouse/key handlers) -- confirm this is
        safe on all platforms; a plain BufferedDC is the usual choice.
        """
        log.debug("Draw()")
        dc = wx.BufferedPaintDC(self, self._Buffer)
        dc.Clear()
        if not self.image:
            return
        # Blit the background map into the buffer.
        png_dc = wx.MemoryDC()
        png_dc.SelectObject(self.image)
        dc.Blit(0, 0, self.image.GetWidth(), self.image.GetHeight(),
                png_dc, 0, 0)
        dc.SetPen(wx.Pen('black'))
        for item in self.sprites:
            item.Draw(dc)
        # Highlight the currently selected map in yellow.
        if self.current_map:
            dc.SetPen(wx.Pen('yellow'))
            self.current_map.Draw(dc)
        ms = MapService()
        ds = DistanceService(ms)
        # Destination: red marker, its map's doors in yellow, the
        # nearest door in green.
        if wx.GetApp().GetNavTo():
            dc.SetPen(wx.Pen('red'))
            DrawElLocation(dc, wx.GetApp().GetNavTo())
            dc.SetPen(wx.Pen('yellow'))
            for item in ms.doors(wx.GetApp().GetNavTo().map_name):
                DrawElLocation(dc, item)
            dc.SetPen(wx.Pen('green'))
            DrawElLocation(dc, ds.nearest_door(wx.GetApp().GetNavTo()))
        # Start point: same scheme with a blue marker.
        if wx.GetApp().GetNavFrom():
            dc.SetPen(wx.Pen('blue'))
            DrawElLocation(dc, wx.GetApp().GetNavFrom())
            dc.SetPen(wx.Pen('yellow'))
            for item in ms.doors(wx.GetApp().GetNavFrom().map_name):
                DrawElLocation(dc, item)
            dc.SetPen(wx.Pen('green'))
            DrawElLocation(dc, ds.nearest_door(wx.GetApp().GetNavFrom()))
        # When both endpoints are set, draw the computed route as a
        # blue polyline between consecutive waypoints.
        if wx.GetApp().GetNavTo() and wx.GetApp().GetNavFrom():
            route = wx.GetApp().GetRoute()
            #~ for item in route:
                #~ log.debug(item)
            dc.SetPen(wx.Pen('blue'))
            last_pos = el_to_dc(route[0].payload)
            for item in route[1:]:
                pos = el_to_dc(item.payload)
                dc.DrawLine(last_pos[0], last_pos[1],
                            pos[0], pos[1])
                last_pos = pos
last_pos = pos
class WordMapFrame(wx.Frame):
    """Top-level frame hosting a WorldMapWindow sized to fill it."""
    def __init__(self, parent, id=wx.ID_ANY, title="WordMapFrame", pos=wx.DefaultPosition,
                 size=(512,512), style=wx.DEFAULT_FRAME_STYLE):
        wx.Frame.__init__(self, parent, id, title, pos, size, style)
        self.wnd = WorldMapWindow(self)
        self.wnd.SetSize(self.GetSize())
def main():
    """Stand-alone entry point: create the app, show the world map frame,
    and run the wx main loop."""
    # Imported here to avoid a circular import at module load time.
    from eh_app import EhApp
    app = EhApp()
    win = WordMapFrame(None, pos=(0,0))
    win.Show()
    log.info("entering main loop")
    app.MainLoop()
| true |
45bc106d5b896323c2348444ff2b331a13c0d73e | Python | DanielVorobiov/TMPS_labs | /codewars.py | UTF-8 | 3,788 | 2.828125 | 3 | [] | no_license |
from abc import ABC, abstractmethod
# Name of the synthetic root folder shown at the top of the tree.
root = 'Desktop'
# Flat file paths to be assembled into a composite Folder/File tree below.
files = [
    'meetings/2021-01-12/notes.txt',
    'meetings/2020_calendar.xlsx',
    'meetings/2021-01-12/report.pdf',
    'misc/photos/forest_20130430.jpg',
    'misc/photos/sunset_20130412.jpg',
    'scripts/tree.py',
    'meetings/2021-01-24/report.pdf',
]
class Component:
    """Base of the composite hierarchy: a named tree node.

    Subclasses (Folder, File) override display(); the base is a no-op.
    """
    def __init__(self, name):
        self.name = name
    def display(self, level=0):
        """Print this node at the given tree depth (overridden)."""
        pass
class Folder(Component):
    """Composite node: a folder holding child Components."""
    def __init__(self, name):
        self.components = []
        super().__init__(name)
    def display(self, level):
        """Print this folder with tree guides, then its children one
        level deeper."""
        spaces = "|   "
        print(spaces * (level-1) + '├──' + self.name)
        for component in self.components:
            component.display(level + 1)
    def addComponent(self, component):
        """Append *component* unless the same object is already a child."""
        if component not in self.components:
            self.components.append(component)
class File(Component):
    """Leaf node: a file with no children."""
    def __init__(self, name):
        super().__init__(name)
    def display(self, level):
        """Print this file indented beneath its folder's tree guides."""
        spaces = "    "
        print("|" + " |" * (level-2) + spaces * (level-1) + '├──' + self.name)
# Split each path into its components, e.g. 'a/b/c.txt' -> ['a', 'b', 'c.txt'].
paths = []
for file in files:
    elements = file.split("/")
    paths.append(elements)
# pathsDictionary: stringified index -> Component instance;
# pathsNamesDirectory: same index -> that component's name.
pathsDictionary = {}
pathsNamesDirectory = {}
# Collect every distinct path element, preserving first-seen order.
# NOTE(review): matching is by name only, so identically named files in
# different folders collapse into one node (e.g. the two 'report.pdf').
unique = []
for path in paths:
    for element in path:
        if element not in unique:
            unique.append(element)
# NOTE(review): `temp` is never used afterwards and aliases (does not
# copy) the list; consider removing it.
temp = unique
def createComponent(path):
    """Build a leaf (File) when the name contains a dot, otherwise a
    branch (Folder)."""
    return File(path) if "." in path else Folder(path)
# Create one Component per unique name, keyed "1", "2", ... in first-seen
# order.  (The loop variable shadows the `unique` list name.)
for i, unique in zip(range(1, len(unique)+1), unique):
    pathsDictionary[str(i)] = createComponent(unique)
    pathsNamesDirectory[str(i)] = pathsDictionary[str(i)].name
# Wire parents to children by matching the last 2-3 elements of each
# path against component names (grandparent -> parent -> leaf).
for path in paths:
    if len(path) > 2:
        for key in pathsNamesDirectory:
            if path[-3] == pathsNamesDirectory[key]:
                for key1 in pathsNamesDirectory:
                    if path[-2] == pathsNamesDirectory[key1]:
                        pathsDictionary[key].addComponent(
                            pathsDictionary[key1])
                        for key2 in pathsNamesDirectory:
                            if path[-1] == pathsNamesDirectory[key2]:
                                pathsDictionary[key1].addComponent(
                                    pathsDictionary[key2])
    else:
        # Two-element paths: just folder -> file.
        for key in pathsNamesDirectory:
            if path[-2] == pathsNamesDirectory[key]:
                for key1 in pathsNamesDirectory:
                    if path[-1] == pathsNamesDirectory[key1]:
                        pathsDictionary[key].addComponent(
                            pathsDictionary[key1])
# Print the assembled tree: keys "1", "6", "10" are the three top-level
# folders (meetings, misc, scripts) in first-seen order.
print(root)
pathsDictionary["1"].display(1)
pathsDictionary["6"].display(1)
pathsDictionary["10"].display(1)
# pathsDictionary["1"][-3].display(1)
# pathsDictionary["3"][-3].display(1)
# folder1_1 = Folder("meetings")
# folder1_2 = Folder("misc")
# folder1_3 = Folder("scripts")
# folder2_1 = Folder("2021-01-12")
# folder2_2 = Folder("2021-01-14")
# file2_1 = File("2020_calendar.xlsx")
# file2_5 = File("notes.txt")
# file2_6 = File("report.pdf")
# folder2_1.addComponent(file2_5)
# folder2_1.addComponent(file2_6)
# folder2_2.addComponent(file2_6)
# folder2_3 = Folder("photos")
# folder1_1.addComponent(folder2_1)
# folder1_1.addComponent(folder2_2)
# folder1_1.addComponent(file2_1)
# folder1_2.addComponent(folder2_3)
# file2_2 = File("sunset_20130412.jpg")
# file2_3 = File("forest_20130430.jpg")
# folder2_3.addComponent(file2_2)
# folder2_3.addComponent(file2_3)
# file2_4 = File("tree.py")
# folder1_3.addComponent(file2_4)
# print(root)
# folder1_1.display(1)
# folder1_2.display(1)
# folder1_3.display(1)
| true |
f425596dd688828c3c8266c6573b94893e4b03eb | Python | prashnts/agloe | /example.py | UTF-8 | 487 | 2.65625 | 3 | [
"MIT"
] | permissive | import skimage.io
import skimage.draw
from hello.argleton.model import MapShots
# Load the map image exposed by the MapShots model.
agloe = MapShots()
img = agloe.map
def draw_agent(x, y, img):
    """Draw an 'agent' marker on *img* in place: three concentric filled
    circles of decreasing radius and increasing opacity.

    Each step is (radius, RGBA colour); the image is also returned.
    """
    steps = [(100, [100, 20, 255, 50]),
             (50, [100, 20, 255, 150]),
             (20, [50, 20, 150, 255])]
    for step in steps:
        rr, cc = skimage.draw.circle(x, y, step[0])
        img[rr, cc] = step[1]
    return img
# Place three agents and show the result.
draw_agent(100, 100, img)
draw_agent(100, 120, img)
draw_agent(200, 300, img)
skimage.io.imshow(img)
skimage.io.show()
| true |
ab1263a00c6d324a63919735269cb5cf9a599650 | Python | Stepan91/foodgram-project | /recipes/models.py | UTF-8 | 2,707 | 2.515625 | 3 | [] | no_license | from django.db import models
from django.contrib.auth import get_user_model
from multiselectfield import MultiSelectField
from django.core.validators import MinValueValidator
from .tags_instanse import TAGS_DICT
# Active user model (supports custom AUTH_USER_MODEL).
User = get_user_model()
# Tag choices derived from the shared TAGS_DICT; value equals the label.
TAG_CHOICES = [(name, name) for name in TAGS_DICT]
class Ingredient(models.Model):
    """A single ingredient with its unit of measurement."""
    # Unique human-readable ingredient name.
    name = models.CharField(
        max_length=200,
        verbose_name='Ингредиент',
        unique=True,
        error_messages={'unique': 'Ингредиент с таким именем уже существует.'}
    )
    # Unit of measurement, e.g. grams or pieces.
    unit = models.CharField(max_length=50, verbose_name='Ед. изм.')
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = 'Ингредиент'
        verbose_name_plural = 'Ингредиенты'
class Recipe(models.Model):
    """A recipe: author, ingredients (through IngredientRecipe), tags
    and cooking time; ordered newest first."""
    author = models.ForeignKey(User, on_delete=models.CASCADE,
                               related_name='recipes', verbose_name='Автор')
    title = models.CharField(max_length=200, verbose_name='Название')
    pub_date = models.DateTimeField('Дата публикации', auto_now_add=True)
    image = models.ImageField(verbose_name='Изображение')
    description = models.TextField(verbose_name='Описание')
    # M2M through IngredientRecipe so each link can carry an amount.
    ingredient = models.ManyToManyField(
        Ingredient,
        through='IngredientRecipe',
        verbose_name='Ингредиенты')
    # Up to three meal tags chosen from TAG_CHOICES.
    tag = MultiSelectField(
        verbose_name='Теги',
        choices=TAG_CHOICES,
        max_choices=3,
        default='Обед'
    )
    # Cooking time in minutes; must be at least 1.
    cooking_time = models.IntegerField(validators=[MinValueValidator(1)])
    slug = models.SlugField(max_length=100, unique=True, blank=True, null=True)
    def __str__(self):
        return self.title
    class Meta:
        ordering = ('-pub_date',)
        verbose_name = 'Рецепт'
        verbose_name_plural = 'Рецепты'
class IngredientRecipe(models.Model):
    """Through model linking an Ingredient to a Recipe with its amount."""
    ingredient = models.ForeignKey(
        Ingredient,
        on_delete=models.CASCADE,
        related_name='ingredientrecipe',
        verbose_name='Ингредиент')
    recipe = models.ForeignKey(
        Recipe,
        on_delete=models.CASCADE,
        related_name='ingredientrecipe',
        verbose_name='Рецепт')
    # Amount of the ingredient in its own unit; at least 1.
    value = models.IntegerField(
        validators=[MinValueValidator(1)],
        verbose_name='Количество'
    )
    def __str__(self):
        return (f'Ингредиент "{self.ingredient.name}"'
                f'в рецепте "{self.recipe.title}"')
    class Meta:
        verbose_name = 'Ингредиент-Рецепт'
        verbose_name_plural = 'Ингредиенты-Рецепты'
| true |
02811dd0f17ef838ef55f650516c4b3120b7598c | Python | pragyanshu0110/Digit-detection-via-motion-of-head | /digit_prediction.py | UTF-8 | 1,617 | 2.578125 | 3 | [] | no_license | from sklearn.externals import joblib
from skimage.feature import hog
import numpy as np
import cv2
import matplotlib.pyplot as plt
# Load the trained SVM classifier.
# NOTE(review): sklearn.externals.joblib (imported above) was removed in
# scikit-learn 0.23; migrate to the standalone `joblib` package.
clsf = joblib.load("pragya.pkl")
# Read the test image.
img = cv2.imread("test1.jpg")
# Convert to grayscale and apply Gaussian filtering
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_gray = cv2.GaussianBlur(img_gray, (5, 5), 0)
# Threshold (inverted binary): digit strokes become white on black.
ret, img_th = cv2.threshold(img_gray, 90, 255, cv2.THRESH_BINARY_INV)
# Find external contours in the thresholded image.
_,ctrs, hier = cv2.findContours(img_th.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) #/////////////////////////////////////////////
# Bounding rectangle for each contour, as (x, y, w, h).
rects = [cv2.boundingRect(ctr) for ctr in ctrs] # it returns [x,y,w,h]
# For each rectangular region, calculate HOG features and predict
# the digit using Linear SVM.
#print(rects)
rect=rects[0] # here only one contour is present
#print(rect)
# Expand the region to 1.6x the digit height, centred on the rectangle.
leng = int(rect[3] * 1.6)
pt1 = int(rect[1] + rect[3] // 2 - leng // 2)
pt2 = int(rect[0] + rect[2] // 2 - leng // 2)
roi = img_th[pt1:pt1+leng, pt2:pt2+leng]
# Resize to the 28x28 input the classifier was trained on, then thicken
# the strokes slightly.
roi = cv2.resize(roi, (28, 28), interpolation=cv2.INTER_AREA)
roi = cv2.dilate(roi, (3, 3))
# HOG features; parameters must match training.
# NOTE(review): the `visualise` keyword was renamed `visualize` in newer
# scikit-image releases -- confirm the pinned version.
roi_hog_fd = hog(roi, orientations=9, pixels_per_cell=(14, 14), cells_per_block=(1, 1), visualise=False)
#print('roihogfd',np.array([roi_hog_fd]))
nbr = clsf.predict(np.array([roi_hog_fd], 'float64'))
#cv2.putText(im, str(int(nbr[0])), (rect[0], rect[1]),cv2.FONT_HERSHEY_DUPLEX, 2, (0, 255, 255), 3)
print(int(nbr[0]))
| true |
def checkio(data):
    """Convert a positive integer (1..3999) to its Roman numeral string.

    Works digit by digit from the most significant position, recursing
    on the remaining digits.
    """
    # Symbols per decimal position: [one-unit, five-unit]
    # (thousands have no five-unit symbol, capping input at 3999).
    ind = {1: ["I", "V"], 2: ["X", "L"], 3: ["C", "D"], 4: ["M"]}
    length = len(str(data))
    num = int(str(data)[0])
    number = ""
    if length > 1:
        # Remaining digits after the leading one (renamed from `next`,
        # which shadowed the builtin).
        rest = int(str(data)[1:])
    if num > 0 and num < 4:
        number = ind[length][0] * num
    elif num == 4:
        number = ind[length][0] + ind[length][1]
    elif num > 4 and num < 9:
        number = ind[length][1] + ind[length][0] * (num - 5)
    elif num == 9:
        number = ind[length][0] + ind[length + 1][0]
    if length != 1:
        # Fix: the original also ran `print(number + checkio(next))` here,
        # leaving debug output on stdout and recomputing the recursion a
        # second time for every digit.
        return number + checkio(rest)
    else:
        return number
if __name__ == '__main__':
    # Self-check assertions; not required for the auto-tester.
    assert checkio(9) == "IX", "9"
    assert checkio(99) == 'XCIX',"99"
    assert checkio(6) == 'VI', '6'
    assert checkio(76) == 'LXXVI', '76'
    assert checkio(499) == 'CDXCIX', '499'
    assert checkio(3888) == 'MMMDCCCLXXXVIII', '3888'
24d5ddfd705fde1f8a464200e71837453262d8dd | Python | ctn-archive/nengo_theano | /nengo_theano/lif_rate.py | UTF-8 | 1,938 | 2.71875 | 3 | [
"MIT"
] | permissive | from _collections import OrderedDict
import theano
from theano import tensor as TT
import neuron
class LIFRateNeuron(neuron.Neuron):
    """Rate-mode leaky integrate-and-fire neuron population (Theano)."""
    def __init__(self, size, tau_rc=0.02, tau_ref=0.002):
        """Constructor for a set of LIF rate neurons.

        :param int size: number of neurons in the set
        :param float tau_rc: the RC (membrane) time constant
        :param float tau_ref: refractory period length (s)
        """
        neuron.Neuron.__init__(self, size)
        self.tau_rc = tau_rc
        self.tau_ref = tau_ref
    def make_alpha_bias(self, max_rates, intercepts):
        """Compute the gain (alpha) and offset (j_bias) that give each
        neuron the requested maximum firing rate and x-intercept.

        :param float array max_rates: maximum firing rates of neurons
        :param float array intercepts: x-intercepts of neurons
        """
        x = 1.0 / (1 - TT.exp(
            (self.tau_ref - (1.0 / max_rates)) / self.tau_rc))
        # Fix: the original referenced an undefined name `z2` here,
        # raising NameError; the intended value is `x` computed above.
        alpha = (1 - x) / (intercepts - 1.0)
        j_bias = 1 - alpha * intercepts
        return alpha, j_bias
    def update(self, J, dt):
        """Theano update rule implementing the LIF rate neuron type.

        Returns a dictionary mapping the output variable to the firing
        rate for the current time step.

        :param float array J: input current for the current time step
        :param float dt: simulation time step (unused in rate mode)
        """
        # Denominator of the LIF rate equation.
        rate = self.tau_ref - self.tau_rc * TT.log(
            1 - 1.0 / TT.maximum(J, 0))
        # Neurons fire only when the input current exceeds threshold (J > 1).
        rate = TT.switch(J > 1, 1 / rate, 0)
        # Return dictionary of internal variables to update.
        return OrderedDict({
            self.output: TT.unbroadcast(rate.astype('float32'), 0)
        })
neuron.types['lif-rate'] = LIFRateNeuron
| true |
adb3c4d0dc1e69668d1646f495a3bfd058448765 | Python | LuciaBaldassini/Grasping_Detection_System | /code/similarities.py | UTF-8 | 3,001 | 3.5625 | 4 | [
"MIT"
] | permissive | from scipy.spatial.distance import jaccard
import numpy as np
import pandas as pd
# Computing Jaccard Distance of two 5D-Rectangles
# Issues to deal with:
# Normalizing values?
# Input format correct?
# Weighting of the different dimensions?
def jaccard_distance(datFr, name, pred):
    """Return the smallest Jaccard distance between *pred* and the
    rectangles labelled for file *name*.

    datFr: DataFrame whose column 0 holds file names and whose remaining
        columns hold the 5-D rectangle values.
    name: name (string) of the file whose rectangles are compared.
    pred: the 5-D prediction rectangle.
    """
    # Rows belonging to this file; drop the name column (column 0).
    rectangles = datFr.loc[datFr[0].str.match(name), 1:]
    # Jaccard distance from the prediction to every labelled rectangle,
    # keeping only the closest one.
    distances = rectangles.apply(lambda row: jaccard(row, pred), axis=1)
    return distances.min()
"""
Returns the closest Ruzicka distance (a relative of the Jaccard distance)
between the rectangles in the label data and the prediction rectangle.
Input:
datFr: 5 Dim. DataFrame including all labels, assuming that column 0
includes the names of the respective files the rectangles belong to.
name: Name as string of the correct file.
pred: Prediction rectangle
Return:
Closest Distance (should be a float)
"""
def ruzicka_distance(datFr, name, pred):
    """Return the smallest Soergel distance (1 - Ruzicka similarity)
    between *pred* and the rectangles labelled for file *name*.

    datFr: DataFrame whose column 0 holds file names and whose remaining
        columns hold the 5-D rectangle values.
    name: name (string) of the file whose rectangles are compared.
    pred: the 5-D prediction rectangle.
    """
    def soergel(row, other):
        # Element-wise minima and maxima of the two rectangles; identical
        # rectangles yield 0, disjoint values approach 1.
        lo = np.minimum(row, other)
        hi = np.maximum(row, other)
        return 1 - lo.sum() / hi.sum()

    # Rows for this file, rectangle columns only (column 0 is the name).
    rectangles = datFr.loc[datFr[0].str.match(name), 1:]
    return rectangles.apply(soergel, axis=1, args=[pred]).min()
"""
Function to incorporate both the positive and negative rectangles. Computes
both the Ruzicka distance to the closest positive and negative rectangle and
returns the positive plus the inverted negative Soergel Distance divided by two.
Input:
pos_df: 5 Dim. DataFrame including all labels for pos. rectangles
(see ruzicka_distance)
neg_df: 5 DIm. DataFrame, but for negative rectangles
name: Name as string of correct image
pred: Prediction Rectangle
"""
def ruz_posneg(pos_df, neg_df, name, pred):
    """Score *pred* against both rectangle sets: the Soergel distance to
    the closest positive rectangle, averaged with the *inverted* distance
    to the closest negative rectangle (lower is better)."""
    positive = ruzicka_distance(pos_df, name, pred)
    negative = 1 - ruzicka_distance(neg_df, name, pred)
    return (positive + negative) / 2
| true |
07717ec6a4306ff66609c640d3ec38632ea0c87f | Python | DL2021Spring/CourseProject | /data_files/Number of Airplanes in the Sky.py | UTF-8 | 924 | 3.390625 | 3 | [] | no_license |
__author__ = 'Daniel'
class Interval(object):
    """A half-open time interval with `start` and `end` attributes."""
    def __init__(self, start, end):
        self.start, self.end = start, end
class Solution:
    """Count the peak number of airplanes in the sky at the same time.

    A flight occupies the air during [start, end); a landing at time t
    does not overlap a takeoff at time t (enforced by the `<=` pop below).
    """
    @staticmethod
    def cmp(a, b):
        # Legacy Python-2 comparator kept for backward compatibility with
        # any external callers; ordering is by start, then by end.
        if a.start != b.start:
            return a.start-b.start
        else:
            return a.end-b.end
    def countOfAirplanes(self, airplanes):
        """Return the maximum number of simultaneously flying airplanes."""
        return self.count_heap(airplanes)
    def count_heap(self, intervals):
        """Sweep the sorted intervals, keeping a min-heap of landing times.

        The heap size after discarding already-landed flights is the
        number of planes currently airborne.
        """
        import heapq
        # Fixed: list.sort(cmp=...) was removed in Python 3; this key is
        # equivalent to the cmp() comparator above (start, then end).
        intervals.sort(key=lambda iv: (iv.start, iv.end))
        heap = []
        cnt = 0
        for intv in intervals:
            heapq.heappush(heap, intv.end)
            # Pop every flight that has landed by this takeoff time.
            # `heap` guard keeps the loop safe on degenerate intervals.
            while heap and heap[0] <= intv.start:
                heapq.heappop(heap)
            cnt = max(cnt, len(heap))
        return cnt
if __name__ == "__main__":
    # Smoke test: these four intervals overlap at most three deep.
    # Fixed: a stray "| true" token fused onto this line made it a
    # NameError-raising expression.
    assert Solution().countOfAirplanes([Interval(i[0], i[1]) for i in [[1, 10], [2, 3], [5, 8], [4, 7]]]) == 3
0f6638d7c40c25b44b1d1a53ba8c8b77fe55bd5b | Python | msahu2595/PYTHON_3 | /fromkeys_get_copy_clear_121.py | UTF-8 | 869 | 3.65625 | 4 | [] | no_license | # fromkeys
# d = {'name' : 'unknown', 'age' : 'unknown'}
# d = dict.fromkeys(['name', 'age', 'height'], 'unknown')
# print(d)
# d = dict.fromkeys(('name', 'age', 'height'), 'unknown')
# print(d)
# d = dict.fromkeys('abc', 'unknown')
# print(d)
# d = dict.fromkeys(range(1,11), 'unknown')
# print(d)
# d = dict.fromkeys(['name', 'age'], ['unknown', 'unknown'])
# print(d)
# get method (useful)
d = {'name' : 'harshit', 'age' : 'unknown'}
print(d['name'])
# print(d['names']) # give error
print(d.get('names')) # None ----> better
# if error ---> False, else ----> True
if 'name' in d:
    print('present')
else:
    print('not present')
# if None ---> False, else ----> True
if d.get('names'):
    print('present')
else:
    print('not present')
# d.clear()
# print(d)
# Fixed: d1 must exist before the identity/equality checks below; it was
# previously only defined in commented-out lines, so the last two prints
# raised NameError.
d1 = d.copy()
# print(d1.popitem())
# print(d)
print(d1 is d)   # False: copy() creates a distinct dict object
print(d1 == d)   # True: same keys and values
| true |
# Demonstrates seek()/tell() and in-place overwriting of file contents.
# Fixed: ported Python 2 print statements to Python 3 and closed the
# read handle that was previously leaked.
path = r'\var\www\Python\ClassEleven\somefile.txt'
f = open(path, 'w')
f.write('0123467890123456789')
f.seek(5)                  # move the write position to byte offset 5
print(f.tell())            # -> 5
f.write('Hello World!')    # overwrites bytes 5..16 in place
f.close()
f = open(path, 'r')
print(f.read())
f.close()
| true |
a49cb25d3112afa1dd991006dbd34623324c939d | Python | jiangw0419/python_way | /python_day_09/d01.py | UTF-8 | 2,090 | 4.375 | 4 | [] | no_license | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@文件 :d01.py
@时间 :2020/09/09 09:34:03
@作者 :江伟
@版本 :1.0
@说明 :面向对象进阶
在前面的章节我们已经了解了面向对象的入门知识,知道了如何定义类,如何创建对象以及如何给对象发消息。
为了能够更好的使用面向对象编程思想进行程序开发,我们还需要对Python中的面向对象编程进行更为深入的了解。
'''
"""
@property装饰器:
之前我们讨论过Python中属性和方法访问权限的问题,虽然我们不建议将属性设置为私有的,
但是如果直接将属性暴露给外界也是有问题的,比如我们没有办法检查赋给属性的值是否有效。
我们之前的建议是将属性命名以单下划线开头,通过这种方式来暗示属性是受保护的,不建议外界直接访问,
那么如果想访问属性可以通过属性的getter(访问器)和setter(修改器)方法进行对应的操作。
如果要做到这点,就可以考虑使用@property包装器来包装getter和setter方法,使得对属性的访问既安全又方便,
代码如下所示。
"""
class Person(object):
    """A person whose name and age are exposed through properties.

    The underscore-prefixed attributes signal protected access; the
    properties provide the sanctioned getters/setters.
    """
    def __init__(self, name, age):
        self._name = name
        self._age = age
    # accessor (getter)
    @property
    def name(self):
        """str: the person's name."""
        return self._name
    @name.setter
    def name(self, name):
        # Restored: main() assigns person.name, which raised
        # "AttributeError: can't set attribute" while this setter was
        # commented out.
        self._name = name
    @property
    def age(self):
        """int: the person's age."""
        return self._age
    @age.setter
    def age(self, age):
        self._age = age
    def play(self):
        # Under 16 plays aeroplane chess; 16 and older plays Go.
        if self._age < 16:
            print('%s 正在玩飞行棋' % self._name)
        else:
            print('%s 正在玩围棋' % self._name)
def main():
    """Exercise the Person properties and the age-dependent play()."""
    p = Person('张三', 15)
    p.play()  # under 16 -> aeroplane chess
    p.age = 22
    # Assigning the name requires Person to define a name.setter;
    # without one this raises AttributeError: can't set attribute.
    p.name = '李四'
    p.play()  # 16 or older -> Go
# Run the demo only when executed directly, not on import.
if __name__ == "__main__":
    main()
69e758cf15b12c0935929b1301a625a89acec94d | Python | nubobap2015/Voice_ChatBot | /speech_to_text.py | UTF-8 | 7,263 | 2.578125 | 3 | [] | no_license | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# OS : GNU/Linux Ubuntu 16.04 or 18.04
# LANGUAGE : Python 3.5.2 or later
# AUTHOR : Klim V. O.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
'''
Распознавание речи с помощью PocketSphinx.
'''
import os
import subprocess
from pydub import AudioSegment
from pocketsphinx import Pocketsphinx, LiveSpeech, get_model_path
class SpeechToText:
    """Speech recognition via PocketSphinx.

    Parameters
    ----------
    mode : str
        'from_file'  -- recognise speech from a .wav file
        (sampling rate >= 16 kHz, 16 bit, mono);
        'from_microphone' -- recognise live microphone input.
    name_dataset : str
        Name of the dataset the language model was built from:
        'plays_ru', 'subtitles_ru' or 'conversations_ru'.
    """
    def __init__(self, mode='from_microphone', name_dataset='plays_ru'):
        self.current_dirname = os.path.dirname(os.path.realpath(__file__))
        self.work_mode = mode
        model_path = get_model_path()
        if not (name_dataset == 'plays_ru' or name_dataset == 'subtitles_ru' or name_dataset == 'conversations_ru'):
            print('\n[E] Неверное значение name_dataset. Возможные варианты: plays_ru, subtitles_ru или conversations_ru\n')
            return
        if self.work_mode == 'from_file':
            # Acoustic model, language model and pronunciation dictionary.
            config = {
                'hmm': os.path.join(model_path, 'zero_ru.cd_cont_4000'),
                'lm': os.path.join(model_path, 'ru_bot_' + name_dataset + '.lm'),
                'dict': os.path.join(model_path, 'ru_bot_' + name_dataset + '.dic')
            }
            self.speech_from_file = Pocketsphinx(**config)
        elif self.work_mode == 'from_microphone':
            self.speech_from_microphone = LiveSpeech(
                verbose=False,
                sampling_rate=16000,
                buffer_size=2048,
                no_search=False,
                full_utt=False,
                hmm=os.path.join(model_path, 'zero_ru.cd_cont_4000'),
                lm=os.path.join(model_path, 'ru_bot_' + name_dataset + '.lm'),
                dic=os.path.join(model_path, 'ru_bot_' + name_dataset + '.dic')
            )
        else:
            print('[E] Неподдерживаемый режим работы, проверьте значение аргумента mode.')
    # TODO: add noise filtering, e.g. with sox
    def get(self, f_name_audio=None):
        """Recognise speech using the mode chosen at construction time.

        Parameters
        ----------
        f_name_audio : str or None
            Name of a .wav or .opus file with speech (file mode only;
            sampling rate >= 16 kHz, 16 bit, mono).

        Returns
        -------
        str
            The recognised phrase ('error' if ffmpeg conversion failed).
        """
        if self.work_mode == 'from_file':
            if f_name_audio is None:
                print('[E] В режиме from_file необходимо указывать имя .wav или .opus файла.')
                return
            # Fixed: derive names from the *last* extension. The previous
            # slicing on f_name_audio.find('.') broke for names containing
            # more than one dot (e.g. 'take.1.wav' or dotted directories).
            base_name, extension = os.path.splitext(f_name_audio)
            filename_audio_raw = base_name + '.raw'
            filename_audio_wav = base_name + '.wav'
            audio_format = extension[1:]
            # Convert a .opus file to .wav via ffmpeg.
            if audio_format == 'opus':
                # NOTE(review): shell=True with an interpolated filename is
                # shell-injection prone if f_name_audio is untrusted.
                command_line = "yes | ffmpeg -i '" + f_name_audio + "' '" + filename_audio_wav + "'"
                proc = subprocess.Popen(command_line, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                out, err = proc.communicate()
                # ffmpeg reports input errors as '<name>: <message>'.
                if err.decode().find(f_name_audio + ':') != -1:
                    return 'error'
            # Convert the .wav file to headerless 16 kHz .raw audio.
            audio_file = AudioSegment.from_wav(self.current_dirname + '/' + filename_audio_wav)
            audio_file = audio_file.set_frame_rate(16000)
            audio_file.export(self.current_dirname + '/' + filename_audio_raw, format = 'raw')
            # Create the decoder run and return its best hypothesis.
            self.speech_from_file.decode(
                audio_file = self.current_dirname + '/' + filename_audio_raw,
                buffer_size = 2048,
                no_search = False,
                full_utt = False
            )
            return self.speech_from_file.hypothesis()
        elif self.work_mode == 'from_microphone':
            # LiveSpeech is an iterator; return the first phrase heard.
            for phrase in self.speech_from_microphone:
                return str(phrase)
def main():
    """Load the language model, then echo recognised phrases forever."""
    print('[i] Загрузка языковой модели... ', end='')
    recognizer = SpeechToText('from_microphone')
    print('ок')
    while True:
        print(recognizer.get())
# Start the interactive recognition loop when executed as a script.
if __name__ == '__main__':
    main()
'''
# Распознавание из потока данных с помощью pocketsphinx
from pocketsphinx import DefaultConfig, Decoder
model_path = get_model_path()
config = DefaultConfig()
config.set_string('-hmm', os.path.join(model_path, 'zero_ru.cd_cont_4000'))
config.set_string('-lm', os.path.join(model_path, 'ru.lm'))
config.set_string('-dict', os.path.join(model_path, 'my_dict.dic'))
decoder = Decoder(config)
# Decode streaming data
buf = bytearray(1024)
with open(os.path.join(os.path.dirname(sys.argv[0]) + '/data/answer.raw'), 'rb') as f:
decoder.start_utt()
while f.readinto(buf):
decoder.process_raw(buf, False, False)
decoder.end_utt()
segs = decoder.seg()
print('Best hypothesis segments:', [seg.word for seg in segs])
'''
'''
# Распознавание с помощью google speech cloud api
import speech_recognition as sr
r = sr.Recognizer()
while True:
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source)
print("Скажите что-нибудь")
audio = r.listen(source)
print('Распознаю')
try:
print(r.recognize_google(audio, language="ru-RU"))
except sr.UnknownValueError:
print("Робот не расслышал фразу")
except sr.RequestError as e:
print("Ошибка сервиса; {0}".format(e))
'''
'''
# Распознавание с помощью wit.ai
from wit import Wit
import speech_recognition as sr
client = Wit('4EXNGIL4JFS5NPKRZIRQWXAOU5DCKZRS')
r = sr.Recognizer()
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source)
print("Скажите что-нибудь")
audio = r.listen(source)
print('Распознаю')
resp = None
resp = client.speech(audio.get_wav_data(), None, {'Content-Type': 'audio/wav'})
print('Yay, got Wit.ai response: ' + str(resp))''' | true |
48abaf7cb44138ebda73395ae9e0568f0df8f211 | Python | PallabPandaOwn/python101 | /variables/venv/src/Assignment-2/FunctionAssignment.py | UTF-8 | 372 | 3.140625 | 3 | [] | no_license | def genre():
var ="genre function is calling"
return var
def artist():
    """Return the demo message identifying the artist function."""
    return "artist function is calling"
def year():
    """Return the demo message identifying the year function."""
    return "year function is calling"
# Extra credit: a function returning a boolean value.
def check():
    """Return True (extra-credit boolean example)."""
    return True
# Call each helper once, keep the results, then echo them in order.
var1 = genre()
var2 = artist()
var3 = year()
var4 = check()
for result in (var1, var2, var3, var4):
    print(result)
72f1d1269883020f917aab0f635005b952aba4f3 | Python | harbinger55/code_samples | /python/dicegame/dicegame.py | UTF-8 | 1,214 | 3.859375 | 4 | [] | no_license | #!/usr/bin/python
import rollDice
import readyToRoll
"""Set begining values for variables """
playerIndex = 0
playerScore = {'One':0,'Two':0}
""" Stay in the loop while both players score is less than 100 """
while playerScore['One'] < 100 and playerScore['Two'] < 100:
if playerIndex%2 == 0: # If the player index is even set player to 1
player = 'One'
else: # If player index is not even set play to 2
player = 'Two'
"""Loop until the results of readytoroll return True then break """
while True:
if readyToRoll.readyToRoll(player):
break
roll = rollDice.rollDice() #The player answered yes so roll the dice
if roll == 1: # Your turn ends when you role a 1
print "You have rolled a 1, your turn is over"
playerIndex += 1 # Increment the player index by 1
else:
playerScore[player] += roll # Add the new dice roll to the players total
if playerScore[player] >= 100: # First to 100 or more wins
print "Player %s has WON the game with a score of %d" % (player, playerScore[player])
else:
""" You havent won yet, keep going """
print "Player %s has rolled %d for a new score of %d, you get to go again" % (player, roll, playerScore[player])
| true |
7e98e1a4fe8444e2c148d2a4bcd9077db959bea6 | Python | jump/nigori-cogs | /vouch/vouch.py | UTF-8 | 4,669 | 2.578125 | 3 | [
"MIT"
] | permissive | import discord
import os
import datetime
import logging
from discord.ext import commands
from .utils.dataIO import fileIO
from .utils import checks
log = logging.getLogger(__name__)
class Vouch:
    """Vouch users. The more I stare at this word (Vouch) the less it means."""
    def __init__(self, bot):
        self.bot = bot
        try:
            self.vouchers = fileIO("data/vouchers/vouchers.json", "load")
        except:
            # NOTE(review): this bare except swallows any load failure and
            # leaves self.vouchers undefined, so every command below would
            # then raise AttributeError -- consider re-raising or
            # defaulting to [].
            log.debug("Exception when loading vouchers!")
    @commands.command(pass_context=True, no_pm=True)
    @checks.mod_or_permissions(create_instant_invite=True)
    async def vouch(self, ctx, user: discord.Member=None):
        """Record vouches, when members want to endose non members."""
        # Vouching for the bot redirects the vouch back at the author.
        if user:
            if user.id == self.bot.user.id:
                user = ctx.message.author
                response = "- thanks for vouching for me, your robot overlord."
                await self.bot.say(user.mention + response)
            elif user.id == ctx.message.author.id:
                response = "- you can't vouch for yourself, you silly goose"
                await self.bot.say(user.mention + response)
            else:
                # see if this author has previously vouched for this user.
                for item in self.vouchers:
                    if item['VOUCHER'] == ctx.message.author.display_name:
                        if item['USER'] == user.display_name:
                            response = " you already vouched for this user"
                            await self.bot.say(ctx.message.author.mention +
                                               response)
                            return
                # check if USER has already been vouched, record the new name
                for item in self.vouchers:
                    if item['USER'] == user.display_name:
                        if not item['VOUCHER'] == \
                                ctx.message.author.display_name:
                            # case: we have a USER who has already been vouched
                            # vouched for again, by a different discord member
                            # NOTE(review): once VOUCHER becomes "A, B" the
                            # exact-equality checks above no longer match
                            # either name, so repeat vouchers can be
                            # appended again.
                            item['VOUCHER'] = item['VOUCHER'] + ", " + \
                                ctx.message.author.display_name
                            fileIO("data/vouchers/vouchers.json", "save",
                                   self.vouchers)
                            await self.bot.say(ctx.message.author.mention +
                                               ", recorded.")
                            await self.bot.say(user.display_name +
                                               " now has multple vouches.")
                            return
                # record the vouching
                self.vouchers.append({"VOUCHER": ctx.message.author.display_name,
                                      "USER": user.display_name, "ID": user.id,
                                      "DATE": str("{:%B %d, %Y}".format(
                                          datetime.datetime.now()))})
                fileIO("data/vouchers/vouchers.json", "save", self.vouchers)
                response = " - your voucher for " + user.mention + \
                    " has been recorded."
                await self.bot.say(ctx.message.author.mention + response)
        else:
            response = "Usage: !vouch <user>"
            await self.bot.say(response)
    @commands.command(pass_context=True, no_pm=True)
    @checks.mod_or_permissions(manage_messages=True)
    async def showvouches(self):
        # Echo every recorded voucher/user/date triple to the channel.
        # NOTE(review): pass_context=True normally supplies ctx as the
        # first argument but this signature takes none -- verify against
        # the discord.py version in use.
        if self.vouchers:
            for item in self.vouchers:
                await self.bot.say(item['VOUCHER'] + " vouched for " +
                                   item['USER'] + " @ " + item['DATE'])
        else:
            response = "There are no user vouchers."
            await self.bot.say(response)
    @commands.command(pass_context=True, no_pm=True)
    @checks.mod_or_permissions(manage_messages=True)
    async def clearvouches(self):
        # Wipe all vouch records in memory and on disk.
        self.vouchers.clear()
        fileIO("data/vouchers/vouchers.json", "save", self.vouchers)
        await self.bot.say("Existing vouchers have been cleared.")
def build_folders():
    """Create the data directories and a default vouchers.json if absent."""
    for folder in ("data", "data/vouchers/"):
        if not os.path.exists(folder):
            log.debug("Creating " + folder + " folder...")
            os.makedirs(folder)
    if not os.path.isfile("data/vouchers/vouchers.json"):
        log.debug("creating default vouchers.json...")
        fileIO("data/vouchers/vouchers.json", "save", [])
def setup(bot):
    # discord.py extension entry point: ensure the data files exist,
    # then register the cog on the bot.
    build_folders()
    bot.add_cog(Vouch(bot))
| true |
3fe7633e82ea496188af3d43ae17062453b8e008 | Python | saitharak333-zz/blog-web-app | /blog/users/views.py | UTF-8 | 1,287 | 2.625 | 3 | [] | no_license | # Renderring is to display html templates.
# Redirecting to other urls.
from django.shortcuts import render, redirect
# Importing form from inbuilt django User Creation Forms.
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
# User Logging In
from django.contrib.auth import login, logout
def signup_func(request):
print(1)
if request.method == 'POST':
print(2)
form = UserCreationForm(request.POST)
if form.is_valid():
print(3)
user = form.save()
return redirect('article:arlist')
else:
print(4)
form = UserCreationForm()
print(9)
return render(request, 'users/signup.html', {'form':form})
def signin_func(request):
print(5)
if request.method == 'POST':
print(6)
form = AuthenticationForm(data=request.POST)
if form.is_valid():
print(7)
user = form.get_user()
login(request, user)
return redirect('article:arlist')
else:
print(8)
form = AuthenticationForm()
print(10)
return render(request, 'users/signin.html', {'form':form})
def signout_func(request):
if request.method == 'POST':
logout(request)
return redirect('article:arlist')
| true |
ccb4a5075c3960a8c380c06525c95ba608b83cb9 | Python | broox9/learning | /egghead/python/files.py | UTF-8 | 1,423 | 3.734375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# ### READING
# # implied read and manual close
# f = open('animals.csv')
#
# for line in f:
# print(line)
#
# # implied close, manual 'r' for read
# with open('animals.csv', 'r') as file:
# print(file.read())
# nice csv and JSON packages
import csv
import json
# --- Reading: parse CSV rows and JSON records with the stdlib parsers ---
with open('animals.csv', 'r') as f:
    animals = csv.reader(f)
    for row in animals:
        # The last column is the housebroken flag, stored as the
        # string 'True'.
        if row[-1] == 'True':
            print(f"{row[0]} the {row[1]} is allowed in the house")
        else:
            print(f"don't let {row[0]} the {row[1]} in this house!!")
with open('animals.json', 'r') as j:
    data = json.load(j)
    for obj in data:
        if obj['housebroken'] == 'True':
            print(f"{obj['name']} the {obj['type']} is allowed inside")
        else:
            print(f"{obj['name']} the {obj['type']} has to stay outside!!")
### WRITING
# 'w' overwrites, 'a' appends
w = open('cars.txt', 'w')
cars1 = ['bmw', 'lexus', 'mercedes']
for car in cars1:
    # at this point the file is emptied, but not written to just buffered
    w.write(car + '\n')
w.close() # actual write is flushed on close
with open('cars.txt', 'a') as a:
    cars = ['chevy', 'ford', 'vw']
    for car in cars:
        a.write(car + '\n')
with open('cars.json', 'w') as wj:
    cars = [
        {"make": "chevy"},
        {"make": "tesla"},
        {"make": "amc"},
        {"make": "scion"}
    ]
    json.dump(cars, wj)
| true |
51108643ca970675a64c4eb8036235dcc472e439 | Python | AlexysCoate/Mision-02 | /coordenadas.py | UTF-8 | 711 | 4.53125 | 5 | [] | no_license | # Autor:Alexys Martín Coate Reyes, A01746998
# Descripcion: Calcular la distancia entre 2 puntos dados de un plano.
"""Elabora un algoritmo y escribe un programa que calcula la distancia entre dos puntos.
# • El programa le pregunta al usuario las coordenadas (x1, y1) del primer punto y, también, las coordenadas (x2, y2) del segundo punto.
# • Imprime:
# o La distancia entre los dos puntos.
"""
# Escribe tu programa después de esta línea.
# Read the coordinates of both points from the user (prompts in Spanish).
x1 = float(input("Inserta el valor de x1: "))
x2 = float(input("Inserta el valor de x2: "))
y1 = float(input("Inserta el valor de y1: "))
y2 = float(input("Inserta el valor de y2: "))
# Euclidean distance: sqrt((x2-x1)^2 + (y2-y1)^2).
distancia = float(((x2-x1)**2 + (y2-y1)**2)**(1/2))
print("Distancia: %.4f" % (distancia))
| true |
7a117ec772f86deb7a1d9203b23ee6a2412711b2 | Python | hyper-neutrino/yuno-abandoned | /legacy/yuno.py | UTF-8 | 1,010 | 2.515625 | 3 | [
"MIT"
] | permissive | import codepage, commands, interpreter, lexer, sys
from utils import *
usage = """
yuno - a modern golfing language
$ python yuno.py <flags> <file | code> [arguments...]
Flags: h - display this help message and exit
f - read from a file
n - output a newline
p - print using python's string representation rather than yuno's
"""
if len(sys.argv) < 3:
raise SystemExit(usage)
_, flags, code, *arguments = sys.argv
if "h" in flags:
raise SystemExit(usage)
if "f" in flags:
with open(code, "r") as f:
code = f.read()
arguments = list(map(try_eval, arguments))
for key, val in zip("³⁴⁵⁶⁷", arguments):
commands.functions[key].call = Constant(val)
lines = lexer.tokenize("".join(char for char in code.replace("\n", "¶") if char in codepage.codepage))
if lines:
interpreter.links = lines
result = interpreter.quick_invoke(-1, *arguments[:2])
output(result)
else:
output(arguments[0] if arguments else 0)
if "n" in flags:
print()
| true |
5325ce99e38782a34f0ecd3afe8f753af0563ee4 | Python | chintak/scikit-image | /doc/logo/scipy_logo.py | UTF-8 | 7,926 | 3.3125 | 3 | [
"BSD-3-Clause"
] | permissive | """
Code used to trace Scipy logo.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.nxutils as nx
from skimage import io
from skimage import data
class SymmetricAnchorPoint(object):
    """Anchor point on a parametric curve whose two control handles are
    mirror images of each other.

    Parameters
    ----------
    pt : length-2 sequence
        (x, y) coordinates of the anchor point.
    theta : float
        Angle of the control handle.
    length : float
        Half-length of the symmetric handle; each control point sits
        `length` away from the anchor.
    use_degrees : bool
        If True, `theta` is given in degrees and converted to radians.
    """
    def __init__(self, pt, theta, length, use_degrees=False):
        if use_degrees:
            theta = theta * np.pi / 180
        self.pt = pt
        self.theta = theta
        self.length = length
    def control_points(self):
        """Return the two handle endpoints.

        The first lies along `theta` from the anchor, the second is
        diametrically opposite (e.g. for theta == 0: right, then left).
        """
        direction = np.array([np.cos(self.theta), np.sin(self.theta)])
        offset = self.length * direction
        return self.pt + offset, self.pt - offset
    def __repr__(self):
        degrees = self.theta * 180 / np.pi
        return 'SymmetricAnchorPoint(pt={0}, theta={1}, length={2})'.format(
            self.pt, degrees, self.length)
def curve_from_anchor_points(pts):
    """Return x, y samples of the curve through SymmetricAnchorPoints.

    Consecutive anchors are joined by cubic Bezier segments built from
    each anchor's pair of control handles.
    """
    assert len(pts) > 1
    control_seq = []
    for anchor in pts:
        forward, backward = anchor.control_points()
        # Pattern per anchor: incoming handle, anchor, outgoing handle.
        control_seq += [backward, anchor.pt, forward]
    # Drop the dangling handles before the first and after the last anchor.
    control_seq = control_seq[1:-1]
    xs, ys = [], []
    # Every third point is an anchor; each run of four defines one cubic.
    for i in range(0, len(control_seq) - 1, 3):
        xi, yi = cubic_curve(*control_seq[i:i + 4])
        xs.append(xi)
        ys.append(yi)
    return np.hstack(xs), np.hstack(ys)
def cubic_curve(p0, p1, p2, p3, npts=20):
    """Sample points on a cubic Bezier curve.

    Parameters
    ----------
    p0, p3 : length-2 sequences
        End points of the curve.
    p1, p2 : length-2 sequences
        Control points of the curve.
    npts : int
        Number of samples to return, end points included.

    Returns
    -------
    x, y : arrays
        Coordinates of the sampled points.
    """
    t = np.linspace(0, 1, npts)[:, np.newaxis]
    s = 1 - t
    # Cubic Bezier: B(t) = s^3 p0 + 3 t s^2 p1 + 3 t^2 s p2 + t^3 p3
    curve = s**3 * p0 + 3*t*s**2 * p1 + 3*t**2*s * p2 + t**3 * p3
    x, y = curve.transpose()
    return x, y
class Circle(object):
    """A circle defined by its center point and radius."""
    def __init__(self, center, radius):
        self.center = center
        self.radius = radius
    def point_from_angle(self, angle):
        """Return the point(s) on the circle at `angle` radians.

        `angle` may be a scalar or a 1D array; the double transpose keeps
        broadcasting against `center` correct in both cases.
        """
        unit = np.array((np.cos(angle), np.sin(angle))).T
        pts = self.radius * unit + self.center
        return pts.T
    def plot(self, **kwargs):
        """Draw the circle as an unfilled matplotlib patch."""
        ax = kwargs.pop('ax', plt.gca())
        fc = kwargs.pop('fc', 'none')
        patch = plt.Circle(self.center, self.radius, fc=fc, **kwargs)
        ax.add_patch(patch)
class ScipyLogo(object):
    """Object to generate scipy logo

    Parameters
    ----------
    center : length-2 array
        the Scipy logo will be centered on this point.
    radius : float
        radius of logo
    """
    # Constants describing the traced reference logo -- presumably pixel
    # coordinates/angles measured on data/scipy.png; confirm against the
    # image before changing them.
    CENTER = np.array((254, 246))
    RADIUS = 252.0
    THETA_START = 2.58
    THETA_END = -0.368
    def __init__(self, center=None, radius=None):
        if center is None:
            if radius is None:
                center = self.CENTER
            else:
                center = np.array((radius, radius))
        self.center = center
        if radius is None:
            radius = self.RADIUS
        self.radius = radius
        # calculate end points of curve so that it lies exactly on circle
        logo_circle = Circle(self.CENTER, self.RADIUS)
        s_start = logo_circle.point_from_angle(self.THETA_START)
        s_end = logo_circle.point_from_angle(self.THETA_END)
        self.circle = Circle(self.center, self.radius)
        # note that angles are clockwise because of inverted y-axis
        # Hand-tuned anchor table: (point, handle angle in degrees,
        # handle half-length), in reference-image coordinates.
        self._anchors = [SymmetricAnchorPoint(*t, use_degrees=True)
                         for t in [(s_start, -37, 90),
                                   ((144, 312), 7, 20),
                                   ((205, 375), 52, 50),
                                   ((330, 380), -53, 60),
                                   ((290, 260),-168, 50),
                                   ((217, 245),-168, 50),
                                   ((182, 118), -50, 60),
                                   ((317, 125), 53, 60),
                                   ((385, 198), 10, 20),
                                   (s_end, -25, 60)]]
        # normalize anchors so they have unit radius and are centered at origin
        for a in self._anchors:
            a.pt = (a.pt - self.CENTER) / self.RADIUS
            a.length = a.length / self.RADIUS
    def snake_anchors(self):
        """Return list of SymmetricAnchorPoints defining snake curve"""
        # Scale the normalized anchors back to this instance's
        # center/radius.
        anchors = []
        for a in self._anchors:
            pt = self.radius * a.pt + self.center
            length = self.radius * a.length
            anchors.append(SymmetricAnchorPoint(pt, a.theta, length))
        return anchors
    def snake_curve(self):
        """Return x, y coordinates of snake curve"""
        return curve_from_anchor_points(self.snake_anchors())
    def plot_snake_curve(self, **kwargs):
        # Plot the snake curve in black on `ax` (default: current axes).
        ax = kwargs.pop('ax', plt.gca())
        x, y = self.snake_curve()
        ax.plot(x, y, 'k', **kwargs)
    def plot_circle(self, **kwargs):
        self.circle.plot(**kwargs)
    def plot_image(self, **kwargs):
        # Show the reference logo image behind the traced curves.
        ax = kwargs.pop('ax', plt.gca())
        img = io.imread('data/scipy.png')
        ax.imshow(img, **kwargs)
    def get_mask(self, shape, region):
        """Return a boolean mask of `shape` selecting one logo region.

        Parameters
        ----------
        region : {'upper left', 'lower right'}
        """
        # The region boundary is the snake curve closed by a circular arc;
        # the arc's sweep direction selects which side of the curve.
        if region == 'upper left':
            theta = np.linspace(self.THETA_END, self.THETA_START - 2 * np.pi)
        elif region == 'lower right':
            theta = np.linspace(self.THETA_END, self.THETA_START)
        else:
            msg = "Expected 'upper left' or 'lower right'; got %s" % region
            raise ValueError(msg)
        xy_circle = self.circle.point_from_angle(theta).T
        x, y = self.snake_curve()
        xy_curve = np.array((x, y)).T
        xy_poly = np.vstack((xy_curve, xy_circle))
        h, w = shape[:2]
        y_img, x_img = np.mgrid[:h, :w]
        xy_points = np.column_stack((x_img.flat, y_img.flat))
        # NOTE(review): matplotlib.nxutils was removed in modern
        # matplotlib; newer code would use matplotlib.path.Path.
        mask = nx.points_inside_poly(xy_points, xy_poly)
        return mask.reshape((h, w))
def plot_scipy_trace():
    """Show the traced logo: snake curve, circle, source image, anchors."""
    plt.figure()
    logo = ScipyLogo()
    logo.plot_snake_curve()
    logo.plot_circle()
    logo.plot_image()
    plot_anchors(logo.snake_anchors())
def plot_anchors(anchors, color='r', alpha=0.7):
    """Draw each anchor point and its pair of control handles."""
    for anchor in anchors:
        # Handles as open circles joined by a line through the anchor.
        handle_x, handle_y = np.transpose(anchor.control_points())
        plt.plot(handle_x, handle_y, 'o-', color=color, mfc='w', mec=color,
                 alpha=alpha)
        # The anchor itself as a filled circle.
        plt.plot(anchor.pt[0], anchor.pt[1], 'o', color=color, alpha=alpha)
def plot_snake_overlay():
    """Overlay the traced logo curve and circle on a snake photograph."""
    plt.figure()
    logo = ScipyLogo((670, 250), 250)
    logo.plot_snake_curve()
    logo.plot_circle()
    img = io.imread('data/snake_pixabay.jpg')
    plt.imshow(img)
def plot_lena_overlay():
    """Overlay the traced logo curve and circle on the Lena test image."""
    plt.figure()
    logo = ScipyLogo((300, 300), 180)
    logo.plot_snake_curve()
    logo.plot_circle()
    # NOTE(review): skimage.data.lena() was removed from modern
    # scikit-image; confirm the pinned version still provides it.
    img = data.lena()
    plt.imshow(img)
# Render all demo figures when run as a script.
if __name__ == '__main__':
    plot_scipy_trace()
    plot_snake_overlay()
    plot_lena_overlay()
    plt.show()
| true |
39dfea3d6c11359e2c5a8bd75fe09a693e0a4859 | Python | selbieh/link | /user_task/models.py | UTF-8 | 1,260 | 2.609375 | 3 | [] | no_license | from django.contrib.auth.models import User
from django.db import models
class Task(models.Model):
    """A task assigned by an admin user to an end user.

    ``assigned_to`` is the end user; ``created_by`` is the admin author.
    Both foreign keys use ``models.PROTECT`` to preserve task history in
    the database; if users must become deletable, implement soft delete
    instead.
    """
    FINISHED = 'finished'
    PENDING = 'pending'
    status_choices = [
        (FINISHED, FINISHED),
        (PENDING, PENDING)
    ]
    title = models.CharField(null=True, blank=True, max_length=125)
    description = models.TextField(null=True, blank=True)
    assigned_to = models.ForeignKey(User, null=False, blank=False, related_name='assigned_tasks',
                                    on_delete=models.PROTECT)
    created_by = models.ForeignKey(User, null=False, blank=False, related_name='created_tasks',
                                   on_delete=models.PROTECT)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    status = models.CharField(choices=status_choices, max_length=15)
    due_date = models.DateTimeField(blank=False, null=False)

    def __str__(self):
        # Fixed: stray quote characters in the f-string previously
        # rendered as 'Title" assigned to " user'.
        return f'{self.title or ""} assigned to {self.assigned_to}'
| true |
2d6f0d44e7cb4f3ba6d6cc1c6abd40f765e92d4b | Python | cloudmesh-community/book | /latex/bin/md-tmp-to-tex.py | UTF-8 | 434 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
from pathlib import Path
import subprocess
import os
# Convert every Markdown file under tmp/ to LaTeX via bin/md-to-tex.py.
files = Path('tmp').glob('**/*.md')
for md_file in files:
    tex_body = subprocess.check_output(
        ["python3", "bin/md-to-tex.py", md_file]).decode("utf-8")
    # Fixed: with_suffix() swaps only the final extension; the previous
    # str.replace(".md", ".tex") also rewrote '.md' occurring elsewhere
    # in the path (e.g. in a directory name), corrupting the target path.
    tex_path = md_file.with_suffix(".tex")
    print("convert:", md_file, "->", tex_path)
    with open(tex_path, "w") as output:
        output.write("\\MDNAME\\")
        output.write("\n")
        output.write(tex_body)
| true |
64ccca2258c0e83509294eae219d97c496d6dd20 | Python | abhishekumrawal/FleetAI | /engine/mapper/pathgenerator.py | UTF-8 | 13,184 | 2.671875 | 3 | [] | no_license | import numpy as np
import networkx as nx
import geohelper as gh
from networkx.exception import NetworkXNoPath
class PathGenerator(object):
    """
    Path_Generator generates a simulated real trajectory
    """
    def __init__(self, G, cycle=60):
        # cycle: stored but not used by the methods visible in this
        # excerpt -- presumably a sampling period consumed elsewhere.
        self.cycle = cycle
        self.G = G
        # Cache each node's coordinates and id in parallel arrays, read
        # from the graph's per-node 'lat'/'lon' attributes.
        N = len(self.G.nodes())
        self.node_lats = np.zeros(N, 'float32')
        self.node_lons = np.zeros(N, 'float32')
        self.node_ids = np.zeros(N)
        for i, (node_id, data) in enumerate(self.G.nodes(data=True)):
            self.node_lats[i] = data['lat']
            self.node_lons[i] = data['lon']
            self.node_ids[i] = node_id
    def get_node_locs(self):
        # (lat, lon) pairs for every cached node, in cache order.
        return zip(self.node_lats, self.node_lons)
    def shortest_path(self, source, target, weight='length', distance=True):
        ## A* search for shortest path
        # The heuristic self.__grand_circle is defined outside this
        # excerpt -- presumably a great-circle distance; confirm there.
        path = nx.astar_path(self.G, source, target, self.__grand_circle, weight=weight)
        if distance:
            # Sum of the edge weights along the path; edges lacking the
            # weight attribute count as 1.
            distance = sum(self.G[u][v].get(weight, 1) for u, v in zip(path[:-1], path[1:]))
            return path, distance
        else:
            return path
    def map_matching_shortest_path(self, origin, destination, weight='length', noise=1e-3, maxtry=10):
        """Map-match origin/destination onto edges, then find a path.

        On a map-matching ValueError the point is jittered by up to
        `noise` degrees and retried, at most `maxtry` times. On
        NetworkXNoPath, endpoints with fewer than 1000 reachable nodes
        are removed from the graph and the search is retried.

        Returns (path, distance, (su, sv, sd), (tu, tv, td)): the node
        path, its length, and each matched edge's node pair plus the
        offset along it.
        """
        ptry = 0
        while 1:
            # Match the origin, retrying with random jitter on failure.
            mmtry = 0
            lat, lon = origin
            while 1:
                try:
                    su, sv, sd = self.map_match((lat, lon))
                    break
                except ValueError:
                    print "MM ERROR: ", origin
                    if mmtry > maxtry:
                        raise
                    mmtry += 1
                    lat += np.random.uniform(-noise, noise)
                    lon += np.random.uniform(-noise, noise)
            # Match the destination the same way.
            mmtry = 0
            lat, lon = destination
            while 1:
                try:
                    tu, tv, td = self.map_match((lat, lon))
                    break
                except ValueError:
                    print "MM ERROR: ", destination
                    if mmtry > maxtry:
                        raise
                    mmtry += 1
                    lat += np.random.uniform(-noise, noise)
                    lon += np.random.uniform(-noise, noise)
            try:
                path, distance = self.shortest_path(su, tu, weight=weight)
                break
            except NetworkXNoPath:
                # No route: prune endpoints that sit in small disconnected
                # components (fewer than 1000 reachable nodes), then retry.
                print "NetworkXNoPath: %d, %d" % (su, tu)
                if ptry > maxtry:
                    raise
                ptry += 1
                if len(nx.single_source_shortest_path_length(self.G, su)) < 1000:
                    self.G.remove_node(su)
                    print "REMOVE NODE: %d" % su
                if len(nx.single_source_shortest_path_length(self.G, tu)) < 1000:
                    self.G.remove_node(tu)
                    print "REMOVE NODE: %d" % tu
        source = su, sv, sd
        target = tu, tv, td
        return path, distance, source, target
    def generate_path(self, origin, destination, step, path, source, target):
        """determine the shortest path from source to target and return locations on the path

        Walks `path` edge by edge, emitting a point every `step` meters.
        `source`/`target` are (u, v, offset) triples from map matching;
        the offset trims the first and last edge. Offsets are measured
        from the lower-id node of the edge (see the `start_node <
        end_node` checks below).
        """
        if len(path) < 3:
            return [destination]
        su, sv, sd = source
        tu, tv, td = target
        trajectory = []
        ds = step
        # origin~
        # Pick the end of the matched start edge that the path leaves from.
        if path[1] == su or path[1] == sv:
            start_node = path.pop(0)
        elif path[0] == su:
            start_node = sv
        else:
            start_node = sv
        end_node = path.pop(0)
        lats, lons, bearings, lengths = self.get_segments_in_order(start_node, end_node)
        if start_node < end_node:
            d = sd
        else:
            d = sum(lengths) - sd
        # Skip `d` meters into the edge, then sample the remainder.
        for lat, lon, b, l in zip(lats[:-1], lons[:-1], bearings, lengths):
            if d > l:
                d -= l
                continue
            if d > 0:
                lat, lon = gh.end_lat_lon(lat, lon, d, b)
                trajectory.append((lat, lon))
                l -= d
                d = 0
            locs, ds = self.create_trajectory(lat, lon, b, l, step, ds)
            trajectory += locs
        start_node = end_node
        # intermediate
        # Full edges in between are sampled end to end; `ds` carries the
        # leftover distance to the next sample across edges.
        for end_node in path[:-1]:
            lats, lons, bearings, lengths = self.get_segments_in_order(start_node, end_node)
            for lat, lon, b, l in zip(lats[:-1], lons[:-1], bearings, lengths):
                locs, ds = self.create_trajectory(lat, lon, b, l, step, ds)
                trajectory += locs
            start_node = end_node
        # ~destination
        end_node = path[-1]
        if not (start_node == tu or start_node == tv):
            lats, lons, bearings, lengths = self.get_segments_in_order(start_node, end_node)
            for lat, lon, b, l in zip(lats[:-1], lons[:-1], bearings, lengths):
                locs, ds = self.create_trajectory(lat, lon, b, l, step, ds)
                trajectory += locs
            start_node = end_node
        if start_node == tu:
            end_node = tv
        else:
            end_node = tu
        lats, lons, bearings, lengths = self.get_segments_in_order(start_node, end_node)
        if start_node < end_node:
            d = td
        else:
            d = sum(lengths) - td
        # Sample up to the matched offset on the final edge, then stop.
        for lat, lon, b, l in zip(lats[:-1], lons[:-1], bearings, lengths):
            if d < l:
                locs, ds = self.create_trajectory(lat, lon, b, d, step, ds)
                trajectory += locs
                trajectory.append(gh.end_lat_lon(lat, lon, d, b))
                break
            locs, ds = self.create_trajectory(lat, lon, b, l, step, ds)
            trajectory += locs
            d -= l
        return trajectory
    def create_trajectory(self, lat, lon, bearing, distance, step, init_step):
        # Walk `distance` meters from (lat, lon) along `bearing`,
        # emitting a point every `step` meters; the first point comes
        # after `init_step` meters. Returns the points plus the distance
        # still owed before the next sample (so sampling stays evenly
        # spaced across consecutive segments).
        lats = []
        lons = []
        ds = init_step
        while distance > ds:
            lat, lon = gh.end_lat_lon(lat, lon, ds, bearing)
            lats.append(lat)
            lons.append(lon)
            distance -= ds
            ds = step
        ds -= distance
        return zip(lats, lons), ds
    def get_segments_in_order(self, start_node, end_node):
        # Return the edge's polyline (lats, lons), per-segment bearings
        # and per-segment lengths, oriented start_node -> end_node.
        # Edge data lives on one orientation only, so fall back to the
        # reversed key when the first lookup has no geometry.
        edge = self.G.get_edge_data(start_node, end_node)
        if not edge or 'lat' not in edge:
            edge = self.G.get_edge_data(end_node, start_node)
        # seg_length holds cumulative distances; differencing with the
        # total length yields individual segment lengths.
        d = edge['seg_length'] + [edge['length']]
        lengths = [d2 - d1 for d1, d2 in zip(d[:-1], d[1:])]
        bearings = edge['bearing']
        lats = edge['lat']
        lons = edge['lon']
        if start_node > end_node:
            # Stored orientation runs from the lower-id node: reverse all
            # arrays and flip each bearing by pi when going the other way.
            bearings = [b + np.pi for b in bearings[::-1]]
            lengths = lengths[::-1]
            lats = lats[::-1]
            lons = lons[::-1]
        return lats, lons, bearings, lengths
def map_match(self, loc, geo_range=0.0018):
    """Search the most probable road on which the GPS signal is observed.

    Parameters
    ----------
    loc: (lat, lon) pair
    geo_range: float; buffer value of boundary box in degrees

    Returns
    -------
    (u, v, d): edge endpoints with u < v, and the distance d along the
    edge to the matched point.
    """
    lat, lon = loc
    G = self.__get_subgraph(lat, lon, geo_range)
    # NOTE(review): indexing roads[nearest] below requires edges() to be a
    # list (networkx 1.x); under networkx 2.x edges() is a view — confirm.
    roads = G.edges()
    N = len(roads)
    if N == 0:
        raise ValueError("""No nodes within search area around input location.""")
    # float16 keeps these buffers small at the cost of ~3 digits precision.
    road_lengths = np.zeros(N, 'float16')
    road_ids = np.zeros(N)
    road_distance = np.ones((N), 'float16') * float('inf')
    node_distance = np.zeros((N), 'float16')
    for i, road in enumerate(roads):
        data = self.G.get_edge_data(*road)
        if 'lat' in data:
            road_lengths[i] = data['length']
            road_ids[i] = data['id']
            (_, road_distance[i], node_distance[i]) = self.__get_nearest_segment(lat, lon, data)
    nearest = road_distance.argmin()
    u = int(roads[nearest][0])
    v = int(roads[nearest][1])
    d = node_distance[nearest]
    # Normalize endpoint order so callers can rely on u < v.
    if u > v:
        u, v = v, u
    return u, v, d
def mm_convert(self, loc, georange=0.0018):
    """Snap a raw (lat, lon) observation onto the nearest road and return
    the snapped coordinate.

    Walks the matched edge's segments, consuming the along-edge distance d
    returned by map_match until the segment containing the point is found.
    """
    u, v, d = self.map_match(loc, georange)
    lats, lons, bearings, lengths = self.get_segments_in_order(u, v)
    for lat, lon, b, l in zip(lats[:-1], lons[:-1], bearings, lengths):
        if d > l:
            d -= l          # matched point lies beyond this segment
        elif d > 0:
            return gh.end_lat_lon(lat, lon, d, b)
        else:
            return lat, lon  # matched exactly on a segment start
    # Distance exhausted the whole edge: snap to its far endpoint.
    return lats[-1], lons[-1]
def __get_nearest_segment(self, lat, lon, data):
    """Compute geometric properties between candidate roads and observation points

    Parameters
    ----------
    lat: float;
    lon: float;
    data: dictionary; road data

    Returns
    -------
    nearest_seg: index of road segments closest to observation
    road_distance: distance between observation and the closest road
    node_distance: distance from the node with higher ID to matched point
    """
    road_lats = np.array(data['lat'])
    road_lons = np.array(data['lon'])
    bearings = np.array(data['bearing'])
    seg_lengths = np.array(data['seg_length']+[data['length']])
    # Distances from each segment vertex to the observation point.
    h = gh.distance_in_meters(road_lats, road_lons, lat, lon)
    h1 = h[:-1]
    h2 = h[1:]
    theta = gh.bearing_in_radians(road_lats, road_lons, lat, lon)
    # cos1/cos2 tell whether the perpendicular foot falls inside the
    # segment (equal signs) or beyond one of its endpoints.
    cos1 = np.cos(theta[:-1] - bearings)
    cos2 = -np.cos(theta[1:] - bearings)
    # Perpendicular distance when the foot is interior, else distance to
    # the nearer endpoint.
    d = h1 * np.sqrt(1 - cos1 ** 2) * (np.sign(cos1) == np.sign(cos2)) \
        + h1 * (np.sign(cos1) < np.sign(cos2)) + h2 * (np.sign(cos1) > np.sign(cos2))
    nearest_seg = d.argmin() #size: T
    cos1 = cos1[nearest_seg]
    cos2 = cos2[nearest_seg]
    h1 = h1[nearest_seg]
    road_distance = d[nearest_seg]
    # Distance along the edge: cumulative length to the segment start plus
    # the projection of the observation onto the segment (when interior).
    node_distance = (h1 * cos1) * (np.sign(cos1) == np.sign(cos2)) \
        + seg_lengths[nearest_seg] * ~(np.sign(cos1) > np.sign(cos2)) \
        + seg_lengths[nearest_seg+1] * (np.sign(cos1) > np.sign(cos2))
    return (nearest_seg, road_distance, node_distance)
def __grand_circle(self, source_id, target_id):
    """Great-circle distance in meters between two graph nodes.

    NOTE(review): `self.G.node` is the networkx 1.x API (removed in 2.x,
    where it is `G.nodes`) — confirm the pinned networkx version.
    """
    source = self.G.node[source_id]
    target = self.G.node[target_id]
    return gh.distance_in_meters(source['lat'], source['lon'], target['lat'], target['lon'])
def __get_subgraph(self, lat, lon, geo_range):
    """ Draw a square bounding box containing all of the observation points
    Extract nodes within this bounding box and return a subgraph containing the nodes

    Parameters
    ----------
    lat, lon: float; center of the box in degrees
    geo_range: float; half-width of the boundary box in degrees
    """
    lat_min, lat_max = lat - geo_range, lat + geo_range
    lon_min, lon_max = lon - geo_range, lon + geo_range
    # sub_id = gh.nodes_within_square(self.G, [lat_max, lon_max], [lat_min, lon_min])
    # node_lats / node_lons / node_ids are parallel arrays precomputed
    # elsewhere on this object; vectorized masking beats a graph walk.
    lats = self.node_lats
    lons = self.node_lons
    sub_ids = self.node_ids[(lats < lat_max) & (lats > lat_min) & (lons < lon_max) & (lons > lon_min)]
    return self.G.subgraph(sub_ids)
if __name__ == '__main__':
    # Batch driver (Python 2: print statements, cPickle): load the NYC road
    # graph, generate a trajectory for each sample ride request, and dump
    # the results to JSON.
    import cPickle as pickle
    import pandas as pd
    import json
    import time
    print "Loading NYC road network graph..."
    graph_path = 'data/nyc_network_graph.pkl'
    with open(graph_path, 'r') as f:
        G = pickle.load(f)
    path_generator = PathGenerator(G)
    print "Loading sample ride requests..."
    requests_path = 'data/requests_sample.csv'
    df = pd.read_csv(requests_path, nrows=150)
    trajectories = {}
    ride_requests = zip(df.request_id.values, df.trip_time.values, df[['pickup_latitude', 'pickup_longitude']].values,
                        df[['dropoff_latitude', 'dropoff_longitude']].values)
    print "Start generating paths:"
    print "# of ride requests: %d" % len(ride_requests)
    start = time.time()
    n = 0
    for rid, trip_time, origin, destination in ride_requests:
        try:
            path = path_generator.get_path(origin, destination, trip_time)
            trajectories[rid] = path
            n += 1
            if n % 100 == 0:
                print "%d elapsed time: %.2f" % (n, time.time() - start)
        except:
            # NOTE(review): bare except silently drops failed requests (and
            # swallows KeyboardInterrupt) — consider narrowing.
            continue
    print "%d elapsed time: %.2f" % (n, time.time() - start)
    with open('data/trajectories.json', 'wb') as f:
        json.dump(trajectories, f)
    print "Complete!"
# class FastPathGenerator(object):
# def generate_path(self, origin, destination, timestep):
# lat, lon = origin
# tlat, tlon = destination
# distance = gh.distance_in_meters(lat, lon, tlat, tlon)
# bearing = gh.bearing_in_radians(lat, lon, tlat, tlon)
# lats = [lat]
# lons = [lon]
# ds = distance / timestep
# while distance > ds:
# lat, lon = gh.end_lat_lon(lat, lon, ds, bearing)
# lats.append(lat)
# lons.append(lon)
# distance -= ds
# ds = step
#
# ds -= distance
# return zip(lats, lons), ds
| true |
495185a95a6b2c83e9da73eb79ca406aff3a05f4 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_201/666.py | UTF-8 | 899 | 2.71875 | 3 | [] | no_license | __author__ = 'snv'
def left(N):
    """Free stalls on the occupant's left: floor((N - 1) / 2)."""
    half, _ = divmod(N - 1, 2)
    return half
def right(N):
    """Free stalls on the occupant's right: floor(N / 2)."""
    half, _ = divmod(N, 2)
    return half
# sys.setrecursionlimit(10001)
# f = open('input.txt','r')
# Code Jam 2017 "Bathroom Stalls": for each case, compute the max/min free
# neighbours of the stall the last person takes.
f = open('C-large.in','r')
g = open('output.txt', 'w')
T = int(f.readline())
for j in range(T):
    N_stalls, N_users = f.readline().split()
    N_stalls, N_users = int(N_stalls), int(N_users)
    # Drop the leading '0b1' so `row` is the number of full halving rounds.
    bin_users = str(bin(N_users))[3:]
    row = len(bin_users)
    quant = (2**row)
    mn = N_stalls // quant -1
    rm = N_users - quant
    extra = N_stalls - quant*(mn+1)
    print('stalls', N_stalls, 'users=', N_users, 'bin', bin_users, 'avg', mn, 'extra = ', extra, 'users_left', rm )
    if rm <= extra:
        mn +=1
    # NOTE(review): these assignments shadow the left()/right() functions
    # defined above (which are otherwise unused).
    left = (mn-1) //2
    right = mn //2
    ans = str(right) +' ' + str (left)
    ans_string = 'Case #{0}: {1}\n'.format(j+1, ans)
    print(ans_string)
    g.write(ans_string)
f.close()
g.close()
| true |
e6c2e9e94d3e674818096124aaad041da5834818 | Python | pzmrzy/LeetCode | /python/simplify_path.py | UTF-8 | 429 | 3.140625 | 3 | [] | no_license | class Solution(object):
def simplifyPath(self, path):
    """Collapse a Unix-style path: resolve '.', '..' and repeated slashes.

    :type path: str
    :rtype: str (canonical absolute path, always starting with '/')
    """
    stack = []
    for part in path.split('/'):
        if part == '..':
            # Go up one level; '..' above the root is ignored.
            if stack:
                stack.pop()
        elif part and part != '.':
            stack.append(part)
    return '/' + '/'.join(stack)
| true |
42b5cdec2fd865324820d247c0debb451900e232 | Python | kamalgaider/WebScrapping | /Exploratory data analysis/question4.py | UTF-8 | 255 | 2.875 | 3 | [] | no_license | import numpy as np
import pandas as pd
# Keep only the rows whose Genre is 'Classics', then report the spread
# (max - min) of their star ratings.
df = pd.read_csv("../CSV_without_comments.csv", sep=',')
fltr = df['Genre'] =='Classics'
df.where(fltr, inplace = True)  # non-matching rows become NaN in every column
df = df.dropna()               # ...and are dropped here
print('Range : ' + str(df['Star Rating'].max() - df['Star Rating'].min()))
| true |
102ef9a688262a587b4102ab7ef164df2192bf66 | Python | ifelsejet/Google-CSSI2019 | /cssi-labs-master/python/labs/make-a-short-story/mystory.py | UTF-8 | 711 | 3.65625 | 4 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | story = """The %s jumped over a %s %s.
Then the %s decided to stop being so %s
and take up a hobby: %s . """
# Python 2 mad-lib: collect words from the user, then fill the six %s
# slots of `story` (defined above) in order.
print("Enter a noun")
#get user input
firstNoun = raw_input()
print("Enter a adjective")
firstAdjective = raw_input()
print("Enter another noun")
secondNoun = raw_input()
print("Enter another noun")
thirdNoun = raw_input()
print("Enter an adjective")
# NOTE(review): this overwrites the adjective read above; the same variable
# fills both adjective slots in the story — confirm that is intended.
firstAdjective = raw_input()
print("Enter a verb")
firstVerb = raw_input()
#print """The %s jumped over a %s %s. Then the %s decided to stop being so %s and take up a hobby: %s .""" % (firstNoun,firstAdjective,secondNoun,thirdNoun,firstAdjective,firstVerb)
print story % (firstNoun,firstAdjective,secondNoun,thirdNoun,firstAdjective,firstVerb)
| true |
902833c1b1ee2ab410d98eeea43e1baafecee8dc | Python | pritee55/Static_Partition | /Static_Partition.py | UTF-8 | 3,084 | 3.109375 | 3 | [] | no_license | import os
import getpass
# Cosmetic banner + trivial password gate before the partition menu.
# `tput setaf N` switches the terminal foreground color (3=yellow, 7=white).
print("\n\t\t**....................##.........................**")
os.system("tput setaf 3")
print("\n\t*****************WELCOME In MY AUTOMATION TOOL*****************")
os.system("tput setaf 7")
print("\n\t\t**....................##.........................**")
# NOTE(review): the password is hard-coded in plain text ("root") — this is
# presentation-only, not real access control.
passwd = getpass.getpass("\nEnter your password: ")
if passwd != "root":
    print("\n password is incorrect.Please enter right password. ")
    exit()
os.system("tput setaf 3")
print("\n\tThank You...")
print("\n\t############ You locally logged in successfully ###########")
os.system("tput setaf 7")
def option():
    """Interactive menu for managing static partitions via parted/mkfs/mount.

    Each action shells out with os.system and then re-displays the menu by
    calling option() recursively; choice 6 ends the recursion.

    Fixes: `.format(name3name4)` was a NameError (missing comma between the
    two arguments), and the format-and-mount branch never returned to the
    menu while every other branch did.
    """
    op = int(input('''
\n
..............#WELCOME IN MAIN MENU#................
\n
Press 1 : To Create a Static Partition..
press 2 : Format and mount partition..
Press 3 : To increase size of Partition..
Press 4 : To decrease size of partition..
Press 5 : To delete the static partiton..
press 6 : exit..
\n
what can I help you..: '''))
    if op == 1:
        # Create a new primary ext4 partition between the given bounds (GB).
        os.system("\n yum install parted")
        name1 = input("\n\tEnter your harddisk that you want..>> ")
        part1 = input("\n\tEnter start point of partition..>> ")
        part2 = input("\n\tEnter end point of partition..>> ")
        os.system("parted {} mkpart primary ext4 {}G {}G;".format(name1, part1, part2))
        os.system("lsblk")
        option()
    elif op == 2:
        # Format a partition as ext4 and mount it on a freshly made folder.
        name3 = input("\n\t Enter the name of partition..>> ")
        os.system("mkfs.ext4 {}".format(name3))
        name4 = input("\n\t Enter the folder name that you want..>>")
        # NOTE(review): "mkdir \{}" looks like it was meant to be "/{}"
        # (a mount point under /) — confirm before changing.
        os.system("mkdir \{}".format(name4))
        # BUG FIX: was .format(name3name4) — a NameError; the partition and
        # mount point must be passed as two separate arguments.
        os.system("mount {} {}".format(name3, name4))
        os.system("lsblk")
        # Consistency fix: return to the menu like every other action.
        option()
    elif op == 3:
        # Grow a partition by moving its end point further out.
        name1 = input("\n\t Enter the name of harddisk..>> ")
        num1 = input("\n\t Enter the number of partition..>> ")
        size1 = input("\n\t Enter the incresing end point size..>> ")
        os.system("parted {} resizepart {} {}G".format(name1, num1, size1))
        os.system("lsblk")
        option()
    elif op == 4:
        # Shrink a partition by moving its end point inward.
        name1 = input("\n\t Enter the name of harddisk..>> ")
        num1 = input("\n\t Enter the number of partition..>> ")
        size2 = input("\n\t Enter the decreasing end point size..>> ")
        os.system("parted {} resizepart {} {}G".format(name1, num1, size2))
        os.system("lsblk")
        option()
    elif op == 5:
        # Delete a partition by number.
        name1 = input("\n\t Enter the name of harddisk..>> ")
        num1 = input("\n\t Enter the number of partition..>> ")
        os.system("parted {} rm {}".format(name1, num1))
        os.system("lsblk")
        option()
    elif op == 6:
        # NOTE(review): "exit" runs in a child shell, so it is a no-op here;
        # the function simply falls through and the recursion unwinds.
        os.system("exit")
        os.system("tput setaf 3")
        print("\n\n\t#########..THANK YOU..See you soon..#########")
        os.system("tput setaf 7")
        print("\n")
    else:
        print("Incorrect Choice,select correct option..")
        anykey = input("\n\t Press Enter to go main menu")
        option()
option()
| true |
6072aceff2305220339aab5b29b23400fd609c5c | Python | maguas01/hackerRank | /pStuff/TwoStrings.py | UTF-8 | 568 | 3.71875 | 4 | [] | no_license | #!/bin/python
'''
Given two strings a, and b, determine if they share a common substring.
'''
import sys
def twoStrings(s1, s2):
    """Return "YES" if s1 and s2 share at least one common character, else "NO".

    Robustness fix: the original used a 26-slot boolean table indexed by
    ord(c) - 97, which only worked for lowercase a-z and could raise
    IndexError (or silently alias via negative indices) on any other
    character. A set intersection handles arbitrary characters and empty
    strings, with the same answers for all previously valid inputs.
    """
    return "YES" if set(s1) & set(s2) else "NO"
def main() :
    # Python 2 HackerRank driver: first line is the number of queries, then
    # one pair of strings per query; print YES/NO for each pair.
    q = int(raw_input().strip())
    for a0 in xrange(q):
        s1 = raw_input().strip()
        s2 = raw_input().strip()
        result = twoStrings(s1, s2)
        print(result)
if __name__ == "__main__" :
main() | true |
48356ec69df081037fdf485a7f3b121ce2dcc344 | Python | myashok/LeetCode | /solutions_new/longest_substring_without_repeating_characters/solution.py | UTF-8 | 408 | 2.96875 | 3 | [] | no_license | class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
    """Length of the longest substring of s without repeating characters.

    Sliding window: `window_start` marks the left edge of the current
    duplicate-free window and `last_seen` maps each character to the index
    of its most recent occurrence.
    """
    last_seen = {}
    window_start = 0
    best = 0
    for end, ch in enumerate(s):
        if ch in last_seen and last_seen[ch] >= window_start:
            # Duplicate inside the window: move the left edge past it.
            window_start = last_seen[ch] + 1
        last_seen[ch] = end
        best = max(best, end - window_start + 1)
    return best
2aa1c656daf9f1ec8fb1ac67a84a16cc4e8f58f3 | Python | KrishnaJaykrishna/PythonPrograms | /median.py | UTF-8 | 451 | 3.109375 | 3 | [] | no_license | import csv
# Compute the median of the third column of HeightWeight.csv by hand.
# (statistics.median would do the same, but the manual form documents the
# even/odd-count cases explicitly.)
with open('HeightWeight.csv', newline = '') as f:
    reader = csv.reader(f)
    file_data = list(reader)
file_data.pop(0)  # drop the header row
newdata = []
for i in range(len(file_data)):
    numb = file_data[i][2]  # third column of each record
    newdata.append(float(numb))
n = len(newdata)
newdata.sort()
# Even count: average the two middle values; odd count: take the middle one.
if n % 2 == 0:
    median1 = float(newdata[n//2])
    median2 = float(newdata[n//2 - 1])
    median = (median2 + median1)/2
else:
    median = newdata[n//2]
print(median)
print (n)
| true |
c7f8b4377677c8af746273668e7c5efa7b4d7a45 | Python | CarMoreno/TutorialesPythonBasic | /Diccionarios_Ejercicio.py | UTF-8 | 1,788 | 3.234375 | 3 | [] | no_license | from Diccionarios import usuario_1, usuario_2
paises = [
{
"id": 1000,
"nombre": "Colombia",
"capital": "Bogotá",
"idioma": "Español",
"poblacion": 45000000
},
{
'id': 2000,
"nombre": "Ecuador",
"capital": "Quito",
"idioma": "Español",
"poblacion": 6500000
},
{
'id': 3000,
"nombre": "Reino Unido",
"capital": "Londres",
"idioma": "Ingles",
"poblacion": 6500000
},
{
'id': 6000,
"nombre": "Estados Unidos",
"capital": "Washington",
"idioma": "Ingles",
"poblacion": 4670000
},
{
"id": 4000,
"nombre": "China",
"capital": "Pekín",
"idioma": "Mandarín",
"poblacion": 8900000000
},
{
"id": 5000,
"nombre": "Argentina",
"capital": "Buenos aires",
"poblacion": 540000
},
]
# Buckets for classifying the `paises` entries above by language.
lista_paises_idioma_espanol = []
lista_paises_idioma_ingles = []
lista_paises_otros_idiomas = []
# Language classification (comments translated from Spanish).
for pais in paises:
    idioma = pais.get("idioma", False)
    nombre = pais.get("nombre", False)
    if idioma == False:
        # Entry without a language key (e.g. Argentina above).
        print("No existe el idioma en el diccionario para el pais " + nombre)
    else:
        # Uppercase the string and strip spaces before comparing.
        if (idioma.upper().replace(" ", "") == "ESPAÑOL"):
            lista_paises_idioma_espanol.append(pais)
        elif (idioma.upper().replace(" ", "") == "INGLES"):
            lista_paises_idioma_ingles.append(pais)
        else:
            lista_paises_otros_idiomas.append(pais)

def mostrar_listas():
    # "Show lists": print the three classification buckets.
    print(lista_paises_idioma_espanol)
    print(lista_paises_idioma_ingles)
    print(lista_paises_otros_idiomas)

print (usuario_2)
cef34fe8e82f5a42d6987d3cff0be27a1bc01478 | Python | preyingrazor/testrepo | /follow tags.py | UTF-8 | 1,068 | 3 | 3 | [] | no_license | import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
# Ignore SSL certificate errors
# Disable certificate verification so self-signed HTTPS pages still load.
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

# Fetch the starting page and parse it.
url = input('Enter - ')
html = urllib.request.urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, 'html.parser')
# Retrieve all of the anchor tags
count = 0
namelist = list()
tags = soup('a')
for tag in tags:
    count = count + 1
    if count == 3:
        # Record and follow the third link on the page.
        # NOTE(review): original indentation was lost; this nesting is the
        # most plausible reading — confirm against the source file.
        print(tag.get('href', None))
        name = tag.contents[0]
        namelist.append(name)
        url = tag.get('href', None)
html = urllib.request.urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, 'html.parser')
# Retrieve all of the anchor tags
count = 0
tags = soup('a')
for tag in tags:
    count = count + 1
    if count == 3:
        print(tag.get('href', None))
        name = tag.contents[0]
        namelist.append(name)
print(namelist)
| true |
4eafa9eddfa7a2cef7326e3addd5b60c87ad1dce | Python | vijaysundar2701/python | /oddsepr.py | UTF-8 | 129 | 3.171875 | 3 | [] | no_license | small,big=map(int,input().split())
lis=[]
for i in range(small+1,big):
if i%2!=0:
lis.append(i)
print(*lis,end=' ')
| true |
5612dfa6319755779df745c07dc1e1d3ed54124e | Python | shabaiev/puppy_adoption | /model/adopt_a_puppy_page.py | UTF-8 | 3,611 | 3.046875 | 3 | [] | no_license | from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.webdriver.remote.webelement import WebElement
import re
class PuppyBasicDetails(object):
    """Value holder for one puppy scraped from a listing page."""

    def __init__(self, picture, name, breed, gender, view_details_btn: WebElement, puppy_found_on_page_number=None, puppy_id=None):
        self.picture = picture  # src URL of the thumbnail image
        self.name = name
        self.breed = breed
        self.gender = gender
        # Live WebElement for the "View Details" button; becomes stale
        # once the browser navigates away from the listing page.
        self.view_details_btn = view_details_btn
        self.puppy_found_on_page_number = puppy_found_on_page_number  # set later by AdoptPuppy.collect_puppies
        self.puppy_id = puppy_id  # numeric id extracted from the detail-page URL
class AdoptPuppy:
    """Page object for the puppies.herokuapp.com listing pages.

    Loads the site on construction and eagerly scrapes every puppy from
    every listing page into `self.puppy_list`.
    """

    def __init__(self, browser: RemoteWebDriver):
        self.browser = browser
        self.go_to()
        # Visiting every page up front makes construction slow, but lets
        # get_puppy_by_name() work purely in memory afterwards.
        self.puppy_list = self.collect_puppies()

    def get_puppy_by_name(self, name: str):
        # Case-insensitive lookup; implicitly returns None when not found.
        for puppy in self.puppy_list:
            if puppy.name.lower() == name.lower():
                return puppy

    def go_to(self):
        self.browser.get("https://puppies.herokuapp.com/")

    def go_to_page_number(self, page_number: int):
        url = "https://puppies.herokuapp.com/agency/index?page="
        self.browser.get(url + str(page_number))

    def collect_puppies(self):
        """Scrape basic details plus the detail-page id for every puppy."""
        number_of_pages = int(self.get_number_of_pages())
        # brook = PuppyBasicDetails(picture="/assets/Brook-ed6c0be3a8830921c5a954d1bc283354.jpg", name="Brook",
        #                           breed="Golden Retriever", gender="Female", view_details="")
        puppies = []
        for page_number in range(1, number_of_pages + 1):
            self.go_to_page_number(page_number)
            number_of_puppies_on_the_page = self.get_number_of_puppies_on_the_page()
            for puppy_number in range(1, number_of_puppies_on_the_page + 1):
                puppy = self.get_puppy(puppy_number=puppy_number)
                puppy.puppy_found_on_page_number = page_number
                # get_puppy_id clicks through to the detail page and comes
                # back before the next puppy is scraped.
                puppy_id = self.get_puppy_id(view_details_btn=puppy.view_details_btn)
                puppy.puppy_id = puppy_id
                puppies.append(puppy)
        return puppies

    def get_puppy_id(self, view_details_btn: WebElement):
        # The id is embedded in the detail-page URL; strip every non-digit.
        # NOTE(review): any other digit in the URL (e.g. a port number)
        # would corrupt the id — confirm the URL shape.
        view_details_btn.click()
        url = self.browser.current_url
        url = re.sub(pattern='[^0-9]', repl='', string=url)
        self.browser.back()
        return int(url)

    def get_number_of_pages(self):
        # The pager's next-to-last anchor holds the last page number.
        # Returned as text; callers convert it with int().
        last_page = self.browser.find_element_by_xpath(
            "//*[contains(text(),'Next')]/parent::div/a[position() = (last()-1)]")
        return last_page.text

    def get_number_of_puppies_on_the_page(self):
        number_of_puppies = self.browser.find_elements_by_xpath("//div[@class = 'puppy_list']")
        return len(number_of_puppies)

    def get_puppy(self, puppy_number):
        # XPath positions are 1-based, matching `puppy_number`.
        picture = self.browser.find_element_by_xpath(f"//div[@class = 'puppy_list'][{puppy_number}]//img")
        name = self.browser.find_element_by_xpath(f"//div[@class = 'puppy_list'][{puppy_number}]//div[@class='name']")
        breed = self.browser.find_element_by_xpath(
            f"//div[@class = 'puppy_list'][{puppy_number}]//div[@class='details']/h4[1]")
        gender = self.browser.find_element_by_xpath(
            f"//div[@class = 'puppy_list'][{puppy_number}]//div[@class='details']/h4[2]")
        view_details = self.browser.find_element_by_xpath(
            f"//div[@class = 'puppy_list'][{puppy_number}]//div[@class='view']//input")
        puppy = PuppyBasicDetails(picture=picture.get_attribute(name="src"), name=name.text, breed=breed.text,
                                  gender=gender.text, view_details_btn=view_details)
        return puppy
| true |
6aec265e4d04b5e5d5bfd029a3c8e052d0194f4a | Python | sighthnd/PUI2016_ss4977 | /HW2_ss4977/show_bus_locations_ss4977.py | UTF-8 | 2,688 | 2.90625 | 3 | [] | no_license | #!/usr/bin/python
## Python 3 print function
from __future__ import print_function
import os
import os.path
import sys
import re
import urllib2
# Prepare to convert the response from the MTA server to a dictionary
import ast
import json
# Usage: show_bus_locations.py <api-key-or-keyfile> <bus-route>
# Queries the MTA Bus Time SIRI feed and prints each active bus's position.
mtakey = sys.argv[1]
busroute = sys.argv[2]
if re.match(r"[\d\-a-f]{20,}", mtakey):
    # Argument is given in form of MTA API key
    pass
else:
    # Argument is a file name which contains the key
    if os.path.isfile(mtakey):
        # Check that the file exists, otherwise report its absence and exit
        if os.access(mtakey, os.R_OK):
            # Similarly if the file exists but is not readable
            fh = open(mtakey, "r")
            fstr = fh.readline()
            # Strip the trailing newline(s) before validating the key shape.
            fstr = re.sub(r'[\r\n]+', "", fstr)
            if re.match(r"[\d\-a-f]{20,}$", fstr):
                mtakey = fstr
            else:
                print(mtakey + " does not contain a valid key\n")
                sys.exit()
        else:
            print("Could not open file " + mtakey + "\n")
            sys.exit()
    else:
        print(mtakey + " is not an API key and is not a file\n")
        sys.exit()
site = "http://bustime.mta.info/api/siri/vehicle-monitoring.json"
daturl = site + "?key=" + mtakey + "&LineRef=" + busroute
# Get the data from the server
try:
    fh = urllib2.urlopen(daturl)
    # If this works, just read what it returns
    datstr = fh.readline()
except urllib2.URLError:
    # A possible source of an error is trying to use a proxy
    # This routine from https://www.decalage.info/en/python/urllib2noproxy
    # bypasses the proxy
    proxy_handler = urllib2.ProxyHandler({})
    opener = urllib2.build_opener(proxy_handler)
    req = urllib2.Request(daturl)
    r = opener.open(req)
    datstr = r.read()
# Converts response from the server into dictionary
datstruct = json.loads(datstr)
# Traverse the data structure to the list of active buses
substruct = datstruct["Siri"]["ServiceDelivery"]["VehicleMonitoringDelivery"]
vehlist = substruct[0]["VehicleActivity"]
dests = {}  # destination name -> number of active buses heading there
numbuses = 0
print("Bus Line : " + busroute + "\nNumber of Active Buses : " +
      str(len(vehlist)))
for i in range(len(vehlist)):
    numbuses += 1
    newBus = vehlist[i]["MonitoredVehicleJourney"]
    if newBus["DestinationName"] in dests.keys():
        dests[newBus["DestinationName"]] += 1
    else:
        dests[newBus["DestinationName"]] = 1
    print("Bus " + str(i) + " is at latitude " +
          str(newBus["VehicleLocation"]["Latitude"]) + " and longitude " +
          str(newBus["VehicleLocation"]["Longitude"]))
# Per-destination summary of the counts collected above.
for busDest in dests.keys():
    print("Number of Active Buses to " + busDest + " : " + str(dests[busDest]))
| true |
9668d78ea440dc3c71b7f7d47f6fd0de4f598283 | Python | klaudiaplk/pszt_neural_network | /pszt_neural_network/loader.py | UTF-8 | 1,321 | 2.953125 | 3 | [] | no_license | from pszt_neural_network.sample_data import Sample_data
class Loader:
    """Reads training samples from a comma-separated text file.

    Each line holds the attribute values followed by the diagnosis as the
    last field. Reading stops at the first blank line (or end of file).

    Cleanup: the original put the load() description on __init__'s
    docstring and initialized an unused `sampleDataStrings` list; both
    fixed. Interface (load, get_data_size, attributes) is unchanged.
    """

    def __init__(self, filename):
        """Remember the path; nothing is read until load() is called.

        :param filename: file path
        """
        self.filename = filename
        self.loadedData = []  # populated by load()
        self.dataSize = 0     # number of parsed lines, maintained by load()

    def load(self):
        """Load data from the file.

        :return: list of Sample_data training samples
        """
        samples = []
        with open(self.filename) as fp:
            for raw_line in fp:
                line = raw_line.strip()
                if not line:
                    # Preserve original behavior: a blank line ends parsing.
                    break
                self.dataSize += 1
                fields = line.split(',')
                diagnosis = float(fields[-1])
                attributes = [float(value) for value in fields[:-1]]
                samples.append(Sample_data(diagnosis, attributes))
        self.loadedData = samples
        return samples

    def get_data_size(self):
        """Get number of individual input training data vectors.

        :return: number of individual input training data vectors
        """
        return self.dataSize
| true |
0ec400641cbd00313d42733b3fc77647384bb892 | Python | baracus77/stepik-auto-tests-course | /2.2.3.py | UTF-8 | 544 | 3.09375 | 3 | [] | no_license | from selenium import webdriver
import time
try:
    # Solve the Stepik task: read the two numbers on the page, pick their
    # sum in the <select>, and submit.
    link = "http://suninjuly.github.io/selects1.html"
    browser = webdriver.Chrome()
    browser.get(link)
    x = browser.find_element_by_id("num1").text
    y = browser.find_element_by_id("num2").text
    z = str(int(x) + int(y))
    print(z)
    select = browser.find_element_by_tag_name("select").click()
    value = browser.find_element_by_css_selector(f"[value = '{z}']").click()
    button = browser.find_element_by_tag_name("button").click()
finally:
    # BUG FIX: was `time(5)` — calling the module object raises TypeError;
    # the intent is to pause so the result page can be seen before closing.
    # NOTE(review): if webdriver.Chrome() itself fails, `browser` is
    # undefined here and quit() raises NameError — consider guarding.
    time.sleep(5)
    browser.quit()
| true |
599a3fd4a3b9f446858dda081ab0346c908fb421 | Python | pflun/advancedAlgorithms | /validWordSquare.py | UTF-8 | 466 | 3.421875 | 3 | [] | no_license | class Solution(object):
def validWordSquare(self, words):
    """Return True iff the k-th row and k-th column of `words` read the same.

    Bug fix: the original assumed a square layout (it compared len(words)
    to len(words[0]) and indexed words[j][i] unconditionally), so it
    rejected or raised IndexError on valid ragged squares such as
    ["abcd", "bnrt", "crm", "dt"]. This version checks that the mirror
    cell words[c][r] exists before comparing.
    """
    for r, word in enumerate(words):
        for c, ch in enumerate(word):
            # The mirrored cell must exist and hold the same character.
            if c >= len(words) or r >= len(words[c]) or words[c][r] != ch:
                return False
    return True
# Python 2 ad-hoc checks: the first square is valid (True), the second is
# not (row 2 "read" vs column 2 "lead" -> False).
test = Solution()
print test.validWordSquare([
"abcd",
"bnrt",
"crmy",
"dtye"
])
print test.validWordSquare([
"ball",
"area",
"read",
"lady"
])
760c7dec52e4d478b15a824073354fd82aec9ce9 | Python | NeilWangziyu/Leetcode_py | /minimumSemesters.py | UTF-8 | 3,473 | 3.140625 | 3 | [] | no_license | from typing import List
import collections
class weightedEdge():
    """A directed edge whose ordering and equality depend on weight alone."""

    def __init__(self, start, end, weight):
        # Endpoints are stored but never consulted by the comparisons below.
        self.start, self.end, self.weight = start, end, weight

    def __lt__(self, other):
        return self.weight < other.weight

    def __eq__(self, other):
        # NOTE: defining __eq__ without __hash__ makes instances unhashable.
        return self.weight == other.weight
class Solution:
def minimumSemesters(self, N: int, relations: List[List[int]]) -> int:
    """Unfinished DFS/cycle-detection attempt; intentionally a no-op.

    Always returns None. Kept only so existing callers keep working; use
    minimumSemesters2 for the working Kahn's-algorithm implementation.
    (A ~55-line block of commented-out draft code was removed.)
    """
    pass
def minimumSemesters2(self, N: int, relations: List[List[int]]) -> int:
    """Minimum number of semesters to take all N courses, or -1 on a cycle.

    Kahn's topological sort, processed level by level: every course whose
    prerequisites are all satisfied is taken in the same semester.
    """
    prereq_count = [0] * (N + 1)  # 1-based; index 0 unused
    next_courses = collections.defaultdict(set)
    for pre, post in relations:
        prereq_count[post] += 1
        next_courses[pre].add(post)
    # First semester: everything with no prerequisites.
    current = [course for course in range(1, N + 1) if prereq_count[course] == 0]
    taken = len(current)
    semesters = 0
    while current:
        semesters += 1
        upcoming = []
        for course in current:
            for follower in next_courses[course]:
                prereq_count[follower] -= 1
                if prereq_count[follower] == 0:
                    upcoming.append(follower)
        taken += len(upcoming)
        current = upcoming
    # A cycle leaves some courses with unsatisfied prerequisites forever.
    return semesters if taken == N else -1
# Ad-hoc checks: minimumSemesters is a stub (prints None); minimumSemesters2
# should print 2 for the DAG case and -1 for the cyclic case.
s = Solution()
N = 3
relations = [[1,3],[2,3]]
print(s.minimumSemesters(N, relations))
print(s.minimumSemesters2(N, relations))
N = 3
relations = [[1,2],[2,3],[3,1]]
print(s.minimumSemesters(N, relations))
print(s.minimumSemesters2(N, relations))
| true |
87cb09019854f3aa329013e643208a07f08e98f2 | Python | JeremieHuteau/adria_internship | /src/tests.py | UTF-8 | 15,048 | 2.890625 | 3 | [] | no_license | import math
import random
import time
import torch
def rel_close(a, b, threshold):
    """True when the larger of a, b is within `threshold` relative distance
    of the smaller (after a tiny epsilon shift to avoid division by zero),
    or when the larger value is essentially zero."""
    hi, lo = (a, b) if a >= b else (b, a)
    hi += 1e-9
    lo += 1e-9
    return (hi / lo - 1) < threshold or hi < 1e-8
def constant_tuple_numels(c1, c2):
    """Pass through a fixed (count_1, count_2) pair unchanged."""
    pair = (c1, c2)
    return pair
def random_tuple_numels(high):
    """Draw two independent sizes uniformly from [1, high].

    Exactly two random.randint calls, in order, so seeded runs reproduce
    the original implementation's stream.
    """
    return tuple(random.randint(1, high) for _ in range(2))
def make_positive_pairs(num_tuples, num_positives, max_positives):
    """Build the (side-1 index, side-2 index) positive pairs for num_tuples
    groups, then shuffle side 2 with a random permutation.

    num_positives: 'random' draws each group's sizes via
    random_tuple_numels(max_positives); otherwise it is unpacked into
    constant_tuple_numels, giving fixed sizes for every group.

    Returns a long tensor of pairs and (total side-1, total side-2) sizes.
    """
    if num_positives == 'random':
        tuple_numels = lambda: random_tuple_numels(max_positives)
    else:
        tuple_numels = lambda: constant_tuple_numels(*num_positives)
    positive_pairs = []
    numel_1 = 0
    numel_2 = 0
    for tuple_idx in range(num_tuples):
        tuple_numel_1, tuple_numel_2 = tuple_numels()
        # Full cross product inside the group: every side-1 element is a
        # positive for every side-2 element.
        for i1 in range(tuple_numel_1):
            for i2 in range(tuple_numel_2):
                pair = [numel_1+i1, numel_2+i2]
                positive_pairs.append(pair)
        numel_1 += tuple_numel_1
        numel_2 += tuple_numel_2
    # Scatter side-2 positions so positives are not block-diagonal.
    permutation = list(range(numel_2))
    random.shuffle(permutation)
    positive_pairs = list(map(
        lambda p: (p[0], permutation[p[1]]),
        positive_pairs))
    positive_pairs = torch.tensor(positive_pairs, dtype=torch.long)
    return positive_pairs, (numel_1, numel_2)
def test_naive_hardest_triplet_margin_loss():
    """Check the naive hardest-fraction triplet margin loss against
    hand-computed row/column losses for a fixed 4x4 score matrix.

    Fix: `col_violations` used to be assigned twice with conflicting
    values; the stale first table has been removed.
    """
    import triplet_margin_loss as tml

    scores = torch.tensor([
        [-0.1, 0.2, -0.6, -0.5],
        [ 0.9, 0.4, -0.1, 0.4],
        [-0.2, -0.3, 0.3, -0.0],
        [ 0.7, -0.5, -0.5, 0.5],
    ])
    # (row, col) coordinate of the single positive per row; kept as
    # documentation of how `targets` was constructed.
    positive_indices = torch.tensor([
        [0, 0],
        [1, 3],
        [2, 2],
        [3, 1],
    ])
    targets = torch.tensor([
        [1, 0, 0, 0],
        [0, 0, 0, 1],
        [0, 0, 1, 0],
        [0, 1, 0, 0],
    ])
    margin = 0.2
    # Hand-worked margin violations per cell (reference working only; the
    # assertions use the aggregated *_losses_* tensors below).
    row_violations = torch.tensor([
        [ 0.0, 0.5, -0.0, -0.0],
        [ 0.7, 0.2, -0.0, 0.0],
        [-0.0, -0.0, 0.0, -0.0],
        [ 1.4, 0.0, 0.2, 1.2],
    ])
    row_losses_sum = torch.tensor([
        (0.5 + 0.0 + 0.0) / 3,
        (0.7 + 0.2 + 0.0) / 3,
        (0.0 + 0.0 + 0.0) / 3,
        (1.4 + 0.2 + 1.2) / 3,
    ])
    row_losses_half = torch.tensor([
        (0.5 + 0.0) / 2,
        (0.7 + 0.2) / 2,
        (0.0 + 0.0) / 2,
        (1.4 + 1.2) / 2,
    ])
    row_losses_max = torch.tensor([
        (0.5) / 1,
        (0.7) / 1,
        (0.0) / 1,
        (1.4) / 1,
    ])
    col_violations = torch.tensor([
        [ 0.0, 0.9, -0.0, -0.0],
        [ 1.2, 1.1, -0.0, 0.0],
        [ 0.1, 0.4, 0.0, -0.0],
        [ 1.0, 0.0, -0.0, 0.3],
    ])
    col_losses_sum = torch.tensor([
        (1.2 + 1.0 + 0.1) / 3,
        (1.1 + 0.9 + 0.4) / 3,
        (0.0 + 0.0 + 0.0) / 3,
        (0.3 + 0.0 + 0.0) / 3,
    ])
    col_losses_half = torch.tensor([
        (1.2 + 1.0) / 2,
        (1.1 + 0.9) / 2,
        (0.0 + 0.0) / 2,
        (0.3 + 0.0) / 2,
    ])
    col_losses_max = torch.tensor([
        (1.2) / 1,
        (1.1) / 1,
        (0.0) / 1,
        (0.3) / 1,
    ])
    # Sub-range over rows, handy when narrowing a failure down by hand.
    start_idx, end_idx = 0, scores.size(0)+1
    # hardest fraction -> (expected per-row losses, expected per-col losses)
    hardest_fraction_losses = {
        1.0: (row_losses_sum, col_losses_sum),
        0.5: (row_losses_half, col_losses_half),
        0.0: (row_losses_max, col_losses_max),
    }
    scores = scores[start_idx:end_idx]
    targets = targets[start_idx:end_idx]
    for hardest_fraction in hardest_fraction_losses:
        row_losses, col_losses = hardest_fraction_losses[hardest_fraction]
        hard_coded_row_loss = row_losses.mean()/margin
        hard_coded_col_loss = col_losses.mean()/margin
        naive_row_loss = tml.naive_hardest_fraction_triplet_margin_loss(
            scores, targets, margin, hardest_fraction)
        naive_col_loss = tml.naive_hardest_fraction_triplet_margin_loss(
            scores.t(), targets.t(), margin, hardest_fraction)
        assert hard_coded_row_loss == naive_row_loss
        assert hard_coded_col_loss == naive_col_loss
def test_triplet_margin_losses():
    """Cross-check every triplet-margin-loss implementation against the
    naive reference on random problems, and report relative timings.

    Each group restricts the comparison to the implementations whose
    supported cases (single/constant/multiple positives, hardest fraction
    vs hardest-only) include that group's parameters.
    """
    import triplet_margin_loss as tml
    import retrieval_metrics

    def sparse_loss_fn(scores, targets, indices, margin, hardest_fraction):
        # Adapter: apply the per-anchor loss through the sparse metric API
        # so it can be compared against the dense implementations.
        def loss_fn(preds, targets):
            return tml.anchor_hardest_fraction_triplet_margin_loss(
                preds, targets, margin, hardest_fraction)
        loss = retrieval_metrics.apply_retrieval_metric(
            scores.reshape(-1), targets.reshape(-1), indices, loss_fn).mean()
        return loss

    seed = 0
    random.seed(seed)
    torch.manual_seed(seed)
    num_tuples = 32
    num_trials = 30
    max_positives = 5
    # Uniform (preds, targets, indices, margin, fraction) signature for all.
    functions = {
        'naive' : lambda p,t,i,m,h:
            tml.naive_hardest_fraction_triplet_margin_loss(p,t,m,h),
        'anchor' : lambda p,t,i,m,h:
            sparse_loss_fn(p,t,i,m,h),
        'chunked' : lambda p,t,i,m,h:
            tml.chunked_hardest_fraction_triplet_margin_loss(p,t,m,h),
        'vectorized': lambda p,t,i,m,h:
            tml.vectorized_hardest_fraction_triplet_margin_loss(p,t,m,h),
        'single' : lambda p,t,i,m,h:
            tml.single_hardest_fraction_triplet_margin_loss(p,t,m,h),
        'constant' : lambda p,t,i,m,h:
            tml.constant_hardest_fraction_triplet_margin_loss(p,t,m,h),
        'hardest' : lambda p,t,i,m,h:
            tml.hardest_triplet_margin_loss(p,t,m),
    }
    #(Single|Constant|Multiple)Positives_Hardest[Fraction]
    group_parameters = {
        'SP_HF': ('single', 'random'),
        'CP_HF': ('constant', 'random'),
        'MP_HF': ('random', 'random'),
        'MP_H' : ('random', 0.0)
    }
    group_functions = {
        'SP_HF': ['naive', 'anchor', 'chunked', 'vectorized', 'single', 'constant'],
        'CP_HF': ['naive', 'anchor', 'chunked', 'vectorized', 'constant'],
        'MP_HF': ['naive', 'anchor', 'chunked', 'vectorized'],
        'MP_H' : ['naive', 'anchor', 'chunked', 'vectorized', 'hardest'],
    }
    error_threshold = 1e-6
    for group_name in group_parameters:
        print(f"Starting {group_name}.")
        num_positives_param, hardest_fraction_param = group_parameters[group_name]
        if num_positives_param == 'single':
            num_positives_fn = lambda: [1,1]
        elif num_positives_param == 'constant':
            num_positives_fn = lambda: list(random_tuple_numels(max_positives))
        elif num_positives_param == 'random':
            num_positives_fn = lambda: 'random'
        if hardest_fraction_param == 'random':
            hardest_fraction_fn = lambda: random.uniform(0,1)
        else:
            hardest_fraction_fn = lambda: hardest_fraction_param
        function_names = group_functions[group_name]
        function_times = {
            function_name: 0.0
            for function_name in function_names
        }
        for i in range(num_trials):
            positive_pairs, preds_size = make_positive_pairs(
                num_tuples,
                num_positives_fn(),
                max_positives)
            preds = torch.distributions.uniform.Uniform(-1, 1).sample(preds_size)
            targets, indices_1, indices_2 = retrieval_metrics.positive_sparse2dense(
                positive_pairs, list(preds_size))
            targets = targets.view(preds_size)
            indices_2 = indices_2.view(preds_size).t().reshape(-1)
            margin = random.uniform(0, 1)
            hardest_fraction = hardest_fraction_fn()
            # Direction 1 -> 2 ("122" in the mismatch log below).
            function_values = {}
            for function_name in function_names:
                start_time = time.time()
                value = functions[function_name](
                    preds, targets, indices_1, margin, hardest_fraction)
                function_values[function_name] = value
                function_times[function_name] += time.time() - start_time
            reference_value = function_values['naive']
            for function_name, function_value in function_values.items():
                if not rel_close(reference_value, function_value, error_threshold):
                    print(f"{group_name}: n°{i}: 122: {function_name}: {reference_value} vs {function_value}")
            # Direction 2 -> 1 on the transposed problem ("221").
            function_values = {}
            for function_name in function_names:
                start_time = time.time()
                value = functions[function_name](
                    preds.t(), targets.t(), indices_2, margin, hardest_fraction)
                function_values[function_name] = value
                function_times[function_name] += time.time() - start_time
            reference_value = function_values['naive']
            for function_name, function_value in function_values.items():
                if not rel_close(reference_value, function_value, error_threshold):
                    print(f"{group_name}: n°{i}: 221: {function_name}: {reference_value} vs {function_value}")
        # Report each implementation's time relative to the fastest one.
        min_time = min(function_times.values())
        for function_name, function_time in function_times.items():
            norm_time = function_time / min_time
            print(f"{function_name}: {norm_time:.3f}")
def test_naive_recall_at_k():
    """Check naive_recall_at_k against hand-computed recall ranks.

    A 4x4 score matrix with exactly one positive per row/column is compared,
    for k = 1..3, against recall values derived from manually derived ranks
    (the 1-based position of each row's/column's positive when scores are
    sorted descending).
    """
    import retrieval_metrics
    # Pairwise similarity scores: rows are queries, columns are candidates.
    scores = torch.tensor([
        [-0.1, 0.2, -0.6, -0.5],
        [ 0.9, 0.41, -0.1, 0.41],
        [-0.2, -0.3, 0.3, -0.0],
        [ 0.7, -0.52, -0.5, 0.5],
    ])
    # (row, col) coordinates of the single positive in each row.
    positive_indices = torch.tensor([
        [0, 0],
        [1, 3],
        [2, 2],
        [3, 1],
    ])
    # Dense 0/1 mask equivalent of positive_indices.
    targets = torch.tensor([
        [1, 0, 0, 0],
        [0, 0, 0, 1],
        [0, 0, 1, 0],
        [0, 1, 0, 0],
    ])
    # Hand-computed 1-based rank of the positive within each ROW of `scores`.
    row_recall_ranks = torch.tensor([
        2,
        3,
        1,
        4,
    ])
    # Hand-computed 1-based rank of the positive within each COLUMN.
    col_recall_ranks = torch.tensor([
        3,
        4,
        1,
        2
    ])
    # Row direction: recall@k is the fraction of queries whose positive
    # ranks within the top k (rank-1 < k for 1-based ranks).
    for k in range(1, 4):
        hard_coded_recall = ((row_recall_ranks-1) < k).float()
        hard_coded_recall = hard_coded_recall.mean()
        naive_recall = retrieval_metrics.naive_recall_at_k(scores, targets, k)
        naive_recall = sum(naive_recall) / len(naive_recall)
        assert hard_coded_recall == naive_recall
    # Column direction: same check on the transposed problem.
    for k in range(1, 4):
        hard_coded_recall = ((col_recall_ranks-1) < k).float()
        hard_coded_recall = hard_coded_recall.mean()
        naive_recall = retrieval_metrics.naive_recall_at_k(scores.t(), targets.t(), k)
        naive_recall = sum(naive_recall) / len(naive_recall)
        assert hard_coded_recall == naive_recall
def test_recall_at_k():
    """Randomized cross-check + micro-benchmark of recall@k implementations.

    Generates random sparse positive pairs, evaluates four implementations
    ('naive', 'anchor', 'vectorized', 'vectorized2') in both matrix
    orientations, prints any pair that disagrees with 'naive' beyond
    `error_threshold`, and finally prints per-implementation timings
    normalized to the fastest one.
    """
    # NOTE(review): `tml` is imported but never used in this function.
    import triplet_margin_loss as tml
    import retrieval_metrics as rm
    def sparse_recall_fn(scores, targets, indices, k):
        # Recall computed through the generic sparse "apply metric per anchor"
        # machinery, averaged over anchors.
        def loss_fn(preds, targets):
            return rm.anchor_recall_at_k(
                preds, targets, k)
        loss = rm.apply_retrieval_metric(
            scores.reshape(-1), targets.reshape(-1), indices, loss_fn).mean()
        return loss
    def vectorized2(scores, targets, k):
        # Alternative vectorized path: explicit rank/count tensors.
        ranks = rm.target_ranks(scores, targets)
        counts = rm.target_counts(targets)
        return rm.recalls_at_k(k, ranks, counts)
    # Fixed seeds so disagreements are reproducible.
    seed = 0
    random.seed(seed)
    torch.manual_seed(seed)
    num_tuples = 32
    num_trials = 30
    max_positives = 5
    # All implementations share the signature (preds, targets, indices, k)
    # and reduce to a scalar mean recall.
    functions = {
        'naive': lambda p,t,i,k:
            torch.mean(torch.tensor(rm.naive_recall_at_k(p,t,k))),
        'anchor': lambda p,t,i,k:
            torch.mean(sparse_recall_fn(p,t,i,k)),
        'vectorized': lambda p,t,i,k:
            torch.mean(rm.recall_at_k(p,t,k)),
        'vectorized2': lambda p,t,i,k:
            torch.mean(vectorized2(p, t, k))
    }
    #(Single|Constant|Multiple)Positives_Hardest[Fraction]
    # SP = exactly one positive per tuple; MP = random number of positives.
    group_parameters = {
        'SP': ('single', 'random'),
        'MP': ('random', 'random'),
    }
    group_functions = {
        'SP': ['naive', 'anchor', 'vectorized', 'vectorized2'],
        'MP': ['naive', 'anchor', 'vectorized', 'vectorized2'],
    }
    error_threshold = 1e-6
    for group_name in group_parameters:
        print(f"Starting {group_name}.")
        num_positives_param, k_param = group_parameters[group_name]
        # Translate group parameters into generator callables.
        if num_positives_param == 'single':
            num_positives_fn = lambda: [1,1]
        elif num_positives_param == 'constant':
            num_positives_fn = lambda: list(random_tuple_numels(max_positives))
        elif num_positives_param == 'random':
            num_positives_fn = lambda: 'random'
        if k_param == 'random':
            k_fn = lambda: random.randint(1,max_positives+2)
        else:
            k_fn = lambda: k_param
        function_names = group_functions[group_name]
        function_times = {
            function_name: 0.0
            for function_name in function_names
        }
        for i in range(num_trials):
            # Build a random sparse positive-pair problem and its dense form.
            positive_pairs, preds_size = make_positive_pairs(
                num_tuples,
                num_positives_fn(),
                max_positives)
            preds = torch.distributions.uniform.Uniform(-1, 1).sample(preds_size)
            targets, indices_1, indices_2 = rm.positive_sparse2dense(
                positive_pairs, list(preds_size))
            targets = targets.view(preds_size)
            # indices_2 addresses the transposed orientation, flattened.
            indices_2 = indices_2.view(preds_size).t().reshape(-1)
            k = k_fn()
            # Orientation 1: rows as anchors.
            function_values = {}
            for function_name in function_names:
                start_time = time.time()
                value = functions[function_name](
                    preds, targets, indices_1, k)
                function_values[function_name] = value
                function_times[function_name] += time.time() - start_time
            reference_value = function_values['naive']
            for function_name, function_value in function_values.items():
                if not rel_close(reference_value, function_value, error_threshold):
                    print(f"{group_name}: n°{i}: 122: {function_name}: {reference_value} vs {function_value}")
            # Orientation 2: columns as anchors (transposed inputs).
            function_values = {}
            for function_name in function_names:
                start_time = time.time()
                value = functions[function_name](
                    preds.t(), targets.t(), indices_2, k)
                function_values[function_name] = value
                function_times[function_name] += time.time() - start_time
            reference_value = function_values['naive']
            for function_name, function_value in function_values.items():
                if not rel_close(reference_value, function_value, error_threshold):
                    print(f"{group_name}: n°{i}: 221: {function_name}: {reference_value} vs {function_value}")
        # Report relative speed: 1.000 for the fastest implementation.
        min_time = min(function_times.values())
        for function_name, function_time in function_times.items():
            norm_time = function_time / min_time
            print(f"{function_name}: {norm_time:.3f}")
# Run the recall test-suites when executed as a script (the triplet-loss
# suites below are intentionally disabled).
if __name__ == '__main__':
    #test_naive_hardest_triplet_margin_loss()
    #test_triplet_margin_losses()
    test_naive_recall_at_k()
    test_recall_at_k()
| true |
a31fd91f627cc695eea53a429ebc4052693d269b | Python | pitanshu/python_programs | /lists_program/multiplication_of_all_element.py | UTF-8 | 153 | 2.765625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import math

# Numbers whose product we want. Renamed from `list`, which shadowed the
# builtin of the same name.
numbers = [1, 2, 3, 4]

# math.prod multiplies every element (and correctly yields 1 for an empty
# sequence), replacing the manual accumulator loop.
result = math.prod(numbers)
print(result)
0d03cc9771282a81885285d0c80972bf51f2e623 | Python | PavanKrishnaGadde/MachineLearning | /Clustering/kmeans_pk.py | UTF-8 | 1,295 | 3.171875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 27 23:19:10 2020
@author: pavankrg
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#importing dataset
# Load the mall customers data and keep columns 3 and 4
# (annual income and spending score) as the clustering features.
dataset = pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:,[3,4]].values
# elbow method to find optimum number of clusters
from sklearn.cluster import KMeans
wcss = []
# Fit k-means for k = 1..10 and record the within-cluster sum of squares
# (inertia); the "elbow" in this curve suggests the best k.
for i in range(1,11):
    kmeans = KMeans(n_clusters = i, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)
    kmeans.fit(X)
    wcss.append(kmeans.inertia_)
plt.plot(range(1,11), wcss)
plt.title('The Elbow Method')
plt.xlabel('no of clusters')
plt.ylabel('wcss')
#Applying K-means
# k = 5 was chosen from the elbow plot above.
kmeans = KMeans(n_clusters=5, init='k-means++', max_iter = 300, n_init=10, random_state=0)
Y_pred = kmeans.fit_predict(X)
#data visualization
# One scatter call per cluster label, each with its own colour.
plt.scatter(X[Y_pred == 0,0], X[Y_pred == 0, 1], s = 100, c = 'red', label ='cluster1')
plt.scatter(X[Y_pred == 1,0], X[Y_pred == 1, 1], s = 100, c = 'cyan', label ='cluster2')
plt.scatter(X[Y_pred == 2,0], X[Y_pred == 2, 1], s = 100, c = 'magenta', label ='cluster3')
plt.scatter(X[Y_pred == 3,0], X[Y_pred == 3, 1], s = 100, c = 'yellow', label ='cluster4')
plt.scatter(X[Y_pred == 4,0], X[Y_pred == 4, 1], s = 100, c = 'black', label ='cluster5')
| true |
8292e14408bfd718ed75956675ccd9a08d486869 | Python | maxmyth01/Unit6 | /longestDictionaryWord.py | UTF-8 | 271 | 2.9375 | 3 | [] | no_license | #Max Low
#12-6-17
#longestDictionaryWord.py
def find_longest_words(words):
    """Return (longest_words, max_length) for an iterable of word lines.

    Trailing newlines are stripped before measuring, so the reported length
    is the word's real length (the original counted the '\\n' too). Returns
    ([], 0) for empty input.
    """
    max_len = 0
    longest = []
    for raw in words:
        word = raw.rstrip('\n')
        if len(word) > max_len:
            max_len = len(word)
            longest = [word]
        elif max_len > 0 and len(word) == max_len:
            longest.append(word)
    return longest, max_len

if __name__ == '__main__':
    # Bug fix: the original iterated the open file twice; the second pass ran
    # on an exhausted iterator and never printed any words. Read once via the
    # helper, and use a `with` block so the file is closed.
    with open('engmix.txt') as dictionary:
        longest, wordSize = find_longest_words(dictionary)
    for word in longest:
        print(word)
    print(wordSize)
| true |
def classify_number(n):
    """Return a description of the sign of integer `n`."""
    if n > 0:
        return "the number is positive"
    if n < 0:
        return "the number is negative"
    # Bug fix: zero is a perfectly valid input; the original branch printed
    # "invalid input" for it.
    return "the number is zero"

if __name__ == '__main__':
    n = int(input("enter the desired number "))
    print(classify_number(n))
| true |
def Enc(m, n):
    """Caesar-encrypt plain text.

    === input ===
    m(string): plain text
    n(int)   : key to shift (1 ~ 25)
    === return ===
    c(string): cipher text

    Letters are rotated within their own case; every other character is
    passed through unchanged.
    """
    def shift(ch):
        # Pick the alphabet base for the character's case, if any.
        if 'a' <= ch <= 'z':
            base = ord('a')
        elif 'A' <= ch <= 'Z':
            base = ord('A')
        else:
            return ch
        return chr((ord(ch) - base + n) % 26 + base)

    return "".join(shift(ch) for ch in m)
def main():
    """Interactive driver: read words and a key, print the ciphertext.

    Prompts for whitespace-separated plain-text words and an integer key,
    encrypts each word with Enc, and prints them joined by spaces.
    """
    # NOTE(review): "Alpahbet" / "Chiper" below are user-visible typos; they
    # are left untouched here because they are runtime output strings.
    print("input plain text (Alpahbet)")
    Plain_text_list = list(input().split())
    print("")
    print("input key")
    key = int(input())
    print("")
    Cipher_text_list = []
    for Plain_text in Plain_text_list:
        Cipher_text_list.append(Enc(Plain_text, key))
    print("Chiper text")
    print(" ".join(Cipher_text_list))
# Run the interactive Caesar-cipher demo only when executed as a script.
if __name__ == "__main__":
    main()
| true |
class Node:
    """A doubly-linked-list node holding a payload and two neighbour links."""

    def __init__(self, data):
        self.data = data   # stored payload
        self.next = None   # following node, or None at the tail
        self.prev = None   # preceding node, or None at the head
class DLL:
    """Minimal doubly linked list with 1-based positional insertion."""

    def __init__(self):
        self.head = None  # first node, or None when the list is empty

    def printElements(self):
        """Print each node's data from head to tail, one per line."""
        current = self.head
        while current:
            print(current.data)
            current = current.next

    def listLength(self):
        """Return the number of nodes in the list."""
        current = self.head
        count = 0
        while current:
            count += 1
            current = current.next
        return count

    def insert(self, data, position):
        """Insert `data` so it becomes the `position`-th node (1-based).

        Positions past the end append at the tail; inserting into an empty
        list always places the node at the head (the original crashed with
        an AttributeError for position > 1 on an empty list).
        """
        newNode = Node(data)
        if position == 1 or self.head is None:
            newNode.next = self.head
            newNode.prev = None
            if self.head is not None:
                # Bug fix: the old head must point back at the new node,
                # otherwise backward traversal is broken after a head insert.
                self.head.prev = newNode
            self.head = newNode
            return
        k = 1
        current = self.head
        while current.next is not None and k < (position - 1):
            k += 1
            current = current.next
        newNode.next = current.next
        newNode.prev = current
        if current.next:
            current.next.prev = newNode
        current.next = newNode
# Demo driver: builds 2 -> 3 -> 5 (two head inserts, then one at position 2)
# and prints the list front to back.
if __name__ =="__main__":
    dll=DLL()
    dll.insert(5,1)
    dll.insert(2,1)
    dll.insert(3,2)
    dll.printElements()
| true |
5bccdbc1c4fcb130ca93a80b973a87322ae0f374 | Python | arvidarvidarvid/adventofcode | /2017/day02/day.py | UTF-8 | 645 | 2.859375 | 3 | [] | no_license | import logging
# Module-wide logging: INFO level on the root logger, shared by main().
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
def get_input():
    """Return the puzzle input as a list of lines from 'day.input'."""
    with open('day.input', 'r') as file:
        return file.readlines()
def main():
input = get_input()
grid = [list(map(lambda x: int(x), line.split())) for line in input]
checksum_1 = sum([max(line) - min(line) for line in grid])
logger.info('Result 1: %s' % checksum_1)
checksum_2 = sum([
int(nom / den) for line in grid for i, den in enumerate(sorted(line))
for nom in sorted(line)[i + 1:] if nom % den == 0])
logger.info('Result 2: %s' % checksum_2)
# Compute and log both checksums when run as a script.
if __name__ == '__main__':
    main()
| true |
b805e7df3da3d5c52465ea8284927f615ebc58f5 | Python | Sun-Joong/aifh | /vol1/python-examples/test/aifh/test_error.py | UTF-8 | 2,256 | 2.71875 | 3 | [
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | """
Artificial Intelligence for Humans
Volume 1: Fundamental Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2013 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
__author__ = 'jheaton'
import os
import sys
import unittest
import numpy as np
# Find the AIFH core files
# Walk two directories up from this test file and into lib/aifh so the core
# modules (e.g. error.py) can be imported without installing the package.
aifh_dir = os.path.dirname(os.path.abspath(__file__))
aifh_dir = os.path.abspath(aifh_dir + os.sep + ".." + os.sep + ".." + os.sep + "lib" + os.sep + "aifh")
sys.path.append(aifh_dir)
from error import ErrorCalculation
class TestError(unittest.TestCase):
IDEAL = [
[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0],
[13.0, 14.0, 15.0, 16.0],
[17.0, 18.0, 19.0, 20.0]
]
ACTUAL = [
[1.1, -2.0, -3.0, 4.1],
[-5.1, -6.0, 7.1, 8.2],
[9.1, 10.2, -11.5, 12.1],
[13.0, -14.0, 15.0, 16.1],
[17.0, 18.0, -19.0, 20.1]
]
def test_rss(self):
actual = np.array(TestError.ACTUAL)
ideal = np.array(TestError.IDEAL)
self.assertAlmostEqual(ErrorCalculation.rss(actual, ideal), 3032.4099, 3)
def test_rms(self):
actual = np.array(TestError.ACTUAL)
ideal = np.array(TestError.IDEAL)
self.assertAlmostEqual(ErrorCalculation.rms(actual, ideal), 12.3134, 3)
def test_mse(self):
actual = np.array(TestError.ACTUAL)
ideal = np.array(TestError.IDEAL)
self.assertAlmostEqual(ErrorCalculation.mse(actual, ideal), 151.6205, 3) | true |
88722dd821a8aa7f90c5e167d483e616bf8d904a | Python | theahura/ProjectGenesis | /source/Author/Story/Clusters/Setting.py | UTF-8 | 760 | 2.640625 | 3 | [
"MIT"
] | permissive | """
@author: Amol Kapoor
@date: 5-25-16
Description: Contains the class structure for the setting class. Sets out params for environmental actions and structures for
object/character location
"""
from source.Author.Clusters.Cluster import Cluster
class Setting(Cluster):
    """A SETTING cluster: a location that can hold characters and things,
    but never another setting."""

    def __init__(self, name, characters=None, settings=None, things=None, descriptors=None):
        """
        See super for init param definition if not listed.
        """
        # Bug fix: the original called super(Character, self) — `Character`
        # is not defined in this module, so construction raised NameError.
        super(Setting, self).__init__(name, "SETTING", characters, settings, things, descriptors)
        # Track contained clusters; default to an empty set so
        # add_cluster_to_contains works even when no `things` were given
        # (the original stored None and would crash on .add()).
        self.contains = things if things is not None else set()

    def add_cluster_to_contains(self, cluster):
        """Add `cluster` to this setting's contents; settings cannot nest."""
        # Bug fix: `self` was missing from the original signature, making
        # this uncallable as an instance method.
        if cluster.type == 'SETTING':
            raise TypeError('Setting cannot contain a setting')
        self.contains.add(cluster)
| true |
525cca3f610da790958b5ffcb4b3a10fd69ffa7e | Python | CianFD/RPiSensorsNoiseDetectionSystem | /submission folder/code/storeFileFB.py | UTF-8 | 1,404 | 2.84375 | 3 | [] | no_license | import firebase_admin #imports firebase_admin package
from firebase_admin import credentials, firestore, storage, db #imports credentials, firestore, storage and db sections of firebase_admin package
import os #imports os package
# Load the service-account credentials from the key file next to this script.
cred=credentials.Certificate('./serviceAccountKey.json') #creates cred value which is defined as credentials pulled from the serviceAccountKey.json file
firebase_admin.initialize_app(cred, {
    'storageBucket': 'dogbarkalertdatabase.appspot.com',
    'databaseURL': 'https://dogbarkalertdatabase-default-rtdb.firebaseio.com/'
}) #initialises firebase using credentials above and setting the storageBucket and databaseURL as the values taken from the firebase account
bucket = storage.bucket() #default bucket values as per firebase storage API set up
# Realtime-database handles: root reference and the 'file' child used below.
ref = db.reference('/')
home_ref = ref.child('file')
def store_file(fileLoc): #creates store_file method which takes the images captured from camera and stores them in the Storage Section of Firebase
    """Upload the file at local path `fileLoc` to the Firebase storage bucket,
    keyed by its base filename."""
    filename=os.path.basename(fileLoc)
    # Store File in FB Bucket
    blob = bucket.blob(filename)
    outfile=fileLoc
    blob.upload_from_filename(outfile)
def push_db(fileLoc, time): #creates push_db method which takes the image captured and timestamp and pushes them to the firebase realtime database
    """Record the image's base filename and `time` under the 'file' node of
    the realtime database."""
    filename=os.path.basename(fileLoc)
    home_ref.push({
        'image': filename,
        'timestamp': time}
    )
| true |
809e33634026e21468de52b5871e2eddabc2375f | Python | harshagrawal523/TextBoat | /plag_check.py | UTF-8 | 759 | 2.796875 | 3 | [] | no_license |
# print("jai shri krishna")
# pip install requests
# pip install bs4
# pip install html5lib
import sys
from difflib import SequenceMatcher
import requests
from bs4 import BeautifulSoup
# Compare the text passed as sys.argv[1] against the <p> paragraphs scraped
# from each comma-separated URL in sys.argv[2]; print a similarity score
# (percentage, via difflib.SequenceMatcher) per URL.
url = sys.argv[2].split(",")
x = sys.argv[1]
x_lower = x.lower()  # hoisted: lower-case the query once, not per paragraph
ans = []
for u in url:
    r = requests.get(u)
    soup = BeautifulSoup(r.content, 'html.parser')
    paras = soup.find_all('p')
    # Collect paragraph texts and join once at the end; the original rebuilt
    # the accumulated string with + on every iteration (quadratic).
    parts = [" "]
    ma = 0
    for para in paras:
        text = para.get_text()
        parts.append(text)
        # Best per-paragraph similarity seen so far.
        tma = SequenceMatcher(None, text.lower(), x_lower).ratio()
        if tma > ma:
            ma = tma
    # Also compare against all paragraphs concatenated.
    ttma = SequenceMatcher(None, "".join(parts).lower(), x_lower).ratio()
    if ttma > ma:
        ma = ttma
    ans.append(u + " - " + str(ma * 100) + "}")
# (Removed the dead `ii = 0` from the original.)
for y in ans:
    print(y)
| true |
d694280fb1347480880fb15cff031293a3dc62e6 | Python | douglascodes/conway-python | /main_test.py | UTF-8 | 5,856 | 2.5625 | 3 | [] | no_license | import unittest
from main import Dish
from main import Environment
import pygame
import main
from test import test_support
class TestWorld(unittest.TestCase):
    """Unit tests for the Dish (Game of Life world) behaviour."""

    def setUp(self):
        # Fresh dish per test, plus shortcuts to its cell/potential sets.
        self.d = Dish()
        self.c = self.d.cells
        self.p = self.d.potentials
    def tearDown(self):
        self.d = None
        self.c = None
        self.p = None
    def test_dish_creation(self):
        self.assertIsInstance(self.d, Dish, "Is not a Dish instance")
    def test_dish_cells_empty(self):
        # assert_ is a deprecated alias (removed in Python 3.12); use assertTrue.
        self.assertTrue(not self.d.cells, "Is not an empty set.")
    def test_dish_cells_addition(self):
        self.d.cells.add("s")
        self.assertTrue(self.d.cells, "Is an empty set.")
    def test_no_duplication(self):
        self.d.cells.add("s")
        self.d.cells.add("r")
        self.c_len = len(self.d.cells)
        self.d.cells.add("s")
        self.assertEqual(self.c_len, len(self.d.cells), "Error in length check.")
    def test_cells_removal(self):
        self.c.add("a")
        self.c_len = len(self.c)
        self.c.remove("a")
        self.assertNotEqual(self.c_len, len(self.c), "Error in length check.")
    def test_create_dish(self):
        # spawn should create exactly the requested number of cells.
        self.limit = 100
        self.desired = self.limit * self.limit
        self.spawn_list = self.d.spawn(self.desired, self.limit)
        self.spawn_count = len(self.spawn_list)
        self.assertEqual(self.spawn_count, self.desired, "Did not return same created # as sent.")
    def test_dish_spawn_limit(self):
        # Asking for more cells than fit in the grid must raise.
        self.limit = 100
        self.desired = self.limit * self.limit
        self.assertRaises(main.TooManyExpected, self.d.spawn, self.desired+1, self.limit)
    def test_gets_all_nine_possible(self):
        # A single live cell yields itself plus its 8 neighbours as potentials.
        self.test_set = set()
        self.test_set.add((5,5))
        self.test_set = self.d.create_potentials( self.test_set )
        self.assertEqual(9, len(self.test_set), "Set should have 9 elements.")
    def test_neighbor_count_eight(self):
        self.test_set = set()
        self.test_set.add((5,5))
        self.test_set = self.d.create_potentials( self.test_set )
        self.assertEqual(8, self.d.count_neighbors( (5,5), self.test_set), "Neighbors should be 8")
    def test_neighbor_count_three(self):
        self.test_set = set()
        self.test_set.add((5,5))
        self.test_set = self.d.create_potentials( self.test_set )
        self.assertEqual(3, self.d.count_neighbors( (5,3), self.test_set), "Neighbors should be 3")
    def test_neighbor_count_five(self):
        self.test_set = set()
        self.test_set.add((5,5))
        self.test_set = self.d.create_potentials( self.test_set )
        self.assertEqual(5, self.d.count_neighbors( (4, 5), self.test_set), "Neighbors should be 5")
    def test_neighbor_count_one(self):
        self.test_set = set()
        self.test_set.add((5,5))
        self.test_set = self.d.create_potentials( self.test_set )
        self.assertEqual(1, self.d.count_neighbors( (3, 3), self.test_set), "Neighbors should be 1")
    def test_expected_gen(self):
        # A vertical blinker at column 5 should rotate into a horizontal one.
        self.test_set = set()
        self.result_expected = set()
        self.result_expected.add((6,6))
        self.result_expected.add((5,6))
        self.result_expected.add((4,6))
        self.test_set.add((5,5))
        self.test_set.add((5,6))
        self.test_set.add((5,7))
        self.assertNotEqual(self.test_set, self.result_expected, "Sets should not equal to start.")
        self.pot_set = self.d.create_potentials(self.test_set)
        self.result_set = self.d.determine_next_gen(self.test_set, self.pot_set)
        self.assertEqual(self.result_set, self.result_expected, "Sets should be same at end.")
    def test_generation_next(self):
        self.d.cells = self.d.spawn(100, 20)
        self.d.potentials = self.d.create_potentials(self.d.cells)
        self.next_gen = self.d.determine_next_gen(self.d.cells, self.d.potentials)
        self.assertTrue(self.d.potentials >= self.d.cells, "Not a subset.")
        # Bug fix: the original read self.d.next_gen, an attribute that was
        # never set on the dish (the result was stored on the test instance),
        # so this assertion raised AttributeError instead of testing anything.
        self.assertTrue(self.d.potentials >= self.next_gen, "Should give same result with repeated runs.")
class TestEnvironment(unittest.TestCase):
    """Sanity checks on the Environment configuration object."""

    def setUp(self):
        self.e = Environment()
    def tearDown(self):
        self.e = None
    def test_env_creation(self):
        # A new environment must start with a positive initial cell count.
        self.assertGreater(self.e.start_count, 0, "Env start count is <= 0")
    def test_color_difference(self):
        # Live cells must be distinguishable from the background colour.
        self.assertNotEqual(self.e.cell_color, self.e.black, "Cells and background should not be same color.")
class TestDrawingMachine(unittest.TestCase):
    """Pixel-level checks that live cells get drawn with the cell colour."""

    def setUp(self):
        self.env = Environment(10)
        self.px_array = main.px_array
        self.p_test = set()
        # The original assigned self.color twice (first from main.env, then
        # from self.env); only the second assignment ever took effect, so the
        # dead first assignment was removed.
        self.color = self.env.cell_color
        self.d = main.thedish
    def tearDown(self):
        self.env = None
    def test_pxarray_assignment(self):
        # Writing a colour into the pixel array should read back mapped.
        self.px_array[5][5] = self.color
        c = self.px_array[5][5]
        self.assertEqual(c, main.env.screen.map_rgb(self.color), "Pixel doesn't match")
    def test_drawing_all_cells(self):
        # Clear the screen, draw every live cell, then verify each pixel.
        main.env.screen.fill(main.env.black)
        main.draw_pixels(self.d.cells)
        pygame.display.flip()
        for each_cell in iter(self.d.cells):
            xC, yC = each_cell
            c = main.px_array[xC][yC]
            self.assertEqual(c, main.env.screen.map_rgb(self.color), "Pixels don't match")
    def test_draw_box(self):
        # TODO: box-drawing behaviour is not yet implemented/tested.
        pass
def test_main():
    """Run all three suites through the stdlib test-support runner."""
    # NOTE(review): test.test_support is the Python 2 name; Python 3 renamed
    # it to test.support — confirm the target interpreter for this project.
    test_support.run_unittest(TestWorld,
                              TestEnvironment,
                              TestDrawingMachine
                              )
if __name__ == '__main__':
test_main() | true |
LIMITE_KMH = 80.0    # speed limit enforced by the radar (km/h)
VALOR_POR_KM = 7.00  # fine charged per km/h above the limit (R$)

def calcular_multa(velocidade, limite=LIMITE_KMH, valor_por_km=VALOR_POR_KM):
    """Return the fine for `velocidade` km/h, or None when within the limit.

    The hard-coded 80 km/h limit and R$7.00 rate from the original script
    are now overridable defaults.
    """
    if velocidade > limite:
        return (velocidade - limite) * valor_por_km
    return None

if __name__ == '__main__':
    velCarro = float(input("Digite a velocidade que seu carro passou: "))
    multa = calcular_multa(velCarro)
    if multa is not None:
        print("Cidadão, seu veículo passou da velocidade permitida do radar!")
        print("==> Será aplicado uma multa de |R${:.2f}| em seu nome.".format(multa))
    else:
        print("Boa viagem cidadão, não ande mais de 80km nessa pista!")
        print("==> Não foi aplicado nenhuma multa.")
041b397e98b4ad7b993c95e9ecfe163e84f1c5d5 | Python | estudeplus/extracao | /extracao/api/apimanager.py | UTF-8 | 3,200 | 2.828125 | 3 | [
"MIT"
] | permissive | import pandas as pd
from django.shortcuts import redirect
from .models import Professor, Subject, Student
class ApiManager:
    """Imports an uploaded grades spreadsheet into the Django models
    (Subject, Professor, Student), replacing any previously loaded data."""

    def __init__(self):
        self._core_manager = None  # provider of the uploaded document state
        self._data = None          # uploaded document (exposes .document / .extension)

    def set_subject(self, core):
        """Register the core manager that supplies the uploaded document."""
        self._core_manager = core

    def update(self):
        """Wipe the current models and re-import everything from the upload."""
        Subject.objects.all().delete()
        Professor.objects.all().delete()
        Student.objects.all().delete()
        self._data = self._core_manager.get_state()
        self.load()

    def load(self):
        """Validate the uploaded sheet's columns and persist each row."""
        df = self.get_data()
        df = df.fillna(0)
        if self.check_data(df.columns):
            # If correct, it's time to add the data to the models
            for index, row in df.iterrows():
                subject = self.check_subject(row)
                self.save_professor(row, subject)
                self.save_student(row, subject)
        else:
            # If not correct, notify the view that the uploaded document is
            # not correct. TODO: actually surface the error to the view.
            pass

    def save_student(self, row, subject):
        """Create the student from `row`, or re-point an existing one at `subject`."""
        try:
            student = Student.objects.get(name=row['aluno'])
            student.subject = subject
            student.save()
        except Student.DoesNotExist:
            # (leftover debug prints of the row were removed here)
            student = Student(
                name=row['aluno'],
                email=row['email do aluno'],
                student_code=row['matrícula'],
                ira=row['ira'],
                grade=row['nota final'],
                mention=row['menção'],
                subject=subject
            )
            student.save()

    def save_professor(self, row, subject):
        """Get or create the row's professor and attach it to `subject`."""
        try:
            professor = Professor.objects.get(name=row['professor'])
        except Professor.DoesNotExist:
            professor = Professor(
                name=row['professor'],
                email=row['email do professor'],
            )
            professor.save()
        subject.professor = professor
        subject.save()

    def check_subject(self, row):
        """Get or create the Subject identified by the row's course code."""
        # NOTE(review): lookup is by code only — two classes (turmas) of the
        # same course share one Subject record; confirm that is intended.
        try:
            subject = Subject.objects.get(
                code=row['código'],
            )
        except Subject.DoesNotExist:
            subject = Subject(
                code=row['código'],
                name=row['disciplina'],
                classcode=row['turma']
            )
            subject.save()
        return subject

    def get_data(self):
        """Read the uploaded document into a DataFrame with lower-cased columns."""
        if self._data.extension == 'xlsx':
            df = pd.read_excel(self._data.document)
        else:
            df = pd.read_csv(self._data.document, delimiter=',')
        df.columns = [x.lower() for x in df.columns]
        return df

    def check_data(self, columns):
        """Return True when every required column is present (case-insensitive)."""
        indexes = ['código', 'disciplina', 'turma',
                   'professor', 'email do professor',
                   'aluno', 'matrícula', 'email do aluno',
                   'menção', 'ira', 'nota final']
        ## All string to lowecase
        columns = [x.lower() for x in list(columns)]
        # Bug fix: the original tested set(indexes).issuperset(columns), which
        # accepted uploads MISSING required columns (crashing later on row[...])
        # and rejected uploads with harmless extra columns. The containment
        # must go the other way: the upload's columns must cover the required set.
        if set(columns).issuperset(set(indexes)):
            return True
        return False
| true |
from collections import Counter

def count_words(text):
    """Return a Counter of lower-cased, whitespace-separated words in `text`."""
    return Counter(text.lower().split())

if __name__ == '__main__':
    m = input("Введите имя файла: ")
    with open(m, 'r', encoding='utf-8') as f:
        counts = count_words(f.read())
    # Same output shape as the original: (word, count) pairs sorted by count,
    # most frequent first (ties keep first-occurrence order — sorted is stable).
    print(sorted(counts.items(), key=lambda z: z[1], reverse=True))
| true |
951184d1a784ab261d1fe179cd5488b8dd8e200b | Python | dsp6414/Maml_Reptile_PyTorch | /main.py | UTF-8 | 2,844 | 2.546875 | 3 | [
"MIT"
] | permissive | import numpy as np
import random
import torch
from net import SineModel
from DataLoader import SineWaveTask
from tools import sine_fit1, plot_sine_test, plot_sine_learning, maml_sine, reptile_sine
import matplotlib.pyplot as plt
# Sizes of the randomly generated sine-wave task pools.
TRAIN_SIZE = 10000
TEST_SIZE = 1000
# Task pools: each SineWaveTask samples its own amplitude/phase.
SINE_TRAIN = [SineWaveTask() for _ in range(TRAIN_SIZE)]
SINE_TEST = [SineWaveTask() for _ in range(TEST_SIZE)]
# Baseline model trained by plain transfer learning (see fit_transfer).
SINE_TRANSFER = SineModel()
def fit_transfer(epochs=1):
    """Train the shared SINE_TRANSFER baseline on every training task.

    Each epoch visits all of SINE_TRAIN in a fresh random order and runs one
    sine_fit1 step per task with a single Adam optimizer.
    """
    optim = torch.optim.Adam(SINE_TRANSFER.params())
    for _ in range(epochs):
        for t in random.sample(SINE_TRAIN, len(SINE_TRAIN)):
            sine_fit1(SINE_TRANSFER, t, optim)
def main():
    """Run the full comparison: transfer baseline vs MAML vs first-order MAML
    vs Reptile, producing fit/learning-curve plots for each."""
    # Mean And Random Version #
    # Find a task whose training x-values all lie on one side of 0; such
    # one-sided tasks are a stress test for the adapted models below.
    ONE_SIDED_EXAMPLE = None
    while ONE_SIDED_EXAMPLE is None:
        cur = SineWaveTask()
        x, _ = cur.training_set()
        x = x.numpy()
        if np.max(x) < 0 or np.min(x) > 0:
            ONE_SIDED_EXAMPLE = cur
    # Transfer-learning baseline.
    fit_transfer()
    plot_sine_test(SINE_TRANSFER, SINE_TEST[0], fits=[0, 1, 10], lr=0.02)
    plot_sine_learning(
        [('Transfer', SINE_TRANSFER), ('Random', SineModel())],
        list(range(100)),
        marker='',
        linestyle='-', SINE_TEST=SINE_TEST)
    # MaML #
    # Train 5 independent MAML models; plots use the first one.
    SINE_MAML = [SineModel() for _ in range(5)]
    for m in SINE_MAML:
        maml_sine(m, 4, SINE_TRAIN=SINE_TRAIN)
    plot_sine_test(SINE_MAML[0], SINE_TEST[0], fits=[0, 1, 10], lr=0.01)
    plt.show()
    plot_sine_learning(
        [('Transfer', SINE_TRANSFER), ('MAML', SINE_MAML[0]), ('Random', SineModel())],
        list(range(10)),
        SINE_TEST=SINE_TEST
    )
    plt.show()
    plot_sine_test(SINE_MAML[0], ONE_SIDED_EXAMPLE, fits=[0, 1, 10], lr=0.01)
    plt.show()
    # First Order #
    # Same protocol with the first-order MAML approximation.
    SINE_MAML_FIRST_ORDER = [SineModel() for _ in range(5)]
    for m in SINE_MAML_FIRST_ORDER:
        maml_sine(m, 4, first_order=True, SINE_TRAIN=SINE_TRAIN)
    plot_sine_test(SINE_MAML_FIRST_ORDER[0], SINE_TEST[0], fits=[0, 1, 10], lr=0.01)
    plt.show()
    plot_sine_learning(
        [('MAML', SINE_MAML), ('MAML First Order', SINE_MAML_FIRST_ORDER)],
        list(range(10)),
        SINE_TEST=SINE_TEST
    )
    plt.show()
    plot_sine_test(SINE_MAML_FIRST_ORDER[0], ONE_SIDED_EXAMPLE, fits=[0, 1, 10], lr=0.01)
    plt.show()
    # Reptile #
    # Same protocol with Reptile (k inner steps, batch of 1 task).
    SINE_REPTILE = [SineModel() for _ in range(5)]
    for m in SINE_REPTILE:
        reptile_sine(m, 4, k=3, batch_size=1, SINE_TRAIN=SINE_TRAIN)
    plot_sine_test(SINE_REPTILE[0], SINE_TEST[0], fits=[0, 1, 10], lr=0.01)
    plt.show()
    plot_sine_learning(
        [('MAML', SINE_MAML), ('MAML First Order', SINE_MAML_FIRST_ORDER), ('Reptile', SINE_REPTILE)],
        list(range(32)),
        SINE_TEST=SINE_TEST
    )
    plt.show()
    plot_sine_test(SINE_REPTILE[0], ONE_SIDED_EXAMPLE, fits=[0, 1, 10], lr=0.01)
    plt.show()
main()
| true |
b83130eaeb53d1b05dff16d28afb8bed0c04f5c8 | Python | FacuBenito/ia-uncuyo-2021 | /tp3-busquedas-no-informadas/code/node.py | UTF-8 | 525 | 2.96875 | 3 | [] | no_license | # Anonymous object so that I can easily get each icon
from types import SimpleNamespace

# Attribute-style lookup table of board glyphs, used via getattr(icons, name).
# SimpleNamespace is the stdlib equivalent of the original anonymous
# type('', (object,), {...})() instance, with a readable repr for free.
icons = SimpleNamespace(
    goal="\u2705",      # white heavy check mark
    start="\u26F2",     # fountain
    obstacle="\u26D4",  # no entry
    path="\u26AA",      # white circle
    normal="\u26AB",    # black circle
    explored=' '
)
# Frontier: nodes we can move to but have not explored yet.
# State: nodes we can move to.
class Node:
    """A search-graph cell tagged with its type, cost, position and glyph."""

    def __init__(self, nodeType, cost, stateY, stateX):
        self.nodeType = nodeType              # one of the icon names ('goal', 'start', ...)
        self.cost = cost                      # movement cost of entering this cell
        self.state = (stateY, stateX)         # grid coordinates as (row, col)
        self.icon = getattr(icons, nodeType)  # display glyph looked up on `icons`
        self.parent = None                    # predecessor on the search path
| true |
651f1fc25eceffc832bfdfd267385b30f2b76bc0 | Python | Girish2823/Trivago_ETL | /Stacks.py | UTF-8 | 876 | 4.21875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 27 23:12:13 2018
@author: giris
"""
from sys import maxsize
def createstack():
    """Return a fresh, empty stack (backed by a plain list)."""
    return []
# True when the stack holds no items.
def isEmpty(stack):
    """Return True if `stack` has no elements."""
    return not stack
# Put `item` on top of the stack (size grows by one) and log the push.
def push(stack,item):
    """Append `item` to the top of `stack` and print a confirmation."""
    stack.append(item)
    print(item + "Item pushed into stack")
# Take the top item off the stack; an empty stack yields a sentinel string
# standing in for "minus infinity" instead of raising.
def pop(stack):
    """Pop and return the top of `stack`; str(-maxsize - 1) when empty."""
    if isEmpty(stack):
        return str(-maxsize - 1)  # sentinel: "minus infinite"
    return stack.pop()
#Driver program to test the algorithm
# Push three string items, then pop them back in LIFO order ("30", "20", "10").
stack = createstack()
push(stack,str(10))
push(stack,str(20))
push(stack,str(30))
print(pop(stack) + "popped from stack")
print(pop(stack) + "popped from stack")
print(pop(stack) + "popped from stack")
print(pop(stack) + "popped from stack") | true |
0fcd50529aacff08e35894e25bc477e8e5e443f3 | Python | codergeek42/CPSC471_FinalProject | /ServerConnection.py | UTF-8 | 2,949 | 2.671875 | 3 | [] | no_license | #!/bin/python3 -tt
# vim:set ts=4:
################################################################################
# Name: Peter Gordon
# Email: peter.gordon@csu.fullerton.edu
# Course: CPSC 471, T/Th 11:30-12:45
# Instructor: Dr. M. Gofman
# Assignment: 3 (FTP Server/Client)
################################################################################
# Copyright (c) 2014 Peter Gordon <peter.gordon@csu.fullerton.edu>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
################################################################################
"""This module provides the base class that any libserver-using connection
handler must inherit from. When a client connects, the server will create an
object of this type and pass it the connection (socket) and address
(IP/port tuple), then call its handleClientConnection function to process its
input."""
class ServerConnectionHandler:
    """Abstract base for libserver connection handlers.

    When a client connects, the server constructs a subclass instance with
    the accepted socket and the client's (IP, port) address tuple, then calls
    handleClientConnection() to drive the protocol.
    """

    __slots__ = ("_connSock", "_clientAddr")

    def __init__(self, connSock, clientAddr):
        """Remember the client's socket and its (IP, port) address tuple."""
        self._connSock = connSock
        self._clientAddr = clientAddr

    def handleClientConnection(self):
        """Process the client session; must be overridden by subclasses and
        return only when the client is finished (protocol-defined)."""
        raise NotImplementedError("Subclass must implement this abstract method.")

    @property
    def clientAddr(self):
        """The client address rendered as 'ip:port' (read-only)."""
        host, port = self._clientAddr[0], self._clientAddr[1]
        return "{}:{}".format(host, port)

    @clientAddr.setter
    def clientAddr(self, val):
        # Deliberately immutable: the address is fixed at accept() time.
        raise AttributeError("clientAddr is immutable.")
| true |
c1e6ba5aa5d129978fc9101358a3ced462b88218 | Python | Firkraag/algorithm | /knapsack_0_1.py | UTF-8 | 1,849 | 3.328125 | 3 | [] | no_license | from numpy import zeros
def knapsack_0_1(W, w, v):
    """Solve the 0/1 knapsack: best total value of items with weights `w`
    and values `v` that fit in capacity `W` (delegates to the recursive
    helper over items 1..len(w))."""
    return knapsack_0_1_aux(W, w, v, 1, len(w))
def knapsack_0_1_aux(W, w, v, m, n):
    """Best achievable value using items m..n (1-based) within capacity W.

    m: the first item still available
    n: the last item
    W: remaining capacity
    w: the list of weights of items
    v: the list of values of items
    """
    # Out of items, or out of capacity: nothing more can be added.
    if m > n or W <= 0:
        return 0
    # Value achieved if item m is left behind.
    skipped = knapsack_0_1_aux(W, w, v, m + 1, n)
    if w[m - 1] > W:
        # Item m does not fit, so skipping it is the only option.
        return skipped
    # Value achieved if item m is taken (consumes its weight).
    taken = knapsack_0_1_aux(W - w[m - 1], w, v, m + 1, n) + v[m - 1]
    return max(taken, skipped)
def knapsack_0_1_memoized(W, w, v):
    """Memoized 0/1 knapsack over capacity W, weights w, values v.

    Returns (value, solution): value[W', m] caches the best value achievable
    with capacity W' and items m..n; solution[W', m] is 1 when item m is
    taken in that optimum (see print_knapsack_solution).
    """
    n = len(w)
    # -Inf marks "not computed yet" in the memo table. A single vectorized
    # assignment replaces the original O(W*n) pure-Python double loop.
    value = zeros((W + 1, n + 1))
    value[:, :] = float("-Inf")
    solution = zeros((W + 1, n + 1))
    knapsack_0_1_memoized_aux(W, w, v, 1, n, value, solution)
    return value, solution
def knapsack_0_1_memoized_aux(W, w, v, m, n, value, solution):
    """Recursive memoized core: best value with capacity W using items m..n.

    value/solution are numpy tables shared across calls; value entries start
    at -Inf, so `>= 0` means "already computed" (all achievable values are
    non-negative). solution[W, m] records whether item m is taken at the
    optimum for (W, m).
    """
    if m > n:
        return 0
    # Memo hit: this (capacity, item) state was already solved.
    if value[W, m] >= 0:
        return value[W, m]
    if W < w[m - 1]:
        # Item m does not fit: forced to skip it.
        value[W, m] = knapsack_0_1_memoized_aux(W, w, v, m + 1, n, value, solution)
        solution[W, m] = 0
    else:
        # s = value when taking item m; t = value when skipping it.
        s = knapsack_0_1_memoized_aux(W - w[m - 1], w, v, m + 1, n, value, solution) + v[m - 1]
        t = knapsack_0_1_memoized_aux(W, w, v, m + 1, n, value, solution)
        if s > t:
            solution[W, m] = 1
        value[W, m] = max(s, t)
    return value[W, m]
def print_knapsack_solution(solution, w):
    """Print the 1-based indices of the items chosen in *solution*.

    Walks the solution table produced by the memoized solver, following
    the remaining capacity as chosen items consume it.
    """
    remaining = solution.shape[0] - 1   # capacity left to account for
    n_items = solution.shape[1] - 1
    item = 1
    while remaining != 0 and item <= n_items:
        if solution[remaining, item] == 1:
            print(item)
            remaining = remaining - w[item - 1]
        item = item + 1
| true |
7767ad2343ba5f5fbddf9b3a6b2a814968014109 | Python | seongbeenkim/Algorithm-python | /BOJ(Baekjoon Online Judge)/DP/1932_정수 삼각형(integer triangle).py | UTF-8 | 695 | 2.796875 | 3 | [] | no_license | #https://www.acmicpc.net/problem/1932
import sys
n = int(sys.stdin.readline())
d = [[0]*(n+1) for _ in range(n+1)]
a = [0]
for i in range(n):
a.append([0] + list(map(int,sys.stdin.readline().split())))
d[1][1] = a[1][1]
for i in range(2,n+1):
for j in range(1,i+1):
if d[i][j] == 0:
d[i][j] = a[i][j]
if j == 1:
d[i][j] = d[i-1][j] + a[i][j]
continue
elif j == i:
d[i][j] = d[i-1][j-1] + a[i][j]
continue
if d[i][j] < d[i-1][j-1] + a[i][j]:
d[i][j] = d[i-1][j-1] + a[i][j]
if d[i][j] < d[i-1][j] + a[i][j]:
d[i][j] = d[i-1][j] + a[i][j]
print(max(d[n]))
| true |
b4bca4487052472e7a7cf39fc695dada070c5f21 | Python | gabrielksneiva/Jogod_do_NIM | /Jogo do NIM.py | UTF-8 | 4,657 | 3.90625 | 4 | [] | no_license | def computador_escolhe_jogada(n, m):
jogada = 0
if m == 1:
return 1
else:
for i in range(1, m):
if (n-i) % (m+1) == 0:
jogada = i
break
if jogada != i:
return m
else:
return jogada
def usuario_escolhe_jogada(n, m):
    """Ask the user how many pieces to remove and return a validated move.

    A move must be between 1 and m, and may not exceed the n pieces still
    on the board.  (The original never checked against n, so near the end
    of a game the user could remove more pieces than existed, driving the
    piece count negative.)
    """
    while True:
        jogada = int(input("Quantas peças você quer tirar? "))
        if 0 < jogada <= m and jogada <= n:
            return jogada
        print("Oops! Jogada inválida! Tente de novo.")
def partida():
    """Play one game of NIM between the user and the computer.

    Asks for the piece count (n) and the per-move limit (m), decides who
    starts, alternates turns until no pieces remain, and returns the
    winner's name: 'Usuário' or 'Computador'.
    """
    z = True
    # Re-prompt until the piece count is at least the per-move limit.
    while z == True:
        n = int(input("Quantas peças? "))
        m = int(input("Limite de peças por jogada? "))
        if n < m:
            print("Por favor, digite um número de peças maior que número máximo de jogadas!")
        else:
            z = False
    # n % (m+1) == 0 is a losing position for whoever moves first, so the
    # user only gets the first move when it cannot help them.
    if n % (m+1) == 0:
        print("Voce começa!")
        print("")
        primeiro = 'Usuário'
    else:
        print("Computador começa!")
        print("")
        primeiro = 'Computador'
    x = True
    while x == True:
        if n > 0:
            i = 2
            # One pass of this loop is a full round: i == 2 is the first
            # player's turn, i == 3 the second player's.  The "n != 0"
            # guards skip the second half-turn once the game ends mid-round.
            for i in (2,3):
                if primeiro == 'Computador':
                    if i == 2 and n != 0:
                        jogada = computador_escolhe_jogada(n, m)
                        n = n - jogada
                        if jogada == 1:
                            print("O Computador tirou uma peça.")
                            print("")
                        else:
                            print(f"O Computador tirou {jogada} peças.")
                            print("")
                        jogador = 'Computador'
                    if i == 3 and n != 0:
                        jogada = usuario_escolhe_jogada(n, m)
                        n = n - jogada
                        if jogada == 1:
                            print("Você tirou uma peça.")
                            print("")
                        else:
                            print(f"Você tirou {jogada} peças.")
                            print("")
                        jogador = 'Usuário'
                # Mirror of the branch above with the turn order swapped.
                if primeiro == 'Usuário':
                    if i == 2 and n != 0:
                        jogada = usuario_escolhe_jogada(n, m)
                        n = n - jogada
                        if jogada == 1:
                            print("Você tirou uma peça.")
                            print("")
                        else:
                            print(f"Você tirou {jogada} peças.")
                            print("")
                        jogador = 'Usuário'
                    if i == 3 and n != 0:
                        jogada = computador_escolhe_jogada(n, m)
                        n = n - jogada
                        if jogada == 1:
                            print("O Computador tirou uma peça.")
                            print("")
                        else:
                            print(f"O Computador tirou {jogada} peças.")
                            print("")
                        jogador = 'Computador'
                # Status line after each round.
                # NOTE(review): "apaneas" is a typo for "apenas"; it is a
                # user-facing string, so it is left untouched here.
                if n == 1:
                    print("Agora resta apaneas uma peça.")
                    print("")
                elif n ==0:
                    print("")
                else:
                    print(f"Agora restam {n} peças.")
                    print("")
        else:
            # n reached 0: the player who took the last piece (jogador,
            # always set by then since n started > 0) wins.
            x=False
            print(f"Fim de jogo o {jogador} ganhou!")
    return jogador
def campeonato():
    """Play a three-round championship and print the final score.

    Each round is a full game (partida); wins are tallied per side.
    """
    comp = 0
    usu = 0
    for i in range(3):
        # Rounds are displayed 1-based; the original printed "Rodada 0".
        print(f"**** Rodada {i + 1} ****")
        ganhador = partida()
        if ganhador == 'Computador':
            comp = comp + 1
        else:
            usu = usu + 1
        print("")
    print("**** Final do campeonato! ****")
    print("")
    print(f"Placar: Você {usu} X {comp} Computador")
def main():
    """Show the menu and dispatch to a single game or a championship.

    Loops until the user picks a valid option (1 or 2).  The prompt is a
    multi-line string whose leading whitespace is part of the displayed
    text, so it must not be re-indented.
    """
    z = True
    while z == True:
        escolha=int(input("""
        Olá, seja bem-vindo ao jogo do NIM!
        Dentre as opções a baixo, escolha uma:
        1 - para jogar uma partida isolada
        2 - para jogar um campeonato
        """))
        if escolha == 1:
            print("Você escolheu jogar uma partida isolada!")
            partida()
            z=False
        elif escolha == 2:
            print("Você escolheu jogar um campeonato!")
            campeonato()
            z=False
        else:
            # Invalid choice: stay in the loop and ask again.
            print("Opção inválida, tente novamente!")
main()
| true |
8770a159303e6ee0f20bcf0b77741289d625fb47 | Python | Peng-YM/pymoo | /pymoo/usage/usage_problem.py | UTF-8 | 4,912 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive |
# Usage examples for pymoo's built-in test problems.  The "# START <name>" /
# "# END <name>" markers delimit snippets that are extracted verbatim into
# the generated documentation, so the markers -- and the deliberately
# repeated imports inside each snippet -- must stay intact.  Note that the
# truss2d snippet rebinds the name "plot" (previously the plotting helper)
# to a Scatter object, which truss2d_log then reuses.
# --------------------------------------------------------------------------------------------
# Single
# --------------------------------------------------------------------------------------------
# START ackley
import numpy as np
from pymoo.factory import get_problem
from pymoo.util.plotting import plot_problem_surface
problem = get_problem("ackley", n_var=2, a=20, b= 1 /5, c=2 * np.pi)
plot_problem_surface(problem, 100, plot_type="wireframe+contour")
# END ackley
# START rastrigin
from pymoo.factory import get_problem
from pymoo.util.plotting import plot_problem_surface
problem = get_problem("rastrigin", n_var=2)
plot_problem_surface(problem, 100, plot_type="wireframe+contour")
# END rastrigin
# START sphere
from pymoo.factory import get_problem
from pymoo.util.plotting import plot_problem_surface
problem = get_problem("sphere", n_var=2)
plot_problem_surface(problem, 100, plot_type="wireframe+contour")
# END sphere
# START zakharov
from pymoo.factory import get_problem
from pymoo.util.plotting import plot_problem_surface
problem = get_problem("zakharov", n_var=2)
plot_problem_surface(problem, 100, plot_type="wireframe+contour")
# END zakharov
# --------------------------------------------------------------------------------------------
# Multi
# --------------------------------------------------------------------------------------------
# START zdt1
from pymoo.factory import get_problem
from pymoo.util.plotting import plot
problem = get_problem("zdt1")
plot(problem.pareto_front(), no_fill=True)
# END zdt1
# START zdt2
from pymoo.factory import get_problem
from pymoo.util.plotting import plot
problem = get_problem("zdt2")
plot(problem.pareto_front(), no_fill=True)
# END zdt2
# START zdt3
from pymoo.factory import get_problem
from pymoo.util.plotting import plot
problem = get_problem("zdt3")
plot(problem.pareto_front(), no_fill=True)
# END zdt3
# START zdt4
from pymoo.factory import get_problem
from pymoo.util.plotting import plot
problem = get_problem("zdt4")
plot(problem.pareto_front(), no_fill=True)
# END zdt4
# START zdt5_no_norm
from pymoo.factory import get_problem
from pymoo.util.plotting import plot
problem = get_problem("zdt5", normalize=False)
plot(problem.pareto_front(), no_fill=True)
# END zdt5_no_norm
# START zdt5
from pymoo.factory import get_problem
from pymoo.util.plotting import plot
problem = get_problem("zdt5")
plot(problem.pareto_front(), no_fill=True)
# END zdt5
# START zdt6
from pymoo.factory import get_problem
from pymoo.util.plotting import plot
problem = get_problem("zdt6")
plot(problem.pareto_front(), no_fill=True)
# END zdt6
# START bnh
from pymoo.factory import get_problem
from pymoo.util.plotting import plot
problem = get_problem("bnh")
plot(problem.pareto_front(), no_fill=True)
# END bnh
# START rosenbrock
from pymoo.factory import get_problem
from pymoo.util.plotting import plot_problem_surface
problem = get_problem("rosenbrock", n_var=2)
plot_problem_surface(problem, 100, plot_type="wireframe+contour")
# END rosenbrock
# START griewank
from pymoo.factory import get_problem
from pymoo.util.plotting import plot_problem_surface
problem = get_problem("griewank", n_var=2)
plot_problem_surface(problem, 100, plot_type="wireframe+contour")
# END griewank
# START truss2d
from pymoo.factory import get_problem
from pymoo.visualization.scatter import Scatter
pf = get_problem("truss2d").pareto_front()
plot = Scatter(title="Pareto-front")
plot.add(pf, s=80, facecolors='none', edgecolors='r')
plot.add(pf, plot_type="line", color="black", linewidth=2)
plot.show()
# END truss2d
# START truss2d_log
plot.reset()
plot.do()
plot.apply(lambda ax: ax.set_yscale("log"))
plot.apply(lambda ax: ax.set_xscale("log"))
plot.show()
# END truss2d_log
# --------------------------------------------------------------------------------------------
# START from_string
from pymoo.factory import get_problem
p = get_problem("dtlz1_-1", n_var=20, n_obj=5)
# create a simple test problem from string
p = get_problem("Ackley")
# the input name is not case sensitive
p = get_problem("ackley")
# also input parameter can be provided directly
p = get_problem("dtlz1_-1", n_var=20, n_obj=5)
# END from_string
# --------------------------------------------------------------------------------------------
# START from_function
import numpy as np
from pymoo.model.problem import FunctionalProblem
objs = [
    lambda x: np.sum((x - 2) ** 2),
    lambda x: np.sum((x + 2) ** 2)
]
constr_ieq = [
    lambda x: np.sum((x - 1) ** 2)
]
problem = FunctionalProblem(10,
                            objs,
                            constr_ieq=constr_ieq,
                            xl=np.array([-10, -5, -10]),
                            xu=np.array([10, 5, 10])
                            )
F, CV = problem.evaluate(np.random.rand(3, 10))
print(f"F: {F}\n")
print(f"CV: {CV}")
# END from_function
| true |
bcb416d6f1ee42bb360d1e0962f185b5ca9e46f2 | Python | LeetCodeTaiwan/LeetCode | /289_Game Of Life(by Eric, YoEugene, Pilagod).py | UTF-8 | 600 | 3.078125 | 3 | [] | no_license | class Solution(object):
def gameOfLife(self, board):
def checkLife(i, j):
liveCellNum = 0
for rd, cd in [(-1,-1),(-1,0),(-1,1),(0,-1),(0,1),(1,-1),(1,0),(1,1)]:
if rd + i >= 0 and rd + i < len(board) and cd + j >= 0 and cd + j < len(board[0]):
liveCellNum += board[rd+i][cd+j]
if liveCellNum == 3:
return 1
elif liveCellNum == 2:
return board[i][j] & 1
else:
return 0
nextTurnBoard = [row[:] for row in board]
for i in range(len(board)):
for j in range(len(board[0])):
nextTurnBoard[i][j] = checkLife(i,j)
board = nextTurnBoard
return board | true |
b6877606368903103811137578e79cbc875c2ddd | Python | maodunzhe/clean_code | /binarysearch/occurenceNumber.py | UTF-8 | 824 | 4.25 | 4 | [] | no_license | ## given a sorted arraywith duplicates, output the occurrences of a number inside the array
def index(array, number, first):
    """Binary search in the sorted list *array* for *number*.

    Returns the first index of *number* when *first* is truthy, the last
    index otherwise, or -1 if the value is absent.

    Fixes over the original: floor division keeps `mid` an int on both
    Python 2 and 3 (plain "/" yields a float index under Python 3), and
    the directional updates are now mutually exclusive -- previously, on a
    match with first=False, the unconditional `high = mid - 1` also ran,
    so e.g. index([2, 2, 2], 2, False) wrongly returned 1 instead of 2.
    """
    result = -1
    low, high = 0, len(array) - 1
    while low <= high:
        mid = low + (high - low) // 2
        if array[mid] == number:
            result = mid
            if first:
                high = mid - 1   # keep looking for an earlier occurrence
            else:
                low = mid + 1    # keep looking for a later occurrence
        elif array[mid] < number:
            low = mid + 1
        else:
            high = mid - 1
    return result
print index([1, 2, 2, 3, 4, 4], 2 ,True)
def occurenceNumber(array, number):
    """Return how many times *number* occurs in the sorted list *array*.

    Uses two binary searches: the positions of the first and last
    occurrence bound the run of duplicates.
    """
    firstindex = index(array, number, True)
    if firstindex == -1:
        return 0
    lastindex = index(array, number, False)
    return lastindex - firstindex + 1
print occurenceNumber([1, 2, 2, 2, 3, 4, 4], 2)
| true |
9935b8d88890ab287530e4a59d6f5f154c0378f2 | Python | ngthanhtrung23/CompetitiveProgramming | /codeforces/37/A.py | UTF-8 | 147 | 2.8125 | 3 | [] | no_license | n = input()
# Codeforces 37A (Python 2).  Count how often each bar length appears;
# the tallest tower is the largest count, and the number of towers is the
# number of distinct lengths.  Values are assumed to fit below 1100.
cnt = [0 for i in xrange(1100)]
for x in map(int, raw_input().split()):
    cnt[x] += 1
# Output: tallest tower height, then the number of distinct lengths.
print max(cnt), sum([x > 0 for x in cnt])
| true |
5217347647b9f473474ae65b5dbe29c7a0e47446 | Python | yongdae/hello-prime-python | /ch5/ColorShapes1.py | UTF-8 | 1,797 | 3.765625 | 4 | [] | no_license | import turtle
# Draw five filled shapes (triangle through hexagon, plus a circle) in a
# row, then a caption above them.  The five near-identical stanzas of the
# original are folded into one helper driven by a data table; the turtle
# calls happen in exactly the same order as before.

def _draw_filled_shape(x, color, steps):
    """Draw one filled shape of radius 40 centred on the row y = -50.

    steps is the number of polygon sides passed to turtle.circle();
    None draws a true circle.
    """
    turtle.penup()                       # lift the pen to move without drawing
    turtle.goto(x, -50)
    turtle.pendown()                     # put the pen back down
    turtle.begin_fill()                  # start filling the shape with colour
    turtle.color(color)
    if steps is None:
        turtle.circle(40)                # a real circle
    else:
        turtle.circle(40, steps=steps)   # regular polygon with `steps` sides
    turtle.end_fill()                    # flood-fill the shape

turtle.pensize(3)  # set the pen thickness to 3 pixels

# One entry per shape: x position, fill colour, polygon side count.
for _x, _color, _steps in [(-200, "red", 3), (-100, "blue", 4),
                           (0, "green", 5), (100, "yellow", 6),
                           (200, "purple", None)]:
    _draw_filled_shape(_x, _color, _steps)

# Caption above the shapes ("Colourful shapes of many colours").
turtle.penup()
turtle.goto(-130, 50)
turtle.pendown()
turtle.color("green")
turtle.write("화려한 형형색색의 도형", font = ("맑은 고딕", 18, "bold"))
turtle.hideturtle()  # hide the pen cursor
turtle.done()
| true |
067f72b1c2ab07df4ad14c5b6d40f24692fd5143 | Python | LeonardoCampos-EE/DiskManagerBackend | /src/preprocessor.py | UTF-8 | 2,417 | 3 | 3 | [
"MIT"
] | permissive | import cv2
import numpy as np
import imutils
import pytesseract
class Preprocessor:
    """Locates a coloured Post-it note in a photo and OCRs its text.

    Pipeline: colour-mask the image in HSV space, edge-detect the masked
    region, keep the largest contour, crop to its bounding box, then run
    Tesseract on the crop.
    """

    def __init__(self):
        # HSV ranges (OpenCV hue is in [0, 180]) for the supported
        # Post-It colours; each entry is [lower bound, upper bound].
        self.colors = {
            "pink": [
                np.array([167, 50, 50]),  # F383A7
                np.array([173, 255, 255]),
            ]
        }

    def crop_postit(self, image_path: str):
        """Return *image_path*'s image cropped to the Post-it's bounding box."""
        image = cv2.imread(image_path, 1)
        if image is None:
            # cv2.imread silently returns None on a bad path; fail loudly
            # here instead of crashing later inside cvtColor.
            raise FileNotFoundError("could not read image: %s" % image_path)
        edges = self.detect_edges(image.copy())
        largest_contour = self.get_largest_contour(edges)
        x, y, w, h = cv2.boundingRect(largest_contour)
        # Black out everything outside the Post-it outline, then crop.
        mask_canvas = np.zeros_like(image)
        cv2.drawContours(mask_canvas, [largest_contour], -1, (255, 255, 255), -1)
        postit_mask = cv2.inRange(mask_canvas, (255, 255, 255), (255, 255, 255))
        result = cv2.bitwise_and(image, image, mask=postit_mask)
        return result[y : y + h, x : x + w]

    def detect_edges(self, image: np.ndarray):
        """Return a Canny edge map of the regions matching the Post-it colour."""
        hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv_image, self.colors["pink"][0], self.colors["pink"][1])
        # Keep only pixels inside the Post-it colour range.
        hsv_image = cv2.bitwise_and(hsv_image, hsv_image, mask=mask)
        rgb = cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)
        gray = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
        return imutils.auto_canny(gray)

    def get_largest_contour(self, edges: np.ndarray):
        """Return the contour with the most points, or None if there are none."""
        contours, _ = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        return max(contours, key=len, default=None)

    def get_prediction(self, cropped_image: np.ndarray):
        """Run Tesseract OCR on the cropped Post-it and return the text.

        NOTE(review): the original popped up a cv2.imshow window and
        blocked on cv2.waitKey(0) here (plus debug prints) -- leftovers
        that hang a headless backend, so they have been removed.
        """
        return pytesseract.image_to_string(
            cv2.cvtColor(cropped_image, cv2.COLOR_BGR2RGB)
        )
if __name__ == "__main__":
image = cv2.imread("tests/disk_test.jpeg", 1)
preprocessor = Preprocessor()
cropped_image = preprocessor.crop_postit("tests/disk_test.jpeg")
pred = preprocessor.get_prediction(cropped_image)
| true |
b48bf9c7f8d2f3f52958ff6d0932df49b05d4731 | Python | mkowalski/python-advanced-course-cern | /simulation/PygletDisplay.py | UTF-8 | 1,070 | 2.875 | 3 | [] | no_license | from Display import Display
import pyglet
from math import sin, cos, pi
class PygletDisplay(Display):
    """Pyglet-backed particle display.

    Assumes the Display base class provides `canvas_size`, `particles`,
    `refresh_rate` and an `update` method -- TODO confirm against Display.
    """
    def __init__(self):
        super(PygletDisplay, self).__init__()
        # Window sized from the base class; on_draw is registered as a
        # pyglet event handler and fires on every frame.
        self.w = pyglet.window.Window(self.canvas_size[0], self.canvas_size[1])
        self.fps_display = pyglet.clock.ClockDisplay()
        self.w.push_handlers(self.on_draw)
    def on_draw(self):
        """Redraw every particle as a 20-segment circle outline, plus the FPS counter."""
        self.w.clear()
        def circle_vertices(p):
            # Yield x, y pairs approximating particle p's circle.  The
            # float accumulation stops just before 2*pi, giving 20 points.
            twopi = 2 * pi
            delta_angle = twopi / 20
            angle = 0
            while angle < twopi:
                yield p.x + p.r * cos(angle)
                yield p.y + p.r * sin(angle)
                angle += delta_angle
        for p in self.particles:
            pyglet.gl.glColor3f(*p.colour.as_rgb_01())
            pyglet.graphics.draw(20, pyglet.gl.GL_LINE_LOOP,
                                 ('v2f', tuple(circle_vertices(p))))
        self.fps_display.draw()
    def __call__(self, *args, **kwargs):
        # Schedule the simulation update at the display's refresh rate and
        # enter pyglet's main loop (blocks until the window closes).
        pyglet.clock.schedule_interval(self.update, self.refresh_rate)
        pyglet.app.run()
| true |
ea1805ecd8184bb46462dd93f9ba986c40f1d5d6 | Python | satyapal06/misroute_assistant | /Source/readMongo.py | UTF-8 | 653 | 2.5625 | 3 | [] | no_license | import pprint
from pymongo import MongoClient
client = MongoClient()
my_db = client.custdb
col = my_db.cust_details
list_of_adr = [] # we will use this to store the details obtained from MongoDB
cursor = col.find()
count = 0
all_record_types = []
for document in cursor:
if "9535751827" in (document["phone_number"]):
count += 1
pprint.pprint(document)
print "\n\n\n"
if count == 30:
break
# for record in document['address_components']:
# for rec_type in record['types']:
# all_record_types.append(rec_type)
# client.close()
#
# all_record_types = list(set(all_record_types))
# pprint.pprint(all_record_types)
| true |
38fd44f9dcec0c6c5257ed103c20f4f8aec1c63f | Python | subburajs/san | /source/beam_search.py | UTF-8 | 7,750 | 2.53125 | 3 | [
"MIT"
] | permissive | """
This was heavily borrowed from
https://github.com/jadore801120/attention-is-all-you-need-pytorch
"""
''' This module will handle the text generation with beam search. '''
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from beam import Beam
def beam_decode(model, src, hand_regions, rel_mask, src_mask, max_len=30, start_symbol=1, n_beam=3):
    """Beam-search decode every sequence in the batch *src*.

    Encodes the batch once, then runs beam search per sequence and keeps
    the single best hypothesis, truncated just after the first EOS token
    (id 2) -- the EOS itself is kept.  Returns a list of torch.IntTensor,
    one per input sequence.

    hand_regions and start_symbol are accepted for interface compatibility
    but unused here.
    """
    # A relative mask, when given, overrides the plain source mask.
    if rel_mask is not None:
        src_mask = rel_mask
    memory = model.src_emb(src)
    memory = model.position(memory)
    memory = model.encode(memory, None, src_mask)
    ys = []
    scores = []
    n_hyp = 1  # keep only the single best hypothesis per sequence
    for b in range(src.shape[0]):
        src_seq = src[b].unsqueeze(0)
        src_enc = memory[b].unsqueeze(0)
        mask = None if src_mask is None else src_mask[b].unsqueeze(0)
        # NOTE: may produce eos tokens
        batch_hyp, batch_scores = translate_batch(
            model, src_seq, src_enc, mask, n_beam, n_hyp, max_len)
        # Truncate after the first EOS token; everything beyond it is dropped.
        pred = []
        for elem in batch_hyp[0][0]:
            pred.append(elem)
            if elem == 2:
                break
        ys.append(pred)
        # NOTE(review): scores are collected but never returned.
        scores.append(batch_scores)
    return [torch.IntTensor(y) for y in ys]
def translate_batch(model, src_seq, src_enc, src_mask, beam_size = 2, n_best=1, max_len=20, device='cuda'):
    ''' Translation work in one batch.

    src_enc is the pre-computed encoder output; each instance in the batch
    is expanded to beam_size rows, decoded step by step, and finished
    instances are pruned from the working tensors.  Returns
    (batch_hyp, batch_scores): the n_best hypotheses and their scores per
    instance.
    '''
    # Map instance id -> row position in the (pruned) working tensors.
    def get_inst_idx_to_tensor_position_map(inst_idx_list):
        ''' Indicate the position of an instance in a tensor. '''
        return {inst_idx: tensor_position for tensor_position, inst_idx in enumerate(inst_idx_list)}
    # Keep only the beam rows that belong to still-active instances.
    def collect_active_part(beamed_tensor, curr_active_inst_idx, n_prev_active_inst, n_bm):
        ''' Collect tensor parts associated to active instances. '''
        _, *d_hs = beamed_tensor.size()
        n_curr_active_inst = len(curr_active_inst_idx)
        new_shape = (n_curr_active_inst * n_bm, *d_hs)
        beamed_tensor = beamed_tensor.view(n_prev_active_inst, -1)
        beamed_tensor = beamed_tensor.index_select(0, curr_active_inst_idx)
        beamed_tensor = beamed_tensor.view(*new_shape)
        return beamed_tensor
    def collate_active_info(src_enc, inst_idx_to_position_map, active_inst_idx_list):
        # Sentences which are still active are collected,
        # so the decoder will not run on completed sentences.
        n_prev_active_inst = len(inst_idx_to_position_map)
        active_inst_idx = [inst_idx_to_position_map[k] for k in active_inst_idx_list]
        active_inst_idx = torch.LongTensor(active_inst_idx).to(device)
        #active_src_seq = collect_active_part(src_seq, active_inst_idx, n_prev_active_inst, n_bm)
        active_src_enc = collect_active_part(src_enc, active_inst_idx, n_prev_active_inst, n_bm)
        active_inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)
        #return active_src_seq, active_src_enc, active_inst_idx_to_position_map
        return active_src_enc, active_inst_idx_to_position_map
    # One decoding step for all active beams.  NOTE(review): the src_seq
    # and enc_output parameters are unused; the closure reads src_enc /
    # src_mask from the enclosing scope instead.
    def beam_decode_step(
            inst_dec_beams, len_dec_seq, src_seq, enc_output, inst_idx_to_position_map, n_bm):
        ''' Decode and update beam status, and then return active beam idx '''
        def prepare_beam_dec_seq(inst_dec_beams, len_dec_seq):
            dec_partial_seq = [b.get_current_state() for b in inst_dec_beams if not b.done]
            dec_partial_seq = torch.stack(dec_partial_seq).to(device)
            dec_partial_seq = dec_partial_seq.view(-1, len_dec_seq)
            return dec_partial_seq
        # Causal (subsequent-position) mask for the partial decode.
        # NOTE(review): the mask is moved to the hard-coded 'cuda' device,
        # ignoring the `device` argument used everywhere else.
        def prepare_beam_dec_mask(len_dec_seq, n_active_inst, n_bm):
            #dec_partial_pos = torch.arange(1, len_dec_seq + 1, dtype=torch.long, device=device)
            #dec_partial_pos = dec_partial_pos.unsqueeze(0).repeat(n_active_inst * n_bm, 1)
            attn_shape = (n_active_inst*n_bm, len_dec_seq, len_dec_seq)
            subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
            dec_partial_pos = torch.from_numpy(subsequent_mask) == 0
            dec_partial_pos = dec_partial_pos.type(torch.uint8).to('cuda')
            return dec_partial_pos
        def predict_word(dec_seq, dec_pos, src_enc, n_active_inst, n_bm):
            #dec_mask = dec_mask.unsqueeze(1)
            dec_output = model.decode(src_enc, dec_seq, src_mask, dec_pos)
            #(beam, vocab)
            dec_output = dec_output[:, -1, :] # Pick the last step: (bh * bm) * d_h
            #print(dec_output.shape)
            word_prob = model.output_layer(dec_output)
            word_prob = word_prob.view(n_active_inst, n_bm, -1)
            #print(word_prob.shape)
            #sd
            return word_prob
        # Advance every beam with its word probabilities; an instance stays
        # active only while its beam has not completed.
        def collect_active_inst_idx_list(inst_beams, word_prob, inst_idx_to_position_map):
            active_inst_idx_list = []
            for inst_idx, inst_position in inst_idx_to_position_map.items():
                is_inst_complete = inst_beams[inst_idx].advance(word_prob[inst_position])
                if not is_inst_complete:
                    active_inst_idx_list += [inst_idx]
            return active_inst_idx_list
        n_active_inst = len(inst_idx_to_position_map)
        dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec_seq)
        dec_pos = prepare_beam_dec_mask(len_dec_seq, n_active_inst, n_bm)
        word_prob = predict_word(dec_seq, dec_pos, src_enc, n_active_inst, n_bm)
        # Update the beam with predicted word prob information and collect incomplete instances
        active_inst_idx_list = collect_active_inst_idx_list(
            inst_dec_beams, word_prob, inst_idx_to_position_map)
        return active_inst_idx_list
    # Pull the n_best scored hypotheses out of every (finished) beam.
    def collect_hypothesis_and_scores(inst_dec_beams, n_best):
        all_hyp, all_scores = [], []
        for inst_idx in range(len(inst_dec_beams)):
            scores, tail_idxs = inst_dec_beams[inst_idx].sort_scores()
            all_scores += [scores[:n_best]]
            hyps = [inst_dec_beams[inst_idx].get_hypothesis(i) for i in tail_idxs[:n_best]]
            all_hyp += [hyps]
        return all_hyp, all_scores
    with torch.no_grad():
        #-- Encode
        #src_seq, src_pos = src_seq.to(self.device), src_pos.to(self.device)
        #memory = model.module.encode(src_seq, regions, src_mask)
        #-- Repeat data for beam search
        n_bm = beam_size
        n_inst, len_s, d_h = src_enc.size()
        #src_seq = src_seq.repeat(1, n_bm).view(n_inst * n_bm, len_s, -1)
        src_enc = src_enc.repeat(1, n_bm, 1).view(n_inst * n_bm, len_s, d_h)
        #print(src_enc.shape)
        if(src_mask is not None):
            src_mask = src_mask.repeat(1, n_bm, 1).view(n_inst * n_bm, 1, len_s)
        #-- Prepare beams
        inst_dec_beams = [Beam(n_bm, device=device) for _ in range(n_inst)]
        #-- Bookkeeping for active or not
        active_inst_idx_list = list(range(n_inst))
        inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)
        #-- Decode
        for len_dec_seq in range(1, max_len + 1):
            #print(len_dec_seq)
            active_inst_idx_list = beam_decode_step(
                inst_dec_beams, len_dec_seq, src_seq, src_enc, inst_idx_to_position_map, n_bm)
            if not active_inst_idx_list:
                #print("BREAK")
                break # all instances have finished their path to <EOS>
            src_enc, inst_idx_to_position_map = collate_active_info(
                src_enc, inst_idx_to_position_map, active_inst_idx_list)
        batch_hyp, batch_scores = collect_hypothesis_and_scores(inst_dec_beams, n_best)
        return batch_hyp, batch_scores
| true |
2b9bc4e83669bedeb31ff47ed0347be60f6b97f0 | Python | osmanatam/PythonRehberi | /18_modul_kullanimi.py | UTF-8 | 1,638 | 3.703125 | 4 | [] | no_license | # import math
# 1. YÖNTEM
# import math as matematik # Diye kendimiz de adlandırabiliriz.
# deger = dir(math)
# deger = help(math)
# deger = math.sqrt(49) # Karekökü
# deger = math.factorial(5)
# 2. YÖNTEM
# from math import * # Şeklinde de kullanabiliriz. Burda math'in tüm yöntemlerini aldık.
# def sqrt(x):
# print("x: " + str(x))
# from math import factorial, sqrt # Böyle ise yalnız bu ikisini almış oluruz.
# deger = factorial(5) # math. dememize gerek kalmaz.
# deger = sqrt(9)
# print(deger)
##############################################
# import random
# sonuc = random.random() # 0.0 ila 1.0 arasında rastgele bir sayı üretilir.
# sonuc = random.random() * 70
# sonuc = random.uniform(552, 639) # 552 ila 639 arasında üretilecek.
# sonuc = int(random.uniform(552, 639)) # Kesirleri ortadan kaldırdık.
# sonuc = random.randint(552, 639) # Ya da bunu, bu yöntemle de yapabiliyoruz.
# adlar = ["Haluk", "Bumin", "Kürşad", "İstemi", "Yavuz", "Sencer", "Çağatay"]
# sonuc = adlar[random.randint(0, len(adlar) - 1)]
# sonuc = random.choice(adlar) # Bir üstteki kullanımın yerine kullanılabilecek bir yöntem.
# liste = list(range(10))
# random.shuffle(liste) # Bir üstteki listeyi rastgele sıralar.
# sonuc = liste
# liste = range(100)
# sonuc = random.sample(liste, 3)
# sonuc = random.sample(adlar, 2)
# print(sonuc)
############################################
import mod
islem = mod.sayi
islem = mod.sayilar
islem = mod.kisiSozlugu["ad"]
islem = mod.fonksiyon(70)
kisiNesnesi = mod.Kisi()
kisiNesnesi.konus()
print(islem)
| true |
5d52e0ba28ba1c835d3d8b9c0245571b868c23d4 | Python | nmessa/Python-2020 | /Lab Exercise 10.26.2020/problem1.py | UTF-8 | 1,001 | 4.25 | 4 | [] | no_license | ## Lab Exercise 10/26/2020 Problem 1
## Author:
## This program will display the force between two charged particles
from math import *
#This function will return the force between 2 charged particles
#and is provided with 3 parameters; charge1, charge2 and distance
#the particles are apart
def coulomb(q1, q2, r):
    """Return the electrostatic force in newtons between two point charges.

    q1, q2: charges in coulombs; r: separation in metres.
    Coulomb's law: F = k * q1 * q2 / r**2 with k = 1 / (4*pi*epsilon_0).
    (The original body was only a placeholder comment, which made the
    whole file fail to parse.)  Using epsilon_0 = 8.854e-12 reproduces the
    sample output in the footer: 2.300862064124984e-10 N for two 1.6e-16 C
    charges one micron apart.
    """
    epsilon_0 = 8.854e-12            # permittivity of free space (F/m)
    k = 1 / (4 * pi * epsilon_0)     # Coulomb constant, ~8.99e9 N*m^2/C^2
    return k * q1 * q2 / r ** 2
#Get input from user for charge1, charge2, and distance between charges
#and store in q1, q2, and r as floating point values
#Add code here
#Pass parameters to function and print out the force in Newtons
#Add code here
#Charge of proton = 1.6e-19 coulomb
#Charge of electron = -1.6e-19 coulomb
## Two protons seperated by 1 micron will give a
## force of 2.300862064124984e-10 Newtons
##Sample output
##Enter the number of coulombs charge charge 1: 1.6e-16
##Enter the number of coulombs charge charge 2: 1.6e-16
##Enter the distance the charges are seperated (in meters): 1e-6
##2.300862064124984e-10 Newtons
| true |
01f802b84656df881ab1b75f2a2a8c3adb76666c | Python | UVG-Teams/turing-machine | /turing.py | UTF-8 | 3,541 | 3.328125 | 3 | [
"MIT"
] | permissive | """
Logica Matematica
Maquina de Turing
18935 Jose Block
18049 Gian Luca Rivera
18676 Francisco Rosal
"""
from prettytable import PrettyTable
import json
# Referencia
# https://www.python-course.eu/turing_machine.php
class Tape(object):
    """Sparse Turing-machine tape: cells never written read as the blank symbol."""
    blank_symbol = "-"
    def __init__(self, tape_string):
        # Map cell position -> symbol; positions absent from the dict are blank.
        self.tape = dict(enumerate(tape_string))
    def __str__(self):
        # Render cells in positional order.  The original joined them in
        # dict-insertion order, which scrambles the output as soon as the
        # head writes to the left of position 0 (that cell was appended at
        # the end of the string instead of the front).
        return "".join(self.tape[i] for i in sorted(self.tape))
    def __getitem__(self, index):
        # Unwritten cells read as the blank symbol.
        return self.tape[index] if index in self.tape else Tape.blank_symbol
    def __setitem__(self, pos, char):
        self.tape[pos] = char
class TuringMachine(object):
    """Single-tape Turing machine loaded from a JSON configuration file.

    The JSON must provide: "q" (state set), "initial_state",
    "transition_function" (keys "state,symbol" -> {"state", "value",
    "direc"}), "final_states" and "tape".
    """
    def __init__(self, filename):
        self.head_position = 0
        self.import_tape(filename)
    def import_tape(self, filename):
        """Load the machine description and tape from *filename*."""
        # (Local name "configutations" is a typo for "configurations",
        # kept as-is since this is a documentation-only pass.)
        configutations = self.read_json(filename)
        self.possible_states = configutations["q"]
        self.current_state = configutations["initial_state"]
        self.transition_function = configutations["transition_function"]
        self.final_states = configutations["final_states"]
        self.__tape = Tape(configutations["tape"])
    @property
    def actual_tape(self):
        # Current tape contents rendered as a string.
        return str(self.__tape)
    @property
    def is_final(self):
        # True once the machine has reached an accepting/final state.
        return True if self.current_state in self.final_states else False
    def step(self):
        """Apply one transition; a missing transition leaves the machine unchanged."""
        actual_bit = self.__tape[self.head_position]
        # Transitions are keyed as "state,symbol".
        transition_index = "{},{}".format(self.current_state, actual_bit)
        if transition_index in self.transition_function \
                and self.current_state in self.possible_states:
            transition = self.transition_function[transition_index]
            self.current_state = transition["state"]
            self.__tape[self.head_position] = transition["value"]
            # "R"/"L" move the head; any other value keeps it in place.
            if transition["direc"] == "R":
                self.head_position += 1
            elif transition["direc"] == "L":
                self.head_position -= 1
            else:
                pass
    def run(self):
        """Run until a final state, printing each configuration.

        Every configuration (tape with the state name spliced in at the
        head position) is printed, appended to a PrettyTable, and written
        to 'output.txt'.  NOTE: a machine that never reaches a final state
        loops forever here.
        """
        my_table = PrettyTable(["Id", "Configuracion"])
        cont = 0
        with open('output.txt', 'w') as output_file:
            while True:
                actual_setting = ""
                for i in range(len(self.actual_tape)):
                    if self.head_position == i:
                        actual_setting += self.current_state
                    actual_setting += self.actual_tape[i]
                    # Head past the right end: state name goes last.
                    if self.head_position == len(self.actual_tape) \
                        and (i + 1) == len(self.actual_tape):
                        actual_setting += self.current_state
                print(actual_setting)
                my_table.add_row([cont, actual_setting])
                output_file.write(actual_setting + "\n")
                if self.is_final: return print(my_table)
                self.step()
                cont += 1
    def read_json(self, filename):
        """Parse and return the JSON configuration in *filename*."""
        with open(filename) as my_json:
            return json.load(my_json)
# ==================================================================
# ~ . ~ . ~ . ~ . ~ . ~ . ~ . ~ . ~
# ~ . ~ . ~ . ~ . ~ . ~ . ~ . ~ . ~
# ~ . ~ . ~ . ~ . ~ . ~ . ~ . ~ . ~
# ==================================================================
turing = TuringMachine("input-ejemplo.json")
# turing = TuringMachine("input-aceptacion.json")
# turing = TuringMachine("input-rechazo.json")
# turing = TuringMachine("input-infinito.json")
turing.run()
| true |
46d38de52c8ed0e5113005a701595aaf7e8d5c66 | Python | Mohit1352/OXFix | /OXFix.py | UTF-8 | 1,889 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env python3
# Place this file in the directory of all broken PPTs or DOCs, or provide
# directory names as arguments, or pipe directory names from another command.
#
# Repair strategy: many Office files are zip archives, so unpacking and
# re-packing them can fix certain corruptions.  The whole script is now
# wrapped in main() behind a __main__ guard: the original read sys.stdin
# and changed directories at import time, and used a bare "except:".
import os
import shutil
import sys

FILETYPES = [".ppt", ".doc"]

def _parse_args(argv):
    """Split argv into extra directories and the optional filemode=N flag."""
    directories = ["."]
    mode = '0'  # '0' = all extensions, otherwise a 1-based index into FILETYPES
    for arg in argv:
        if "filemode" in arg:
            mode = arg.split("=")[-1]
        else:
            directories.append(arg)
    return directories, mode

def _read_piped_directories(directories):
    """Append directory names piped on stdin (one per line; a blank line ends input)."""
    print("Pipe may be empty, press Enter key to continue if no output is seen.")
    for line in sys.stdin:
        if line == "\n":
            break
        directories.append(line.replace(" ", "").replace("\n", ""))

def _repack(filename):
    """Unpack *filename* as a zip archive and repack it in place.

    NOTE(review): relies on the private shutil._unpack_zipfile /
    shutil._make_zipfile helpers (as the original did), which may change
    between Python releases; shutil.unpack_archive/make_archive are the
    public equivalents.  If repacking fails after the chdir, the working
    directory is left inside the temp folder, as before.
    """
    shutil._unpack_zipfile(filename, f"./{filename}_Temp")
    os.chdir(f"./{filename}_Temp")
    shutil._make_zipfile(f"../{filename}", ".")
    os.chdir("..")
    os.remove(filename)
    shutil.rmtree(f"{filename}_Temp", ignore_errors=True)
    os.rename(f"{filename}.zip", filename)

def main():
    directories, mode = _parse_args(sys.argv[1:])
    filetypes = FILETYPES if mode == '0' else FILETYPES[int(mode) - 1:int(mode)]
    _read_piped_directories(directories)
    print("Mode: ", end="")
    for ext in filetypes:
        print(ext[1:], end=" ")
    print()
    print("Current Working Directory:", os.getcwd())
    fixed_count = 0
    for d in directories:
        files = os.listdir(f"./{d}")
        # Substring match (not a suffix check), as in the original:
        # ".ppt" therefore also matches ".pptx" files.
        matches = sorted({f for ext in filetypes for f in files if ext in f})
        if not matches:
            continue
        print("\nFolder", d, ":")
        print(len(matches), "files to fix.")
        os.chdir(f"./{d}")
        for idx, name in enumerate(matches, start=1):
            print(f"[{idx}]", name, end=" ...")
            try:
                _repack(name)
            except Exception:
                # Narrowed from a bare "except:"; a failed repack keeps the
                # original (optimistic) wording.
                print("No issue detected.")
            else:
                print("Done.")
                fixed_count += 1
        if d != ".":
            os.chdir("..")
    if fixed_count > 0:
        print("All files fixed.")
    else:
        print("No files found matching criteria in current directory.")

if __name__ == "__main__":
    main()
| true |
5d978e2136ba3666b7d15de24885b7d36623031e | Python | SamuelStephen/CP1404_practicals | /Practical_2/exceptions_demo.py | UTF-8 | 919 | 3.84375 | 4 | [] | no_license | # try:
# numerator = int(input("Enter the numerator: "))
# denominator = int(input("Enter the denominator: "))
# fraction = numerator / denominator
# print(fraction)
# except ValueError:
# print("Numerator and denominator must be valid numbers!")
# except ZeroDivisionError:
# print("Cannot divide by zero!")
# print("Finished.")
# a value error will occur when the numerator entered is an integer
# a zero division error will occur when the entered denominator is == 0
# updated to error check denominator
try:
numerator = int(input("Enter the numerator: "))
denominator = 0
while denominator <= 0:
denominator = int(input("Enter the denominator: "))
fraction = numerator / denominator
print(fraction)
except ValueError:
print("Numerator and denominator must be valid numbers!")
except ZeroDivisionError:
print("Cannot divide by zero!")
print("Finished.")
| true |