blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
81933c5e45315f17f9b4b5d9f98d9adbc20b7f86 | Python | David90Mar/Pokebot | /calculate_probability.py | UTF-8 | 3,318 | 2.59375 | 3 | [] | no_license | class game:
def __init__(self, name='partita_poker', blind=0.005):
self.name = name
self.deck = deck_poker()
self.players=0
self.turni=[]
self.inizio=[]
self.bet = True
self.piatto=0
self.max_bet=0
self.scommesse=1
self.blind=blind
a={}
for k in range(1,11):a[k]=('no', 0)
self.punti = a
a={}
for k in range(1,11):a[k]='0.000'
self.action = a
a={}
for k in range(1,11):a[k]=0
self.capitale = a
a={}
for k in range(1,11):a[k]=0
self.investimento = a
def start(self):
dt = datetime.now()
rand=int(dt.microsecond**2/(dt.year+dt.month*dt.second+dt.day*dt.minute+dt.hour+1))
np.random.seed(rand)
self.players = int(round(np.random.uniform(low=2, high=10, size=None),0))
inizio = int(round(np.random.uniform(low=1, high=self.players, size=None),0))
k=[]
k2=[]
for i in range(1,self.players+1):
if i>=inizio: k.append(i)
else: k2.append(i)
k.extend(k2)
self.turni = k
self.inizio=k[2:]
k2=k[0:2]
self.inizio.extend(k2)
# mescolo mazzo
self.deck =deck_poker()
self.deck=self.deck.create_deck().shuffle()
# creo casualmente il capitale
for k in range(1,self.players+1):
self.capitale[k]=round(np.random.uniform(low=0, high=1, size=None),3)
#resetto carte
self.deck.table_cards = []
a={}
for k in range(1,11):a[k]=[]
self.deck.player_cards = a
#resetto action
self.bet=False
a={}
for k in range(1,11):a[k]= '0.000'
self.action = a
a={}
for k in range(1,11):a[k]= 0
self.investimento = a
self.scommesse=1
self.bet==True
return self
    def simulate(self):
        """Deal the hand and score every seated player via point().

        Returns self for chaining.
        """
        # Deal two hole cards to each seat, in turn order.
        for i in self.turni:
            self.deck.take(n=2, player=i)
        # Player 0 acts as the table: draw the 5 community cards.
        self.deck.take(n=5, player=0)
        for i in self.turni:
            # Each player's pool = own 2 hole cards + 5 table cards.
            self.deck.player_cards[i].extend(self.deck.table_cards)
            hand_cards=self.deck.player_cards[i][0:2]
            values=[]
            colors=[]
            # Card strings appear to be rank digits plus one suit letter from
            # p/f/q/c (assumption inferred from the parsing below -- unverified).
            for gi in np.array(self.deck.player_cards[i]):
                for color in ['p','f','q','c']:
                    if color in gi:
                        values.append(int(gi.replace(color,'')))
                        colors.append(color)
            a=list(zip(values,colors))
            def getKey(item):
                return item[0]
            # Sort the (rank, suit) pairs by rank, highest first.
            s=sorted(a, key=getKey, reverse= True)
            values =[]
            colors=[]
            for si in s:
                values.append(si[0])
                colors.append(si[1])
            self.punti[i]=point(values, colors,hand_cards)
        return self
| true |
84828bec6b5822d1acddb7bc43cbc26ab55a7c98 | Python | taehoon95/python-study | /enumerate와리스트내포/확인문제(2,8,16진법).py | UTF-8 | 321 | 3.609375 | 4 | [] | no_license | print("{:b}".format(10))
print("{:o}".format(10))
print("{:x}".format(20))
print(int("1010",2))
print()
# BUG FIX: this exercise converts *strings* from base 8 and 16; the original
# printed `int(12), 8` and `int(10), 16` (two values) instead of parsing
# "12" as octal and "10" as hexadecimal with int(text, base).
print(int("12",8))
print(int("10",16))
# Numbers from 1..100 whose binary representation contains exactly one '0'.
output = [i for i in range(1,100+1)
           if "{:b}".format(i).count("0") == 1]
for i in output:
    print("{} : {}".format(i, "{:b}".format(i)))
print("합계: ", sum(output)) | true |
36a2f893e947a0b74f675b4061de6d63ccc77b7e | Python | Satxm/aliddns | /GetIPv6Address/LinuxIPv6.py | UTF-8 | 768 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive | import os
import re
def ShortIPv6Address():
    """Return every compressed (``::``) global-scope /128 IPv6 address from ifconfig."""
    ifconfig_dump = os.popen("ifconfig").read()
    return re.findall(r" inet6 ([a-f0-9:]*::[a-f0-9:]*) prefixlen 128 scopeid 0x0<global>", ifconfig_dump, re.I)
def LongIPv6Address():
    """Return every fully-written global-scope /64 IPv6 address from ifconfig."""
    ifconfig_dump = os.popen("ifconfig").read()
    return re.findall(r" inet6 ([a-f0-9:]*:[a-f0-9:]*) prefixlen 64 scopeid 0x0<global>", ifconfig_dump, re.I)
if __name__ == "__main__":
    # Run ifconfig once per address style instead of up to three times each.
    short_addrs = ShortIPv6Address()
    long_addrs = LongIPv6Address()
    print("获取到本机短IPv6地址:%s" % short_addrs)
    print("获取到本机长IPv6地址:%s" % long_addrs)
    # Prefer the short (compressed) address and fall back to the long one.
    if short_addrs:
        ipv6 = short_addrs[0]
    elif long_addrs:
        ipv6 = long_addrs[0]
    else:
        # ROBUSTNESS FIX: the original did LongIPv6Address()[0] unconditionally
        # here and raised IndexError when the host has no global IPv6 address.
        ipv6 = ""
    print("获取到本机IPv6地址:%s" % ipv6)
dcbfede07fb07cf8984e4cc37dc87887203217f1 | Python | ryavorsky/EduMap | /python/parse_direktor_shkoly.py | UTF-8 | 3,311 | 2.609375 | 3 | [] | no_license | import os
import sys
# Work inside the scraped-HTML folder of the "Direktor Shkoly" journal dump.
os.chdir('../data/Direktor_Shkoly/html/')
file_names = os.listdir()
print(os.getcwd(), os.listdir())
# Each collected record is [title, authors, abstract, year, issue].
res_data = []
# Parse each issue's HTML contents page into (title, authors, abstract,
# year, issue) records. Filenames look like "<year>_<issue>.<ext>".
for file_name in file_names:
    f_in = open(file_name, "r", encoding="cp1251")
    source_text = f_in.read()
    f_in.close()
    year = file_name.split(".")[0].split("_")[0]
    issue = file_name.split(".")[0].split("_")[1]
    # Keep only the markup between the issue-contents banner and the next </td>.
    source_text = source_text.split('alt="Содержание номера"></p>')[1]
    source_text = source_text.split('</td>')[0]
    # Rewrite HTML markers into "Authors:"/"Abstract:"/"Title:" tags that the
    # line-oriented state machine below can recognise.
    source_text = source_text.replace('\t\t<p class="main_tagline"><b>',"Authors:")
    source_text = source_text.replace("<br></b>", "\nAbstract:")
    source_text = source_text.replace('\t\t<p class="main" style="text-align: left"><b>', "\nTitle:")
    source_text = source_text.replace("</b></p>", "")
    #source_text = source_text.replace("</p>", "\n")
    lines = source_text.split("\n")
    #f_out.write(source_text)
    #f_out.close()
    #sys.exit("Done so far")
    # State machine over lines: Space -> Title -> Authors -> Abstract -> Space.
    last_line_type = "Space"
    for line in lines:
        #f_out.write(last_line_type + "###" + line + "\n")
        if last_line_type == "Space" :
            if line.find("Title:") > -1 :
                title = line[6:].upper()
                # Strip a leading HTML tag from the title, if present.
                if title.find("<") == 0:
                    title = title.split(">")[1]
                    title = title.split("<")[0]
                last_line_type = "Title"
        elif last_line_type == "Title" :
            if line.find("Authors:") > -1:
                authors = line[8:]
                last_line_type = "Authors"
            else:
                # Title with no author line: store it with empty author/abstract.
                authors = ""
                abstract = ""
                res_data.append([title, authors, abstract, year, issue])
                last_line_type = "Space"
        elif last_line_type == "Authors":
            if line.find("Abstract:") > -1:
                abstract = line[9:]
                last_line_type = "Abstract"
                # A single-line abstract ends at the first HTML tag.
                pos_b = abstract.find("<")
                if pos_b > -1:
                    abstract = abstract.split("<")[0]
                    res_data.append([title, authors, abstract, year, issue])
                    last_line_type = "Space"
            else:
                abstract = ""
                res_data.append([title, authors, abstract, year, issue])
                last_line_type = "Space"
        elif last_line_type == "Abstract":
            # Multi-line abstract: accumulate lines until an HTML tag appears.
            abstract += line
            pos_b = abstract.find("<")
            if pos_b > -1:
                abstract = abstract.split("<")[0]
                res_data.append([title, authors, abstract, year, issue])
                last_line_type = "Space"
# Write the collected records out. Using `with` guarantees all four output
# files are flushed and closed even if a write fails (the original used bare
# open()/close() pairs that leaked on error).
with open("../titles.txt", "w", encoding="utf-8") as f_titles, \
     open("../authors.txt", "w", encoding="utf-8") as f_authors, \
     open("../abstracts.txt", "w", encoding="utf-8") as f_abstracts, \
     open("../titles_and_abstracts.txt", "w", encoding="utf-8") as f_titles_and_abstracts:
    # Only records that actually have an abstract (block[2]) are exported.
    for block in res_data:
        if len(block[2]) > 0:
            f_titles.write(block[4] + ":" + block[0] + "\n")
            f_authors.write(block[4] + ":" + block[1] + "\n")
            f_abstracts.write(block[4] + ":" + block[2] + "\n")
            f_titles_and_abstracts.write(block[3] + "\t" + block[4] + "\n" + block[0] + "\n" + block[2] + "\n\n")
| true |
477edd944af535b442f03794bee7565241694faa | Python | JoshuaPedro/5E-Character | /CSM/character/management/commands/populate_db.py | UTF-8 | 1,676 | 2.734375 | 3 | [] | no_license | """Single command to call all other populate commands. This will require that all initial migrations have been made."""
# Django Imports:
from django.core.management.base import BaseCommand
from django.core.management import call_command
class Command(BaseCommand):
    """Management command that runs every populate_* command to seed the
    5th Edition D&D database. Requires initial migrations to be applied."""

    help = 'Will auto populate the database with all the base information from 5th Edition Dungeons and Dragons.'

    def handle(self, *args, **kwargs):
        """Invoke each populate command in dependency order; a failure in one
        command is reported but does not stop the remaining ones."""
        COMMANDS = [
            "populate_skills", "populate_spell_tables", "populate_languages", "populate_tools", "populate_items",
            "populate_damage_types", "populate_actions", "populate_weapon_properties", "populate_weapons",
            "populate_armor", "populate_features_part_1", "populate_alignments", "populate_prestige_classes",
            "populate_subraces", "populate_classes", "populate_races", "populate_backgrounds",
            "populate_spells", "populate_conditions", "populate_mounts_and_vehicles", "populate_features_part_2",
            "populate_dragon_ancestries", "populate_land_types", "populate_enemy_races",
            # "populate_personality_traits", "populate_ideals", "populate_bonds", "populate_flaws",
        ]
        for command in COMMANDS:
            print(f"Starting {command} command...")
            try:
                call_command(command, *args, **kwargs)
            except Exception as error:
                print(f"Had error with {command} command: {error}")
            else:
                print(f"Completed {command} command successfully!")
| true |
9aae3b7e49def95860e965b89ecc700ec8e145c8 | Python | faizkhan12/Basics-of-Python | /exercise 5.11.py | UTF-8 | 445 | 4.09375 | 4 | [] | no_license | Ordinals=[1,2,3,4,5,6,7,8,9]
# Map each value to its printed ordinal string. Anything not in the mapping
# falls back to '9th', exactly like the final `else` of the original
# if/elif chain.
ordinal_words = {1: "1st", 2: "2nd", 3: "3rd", 4: "4th",
                 5: "5th", 6: "6th", 7: "7th", 8: "8th"}
for Ordinal in Ordinals:
    print(ordinal_words.get(Ordinal, "9th"))
| true |
d25a86f26a9137faa1d310753a1bf68f9bb5c50e | Python | jondfin/Python-Projects | /AddAccess.py | UTF-8 | 2,537 | 2.9375 | 3 | [] | no_license | import sys, os, re
def userGroupExists(userGroupName):
    """Return silently if `userGroupName` is defined in UserGroups.txt;
    otherwise print a message and exit.

    Each line of the file has the form ``<group> = <users>``.
    """
    data = []
    with open("./files/UserGroups.txt", "r") as f:
        data = f.readlines()
    for line in data:
        (group, users) = filter(None, line.split("="))
        if group.strip() == userGroupName:
            return
    # BUG FIX: the original `print(...).format(...)` called .format on
    # print()'s None return value, raising AttributeError under Python 3.
    print("User group {} does not exist".format(userGroupName))
    sys.exit(0)
def objGroupExists(objectGroupName):
    """Return silently if `objectGroupName` is defined in ObjectGroups.txt;
    otherwise print a message and exit.
    """
    data = []
    with open("./files/ObjectGroups.txt", "r") as f:
        data = f.readlines()
    for line in data:
        (group, objects) = filter(None, line.split("="))
        if group.strip() == objectGroupName:
            return
    # BUG FIX: .format moved inside print(); the original called .format on
    # print()'s None return value, raising AttributeError under Python 3.
    print("Object group {} does not exist".format(objectGroupName))
    sys.exit(0)
def AddAccess(operation, userGroupName, objectGroupName):
    """Grant `userGroupName` permission `operation` on `objectGroupName`.

    AccessPermissions.txt lines look like
    ``<operation> = [('userGroup', 'objectGroup'), ...]``. The operation line
    is created if missing, the pair appended if not already present.
    """
    #Check if user group and object group exists
    userGroupExists(userGroupName)
    objGroupExists(objectGroupName)
    data = []
    index = 0
    opExists = False
    #search data
    with open("./files/AccessPermissions.txt", "r") as f:
        data = f.readlines()
    for line in data:
        (op, groups) = filter(None, line.split("="))
        if op.strip() == operation:
            opExists = True
            groups = groups.strip()
            # Raw string avoids the invalid-escape-sequence warnings the
            # original non-raw pattern produced; the regex itself is unchanged.
            pairs = re.findall(r"\(\s?\'(.*?)\'\s?,\s?\'(.*?)\'\s?\)", groups) #turn into tuple
            #search each tuple
            for pair in pairs:
                (u, o) = pair
                if u == userGroupName and o == objectGroupName:
                    # BUG FIX (here and below): .format moved inside print();
                    # print(...).format(...) raised AttributeError on Python 3.
                    print("User group {} already has permission <{}> for object group: {}".format(userGroupName, operation, objectGroupName))
                    return
            #access doesnt exist for usergroup
            break
        index += 1
    #add new operation if it doesnt exists
    if opExists == False:
        with open("./files/AccessPermissions.txt", "a") as f:
            t = (userGroupName, objectGroupName)
            newline = "{} = [{}]".format(operation, t)
            print("Success: {}".format(newline))
            f.write(newline)
        return
    #add new tuple to operation
    # NOTE(review): writelines() on an "r+" handle rewrites from offset 0
    # without truncating; safe here only because the new content is never
    # shorter than the old -- worth confirming.
    with open("./files/AccessPermissions.txt", "r+") as f:
        (op, groups) = filter(None, data[index].split("="))
        groups = groups.strip()
        pairs = re.findall(r"\(\s?\'(.*?)\'\s?,\s?\'(.*?)\'\s?\)", groups)
        t = (userGroupName, objectGroupName)
        pairs.append(t)
        newline = "{} = {}\n".format(operation, pairs)
        data[index] = newline
        print("Success: {} = {}".format(operation, pairs))
        f.writelines(data)
    return
if __name__ == "__main__":
    # Usage: AddAccess.py <operation> <userGroup> [<objectGroup>]
    #If object group is defined, need to add object instead of user?
    try:
        if len(sys.argv) == 4:
            AddAccess(sys.argv[1], sys.argv[2], sys.argv[3])
        elif len(sys.argv) == 3:
            # With no object group on the command line, pass the literal "null".
            AddAccess(sys.argv[1], sys.argv[2], "null")
    except IndexError:
        print("Invalid arguments")
| true |
498ee4978d8f9555dbb54f43def7ce3fcfc1ab0c | Python | afcarl/swedish_chef | /preprocessing/mover.py | UTF-8 | 1,952 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | """
Module for moving files around for the preprocessor.
"""
import glob
import os
import shutil
import myio.myio as myio
import preprocessing.prep_global as prep_global
import chef_global.debug as debug
import chef_global.config as config
def _append_all_recipe_files():
"""
Takes each file in the config.DATA_DIRECTORY folder and
appends them onto a single recipe file which will then
contain plain English recipes all in one file.
@return: void
"""
if not os.path.isdir(config.DATA_DIRECTORY):
raise ValueError("config.DATA_DIRECTORY not set.")
else:
files = glob.glob(config.DATA_DIRECTORY + "/*.xml")
myio.join_all_files(files, config.RECIPE_FILE_PATH)
def _copy_master_to_data_location():
"""
Copies the master directory files over the data
directory. Deletes any files in the data directory
before copying.
@return: void
"""
debug.debug_print("_copy_master_to_data_location called, making assertions...")
debug.assert_value_is_set(config.MASTER_DATA_DIRECTORY,
"config.MASTER_DATA_DIRECTORY")
debug.assert_value_is_set(config.DATA_DIRECTORY,
"config.DATA_DIRECTORY")
print("Deleting old data files...")
prep_global._apply_func_to_each_data_file(os.remove)
print("Copying data files from " + str(config.MASTER_DATA_DIRECTORY) +
" to " + str(config.DATA_DIRECTORY))
# Collect files to copy
list_of_file_names = [os.path.join(config.MASTER_DATA_DIRECTORY, file_name) for
file_name in os.listdir(config.MASTER_DATA_DIRECTORY)]
for f in list_of_file_names:
f_name = os.path.split(f)[-1]
if f_name != "zuni.xml":
# Don't include zuni - it is super weird
debug.debug_print("Copying " + f_name + " to new directory...")
shutil.copyfile(f, os.path.join(config.DATA_DIRECTORY, f_name))
| true |
bcdba2a7326f2a793454324b69acbf2ad3b23f0f | Python | saubhik/leetcode | /problems/valid_parentheses.py | UTF-8 | 935 | 3.71875 | 4 | [] | no_license | from unittest import TestCase
class Solution:
    # Time Complexity: O(n).
    # Space Complexity: O(n).
    def isValid(self, s: str) -> bool:
        """Return True iff every bracket in `s` is matched and properly nested."""
        closer_to_opener = {")": "(", "}": "{", "]": "["}
        pending = []
        for ch in s:
            if ch in closer_to_opener:
                # A closer must match the most recent unmatched opener.
                if not pending or pending[-1] != closer_to_opener[ch]:
                    return False
                pending.pop()
            else:
                pending.append(ch)
        # Valid only if every opener was consumed.
        return not pending
class TestSolution(TestCase):
def test_is_valid(self):
assert Solution().isValid(s="()") is True
assert Solution().isValid(s="()[]{}") is True
assert Solution().isValid(s="(]") is False
assert Solution().isValid(s="([)]") is False
assert Solution().isValid(s="{[]}") is True
| true |
8d72865268c7b7ddad22de2664db3bcf9eb10db8 | Python | higor-gomes93/curso_programacao_python_udemy | /Sessão 8 - Exercícios/ex36.py | UTF-8 | 458 | 4 | 4 | [] | no_license | '''
Faça uma função não-recursiva que receba um número inteiro positivo n e retorne o superfatorial desse número. O
superfatorial de um número N é definido pelo produto dos N primeiros fatoriais de N.
'''
def superfatorial(numero):
    """Return the superfactorial of `numero`: 1! * 2! * ... * numero!.

    PERFORMANCE: the original recomputed each i! from scratch with a nested
    loop (O(n^2) multiplications); here i! is built incrementally from
    (i-1)!, one multiplication per step, with identical results.
    """
    resultado = 1
    fatorial_atual = 1
    for i in range(1, numero + 1):
        fatorial_atual *= i      # fatorial_atual == i!
        resultado *= fatorial_atual
    return resultado
print(superfatorial(4))
| true |
d0a4729a6f98d20944f59b7f6195064a25e8957e | Python | WHOIGit/nes-lter-ims | /neslter/workflow/underway.py | UTF-8 | 1,563 | 2.515625 | 3 | [
"MIT"
] | permissive | from . import logger
import pandas as pd
from neslter.parsing.files import Resolver
from neslter.parsing.underway import Underway, DATETIME
from .api import Workflow
UNDERWAY = 'underway'
class UnderwayWorkflow(Workflow):
    """Workflow that resolves directories and builds the underway data
    product for a single cruise."""
    def __init__(self, cruise):
        self.cruise = cruise
    def directories(self):
        """Directories where underway products for this cruise live."""
        return Resolver().directories(UNDERWAY, self.cruise)
    def filename(self):
        """Base filename of this cruise's underway product."""
        return f'{self.cruise}_underway'
    def produce_product(self):
        """Parse this cruise's underway data into a DataFrame."""
        return Underway(self.cruise).to_dataframe()
class TimeToLocation(object):
    """Look up the ship's (lat, lon) at a given time from underway data."""
    def __init__(self, underway_data):
        """underway data is the product of the underway workflow"""
        self.uw = underway_data.copy()
        self.uw.index = pd.to_datetime(self.uw[DATETIME], utc=True)
    def _infer_lat_lon_cols(self):
        """Return the (lat, lon) column names used by this cruise's feed."""
        if 'gps_furuno_latitude' in self.uw.columns:
            # FIXME search for other gps models
            return 'gps_furuno_latitude', 'gps_furuno_longitude'
        elif 'dec_lat' in self.uw.columns:
            return 'dec_lat', 'dec_lon'
        else:
            raise KeyError('cannot infer lat/lon columns of underway data')
    def time_to_location(self, time):
        """Return (lat, lon) of the last underway fix at or before `time`."""
        lat_col, lon_col = self._infer_lat_lon_cols()
        # BUG FIX: `utc=True` belongs to pd.to_datetime; the original passed
        # it to Index.searchsorted, which raised TypeError on every call.
        index = max(0, self.uw.index.searchsorted(pd.to_datetime(time, utc=True)) - 1)
        row = self.uw.iloc[index]
        return row[lat_col], row[lon_col]
    def time_to_lat(self, time):
        """Latitude component of time_to_location()."""
        return self.time_to_location(time)[0]
    def time_to_lon(self, time):
        """Longitude component of time_to_location()."""
        return self.time_to_location(time)[1]
529ec3f542c22783e90fcf7c7bb138018ad07751 | Python | hafsaabbas/-saudidevorg | /23 day.py | UTF-8 | 938 | 3.0625 | 3 | [] | no_license | Python 3.7.4 (tags/v3.7.4:e09359112e, Jul 8 2019, 19:29:22) [MSC v.1916 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> dic={"model":"ford","brand":"mus","year":12344}
>>> if "model" in dic:
print("yyas")
yyas
>>> print(len(dic))
3
>>>
>>> dic["black"]="color"
>>> print(dic)
{'model': 'ford', 'brand': 'mus', 'year': 12344, 'black': 'color'}
>>> dic.pop("model")
'ford'
>>> print(dic)
{'brand': 'mus', 'year': 12344, 'black': 'color'}
>>> dic.popitem()
('black', 'color')
>>> print(dic)
{'brand': 'mus', 'year': 12344}
>>> del dic["year"]
>>> print(dic)
{'brand': 'mus'}
>>> del dic
>>> print(dic)
Traceback (most recent call last):
File "<pyshell#15>", line 1, in <module>
print(dic)
NameError: name 'dic' is not defined
>>> dic={'model': 'ford', 'brand': 'mus', 'year':12344, 'black': 'color'}
>>> dic.clear()
>>> print(dic)
{}
>>>
| true |
f8bba9213a799ff0a2bf31440c37f5b9213ebb20 | Python | daniboomberger/modul122_LB2 | /create_invoice_text.py | UTF-8 | 2,765 | 2.78125 | 3 | [] | no_license | import configuration
import create_log
from string import Template
class createInvoice():
    """Render plain-text invoices from the templates named in `configuration`."""
    def __init__(self):
        self.finished_invoice_text = ''
        self.positions_text = ''
    #creates the invoice.txt locally
    def writeInvoice(self, invoice_data, calculated_date, invoice_positions, calculated_price):
        """Fill the invoice template and write
        '<customer>_<invoice-number>_invoice.txt' in the working directory.

        NOTE(review): invoice_data is indexed positionally (0..18); the exact
        field layout comes from the caller's .data file -- unverified here.
        """
        invoice_text = open(configuration.TEMPLATE_INVOICE_TEXT).read()
        invoice_template = Template(invoice_text)
        self.createInvoicePosString(invoice_positions)
        calculated_price = "{:10.2f}".format(calculated_price)
        out_path = invoice_data[7] + "_" + invoice_data[0].replace('\'Rechnung_', '') + "_invoice.txt"
        filename = open(out_path, "w", encoding='utf-8')
        try:
            self.finished_invoice_text = invoice_template.substitute(
                firmname = invoice_data[8],
                name = invoice_data[9],
                address = invoice_data[10],
                firm_location = invoice_data[11],
                uid = invoice_data[12],
                location = invoice_data[2],
                date = invoice_data[3],
                customer_number = invoice_data[7],
                job_number = invoice_data[1].replace('Auftrag_', ''),
                invoice_number = invoice_data[0].replace('\'Rechnung_', ''),
                receiver_firm = invoice_data[16],
                receiver_firm_address = invoice_data[17],
                receiver_firm_location = invoice_data[18],
                calculated_date = calculated_date,
                positions_text = self.positions_text,
                total_price = calculated_price
            )
            filename.write(self.finished_invoice_text)
        except Exception:
            # BUG FIX: the original logged 'failed to create: ' + filename
            # where `filename` was the *file object*, raising TypeError inside
            # the handler; log the path string instead. The bare `except:` is
            # also narrowed to Exception.
            create_log.log().createLog('failed to create: ' + out_path)
        finally:
            # BUG FIX: the original never closed the output handle.
            filename.close()
    #creates a dynamic string for position (its possible to get multiple invoice positions in .data file)
    def createInvoicePosString(self, position_data):
        """Append one rendered position block per row of `position_data`
        to self.positions_text."""
        invoice_position_template_text = open(configuration.TEMPLATE_INVOICE_TEXT_POSITION).read()
        invoice_postion_template = Template(invoice_position_template_text)
        try:
            for i in range(0, len(position_data)):
                self.positions_text += invoice_postion_template.substitute(
                    position_id = position_data[i][1],
                    position_description = position_data[i][2],
                    quantity = position_data[i][3],
                    price_pre_quantity = position_data[i][4],
                    price = position_data[i][5],
                    mwst = position_data[i][6]
                )
        except Exception:
            create_log.log().createLog('failed to create invoice position template for text')
| true |
2aa2dd4e0f6626da47a9289fa0e0ba7f733a6351 | Python | Madhu-Kumar-S/Python_Basics | /pattern_printing/dimond.py | UTF-8 | 215 | 3.515625 | 4 | [] | no_license | n = int(input("Enter limit:"))
# Print a diamond of '*' using a running left margin `s`.
s = n
# Top half: rows 1..n, margin shrinks while the star count grows.
for i in range(1, n+1):
    print(" " * s, end=' ')
    print("* "*i)
    s = s-1
# Step the margin back out past the widest row for the bottom half.
s = s+2
# Bottom half: rows n-1..1, margin grows while the star count shrinks.
for i in range(n-1, 0, -1):
    print(" " * s, end=' ')
    print("* "*i)
    s = s+1
| true |
246a0878df467c2ae71d20b66b0bdb5afea2eaab | Python | jinseoo/DataSciPy | /src/파이썬코드(py)/Ch11/code_11_7.py | UTF-8 | 551 | 3.25 | 3 | [] | no_license | #
# 따라하며 배우는 파이썬과 데이터과학(생능출판사 2020)
# 11.7 막대형 차트도 손쉽게 그려보자, 290쪽
#
from matplotlib import pyplot as plt
# GDP per capita by decade
years = [1950, 1960, 1970, 1980, 1990, 2000, 2010]
gdp = [67.0, 80.0, 257.0, 1686.0, 6505, 11865.3, 22105.3]
plt.bar(range(len(years)), gdp)
plt.title("GDP per capita") # set the chart title
plt.ylabel("dollars") # label the y axis
# put the year labels on the x-axis ticks (the original comment said y-axis)
plt.xticks(range(len(years)), years)
plt.show() | true |
72bab96fa805c97010cd6fb4cc1d2575a4ee8fa1 | Python | davelush/autotrader-search | /app.py | UTF-8 | 770 | 2.640625 | 3 | [] | no_license | from autotrader.scraper import Scraper
from autotrader.vehiclerepository import VehicleRepository
import psycopg2
def main():
# set up search parameters
max_distance = 1500
postcode = "rg315nr"
berths = 6
max_price = 27500
keywords = "bunk"
postgres_host = "localhost"
postgres_port = 5432
postgres_db = "postgres"
try:
postgres_connection = psycopg2.connect(
host=postgres_host,
port=postgres_port,
dbname=postgres_db
)
scraper = Scraper()
repo = VehicleRepository(postgres_connection)
vans = scraper.get_vehicles(max_distance, postcode, berths, max_price, keywords)
repo.store(vans)
finally:
postgres_connection.close()
main()
| true |
9e758e15f20b4ee61294e0bbb7b782b3d145970e | Python | LimaRubson/Estudos-Python | /ex004.py | UTF-8 | 736 | 4.6875 | 5 | [] | no_license |
#Faça um programa que leia algo pelo teclado e mostre na tela o seu tipo primitivo a todos as informações possíveis sobre ele.
a = input('Digite algo: ')#Retorna uma STRING independente do tipo
print('O tipo primitivo desse valor é ', type(a))
print("Só tem espaço? ", a.isspace())#Só tem espacos
print("É um número? ", a.isnumeric())#Verifica se tem um número
print("É alfabético? ", a.isalpha())#Verifica se é alfabético
print("É alfanumérico? ", a.isalnum())#Verifica se é alfanumérico
print("Está em maiúscula? ", a.isupper())#Verifica se está em maiúscula
print("Está em minúscula? ", a.islower())#Verifica se está em minúscula
print("Está capitalizada? ", a.istitle())#Verifica se está capitalizada
| true |
fbb07fd358f0e477dc48c0a213c2990d626a9cb8 | Python | bricaud/OCR-classif | /classify_docs_from_graph.py | UTF-8 | 1,090 | 2.734375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
""" Classify the documents using the graph and the texts in pickle file (dataframe).
"""
import txt2graph
import argparse
import os
import sys
parser = argparse.ArgumentParser(description='Create the graph from the texts in pickle file (dataframe).')
parser.add_argument('folder',
help='folder where the pdf files are stored')
parser.add_argument('csv_file',
help='csv file to store the classification results')
args = parser.parse_args()
input_dic = vars(args)
PDF_PATH = input_dic['folder']
PICKLE_PATH = os.path.join(PDF_PATH,'pickle')
CSV_FILE = input_dic['csv_file']
print('Pickle path: ',PICKLE_PATH)
#PICKLE_PATH = os.path.join(input_dic['folder'],'pickle')
PICKLE_FILE = os.path.join(PICKLE_PATH,'texts2.pkl')
GRAPH_NAME = os.path.join(PICKLE_PATH,'graph.pkl')
CSV_PATH = os.path.join(PDF_PATH,'csv')
if not os.path.exists(CSV_PATH):
os.makedirs(CSV_PATH)
csv_filename = os.path.join(CSV_PATH,CSV_FILE)
txt2graph.doc_classif(GRAPH_NAME,PICKLE_FILE,csv_filename)
print('CSV file saved in {}'.format(csv_filename))
| true |
25ff0b26a1e3992c6074899053b835ed97abe0b9 | Python | Shreyansmalu/numberGuessingGame | /countingCha2.py | UTF-8 | 258 | 3.53125 | 4 | [] | no_license | count = input("hi")
# `count` holds the text read on the previous line; echo it back.
print (count)
characterCount = 0
# Word count starts at 1: N spaces separate N+1 words (assumes single spaces).
wordCount = 1
for cap in count:
    if cap ==' ':
        wordCount = wordCount +1
    characterCount = characterCount +1
    print (cap)  # echoes every character (including spaces) one per line
print (characterCount)
print (wordCount) | true |
d8dc915167c15375e7f68f2c31665069b0d2d0e8 | Python | python-practice-b02-006/gromov | /lab2.2/ex3.py | UTF-8 | 403 | 3.21875 | 3 | [] | no_license | from turtle import *
from numpy import *
def addnum(num: int):
    """Draw digit `num` by replaying the turtle commands stored on line `num`
    of ex3_sup.txt (commands on a line are separated by ' -> ')."""
    # Use a context manager so the file handle is closed; the original
    # open(...).readlines() leaked it.
    with open('ex3_sup.txt', 'r') as source:
        inp = source.readlines()
    s = inp[num].rstrip()
    commlist = s.split(' -> ')
    for command in commlist:
        # SECURITY NOTE: eval() executes arbitrary code read from
        # ex3_sup.txt -- only run this with a trusted command file.
        eval(command)
def drawindex(index: str):
    """Draw every digit of the postal index string in turn.

    Annotation corrected from `float`: the body iterates `index` character by
    character, so it must be a string of digits (a float is not iterable).
    """
    for num in index:
        addnum(int(num))
# Configure the turtle and draw the postal index '141700'.
shape('turtle')
speed(8)
color('blue','blue')
width(3)
left(90)
index = '141700'
drawindex(index)
done()
| true |
1171336cc7181b886ec2c0c3fb4f912efc6b3009 | Python | Dhavade/Data-structure | /Stack/stack_list.py | UTF-8 | 931 | 3.796875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 21 11:33:45 2021
@author: Admin
"""
stack=[]
def push():
if len(stack)==n:
print("stack is full")
else:
element=input("enter tha element")
stack.append(element)
print(stack)
def pop_element():
if not stack:
print("stack is empty")
else:
e=stack.pop()
print("remove element",e)
print(stack)
def display():
if not stack:
print("stack is empty")
else:
print(stack)
n=int(input("limit of stack"))
while True:
print('select tha oparation 1. push 2.pop 3.display 4.quit')
choice=int(input())
if choice==1:
push()
elif choice==2:
pop_element()
elif choice==3:
display()
elif choice==4:
break
else:
print("enter tha correct oparation") | true |
fd4381e571b39bca9de867080ae0ed5b2bf402ac | Python | ShelbertT/20210513_arcgis-earth-api | /WorldTour.py | UTF-8 | 4,726 | 2.5625 | 3 | [] | no_license | import random
import time
import requests
import os
import sys
import getpass
import json
import cv2
import readkml
base_address = "http://localhost:8000/"
camera = "arcgisearth/camera"
snapshot = "arcgisearth/snapshot"
def mkdir(path):
    """Create `path` (including parents) if it does not already exist.

    Surrounding whitespace and a trailing backslash are stripped first,
    matching the original behaviour.
    """
    path = path.strip().rstrip("\\")
    # exist_ok makes this a single atomic-ish call, avoiding the racy
    # exists()-then-makedirs pattern of the original.
    os.makedirs(path, exist_ok=True)
def random_location():
    """Pick a uniform random point inside `region` and return the camera JSON for it."""
    global x # globals so the snapshot code can reuse the coordinates in filenames
    x = (random.random() * 360) - 180 # longitude
    global y
    y = (random.random() * 180) - 90  # latitude
    # Rejection sampling: keep re-rolling until the point falls inside `region`.
    while not readkml.whether_point_within((x, y), region):
        x = (random.random() * 360) - 180
        y = (random.random() * 180) - 90
    location_format_content = location_format()
    return location_format_content
def location_format():
    """Serialise the global (x, y) into the ArcGIS Earth camera JSON payload
    (fixed altitude 8000 m, level north-facing view)."""
    location = f'''
    {{
        "position": {{
            "x": {x},
            "y": {y},
            "z": 8000,
            "spatialReference": {{
                "wkid": 4326
            }}
        }},
        "heading": 0,
        "tilt": 0,
        "roll": 0
    }}
    '''
    return location.strip('\n')
def set_camera():
    """PUT a random in-region camera position to the ArcGIS Earth REST API."""
    location = random_location()
    url = base_address + camera
    # print(url)
    headers = {'content-Type': 'application/json'}
    r = requests.put(url, location, headers=headers, verify=False)
def get_camera():
    """GET the current camera state from ArcGIS Earth; returns the raw JSON bytes."""
    url = base_address + camera
    r = requests.get(url, verify=False)
    print(r.content)
    return r.content
def get_snapshot():
    """Download the current ArcGIS Earth snapshot and save it as '<x>,<y>.jpg'
    inside the global `mkdirpath` folder."""
    url = base_address + snapshot
    r = requests.get(url, stream=True, verify=False)
    filename = f'{x},{y}'
    print(filename)
    # BUG FIX: the original wrote every snapshot to the literal file
    # "(unknown).jpg", overwriting it each run, even though it computed and
    # printed the per-coordinate filename; use that filename for the path.
    path = f"{mkdirpath}\\{filename}.jpg"
    with open(path, 'wb') as f:
        for chunk in r:
            f.write(chunk)
def check_earth():
    """Probe the ArcGIS Earth API with one camera call; on any failure tell
    the user to install/run ArcGIS Earth, pause, and exit."""
    try:
        set_camera()
        return
    # Bare except: any failure (connection refused, bad response, ...) is
    # treated as "ArcGIS Earth is not running".
    except:
        print('Please install and run ArcGIS Earth first! You can download it from here:')
        print('https://www.esri.com/en-us/arcgis/products/arcgis-earth/overview')
        time.sleep(20)
        sys.exit(0)
def check_settings():
    """Ensure ArcGIS Earth's automation API autoStart flag is enabled.

    If it was off, rewrite settings.json with autoStart=True and exit so the
    user can restart ArcGIS Earth (Windows-only path layout).
    """
    username = getpass.getuser()
    with open(f'C:\\Users\\{username}\\Documents\\ArcGISEarth\\automation\\settings.json', 'r') as f:
        data = json.load(f)
    if data['autoStart']:
        return
    else:
        data['autoStart'] = True
        with open(f'C:\\Users\\{username}\\Documents\\ArcGISEarth\\automation\\settings.json', 'w') as r:
            json.dump(data, r)
        print('Successfully updated the Auto-API settings, please restart ArcGIS Earth and this program.')
        time.sleep(20)
        sys.exit(0)
def manual_snapshot():
    """Snapshot the camera position the user set by hand in ArcGIS Earth:
    read the current camera, re-send it, wait for the view, then save."""
    camera_raw = get_camera()
    camera_dict = json.loads(camera_raw)
    print(camera_dict)
    # Publish the coordinates to the globals used by location_format()/get_snapshot().
    global x
    x = camera_dict['position']['x']
    global y
    y = camera_dict['position']['y']
    location = location_format()
    url = base_address + camera
    headers = {'content-Type': 'application/json'}
    r = requests.put(url, location, headers=headers, verify=False)
    # Give ArcGIS Earth time to render before capturing.
    time.sleep(10)
    get_snapshot()
if __name__ == '__main__':
    region = 'Africa'
    check_settings()
    check_earth() # pre-flight check that ArcGIS Earth is reachable
    i = 1
    mkdirpath = f'C:\\What_a_Wonderful_World\\{region}'
    mkdir(mkdirpath)
    print(
        '\n' + '\n' + '------------------------------' + '\n' + 'What\'s life without whimsy?' + '\n' + '------------------------------' + '\n' + '\n' + '\n')
    print('Initiating......Files are stored in this location:' + '\n' + mkdirpath)
    time.sleep(5)
    # Main loop: teleport to 999 random in-region spots, snapshotting each.
    while i < 1000:
        print('\n' + 'Teleporting to the next location... No.' + str(i))
        set_camera()
        time.sleep(20)  # let ArcGIS Earth finish rendering the new view
        get_snapshot()
        i = i + 1
# mkdirpath = f'C:\\What_a_Wonderful_World\\manual'
# mkdir(mkdirpath)
# manual_snapshot()
# driver = webdriver.Chrome()
# driver.get("http://localhost:8000/sample/index.html")
#
# driver.find_element_by_xpath('//*[@id="apisPanel"]/div[2]/div').click()
# driver.find_element_by_id("inputArea").send_keys(location)
# driver.find_element_by_xpath('//*[@id="apiArgs"]/div/button[2]').click()
#
# time.sleep(3)
#
# driver.find_element_by_xpath('//*[@id="apisPanel"]/div[15]/div').click()
# driver.find_element_by_xpath('//*[@id="btnDiv"]').click()
#
# url = driver.find_element_by_xpath('//*[@id="respImg"]').get_attribute('src')
# driver.quit()
#
# r = requests.get(url)
#
# with open('test', 'wb') as f:
# f.write(r.content)
| true |
a0716135a887d942c106a4162f89a26f29a04a43 | Python | alecherryy/CS5001_Computer_Science | /Homework _4/kmeans_driver.py | UTF-8 | 5,337 | 2.828125 | 3 | [] | no_license | '''
Alessia Pizzoccheri - CS 5001 02
'''
import random
import turtle
import kmeans_viz
DATA = [
[-32.97, -21.06], [9.01, -31.63], [-20.35, 28.73], [-0.18, 26.73],
[-25.05, -9.56], [-0.13, 23.83], [19.88, -18.32], [17.49, -14.09],
[17.85, 27.17], [-30.94, -8.85], [4.81, 42.22], [-4.59, 11.18],
[9.96, -35.64], [24.72, -11.39], [14.44, -43.31], [-10.49, 33.55],
[4.24, 31.54], [-27.12, -17.34], [25.24, -12.61], [20.26, -4.7],
[-16.4, -19.22], [-15.31, -7.65], [-26.61, -20.31], [15.22, -30.33],
[-29.3, -12.42], [-50.24, -21.18], [-32.67, -13.11], [-30.47, -17.6],
[-23.25, -6.72], [23.08, -9.34], [-25.44, -6.09], [-37.91, -4.55],
[0.14, 34.76], [7.93, 49.21], [-6.76, 12.14], [-19.13, -2.24],
[12.65, -7.23], [11.25, 25.98], [-9.03, 22.77], [9.29, -26.2],
[15.83, -1.45], [-22.98, -27.37], [-25.12, -23.35], [21.12, -26.68],
[20.39, -24.66], [26.69, -28.45], [-45.42, -25.22], [-8.37, -21.09],
[11.52, -16.15], [7.43, -32.89], [-31.94, -11.86], [14.48, -10.08],
[0.63, -20.52], [9.86, 13.79], [-28.87, -17.15], [-29.67, -22.44],
[-20.94, -22.59], [11.85, -9.23], [30.86, -21.06], [-3.8, 22.54],
[-5.84, 21.71], [-7.01, 23.65], [22.5, -11.17], [-25.71, -14.13],
[-32.62, -15.93], [-7.27, 12.77], [26.57, -13.77], [9.94, 26.95],
[-22.45, -23.18], [-34.7, -5.62], [29.53, -22.88], [0.7, 31.02],
[-22.52, -10.02], [-23.36, -14.54], [-19.44, -12.94], [-0.5, 23.36],
[-45.27, -19.8], [8.95, 13.63], [47.16, -14.46], [5.57, 4.85],
[-19.03, -25.41], [28.16, -13.86], [-15.42, -14.68], [10.19, -25.08],
[0.44, 23.65], [-20.71, -20.94], [35.91, -20.07], [42.81, -21.88],
[5.1, 9.33], [-15.8, -18.47], [5.39, -26.82], [-40.53, -17.16],
[-29.54, 23.72], [7.8, 23.4], [-22.19, -27.76], [-23.48, -25.01],
[-21.2, -21.74], [23.14, -24.14], [-28.13, -13.04], [-24.38, -6.79] ]
SQUARE_ROOT = 0.5
POWER = 2
NUM_CENTROIDS = 4
DISTANCE = 100000000
COLORS = ['Purple','Red','Blue','Orange']
def initialize_centroids():
    """Pick NUM_CENTROIDS distinct starting centroids at random from DATA."""
    chosen = []
    while len(chosen) < NUM_CENTROIDS:
        candidate = random.choice(DATA)
        # Skip duplicates so every centroid starts at a distinct point.
        if candidate not in chosen:
            chosen.append(candidate)
    return chosen
def euclidean(a,b):
    """Return the Euclidean (straight-line) distance between 2-D points a and b."""
    delta_x = (a[0] - b[0]) ** POWER
    delta_y = (a[1] - b[1]) ** POWER
    return (delta_x + delta_y) ** SQUARE_ROOT
def create_cluster(centroids):
    """Assign every DATA point to its nearest centroid.

    Returns a list of [data_index, centroid_index] pairs. Ties keep the
    earlier centroid, exactly like the original strict '<' comparison.
    """
    assignment = []
    for data_index, sample in enumerate(DATA):
        best_index = None
        best_distance = DISTANCE
        for centroid_index, centroid in enumerate(centroids):
            candidate = euclidean(sample, centroid)
            if candidate < best_distance:
                best_distance = candidate
                best_index = centroid_index
        assignment.append([data_index, best_index])
    return assignment
def optimize_centroids(centroids,assignment):
'''
Name; optimize_centroids
Parameter: nested list, nested list
Return: nested list
'''
new_centroids = []
for i in range(len(centroids)):
# empty lists for the x and coordinates
x_coordinates = []
y_coordinates = []
# assign each DATA pair value to respective centroid
for j in range(len(assignment)):
if assignment[j][1] == i:
data_index = assignment[j][0]
x_coordinates.append(DATA[data_index][0])
y_coordinates.append(DATA[data_index][1])
# sum total values of x and total values of y
x_tot = sum(x_coordinates)
y_tot = sum(y_coordinates)
# calculate the average value of x coordinate and y coordinate
x_average = x_tot / len(x_coordinates)
y_average = y_tot / len(y_coordinates)
# assign new coordinates to each centroid
pair = [x_average,y_average]
# append pairs to empty new centroids list
new_centroids.append(pair)
return new_centroids
def main():
# create centroids
centroids = initialize_centroids()
# create assignment list of centroids and DATA
assignment = create_cluster(centroids)
# uncomment line 141 and 142 to see initial centroids
# kmeans_viz.draw_centroids(centroids,COLORS)
# kmeans_viz.draw_assignment(centroids, DATA, assignment, COLORS)
# improve centroids' coordinates
centroids = optimize_centroids(centroids,assignment)
# update assignment based on improved centroids
assignment = create_cluster(centroids)
# draw clusters
kmeans_viz.draw_centroids(centroids,COLORS)
kmeans_viz.draw_assignment(centroids, DATA, assignment, COLORS)
main() | true |
f776988ae3ddad5fd626b971dc11d59030cefdf8 | Python | PerfectFit-project/goal_setting_virtual_coach | /actions/actions.py | UTF-8 | 26,719 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | # This files contains your custom actions which can be used to run
# custom Python code.
## if you want to run code, that would go in the actions.py file (python).
# code that you create and action, if you need action to be triggered add to stories, add to list of actions in domain file, if the action
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/custom-actions
from cgitb import text
from typing import Any, Text, Dict, List
from urllib import response
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.events import SlotSet
from rasa_sdk.events import FollowupAction
from rasa_sdk.events import ConversationPaused
import pandas as pd
import numpy as np
import datetime
import time
# Moods, sorted by quadrant w.r.t. valence and arousal
moods_ha_lv = ["afraid", "alarmed", "annoyed", "distressed", "angry",
"frustrated"]
moods_la_lv = ["miserable", "depressed", "gloomy", "tense", "droopy", "sad",
"tired", "bored", "sleepy"] # sleepy actually in different quadrant
moods_la_hv = ["content", "serene", "calm", "relaxed", "tranquil"]
moods_ha_hv = ["satisfied", "pleased", "delighted", "happy", "glad",
"astonished", "aroused", "excited"]
# Extract custom data from rasa webchat
def extract_metadata_from_tracker(tracker: Tracker):
events = tracker.current_state()['events']
user_events = []
for e in events:
if e['event'] == 'user':
user_events.append(e)
return user_events[-1]['metadata']
# Answer based on mood
class ActionAnswerMood(Action):
def name(self):
return "action_answer_mood"
async def run(self, dispatcher, tracker, domain):
curr_mood = tracker.get_slot('mood')
if curr_mood == "neutral":
dispatcher.utter_message(response="utter_mood_neutral")
elif curr_mood in moods_ha_lv:
dispatcher.utter_message(response="utter_mood_negative_valence_high_arousal_quadrant")
elif curr_mood in moods_la_lv:
dispatcher.utter_message(response="utter_mood_negative_valence_low_arousal_quadrant")
elif curr_mood in moods_la_hv:
dispatcher.utter_message(response="utter_mood_positive_valence_low_arousal_quadrant")
else:
dispatcher.utter_message(response="utter_mood_positive_valence_high_arousal_quadrant")
return []
# Introduce bot
class ExplainBot(Action):
def name(self) -> Text:
return "explain_bot"
def run(self, dispatcher, tracker, domain):
dispatcher.utter_message(response="utter_bot_name")
dispatcher.utter_message(text="Physical activity has significant health benefits for hearts, bodies and minds.")
dispatcher.utter_message(text="In this session, we will focus on running or walking to stay or become active.")
dispatcher.utter_message(text="And why should you set a goal for that? Because having a goal is great to stay motivated and focused on what you want to achieve.")
dispatcher.utter_message(text="We will set a long-term running or walking goal.")
dispatcher.utter_message(response="utter_long_term")
return[]
# Introduce bot
class Overview(Action):
def name(self) -> Text:
return "overview"
def run(self, dispatcher, tracker, domain):
dispatcher.utter_message(text="Alright then, let me give you an overview of what we're going to do:")
dispatcher.utter_message(text="We're going to set a running or walking goal that you would like to achieve.")
dispatcher.utter_message(text="First, I’ll ask you to think about a running or walking goal that you want to achieve.")
dispatcher.utter_message(text="This is only to make sure that you have a first idea of the goal that you would like to set.")
dispatcher.utter_message(text="Next, I will show you some examples of other people to give you an idea of how you can refine your goal.")
dispatcher.utter_message(text="I will also ask you some details about your goal to make your goal as complete as possible.")
dispatcher.utter_message(text="And don't worry, before we finalize your goal, we will look at it together and I will give you the chance to make changes if you would like to.")
return[]
# Display goal type buttons
class GoalType(Action):
def name(self) -> Text:
return "action_goal_type"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
buttons = [
{"payload":'/running{"goal_type":"running"}', "title":"Running 🏃♀️"},
{"payload":'/walking{"goal_type":"walking"}', "title":"Walking 🚶"}
]
dispatcher.utter_message(text="Would you like to set a long-term goal for **running** or **walking**?", buttons=buttons)
return []
# Ask first goal
class FirstGoal(Action):
def name(self) -> Text:
return "ask_first_goal"
def run(self, dispatcher, tracker, domain):
goal_type = tracker.get_slot('goal_type')
dispatcher.utter_message(text="Let's think about the " + goal_type + " goal you want to achieve.")
dispatcher.utter_message(text="Don't worry, this is not your final goal yet! We will also look at some examples of other people later to help you make your goal more specific.")
dispatcher.utter_message(text="What " + goal_type + " goal would you like to achieve?")
return[]
# Respond on first goal
class GoalResponse(Action):
def name(self) -> Text:
return "goal_response"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
def contains_word(s, w):
return (' ' + w + ' ') in (' ' + s + ' ')
goal_type = tracker.get_slot('goal_type')
print(goal_type)
last_message = (tracker.latest_message)['text']
dispatcher.utter_message(text="Okay!")
if contains_word(last_message, 'step') or contains_word(last_message, 'steps'):
print("word detected step")
dispatcher.utter_message(text="It's always good to specify the amount that you want to go " + goal_type + ".")
elif contains_word(last_message, 'km') or contains_word(last_message, 'kilometers') or contains_word(last_message, 'kilometres') or contains_word(last_message, 'miles'):
print("word detected km")
dispatcher.utter_message(text="It's always good to specify the amount that you want to go " + goal_type + ".")
elif contains_word(last_message, 'every day') or contains_word(last_message, 'everyday') or contains_word(last_message, 'daily'):
print("word detected everyday")
dispatcher.utter_message(text="It's always good to specify how often you want to go " + goal_type + ".")
elif contains_word(last_message, 'marathon'):
print("word detected marathon")
dispatcher.utter_message(text="It's great to train for a marathon.")
elif contains_word(last_message, 'minutes') or contains_word(last_message, 'hour') or contains_word(last_message, 'hours'):
print("word detected duration")
dispatcher.utter_message(text="It's always good to specify the duration of your physical activity.")
elif contains_word(last_message, 'around the house') or contains_word(last_message, 'gym') or contains_word(last_message, 'grocery store') or contains_word(last_message, 'street'):
print("word detected duration")
dispatcher.utter_message(text="It's always good to specify where you want to go " + goal_type + ".")
else:
print("no word detected")
dispatcher.utter_message(text="It is good that you have thought about your " + goal_type + " goal.")
return[FollowupAction(name="first_example"), SlotSet("goal_type", goal_type)]
# Timer to measure the second example reading time
class Timer(Action):
def name(self) -> Text:
return "timer"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
end_read2 = time.perf_counter()
start_time2 = tracker.get_slot('start_time2')
read_second = int(end_read2 - start_time2)
print(read_second)
return[SlotSet("read_second", read_second), FollowupAction(name="opinion2_form")]
# First example
class FirstExample(Action):
def name(self) -> Text:
return "first_example"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
goal_type = tracker.get_slot('goal_type')
metadata = extract_metadata_from_tracker(tracker)
user_id = metadata['userid']
goals = pd.read_csv("experiment_data/goals.csv", sep=';', index_col='example')
user_examples = pd.read_csv("experiment_data/user_examples.csv", sep=';', index_col='id')
index = user_examples.loc[user_id]['example1']
intro1 = "🧍* " + goals.loc[index]['introduction']+"*"
goalhow1 = "**"+"🎯 " + goals.loc[index]['goal'] + " " + goals.loc[index]['how']+"**"
start_time1 = time.perf_counter()
dispatcher.utter_message(text="Now that you have an idea of the goal you want to achieve, let me show you two examples of people who achieved a running or walking goal.")
dispatcher.utter_message(text="Each person will first introduce themselves 🧍 and then tell you about the goal they achieved 🎯.")
dispatcher.utter_message(text="Here is how the person from the first example introduces themselves:")
dispatcher.utter_message(text=intro1)
dispatcher.utter_message(text="This person achieved the following goal:")
dispatcher.utter_message(text=goalhow1)
dispatcher.utter_message(text="Let me know when you finished reading this example!", buttons=[
{"payload":'/finished_reading', "title":"I have read the first example"}
])
print(start_time1)
return [SlotSet("start_time1", start_time1)]
# Second example
class SecondExample(Action):
def name(self) -> Text:
return "second_example"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
end_read1 = time.perf_counter()
start_time1 = tracker.get_slot('start_time1')
read_first = int(end_read1 - start_time1)
metadata = extract_metadata_from_tracker(tracker)
user_id = metadata['userid']
goals = pd.read_csv("experiment_data/goals.csv", sep=';', index_col='example')
user_examples = pd.read_csv("experiment_data/user_examples.csv", sep=';', index_col='id')
index = user_examples.loc[user_id]['example2']
intro2 = "🧍* " + goals.loc[index]['introduction']+"*"
goalhow2 = "**"+"🎯 " + goals.loc[index]['goal'] + " " + goals.loc[index]['how']+"**"
start_time2 = time.perf_counter()
dispatcher.utter_message(text="Here is how the person from the second example introduces themselves:")
dispatcher.utter_message(text=intro2)
dispatcher.utter_message(text="This person achieved the following goal:")
dispatcher.utter_message(text=goalhow2)
dispatcher.utter_message(text="Let me know when you read the example!", buttons=[
{"payload":'/finished_examples', "title":"I'm done!"}, {"payload":'/finished_examples', "title":"I read the example"}
])
print(read_first)
return [SlotSet("read_first", read_first), SlotSet("start_time2", start_time2)]
# Ask goal the user wants to achieve (more specific)
class AskGoal(Action):
def name(self) -> Text:
return "ask_goal"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="Thank you for sharing that with me!")
dispatcher.utter_message(text="Now that you've seen the examples, maybe you have a better idea about what you want to accomplish.")
dispatcher.utter_message(response="utter_remind_goal")
dispatcher.utter_message(text="Please review your goal and try to make it as specific as possible.")
dispatcher.utter_message(text="Making your goal specific is important, because it helps with understanding what exactly you want to achieve.")
dispatcher.utter_message(text="*Make sure to write down your whole goal again, even if it's the same goal.*")
return[FollowupAction(name="goal_form")]
# Introduction to importance questions
class IntroImportance(Action):
def name(self) -> Text:
return "intro_importance"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="Alright!")
dispatcher.utter_message(text="You have thought about how to make your goal more specific.")
dispatcher.utter_message(text="It is also important to set goals that are relevant to you.")
dispatcher.utter_message(text="To think more about that, let's now look at why this goal is important to you")
return[FollowupAction(name="utter_ask_importance")]
# Introduction to achievability question
class IntroAchievable(Action):
def name(self) -> Text:
return "intro_achievable"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="Thank you for telling me!")
dispatcher.utter_message(text="It's great that you thought about why this goal is important to you.")
dispatcher.utter_message(text="We also want to think about whether your goal is achievable or not, as we want to be sure that you have the time and resources to achieve your goal.")
dispatcher.utter_message(text="Let's think about how achievable your goal is.")
return[FollowupAction(name="utter_ask_achievable")]
# Answer based on chosen achievability goal
class Achievability(Action):
def name(self) -> Text:
return "achievability"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
achievability = tracker.get_slot('achievable')
if achievability == "1" or achievability == "2":
print("1 or 2")
dispatcher.utter_message(text="Hmm, if you think your goal is too difficult, it is okay to start with an easier goal.")
dispatcher.utter_message(text="Your goal should be realistic to achieve, but challenging enough to keep you motivated.")
buttons = [
{"payload":'/change_goal_achievability', "title":"I want to make my goal more achievable"},
{"payload":'/set_deadline', "title":"I want to keep the goal that I set, I can do this"}
]
dispatcher.utter_message(text="Do you want to change your goal, or are you up for the challenge?", buttons=buttons)
elif achievability == "6" or achievability == "7":
print("6 or 7")
dispatcher.utter_message(text="Hmm, it seems like you can easily achieve your goal.")
buttons = [
{"payload":'/change_goal_achievability', "title":"I want to make my goal more challenging"},
{"payload":'/set_deadline', "title":"I want to keep the goal I set, it is challenging enough"}
]
dispatcher.utter_message(text="What do you think of challenging yourself a bit more?", buttons=buttons)
else:
print("3-4-5")
buttons = [
{"payload":'/set_deadline', "title":"I agree!"},
{"payload":'/set_deadline', "title":"Yes"}
]
dispatcher.utter_message(text="That seems good.")
dispatcher.utter_message(text="Setting realistic, achievable goals increases the chance of success.", buttons=buttons)
return []
# Change goal or goal deadline
class Change(Action):
def name(self) -> Text:
return "change"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
last_message = (tracker.latest_message)['text']
if last_message == "/goal_change":
print("if goal")
dispatcher.utter_message(text="Alright, let's change it.")
dispatcher.utter_message(text="What is the new goal you would like to set?")
return [SlotSet("goal", None), FollowupAction(name="goal_form")]
elif last_message == "/deadline_change":
print("if goal deadline")
dispatcher.utter_message(text="Alright, let's change it.")
dispatcher.utter_message(text="What is the new deadline you would like to set?")
return [SlotSet("deadline", None), FollowupAction(name="deadline_form")]
elif last_message == "/change_goal_achievability":
print("if goal achievability")
dispatcher.utter_message(text="Let's do that!")
dispatcher.utter_message(text="What is the new goal you would like to set?")
return [SlotSet("achievable", None), SlotSet("goal", None), FollowupAction(name="goal_form")]
else:
print("else")
dispatcher.utter_message(text=last_message)
return []
# Answer based on given date
class CheckDate(Action):
def name(self) -> Text:
return "check_date"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
last_message = (tracker.latest_message)['text']
today = datetime.date.today()
print("Today's date:", today)
for format in ['%d-%m-%Y', '%d/%m/%Y', '%d %m %Y']:
try:
deadline = datetime.datetime.strptime(last_message, format)
print('correct format date')
break
except ValueError as e:
print('wrong format date')
dispatcher.utter_message(text="Hmm, I didn't quite get that.")
dispatcher.utter_message(text="Could you please reformulate the goal deadline you want to set?")
return[SlotSet("deadline", None), FollowupAction(name="deadline_form")]
break
difference = (deadline.date()-today).days
print(difference)
if difference < 7 and difference >= 0:
print("less than a week later")
dispatcher.utter_message(text="Hmm, this seems a bit soon. How about setting a new deadline?")
return[SlotSet("deadline", None), FollowupAction(name="deadline_form")]
elif difference < 0:
print("in the past")
dispatcher.utter_message(text="Hmm, it seems like you have set a deadline in the past!")
dispatcher.utter_message(text="Could you set a deadline in the future?")
return[SlotSet("deadline", None), FollowupAction(name="deadline_form")]
else:
print(deadline)
print(deadline.date())
deadline = tracker.get_slot('deadline')
if difference <= 14:
buttons = [
{"payload":'/confirm_deadline', "title":"That is correct"},
{"payload":'/deadline_change', "title":"No, I want to set a different deadline"}
]
dispatcher.utter_message(text="Okay!")
dispatcher.utter_message(text="So you want to achieve your goal in " + str(difference) + " days by " + str(deadline) + ", is that correct?", buttons=buttons)
return[]
elif difference > 14 and difference < 70:
weeks = int(difference / 7)
buttons = [
{"payload":'/confirm_deadline', "title":"That is correct"},
{"payload":'/deadline_change', "title":"No, I want to set a different deadline"}
]
dispatcher.utter_message(text="Okay!")
dispatcher.utter_message(text="So you want to achieve your goal in around " + str(weeks) + " weeks by " + str(deadline) + ", is that correct?", buttons=buttons)
return[]
elif difference > 70:
months = int(difference / 30)
buttons = [
{"payload":'/confirm_deadline', "title":"That is correct"},
{"payload":'/deadline_change', "title":"No, I want to set a different deadline"}
]
dispatcher.utter_message(text="Okay!")
dispatcher.utter_message(text="So you want to achieve your goal in around " + str(months) + " months by " + str(deadline) + ", is that correct?", buttons=buttons)
return[]
# Save slots to a file
class Save(Action):
def name(self) -> Text:
return "save"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
metadata = extract_metadata_from_tracker(tracker)
user_id = metadata['userid']
print(user_id)
if user_id is None:
print("User id not found")
user_id = tracker.sender_id
textfile = open("experiment_data/"+user_id+".txt", "w")
textfile.write("user_id: " + user_id + "\n")
textfile.write("mood: " + tracker.get_slot('mood') + "\n")
textfile.write("goal_type: " + tracker.get_slot('goal_type') + "\n")
textfile.write("first_goal: " + tracker.get_slot('first_goal') + "\n")
textfile.write("goal: " + tracker.get_slot('goal') + "\n")
textfile.write("importance: " + tracker.get_slot('importance') + "\n")
textfile.write("importance_why: " + tracker.get_slot('importance_why') + "\n")
textfile.write("deadline " + tracker.get_slot('deadline') + "\n")
textfile.write("takeaway: " + tracker.get_slot('opinion2') + "\n")
textfile.write("start 1: " + str(tracker.get_slot('start_time1')) + "\n")
textfile.write("reading time 1: " + str(tracker.get_slot('read_first')) + "\n")
textfile.write("start 2: " + str(tracker.get_slot('start_time2')) + "\n")
textfile.write("reading time 2: " + str(tracker.get_slot('read_second')) + "\n")
textfile.close()
return[]
# Finish the conversation
class Finish(Action):
def name(self) -> Text:
return "finish_conversation"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(response="utter_goodbye")
return[]
class End(Action):
def name(self) -> Text:
return "conversation_over"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
dispatcher.utter_message(text="*This is the end of the session. Please close this window and return to the questionnaire in Qualtrics.*")
return[ConversationPaused()]
# Check length of the user's answer to the take away question
class CheckLengthOpinion(Action):
def name(self) -> Text:
return "check_length_opinion"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
last_message = (tracker.latest_message)['text']
if (len(last_message) < 25):
print("too short opinion")
dispatcher.utter_message(text="Hmm, could you please elaborate a bit more on that?")
return[SlotSet("opinion2", None), FollowupAction(name="opinion2_form")]
else:
return[FollowupAction(name="ask_goal")]
# Check length of the goal the user has set
class CheckLengthGoal(Action):
def name(self) -> Text:
return "check_length_goal"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
last_message = (tracker.latest_message)['text']
if (len(last_message) < 25):
print("too short goal")
dispatcher.utter_message(text="Hmm, that seems a bit short.")
dispatcher.utter_message(text="Could you please elaborate a bit more on that?")
dispatcher.utter_message(text="*Please make sure to write down your whole goal, even if it's the same as before.*")
return [SlotSet("goal", None), FollowupAction(name="goal_form")]
else:
return[FollowupAction(name="intro_importance")]
# Check length of the user's answer to the importance question
class CheckLengthImportance(Action):
def name(self) -> Text:
return "check_length_importance"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
last_message = (tracker.latest_message)['text']
if (len(last_message) < 25):
print("too short importance")
dispatcher.utter_message(text="Hmm, could you please elaborate a bit more on that?")
return [SlotSet("importance_why", None), FollowupAction(name="importance_form")]
else:
return[FollowupAction(name="intro_achievable")] | true |
6544e454cf9ac82fe2f64acffa0f744f94e0d5f8 | Python | lmccalman/spacerace | /clients/zmq/state_watcher.py | UTF-8 | 2,402 | 2.6875 | 3 | [
"MIT"
] | permissive | # state_watcher.py
#
# threaded state monitoring - pushed from spacerace over zmq
import json
import argparse
import time
import threading
import zmq
# from zmq.eventloop import ioloop
# from zmq.eventloop.ioloop import ZMQIOLoop
# from zmq.eventloop.zmqstream import ZMQStream
# install PyZMQ's IOLoop
# ioloop.install()
import logging
logger = logging.getLogger(__name__)
state_lock = threading.Lock()
DEFAULTS = {
'hostname': 'localhost',
'state_port': 5556,
'control_port': 5557,
'lobby_port': 5558,
}
# Helper functions
make_address = 'tcp://{}:{}'.format
def main():
parser = argparse.ArgumentParser(
description='Spacerace: Manned Spacecraft')
parser.add_argument('--hostname', type=str, help='Server hostname',
default=DEFAULTS['hostname'])
parser.add_argument('--state_port', type=int, help='State port',
default=DEFAULTS['state_port'])
parser.add_argument('--lobby_port', type=int, help='Lobby port',
default=DEFAULTS['lobby_port'])
args = parser.parse_args()
logger.debug(args)
game_state = [{'state': 'finished'}]
t = threading.Thread(target=state_client,
args=(args.hostname, args.state_port, game_state))
t.daemon = True # die with main thread
t.start()
# Now do our own "Game Loop"
print("Original thread game loop:")
while True:
with state_lock:
state = game_state[0]
print(state)
time.sleep(1)
def state_client(hostname, state_port, game_state):
context = zmq.Context()
context.linger = 0
state_address = make_address(hostname, state_port)
# Using State socket
print('Connecting to state socket at [{0}]...'.format(state_address))
game_name = "" # I'm hoping this applies to all games
state_sock = context.socket(zmq.SUB)
print("Subscribe")
state_sock.setsockopt_string(zmq.SUBSCRIBE, game_name)
print("Connect")
state_sock.connect(state_address)
print("Recv Loop init.")
while True:
msg_filter_b, state_b = state_sock.recv_multipart()
new_game_state = game_state[0]
try:
new_game_state = json.loads(state_b.decode())
except:
continue
with state_lock:
game_state[0] = new_game_state
return
if __name__ == '__main__':
main()
| true |
c9705c48c24f2aefbd6b3922f8c1e5c1206764d9 | Python | javaTheHutts/Java-the-Hutts | /src/unittest/python/test_sa_id_book_old.py | UTF-8 | 11,119 | 2.875 | 3 | [
"BSD-3-Clause"
] | permissive | """
----------------------------------------------------------------------
Authors: Jan-Justin van Tonder
----------------------------------------------------------------------
Unit tests for the SA ID book old module.
----------------------------------------------------------------------
"""
from hutts_verification.id_contexts.sa_id_book_old import SAIDBookOld
def test_get_id_info_empty_in_str():
"""
Test the case in which an empty string is passed to the get_id_info function.
"""
sa_id_book = SAIDBookOld()
assert sa_id_book.get_id_info('') == {
'identity_number': None,
'surname': None,
'names': None,
'sex': None,
'date_of_birth': None,
'country_of_birth': None,
'status': None
}
def test_get_id_info_default_skip_unnecessary():
"""
Test the get_id_info function's ability to search for relevant (pre-specified) information.
"""
sa_id_book = SAIDBookOld()
in_str = (
'Not legit\n'
'SA Burger SA Citizen'
'Ignore\n'
'VanSurname\n'
'Doe\n'
'VoornameForenames\n'
'John-Michael\n'
'Robert\n'
'Nationality\n'
'RSA\n'
'Geboortedistrik of-land\n'
'District or Country of Birth\n'
'SOUTH AFRICA\n'
'Skip\n'
'Skip this too\n'
'Sex\n'
'M\n'
'geboortedatum 1971-01-13'
)
assert sa_id_book.get_id_info(in_str) == {
'identity_number': None,
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': 'M',
'date_of_birth': '1971-01-13',
'country_of_birth': 'South Africa',
'status': 'Citizen',
}
def test_get_id_info_default_id_num_found():
"""
Test the case in which an ID number was found by get_id_info and whether it is used to extract other information
such as date of birth, status and sex.
"""
sa_id_book = SAIDBookOld()
in_str = (
'ID No 7101135111011\n'
'VanSurname\n'
'Doe\n'
'VoornameForenames\n'
'John-Michael\n'
'Robert\n'
'Geboortedistrik of-land\n'
'District or Country of Birth\n'
'South Africa\n'
'geboortedatum 1971-01-13'
)
assert sa_id_book.get_id_info(in_str) == {
'identity_number': '7101135111011',
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': 'M',
'date_of_birth': '1971-01-13',
'country_of_birth': 'South Africa',
'status': 'Citizen'
}
def test_get_id_info_default_id_num_not_found():
"""
Test the case in which an ID number was not found by get_id_info.
"""
sa_id_book = SAIDBookOld()
in_str = (
'Nothing to find here... 7101135111011\n'
'Not legit\n'
'Ignore\n'
'VanSurname\n'
'Doe\n'
'VoornameForenames\n'
'John-Michael\n'
'Robert\n'
'Nationality\n'
'RSA\n'
'Geboortedistrik of-land\n'
'District or Country of Birth\n'
'South Africa\n'
'Skip\n'
'Skip this too\n'
'Status\n'
'Hungry\n'
'Sex\n'
'M\n'
'geboortedatum 1971-01-13'
)
assert sa_id_book.get_id_info(in_str) == {
'identity_number': None,
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': 'M',
'date_of_birth': '1971-01-13',
'country_of_birth': 'South Africa',
'status': None
}
def test_get_id_info_id_in_barcode():
"""
Test the case in which an ID number was extracted from a barcode and passed to get_id_info and whether it is used to
extract other information such as date of birth, status and sex.
"""
sa_id_book = SAIDBookOld()
in_str = (
'id no 123456789\n'
'VanSurname\n'
'Doe\n'
'VoornameForenames\n'
'Jane-Michael\n'
'Robert'
)
assert sa_id_book.get_id_info(in_str, barcode_data={'identity_number': '7101134111111'}) == {
'identity_number': '7101134111111',
'surname': 'Doe',
'names': 'Jane-Michael Robert',
'sex': 'F',
'date_of_birth': '1971-01-13',
'country_of_birth': None,
'status': 'Non Citizen'
}
def test_get_id_info_multi_line_1():
"""
Test the ability of the get_id_info function to retrieve field values over multiple lines.
This case checks for a maximum of 2 lines.
"""
sa_id_book = SAIDBookOld()
in_str = (
'id no 7101135111011\n'
'VanSurname\n'
'Doe\n'
'VoornameForenames\n'
'John-Michael\n'
'Robert\n'
'Ignore'
)
assert sa_id_book.get_id_info(in_str) == {
'identity_number': '7101135111011',
'surname': 'Doe',
'names': 'John-Michael Robert',
'sex': 'M',
'date_of_birth': '1971-01-13',
'country_of_birth': None,
'status': 'Citizen'
}
def test_get_id_info_multi_line_2():
"""
Test the ability of the get_id_info function to retrieve field values over multiple lines.
This case checks if a match to multi_line_end was found.
"""
sa_id_book = SAIDBookOld()
in_str = (
'id no 7101135111011\n'
'VanSurname\n'
'Doe\n'
'VoornameForenames\n'
'John-Michael'
)
assert sa_id_book.get_id_info(in_str) == {
'identity_number': '7101135111011',
'surname': 'Doe',
'names': 'John-Michael',
'sex': 'M',
'date_of_birth': '1971-01-13',
'country_of_birth': None,
'status': 'Citizen'
}
def test_get_id_info_multi_line_3():
"""
Test the ability of the get_id_info function to retrieve field values over multiple lines.
This case checks how a specified multi_line field value is dealt with if the value does not exist at the end of
the in_string.
"""
sa_id_book = SAIDBookOld()
in_str = 'Forenames'
assert sa_id_book.get_id_info(in_str) == {
'identity_number': None,
'surname': None,
'names': None,
'sex': None,
'date_of_birth': None,
'country_of_birth': None,
'status': None
}
def test_get_id_info_bare_multi_line_4():
"""
Test the ability of the get_id_info function to retrieve field values over multiple lines.
This case checks how a specified multi_line field value is dealt with if the value exists, but is at the end of
the in_string.
"""
sa_id_book = SAIDBookOld()
in_str = (
'VanSurname\n'
'Doe\n'
'VoornameForenames\n'
'John\n'
'Robert'
)
assert sa_id_book.get_id_info(in_str, max_multi_line=4) == {
'identity_number': None,
'surname': 'Doe',
'names': 'John Robert',
'sex': None,
'date_of_birth': None,
'country_of_birth': None,
'status': None
}
def test_get_id_info_max_multi_line() -> None:
    """
    Test the ability of the get_id_info function to retrieve field values over multiple lines.
    This case checks if the correct number of multi_line was considered when specified.
    """
    sa_id_book = SAIDBookOld()
    in_str = (
        'ID NO 7101135111011\n'
        'VanSurname\n'
        'Doe\n'
        'VoornameForenames\n'
        'John-Michael\n'
        'Robert\n'
        'Douglas\n'
        'Ignore'
    )
    # Exactly three forename lines are collected; the 'Ignore' line is not.
    assert sa_id_book.get_id_info(in_str, max_multi_line=3) == {
        'identity_number': '7101135111011',
        'surname': 'Doe',
        'names': 'John-Michael Robert Douglas',
        'sex': 'M',
        'date_of_birth': '1971-01-13',
        'country_of_birth': None,
        'status': 'Citizen'
    }
def test_get_id_info_fuzzy_1() -> None:
    """
    Tests to see if get_id_info is capable of retrieving field values through reasonable or commonly required fuzzy
    matching to be performed.
    """
    sa_id_book = SAIDBookOld()
    # Field labels contain mild OCR-style misspellings that fuzzy matching
    # should still resolve.
    in_str = (
        'Ide nom 7101135111011\n'
        'VanSuriname\n'
        'Doe\n'
        'VoornameForenames\n'
        'John-Michael\n'
        'Robert\n'
        'Gebooortedistrikt or-sand\n'
        'Distric r County o Bnth\n'
        'SUID-AFRIKA\n'
    )
    assert sa_id_book.get_id_info(in_str) == {
        'identity_number': '7101135111011',
        'surname': 'Doe',
        'names': 'John-Michael Robert',
        'sex': 'M',
        'date_of_birth': '1971-01-13',
        'country_of_birth': 'South Africa',
        'status': 'Citizen'
    }
def test_get_id_info_fuzzy_2() -> None:
    """
    Tests to see if get_id_info is capable of retrieving field values through reasonable or commonly required fuzzy
    matching to be performed.
    """
    sa_id_book = SAIDBookOld()
    # Heavier corruption than fuzzy_1: digit-for-letter substitutions
    # ('0' for 'o') and dropped characters in the field labels.
    in_str = (
        'ed no 7101135111011\n'
        'VanSuriname 00ee\n'
        'Doe\n'
        'vornamef0renames iii\n'
        'John-Michael\n'
        'Robert\n'
        'Geb00rtedistrikt 0r-sand\n'
        'dstrct or country 0 bnth\n'
        'SUID-AFRIKA\n'
    )
    assert sa_id_book.get_id_info(in_str) == {
        'identity_number': '7101135111011',
        'surname': 'Doe',
        'names': 'John-Michael Robert',
        'sex': 'M',
        'date_of_birth': '1971-01-13',
        'country_of_birth': 'South Africa',
        'status': 'Citizen'
    }
def test_get_id_info_fuzzy_min_ratio() -> None:
    """
    Tests the get_id_info function fuzzy matching with a specified minimum ratio.
    """
    sa_id_book = SAIDBookOld()
    in_str = (
        'edn0 7101135111011\n'
        'VanSuriname\n'
        'Doe\n'
        'VoornameForenames\n'
        'John-Michael\n'
        'Robert\n'
        'Sex\n'
        'M\n'
        'Geb00rtedistrikt 0r-sand\n'
        'dstrct or county 0 bnth\n'
        'South Africa\n'
    )
    # A strict 90% minimum ratio rejects the heavily corrupted labels
    # (id number, birth district), so only the clean fields are extracted.
    assert sa_id_book.get_id_info(in_str, fuzzy_min_ratio=90.00) == {
        'identity_number': None,
        'surname': 'Doe',
        'names': 'John-Michael Robert',
        'sex': 'M',
        'date_of_birth': None,
        'country_of_birth': None,
        'status': None
    }
def test_get_id_info_bare() -> None:
    """
    Test the get_id_info function's behaviour when a field name is matched, but no field value follows and is at the end
    of the in_string.
    """
    sa_id_book = SAIDBookOld()
    in_str = (
        'VanSurname\n'
    )
    # A surname label with nothing after it must not populate any field.
    assert sa_id_book.get_id_info(in_str) == {
        'identity_number': None,
        'surname': None,
        'names': None,
        'sex': None,
        'date_of_birth': None,
        'country_of_birth': None,
        'status': None
    }
def test_get_id_info_invalid_date_of_birth() -> None:
    """
    Test the get_id_info function's behaviour when an invalid date of birth is given for formatting.
    We expect it return the malformed 'date'.
    """
    sa_id_book = SAIDBookOld()
    in_str = (
        'geboortedatum 123-0-1971\n'
        'Geboortedistrik of-land\n'
        'District or Country of Birth\n'
        'South Africa'
    )
    # The unparseable date string is passed through verbatim, not dropped.
    assert sa_id_book.get_id_info(in_str) == {
        'identity_number': None,
        'surname': None,
        'names': None,
        'sex': None,
        'date_of_birth': '123-0-1971',
        'country_of_birth': 'South Africa',
        'status': None
    }
| true |
1a84c4ad7a9c5f200b98a6a76522566e957f8191 | Python | alperkesen/codecarbon | /codecarbon/core/cpu.py | UTF-8 | 12,632 | 2.609375 | 3 | [
"MIT"
] | permissive | """
Implements tracking Intel CPU Power Consumption on Mac and Windows
using Intel Power Gadget
https://software.intel.com/content/www/us/en/develop/articles/intel-power-gadget.html
"""
import os
import shutil
import subprocess
import sys
import time
import warnings
from typing import Dict, Tuple
import pandas as pd
with warnings.catch_warnings(record=True) as w:
from fuzzywuzzy import fuzz
from codecarbon.core.rapl import RAPLFile
from codecarbon.core.util import detect_cpu_model
from codecarbon.external.logger import logger
from codecarbon.input import DataSource
def is_powergadget_available():
    """Report whether Intel Power Gadget can be instantiated on this host."""
    try:
        IntelPowerGadget()
    except Exception as e:
        # Instantiation fails when the PowerLog executable is missing or the
        # platform is unsupported; log the reason and report unavailability.
        logger.debug(
            "Not using PowerGadget, an exception occurred while instantiating"
            + f" IntelPowerGadget : {e}",
        )
        return False
    return True
def is_rapl_available():
    """Report whether the Intel RAPL sysfs interface is usable on this host."""
    try:
        IntelRAPL()
    except Exception as e:
        # RAPL needs a Linux powercap tree with readable energy files; log
        # the failure reason and report unavailability.
        logger.debug(
            "Not using the RAPL interface, an exception occurred while instantiating "
            + f"IntelRAPL : {e}",
        )
        return False
    return True
class IntelPowerGadget:
    """Wrapper around Intel Power Gadget's `PowerLog` CLI (Windows / macOS).

    Runs the tool for a short sampling window, lets it write a CSV log,
    then parses that log into a dict of power/energy metrics.
    """

    # Candidate executable names: the bare name is looked up on PATH first,
    # then the default install location is tried as a fallback.
    _osx_exec = "PowerLog"
    _osx_exec_backup = "/Applications/Intel Power Gadget/PowerLog"
    _windows_exec = "PowerLog3.0.exe"
    _windows_exec_backup = "C:\\Program Files\\Intel\\Power Gadget 3.5\\PowerLog3.0.exe"

    def __init__(
        self,
        output_dir: str = ".",
        duration=1,
        resolution=100,
        log_file_name="intel_power_gadget_log.csv",
    ):
        """Locate the CLI and record the sampling parameters.

        :param output_dir: directory in which the CSV log is written
        :param duration: sampling duration passed to PowerLog (seconds)
        :param resolution: sampling resolution passed to PowerLog (ms)
        :param log_file_name: name of the CSV file PowerLog writes
        :raises FileNotFoundError: executable missing on a supported OS
        :raises SystemError: platform is neither Windows nor macOS
        """
        self._log_file_path = os.path.join(output_dir, log_file_name)
        self._system = sys.platform.lower()
        self._duration = duration
        self._resolution = resolution
        self._setup_cli()

    def _setup_cli(self):
        """
        Setup cli command to run Intel Power Gadget
        """
        if self._system.startswith("win"):
            if shutil.which(self._windows_exec):
                self._cli = shutil.which(
                    self._windows_exec
                )  # Windows exec is a relative path
            elif shutil.which(self._windows_exec_backup):
                self._cli = self._windows_exec_backup
            else:
                raise FileNotFoundError(
                    f"Intel Power Gadget executable not found on {self._system}"
                )
        elif self._system.startswith("darwin"):
            if shutil.which(self._osx_exec):
                self._cli = self._osx_exec
            elif shutil.which(self._osx_exec_backup):
                self._cli = self._osx_exec_backup
            else:
                raise FileNotFoundError(
                    f"Intel Power Gadget executable not found on {self._system}"
                )
        else:
            raise SystemError("Platform not supported by Intel Power Gadget")

    def _log_values(self):
        """
        Logs output from Intel Power Gadget command line to a file
        """
        returncode = None
        if self._system.startswith("win"):
            returncode = subprocess.call(
                [
                    self._cli,
                    "-duration",
                    str(self._duration),
                    "-resolution",
                    str(self._resolution),
                    "-file",
                    self._log_file_path,
                ],
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
        elif self._system.startswith("darwin"):
            # The command is passed through the shell so the quoted executable
            # path (which may contain spaces on macOS) is honoured.
            returncode = subprocess.call(
                f"'{self._cli}' -duration {self._duration} -resolution {self._resolution} -file {self._log_file_path} > /dev/null",  # noqa: E501
                shell=True,
            )
        else:
            return None
        if returncode != 0:
            logger.warning(
                "Returncode while logging power values using "
                + f"Intel Power Gadget: {returncode}"
            )
        return

    def get_cpu_details(self, **kwargs) -> Dict:
        """
        Fetches the CPU Power Details by fetching values from a logged csv file
        in _log_values function
        """
        self._log_values()
        cpu_details = dict()
        try:
            cpu_data = pd.read_csv(self._log_file_path).dropna()
            for col_name in cpu_data.columns:
                # Timestamp / cycle-counter columns carry no power information.
                if col_name in ["System Time", "Elapsed Time (sec)", "RDTSC"]:
                    continue
                if "Cumulative" in col_name:
                    # Cumulative columns: the last row holds the total.
                    cpu_details[col_name] = cpu_data[col_name].iloc[-1]
                else:
                    # Instantaneous columns: average over the sampling window.
                    cpu_details[col_name] = cpu_data[col_name].mean()
        except Exception as e:
            logger.info(
                f"Unable to read Intel Power Gadget logged file at {self._log_file_path}\n \
                Exception occurred {e}",
                exc_info=True,
            )
        return cpu_details
class IntelRAPL:
    """Reads CPU energy consumption from the Linux RAPL powercap sysfs tree."""

    def __init__(self, rapl_dir="/sys/class/powercap/intel-rapl"):
        """Enumerate readable RAPL energy files under `rapl_dir`.

        :raises SystemError: on non-Linux platforms
        :raises FileNotFoundError: when the sysfs tree is absent
        """
        self._lin_rapl_dir = rapl_dir
        self._system = sys.platform.lower()
        self._rapl_files = list()
        self._setup_rapl()

    def _is_platform_supported(self) -> bool:
        # The powercap/RAPL sysfs interface only exists on Linux.
        return self._system.startswith("lin")

    def _setup_rapl(self):
        """Validate platform and directory, then collect the RAPL files."""
        if self._is_platform_supported():
            if os.path.exists(self._lin_rapl_dir):
                self._fetch_rapl_files()
            else:
                raise FileNotFoundError(
                    f"Intel RAPL files not found at {self._lin_rapl_dir} "
                    + f"on {self._system}"
                )
        else:
            raise SystemError("Platform not supported by Intel RAPL Interface")
        return

    def _fetch_rapl_files(self):
        """
        Fetches RAPL files from the RAPL directory
        """
        # consider files like `intel-rapl:$i`
        files = list(filter(lambda x: ":" in x, os.listdir(self._lin_rapl_dir)))
        i = 0
        for file in files:
            path = os.path.join(self._lin_rapl_dir, file, "name")
            with open(path) as f:
                name = f.read().strip()
            # Whole-package domains get the label used in emission reports.
            if "package" in name:
                name = f"Processor Energy Delta_{i}(kWh)"
                i += 1
            rapl_file = os.path.join(self._lin_rapl_dir, file, "energy_uj")
            try:
                # Try to read the file to be sure we can
                with open(rapl_file, "r") as f:
                    _ = float(f.read())
                self._rapl_files.append(RAPLFile(name=name, path=rapl_file))
                logger.debug(f"We will read Intel RAPL files at {rapl_file}")
            except PermissionError as e:
                # Unreadable files are skipped (not fatal); the caller falls
                # back to a constant CPU power estimate.
                logger.error(
                    "Unable to read Intel RAPL files for CPU power, we will use a constant for your CPU power."
                    + " Please view https://github.com/mlco2/codecarbon/issues/244"
                    + f" for workarounds : {e}"
                )
        return

    def get_cpu_details(self, delay: float, **kwargs) -> Dict:
        """
        Fetches the CPU Energy Deltas by fetching values from RAPL files
        """
        cpu_details = dict()
        try:
            # Snapshot every counter, wait `delay` seconds, snapshot again;
            # each RAPLFile then exposes the energy delta over that window.
            list(map(lambda rapl_file: rapl_file.start(), self._rapl_files))
            time.sleep(delay)
            list(map(lambda rapl_file: rapl_file.end(), self._rapl_files))
            for rapl_file in self._rapl_files:
                cpu_details[rapl_file.name] = rapl_file.energy_delta.kWh
        except Exception as e:
            logger.info(
                f"Unable to read Intel RAPL files at {self._rapl_files}\n \
                Exception occurred {e}",
                exc_info=True,
            )
        return cpu_details
class TDP:
    """Resolves the machine's CPU model to a constant TDP (Watts) by fuzzy
    matching the detected model name against a registry of known CPUs."""

    def __init__(self):
        # Detected model name and its TDP in Watts (None when unmatched).
        self.model, self.tdp = self._main()

    @staticmethod
    def _get_cpu_constant_power(match: str, cpu_power_df: pd.DataFrame) -> int:
        """Extract constant power from matched CPU"""
        return cpu_power_df[cpu_power_df["Name"] == match]["TDP"].values[0]

    def _get_cpu_power_from_registry(self, cpu_model_raw: str) -> int:
        """Look up `cpu_model_raw` in the CPU power registry.

        Returns the TDP in Watts, or None when no match is found.
        """
        cpu_power_df = DataSource().get_cpu_power_data()
        cpu_matching = self._get_matching_cpu(cpu_model_raw, cpu_power_df)
        if cpu_matching:
            power = self._get_cpu_constant_power(cpu_matching, cpu_power_df)
            return power
        else:
            return None

    @staticmethod
    def _get_cpus(cpu_df, cpu_idxs) -> list:
        # Map row indices back to CPU names.
        return [cpu_df["Name"][idx] for idx in cpu_idxs]

    @staticmethod
    def _get_direct_matches(moodel: str, cpu_df: pd.DataFrame) -> list:
        # Case-insensitive whole-string similarity against every known CPU.
        model_l = moodel.lower()
        return [fuzz.ratio(model_l, cpu.lower()) for cpu in cpu_df["Name"]]

    @staticmethod
    def _get_token_set_matches(model: str, cpu_df: pd.DataFrame) -> list:
        # Token-set similarity: order-insensitive, tolerant of extra tokens.
        return [fuzz.token_set_ratio(model, cpu) for cpu in cpu_df["Name"]]

    @staticmethod
    def _get_single_direct_match(
        ratios: list, max_ratio: int, cpu_df: pd.DataFrame
    ) -> str:
        # The first row achieving the best ratio wins.
        idx = ratios.index(max_ratio)
        cpu_matched = cpu_df["Name"].iloc[idx]
        return cpu_matched

    def _get_matching_cpu(
        self, model_raw: str, cpu_df: pd.DataFrame, greedy=False
    ) -> str:
        """
        Get matching cpu name
        :args:
            model_raw (str): raw name of the cpu model detected on the machine
            cpu_df (DataFrame): table containing cpu models along their tdp
            greedy (default False): if multiple cpu models match with an equal
                ratio of similarity, greedy (True) selects the first model,
                following the order of the cpu list provided, while non-greedy
                returns None.
        :return: name of the matching cpu model
        :notes:
            Thanks to the greedy mode, even though the match could be a model
            with a tdp very different from the actual tdp of current cpu, it
            still enables the relative comparison of models emissions running
            on the same machine.
            THRESHOLD_DIRECT defines the similarity ratio value to consider
            almost-exact matches.
            THRESHOLD_TOKEN_SET defines the similarity ratio value to consider
            token_set matches (for more detail see fuzz.token_set_ratio).
        """
        THRESHOLD_DIRECT = 100
        THRESHOLD_TOKEN_SET = 100
        ratios_direct = self._get_direct_matches(model_raw, cpu_df)
        ratios_token_set = self._get_token_set_matches(model_raw, cpu_df)
        max_ratio_direct = max(ratios_direct)
        max_ratio_token_set = max(ratios_token_set)
        # Check if a direct match exists
        if max_ratio_direct >= THRESHOLD_DIRECT:
            cpu_matched = self._get_single_direct_match(
                ratios_direct, max_ratio_direct, cpu_df
            )
            return cpu_matched
        # Check if an indirect match exists
        if max_ratio_token_set < THRESHOLD_TOKEN_SET:
            return None
        else:
            cpu_idxs = self._get_max_idxs(ratios_token_set, max_ratio_token_set)
            cpu_machings = self._get_cpus(cpu_df, cpu_idxs)
            # Ambiguous token-set ties are only resolved in greedy mode.
            if (cpu_machings and len(cpu_machings) == 1) or greedy:
                cpu_matched = cpu_machings[0]
                return cpu_matched
            else:
                return None

    @staticmethod
    def _get_max_idxs(ratios: list, max_ratio: int) -> list:
        # All indices that achieve the maximum ratio.
        return [idx for idx, ratio in enumerate(ratios) if ratio == max_ratio]

    def _main(self) -> Tuple[str, int]:
        """
        Get CPU power from constant mode
        :return: model name (str), power in Watt (int)
        """
        cpu_model_detected = detect_cpu_model()
        if cpu_model_detected:
            power = self._get_cpu_power_from_registry(cpu_model_detected)
            if power:
                logger.debug(
                    f"CPU : We detect a {cpu_model_detected} with a TDP of {power} W"
                )
                return cpu_model_detected, power
            else:
                return cpu_model_detected, None
        else:
            logger.warning(
                "We were unable to detect your CPU using the `cpuinfo` package."
                + " Resorting to a default power consumption of 85W."
            )
            return "Unknown", None
| true |
88b07716aaf6a2c040bed53442dca199ef0b285c | Python | meatware/FixedTermDepositGrapher | /ftd_main/models.py | UTF-8 | 1,614 | 2.6875 | 3 | [
"MIT"
] | permissive | from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin, LoginManager
# Shared SQLAlchemy database handle (bound to the Flask app elsewhere).
db = SQLAlchemy()
# Flask-Login manager; unauthenticated users are redirected to the 'login' view.
login = LoginManager()
login.login_view = 'login'
@login.user_loader
def load_user(id):
    # Flask-Login callback: resolve the user id stored in the session to a
    # User row. The id arrives as a string, hence the int() conversion.
    return User.query.get(int(id))
class User(UserMixin, db.Model):
    """Application account; passwords are stored only as salted hashes."""
    __tablename__ = "user"

    def set_password(self, password):
        """Hash `password` and store the digest (plaintext is never kept)."""
        self.password_hash = generate_password_hash(password)

    def check_password(self, password):
        """Return True when `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), index=True, unique=True)
    email = db.Column(db.String(120), index=True, unique=True)
    password_hash = db.Column(db.String(256))
    # One-to-many link to this user's deposits (FixedDeposit.author backref).
    deposits = db.relationship('FixedDeposit', backref='author', lazy='dynamic')

    def __repr__(self):
        return '<User {}>'.format(self.username)
class FixedDeposit(db.Model):
    """A single fixed-term deposit belonging to one user."""
    __tablename__ = "deposits"

    id = db.Column(db.Integer, primary_key=True)  # TODO: change to acc_no?
    # Bank account number for the deposit.
    ac_no = db.Column(db.String(255))
    start_date = db.Column(db.DateTime)  # db.DateTime, index=True)
    end_date = db.Column(db.DateTime)  # , index=True)
    # Annual interest rate and the compounding scheme applied.
    interest_rate = db.Column(db.Float)
    interest_scheme = db.Column(db.String(255))
    period = db.Column(db.Integer)
    initial_deposit = db.Column(db.Float)
    final_deposit = db.Column(db.Float)
    # Owning user (see User.deposits relationship).
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))

    def __repr__(self):
        return "<FixedDeposit: {}>".format(self.ac_no + str(self.user_id))
d38c869b8781053037287d60008a91d414236bee | Python | konflic/python_qa_test_data | /examples/1_txt_reader.py | UTF-8 | 441 | 3.25 | 3 | [] | no_license | from files import TXT_FILE_PATH
# Demonstrates the basic text-file reading APIs.
# Fix vs. the original: the handle is managed by a context manager, so it is
# closed even when one of the reads raises (the explicit close() at the end
# was skipped on any exception).
with open(TXT_FILE_PATH, "r") as some_file:
    # Read an exact number of characters
    print(some_file.read(7))
    # Read a single line (from the current cursor position)
    print(some_file.readline())
    # Get all remaining lines as a list
    print(some_file.readlines(), "\n")
    # Read from current cursor position till the end
    # (empty here: readlines() above already consumed the file)
    print(some_file.read())
    # Position cursor within the file
    some_file.seek(0)
    # To open it as writable use r+
    # some_file.write("test")
| true |
4af37495b47067211210dd088f1bda225f4d77f8 | Python | scaraclette/pycode | /interview_practice/APIHackerrank/practice1.py | UTF-8 | 1,808 | 2.953125 | 3 | [] | no_license | import requests, pprint
def getTotalPages(team, year):
    """Query page 1 of `team`'s matches in `year` and return the API's
    reported total page count."""
    url = ('https://jsonmock.hackerrank.com/api/football_matches?year='
           + str(year) + '&team1=' + team + '&page=1')
    payload = requests.get(url).json()
    total_pages = payload.get('total_pages')
    print('TOTAL PAGES:', total_pages)
    return total_pages
def getTotalGoals(team, year):
    """Sum `team`'s goals in `year`: home goals from its own matches, plus
    away goals scored in every opponent's home matches."""
    totalPages = getTotalPages(team, year)
    # Opponents encountered while scanning the home matches.
    otherTeam = set([])
    currentGoal = 0
    # Pass 1: every page of matches where `team` played as team1 (home side).
    for i in range(1, totalPages+1):
        print('I:', i)
        link = 'https://jsonmock.hackerrank.com/api/football_matches?year=' + str(year) + '&team1=' + team + '&page=' + str(i)
        req = requests.get(link).json()
        # pprint.pprint(req)
        data = req.get('data')
        for j in range(len(data)):
            currentData = data[j]
            team2 = currentData.get('team2')
            homeGoal = currentData.get('team1goals')
            otherTeam.add(team2)
            currentGoal += int(homeGoal)
    print('current teams:', otherTeam)
    print('current goal:', currentGoal)
    # Pass 2: for each opponent, scan all of its home matches and add the
    # goals `team` scored there as team2 (away side).
    # NOTE(review): this fetches every page for every opponent; the API also
    # supports filtering by team2 directly, which would need far fewer
    # requests -- confirm before changing behaviour.
    for other in otherTeam:
        totalPages = getTotalPages(other, year)
        print('otherTeam:', other, ' totalPages:', totalPages)
        for i in range(1, totalPages+1):
            link = 'https://jsonmock.hackerrank.com/api/football_matches?year=' + str(year) + '&team1=' + other + '&page=' + str(i)
            req = requests.get(link).json()
            data = req.get('data')
            for j in range(len(data)):
                currentData = data[j]
                team2 = currentData.get('team2')
                if (team2 == team):
                    team2Goals = currentData.get('team2goals')
                    currentGoal += int(team2Goals)
    return currentGoal
# Example run: Barcelona's total goals (home + away) across 2011.
totalGoals = getTotalGoals('Barcelona', 2011)
print(totalGoals)
fca21f66c8384845f940fc19957900e92d92f7ba | Python | tawrahim/Taaye_Kahinde | /Tawheed/chp6/flav.py | UTF-8 | 112 | 2.671875 | 3 | [] | no_license | import easygui
# Prompt the user for a number, add 20, and show the result in a message box.
user_number = int(easygui.enterbox("What is your number "))
result = user_number + 20
easygui.msgbox(result)
| true |
ebb7e636e6b821a3c0264479046de6682a6f15ca | Python | wilkinsonlab/m-oryzae-polya | /info_content_motif.py | UTF-8 | 1,586 | 2.640625 | 3 | [] | no_license | import sys, re, numpy, math
from Bio import SeqIO
# Usage: script.py <genome.fasta> <motif_sites.txt> <IUPAC_motif> <motif_length>
# Computes the information content (bits) of the motif's position frequency
# matrix relative to the genome-wide background base composition.
#
# Fixes vs. the original: `print info` (Python-2-only syntax) is now a
# function call that works on both Python 2 and 3, both file handles are
# closed, and the unused N_genome counter was removed.
g = open(sys.argv[1], 'r')
f = open(sys.argv[2], 'r')
m = sys.argv[3]
length = int(sys.argv[4])
# Expand IUPAC ambiguity codes into regex character classes.
m = m.replace('R', '[GA]').replace('Y', '[TC]').replace('S', '[GC]').replace('W', '[TA]').replace('K', '[GT]').replace('M', '[AC]').replace('D', '[GTA]').replace('H', '[TAC]').replace('B', '[GTC]').replace('V', '[GAC]').replace('N', '[ATGC]')

# Genome-wide background counts, later normalised to frequencies.
A_genome = 0
T_genome = 0
C_genome = 0
G_genome = 0
for seq_record in SeqIO.parse(g, 'fasta'):
    seq = str(seq_record.seq).upper()
    A_genome += seq.count('A')
    T_genome += seq.count('T')
    C_genome += seq.count('C')
    G_genome += seq.count('G')
g.close()
tot = float(A_genome + T_genome + C_genome + G_genome)
A_genome = A_genome / tot
T_genome = T_genome / tot
C_genome = C_genome / tot
G_genome = G_genome / tot
# Background distribution ordered (A, C, G, T) to match the count columns.
genome = (A_genome, C_genome, G_genome, T_genome)

# Position frequency matrix with add-one pseudocounts; columns are A, C, G, T.
lines = 0.0
count = numpy.ones((length, 4), dtype=int)
base_column = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
for line in f:
    l = line.strip().upper()
    find = re.findall(m, l)
    if len(find) > 0:
        # Only the last motif occurrence on each line is counted.
        for i, x in enumerate(find[-1]):
            if x in base_column:
                count[i][base_column[x]] += 1
        lines += 1
f.close()

# Normalise to per-position base probabilities.
count = count / lines
count /= count.sum(axis=1)[:, numpy.newaxis]

# Information content: sum over positions of the KL divergence between each
# motif column and the genome background (log base 2 -> bits).
info = 0
for i, l in enumerate(count):
    for k, n in enumerate(l):
        info += n * math.log(n / genome[k], 2)
print(info)
0c27014186243c1f8b9c372e1da00d043bd14cdd | Python | xulu199705/LeetCode | /leetcode_1252.py | UTF-8 | 538 | 2.96875 | 3 | [
"MIT"
] | permissive | from typing import List
class Solution:
    def oddCells(self, n: int, m: int, indices: List[List[int]]) -> int:
        """Return how many cells of an n x m zero matrix hold an odd value
        after incrementing row r and column c for every [r, c] in indices.

        Cell (i, j) ends up equal to row_hits[i] + col_hits[j], so it is odd
        exactly when the two parities differ.  Counting odd-parity rows and
        columns gives the answer in O(n + m + len(indices)) time instead of
        materialising and scanning the whole matrix.
        """
        row_hits = [0] * n
        col_hits = [0] * m
        for r, c in indices:
            row_hits[r] += 1
            col_hits[c] += 1
        odd_rows = sum(hits % 2 for hits in row_hits)
        odd_cols = sum(hits % 2 for hits in col_hits)
        # A cell is odd iff exactly one of (row parity, column parity) is odd.
        return odd_rows * (m - odd_cols) + (n - odd_rows) * odd_cols
d652e228edd90daccda256d167331deeb9a5aeed | Python | blepfo/RL-MDPs | /policy_approximators.py | UTF-8 | 4,882 | 3.203125 | 3 | [] | no_license | """ Approximates an RL policy from sample episodes. """
import itertools as it
from collections import deque
from typing import Deque, Dict, Tuple
import numpy as np
from _types import Action, History, Position, State, Policy
from utils import policy_shape
def naive_approx(states: np.ndarray, actions: np.ndarray, rewards: np.ndarray,
                 history_length: int) -> Policy:
    """Approximates a policy as a Monte-Carlo estimate of the action taken from each history.

    Args:
        states: (n_samples, time_horizon + 1) array of visited states.
        actions: (n_samples, time_horizon) array of actions taken.
        rewards: same shape as actions (unused by this estimator).
        history_length: number of most-recent states forming a history.

    Returns:
        Array of shape policy_shape(...) holding the empirical
        P(action | history); histories never observed stay at probability 0.
    """
    time_horizon = actions.shape[1]
    n_samples = actions.shape[0]
    assert rewards.shape == actions.shape
    assert states.shape[0] == n_samples
    assert states.shape[1] - 1 == time_horizon
    # Bug fix: max() over a 2-D numpy array compares/iterates rows; we want
    # the largest scalar label across the whole array.
    state_size: int = int(states.max())
    action_size: int = int(actions.max())
    history_counts: Dict[History, int] = {}
    history_action_counts: Dict[Tuple[History, Action], int] = {}
    # Most-recent state first, zero-padded at the start.
    # NOTE(review): the deque is not reset between sample trajectories, so
    # one trajectory's tail seeds the next sample's first histories --
    # confirm this is intended.
    history_deque: Deque[State] = deque(
        (0, ) * history_length, maxlen=history_length)
    for i in range(n_samples):
        trajectory = states[i, :]
        for time, state in it.islice(
                enumerate(trajectory), 0, time_horizon - 1):
            history_deque.appendleft(state)
            # Bug fix: numpy arrays are unhashable and cannot serve as dict
            # keys; a tuple also indexes a single policy entry below, where
            # an array would trigger fancy indexing.
            history = tuple(history_deque)
            history_counts[history] = history_counts.get(history, 0) + 1
            context = (history, actions[i, time])
            history_action_counts[context] = history_action_counts.get(
                context, 0) + 1
    # Bug fix: np.ndarray(shape=...) leaves memory uninitialised; unseen
    # histories must read as probability 0.
    naive_probabilities: Policy = np.zeros(
        shape=policy_shape(state_size, action_size, history_length))
    for (history, action), count in history_action_counts.items():
        naive_probabilities[history + (action, )] = float(count) / float(
            history_counts[history])
    return naive_probabilities
def sc_probability(history: History, action: Action, gamma: float,
                   positional_state_counts: dict,
                   positional_state_action_counts: dict,
                   history_length: int) -> float:
    """Gets the probability estimate of an action given a specific history.

    Each history position contributes the empirical P(action | state at that
    position), weighted geometrically by gamma; the result is renormalised so
    the weights over `history_length` positions sum to one.
    """
    weighted_sum = 0.0
    for position in range(history_length):
        state_key = (history[position], position)
        state_visits = positional_state_counts.get(state_key, 0.0)
        # Positions whose state was never observed contribute nothing.
        if state_visits > 0.0:
            action_visits = positional_state_action_counts.get(
                (history[position], position, action), 0.0)
            weighted_sum += (gamma ** position) * action_visits / state_visits
    return weighted_sum * (1.0 - gamma) / (1.0 - gamma ** history_length)
def sparsity_corrected_approx(states: np.ndarray, actions: np.ndarray,
                              rewards: np.ndarray, gamma: float,
                              lmdp) -> Policy:
    """ Approximates a policy using the sparsity corrected method.

    Instead of counting full histories (which are sparse), counts how often
    each state occurs at each history position, and how often each
    (state, position) pair co-occurs with each action; sc_probability then
    combines these counts with geometric gamma-weights into
    P(action | history) for every possible history.

    Args:
        states: (n_samples, time_horizon + 1) array of visited states.
        actions: (n_samples, time_horizon) array of actions taken.
        rewards: same shape as actions (unused by this estimator).
        gamma: geometric weight over history positions.
        lmdp: supplies history_length, state_size and action_size.
    """
    time_horizon = actions.shape[1]
    n_samples = actions.shape[0]
    history_length = lmdp.history_length
    state_size = lmdp.state_size
    action_size = lmdp.action_size
    assert rewards.shape == actions.shape
    assert states.shape[0] == n_samples
    assert states.shape[1] - 1 == time_horizon
    positional_state_counts: Dict[Tuple[State, Position], int] = {}
    positional_state_action_counts: Dict[Tuple[State, Position, Action],
                                         int] = {}
    # Most-recent state first, zero-padded at the start.
    # NOTE(review): the deque is not reset between sample trajectories, so
    # one trajectory's tail seeds the next sample's first histories --
    # confirm this is intended.
    history_deque: Deque[State] = deque(
        (0, ) * history_length, maxlen=history_length)
    for i in range(n_samples):
        trajectory = states[i, :]
        for time, state in it.islice(
                enumerate(trajectory), 0, time_horizon - 1):
            history_deque.appendleft(state)
            # Count every (state, position-in-history) pair, and the same
            # pair jointly with the action chosen at this step.
            for k, state_in_history in enumerate(history_deque):
                state_context: Tuple[State, Position] = (state_in_history, k)
                positional_state_counts[
                    state_context] = positional_state_counts.get(
                        state_context, 0) + 1
                state_action_context: Tuple[State, Position, Action] = (
                    state_in_history, k, actions[i, time])
                positional_state_action_counts[state_action_context] = \
                    positional_state_action_counts.get(
                        state_action_context, 0) + 1
    # Enumerate every possible (history..., action) tuple and fill in its
    # sparsity-corrected probability.
    history_sizes = it.repeat(list(range(state_size + 1)), history_length)
    history_action_sizes = it.chain(history_sizes, [list(range(action_size))])
    history_actions = it.product(*history_action_sizes)
    sc_history_action_probabilities = np.ndarray(
        shape=policy_shape(state_size, action_size, history_length))
    for history_action in history_actions:
        history = np.asarray(history_action[:-1])
        action = history_action[-1]
        sc_history_action_probabilities[history_action] = \
            sc_probability(history, action, gamma, positional_state_counts,
                           positional_state_action_counts, history_length)
    return sc_history_action_probabilities
| true |
0993ce7e889d6e89ea60fafa64150c63328fb5c2 | Python | xtracthub/xtract-maps | /border_extraction.py | UTF-8 | 7,270 | 3.234375 | 3 | [] | no_license | import numpy as np
import cv2
import logging
from contouring import pre_process_image
from coordinate_extraction import pixel_to_coords_map, coord_to_abs
# Setup for the debug logger
logging.basicConfig(format='%(asctime)s - %(filename)s - %(funcName)s - %('
'message)s', level=logging.DEBUG,
filename='debug.txt', filemode='w')
logger = logging.getLogger('logger')
def valid_contour(contour, img_dim, min_fraction=0.001, max_fraction=0.7):
    """Decide whether a contour's area is plausible for a shape in the map.

    Parameters:
        contour (list(tuple)): List of boundary points for a contour.
        img_dim (tuple): Image shape; only the first two entries are used.
        min_fraction (float): Contour must cover at least this fraction of
            the image area.
        max_fraction (float): Contour must cover at most this fraction of
            the image area.
    """
    height, width = img_dim[:2]
    total_area = float(height * width)
    area = cv2.contourArea(contour)
    # Accept only areas inside the [min_fraction, max_fraction] band.
    return min_fraction * total_area <= area <= max_fraction * total_area
def contour_approximation(contour, approximation=0.01):
    """Return a polygonal approximation of the given contour.

    approximation scales the contour's arc length to obtain the allowed
    error distance; a falsy value (None / 0) returns the contour unchanged.

    Return:
        (numpy array): Array of points for the contour approximation.
    """
    if approximation:
        tolerance = approximation * cv2.arcLength(contour, True)
        return cv2.approxPolyDP(contour, tolerance, True)
    return contour
def extract_borders(img, path_given=False, approximation=None, close=True,
                    min_fraction=0.001, max_fraction=0.7, debug=False):
    """Given an image, extracts contours from it, and returns those
    contours that fit the area bound criteria provided.

    Parameters:
        img (OpenCV image or file path): OpenCV image or file path to
            extract borders from.
        path_given (bool): Whether a file path is given in img.
        approximation (float): Error distance between approximation and
            original contour (None keeps contours exact).
        close (bool): Whether to close holes and imperfections in img.
        min_fraction (float): Minimum fraction of img that
            contour's area has to be greater than.
        max_fraction (float): Maximum fraction of img that
            contour's area has to be less than.
        debug (bool): Write the pre-processed image to disk and log it.

    Return:
        (list): List of approximated contours that fit the area bound
            criteria.
    """
    # Copy the input (or load it) so the caller's image stays untouched.
    cv_img = cv2.imread(img) if path_given else np.array(img)
    cv_img = pre_process_image(cv_img, close=close, whiten=False)
    if debug:
        cv2.imwrite('pre_processed_image.jpg', cv_img)
        logger.debug('Written pre_processed_image.jpg')
    ret, thresh = cv2.threshold(cv_img, 127, 255, 0)
    # NOTE(review): unpacks the two-value return of findContours, which
    # matches OpenCV >= 4.x (3.x returned three values) -- confirm the
    # pinned OpenCV version.
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    # Keep only contours inside the area band, optionally simplified.
    filtered_contours = [
        contour_approximation(x, approximation) for x in contours
        if valid_contour(x, cv_img.shape, min_fraction, max_fraction)
    ]
    return filtered_contours
def borders_to_coordinates(borders, pixel_to_coords, absolute=False):
    """Maps a list of pixel contours to a list of coordinate borders.

    Parameters:
        borders (list): List of contours.
        pixel_to_coords (func): Maps pixel (x, y) to (latitude, longitude).
        absolute (bool): When True each coordinate becomes a signed float;
            when False it stays a (value, direction) tuple.

    Return:
        (numpy array): Latitudes and longitudes for every border.
    """
    converted = []
    for contour in borders:
        # OpenCV contours wrap every vertex in an extra one-element layer.
        points = np.array([vertex[0] for vertex in contour])
        latlon_border = [pixel_to_coords(px, py) for px, py in points]
        if absolute:
            latlon_border = [
                (coord_to_abs(lat[0], lat[1], flip_lon=False, to_int=False),
                 coord_to_abs(lon[0], lon[1], flip_lon=False, to_int=False))
                for lat, lon in latlon_border
            ]
        converted.append(latlon_border)
    return np.array(converted)
def extract_borders_with_coordinates(img, path_given=False, approximation=None,
                                     absolute=False, debug=False):
    """Given an image, generates a pixel_to_coords map using the
    coordinate_extraction module, then extracts pixel contours from the
    image, and then maps these contours to coordinate borders.

    Note: The original image is left unchanged.

    Parameters:
        img (OpenCV image or file path): OpenCV image or file path to
            extract borders from.
        path_given (bool): Whether a file path is given in img.
        approximation (float): Error distance between approximation and
            original contour.
        absolute (bool): Returns positive or negative floats if true,
            returns a value-direction tuple if false.

    Return:
        (numpy array): Latitudes and longitudes of each border, or None on
            failure.
    """
    cv_img = cv2.imread(img) if path_given else np.array(img)
    try:
        pixel_to_coords = pixel_to_coords_map(cv_img, path_given=False,
                                              debug=debug)
        if debug:
            logger.debug('Generated pixel_to_coords map')
        borders = extract_borders(cv_img, path_given=False,
                                  approximation=approximation, debug=debug)
        return borders_to_coordinates(borders, pixel_to_coords,
                                      absolute=absolute)
    except RuntimeError:
        # Bug fix: logger.error(exc_info=True) omitted the required message
        # argument, so the logging call itself raised a TypeError and masked
        # the real failure.
        logger.error('Failed to extract borders with coordinates',
                     exc_info=True)
        return None
# Test code, please ignore
# if __name__ == '__main__':
# images = [
# # 'pub8_images/CAIBOX_2009_map.jpg',
# # 'pub8_images/GOMECC2_map.jpg',
# # 'pub8_images/EQNX_2015_map.jpg',
# # 'pub8_images/Marion_Dufresne_map_1991_1993.jpg',
# # 'pub8_images/P16S_2014_map.jpg',
# 'Oscar_Dyson_map.jpg'
# # 'pub8_images/Bigelow2015_map.jpg',
# #'testpic.png'
# # 'pub8_images/woce_a25.gif',
# # 'pub8_images_2/pub8.oceans.save.SAVE.jpg'
# ]
#
# for image in images:
# cv_img = cv2.imread(image)
# borders = extract_borders_with_coordinates(cv_img, path_given=False,
# approximation=0.01,
# debug=False, absolute=True)
# print('For image', image, '\n')
# for i, border in enumerate(borders):
# print('Border', i)
# print(border)
#
# for image in images:
# cv_img = cv2.imread(image)
# borders = extract_borders(cv_img, approximation=0.01)
# cv_img = cv2.drawContours(cv_img, borders, -1, (0,0,255), 4)
# img = Image.fromarray(cv_img)
# img.show()
| true |
2dcde9920fc8ffa7f8e6cebe0d3a83fe48fd7b3d | Python | vtt-project/VTT_vid_emotion | /src/loss_def.py | UTF-8 | 1,209 | 2.984375 | 3 | [
"MIT"
] | permissive | import torch
import torch.nn as nn
class emo_loss(nn.Module):
    """Emotion-classification loss: a lambda-weighted blend of a binary
    neutral / non-neutral term and a full per-emotion BCE term.

    Both inputs are (batch_size, 7) tensors (per the original authors'
    comments); the last column is the neutral class, e0 holds ground-truth
    targets and e1 holds raw logits.
    """

    def __init__(self, loss_lambda):
        # Bug fix: super(type(self), self).__init__() resolves `type(self)`
        # dynamically and recurses infinitely if this class is ever
        # subclassed; the zero-argument form follows the MRO correctly.
        super().__init__()
        # Balancing weight between the neutral term and the emotion term.
        self.loss_lambda = loss_lambda
        # Shared binary-cross-entropy-with-logits objective.
        self.bce_loss = nn.BCEWithLogitsLoss()

    def loss_neutral(self, e0, e1):
        """Cross-entropy on the neutral / non-neutral split only.

        The non-neutral column is built from the SUM of the ground-truth
        emotion columns but the MAX of the predicted logits.
        """
        e0_n = torch.cat((e0[:, [-1]], e0[:, :-1].sum(dim=1, keepdim=True)), dim=1)
        e1_n = torch.cat((e1[:, [-1]], e1[:, :-1].max(dim=1, keepdim=True)[0]), dim=1)
        return self.bce_loss(e1_n, e0_n)

    def loss_emotion(self, e0, e1):
        """Cross-entropy over all emotion classes."""
        return self.bce_loss(e1, e0)

    def forward(self, e0, e1):
        """Total loss: lambda * neutral term + (1 - lambda) * emotion term."""
        return (self.loss_lambda * self.loss_neutral(e0, e1)
                + (1. - self.loss_lambda) * self.loss_emotion(e0, e1))
| true |
3e3a7c9bfe649f292a57c5d924f5cc7c2dd2725c | Python | aanimesh23/ReturnToSleep | /src/checkers-python/StudentAI.py | UTF-8 | 5,438 | 3.125 | 3 | [] | no_license | from random import randint
from BoardClasses import Move
from BoardClasses import Board
#The following part should be completed by students.
#Students can modify anything except the class name and exisiting functions and varibles.
class StudentAI():
def __init__(self,col,row,p):
    # Board geometry and piece-row count, as supplied by the game runner.
    self.col = col
    self.row = row
    self.p = p
    self.board = Board(col,row,p)
    self.board.initialize_game()
    self.color = ''
    # Maps each player's colour to the opponent's (1 <-> 2).
    self.opponent = {1:2,2:1}
    # Default to colour 2; switched to 1 in get_move() when we move first.
    self.color = 2
    # Debug trace of considered/chosen moves (file truncated on each start).
    self.f = open("debug.txt", "w")
def heuristic_black(self):
    """Static board evaluation from Black's (player 1's) perspective;
    positive values favour Black.

    Kings always score 10. Non-king pieces score 7 on the half of the
    board facing the opponent and 5 on their own half, rewarding advance.

    NOTE(review): every cell whose .color is not 1 falls into the White
    branch -- if empty squares carry a distinct .color value they are
    scored for White too; confirm against the Board representation.
    """
    black_score = 0
    white_score = 0
    rows = self.board.board
    half = int(len(rows) / 2)
    for r, row in enumerate(rows):
        in_top_half = r < half
        for cell in row:
            if cell.color == 1:
                if cell.is_king == True:
                    black_score += 10
                elif in_top_half:
                    black_score += 5
                else:
                    black_score += 7
            else:
                if cell.is_king == True:
                    white_score += 10
                elif in_top_half:
                    white_score += 7
                else:
                    white_score += 5
    return black_score - white_score
def get_move(self,move):
if len(move) != 0:
self.board.make_move(move,self.opponent[self.color])
else:
self.color = 1
moves = self.board.get_all_possible_moves(self.color)
self.f.write("Curr Moves: " + str(moves) + '\n')
if len(moves) == 1 and len(moves[0]) == 1:
move = moves[0][0]
self.board.make_move(move,self.color)
return move
move = self.minimax(moves)
self.f.write("Chosen Move: " + str(move) + '\n')
# index = randint(0,len(moves)-1)
# inner_index = randint(0,len(moves[index])-1)
# move = moves[index][inner_index]
self.board.make_move(move,self.color)
return move
def minimax(self, moves):
dic_l1 = dict()
for peice in range(len(moves)):
for i in range(len(moves[peice])):
move = moves[peice][i]
self.board.make_move(move,self.color)
if self.board.is_win(self.color) == self.color:
self.board.undo()
return moves[peice][i]
l2_moves = self.board.get_all_possible_moves(self.opponent[self.color])
# print("Opponent Moves: \n peice: ",peice, "\n dir: ",i, "\nMoves\n", l2_moves)
dic_l2 = dict()
for opp_peice in range(len(l2_moves)):
for j in range(len(l2_moves[opp_peice])):
move = l2_moves[opp_peice][j]
self.board.make_move(move,self.opponent[self.color])
l3_moves = self.board.get_all_possible_moves(self.color)
dic_l3 = dict()
# print("L3 ",l3_moves)
for my_peice in range(len(l3_moves)):
flag = 0
for k in range(len(l3_moves[my_peice])):
move = l3_moves[my_peice][k]
self.board.make_move(move,self.color)
value = -1
if self.color == 1:
value = (self.board.black_count / (self.board.black_count + self.board.white_count)) * 100
else:
value = (self.board.white_count / (self.board.black_count + self.board.white_count)) * 100
key = str(my_peice) + ' ' + str(k)
# print(key, ' ', value)
dic_l3[key] = value
self.board.undo()
if self.board.is_win(self.color) == self.color:
flag = 1
break
if flag == 1:
break
if len(dic_l3) == 0:
key = str(opp_peice) + ' ' + str(j)
dic_l2[key] = int(0x40000)
self.board.undo()
else:
inverse = [(value, key) for key, value in dic_l3.items()]
l2_value = max(inverse)[0]
key = str(opp_peice) + ' ' + str(j)
dic_l2[key] = l2_value
self.board.undo()
if len(dic_l2) == 0:
key = str(peice) + ' ' + str(i)
dic_l1[key] = int(-0x40000)
self.board.undo()
else:
inverse = [(value, key) for key, value in dic_l2.items()]
l1_value = min(inverse)[0]
key = str(peice) + ' ' + str(i)
dic_l1[key] = l1_value
self.board.undo()
inverse = [(value, key) for key, value in dic_l1.items()]
l0_value = max(inverse)[1]
# print(dic_l1)
# print(l0_value)
x,y = l0_value.split(' ')
return moves[int(x)][int(y)]
| true |
f9cbeaa6d4481b9599feb947224eab534ff3bdd3 | Python | Rosovskyy/ellipticCurvePoints | /ellipticCurve.py | UTF-8 | 3,391 | 3.640625 | 4 | [] | no_license | class Helper:
@staticmethod
def modularSqrt(s, mod):
if Helper.multiplicative(s, mod) != 1 or s == 0:
return 0
part, number = mod - 1, 0
while part % 2 == 0:
part /= 2
number += 1
nWithLegend = 2
while Helper.multiplicative(nWithLegend, mod) != -1:
nWithLegend += 1
guess = pow(s, (int(s) + 1) // 2, mod)
fudgeFac = pow(s, int(s), mod)
power = pow(nWithLegend, int(s), mod)
exponent = number
while True:
t, m = fudgeFac, 0
for m in range(exponent):
if t == 1:
break
t = pow(t, 2, mod)
if m == 0:
return guess
gs = pow(power, 2 ** (exponent - m - 1), mod)
g = (gs * gs) % mod
guess = (guess * gs) % mod
fudgeFac = (power * g) % mod
exponent = m
@staticmethod
def multiplicative(a, p): # legend symbol
ls = pow(a, (p - 1) // 2, p)
return ls if ls != p - 1 else -1
@staticmethod
def modOfValue(a, m): # mod of inverse value
g, x, y = Helper.egcd(a % m, m)
if g != 1:
print("x is not on the curve.")
return
else:
return x % m
@staticmethod
def egcd(a, b): # Euclidian algo
if a == 0:
return b, 0, 1
else:
g, y, x = Helper.egcd(b % a, a)
return g, x - (b // a) * y, y
class EllipticCurve:
    """Parameters of the curve y^2 = x^3 + a*x + b over the integers mod `mod`."""
    def __init__(self, a, b, mod):
        self.mod = mod
        self.a = a
        self.b = b
class PointOnTheEllipticCurve:
    """A point (x, y) on an EllipticCurve, with chord-and-tangent addition
    and scalar multiplication.

    The point at infinity is not modeled: addition() returns None when the
    right-hand side has no square root (y2 == 0 sentinel).
    """
    def __init__(self, x, y, curve):
        self.x = x
        self.y = y
        self.curve = curve
        self.prepareData()
    def prepareData(self):
        """Verify x lies on the curve by solving y^2 = x^3 + a*x + b (mod p);
        raises when no square root exists."""
        self.y1 = Helper.modularSqrt((self.x ** 3 + self.curve.a * self.x + self.curve.b) % self.curve.mod,
                                     self.curve.mod)
        if self.y1 == 0:
            raise Exception("No solution for y1 on curve.")
    def addition(self, point):
        """Return self + point using the chord (distinct x) or tangent
        (doubling) formula; returns None when `point` fails the residue check."""
        z = (point.x ** 3 + self.curve.a * point.x + self.curve.b) % self.curve.mod
        y2 = Helper.modularSqrt(z, self.curve.mod)
        if y2 != 0 and self.x != point.x:
            # Slope of the chord through the two distinct points.
            s = (point.y - self.y) * Helper.modOfValue(point.x - self.x, self.curve.mod)
            x3 = (s ** 2 - point.x - self.x) % self.curve.mod
            y3 = (s * (point.x - x3) - point.y) % self.curve.mod
            return PointOnTheEllipticCurve(x3, y3, self.curve)
        if y2 != 0 and self.x == point.x:
            # Tangent-line (point doubling) formula.
            s = ((3 * self.x ** 2) + self.curve.a) * Helper.modOfValue(2 * self.y, self.curve.mod)
            x2 = (s ** 2 - 2 * self.x) % self.curve.mod
            y2 = ((s * (self.x - x2)) - self.y) % self.curve.mod
            return PointOnTheEllipticCurve(x2, y2, self.curve)
    def multiplication(self, k):
        """Return k * self (k >= 1) by repeated addition.

        Bug fix: the previous loop re-assigned `point = self.addition(self)`
        on every iteration, so it always returned 2*self regardless of k.
        """
        point = self
        for _ in range(k - 1):
            point = point.addition(self)
        return point
def main():
    """Demo: add two points on y^2 = x^3 + 7 over GF(37) and scale one of them."""
    curve = EllipticCurve(0, 7, 37)
    p = PointOnTheEllipticCurve(6, 1, curve)
    q = PointOnTheEllipticCurve(3, 16, curve)
    added = p.addition(q)
    print("x: {}, y: {}".format(added.x, added.y))
    scaled = p.multiplication(7)
    print("x: {}, y: {}".format(scaled.x, scaled.y))
# Only run the demo when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| true |
2bb30a2628e7ff3e3f5053185a9eae5a5d80e998 | Python | mdyousuf77/python | /fibonacci.py | UTF-8 | 410 | 4 | 4 | [] | no_license | #program to display fibonacci series upto nth term
# Display the Fibonacci series up to the nth term.
n = int(input('enter the limit:'))  # number of terms requested
first, second = 0, 1                # seed values of the series
if n <= 0:
    # Reject non-positive term counts.
    print("enter a positive integer")
elif n == 1:
    print("fibonacci series:", first)
else:
    print("fibonacci series:")
    for _ in range(n):
        print(first)
        first, second = second, first + second
| true |
9c552b5a57047f180ab0ad5f3959d9867590b121 | Python | prajeet-oza/High-Performance-Computing | /md/lammps_plot.py | UTF-8 | 1,247 | 2.765625 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
# Extract the thermodynamic output table from a LAMMPS log and plot it.
# Fix: file handles were previously left unclosed; use context managers.
start = 71     # first line of the thermo table inside log.lammps
length = 1000  # number of data rows to extract
with open('log.lammps', 'r') as log_file:
    lines = log_file.readlines()
# +2 extra lines -- presumably header/trailer rows of the table; confirm
# against the actual log format.
data_lmp = lines[start:start + length + 2]
with open('lammps_data', 'w') as out_file:
    out_file.writelines(data_lmp)
# Whitespace-separated table; raw string avoids the invalid '\s' escape.
data_ = pd.read_table('lammps_data', sep=r'\s+')
# Keep every 10th step to de-clutter the plot.
data_500 = data_[data_['Step'] % 10 == 0]
plt.figure()
plt.plot(data_500['Step'], data_500['PotEng'], label='PE/N', linewidth=1.5)
plt.plot(data_500['Step'], data_500['KinEng'], label='KE/N', linewidth=1.5)
plt.plot(data_500['Step'], data_500['TotEng'], label='TE/N', linewidth=1.5)
plt.plot(data_500['Step'], data_500['Temp'], label='Temperature', linewidth=1.5)
plt.plot(data_500['Step'], data_500['Press'], label='Pressure', linewidth=1.5)
plt.title('LAMMPS Output', fontsize=14)
plt.ylabel('Properties', fontsize=14)
plt.xlabel('Steps', fontsize=14)
plt.legend()
plt.show()
# print('Enter the required number of plots: ')
# N = int(input())
# plt.figure()
# print('Enter x axis: ')
# x_int = int(input())
# x = header[x_int-1]
# for i in range(N):
# print('Enter yi axis: ')
# y_int = int(input())
# y = header[y_int-1]
# plt.plot(data_[x], data_[y], label = y)
# plt.legend()
# plt.show() | true |
e93fb31f77ccd91b7b115d50e0d2c5a489cb4a7f | Python | janroddy/physics-ws | /physicsproblem_solution.py | UTF-8 | 1,247 | 3.609375 | 4 | [] | no_license | from math import sqrt
from matplotlib import pyplot
# A block of mass m_1 slides down a frictionless ramp of height h and
# collides inelastically with a block of m_2 that is initially at rest.
# The solver function uses an energy calculation to find the speed of
# m_1 at the bottom of the ramp, and then uses a momentum calculation to
# find the final speed of the blocks after they stick together.
# Lastly, we can use pyplot to vary one of the parameters (e.g., the
# height of the ramp h, while holding the other parameters constant)
# and see how the ramp height affects the final velocity of the system
def solver(m_1, m_2, h):
    """Final speed of the combined blocks after m_1 slides down a frictionless
    ramp of height h and sticks to a resting block m_2 (inelastic collision)."""
    g = 9.8
    # Energy conservation on the ramp: m*g*h = (1/2)*m*v^2.
    potential = m_1 * g * h
    v_bottom = sqrt(2 * potential / m_1)
    # Momentum conservation through the perfectly inelastic collision.
    momentum = m_1 * v_bottom + m_2 * 0
    return momentum / (m_1 + m_2)
# Sweep each parameter independently (holding the others fixed) and plot
# the resulting final speed as a scatter of colored points.
for h in range(1,100,2):
    pyplot.plot(h,solver(5,5,h),'ro')       # red: vary ramp height h
for m_2 in range(1,100,2):
    pyplot.plot(m_2,solver(5,m_2,20),'bo')  # blue: vary resting mass m_2
for m_1 in range(1,100,2):
    pyplot.plot(m_1,solver(m_1,5,20),'go')  # green: vary sliding mass m_1
pyplot.show()
| true |
8247991d23b03577162bb500c211c77eb6f72ab3 | Python | hanzhenlei767/NLP_Learn | /文本匹配/MIX/代码/data_process.py | UTF-8 | 18,383 | 2.671875 | 3 | [] | no_license | import os
import numpy as np
import pandas as pd
import tensorflow as tf
import jieba
import re
import copy
import pickle
import json
import nltk
from nltk.corpus import stopwords
from gensim.models import word2vec
from gensim.models import KeyedVectors
#nltk.download('averaged_perceptron_tagger')
def clean_str(text):
    """Normalize a raw essay string: expand contractions, repair fused or
    dot-joined words, strip noise characters, and lowercase.

    The substitutions are order-dependent; do not reorder them.
    """
    text = str(text)
    text = re.sub(r"\'s", " is", text)# known failure case: "it not only our country\'s droom but also is our everyone\'s"
    text = re.sub(r"\'ve", " have ", text)
    text = re.sub(r"'t", " not ", text)
    text = re.sub(r"'m", " am ", text)
    text = re.sub(r"\'re", " are ", text)
    text = re.sub(r"\'d", " would ", text)
    text = re.sub(r"\'ll", " will ", text)
    # Split runs of consecutive periods that glue sentences together.
    connect_list = re.findall(r"\.\.+[A-Z]", text, flags=0)
    for i in connect_list:
        second = re.findall(r"[A-Z]", i, flags=0)
        text = text.replace(i," . "+second[0])
    connect_list = re.findall(r"\.\.+\s[A-Z]", text, flags=0)
    for i in connect_list:
        second = re.findall(r"[A-Z]", i, flags=0)
        text = text.replace(i," . "+second[0])
    connect_list = re.findall(r"\.\.+\s[a-z0-9]", text, flags=0)
    for i in connect_list:
        second = re.findall(r"\s[a-z0-9]", i, flags=0)
        text = text.replace(i,second[0])
    connect_list = re.findall(r"\.\.+[a-z0-9]", text, flags=0)
    for i in connect_list:
        second = re.findall(r"[a-z0-9]", i, flags=0)
        text = text.replace(i," "+second[0])
    # Insert spaces around sentence punctuation.
    text = text.replace("?"," ? ")
    text = text.replace(","," , ")
    text = text.replace("."," . ")
    text = text.replace("!"," ! ")
    # Split a lowercase word fused with a following capitalized word.
    connect_list = re.findall(r"\s[a-z]+[A-Z][a-z]*", text, flags=0)
    for i in connect_list:
        first = re.match(r"^[a-z]*", i[1:], flags=0)
        second = re.findall(r"[A-Z][a-z]*", i[1:], flags=0)
        text = re.sub(i, " "+ first.group() + " . " + second[0], text)
    # Split two fused capitalized words, e.g. "MadamI'm".
    connect_list = re.findall(r"\s[A-Z][a-z]+[A-Z][a-z]*", text, flags=0)
    for i in connect_list:
        first = re.match(r"^[A-Z][a-z]*", i[1:], flags=0)
        second = re.findall(r"[A-Z][a-z]*", i[1:], flags=0)
        text = re.sub(i, " "+ first.group() + " . " + second[1], text)
    # Drop garbage before the first capitalized word at the start of the text.
    pattern = r"[A-Z][a-z]+"
    pattern = re.compile(pattern)
    res = pattern.search(text)
    if res:
        text = text[res.start():]
    # Remove recognition noise such as stray hyphens: "Dear Sir or Madam, - I am Li Hua,"
    text = re.sub(r"-", " ", text)
    # Remove garbled / mis-encoded symbols.
    text = re.sub(r"[^A-Za-z0-9^,!.\/'+-=\s\"\?]", "", text)
    # Collapse runs of whitespace into a single space.
    text = re.sub(r"\s+", " ", text)
    # Known unfixed cases: fused lowercase words ("manysomething", "schoolabout")
    # and single words split in two ("fri -endships", "be a go od journlist").
    # Finally lowercase everything.
    text = text.lower()
    return text
# File paths.
path = 'data/'
original_path = path+'original_data/'
assist_path = path+'assist_data/'
mid_path = path+'mid_data/'
# Training / validation / test data.
train_data_path = original_path+'train_data.csv'
dev_data_path = original_path+'dev_data.csv'
test_data_path = original_path+'test_data.csv'
# External resource used for analysing the training data (pretrained vectors).
google_word2vec_path = assist_path+'GoogleNews-vectors-negative300.bin'
'''
def read_data(data_path):
#读取训练数据
data_frame = pd.read_csv(data_path, header=0, names=['location','result','fw1','fw2', 'tt_label', 'dd_label'])
return data_frame
train_data = read_data(train_data_path)
dev_data = read_data(dev_data_path)
test_data = read_data(test_data_path)
print("train data shape:")
print(train_data.shape)
print(train_data.head())
print("dev data shape:")
print(dev_data.shape)
print(dev_data.head())
print("test data shape:")
print(test_data.shape)
print(test_data.head())
'''
#处理数据
'''
def preprocess_data(data_all):
data_processed = pd.DataFrame()
# 加载停用词
stopwords_list = stopwords.words('english')
for index, row in data_all.iterrows():
for col_name in ['result','fw1','fw2']:
# 分词+去除停用词
text = clean_str(row[col_name])
text_list = nltk.word_tokenize(text)
filtered = [w for w in text_list if(w not in stopwords_list and w != ' ')]
pos_tags = nltk.pos_tag(filtered)
#分词之后的DataFrame
data_processed.at[index, col_name+"_word"] = " ".join([k[0] for k in pos_tags])
data_processed.at[index, col_name+"_pos"] = " ".join([k[1] for k in pos_tags])
data_processed.at[index, "location"] = row["location"]
data_processed.at[index, "tt_label"] = row["tt_label"]
return data_processed
'''
'''
构建词表
'''
'''
train_data_processed = preprocess_data(train_data)
dev_data_processed = preprocess_data(dev_data)
test_data_processed = preprocess_data(test_data)
print("train data processed shape:")
print(train_data_processed.shape)
print(train_data_processed.head())
'''
'''
def merge_words(train_all):
train_data_process = copy.deepcopy(train_all)
print('merge word process...')
texts = []
texts.extend(train_data_process['result_word'].tolist())
texts.extend(train_data_process['fw1_word'].tolist())
texts.extend(train_data_process['fw2_word'].tolist())
print(len(texts))
return texts
train_texts = merge_words(train_data_processed)
dev_texts = merge_words(dev_data_processed)
test_texts = merge_words(test_data_processed)
texts = []
texts.extend(train_texts)
texts.extend(dev_texts)
texts.extend(test_texts)
def vocab_build(texts,min_count = -float("inf")):
"""
:param min_count: 最小词频
:return: word2id = {'<PAD>':0, 'word1':id_1, ……, '<UNK>':id_n}
"""
word2id_ct = {}
for word_list in texts:
for word in word_list.split():
if word not in word2id_ct:
word2id_ct[word] = [len(word2id_ct)+1, 1]#'词':[词序,词频]
else:
word2id_ct[word][1] += 1#词频加一
print("len(word2id_ct):", len(word2id_ct))
low_freq_words = []
for word, [word_id, word_freq] in word2id_ct.items():
if word_freq < min_count:
low_freq_words.append(word)
for word in low_freq_words:
del word2id_ct[word] # 删除低频词
word2id = {}
new_id = 1
for word in word2id_ct.keys():
word2id[word] = new_id # word2id = {'<PAD>':0, 'word1':id_1, ......, '<UNK>':id_n}
new_id += 1
word2id['<UNK>'] = new_id
word2id['<PAD>'] = 0
print("len(word2id):", len(word2id))
return word2id
word2id = vocab_build(texts)
json.dump(word2id, open(mid_path+"word2id.json", 'w',encoding='utf-8'), indent=4)
'''
# Load the previously built vocabulary mapping word -> integer id.
# NOTE(review): the file handle from open() is never closed here.
word2id=json.load(open(mid_path+"word2id.json",encoding='utf-8'))
'''
构建词性表
'''
'''
def merge_pos(train_all):
train_data_process = copy.deepcopy(train_all)
print('merge pos process...')
pos = []
pos.extend(train_data_process['result_pos'].tolist())
pos.extend(train_data_process['fw1_pos'].tolist())
pos.extend(train_data_process['fw2_pos'].tolist())
print(len(pos))
return pos
train_pos = merge_pos(train_data_processed)
dev_pos = merge_pos(dev_data_processed)
test_pos = merge_pos(test_data_processed)
pos = []
pos.extend(train_pos)
pos.extend(dev_pos)
pos.extend(test_pos)
def pos_build(pos,min_count = -float("inf")):
"""
:param min_count: 最小词频
:return: word2id = {'<PAD>':0, 'word1':id_1, ……, '<UNK>':id_n}
"""
pos2id_ct = {}
for pos_list in pos:
for single_pos in pos_list.split():
if single_pos not in pos2id_ct:
pos2id_ct[single_pos] = [len(pos2id_ct)+1, 1]#'词':[词序,词频]
else:
pos2id_ct[single_pos][1] += 1#词频加一
print("len(pos2id_ct):", len(pos2id_ct))
low_freq_pos = []
for single_pos, [pos_id, pos_freq] in pos2id_ct.items():
if pos_freq < min_count:
low_freq_pos.append(single_pos)
for single_pos in low_freq_pos:
del pos2id_ct[single_pos] # 删除低频词
pos2id = {}
new_id = 1
for single_pos in pos2id_ct.keys():
pos2id[single_pos] = new_id # pos2id = {'<PAD>':0, 'word1':id_1, ......, '<UNK>':id_n}
new_id += 1
pos2id['<UNK>'] = 0
#pos2id['<PAD>'] = 0
print("len(pos2id):", len(pos2id))
return pos2id
pos2id = pos_build(pos)
json.dump(pos2id, open(mid_path+"pos2id.json", 'w',encoding='utf-8'), indent=4)
'''
# Load the previously built part-of-speech tag -> integer id mapping.
# NOTE(review): the file handle from open() is never closed here.
pos2id=json.load(open(mid_path+"pos2id.json",encoding='utf-8'))
'''
构建idf
'''
'''
result_word = []
result_word.extend(train_data_processed["result_word"].tolist())
result_word.extend(dev_data_processed["result_word"].tolist())
result_word.extend(test_data_processed["result_word"].tolist())
def get_idf(data_all,word2id):
word2idf = {}
wordid2idf = {}
all_keys = list(word2id.keys())[:-2]#最后两个是<PAD>,<UNK>,刨除<PAD>,<UNK>.
#print(all_keys)
for i in all_keys:#词汇表
num = 0
for k in data_all:#遍历作文
if i in k.split():
num += 1
word2idf[i] = np.log10(float(len(data_all)/(num+1)))
wordid2idf[word2id[i]] = word2idf[i]
return word2idf,wordid2idf
word2idf, wordid2idf = get_idf(result_word,word2id)
json.dump(word2idf, open(mid_path+"word2idf.json", 'w',encoding='utf-8'), indent=4)
word2idf=json.load(open(mid_path+"word2idf.json",encoding='utf-8'))
def get_word_idf(data):
idf = []
for i in data.split():
idf.append(word2idf[i])
idf = " ".join([str(k) for k in idf])
return idf
train_data_processed["result_idf"] = train_data_processed["result_word"].apply(get_word_idf)
train_data_processed["fw1_idf"] = train_data_processed["fw1_word"].apply(get_word_idf)
train_data_processed["fw2_idf"] = train_data_processed["fw2_word"].apply(get_word_idf)
dev_data_processed["result_idf"] = dev_data_processed["result_word"].apply(get_word_idf)
dev_data_processed["fw1_idf"] = dev_data_processed["fw1_word"].apply(get_word_idf)
dev_data_processed["fw2_idf"] = dev_data_processed["fw2_word"].apply(get_word_idf)
test_data_processed["result_idf"] = test_data_processed["result_word"].apply(get_word_idf)
test_data_processed["fw1_idf"] = test_data_processed["fw1_word"].apply(get_word_idf)
test_data_processed["fw2_idf"] = test_data_processed["fw2_word"].apply(get_word_idf)
print("train data processed idf shape:")
print(train_data_processed.shape)
print(train_data_processed.head())
train_data_processed.to_csv(mid_path+"train_data_processed.csv", index=False)
dev_data_processed.to_csv(mid_path+"dev_data_processed.csv", index=False)
test_data_processed.to_csv(mid_path+"test_data_processed.csv", index=False)
'''
# Reload the preprocessed train/dev/test splits from the mid_data CSVs.
train_data_processed = pd.read_csv(mid_path+"train_data_processed.csv", \
                        header=0, names=['result_word','result_pos','fw1_word','fw1_pos', \
                        'fw2_word', 'fw2_pos','location','tt_label', 'result_idf', 'fw1_idf', 'fw2_idf'])
dev_data_processed = pd.read_csv(mid_path+"dev_data_processed.csv", \
                        header=0, names=['result_word','result_pos','fw1_word','fw1_pos', \
                        'fw2_word', 'fw2_pos','location','tt_label', 'result_idf', 'fw1_idf', 'fw2_idf'])
test_data_processed = pd.read_csv(mid_path+"test_data_processed.csv", \
                        header=0, names=['result_word','result_pos','fw1_word','fw1_pos', \
                        'fw2_word', 'fw2_pos','location','tt_label', 'result_idf', 'fw1_idf', 'fw2_idf'])
print("train data processed idf shape:")
print(train_data_processed.shape)
print(train_data_processed.head())
'''
构建预训练嵌入词向量
'''
'''
def google_wordvec_embedding(word2id):
"""
:param min_count: 最小词频
:return: word2id = {'<PAD>':0, 'word1':id_1, ……, '<UNK>':id_n}
"""
print('reading word embedding data...')
embedding_matrix = np.random.randn(len(word2id),300)
from gensim.models import KeyedVectors
wv_from_text = KeyedVectors.load_word2vec_format(google_word2vec_path, binary=True)
np.save(mid_path+"Google_word.npy",list(wv_from_text.wv.vocab.keys()))
count = 0
unregistered_word = []
for word in word2id:
if word in wv_from_text.wv.vocab.keys():
embedding_matrix[word2id[word]] = wv_from_text[word]
count += 1
else:
unregistered_word.append(word)
print(len(word2id),len(wv_from_text.vocab),count)
print(len(unregistered_word))
np.save(mid_path+"OOV_words.npy",unregistered_word)
return embedding_matrix
embedding_matrix = google_wordvec_embedding(word2id)
#embedding_matrix = np.random.randn(len(word2id),300)
np.save(mid_path+"embedding_matrix.npy",embedding_matrix)
Google_word = np.load(mid_path+"Google_word.npy")
'''
# Vocabulary words that have no pretrained Google News vector (OOV words).
unregistered_word = np.load(mid_path+"OOV_words.npy")
'''
one-hot标签
'''
def to_one_hot(labels, tag_nums):
    """One-hot encode `labels` into a (len(labels), tag_nums) float32 array."""
    encoded = np.zeros((len(labels), tag_nums), dtype=np.float32)
    for row, label in enumerate(labels):
        encoded[row][int(label)] = 1.
    return np.array(encoded)
# Fixed sequence lengths; every essay column is resampled to these lengths
# inside word_to_seq_num().
x1_max_len = 150  # 'result' column
x2_max_len = 150  # 'fw1' column
x3_max_len = 150  # 'fw2' column
def word_to_seq_num(train_data_processed,word2id):
    """Convert word/pos/idf columns of a processed DataFrame into fixed-length
    id sequences.

    For each of the three essay columns ('result', 'fw1', 'fw2'), words with
    no pretrained vector (in the global `unregistered_word`) are dropped,
    then the remaining sequence is resampled to the fixed length
    (x1/x2/x3_max_len) by nearest-neighbour indexing with factor alpha.
    Empty sequences are padded with <UNK> ids / zeros.  Returns 12 parallel
    lists: (word, pos, idf, position) ids for each of the three columns.
    Relies on globals: word2id via argument, pos2id, unregistered_word,
    x1_max_len, x2_max_len, x3_max_len.
    """
    train_seq_num = copy.deepcopy(train_data_processed)  # NOTE(review): unused copy
    yw_word_all = []
    yw_pos_all = []
    yw_idf_all = []
    yw_position_all = []
    fw1_word_all = []
    fw1_pos_all = []
    fw1_idf_all = []
    fw1_position_all = []
    fw2_word_all = []
    fw2_pos_all = []
    fw2_idf_all = []
    fw2_position_all = []
    for index, row in train_data_processed.iterrows():
        # Process the three text columns of this row.
        for col_name in ['result_word','fw1_word','fw2_word']:
            tmp_word_list = []
            tmp_pos_list = []
            tmp_idf_list = []
            tmp_position_list = []
            word_list = str(row[col_name]).split()
            pos_list = str(row[col_name[:-4]+'pos']).split()
            idf_list = str(row[col_name[:-4]+'idf']).split()
            # Keep only words that have a pretrained embedding.
            # NOTE(review): this inner `index` shadows the iterrows() variable.
            for index in range(len(word_list)):
                if word_list[index] not in unregistered_word:
                    tmp_word_list.append(word_list[index])
                    tmp_pos_list.append(pos_list[index])
                    tmp_idf_list.append(idf_list[index])
            # 1-based positions of the surviving tokens.
            tmp_position_list = [i+1 for i in range(len(tmp_word_list))]
            if col_name == "result_word":
                if len(tmp_word_list) != 0:
                    # Stretch/compress factor mapping target index -> source index.
                    alpha = (1.0 * x1_max_len / len(tmp_word_list))
                else:
                    # Empty text: pad everything with <UNK> / zeros.
                    tmp_word_list = ['<UNK>'] * x1_max_len
                    tmp_pos_list = ['<UNK>'] * x1_max_len
                    tmp_idf_list = [0] * x1_max_len
                    tmp_position_list = [0] * x1_max_len
                    alpha = 1.0
                # Nearest-neighbour resampling to the fixed length.
                yw_word_all.append([word2id[tmp_word_list[int(i / alpha)]] for i in range(x1_max_len)])
                yw_pos_all.append([pos2id[tmp_pos_list[int(i / alpha)]] for i in range(x1_max_len)])
                yw_idf_all.append([tmp_idf_list[int(i / alpha)] for i in range(x1_max_len)])
                yw_position_all.append([tmp_position_list[int(i / alpha)] for i in range(x1_max_len)])
            elif col_name == "fw1_word":
                if len(tmp_word_list) != 0:
                    alpha = (1.0 * x2_max_len / len(tmp_word_list))
                else:
                    tmp_word_list = ['<UNK>'] * x2_max_len
                    tmp_pos_list = ['<UNK>'] * x2_max_len
                    tmp_idf_list = [0] * x2_max_len
                    tmp_position_list = [0] * x2_max_len
                    alpha = 1.0
                fw1_word_all.append([word2id[tmp_word_list[int(i / alpha)]] for i in range(x2_max_len)])
                fw1_pos_all.append([pos2id[tmp_pos_list[int(i / alpha)]] for i in range(x2_max_len)])
                fw1_idf_all.append([tmp_idf_list[int(i / alpha)] for i in range(x2_max_len)])
                fw1_position_all.append([tmp_position_list[int(i / alpha)] for i in range(x2_max_len)])
            else:
                if len(tmp_word_list) != 0:
                    alpha = (1.0 * x3_max_len / len(tmp_word_list))
                else:
                    tmp_word_list = ['<UNK>'] * x3_max_len
                    tmp_pos_list = ['<UNK>'] * x3_max_len
                    tmp_idf_list = [0] * x3_max_len
                    tmp_position_list = [0] * x3_max_len
                    alpha = 1.0
                fw2_word_all.append([word2id[tmp_word_list[int(i / alpha)]] for i in range(x3_max_len)])
                fw2_pos_all.append([pos2id[tmp_pos_list[int(i / alpha)]] for i in range(x3_max_len)])
                fw2_idf_all.append([tmp_idf_list[int(i / alpha)] for i in range(x3_max_len)])
                fw2_position_all.append([tmp_position_list[int(i / alpha)] for i in range(x3_max_len)])
    return yw_word_all,yw_pos_all,yw_idf_all,yw_position_all, \
        fw1_word_all,fw1_pos_all,fw1_idf_all,fw1_position_all, \
        fw2_word_all,fw2_pos_all,fw2_idf_all,fw2_position_all
print("data process start:")
train_seq = word_to_seq_num(train_data_processed,word2id)
print("train over")
dev_seq = word_to_seq_num(dev_data_processed,word2id)
print("dev over")
test_seq = word_to_seq_num(test_data_processed,word2id)
print("test over")
np.save(mid_path+"train_yw_word.npy",np.array(train_seq[0]))
np.save(mid_path+"train_yw_pos.npy",np.array(train_seq[1]))
np.save(mid_path+"train_yw_idf.npy",np.array(train_seq[2]))
np.save(mid_path+"train_yw_position.npy",np.array(train_seq[3]))
np.save(mid_path+"train_fw1_word.npy",np.array(train_seq[4]))
np.save(mid_path+"train_fw1_pos.npy",np.array(train_seq[5]))
np.save(mid_path+"train_fw1_idf.npy",np.array(train_seq[6]))
np.save(mid_path+"train_fw1_position.npy",np.array(train_seq[7]))
np.save(mid_path+"train_fw2_word.npy",np.array(train_seq[8]))
np.save(mid_path+"train_fw2_pos.npy",np.array(train_seq[9]))
np.save(mid_path+"train_fw2_idf.npy",np.array(train_seq[10]))
np.save(mid_path+"train_fw2_position.npy",np.array(train_seq[11]))
np.save(mid_path+"train_labels.npy",np.array(to_one_hot(np.array(train_data_processed['tt_label'].tolist())-1, 5)))
np.save(mid_path+"dev_yw_word.npy",np.array(dev_seq[0]))
np.save(mid_path+"dev_yw_pos.npy",np.array(dev_seq[1]))
np.save(mid_path+"dev_yw_idf.npy",np.array(dev_seq[2]))
np.save(mid_path+"dev_yw_position.npy",np.array(dev_seq[3]))
np.save(mid_path+"dev_fw1_word.npy",np.array(dev_seq[4]))
np.save(mid_path+"dev_fw1_pos.npy",np.array(dev_seq[5]))
np.save(mid_path+"dev_fw1_idf.npy",np.array(dev_seq[6]))
np.save(mid_path+"dev_fw1_position.npy",np.array(dev_seq[7]))
np.save(mid_path+"dev_fw2_word.npy",np.array(dev_seq[8]))
np.save(mid_path+"dev_fw2_pos.npy",np.array(dev_seq[9]))
np.save(mid_path+"dev_fw2_idf.npy",np.array(dev_seq[10]))
np.save(mid_path+"dev_fw2_position.npy",np.array(dev_seq[11]))
np.save(mid_path+"dev_labels.npy",np.array(to_one_hot(np.array(dev_data_processed['tt_label'].tolist())-1, 5)))
np.save(mid_path+"test_yw_word.npy",np.array(test_seq[0]))
np.save(mid_path+"test_yw_pos.npy",np.array(test_seq[1]))
np.save(mid_path+"test_yw_idf.npy",np.array(test_seq[2]))
np.save(mid_path+"test_yw_position.npy",np.array(test_seq[3]))
np.save(mid_path+"test_fw1_word.npy",np.array(test_seq[4]))
np.save(mid_path+"test_fw1_pos.npy",np.array(test_seq[5]))
np.save(mid_path+"test_fw1_idf.npy",np.array(test_seq[6]))
np.save(mid_path+"test_fw1_position.npy",np.array(test_seq[7]))
np.save(mid_path+"test_fw2_word.npy",np.array(test_seq[8]))
np.save(mid_path+"test_fw2_pos.npy",np.array(test_seq[9]))
np.save(mid_path+"test_fw2_idf.npy",np.array(test_seq[10]))
np.save(mid_path+"test_fw2_position.npy",np.array(test_seq[11]))
np.save(mid_path+"test_labels.npy",np.array(to_one_hot(np.array(test_data_processed['tt_label'].tolist())-1, 5)))
| true |
98e963718746dc4e7594c847545543cc9298f7c7 | Python | multavici/DSR-Bird-Song | /birdsong/data_management/utils/signal_extraction.py | UTF-8 | 4,713 | 3.15625 | 3 | [] | no_license | """
The following functions serve to highlight sections of bird vocalizations in a
audio recording. They are based on the methodologies described in Lasseck 2013
and Sprengler 2017.
Usage:
import the function "signal_timestamps" from this script and pass it a path to
a audio file. It will then return the total duration, the summed duration of
foreground bird-vocalizations and a json of start and stop timestamps for
sections with bird vocalizations.
"""
import librosa
import librosa.display
import numpy as np
from scipy.ndimage import morphology
import json
def normalized_stft(audio):
    """Magnitude STFT (hann window, n_fft=2048, default 75% overlap),
    min-max normalized into the 0-1 range."""
    magnitude = np.abs(librosa.stft(audio, n_fft=2048))
    shifted = magnitude - magnitude.min()
    return shifted / shifted.max()
def median_mask(spec, threshold, inv=False):
    """Binary mask of cells exceeding `threshold` times both their row and
    column medians; `inv=True` returns the complementary mask."""
    rows = np.expand_dims(np.median(spec, axis=1), axis=1)
    cols = np.expand_dims(np.median(spec, axis=0), axis=0)
    # Broadcast the median vectors across the spectrogram.
    above = (spec > threshold * rows) & (spec > threshold * cols)
    if inv:
        return np.where(above, 0, 1)
    return np.where(above, 1, 0)
def morphological_filter(mask):
    """ Morphological operation to enhance signal segments. Literature reports at
    least two different methods:
    - Lasseck 2013: Closing followed by dilation
    - Sprengler 2017: Opening
    We experimentally developed our own approach: Opening followed by another dilation.

    Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; use the
    builtin int instead.
    """
    op = morphology.binary_opening(
        mask, structure=np.ones((4, 4))).astype(int)
    dil = morphology.binary_dilation(
        op, structure=np.ones((4, 4))).astype(int)
    return dil
def indicator_vector(morph, inv=False):
    """Collapse a binary mask to a (1, time) indicator vector and widen each
    detection by dilating with a 15-sample window.

    Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; use the
    builtin int instead.
    """
    if inv:
        vec = np.min(morph, axis=0).reshape(1, -1)
    else:
        vec = np.max(morph, axis=0).reshape(1, -1)
    vec = morphology.binary_dilation(
        vec, structure=np.ones((1, 15))).astype(int)
    return vec
def vector_to_timestamps(vec, audio, sr):
    """ Turns an indicator vector into timestamps in seconds.

    Returns (json string of (start, stop) second pairs, total signal seconds).
    """
    # Pad with zeros to ensure that starts and stop at beginning and end are being picked up
    vec = np.pad(vec.ravel(), 1, mode='constant', constant_values=0)
    starts = np.empty(0)
    stops = np.empty(0)
    # Rising edge (0 -> 1) marks a start, falling edge (1 -> 0) a stop.
    # At i == 0, vec[i-1] wraps to the last element, which is a pad zero.
    for i, e in enumerate(vec):
        if e == 1 and vec[i-1] == 0:
            start = i-1  # Subtract 1 because of padding
            starts = np.append(starts, start)
        if e == 0 and vec[i-1] == 1:
            stop = i-1
            stops = np.append(stops, stop)
    # NOTE(review): vec.shape[0] here is the PADDED length (original + 2), so
    # the timestamps are very slightly compressed -- confirm this is intended.
    ratio = audio.shape[0] / vec.shape[0]
    timestamps = np.vstack([starts, stops])
    # Scale up to original audio length
    timestamps *= ratio
    # Divide by sample rate to get seconds
    timestamps /= sr
    timestamps = np.round(timestamps, 3)
    # Get total duration of signal
    sum_signal = np.sum(timestamps[1] - timestamps[0])
    return json.dumps([tuple(i) for i in timestamps.T]), sum_signal
def signal_noise_separation(audio):
    """Split a raw audio vector into (signal, noise) sample arrays using the
    indicator-vector pipeline directly, for precomputing slices when storing
    timestamps is unnecessary."""
    spec = normalized_stft(audio)
    morph = morphological_filter(median_mask(spec, 3))
    vec = indicator_vector(morph)
    # Integer stretch factor introduces a miniscule (~0.001 s) time dilation,
    # which is safe for this purpose.
    stretch = audio.shape[0] // vec.shape[1]
    sample_mask = np.repeat(vec, stretch).astype(bool)
    signal = audio[np.where(sample_mask)[0]]
    noise = audio[np.where(~sample_mask)[0]]
    return signal, noise
def signal_timestamps(audio, sr):
    """Return (total duration in seconds, seconds containing bird
    vocalizations, JSON start/stop timestamps) for a recording."""
    spec = normalized_stft(audio)
    morph = morphological_filter(median_mask(spec, 3))
    timestamps, sum_signal = vector_to_timestamps(indicator_vector(morph), audio, sr)
    duration = audio.shape[0] / sr
    return duration, sum_signal, timestamps
| true |
49199666485420604dae4311e5992de9720fd80a | Python | C0deSamurai/deep-2048-shredder | /train_nnet.py | UTF-8 | 840 | 3.234375 | 3 | [] | no_license | """Runs a round of training where the bot plays lots and lots of games, and then reads them all in
again to learn from its mistakes. Saves the net's data to a file afterwards."""
import nnet
from game import Game
N_GAMES = 10            # games played per training epoch
N_EPOCHS = 10           # number of play-then-train rounds
N_TRAINING_BOARDS = 100 # boards sampled per game for training
agent = nnet.QLearningNNet()
for epoch in range(N_EPOCHS):
    # Self-play phase: collect N_GAMES completed games.
    training_games = []
    for i in range(N_GAMES):
        g = Game()
        agent.play_game_to_completion(g) # this will just make random moves
        training_games.append(g)
    # now, we train on what we just learned
    agent.train_on_games(training_games, N_TRAINING_BOARDS)
    # Decay exploration rate linearly.
    # NOTE(review): assumes epsilon starts at 1.0 -- confirm in QLearningNNet.
    agent.epsilon -= (0.9 / N_EPOCHS) # after the last one, it should be at .1
agent.save_state("trained_nnet.pkl")
print(agent.W)
# Play one greedy (non-training) demonstration game and show the boards.
g = Game()
agent.in_training = False
agent.play_game_to_completion(g)
g.print_boards()
| true |
9f99d868e580ecf18df8c65d049b39094a1173bd | Python | UnaStankovic/DisorderProteinsMetapredictor | /gui_and_socket/disprot_service.py | UTF-8 | 1,285 | 2.640625 | 3 | [] | no_license | import requests
import json
import re
def shortened_sequence(sequence):
    """Return the raw residue string: drop a FASTA header line (starting
    with '>') if present and remove all newlines.

    Bug fix: uses startswith() so an empty string no longer raises
    IndexError on sequence[0].
    """
    if sequence.startswith(">"):
        return ''.join(sequence.split("\n")[1:])
    return sequence.replace("\n", "")
# This function parses the record returned by the DisProt server.
def prepare_sequence(data):
    """Build a per-residue disorder mask from a DisProt record.

    Returns (mask, sequence) where mask[i] is 'D' inside any annotated
    disordered region (1-based inclusive start/end) and '-' elsewhere.
    Passes the "Not found." sentinel string straight through.
    """
    if data == "Not found.":
        return data
    sequence = data['sequence']
    residue_count = len(shortened_sequence(sequence))
    disorder_mask = ['-'] * residue_count
    for region in data['regions']:
        begin, end = int(region["start"]), int(region["end"])
        for i in range(begin - 1, end):
            disorder_mask[i] = "D"
    return disorder_mask, sequence
# This function is fetching the data from the DISPROT server
# This function is fetching the data from the DISPROT server
def get_sequence_info(id_disp):
    """Fetch a DisProt entry by id and return its disorder annotation.

    Queries the DisProt REST API and delegates parsing to
    ``prepare_sequence``; a non-200 response yields the "Not found."
    marker instead of a parsed record.
    """
    # response = requests.get('http://www.disprot.org/ws/get/' + id_disp) # Old API changed on 13.09.2019.
    response = requests.get('http://www.disprot.org/api/' + id_disp)
    # Check the HTTP status code directly.  The old code regex-matched
    # "200" against repr(response) (fragile) and called json.loads on the
    # body *before* knowing the request succeeded, crashing on non-JSON
    # error pages.
    if response.status_code != 200:
        data = "Not found."
    else:
        data = json.loads(response.content)
    return prepare_sequence(data)
| true |
# Read the desired number of rows and print a centered star pyramid.
a = int(input("줄 수를 입력해주세요: "))
for row in range(1, a + 1):
    padding = " " * (a - row)
    stars = "*" * (2 * row - 1)
    print(padding + stars)
| true |
def startEndVowels(word):
    """Return 'True' if *word* starts and ends with a vowel, else 'False'.

    Case-insensitive; the empty string yields 'False'.  Note the result
    is the *string* 'True'/'False', not a bool — kept for compatibility
    with existing callers.
    """
    vowels = 'aeiou'
    if not word:
        return 'False'
    starts_ok = word[0].lower() in vowels
    ends_ok = word[-1].lower() in vowels
    return 'True' if starts_ok and ends_ok else 'False'
| true |
# Convert a MAC address written as 'XXXX:YYYY:ZZZZ' to its 48-bit binary form.
mac = 'AAAA:BBBB:CCCC'
mac = mac.replace(':', '')
# Each byte must be printed as a zero-padded 8-bit group.  The previous
# '{:b}' format dropped leading zeros (e.g. 0x0A -> '1010' instead of
# '00001010'), silently corrupting the concatenated result.
print(''.join('{:08b}'.format(int(mac[i:i + 2], 16)) for i in range(0, 12, 2)))
5bb3f9ecab45a5792ab0fbf803ae65d1b258204a | Python | Cornellio/dashing-dashboards | /webstats/jobs/api_net_stats.py | UTF-8 | 9,349 | 2.65625 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/usr/bin/python
'''
Dashing job for graphing open HTTP connections across server farm.
Get network stats from each server.
Get sum and write to file.
Push stats to dashing widget.
'''
import sys
import json
import time
import argparse
import urllib2
import httplib
import paramiko
import os
def vprint(message, verbose):
'''Print messages to stdout when verbose option is used'''
if verbose:
print message
def parse_args():
    '''Parse command line arguments.

    Returns the tuple (auth_token, dashing_host, target_widget,
    dashing_env, num_recs, historyfile, servers, ssh_username,
    ssh_identity_file, verbose).
    '''
    # Default history file is '<script name>.history'.  The old expression
    # sys.argv[0].strip('py') treated 'py' as a character *set* and could
    # strip leading/trailing p/y characters from the path instead of just
    # removing the extension.
    script = sys.argv[0]
    if script.endswith('.py'):
        default_history = script[:-2] + "history"
    else:
        default_history = script + ".history"
    parser = argparse.ArgumentParser(
        description='Count open HTTP connections across '
        'API server farm and send to Dashing server for display')
    # default='' so that .split() below yields [] when -s is omitted
    # (previously args.servers was None and .split() raised AttributeError).
    parser.add_argument('-s', help='API servers to collect stats from',
                        required=False, dest='servers', default='')
    parser.add_argument('-d', help='Dashing server to push data to',
                        required=False, dest='dashing_host',
                        default='dashing.virginam.com')
    parser.add_argument('-w', help='Widget to send data to',
                        required=False, dest='widget',
                        default='web_api_netstats')
    parser.add_argument('-n', help='Number of data points to '
                        'send to Dashing server, This will be the nuber of '
                        'values shown on the x-axis of the graph',
                        required=False, dest='num_recs', default=12, type=int)
    parser.add_argument('-a', help='Authentication token for Dashing server',
                        required=False, dest='authtoken')
    parser.add_argument('-l', help='login name used for remote ssh commands',
                        required=True, dest='username')
    parser.add_argument('-i', required=False, dest='identity_file',
                        default="~/.ssh/id_dsa", help='ssh private key file')
    parser.add_argument('-f', help='File where stat history is stored',
                        required=False, dest='historyfile',
                        default=default_history)
    parser.add_argument('--environment', help='Dashing environment to use, '
                        'either "production" or "development", '
                        'Defaults to production on port 80, '
                        'Development uses port 3030',
                        required=False, dest='dashing_env',
                        default="production")
    parser.add_argument('-v', help='Verbose output', required=False,
                        default=False, dest='verbose', action='store_true')
    args = parser.parse_args()
    auth_token = args.authtoken
    servers = args.servers.split()
    # Remove an optional 'http://' scheme prefix explicitly.
    # str.strip('http://') also eats h/t/p/:/ characters from the *end*
    # of the hostname, mangling hosts such as 'example.net'.
    dashing_host = args.dashing_host
    if dashing_host.startswith('http://'):
        dashing_host = dashing_host[len('http://'):]
    target_widget = args.widget
    dashing_env = args.dashing_env
    num_recs = args.num_recs
    historyfile = args.historyfile
    ssh_username = args.username
    ssh_identity_file = args.identity_file
    verbose = args.verbose
    return (auth_token,
            dashing_host,
            target_widget,
            dashing_env,
            num_recs,
            historyfile,
            servers,
            ssh_username,
            ssh_identity_file,
            verbose)
def get_http_connection_count(server, username, identity_file, cmd, v):
    '''Run *cmd* on *server* over SSH and return the number of output lines.

    In practice cmd is a netstat pipeline, so the line count equals the
    number of established HTTP connections on that host.
    '''
    ssh = paramiko.SSHClient()
    # NOTE(review): AutoAddPolicy silently trusts unknown host keys —
    # acceptable for an internal farm, but worth confirming.
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    privatekeyfile = os.path.expanduser(identity_file)
    # NOTE(review): ssh_key is constructed but never used; connect() below
    # passes key_filename instead. Presumably leftover code.
    ssh_key = paramiko.DSSKey.from_private_key_file(privatekeyfile, password="")
    ssh.load_system_host_keys()
    ssh.connect(server, username=username, key_filename=privatekeyfile,
                look_for_keys=False)
    vprint('Connect %s:' % (server), v)
    ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command(cmd)
    vprint('Run remote command "%s"' % (cmd), v)
    # Get output of remote ssh command; each line is one connection.
    http_established_cx = []
    for output in ssh_stdout:
        http_established_cx.append(output)
    len_http_established_cx = len(http_established_cx)
    ssh.close()
    return len_http_established_cx
def get_sum_http_established_cx(servers, username, identity_file, v):
    '''
    Query every API server for its count of established HTTP
    connections and return the total across the whole farm.
    '''
    cmd = 'netstat -tna | grep -i 80.*established'
    vprint('Attempting connection to hosts: %s' % (servers), v)
    # Map each server name to its established-connection count.
    http_connections = {}
    for server in servers:
        http_connections[server] = get_http_connection_count(
            server, username, identity_file, cmd, v)
    # Grand total over all servers.
    return sum(http_connections.values())
def save_values(stats, file):
    '''Append a timestamped record of *stats* to *file*.

    Each appended line has the form 'YYYY-MM-DD HH:MM, <stats>'.
    '''
    # Build the timestamp in one strftime call (same output as the old
    # separate date/time calls).
    time_stamp = time.strftime("%Y-%m-%d %H:%M")
    # stats may arrive as a tuple; stringify and drop surrounding parens.
    stats = str(stats).strip('()')
    line = time_stamp + ", " + stats + "\n"
    # The original called 'f.close' without parentheses, so the handle was
    # never explicitly closed; a with-block guarantees flush and close.
    with open(file, 'a') as f:
        f.write(line)
def tail_history(num_recs, historyfile):
    '''Return the last *num_recs* recorded values from *historyfile*.

    The file's first line is a comment header; every data line looks
    like 'YYYY-MM-DD HH:MM, <value>', so the value is the third
    whitespace-separated token of each line.
    '''
    # 'with' guarantees the handle is closed; the original f.close() sat
    # *after* the return statement and never executed.
    with open(historyfile, 'r') as f:
        # skip header
        f.readline()
        lines = f.read().split('\n')
    # Drop the empty element produced by the trailing newline.
    lines.pop()
    # Third token of each line is the recorded value.
    value_list = [value.split()[2] for value in lines]
    # Return slice of the last num_recs values.
    return value_list[-num_recs:]
def get_json_values(values):
    '''Return a JSON array of graph points for the Dashing widget.

    Every entry of *values* becomes one point {"x": index, "y": value},
    with x starting at 1.
    '''
    # Build the structure and let the json module serialize it.  The old
    # hand-rolled string concatenation silently dropped values[0] and
    # (because its branch conditions were not exclusive) emitted the
    # final point twice.
    points = [{"x": i + 1, "y": int(v)} for i, v in enumerate(values)]
    return json.dumps(points)
def transmit_values(values, auth_token, target_widget, host, port, v):
    '''Send data to Dashing server via http.

    values is a JSON array string of points; it is wrapped together with
    the auth token into the POST body expected by Dashing widgets.
    '''
    # NOTE(review): auth_token is spliced into the JSON body without
    # escaping; fine for simple tokens, breaks on quotes.
    data = '{ "auth_token": "' + auth_token + '", "points":' + values + '}'
    msg = ''
    msg += "Will use dashing host %s:%s\n" % (host, port)
    msg += "Assembling final json string: \n%s" % (data)
    vprint(msg, v)
    host_connection = host + ':' + port
    try:
        http = httplib.HTTPConnection(host_connection)
        http.request('POST', '/widgets/' + target_widget, data)
    except Exception as e:
        # NOTE(review): raising a bare IOError discards the original
        # exception 'e' and its traceback; 'raise' alone would preserve it.
        print "Cannot connect to %s" % (host_connection)
        raise IOError
def main():
    '''Collect farm-wide HTTP connection stats, log them, and push the
    recent history to the configured Dashing widget.'''
    (auth_token, dashing_host, target_widget, dashing_env,
     num_recs, historyfile, servers, ssh_username,
     ssh_identity_file, verbosity) = parse_args()
    # msg collects strings for printing verbose output
    msg = ''
    # NOTE(review): data_view is never used.
    data_view = "points"
    # NOTE(review): if dashing_env is neither 'production' nor
    # 'development', dashing_http_port is never assigned and the code
    # below raises NameError.
    if dashing_env == "production":
        dashing_http_port = "80"
        msg += ("Running in dashing environment %s. Will send data via port "
                "%s." % (dashing_env, dashing_http_port))
    if dashing_env == "development":
        dashing_http_port = "3030"
        msg += ("Running in dashing environment %s. Will send data via port "
                "%s." % (dashing_env, dashing_http_port))
    server_connection = (dashing_host + ':' +
                         dashing_http_port + '/widgets/' + target_widget)
    msg += "\nServer connection string: %s" % (server_connection)
    # Create log file if it doesn't exist and write header
    HEADER = "# Time, Established Connections"
    if not os.path.exists(historyfile):
        open(historyfile, 'a').close()
    with open(historyfile, 'r+') as f:
        line = f.readline()
        if not line.startswith('#'):
            # NOTE(review): writing through an 'r+' handle after readline()
            # writes at the current offset and can overwrite existing data
            # when the first line is non-empty but not a header — confirm.
            f.write(HEADER + "\n")
            msg += '\nCreating log file'
    # NOTE(review): 'sum' shadows the builtin of the same name.
    sum = (get_sum_http_established_cx(servers, ssh_username,
                                       ssh_identity_file, verbosity))
    save_values(sum, historyfile)
    plot_values = tail_history(num_recs, historyfile)
    msg += '\nAssembling values: \n' + get_json_values(plot_values)
    vprint(msg, verbosity)
    # stat_values = tail_history(num_recs, stat)
    transmit_values(get_json_values(plot_values), auth_token,
                    target_widget, dashing_host, dashing_http_port, verbosity)
    # sys.exit(0)
# sys.exit(0)
if __name__ == '__main__':
    # Script entry point.
    main()
| true |
454f0a1b10f31ea5879d7bf79b95dda5602f0bcb | Python | amanryzus/temp | /question 3a.py | UTF-8 | 717 | 3.546875 | 4 | [] | no_license | phone={}
# Module-level state for the phone catalogue (phone maps price -> names).
# NOTE(review): ph and pr appear unused at module level — the functions
# below use locals of the same names; presumably leftovers.
ph=[]
pr=[]
q=1
def add():
    """Prompt for a phone name and price and register the phone.

    The price is stored as an int so that find()/rem(), which look the
    price up with int(input(...)), can actually match it — the original
    stored the price as a string, so those lookups always missed.
    """
    ph, pr = input("Enter the phone name and it price").split()
    # Group phones by integer price; create the bucket on first use.
    phone.setdefault(int(pr), []).append(ph)
def find():
    """Prompt for a price and print the phones available at that price."""
    k = int(input("Enter the price"))
    # .get avoids a KeyError crash when no phone is stored at this price.
    print(phone.get(k, []))
def rem():
    # Clear all phones registered at the given price.  Note: the key
    # itself stays in the dict (with an empty list), so display() still
    # shows the price afterwards.
    k=int(input("Enter the price to remove"))
    phone[k]=[]
def display():
    """Print every price together with the phones available at it."""
    for price, phones in phone.items():
        print("At {0} Price the available phone :{1}".format(price, phones))
# Text menu loop; entering 0 makes q falsy and ends the loop.
while (q):
    q=int(input("Enter \n1 to add\n2 to find\n3 to remove\n4 to print\n0 to exit\n"))
    if q==1:
        add()
    elif q==2:
        find()
    elif q==3:
        rem()
    elif q==4:
        display()
| true |
4ee8a63c6231e487a44b3eeb7058d03eaf1c95aa | Python | alexdylan/app1 | /planta.py | UTF-8 | 516 | 2.5625 | 3 | [] | no_license | class Planta:
def __init__(self,conexion,cursor):
self.conexion = conexion
self.cursor = cursor
def agregar(self, cultivo, fecha, id_clasi, id_inv):
insertar = ("INSERT INTO planta(cultivo,fecha, id_clasi,id_inv) VALUES(%s,%s,%s,%s)")
self.cursor.execute(insertar, (cultivo,fecha,id_clasi,id_inv))
self.conexion.commit()
def buscar(self, id_inv):
select = ("SELECT * FROM planta WHERE id_inv = %s")
self.cursor.execute(select, (id_inv,))
resultados = self.cursor.fetchall()
return resultados | true |
0a1546727c5c78e7464dc8a73baa1db7b2f5efb4 | Python | SrPrakhar/airflow-training-skeleton | /dags/excercise2.py | UTF-8 | 1,126 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | from datetime import date, datetime, timedelta
import airflow
from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
# Default arguments applied to every task in the DAG.
args = {
    'owner': 'Prakhar',
    'start_date': datetime(2019, 11, 17),
}
# Daily-scheduled DAG for the training exercise.
dag = DAG(
    dag_id='excercise2',
    default_args=args,
    schedule_interval="@daily",
)
def print_execution_date(**context):
    """Print the DAG run's execution date from the Airflow context.

    The original printed date.today() — the wall-clock date at run time —
    which differs from the logical execution date during backfills, even
    though the task name promises the execution date.
    """
    print("Execution date:", context["execution_date"])
# [START howto_operator_bash]
# NOTE(review): this assignment rebinds the name print_execution_date
# from the function above to the operator; the callable reference taken
# in python_callable still points at the function, so it works, but the
# shadowing is easy to trip over.
print_execution_date = PythonOperator(
    task_id="print_execution_date",
    python_callable=print_execution_date,
    provide_context=True,
    dag=dag,
)
# [END howto_operator_bash]
# Three parallel sleep tasks of different durations, then a terminator.
wait_5 = BashOperator(task_id="wait_5", bash_command="sleep 5", dag=dag)
wait_1 = BashOperator(task_id="wait_1", bash_command="sleep 1", dag=dag)
wait_10 = BashOperator(task_id="wait_10", bash_command="sleep 10", dag=dag)
the_end = DummyOperator(task_id="the_end", dag=dag)
# Fan out from the print task to the waits, then fan in to the_end.
print_execution_date >> [wait_5, wait_1, wait_10]
[wait_5, wait_1, wait_10] >> the_end
| true |
4866153e4cc761b1c524c3ba9c853bc44497b212 | Python | kduan005/nand2tetris | /project7/src/VMTranslator.py | UTF-8 | 10,353 | 3.28125 | 3 | [] | no_license | import sys
import collections
class parser(object):
    '''
    Parses a single line of a VM command into its parts:
      commandType: "arithmetic" for the nine ALU commands, otherwise the
                   command keyword itself ("push"/"pop")
      argOne:      the arithmetic operation, or the memory segment for
                   push/pop commands
      argTwo:      the segment index (only set for push/pop)
    '''
    # The nine arithmetic/logical VM commands.
    ARITHMETIC_OPS = {"sub", "neg", "gt", "not", "add", "eq", "lt", "or", "and"}

    def __init__(self):
        self.raw = ""       # raw VM command text
        self.tokens = []    # whitespace-split tokens of the command
        self.commandType = ""
        self.argOne = ""
        self.argTwo = ""

    def genCommandType(self):
        # Arithmetic commands are identified by keyword; anything else
        # (push/pop) uses its own keyword as the command type.
        if self.tokens[0] in self.ARITHMETIC_OPS:
            self.commandType = "arithmetic"
        else:
            self.commandType = self.tokens[0]

    def genArgOne(self):
        # Arithmetic: the operation itself; push/pop: the segment name.
        idx = 0 if self.commandType == "arithmetic" else 1
        self.argOne = self.tokens[idx]

    def genArgTwo(self):
        # Only push/pop commands carry an index argument.
        if self.commandType in ("push", "pop"):
            self.argTwo = self.tokens[2]

    def generate(self, vmCmd):
        '''Populate commandType/argOne/argTwo for one VM command line.'''
        self.raw = vmCmd
        self.tokens = vmCmd.split(" ")
        self.genCommandType()
        self.genArgOne()
        self.genArgTwo()
class codeWriter(object):
    '''
    codeWriter translates each parsed VM command into a block of Hack
    assembly, using the commandType/argOne/argTwo fields produced by the
    parser.  The generated lines accumulate in self.asmCmd and are
    written to an .asm output file by the caller.
    '''
    def __init__(self, outname):
        #take out filename from absolute/relative path and store it in self.outname
        #it will be used for generate label name for static variables
        self.outname = outname.split("/")[-1]
        #commandType, argOne, argTwo attributes have the same value with those
        #associated with parser object
        self.commandType = ""
        self.argOne = ""
        self.argTwo = ""
        #a list for storing assembly language commands for each vm command line
        self.asmCmd = []
        #a counter for recording index of continue label in assembly language,
        #used for generating continue labels for vm command of "eq", "lt", "gt"
        self.continue_counter = 0
        #output file (note: opened in the constructor; close() must be called)
        self.outf = open(outname + ".asm", "w")
        #a directory mapping key words in vm command to corresponding key words
        #in assembly language, default value set to be "self.outname.self.argTwo"
        #for mapping static variables and its labels (the lambda reads argTwo
        #lazily, so the default reflects the command being translated)
        self.d = collections.defaultdict(lambda: self.outname + "." + self.argTwo)
        self.d.update({"temp0": "5",
                       "temp1": "6",
                       "temp2": "7",
                       "temp3": "8",
                       "temp4": "9",
                       "temp5": "10",
                       "temp6": "11",
                       "temp7": "12",
                       "pointer0": "3",
                       "pointer1": "4",
                       "local": "LCL",
                       "argument": "ARG",
                       "this": "THIS",
                       "that": "THAT",
                       "eq": "JEQ",
                       "lt": "JLT",
                       "gt": "JGT",
                       "add": "+",
                       "sub": "-",
                       "neg": "-",
                       "not": "!",
                       "and": "&",
                       "or": "|"})
    def writeCmd(self, parser):
        '''Translate one parsed command into assembly (fills self.asmCmd).'''
        #set commandType, argOne, argTwo to be same with those of parser object
        self.commandType = parser.commandType
        self.argOne = parser.argOne
        self.argTwo = parser.argTwo
        #add raw command as comment at the top of block of assembly command
        #for debugging purpose
        self.asmCmd = ["// " + parser.raw]
        if self.commandType == "arithmetic":
            self.writeArithmetic()
        elif self.commandType == "push":
            self.writePush()
        elif self.commandType == "pop":
            self.writePop()
    def writeArithmetic(self):
        #method that calls different code generation methods according to
        #individual arithmetic type
        if self.argOne in {"add", "sub"}:
            self.add_sub()
        elif self.argOne in {"eq", "lt", "gt"}:
            self.eq_lt_gt()
        elif self.argOne in {"neg", "not"}:
            self.neg_not()
        elif self.argOne in {"and", "or"}:
            self.and_or()
    def writePush(self):
        #generate asm code for push command
        #case "constant": load the literal into D and push it
        if self.argOne == "constant":
            self.asmCmd.extend(["@" + self.argTwo,
                                "D=A",
                                "@SP",
                                "A=M",
                                "M=D",
                                "@SP",
                                "M=M+1"])
        #case "local", "argument", "this", "that": offset the segment base,
        #read the value, push it, then restore the base pointer
        elif self.argOne in {"local", "argument", "this", "that"}:
            self.asmCmd.extend(["@" + self.argTwo,
                                "D=A",
                                "@" + self.d[self.argOne],
                                "AM=D+M",
                                "D=M",
                                "@SP",
                                "AM=M+1",
                                "A=A-1",
                                "M=D",
                                "@" + self.argTwo,
                                "D=A",
                                "@" + self.d[self.argOne],
                                "M=M-D"])
        #case "temp", "pointer", "static": these map to fixed addresses,
        #so the value is read directly and pushed
        elif self.argOne in {"temp", "pointer", "static"}:
            self.asmCmd.extend(["@" + self.d[self.argOne + self.argTwo],
                                "D=M",
                                "@SP",
                                "AM=M+1",
                                "A=A-1",
                                "M=D"])
    def writePop(self):
        #generate asm code for pop command
        #case "local", "argument", "this", "that": offset the base pointer,
        #pop into the target cell, then restore the base pointer
        if self.argOne in {"local", "argument", "this", "that"}:
            self.asmCmd.extend(["@" + self.argTwo,
                                "D=A",
                                "@" + self.d[self.argOne],
                                "M=D+M",
                                "@SP",
                                "AM=M-1",
                                "D=M",
                                "@" + self.d[self.argOne],
                                "A=M",
                                "M=D",
                                "@" + self.argTwo,
                                "D=A",
                                "@" + self.d[self.argOne],
                                "M=M-D"])
        #case "temp", "pointer", "static": pop directly into the fixed address
        elif self.argOne in {"temp", "pointer", "static"}:
            self.asmCmd.extend(["@SP",
                                "AM=M-1",
                                "D=M",
                                "@" + self.d[self.argOne + self.argTwo],
                                "M=D"])
    def add_sub(self):
        #generate asm code for "add" or "sub" command: pop y, combine with x
        #in place at the new stack top
        self.asmCmd.extend(["@SP",
                            "AM=M-1",
                            "D=M",
                            "A=A-1",
                            "M=M" + self.d[self.argOne] + "D"])
    def eq_lt_gt(self):
        #generate asm code for "eq", "lt" or "gt" command: compare x-y,
        #leave -1 (true) or 0 (false) at the stack top, jumping over the
        #false-write via a uniquely numbered CONTINUE label
        self.asmCmd.extend(["@SP",
                            "AM=M-1",
                            "D=M",
                            "A=A-1",
                            "D=M-D",
                            "M=-1",
                            "@CONTINUE" + str(self.continue_counter),
                            "D;" + self.d[self.argOne],
                            "@SP",
                            "A=M-1",
                            "M=0",
                            "(CONTINUE" + str(self.continue_counter) + ")"])
        #increment continue_counter by 1 for generating next continue label
        self.continue_counter += 1
    def neg_not(self):
        #generate asm code for "neg" or "not" command (unary, in place)
        self.asmCmd.extend(["@SP",
                            "A=M-1",
                            "M=" + self.d[self.argOne] + "M"])
    def and_or(self):
        #generate asm code for "and" or "or" command
        self.asmCmd.extend(["@SP",
                            "AM=M-1",
                            "D=M",
                            "A=A-1",
                            "D=D" + self.d[self.argOne] + "M",
                            "M=D"])
    def close(self):
        #close output file
        self.outf.close()
class vmTranslator(object):
    '''Drives translation: wires parser and codeWriter together.

    Construct with the path of the .vm file to translate; Translator()
    writes the corresponding .asm file next to it.
    '''
    def __init__(self, inputName):
        self.inputName = inputName
        self.parser = parser()
        self.codeWriter = codeWriter(self.inputName[:-3])
    def Translator(self):
        '''Translate the input .vm file into Hack assembly.'''
        try:
            inf = open(self.inputName, "r")
        # Catch only file-open errors; the old bare 'except:' swallowed
        # every exception type, including KeyboardInterrupt.
        except IOError:
            print("File not found or path is incorrect")
        else:
            with inf:
                for row in inf:
                    # Normalize line endings/whitespace.  The original only
                    # stripped "\r\n", so files with Unix "\n" endings left
                    # a newline glued to the last token, misclassifying
                    # commands (e.g. "add\n" was not recognized as
                    # arithmetic) and crashing on the missing second token.
                    line = row.strip()
                    # Skip blank lines and comment lines.
                    if not line or line.startswith("//"):
                        continue
                    # Translate the VM command and emit its assembly block.
                    self.parser.generate(line)
                    self.codeWriter.writeCmd(self.parser)
                    for cmd in self.codeWriter.asmCmd:
                        self.codeWriter.outf.write(cmd + "\n")
            self.codeWriter.close()
if __name__ == "__main__":
    # CLI entry point: first argument is the .vm file to translate.
    vt = vmTranslator(sys.argv[1])
    vt.Translator()
| true |
ad1750b02e4d7767f1365d5dd2133cb5189a2565 | Python | ramadevim/Basic-python | /basic/basic/slicing.py | UTF-8 | 279 | 3.0625 | 3 | [
"MIT"
] | permissive | l=[1,2,3,4,5,6,7,8,9]
print(l[-1])
#l[start:stop:step]
print(l[:])
print(l[0:5])
print(l[0:5:2])
print(l[-1::-1])
a='http://coreyms.com'
print(a)
print(a[::-1])#reverse url
print(a[-4:])#top level domain
print(a[7:])#without http
print(a[7:14])#without http and top level domain | true |
a4e01b24756bdf83acb143470d02bb11e32da061 | Python | I-ll-Go-Rythm/2021_winter_study | /geonhokim/3.Divide_and_Conquer/부분배열_고르기_오답.py | UTF-8 | 1,313 | 3.109375 | 3 | [] | no_license | from typing import List
class Solution:
    # Attempted max-of (subarray sum * subarray min) via divide-and-conquer
    # around the middle index.  NOTE(review): the file name marks this as a
    # known wrong-answer attempt; the two-pointer expansion below can read
    # arr[start - 1] with start == 0, which wraps to the last element via
    # Python's negative indexing.  The print() calls look like leftover
    # debug output.
    def subArray(self, arr: List[int]) -> int:
        """Return max over subarrays of (sum of subarray) * (min of subarray)."""
        # Trivial sizes handled directly.
        if len(arr) == 1:
            return arr[0]*arr[0]
        if len(arr) == 2:
            return max(arr[0]*arr[0], arr[1]*arr[1], sum(arr) * min(arr))
        maxSum = 0
        piver = len(arr) // 2
        # Scan windows growing rightward from the middle.
        for i in range(piver, len(arr)):
            sum_ = sum(arr[piver: i + 1]) * min(arr[piver: i + 1])
            print(arr[piver: i + 1])
            if maxSum < sum_:
                maxSum = sum_
        # Scan windows growing leftward up to the middle.
        for i in range(piver - 1, -1, -1):
            sum_ = sum(arr[i:piver]) * min(arr[i:piver])
            print(arr[i:piver])
            if maxSum < sum_:
                maxSum = sum_
        # Expand a window around the middle, moving toward the larger
        # neighbouring element each step.
        start = piver
        end = piver + 1
        while start != 0 or end != len(arr) - 1:
            sum_ = sum(arr[start:end]) * min(arr[start:end])
            if maxSum < sum_:
                maxSum = sum_
            if arr[start - 1] > arr[end] or end == len(arr) - 1:
                start -= 1
            elif arr[start - 1] <= arr[end] or start == 0:
                end += 1
        return maxSum
# Read n (the declared length, otherwise unused) and the array, then print
# the computed answer.
sol = Solution()
n = int(input())
arr = list(map(int, input().split()))
print(sol.subArray(arr))
1a313a9a4d2baed70a308b8cd9d1b03ccee7308a | Python | heroku11/MentionAjg | /bot/modules/doom_dao.py | UTF-8 | 866 | 2.625 | 3 | [] | no_license | from dataclasses import asdict, dataclass
from datetime import datetime
from pymongo.collection import Collection
@dataclass
class DoomedUser:
    """Record of a 'doomed' chat member with its associated timestamps."""
    uid: int
    first_name: str
    chat_id: int
    # Timestamps stored alongside the doom record.  NOTE(review): exact
    # semantics of ts_lib vs ts_reset are defined by the callers — confirm.
    ts_doom: datetime
    ts_lib: datetime
    ts_reset: datetime
    @property
    def asdict(self):
        # Plain-dict view of the record (used as Mongo document/filter).
        return asdict(self)
class DoomDAO:
    """Thin persistence layer for doomed users over a Mongo collection."""
    def __init__(self, db: Collection):
        self.db = db
    def find(self, uid: int, chat_id: int) -> DoomedUser:
        """Return the doomed user for (uid, chat_id), or None if absent."""
        doc = self.db.find_one({"uid": uid, "chat_id": chat_id}, {"_id": 0})
        if not doc:
            return None
        return DoomedUser(**doc)
    def doom(self, user: DoomedUser):
        """Persist a new doom record."""
        self.db.insert_one(user.asdict)
    def undoom(self, user: DoomedUser):
        """Remove the record matching *user* exactly."""
        self.db.delete_one(user.asdict)
    def clear(self, time_now: datetime):
        # NOTE(review): deletes records whose ts_reset is *after* time_now
        # ($gt); confirm this is intended rather than purging expired
        # records ($lt).
        self.db.delete_many({"ts_reset": {"$gt": time_now}})
| true |
75d0a9ff46d5d5dbf6c745f492f74b46ccb90069 | Python | brunotjuliani/Fiu | /Programas/ECMWF_SFC_SIMPLES.py | UTF-8 | 1,698 | 2.609375 | 3 | [] | no_license | ## PARA DADOS EM NÍVEL DE SUPERFÍCIE
#imports
import pandas as pd
from datetime import datetime, timedelta
from ecmwfapi import ECMWFService
#fazendo um range para se colocar às datas selecionadas
# Build the list of forecast base dates to request (one per day).
date_range = pd.date_range(start='20190531',end='20190531', freq="D").strftime('%Y%m%d').to_list()
for dates in date_range:
    # NOTE(review): the API key and e-mail are hard-coded credentials;
    # they should live in ~/.ecmwfapirc or an environment variable, not
    # in version control.
    server = ECMWFService( "mars", url = "https://api.ecmwf.int/v1",
    key = "66dc9750b0f18814d51fa8658c52d73f", email = "rafael.toshio@simepar.br")
    # One MARS retrieval per base date: 50 ensemble members of total
    # precipitation on a 0.2-degree grid over southern Brazil.
    server.execute(
        {
        "class" :"od",
        "date" : dates, # the range above can be replaced by a date spec like 20160101/to/20160102
        "expver" : "1",
        "levtype" : "sfc",
        "number" : "1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/31/32/33/34/35/36/37/38/39/40/41/42/43/44/45/46/47/48/49/50",
        "param" : "228.128", # look up the variable's parameter code in the catalogue
        "grid" : "0.2/0.2", # grid resolution
        "step" : "all", # forecast step in hours
        "area" : "-22.0/-55.0/-27.0/-48.0", #lat/lon
        "stream" : "enfo",
        "time" : "00", # model run
        "type" : "pf",
        "target" : "data.grib2"
        },
        "../Dados/ECMWF_Grib/ECMWF_SSE05p01_SFC"+ dates +"_00.grib2")
#number=1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30/31/32/33/34/35/36/37/38/39/40/41/42/43/44/45/46/47/48/49/50,
#stream=enfo,
#type=pf,
#
#"area" : "-17.09000015258789/-36.45000076293945/-37.3699951171875/-63.6300048828125", #lat/lon
#Base times: 00
#Grid: 0.2
#Forecast time-steps: [00]: 0 to 90 by 1, 93 to 144 by 3, 150 to 360 by 6
#Areas: N: -19 W: -59 S: -34 E: -40
#Parameters: TP
| true |
907b3521de633b5376a3d94a7fc18a5ec4e795ba | Python | mcpeer/django_polls | /polls/tests.py | UTF-8 | 4,823 | 2.703125 | 3 | [] | no_license | import datetime
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseNotFound
from django.test import TestCase
from django.utils import timezone
from django.urls import reverse
from .models import Question
# Create your tests here.
class QuestionModelTests(TestCase):
    """Unit tests for Question.was_published_recently()."""
    def test_was_published_recently_with_future_question(self):
        """
        was_published_recently() returns False for questions whose pub_date is
        in the future
        """
        time = timezone.now() + datetime.timedelta(days=30)
        future_question = Question(pub_date = time)
        self.assertIs(future_question.was_published_recently(), False)
    def test_was_published_recently_with_old_question(self):
        """
        was_published_recently() returns False for questions whose pub_date is
        older than 1 day.
        """
        time = timezone.now() - datetime.timedelta(days = 30)
        history_question = Question(pub_date = time)
        self.assertIs(history_question.was_published_recently(), False)
    def test_was_published_recently_with_recent_question(self):
        """
        was_published_recently() returns True for questions whose pub_date is
        less than 1 day old.
        """
        time = timezone.now() - datetime.timedelta(hours = 10)
        recent_question = Question(pub_date = time)
        self.assertIs(recent_question.was_published_recently(), True)
def create_question(question_text, days):
    """
    Create and persist a Question whose pub_date is offset from now by
    *days* (negative for questions published in the past, positive for
    questions that have yet to be published).
    """
    pub_date = timezone.now() + datetime.timedelta(days=days)
    return Question.objects.create(question_text=question_text, pub_date=pub_date)
class QuestionIndexViewTest(TestCase):
    """Integration tests for the polls index view's question filtering."""
    def test_no_questions(self):
        """
        If no questions exist, an appropriate message is shown
        """
        response = self.client.get(reverse('polls:index'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'No polls are available.')
        self.assertNotIn('latest_question_list', list(response.context))
    def test_past_questions(self):
        """
        Q's with a pub_date in past are displayed
        """
        create_question(question_text="Past question.", days=-30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question.>']
        )
    def test_future_question(self):
        """
        Q's with a pub_date in future are not displayed
        """
        create_question(question_text="Fut q", days=30)
        response = self.client.get(reverse('polls:index'))
        self.assertContains(response, 'No polls are available.')
        self.assertNotIn('latest_question_list', list(response.context))
    def test_future_questions_and_past_questions(self):
        """
        Even if both past and future questions exist, only past questions are displayed
        """
        create_question(question_text="Past question.", days=-30)
        create_question(question_text='future', days=30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question.>']
        )
    def test_two_past_questions(self):
        """
        The question index page may display multiple questions
        """
        create_question(question_text="Past question1.", days=-30)
        create_question(question_text="Past question2.", days=-30)
        response = self.client.get(reverse('polls:index'))
        # Most recently published first.
        self.assertQuerysetEqual(
            response.context['latest_question_list'],
            ['<Question: Past question2.>', '<Question: Past question1.>']
        )
class QuestionDetailViewTests(TestCase):
    """Integration tests for the polls detail view's pub_date handling."""
    def test_future_question(self):
        """
        The detail view of a question with a pub_date in the future
        returns a 404 not found
        """
        future_question = create_question(question_text = 'Future question.', days = 5)
        url = reverse('polls:detail', args = (future_question.id,))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)
    def test_past_question(self):
        """
        The detail view of a question with pub_date in the past
        displays the question's text
        """
        past_q = create_question(question_text = "Past", days = -4)
        url = reverse('polls:detail', args = (past_q.id,))
        response = self.client.get(url)
        self.assertContains(response, past_q.question_text)
572a064a1505dfde03b998f182f87dc16842f29d | Python | djaney/machine-learning | /05_mnist.py | UTF-8 | 2,300 | 2.859375 | 3 | [] | no_license |
'''
Use MNIST database
1 layer with 10 neurons in softmax yeilds 19%
2 layer with 10 neurons each in softmax yeilds 9%
2 layer with 10 neurons each in relu and softmax yeilds 53%
5 layer with 10 neurons each in relu and softmax, 0.003 to 0.1 learning yeilds 85%
with decay 73%
with dropout 93%
some droppings
'''
# Imports
import numpy as np
import tensorflow as tf
import argparse
from tensorflow.examples.tutorials.mnist import input_data
# Command-line flag for the MNIST data cache directory.
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data', help='Directory for storing input data')
FLAGS, unparsed = parser.parse_known_args()
mnist = input_data.read_data_sets(FLAGS.data_dir)
# Exponentially decaying learning rate keyed off global_step.
# NOTE(review): global_step is never passed to optimizer.minimize(...)
# below, so it never increments and the decay schedule never advances.
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.1
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 100000, 0.95, staircase=True)
# input - None is for batch dimension, 784 = 28x28 flattened pixels
x = tf.placeholder(tf.float32, [None,784])
# Four hidden relu layers of 10 units each, then a softmax output layer.
h1 = tf.layers.dense(x, 10, tf.nn.relu)
h2 = tf.layers.dense(h1, 10, tf.nn.relu)
h3 = tf.layers.dense(h2, 10, tf.nn.relu)
h4 = tf.layers.dense(h3, 10, tf.nn.relu)
# NOTE(review): m_ already applies softmax, but the sparse_softmax loss
# below expects raw logits — this double-softmaxes; confirm intended.
m_ = tf.layers.dense(h4, 10, tf.nn.softmax) # for testing
# NOTE(review): tf.layers.dropout defaults to training=False, so without
# a training flag this dropout layer is a no-op — confirm.
m = tf.layers.dropout(m_, rate=0.25) # for training
# initialize the variables defined above
init = tf.global_variables_initializer()
# labels, i.e. correct answers
y = tf.placeholder(tf.int64, [None])
# calculate the loss distance using cross entropy (train vs eval graphs)
cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=m)
cross_entropy_ = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=m_)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_step = optimizer.minimize(cross_entropy)
sess = tf.Session()
sess.run(init)
for i in range(10000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    train_data = {x: batch_xs, y: batch_ys}
    #training step on one minibatch
    sess.run(train_step, feed_dict=train_data)
    if 0 == i % 100:
        # Accuracy on the eval graph (no dropout).
        correct_prediction = tf.equal(tf.argmax(m_, 1), y)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        # train-set metrics
        a,c = sess.run([accuracy, cross_entropy_], feed_dict={x: batch_xs, y: batch_ys})
        # test-set metrics
        a_,c_ = sess.run([accuracy, cross_entropy_], feed_dict={x: mnist.test.images,y: mnist.test.labels})
        print(a,c,a_,c_)
aca2e723571a39b4fc8810d26181588e275eccbc | Python | mtreinish/bqskit | /bqskit/compiler/task.py | UTF-8 | 3,052 | 3.046875 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | """
This module implements the CompilationTask class, TaskException, TaskStatus
enum, and the TaskResult class.
The CompilationTask class describes a compilation problem. These can be
submitted to an engine. The different CompilationTask states are enumerated in
TaskStatus. Once a CompilationTask is completed, a TaskResult is returned. If an
exception occurred during execution, it will be reraised as a TaskException.
"""
from __future__ import annotations
import uuid
from enum import Enum
from typing import Sequence
from bqskit.compiler.basepass import BasePass
from bqskit.ir.circuit import Circuit
from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix
class TaskException(Exception):
    """Raised when a task failed, produced no result, or does not exist."""
class TaskStatus(Enum):
"""TaskStatus enum type."""
ERROR = 0 # The task encountered an error or does not exist.
WAITING = 1 # The task is waiting in a workqueue.
RUNNING = 2 # The task is currently running.
DONE = 3 # The task is finished.
class TaskResult():
"""TaskResult structure."""
def __init__(
self,
message: str,
status: TaskStatus,
circuit: Circuit | None = None,
) -> None:
"""TaskResult Constructor."""
self.message = message
self.status = status
self.circuit = circuit
def get_circuit(self) -> Circuit:
"""Retrieves the circuit result or reraises an error."""
if self.status == TaskStatus.ERROR:
raise TaskException(self.message)
if self.circuit is None:
raise TaskException('No circuit produced.')
return self.circuit
@staticmethod
def does_not_exist() -> TaskResult:
"""Return a TaskResult with an error message."""
return TaskResult('Task does not exist.', TaskStatus.ERROR)
@staticmethod
def from_circuit(circuit: Circuit) -> TaskResult:
"""Build a simple success result from a circuit."""
return TaskResult('Success.', TaskStatus.DONE, circuit)
class CompilationTask():
    """A compilation problem: an input circuit plus the passes to run on it."""

    def __init__(
        self,
        input_circuit: Circuit,
        passes: Sequence[BasePass],
    ) -> None:
        """
        Build a task from *input_circuit* and the configured pass
        sequence; a fresh uuid identifies the task.
        """
        self.task_id = uuid.uuid4()
        self.input_circuit = input_circuit
        self.passes = passes

    @staticmethod
    def synthesis(utry: UnitaryMatrix, method: str) -> CompilationTask:
        """Produce a standard synthesis task for the given unitary."""
        return CompilationTask(Circuit.from_unitary(utry), [])  # TODO

    @staticmethod
    def optimize(circuit: Circuit, method: str) -> CompilationTask:
        """Produce a standard optimization task for the given circuit."""
        return CompilationTask(circuit, [])  # TODO
| true |
e011a80b61528556f31ba5759aa41c549da3a1c1 | Python | frozbiz/pe | /010 - sum of primes less than a million.py | UTF-8 | 652 | 3.71875 | 4 | [] | no_license | # Find the sum of all the primes below two million.
# Module-level cursor: the most recently generated prime (starts at 2).
nCurrPrime = 2
def isPrime(x):
    """Return True when x is a prime number, False otherwise.

    Trial division: handle 2 specially, reject even numbers, then test
    odd divisors up to sqrt(x).
    """
    if x < 2:
        # Bug fix: the original returned True for 1 (and for negatives),
        # since the while loop never ran for such inputs.
        return False
    # slight speed-up, although starting at 2 and going by one works fine as well
    if (x == 2):
        return True
    if (x % 2 == 0):
        return False
    i = 3
    while (i*i <= x):
        if (x % i == 0):
            return False
        i += 2
    return True
def getNextPrimeBetter():
    """Advance the module-level cursor past the current prime and return the next one."""
    global nCurrPrime
    while True:
        nCurrPrime += 1
        if isPrime(nCurrPrime):
            return nCurrPrime
# Accumulate every prime below two million, starting from the cursor (2).
n = nCurrPrime
nSum = 0
while n < 2000000:
    nSum += n
    n = getNextPrimeBetter()
# Function-call form of print runs on both Python 2 and Python 3; the
# original `print n` statement form is a SyntaxError under Python 3.
print(n)
print(nSum)
| true |
8fbda9e9e0f69ac59ca843c245de1a38b382d631 | Python | JulianaM2/roadFighter | /menu.py | UTF-8 | 2,222 | 3.46875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 11 09:39:33 2019
@author: jumunoz
"""
import tkinter as tk
import roadFighter as rf
from PIL import ImageTk, Image
#Function to replace the window with the gameover menu
def gameOverWindow (points):
    """Open the game-over window showing *points* and PLAY/EXIT buttons.

    Blocks in mainloop() until the window is closed. PLAY destroys this
    window and restarts the game; EXIT just closes it.
    """
    gameOver = tk.Tk() #Start a new window named gameover
    gameOver.title('Gameover')
    gameOver.geometry('200x250')
    gameOver.iconbitmap('carIcon.ico')
    #Background of the window; keeping `img` in a local preserves a Python
    #reference so tkinter does not garbage-collect the PhotoImage.
    img = ImageTk.PhotoImage(Image.open('road.png'))
    tk.Label(gameOver, image = img).pack()
    tk.Label(gameOver, text = 'GAMEOVER', font = ('Bahnschrift Light Condensed', 25),
             bg = 'gray45', fg = 'white').place(x = 42, y = 20)
    tk.Label(gameOver, text = 'YOU GOT: ' + str(points) +' POINTS', bg = 'gray45',
             font = ('Bahnschrift Light Condensed', 13), fg = 'white').place(x = 40, y = 90)
    tk.Button(gameOver, text = 'PLAY', command = lambda:play(gameOver), bd = 3, padx = 10,
              font = ('Bahnschrift Condensed', 12), bg = '#93b5b3').place(x = 40, y = 150)
    tk.Button(gameOver, text='EXIT', command = gameOver.destroy, bd = 3, padx = 12,
              font = ('Bahnschrift Condensed', 12), bg = '#93b5b3').place(x = 110, y = 150)
    gameOver.mainloop()
#End gameOverWindow
#Function to show the main menu
def mainMenu():
    """Show the welcome window with PLAY/EXIT buttons; blocks in mainloop()."""
    mainMenu = tk.Tk()
    mainMenu.title('Welcome')
    mainMenu.geometry('200x250')
    mainMenu.iconbitmap('carIcon.ico')
    # Keep a reference to the PhotoImage so tkinter does not drop it.
    img = ImageTk.PhotoImage(Image.open('road.png'))
    tk.Label(mainMenu, image = img).pack()
    tk.Label(mainMenu, text = 'WELCOME', font = ('Bahnschrift Light Condensed', 25),
             bg = 'gray45', fg = 'white' ).place(x = 48,y = 30)
    tk.Button(mainMenu, text = 'PLAY', command = lambda:play(mainMenu), bd = 3, padx = 10,
              font = ('Bahnschrift Condensed', 12), bg = '#93b5b3').place(x = 40, y = 150)
    tk.Button(mainMenu, text='EXIT', command = mainMenu.destroy, bd = 3, padx = 12,
              font = ('Bahnschrift Condensed', 12), bg = '#93b5b3').place(x = 110, y = 150)
    mainMenu.mainloop()
#End mainMenu
#Function to start the game
def play(page):
    """Close the given menu window and hand control to the game loop."""
    page.destroy()
    rf.playRoadFighter()
#End play
6f86528e915d1672b2e56cdd42bc46ba01972401 | Python | mic16/area | /backend/service/Trigger.py | UTF-8 | 399 | 2.671875 | 3 | [] | no_license | class Trigger():
def __init__(self, func=None, types=[]):
self.types = types.copy()
self.action = func
    def addType(self, type):
        """Register one more event type; returns self to allow chaining."""
        self.types.append(type)
        return self
    def getTypes(self):
        """Return the (mutable) list of registered event types."""
        return self.types
    def setAction(self, func):
        """Set the action callback; returns self to allow chaining."""
        self.action = func
        return self
    def getAction(self):
        """Return the stored action callback (may be None)."""
        return self.action
10e1264be3a6c5caa0b1ee0ac249d4828af003e3 | Python | EmbraceLife/LIE | /my_utils/line_continuous_color.py | UTF-8 | 3,744 | 3.5 | 4 | [] | no_license | """
line_continuous_color
key answer is found here
https://stackoverflow.com/questions/17240694/python-how-to-plot-one-line-in-different-colors
# I have two datasets, one is array with shape (30,), named line_data; the other one is array (1, 30), named color_data.
# I have used line_data to plot a line, use color_data to plot a color bar like image.
# I want to fill the color of the line with the color bar's continuous color change.
# Could anyone show me how to do it in python libraries, for example matplotlib or else?
"""
import numpy as np
import matplotlib.pyplot as plt
# line_data: 30 y-values for the line to draw (x is the sample index).
line_data = np.array([ 0.89917704,   1.89812886,   2.89733245,   3.87308733,
         4.79016642,   4.8327078 ,   5.81535641,   5.81631461,
         5.81652544,   5.81652555,   5.81652639,   5.81652663,
         5.93220416,   6.74091009,   7.61425993,   7.66313944,
         8.60456767,   8.65866624,   9.5472393 ,   9.63912952,
         9.84010958,  10.83984404,  11.83848397,  11.83959435,
        12.1176459 ,  12.39335136,  12.39511715,  13.20027627,
        14.00576137,  14.07948385])
# color_data: one scalar in [0, 1] per point, shape (1, 30); used to color
# each line segment via a colormap.
color_data = np.array([[  8.99476647e-01,   2.99607753e-04,   9.99251425e-01,
          4.78358124e-05,   9.75802720e-01,   5.87236322e-02,
          1.61822475e-02,   9.98830855e-01,   9.99789059e-01,
          9.99999881e-01,   1.00000000e+00,   9.99999166e-01,
          9.99999404e-01,   8.84321868e-01,   7.56159425e-02,
          9.48965788e-01,   9.97845292e-01,   5.64170629e-02,
          2.31849123e-03,   8.90891552e-01,   7.99001336e-01,
          9.99981403e-01,   2.46947806e-04,   9.98886883e-01,
          9.99997258e-01,   7.21945703e-01,   9.97651160e-01,
          9.99416947e-01,   1.94257826e-01,   9.99742925e-01]])
# Earlier experiments, kept for reference:
# plt.imshow(color_data, cmap='binary')
# plt.show()
#
# plt.plot(line_data, c='blue')
# plt.show()
# Reshape so both arrays are column vectors: colors (30, 1), line (30, 1).
color_data = color_data.transpose((1,0))
line_data = line_data.reshape((-1,1))
def uniqueish_color(color_data):
    """Map a scalar in [0, 1] onto the matplotlib 'binary' (grayscale) colormap.

    The 'gist_ncar' alternative from earlier experiments is kept commented out.
    """
    # return plt.cm.gist_ncar(color_data)
    return plt.cm.binary(color_data)
# xy = (np.random.random((10, 2)) - 0.5).cumsum(axis=0)
# Build (x, y) pairs, then draw the line one segment at a time so each
# segment can carry its own color from color_data.
X = np.arange(len(line_data)).reshape((-1,1))
y = line_data
xy = np.concatenate((X,y), axis=1)
fig, ax = plt.subplots()
for start, stop, col in zip(xy[:-1], xy[1:], color_data):
    x, y = zip(start, stop)
    ax.plot(x, y, color=uniqueish_color(col[0]))
plt.show()
"""
# real thing I want to solve
X = np.arange(len(line_data)).reshape((-1,1))
y = line_data
xy = np.concatenate((X,y), axis=1)
fig, ax = plt.subplots()
for start, stop, col in zip(xy[:-1], xy[1:], color_data):
    x, y = zip(start, stop)
    ax.plot(x, y, color=uniqueish_color(col))
plt.show()
"""
####################
# how to fill a line with a continous 2 colors
# how to plot a color legend for this continuous 2 colors
# https://stackoverflow.com/questions/8342549/matplotlib-add-colorbar-to-a-sequence-of-line-plots
import matplotlib as mpl
import matplotlib.pyplot as plt
min_c = 0.000000000000001 # color_data.min()
max_c = 1.0 # color_data.max()
step = 1
# Setting up a colormap that's a simple transtion
mymap = mpl.colors.LinearSegmentedColormap.from_list('mycolors',['blue', 'yellow','red'])
# Using contourf to provide my colorbar info, then clearing the figure
Z = [[0,0],[0,0]]
levels = range(int(min_c*100),int(max_c*100),step)
CS3 = plt.contourf(Z, levels, cmap=mymap)
plt.clf()
# Re-draw the line segment by segment, coloring each by its normalized value.
for start, stop, col in zip(xy[:-1], xy[1:], color_data):
    # setting rgb color based on z normalized to my range
    r = (col[0]-min_c)/(max_c-min_c)
    g = 0
    b = 1-r
    x, y = zip(start, stop)
    plt.plot(x, y, color=(r,g,b))
plt.colorbar(CS3) # using the colorbar info I got from contourf
plt.show()
| true |
02795a3874849ab461576f4f1a4d14c661d68552 | Python | ftarantuviez/Classification-Iris | /main.py | UTF-8 | 1,671 | 3.046875 | 3 | [] | no_license | import streamlit as st
import pandas as pd
import pickle
from sklearn import datasets
st.set_page_config(page_title='Simple Iris Classification', page_icon="./f.png")
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
st.write("""
# Simple Iris Flower Prediction App
This app predicts the **Iris flower** type!
""")
st.sidebar.header('User Input Parameters')
def user_input_features():
    """Collect the four iris measurements from sidebar sliders.

    Returns a single-row pandas DataFrame (slider ranges match the
    min/max of the iris dataset; the third argument is the default).
    """
    sepal_length = st.sidebar.slider('Sepal length', 4.3, 7.9, 5.4)
    sepal_width = st.sidebar.slider('Sepal width', 2.0, 4.4, 3.4)
    petal_length = st.sidebar.slider('Petal length', 1.0, 6.9, 1.3)
    petal_width = st.sidebar.slider('Petal width', 0.1, 2.5, 0.2)
    data = {'sepal_length': sepal_length,
            'sepal_width': sepal_width,
            'petal_length': petal_length,
            'petal_width': petal_width}
    features = pd.DataFrame(data, index=[0])
    return features
# Echo the chosen parameters, run the pickled classifier, and display results.
df = user_input_features()
st.subheader('User Input parameters')
st.write(df)
iris = datasets.load_iris()
# Bug fix: the original used pickle.load(open(...)) which never closed the
# file handle; a context manager releases it deterministically. The pickle
# is a local, app-shipped artifact (loading untrusted pickles is unsafe).
with open('iris_clf.pkl', 'rb') as _model_file:
    clf = pickle.load(_model_file)
prediction = clf.predict(df)
prediction_proba = clf.predict_proba(df)
st.subheader('Class labels and their corresponding index number')
st.write(iris.target_names)
st.subheader('Prediction')
st.write(iris.target_names[prediction])
#st.write(prediction)
st.subheader('Prediction Probability')
st.write(prediction_proba)
# This app repository
st.write("""
---
## App repository
[Github](https://github.com/ftarantuviez/Classification-Iris)
""")
# / This app repository
e5f99c2156dbb3e3af6991553218c7103ec4dd88 | Python | yangroro/gitpractice2 | /예제6.py | UTF-8 | 247 | 3.53125 | 4 | [] | no_license | user_input = input("저장할 내욜을 입력하세요:")
# Append the user's text to test.txt, one entry per line. The context
# manager guarantees the file is closed even if a write raises (the
# original opened/closed manually and would leak the handle on error).
with open('test.txt', 'a') as f:  # 'a' = append, keeps existing content
    f.write(user_input)
    f.write("\n")  # newline so saved entries stay line-separated
| true |
dd16ca0bda97bc5df24fdd996ca849e5ccff1fc9 | Python | chi-jams/Projects | /ICPC_Practice/oop.py | UTF-8 | 423 | 3.203125 | 3 | [] | no_license | N, Q = [int(i) for i in input().split(" ")]
# Read N words, then answer Q queries of the form "prefix*suffix":
# count how many words start with the prefix and end with the suffix,
# without letting the two parts overlap.
words = []
for i in range(N):
    words.append(input())
for i in range(Q):
    pattern = input().split('*')
    len0, len1 = len(pattern[0]), len(pattern[1])
    match = 0
    for word in words:
        # Prefix and suffix may not overlap, so the word must be long enough.
        if len0 + len1 > len(word):
            continue
        # len1 == 0 guard: word[-0:] would be the whole word, not the empty suffix.
        if pattern[0] == word[:len0] and (len1 == 0 or pattern[1] == word[-len1:]):
            match += 1
    print(match)
| true |
e555af5a08a7ac3e639027206f9abd10b46da523 | Python | ReyRizki/Komgraf | /Blender/W12/fence.py | UTF-8 | 4,076 | 2.953125 | 3 | [] | no_license | import bpy
import random
from math import radians, sin, cos, tan, pi
from mathutils import Matrix
def clear_scene():
    """Remove every object except 'Sun', and every material, from the blend data."""
    for obj in bpy.data.objects:
        if(obj.name != 'Sun'):
            bpy.data.objects.remove(obj)
    for mtr in bpy.data.materials:
        bpy.data.materials.remove(mtr)
def rotate_point(center, point, degree):
    """Rotate `center` around `point` by `degree` degrees (counter-clockwise).

    Note the argument roles: despite the parameter names, it is the
    *center* argument that is rotated about the *point* argument, matching
    how polygonal_fence() calls this helper.
    """
    theta = radians(degree)
    pivot_x, pivot_y = point
    moving_x, moving_y = center
    dx = moving_x - pivot_x
    dy = moving_y - pivot_y
    rot_x = cos(theta) * dx - sin(theta) * dy + pivot_x
    rot_y = sin(theta) * dx + cos(theta) * dy + pivot_y
    return (rot_x, rot_y)
def create_fence(height=10, width=0.25, x = 0, y = 0, degree = 0, name="Cube"):
    """Add one fence plank: a unit cube scaled, placed, rotated, and given
    a random-colored material."""
    # create the fence
    bpy.ops.mesh.primitive_cube_add(size=1, enter_editmode=False, align='WORLD')
    # transform
    fence = bpy.context.object
    fence.name = name
    fence.location = (x, y, 0)
    fence.scale = (1, width, height)
    fence.rotation_euler = (0, 0, radians(degree))
    # Random RGB channels; uniform(0, 1) is already non-negative, so the
    # abs() calls below are redundant but harmless.
    ru_r = random.uniform(0,1)
    ru_g = random.uniform(0,1)
    ru_b = random.uniform(0,1)
    # give material
    material = bpy.data.materials.new('material')
    material.diffuse_color = (abs(ru_r), abs(ru_g), abs(ru_b), 1)
    bpy.context.object.data.materials.append(material)
def polygonal_fence(faces, length, gap, width = 0):
    """Build a closed fence with `faces` sides of `length` planks each
    (rectangles alternate `length`/`width` planks), centered on the origin.

    Each side is drawn at the origin, translated to its position on the
    polygon, re-centered, then rotated into place.
    """
    # calculate degree
    degree = 360 / faces
    # initialize length, width, and center
    if (width == 0):
        width = length
    is_rectangle = (width != length) and (faces == 4)
    total_length = (length * 1) + ((length - 1) * gap)
    total_width = (width * 1) + ((width - 1) * gap)
    # Polygon apothem gives the y-offset of the shape's center for regular
    # polygons; rectangles use half the total width instead.
    center = (total_length / 2, (total_width / 2) if is_rectangle else (tan(radians((180 - degree) / 2)) * total_length / 2))
    print(center)
    for i in range(faces):
        # draw fence
        fences = (width if ((i & 1) and is_rectangle) else length)
        for j in range(fences):
            create_fence(10, 0.25, (1 + gap) * j, 0, 0, "Cube." + str(i) + "." + str(j))
        # select fences
        for j in range(fences):
            bpy.data.objects["Cube." + str(i) + "." + str(j)].select_set(True)
        # decide position
        total = (total_width if ((i & 1) and is_rectangle) else total_length)
        current = (total / 2, 0)
        target = (0, 0)
        if ((i & 1) and is_rectangle):
            target = (((i - 1) / 2) * length, width / 2)
        else:
            target = rotate_point(current, center, i * degree)
        diff = (target[0] - current[0], target[1] - current[1])
        # Move the selected side to its place on the polygon, then shift the
        # whole shape so the polygon is centered at the origin.
        bpy.ops.transform.translate(value=(diff[0], diff[1], 0), orient_type='LOCAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='LOCAL', constraint_axis=(False, False, False), mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
        bpy.ops.transform.translate(value=(-center[0], -center[1], 0), orient_type='LOCAL', orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)), orient_matrix_type='LOCAL', constraint_axis=(False, False, False), mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1, use_proportional_connected=False, use_proportional_projected=False)
        # rotate
        bpy.ops.transform.rotate(value=radians(-i * degree), orient_axis='Z', orient_type='LOCAL', orient_matrix=((1, 0.0, 0.0), (0.0, 1, 0.0), (0.0, 0.0, 1)), orient_matrix_type='LOCAL', constraint_axis=(False, False, False), mirror=True, use_proportional_edit=False, proportional_edit_falloff='SMOOTH', proportional_size=1.0, use_proportional_connected=False, use_proportional_projected=False, snap=False, snap_target='CLOSEST', snap_point=(0.0, 0.0, 0.0), snap_align=False, snap_normal=(0.0, 0.0, 0.0), gpencil_strokes=False, release_confirm=False, use_accurate=True)
        bpy.ops.object.select_all(action='DESELECT')
# Script entry: wipe the scene, then build an octagonal fence (side 5, no gap).
clear_scene()
polygonal_fence(8, 5, 0)
5429c5f8bd93e942424a393f11f1543707723a9b | Python | cmeuth/SIUEDSS | /GPIO/accelerator.py | UTF-8 | 921 | 3.09375 | 3 | [] | no_license | import Adafruit_BBIO.GPIO as GPIO
import time as t
import threading
def blink_leds( led ):
    """Toggle the given GPIO pin at 1 Hz until the global `running` flag clears."""
    global running
    while running:
        GPIO.output( led, GPIO.HIGH )
        t.sleep( 0.5 )
        GPIO.output( led, GPIO.LOW )
        t.sleep( 0.5 )
    # Parenthesized print works on both Python 2 and 3; the original
    # `print "..."` statement form is a SyntaxError under Python 3.
    print("thread closed")
############### Function Definitions over #############
def main():
    """Poll two GPIO buttons and adjust a speed value (+5 up / -5 down, floor 0).

    Runs until interrupted with Ctrl-C, then cleans up the GPIO pins.
    Print calls use the function form so the file runs on Python 2 and 3
    (the original used Python-2-only `print` statements).
    """
    print("Setting up GPIO")
    accUp = "P8_14"
    accDown = "P8_15"
    # Variables
    GPIO.setup( accUp, GPIO.IN )
    GPIO.setup( accDown, GPIO.IN )
    # GPIO.add_event_detect( accDown, GPIO.BOTH )
    # GPIO.setup( "P9_15", GPIO.OUT )
    global running
    speed = 50
    try:
        print("IN")
        while True:
            t.sleep(.1)
            if GPIO.input( accUp ):
                speed = speed + 5
                print("Speed: %s" % speed)
            elif GPIO.input( accDown ):
                if (speed > 0):
                    speed = speed - 5
                # NOTE(review): this prints even when speed was already 0
                # (the print sits outside the inner if) -- kept as-is.
                print("Speed: %s" % speed)
    except KeyboardInterrupt:
        GPIO.cleanup()
        print("Ending program")
# Entry point: the blink thread flag starts False, then the poll loop runs.
if __name__ == "__main__":
    running = False
    main()
| true |
db6595a5d8e0cf5102250160f9b0d7b015e9f7cb | Python | rocknrolltt/pir_webservice | /web-services-master-2c4daf620c344203ab5c51bf160d2bc4f0de3fda/PIR/output_xml.py | UTF-8 | 9,768 | 2.71875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 22 09:54:40 2015
@author: bwoods
"""
import logging
from datetime import datetime
import xml.etree.ElementTree as ET
import numpy as np
from PIR import __version__
class Annual_Peril:
    '''
    Peril counts for a single year: events within 1km ("at_point") and
    within 3km ("neighboring") of the queried location. `year` is
    expressed as years-ago relative to the report date.
    '''
    def __init__(self, year, at_point, neighboring):
        self.year = year
        self.at_point = at_point
        self.neighboring = neighboring
    def __repr__(self):
        return ET.tostring(self.to_XML())
    def to_XML(self, parent=None):
        # Attach to *parent* when given; otherwise build a standalone element.
        if parent is not None:
            _year_elem = ET.SubElement(parent, 'Year')
        else:
            _year_elem = ET.Element('Year')
        _yrs_ago = ET.SubElement(_year_elem, 'Years_Ago')
        _yrs_ago.text = unicode(self.year)
        _year_local = ET.SubElement(_year_elem, 'within_1km')
        _year_local.text = unicode(self.at_point)
        _year_region = ET.SubElement(_year_elem, 'within_3km')
        _year_region.text = unicode(self.neighboring)
        return _year_elem
class Last_Event:
    '''
    Dates of the last damaging event for a peril (only the at-point
    value is serialized by Peril.to_XML).
    '''
    def __init__(self, at_point):
        self.at_point = at_point
class Peril:
    '''
    Per-peril report data: yearly counts, summary statistics, the last
    damaging event, and the data-currency timestamp.
    '''
    def __init__(self, years, median, anomaly, last_event, currency_time):
        '''
        Validate and store the peril data.

        `years` must contain at least 5 Annual_Peril-like objects whose
        .year values (years-ago) are consecutive and sorted most recent
        first (0, 1, 2, ...). Raises ValueError otherwise.
        '''
        if len(years) < 5:
            raise ValueError('Must provide at least last 5 years')
        # ensure that years are subsequent and sorted newest to oldest
        yrs = np.array([y.year for y in years])
        if not np.all(np.diff(yrs) == 1):
            # Bug fix: the original passed `yrs` as a second ValueError
            # argument (logging-style '%s', yrs), so the message was never
            # actually formatted; use %-interpolation instead.
            raise ValueError('Years must be subsequent and sorted most recent '
                             'to oldest. Provided: %s' % yrs)
        self.years = years
        self.median = median
        self.anomaly = anomaly
        self.last_event = last_event
        self.currency_time = currency_time
    def __repr__(self):
        return ET.tostring(self.to_XML())
    def to_XML(self, peril_key, parent=None):
        '''
        Create the SubElements in *root* named *peril_key* from class
        member *pobj*
        '''
        # SubElements for each peril (per year)
        if parent is not None:
            pelem = ET.SubElement(parent, peril_key)
        else:
            pelem = ET.Element(peril_key)
        _currency = ET.SubElement(pelem, 'Currency_Date')
        _currency.text = self.currency_time.strftime(_dateFmt)
        _median = ET.SubElement(pelem, 'median')
        _median.text = unicode(self.median)
        _anomaly = ET.SubElement(pelem, 'anomaly')
        _anomaly.text = unicode(np.round(self.anomaly).astype(int))
        _elem_last = ET.SubElement(pelem, 'Last_Event')
        _elem_last.text = unicode(self.last_event.at_point)
        elem_years = ET.SubElement(pelem, 'Yearly_counts')
        for year_record in self.years:
            year_record.to_XML(elem_years)
        return pelem
class Location:
    '''
    Address and (optional) geocoded coordinates of the queried property.
    '''
    def __init__(self, propertyID, address, address2, city, state, zipcode,
                 lat=None, lon=None):
        self.propertyID = propertyID
        self.address = address
        self.address2 = address2
        self.city = city
        self.state = state
        self.zipcode = zipcode
        self.lat = lat
        self.lon = lon
    def __repr__(self):
        return ET.tostring(self.to_XML())
    def to_XML(self, parent=None):
        '''
        Serialize every field as a child of a <Location> element; attaches
        to *parent* when given, else returns a standalone element.
        '''
        if parent is not None:
            _loc_elem = ET.SubElement(parent, 'Location')
        else:
            _loc_elem = ET.Element('Location')
        _property_elem = ET.SubElement(_loc_elem, 'Property_ID')
        _property_elem.text = unicode(self.propertyID)
        _address_elem = ET.SubElement(_loc_elem, 'Address')
        _address_elem.text = unicode(self.address)
        _address2_elem = ET.SubElement(_loc_elem, 'Address2')
        _address2_elem.text = unicode(self.address2)
        _city_elem = ET.SubElement(_loc_elem, 'City')
        _city_elem.text = unicode(self.city)
        _state_elem = ET.SubElement(_loc_elem, 'State')
        _state_elem.text = unicode(self.state)
        _zip_elem = ET.SubElement(_loc_elem, 'Zip_Code')
        _zip_elem.text = unicode(self.zipcode)
        _lat_elem = ET.SubElement(_loc_elem, 'Latitude')
        _lat_elem.text = unicode(self.lat)
        _lon_elem = ET.SubElement(_loc_elem, 'Longitude')
        _lon_elem.text = unicode(self.lon)
        return _loc_elem
class Hail_Damage_Score:
    '''
    score is the max hail probability that year
    events is a count of events > 50% hail prob
    '''
    def __init__(self, year, score, events):
        # year is expressed as years-ago relative to the report date.
        self.year = year
        self.score = score
        self.events = events
    def __repr__(self):
        return ET.tostring(self.to_XML())
    def to_XML(self, parent=None):
        '''
        Serialize as a <Year> element; attaches to *parent* when given.
        '''
        if parent is not None:
            _hds_elem = ET.SubElement(parent, 'Year')
        else:
            _hds_elem = ET.Element('Year')
        _yrs_ago = ET.SubElement(_hds_elem, 'Years_Ago')
        _yrs_ago.text = unicode(self.year)
        _score = ET.SubElement(_hds_elem, 'score')
        _score.text = unicode(self.score)
        _events = ET.SubElement(_hds_elem, 'events')
        _events.text = unicode(self.events)
        return _hds_elem
class Hail_Damage:
    '''
    All of the hail damage scores, plus the data-currency timestamp.
    '''
    def __init__(self, score_list, currency_time):
        self.score_list = score_list
        self.currency_time = currency_time
    def __repr__(self):
        return ET.tostring(self.to_XML())
    def to_XML(self, parent=None):
        '''
        Write a <Currency_Date> plus one element per score under *parent*.

        NOTE(review): with parent=None the per-score elements are attached
        to nothing and only the currency element is returned -- looks like
        this method is only meant to be called with a parent; confirm.
        '''
        if parent is not None:
            _currency_elem = ET.SubElement(parent, 'Currency_Date')
        else:
            _currency_elem = ET.Element('Currency_Date')
        _currency_elem.text = self.currency_time.strftime(_dateFmt)
        for score in self.score_list:
            score.to_XML(parent)
        return _currency_elem
class Record:
    '''
    Complete PIR record for delivery: per-peril data, optional terrain and
    risk/damage scores, transaction metadata, and timestamps.
    '''
    def __init__(self, geocode, perils, elevation, slope,
                 hail_risk, hail_damage, wind_risk, transaction,
                 currency_time, updated_time, contact, data_rights,
                 query_time=datetime.now()):
        # NOTE(review): the query_time default is evaluated once at import
        # time, not per call -- callers should pass it explicitly.
        self.geocode = geocode
        self.perils = perils
        # static terrain info
        self.elevation = elevation
        self.slope = slope
        # static risk score
        self.hail_risk = hail_risk
        self.wind_risk = wind_risk
        # annual damage scores
        self.hail_damage = hail_damage
        self.transaction = transaction
        self.query_time = query_time
        self.currency_time = currency_time
        self.updated_time = updated_time
        self.contact = contact
        self.data_rights = data_rights
    def __repr__(self):
        return ET.tostring(self.to_XML())
    def to_XML(self):
        '''
        Convert the Record instance to an XML ElemntTee
        '''
        # we start with a root tag
        root = ET.Element(_root_key)
        peril_key_dict = {'Fire': _fire_key, 'Lightning': _lightning_key,
                          'Hail': _hail_key, 'Wind': _wind_key}
        # create SubElements for each peril
        for key, peril in self.perils.iteritems():
            peril.to_XML(peril_key_dict[key], parent=root)
        # only include terrain and hail risk information if included in the
        # request surface data
        if self.elevation:
            _terrain = ET.SubElement(root, _terrain_key)
            _slope = ET.SubElement(_terrain, 'slope_code')
            _slope.text = unicode(self.slope)
            _elevation = ET.SubElement(_terrain, 'elevation')
            _elevation.text = unicode(np.round(self.elevation).astype(int))
        # Hail Risk Score
        if self.hail_risk:
            _hail_risk = ET.SubElement(root, _hail_risk_key)
            _hail_risk.text = unicode(self.hail_risk)
        # Hail Risk Score
        if self.wind_risk:
            _wind_risk = ET.SubElement(root, _wind_risk_key)
            _wind_risk.text = unicode(self.wind_risk)
        # Hail Damage Scores (per year)
        if self.hail_damage:
            _hail_damage = ET.SubElement(root, _hail_damage_key)
            self.hail_damage.to_XML(parent=_hail_damage)
        _transaction = ET.SubElement(root, 'Transaction')
        _transaction.text = unicode(self.transaction)
        _query_time = ET.SubElement(root, 'Query_Time')
        _query_time.text = unicode(self.query_time)
        _current_time = ET.SubElement(root, 'Currency_Date')
        _current_time.text = self.currency_time.strftime(_dateFmt)
        _update_time = ET.SubElement(root, 'Update_Date')
        _update_time.text = self.updated_time.strftime(_dateFmt)
        # Location of query
        self.geocode.to_XML(parent=root)
        _rights_elem = ET.SubElement(root, 'data_rights')
        _rights_elem.text = self.data_rights
        _version_elem = ET.SubElement(root, 'version')
        _version_elem.text = __version__
        _contact_elem = ET.SubElement(root, 'contact')
        _contact_elem.text = self.contact
        return root
# global date format
_dateFmt = '%Y-%m-%d'
_root_key = 'PIR'
# keys for XML sections (defined after the classes, which is fine because
# the classes only read them at call time)
_hail_key = 'Hail_Events'
_wind_key = 'Wind_Events'
_lightning_key = 'Lightning_Events'
_fire_key = 'Wildfire_Events'
_terrain_key = 'Terrain'
_hail_risk_key = 'Hail_Risk_Score'
_wind_risk_key = 'Wind_Risk_Score'
_hail_damage_key = 'Hail_Damage_Score'
| true |
574bf0c42d1d59f9bf0eded49cc5b0d8682ec6fe | Python | mohanrex/music_led_strip_control | /client/main.py | UTF-8 | 4,234 | 2.734375 | 3 | [
"MIT"
] | permissive |
from libs.config_service import ConfigService
from libs.effects import Effects
from libs.effects_enum import EffectsEnum
from libs.notification_enum import NotificationEnum
from libs.notification_service import NotificationService
from libs.server_service import ServerService
from libs.audio_process_service import AudioProcessService
import numpy as np
from multiprocessing import Process, Queue, Manager, Lock
from time import sleep
class Main():
    """
    Top-level orchestrator: creates the shared queues/locks, spawns the
    service processes, then idles until Ctrl-C, when it terminates them.
    """
    def start(self):
        """
        Start all necessary components (notification, server, effects,
        audio) as separate processes and block until KeyboardInterrupt.
        """
        print("Init the programm...")
        # We need a lock to prevent too fast save and load actions of the config
        self._config_lock = Lock()
        # Create the instance of the config
        self._config_instance = ConfigService.instance(self._config_lock)
        self._config = self._config_instance.config
        # Prepare the queue for the output
        self._effects_queue = Queue(2)
        self._audio_queue_lock = Lock()
        self._audio_queue = Queue(2)
        self._server_queue_lock = Lock()
        self._server_queue = Queue(2)
        # Prepare all notification queues
        self._notification_queue_effects_in = Queue(2)
        self._notification_queue_effects_out = Queue(2)
        self._notification_queue_audio_in = Queue(2)
        self._notification_queue_audio_out = Queue(2)
        self._notification_queue_server_in = Queue(2)
        self._notification_queue_server_out = Queue(2)
        # Start Notification Service
        self._notification_service = NotificationService()
        self._notification_service_process = Process(
            target=self._notification_service.start,
            args=(
                self._config_lock,
                self._notification_queue_effects_in,
                self._notification_queue_effects_out,
            ))
        self._notification_service_process.start()
        #Start Server
        self._server = ServerService()
        self._server_process = Process(
            target=self._server.start,
            args=(
                self._config_lock,
                self._notification_queue_server_in,
                self._notification_queue_server_out,
                self._server_queue,
                self._server_queue_lock
            ))
        self._server_process.start()
        # Start the Effect Service
        self._effects = Effects()
        self._effects_process = Process(
            target=self._effects.start,
            args=(
                self._config_lock,
                self._notification_queue_effects_in,
                self._notification_queue_effects_out,
                self._effects_queue,
                self._server_queue,
                self._server_queue_lock,
                self._audio_queue,
                self._audio_queue_lock
            ))
        self._effects_process.start()
        #Start audio process
        # NOTE(review): the audio process is handed the *server* notification
        # queues (not the _audio ones created above, which stay unused) --
        # confirm whether that sharing is intentional.
        self._audio = AudioProcessService()
        self._audio_process = Process(
            target=self._audio.start,
            args=(
                self._config_lock,
                self._notification_queue_server_in,
                self._notification_queue_server_out,
                self._audio_queue,
                self._audio_queue_lock
            ))
        self._audio_process.start()
        print("Init finished")
        try:
            print("Programm started...")
            self._cancel_token = False
            # Do nothing with this thread. Just wait for the exit.
            while not self._cancel_token:
                sleep(10)
        except KeyboardInterrupt:
            print("Stop the programm...")
            self._server_process.terminate()
            self._effects_process.terminate()
            self._audio_process.terminate()
            self._notification_service_process.terminate()
            print("Programm stopped")
# Script entry point: build the orchestrator and run until Ctrl-C.
if __name__ == "__main__":
    main = Main()
    main.start()
645fc701ed2be6063b3d59f4797bbfe79d3af449 | Python | jihoahn9303/tensorflow_manual | /code/17.Gradient_Vanishing.py | UTF-8 | 2,291 | 2.765625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 20 11:00:40 2021
@author: jiho Ahn
@topic: Gradient Vanishing Problem
"""
import os
# Let TensorFlow grow GPU memory on demand instead of grabbing it all upfront.
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = "true"
import json
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from termcolor import colored
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.metrics import Mean, SparseCategoricalAccuracy
# Load the MNIST training split as (image, label) pairs plus dataset info.
train_ds, ds_info = tfds.load(name='mnist',
                              shuffle_files=True,
                              as_supervised=True,
                              split='train',
                              with_info=True)
def normalization(images, labels):
    """Rescale uint8 pixel values into [0, 1] floats; labels pass through."""
    scaled = tf.cast(images, tf.float32) / 255.
    return [scaled, labels]
# Build a deep sigmoid MLP, compute one batch of gradients, and plot the
# per-layer gradient magnitudes on a log scale to visualize vanishing.
n_layer = 7
cmap = cm.get_cmap('rainbow', lut=n_layer)
units = [10] * n_layer
model = Sequential()
model.add(Flatten())
for layer_idx in range(n_layer-1):
    model.add(Dense(units=units[layer_idx], activation='sigmoid'))
model.add(Dense(units=10, activation='softmax'))
model.build(input_shape=(None, 28, 28, 1))
#model.summary()
train_batch_size = 10
train_ds = train_ds.map(normalization).batch(train_batch_size)
loss_object = SparseCategoricalCrossentropy()
optimizer = SGD()
# Take a single batch to probe the gradients with.
train_ds_iter = iter(train_ds)
images, labels = next(train_ds_iter)
#print(images.shape, labels.shape)
with tf.GradientTape() as tape:
    predictions = model(images)
    loss = loss_object(labels, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
fig, ax = plt.subplots(figsize=(20, 10))
ax.set_yscale('log')
# gradients alternate kernel/bias per Dense layer; [::2] keeps the kernels.
for grad_idx, grad in enumerate(gradients[::2]):
    if grad_idx >= 1:
        grad_abs = np.abs(grad.numpy().flat)
        ax.plot(grad_abs, label='layer {}'.format(grad_idx),
                color=cmap(grad_idx),
                alpha=0.8)
ax.legend(bbox_to_anchor=(1, 0.5),
          loc='center left',
          fontsize=20)
fig.tight_layout()
#print(type(gradients))
#print(len(gradients))
#print(type(gradients[0]))
#print(gradients[0].shape)
| true |
5b5171a708a0e01dcdc2185d927a678d525bc73a | Python | 5362396/Wizualizacja-Danych | /cw5/zad4.py | UTF-8 | 531 | 4.125 | 4 | [] | no_license | # Korzystając z powyższego kodu stwórz kilka instancji klasy Point i spróbuj odwołać się do zmiennej counter z poziomu różnych instancji, porównując jej wartość dla każdej z nich oraz spróbuj zmienić jej wartość
class Point:
    """Toy class demonstrating a *class-level* mutable attribute.

    `counter` lives on the class object, so every instance reads and
    mutates the same list -- that sharing is the point of the exercise.
    """

    counter = []

    def __init__(self, x=0, y=0):
        self.x, self.y = x, y

    def update(self, n):
        # Appends to the shared class-level list, visible from all instances.
        self.counter.append(n)
# Demo: all instances see the same class-level `counter` list.
p1 = Point(0,0)
p2 = Point(5,5)
p1.update(3)
print(p1.counter)
print(p2.counter)
p1.update(1)
p2.update(2)
# A brand-new instance still sees every earlier update.
p3 = Point(1,1)
print(p3.counter)
db2bc7d8404fd801f10ac6fa983ba88123eb1f4f | Python | rexuru17/planning-master | /products/models.py | UTF-8 | 1,048 | 2.546875 | 3 | [] | no_license | from django.db import models
# Create your models here.
class ProductGroup(models.Model):
    """Top-level product category (name must be unique)."""
    product_group = models.CharField(max_length=100, unique=True)
    def __str__(self):
        return self.product_group
class ProductSubGroup(models.Model):
    """Second-level category nested under a ProductGroup."""
    # RESTRICT: a group cannot be deleted while subgroups reference it.
    product_group = models.ForeignKey(ProductGroup, on_delete=models.RESTRICT)
    product_subgroup = models.CharField(max_length=100, unique=True)
    def __str__(self):
        return self.product_subgroup
class Product(models.Model):
    """A sellable item identified by a unique 5-character code."""
    product_group = models.ForeignKey(ProductGroup, on_delete=models.RESTRICT)
    product_subgroup = models.ForeignKey(ProductSubGroup, on_delete=models.RESTRICT)
    code = models.CharField(max_length=5, unique=True)
    name = models.CharField(max_length=100)
    unit_of_measure = models.CharField(max_length=10)
    weight = models.DecimalField(max_digits=5, decimal_places=3)
    weight_text = models.CharField(max_length=10)
    def __str__(self):
        # Display as "<code> - <name>" in admin/listings.
        product_info = str(self.code) + ' - ' + str(self.name)
        return str(product_info)
| true |
76da4e6ae085340d515e6ef608b606dadb7b0301 | Python | Sarthak762/Friday-Mini-Desktop-Assistant | /main.py | UTF-8 | 2,304 | 2.78125 | 3 | [] | no_license | import pyttsx3
import speech_recognition as sr
from datetime import datetime
import wikipedia
import webbrowser
import os
def speak(text):
    """Speak *text* aloud, blocking until playback finishes.

    NOTE(review): voices[1] assumes at least two installed TTS voices --
    confirm on the target machine.
    """
    engine = pyttsx3.init()
    voices = engine.getProperty('voices')
    engine.setProperty('voice',voices[1].id)
    engine.say(text)
    engine.runAndWait()
def welcome():
    '''Greet the user by time of day (afternoon covers everything past noon)
    and introduce the assistant, both spoken and printed.'''
    hrs = int(datetime.now().hour)
    if(hrs<12):
        speak("Good morning")
        print("Good morning")
    else:
        speak('Good afternoon')
        print("Good afternoon")
    speak("I am friday! How can I help you")
    print("I am friday! how can I help you")
def Listen():
    """Capture one utterance from the microphone and return it as text.

    Returns an empty string when recognition fails, so callers can chain
    `.lower()` / substring checks safely.
    """
    r = sr.Recognizer()
    # Bug fix: the original set pause_threshold *after* listen(), where it
    # had no effect; it must be configured before listening.
    r.pause_threshold = 1
    with sr.Microphone() as source:
        print("listining.......")
        audio_txt = r.listen(source)
    try:
        query = r.recognize_google(audio_txt,language='en-in')
    except Exception as e:
        # Bug fix: the original fell through to `return query` with the
        # local unbound, raising UnboundLocalError on any recognition error.
        print('Please say again!')
        query = ""
    return query
# Command loop: greet once, then repeatedly listen and dispatch on keywords.
if __name__ == "__main__":
    welcome()
    while True:
        query = Listen().lower()
        if "open youtube" in query:
            print("opening youtube")
            speak("opening youtube")
            webbrowser.open("youtube.com")
        elif "open google" in query:
            print("opening google")
            speak("opening google")
            webbrowser.open('google.com')
        elif "open codechef" in query:
            print("opening codechef")
            speak("opening codechef")
            webbrowser.open('codechef.com')
        elif "search wikipedia" in query:
            print("searching wikipedia..........")
            speak("searching wikipedia")
            # NOTE(review): str.replace returns a new string; this result is
            # discarded, so the full phrase is sent to wikipedia -- confirm.
            query.replace("search wikipedia for","")
            print("Wikipedia says,"+ wikipedia.summary(query,sentences=2))
            speak("Wikipedia says,"+ wikipedia.summary(query,sentences=2))
        elif "play movie" in query:
            # NOTE(review): same discarded-replace issue; also `list` shadows
            # the builtin and its value is never used.
            query.replace("play movie","")
            list = [entry.name for entry in os.scandir("E:\\my movies\\english movies\\")]
            print("Opening............")
            speak("opening............")
            os.startfile("E:\\my movies\\english movies")
2971b6800fbb92467047a154a6c060eb1cb04b72 | Python | sohelahmadkhan/inferential_stats_project | /q04_chi2_test/build.py | UTF-8 | 482 | 2.875 | 3 | [] | no_license | # Default imports
import scipy.stats as stats
import pandas as pd
import numpy as np
# Housing dataset consumed by the chi-square exercise below.
df = pd.read_csv('data/house_pricing.csv')
# Enter Code Here
def chi_square(df):
    """Chi-square independence test between LandSlope and SalePrice terciles.

    Returns (p-value, chi_test); chi_test is a hard-coded False flag kept
    exactly as in the exercise template.
    """
    slope = df.LandSlope
    price_band = pd.qcut(df['SalePrice'],3,labels = ['High','Medium','Low'])
    contingency = pd.crosstab(slope, price_band)
    _, pval, _, _ = stats.chi2_contingency(contingency)
    chi_test = np.bool_(False)
    return pval, chi_test
# FIX: parenthesised call form is valid on both Python 2 and Python 3
# (the bare print statement is a SyntaxError under Python 3).
print(chi_square(df))
| true |
290ab7edb84ebd9cf543bca847889a882ce949f1 | Python | tarashetman/stmodelling | /modules/FCNmodule.py | UTF-8 | 1,859 | 2.546875 | 3 | [
"BSD-2-Clause"
] | permissive | import torch
import torch.nn as nn
import torch.nn.functional as F
class FCNmodule(torch.nn.Module):
    """
    Fully-convolutional (CONV) implementation used for linking spatio-temporal
    features coming from different segments.

    Input to forward(): (batch, num_frames, img_feature_dim); the feature
    dimension is halved five times by the strided convolutions, so
    img_feature_dim must be 8 * 2**5 = 256 for the final (num_frames, 8)
    average pool to produce a (batch, num_class) output.
    """
    def __init__(self, img_feature_dim, num_frames, num_class, relation_type):
        # FIX: the super() call previously referenced the undefined name
        # `CONVmodule`, raising NameError on instantiation.
        super(FCNmodule, self).__init__()
        self.num_frames = num_frames
        self.num_class = num_class
        self.img_feature_dim = img_feature_dim
        self.relation_type = relation_type
        self.classifier = nn.Sequential(
            nn.ReLU(),
            nn.Conv2d(1, 64, kernel_size=3, stride=(1, 2), padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=(1, 2), padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=(1, 2), padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, stride=(1, 2), padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 256, kernel_size=3, stride=(1, 2), padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Dropout(p=0.0),
            nn.Conv2d(256, num_class, kernel_size=1),
            nn.ReLU(),
            nn.AvgPool2d((num_frames, 8))
        )

    def forward(self, input):
        # Add a singleton channel dimension expected by Conv2d.
        input = input.unsqueeze(1)
        output = self.classifier(input)
        # Drop the two trailing singleton dims left by the average pool.
        output = torch.squeeze(output, 3)
        output = torch.squeeze(output, 2)
        return output
def return_FCN(relation_type, img_feature_dim, num_frames, num_class):
    """Factory helper: build and return an FCNmodule instance."""
    return FCNmodule(img_feature_dim, num_frames, num_class, relation_type)
| true |
13c06844583259bcb3fe1cf024b4942ac2d7192f | Python | furas/python-examples | /flask/ajax-setinterval-get-thread-result/main.py | UTF-8 | 2,135 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# date: 2020.01.17
# https://stackoverflow.com/questions/59780007/ajax-with-flask-for-real-time-esque-updates-of-sensor-data-on-webpage/
from flask import Flask, request, render_template_string, jsonify
import datetime
import time
import threading
app = Flask(__name__)

# Shared state between the Flask routes and the background thread.
running = False # to control loop in thread
value = 0  # counter incremented once per second while the thread runs
def rpi_function():
    """Background worker: bump the module-level counter once per second.

    Loops while the module-level `running` flag is True; the /<device>/off
    route clears the flag to stop the thread.
    """
    global value
    print('start of thread')
    while running: # global variable to stop loop
        value += 1
        time.sleep(1)
    print('stop of thread')
@app.route('/')
@app.route('/<device>/<action>')
def index(device=None, action=None):
    """Serve the control page; optionally start/stop the background counter.

    URL forms:
      /               -> just render the page
      /<device>/on    -> start the worker thread (if not already running)
      /<device>/off   -> clear the flag so the worker thread stops
    """
    global running
    global value
    if device:
        if action == 'on':
            if not running:
                print('start')
                running = True
                # Thread exits on its own once `running` is cleared.
                threading.Thread(target=rpi_function).start()
            else:
                print('already running')
        elif action == 'off':
            if running:
                print('stop')
                running = False
            else:
                print('not running')
    # The embedded page polls /update once per second via jQuery AJAX.
    return render_template_string('''<!DOCTYPE html>
<head>
    <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
</head>
<body>
    <a href="/bioR/on">TURN ON</a>
    <a href="/bioR/off">TURN OFF</a>
    <h1 id="num"></h1>
    <h1 id="time"></h1>
    <script>
setInterval(function(){$.ajax({
    url: '/update',
    type: 'POST',
    success: function(response) {
        console.log(response);
        $("#num").html(response["value"]);
        $("#time").html(response["time"]);
    },
    error: function(error) {
        console.log(error);
    }
})}, 1000);
</script>
</body>
</html>
''')
@app.route('/update', methods=['POST'])
def update():
    """AJAX endpoint: report the current counter value and server time."""
    payload = {
        'value': value,
        'time': datetime.datetime.now().strftime("%H:%M:%S"),
    }
    return jsonify(payload)
# Start the development server (debug=True enables the reloader; not for production).
app.run(debug=True)
| true |
90e01ed3410a0af85102496c5384cf9c06f94e0d | Python | MattHartshorn/otp-encryption | /test/testhelper.py | UTF-8 | 806 | 2.65625 | 3 | [
"MIT"
] | permissive | import unittest
import sys
import os

# Make the production code under ../src importable for the tests below.
sys.path.append(os.path.abspath("../src"))

import keygenerator
from helper import binStrToBytes
class TestHelper(unittest.TestCase):
    """Unit tests for helper.binStrToBytes."""

    def test_binStrToBytes(self):
        # A valid 64-bit binary string converts to exactly 8 bytes.
        bit_string = keygenerator.generate(8, True)
        converted = binStrToBytes(bit_string)
        self.assertEqual(len(converted), 8)

    def test_binStrToBytes_invalidSize_bits(self):
        # A length that is not a multiple of 8 must be rejected.
        bit_string = keygenerator.generate(14)
        with self.assertRaises(Exception):
            binStrToBytes(bit_string)

    def test_binStrToBytes_type(self):
        # Non-string input must raise TypeError.
        not_a_string = 36
        with self.assertRaises(TypeError):
            binStrToBytes(not_a_string)

    def test_binStrToBytes_nonBinary(self):
        # Characters other than 0/1 must be rejected.
        bad_string = "01011001g0100101"
        with self.assertRaises(Exception):
            binStrToBytes(bad_string)
if (__name__ == "__main__"):
    # Allow running this file directly: `python testhelper.py`.
    unittest.main()
| true |
daef694441f85592b9e4d3a1767e5ed247201d91 | Python | haiou90/aid_python_core | /day19/personal_house_information_manager_system/bll.py | UTF-8 | 1,466 | 2.984375 | 3 | [] | no_license | """
Business logic layer (业务逻辑层).
"""
from dal import HouseDao
class HouseManagerController:
    """Business-logic controller over the list of houses loaded from the DAL."""

    def __init__(self):
        self.__list_houses = HouseDao.load()
        # Cached most-expensive house; refreshed by calculate_max_price().
        self.max = self.__list_houses[0]
        self.dict_type_house = {}
        self.set_house = set()

    @property
    def list_houses(self):
        """All currently managed houses."""
        return self.__list_houses

    def delete_house(self, id):
        """Remove the house with the given id; return True if one was removed."""
        for item in self.__list_houses:
            if item.id == id:
                self.__list_houses.remove(item)
                return True
        return False

    def calculate_max_price(self):
        """Return the house with the highest total price.

        FIX: the old loop kept comparing against the stale cached self.max,
        so a previously deleted house could still be reported as the maximum.
        Now recomputed from the current list, mirroring calculate_min_price().
        """
        self.max = max(self.__list_houses, key=lambda item: item.total_price)
        return self.max

    def calculate_min_price(self):
        """Return the house with the lowest total price."""
        return min(self.__list_houses, key=lambda item: item.total_price)

    def calculate_house_type(self):
        """Count houses per house_type and return the accumulated mapping.

        NOTE(review): dict_type_house is never reset, so repeated calls
        accumulate counts -- confirm this is intentional.
        """
        for item in self.__list_houses:
            if item.house_type in self.dict_type_house:
                self.dict_type_house[item.house_type] += 1
            else:
                self.dict_type_house[item.house_type] = 1
        return self.dict_type_house
| true |
9ac88c5b6818d91177b096a5cfb5dfc303cac925 | Python | ytakzk/timber_assemblies | /UR_Control/geometry/surface.py | UTF-8 | 18,161 | 2.921875 | 3 | [
"MIT"
] | permissive | import Rhino.Geometry as rg
from geometry.beam import Beam
import copy
import math
class Surface(object):
    """A lofted design surface subdivided into rows of timber beams."""

    def __init__(self, surface, u_div=5, v_div=3, beam_width = 160, beam_thickness = 40):
        """ Initialization

        :param surface: Base rg.geometry object that will be edited
        :param u_div: How many divisions this surface will have in the u_direction (default = 5)
        :param v_div: How many divisions this surface will have in the v_direction (default = 3)
        :param beam_width: The initial width of the beams (default = 160)
        :param beam_thickness: The initial thickness of the different beams (default = 40)
        """
        # Reparametrise both directions to the unit interval so that all
        # later u/v values can be expressed in [0, 1].
        domain = rg.Interval(0, 1)
        surface.SetDomain(0, domain)
        surface.SetDomain(1, domain)

        self.surface = surface
        self.shared_edges = []

        # Cache the boundary isocurves and their midpoints.
        self.bottom_curve = surface.IsoCurve(0, 0)
        self.top_curve = surface.IsoCurve(0, 1)
        self.left_curve = surface.IsoCurve(1, 0)
        self.right_curve = surface.IsoCurve(1, 1)

        self.bottom_pt = self.bottom_curve.PointAt(0.5)
        self.top_pt = self.top_curve.PointAt(0.5)
        self.left_pt = self.left_curve.PointAt(0.5)
        self.right_pt = self.right_curve.PointAt(0.5)

        self.u_div = u_div
        self.v_div = v_div

        self.beam_w = beam_width
        self.beam_t = beam_thickness

    def instantiate_beams(self, mapping_type = 0, seam_type = 0, warp_type = 3, will_flip = False, flush_beam_count = 2):
        """ Function that instatiates the beam generation

        :param mapping_type: Some default types of surface logics applied ([1] = even - default, [2/3, 1] = seaming type)
        :param seam_type: Which type of seam this object has (0 = single flush - default, 1 = multi flush left, 2 = multi flush right, 3 = multi flush both sides)
        :param warp_type: How the surface is being warped (0 = no warping - default, 1 = left, 2 = right, 3 = both sides)
        :param will_flip: Whether the surface will flip or not (default = False)
        """
        # FIX: 2/3 is integer division (== 0) under Python 2 / IronPython;
        # 2.0/3 evaluates to the same float under Python 3 and is correct
        # everywhere.
        self.mapping_type = [[1], [2.0/3, 1]][mapping_type]
        self.seam_type = seam_type
        self.warp_type = warp_type
        self.will_flip = will_flip
        self.flush_beam_count = flush_beam_count

        self.warped_srf = copy.deepcopy(self.surface)

        # changing the u_div count in relation to the mapping_type
        # FIX: math.round does not exist (AttributeError).  ceil(seam_type/2.0)
        # reproduces the intended counts: seam 0 -> 0 sets, seams 1/2 -> 1 set,
        # seam 3 -> 2 sets of flush beams.
        total_flush_beam_count = int(math.ceil(self.seam_type / 2.0)) * self.flush_beam_count
        self.mapping_pattern_length = len(self.mapping_type)

        # checking whether there are enough u_div to map out a surface in the middle
        if self.u_div < total_flush_beam_count:
            self.main_srf_div = 2
            self.u_div = self.main_srf_div * self.mapping_pattern_length + total_flush_beam_count
        # checking whether the amount of splits in the middle is a multiple of it's mapping_pattern_len
        elif not(int(self.u_div - total_flush_beam_count) % self.mapping_pattern_length == 0):
            # FIX: wrap in int() -- math.ceil returns a float on Python 2 and
            # the value is used as a range() bound later on.
            self.main_srf_div = int(math.ceil(float(self.u_div - total_flush_beam_count) / self.mapping_pattern_length))
            self.u_div = self.main_srf_div * self.mapping_pattern_length + total_flush_beam_count
        else:
            # FIX: main_srf_div was never initialised when u_div already
            # fitted the pattern exactly, which crashed later with an
            # AttributeError in __instantiate_main_beams.
            self.main_srf_div = int(self.u_div - total_flush_beam_count) // self.mapping_pattern_length

        # initializing the beam set
        self.beams = []

        if self.will_flip:
            # flip
            domain = rg.Interval(0, 1)
            self.surface.Reverse(0, True)
            self.surface.SetDomain(0, domain)

        self.end_isocrvs = [self.surface.GetIsocurve(0, 0), self.surface.GetIsocurve(0, 1)]

        # setting up how and what needs to be run in order
        # does flipping matter here ???
        o_half_t = .5 * self.beam_t
        o_flush_seam = (self.flush_beam_count - .5) * self.beam_t

        # starting condition of the beam instantiation
        self.div_counter = 0

        # single - flush condition
        if self.seam_type == 0:
            # simple even condition
            # absolute offset of half the beam_t
            self.__offset_sides_surface(offset_dis = o_half_t, sides = 3)
            # FIX: was self.__warp_surface(), a method that does not exist on
            # this class -- the warping method is __warp_sides_surface.
            self.__warp_sides_surface()
            self.__instantiate_main_beams(start_beams = True, end_beams = True)
        # multi - flush condition on the left
        elif self.seam_type == 1:
            # flush condition on the left
            # initializing the flush beams
            self.__multi_flush_seams(location = 0)
            self.__offset_sides_surface(offset_dis = o_flush_seam, sides = 1)
            self.__offset_sides_surface(offset_dis = o_half_t, sides = 2)
            self.__warp_sides_surface()
            self.__instantiate_main_beams(start_beams = False, end_beams = True)
        # multi - flush condition on the right
        elif self.seam_type == 2:
            # flush condition on the right
            self.__offset_sides_surface(offset_dis = o_flush_seam, sides = 2)
            self.__offset_sides_surface(offset_dis = o_half_t, sides = 1)
            self.__warp_sides_surface()
            self.__instantiate_main_beams(start_beams = True, end_beams = False)
            # initializing the flush beams
            self.__multi_flush_seams(location = 1)
        # multi - flush conditon on both sides
        elif self.seam_type == 3:
            # flush condition on both sides
            # initializing the first set of flush conditions
            self.__multi_flush_seams(location = 0)
            self.__offset_sides_surface(offset_dis = o_flush_seam, sides = 3)
            self.__warp_sides_surface()
            # initializing the second set of flush conditions
            self.__multi_flush_seams(location = 1)
            # NOTE(review): unlike the other seam types, no main-surface beams
            # are instantiated here -- confirm this is intentional.

        if will_flip:
            # flip back
            domain = rg.Interval(0, 1)
            self.surface.Reverse(0, True)
            self.surface.SetDomain(0, domain)
            # reversing the direction of the base_plane of the beam
            self.beams = list(reversed(self.beams))

    def __instantiate_main_beams(self, start_beams = False, end_beams = False):
        """ internal method that sets out the beams on the main surface

        :param start_beams: Whether the main surface is mapped from the left edge of the surface or skips over the first one
        :param end_beams: Whether the main surface is mapped until the end on the right or the last one is ignored
        """
        # FIX: float() guards against integer division under Python 2 when
        # computing the per-division u offsets below.
        division_range = float(int(start_beams) + self.main_srf_div - 1 + int(end_beams))

        u_val_list = []
        [u_val_list.extend([(u_map_set_val + u_val / division_range) for u_map_set_val in self.mapping_type]) for u_val in range(int(start_beams), int(start_beams) + self.main_srf_div, 1)]

        for u in u_val_list:
            inner_arr = []
            for v in range(self.v_div):
                # checkerboard pattern: alternate which v cells get a beam
                if (self.div_counter % 2 == 0 and v % 2 == 1) or (self.div_counter % 2 == 1 and v % 2 == 0):
                    continue

                p1 = self.warped_srf.PointAt(u, float(v)/self.v_div)
                p2 = self.warped_srf.PointAt(u, float(v+1)/self.v_div)

                length = p1.DistanceTo(p2)
                center = rg.Point3d((p1 + p2) / 2)
                _, uu, vv = self.warped_srf.ClosestPoint(center)
                normal = self.warped_srf.NormalAt(uu, vv)
                x_axis = rg.Vector3d(p1) - rg.Vector3d(p2)
                x_axis.Unitize()
                # NOTE(review): y_axis is computed but not used -- the plane is
                # built from x_axis and the surface normal directly.
                y_axis = rg.Vector3d.CrossProduct(normal, x_axis)
                plane = rg.Plane(center, x_axis, normal)
                beam = Beam(plane, length, self.beam_w, self.beam_t)
                inner_arr.append(beam)
            self.beams.append(inner_arr)
            self.div_counter += 1

    def __multi_flush_seams(self, location = 0, will_flip = False):
        """ method to create a flush seam with n amount of beams

        :param location: Whether you're considering the end or the start of the system (defualt = 0)
        :param will_flip: What the start condition is (default = False)
        """
        # getting the correct isocurve of the surface
        local_curve = self.end_isocrvs[location]

        # getting the domain of the curve
        t_start, t_end = local_curve.Domain[0], local_curve.Domain[1]
        t_delta = t_end - t_start
        t_set = [t_start, (t_end + t_start) / 2, t_end]
        pt_set = [local_curve.PointAt(t_val) for t_val in t_set]
        curve_plane = rg.Plane(pt_set[0], pt_set[1], pt_set[2])

        # getting the t_values on that curve to describe the beam lines
        # NOTE(review): these v_div + 1 samples only span v_div/(v_div+1) of
        # the curve -- confirm the last segment is meant to be left out.
        t_vals = [t_start + t_delta * v / (self.v_div + 1) for v in range(self.v_div + 1)]

        # generating the move vectors for the curves
        # to check whether you're at the start or the end of the surface
        if (location == 0):
            switch_flag = 0
        else:
            switch_flag = - self.flush_beam_count
        mv_vectors = [curve_plane.ZAxis * self.beam_t * (switch_flag + .5 + i) for i in range(self.flush_beam_count)]

        for mv_vector in mv_vectors:
            temp_curve = copy.deepcopy(local_curve)
            temp_curve.Translate(mv_vector)
            inner_arr = []
            for v in range(self.v_div):
                # same checkerboard skipping pattern as the main beams
                if (self.div_counter % 2 == 0 and v % 2 == 1) or (self.div_counter % 2 == 1 and v % 2 == 0):
                    continue

                p1 = temp_curve.PointAt(t_vals[v])
                p2 = temp_curve.PointAt(t_vals[v + 1])

                length = p1.DistanceTo(p2)
                center = rg.Point3d((p1 + p2) / 2)
                z_axis = curve_plane.ZAxis
                x_axis = rg.Vector3d(p1) - rg.Vector3d(p2)
                x_axis.Unitize()
                y_axis = rg.Vector3d.CrossProduct(z_axis, x_axis)
                plane = rg.Plane(center, x_axis, y_axis)
                beam = Beam(plane, length, self.beam_w, self.beam_t)
                inner_arr.append(beam)
            self.beams.append(inner_arr)
            self.div_counter += 1

    def __offset_sides_surface(self, offset_dis=20, rel_or_abs = False, sides = 0, sampling_count = 25):
        """ method that returns a slightly shrunk version of the surface along the v direction

        :param offset_dis: Offset Distance (abs or relative)
        :param rel_or_abs: Flag that states whether you offset the surface absolutely or relative - if relative u domain has to be set correctly!!! (rel = True, abs = False, default = False)
        :param sides: Which sides should be offseted (0 = nothing - default, 1 = left, 2 = right, 3 = both)
        :param sampling_count: Precision at which the surface should be rebuild
        """
        # first of all checking whether you have to do anything at all
        if not (sides == 0):
            local_srf = self.warped_srf

            # case that surface offset is relative
            if rel_or_abs:
                # NOTE(review): Domain(0) returns an Interval, yet it is used
                # below in arithmetic comparisons -- confirm this branch has
                # ever been exercised.
                u_div = local_srf.Domain(0)
                if (sides == 1):
                    offsets = 1
                elif (sides == 2):
                    offsets = 1
                elif (sides == 3):
                    offsets = 2
                # making sure you don't make the surface dissappear
                if offset_dis * offsets > .9 * u_div:
                    offset_dis = .9 * u_div / offsets

            local_srf.SetDomain(1, rg.Interval(0, sampling_count - 1))
            temp_isocurves = [local_srf.IsoCurve(0, v_val) for v_val in range(sampling_count)]
            temp_isocurves_shortened = []

            for isocurve in temp_isocurves:
                # getting the length and the domain of every isocurve
                start_t, end_t = isocurve.Domain[0], isocurve.Domain[1]
                t_delta = end_t - start_t
                # calculating how much the offset_dis result in t_val change
                if rel_or_abs:
                    t_shift = t_delta * offset_dis
                else:
                    length = isocurve.GetLength()
                    t_differential = t_delta / length
                    t_shift = t_differential * offset_dis
                # creating variations for the different offset options
                start_t_new, end_t_new = start_t, end_t
                if (sides == 1):
                    start_t_new = start_t + t_shift
                    splits = [start_t_new]
                    data_to_consider = 1
                elif (sides == 2):
                    end_t_new = end_t - t_shift
                    splits = [end_t_new]
                    data_to_consider = 0
                elif (sides == 3):
                    start_t_new = start_t + t_shift
                    end_t_new = end_t - t_shift
                    splits = [start_t_new, end_t_new]
                    data_to_consider = 1
                # splitting the curve at the values in the split list, picking the curve based on the split type
                new_isocurve = isocurve.Split(splits)[data_to_consider]
                temp_isocurves_shortened.append(new_isocurve)

            # switching the uv direction back to where it should be! -> rebuilding the surface
            point_list = [[] for i in range(sampling_count)]
            for temp_curve in temp_isocurves_shortened:
                length = temp_curve.GetLength()
                start_t, end_t = temp_curve.Domain[0], temp_curve.Domain[1]
                t_delta = end_t - start_t
                t_differential = t_delta / (sampling_count - 1)
                # resample each shortened isocurve at sampling_count points
                point_set = [temp_curve.PointAt(t_val * t_differential + start_t) for t_val in range(0, sampling_count, 1)]
                for i, point in enumerate(point_set):
                    point_list[i].append(rg.Point3d(point))

            uv_switched_curves = []
            for point_set in point_list:
                local_isocurve = rg.NurbsCurve.Create(False, 3, point_set)
                uv_switched_curves.append(local_isocurve)

            # lofting those isocurves
            loft_type = rg.LoftType.Tight
            local_srf = rg.Brep.CreateFromLoftRebuild(uv_switched_curves, rg.Point3d.Unset, rg.Point3d.Unset, loft_type, False, 50)[0]
            # getting the loft as a nurbssurface out of the resulting brep
            local_srf = local_srf.Faces.Item[0].ToNurbsSurface()
            domain = rg.Interval(0, 1)
            local_srf.SetDomain(0, domain)
            local_srf.SetDomain(1, domain)

            self.warped_srf = local_srf
        else:
            # in case you don't have to do anything at all you do nothing at all !?
            pass

    def __warp_sides_surface(self, grading_percentage = .5, precision = 25):
        """ method that makes the beams move closer to each other at the seams

        :param grading_percentage: Percent of the surface that is being regraded (default .5)
        """
        # first of all checking whether you have to do anything at all
        if not(self.warp_type == 0):
            local_srf = self.warped_srf
            # NOTE(review): the literal 25 below probably should be
            # `precision` -- left unchanged to preserve current behaviour.
            u_extra_precision = int(math.ceil(25 / grading_percentage)) - precision
            half_pi = math.pi / 2.0
            half_pi_over_precision = half_pi / (precision - 1)

            # setting up the base grading t_vals (sine ramp from 0 to 1)
            ini_t_vals = []
            total_t_vals = u_extra_precision
            for i in range(precision):
                alfa = half_pi_over_precision * i
                local_t_val = math.sin(alfa)
                ini_t_vals.append(local_t_val)
                total_t_vals += local_t_val
            [ini_t_vals.append(1) for i in range(u_extra_precision)]

            # setting up the grading list for the amount of sides
            local_t_val = 0
            if (self.warp_type == 1):
                # only on the left side
                t_vals = []
                for t_val in ini_t_vals:
                    local_t_val += t_val
                    t_vals.append(local_t_val)
            elif (self.warp_type == 2):
                # only on the right side
                t_vals = []
                ini_t_vals.reverse()
                local_ini_t_vals = [0]
                local_ini_t_vals.extend(ini_t_vals[:-1])
                for t_val in local_ini_t_vals:
                    local_t_val += t_val
                    t_vals.append(local_t_val)
            elif (self.warp_type == 3):
                # on both sides
                t_vals = []
                local_ini_t_vals = ini_t_vals[:]
                ini_t_vals.reverse()
                local_ini_t_vals.extend(ini_t_vals[:-1])
                for t_val in local_ini_t_vals:
                    local_t_val += t_val
                    t_vals.append(local_t_val)

            # getting the v isocurves
            val_0, val_1 = t_vals[0], t_vals[-1]
            local_srf.SetDomain(1, rg.Interval(0, precision - 1))
            temp_srf_iscrv_set = [local_srf.IsoCurve(0, v_val) for v_val in range(precision)]
            pt_list = [[] for i in range(len(t_vals))]
            for isocrv in temp_srf_iscrv_set:
                t_start, t_end = isocrv.Domain[0], isocrv.Domain[1]
                t_delta = t_end - t_start
                t_differential = t_delta / val_1
                [pt_list[i].append(isocrv.PointAt(t_start + t_val * t_differential)) for i, t_val in enumerate(t_vals)]
            # constructing new isocurves
            loft_curves = [rg.NurbsCurve.Create(False, 3, pt_set) for pt_set in pt_list]
            loft_type = rg.LoftType.Tight
            local_srf = rg.Brep.CreateFromLoftRebuild(loft_curves, rg.Point3d.Unset, rg.Point3d.Unset, loft_type, False, 50)[0]
            # getting the loft as a nurbssurface out of the resulting brep
            new_srf = local_srf.Faces.Item[0].ToNurbsSurface()
            domain = rg.Interval(0, 1)
            new_srf.SetDomain(0, domain)
            new_srf.SetDomain(1, domain)

            # FIX: previously assigned local_srf (the Brep), leaving new_srf
            # unused; the rest of the class calls PointAt(u, v) on warped_srf
            # with the reparametrised NurbsSurface, so new_srf is what must be
            # stored (mirrors __offset_sides_surface).
            self.warped_srf = new_srf
        else:
            # in case you don't have to do anything at all you do nothing at all !?
            pass
| true |
a2db046f11286b79ff68844072e1a6bf783d2f92 | Python | AndreaCenturelli/IoT_for_professor | /emulator/sensor_emulators/heartBeat_emulator.py | UTF-8 | 972 | 2.796875 | 3 | [] | no_license | import json
from datetime import datetime
import pytz

# Single UTC tzinfo instance reused for all reading timestamps.
utc = pytz.utc
class HeartBeatEmulator(object):
    """Simulates a heart-rate sensor whose BPM ramps up and down in steps."""

    def __init__(self):
        self.BPM = 85  # resting heart rate

    def increaseHeartBeat(self):
        """Raise BPM: 8% per step while below 120, 1% per step above."""
        rate = 0.08 if self.BPM < 120 else 0.01
        self.BPM += self.BPM * rate

    def decreaseHeartBeat(self):
        """Lower BPM: 8% per step while above 95, 1% per step below."""
        rate = 0.08 if self.BPM > 95 else 0.01
        self.BPM -= self.BPM * rate

    def park(self):
        """Reset to the resting heart rate."""
        self.BPM = 85

    def sendValue(self):
        """Return the current reading as a JSON string with a UTC timestamp."""
        reading = {}
        reading['type'] = 'heart_beat'
        reading['timestamp'] = datetime.now(utc).strftime("%b %d %Y %H:%M:%S %Z")
        reading['value'] = round(self.BPM)
        return json.dumps(reading)
| true |
1d3ce5372b1ad609f562fde8b69a1beb2db058a2 | Python | AdrianTVB/AreYouBeingServed | /scrape/scripts/url_to_text.py | UTF-8 | 2,990 | 2.640625 | 3 | [] | no_license | # Initial script sourced from https://stackoverflow.com/questions/328356/extracting-text-from-html-file-using-python
#import urllib
import requests
from bs4 import BeautifulSoup
#from cStringIO import StringIO
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from urllib.request import urlopen
#from StringIO import StringIO
from io import StringIO, BytesIO
#import os
import sys, getopt
## TODO Turn into a function so given a url and filename and directory it will parse the url and put into the file.
# Url from browser is to the navigation frame, drop the _WEB to get to the contents
#url = "http://hastings.infocouncil.biz/Open/2019/08/COR_22082019_MIN_4613_WEB.htm"
#url = "http://hastings.infocouncil.biz/Open/2019/08/COR_22082019_MIN_4613.htm"
# save as text file
#fileDate = "20190822"
#meetingType = "Council"
#organisation = "hdc"
#outputDir = "data/txt/"
#outputFile = fileDate + "_" + organisation + "_" + meetingType + ".txt"
# Old code from original post (uses urllib)
#html = urllib.request.urlopen(url).read()
#soup = BeautifulSoup(html)
def html_to_txt(url):
    """Fetch *url* and return its visible text, one trimmed chunk per line."""
    response = requests.get(url, timeout=5)
    soup = BeautifulSoup(response.content, "html.parser")
    # Remove non-visible elements before extracting text.
    for tag in soup(["script", "style"]):
        tag.extract()
    raw_text = soup.get_text()
    # Trim each line, then split lines into phrases and drop empty chunks.
    stripped_lines = (line.strip() for line in raw_text.splitlines())
    phrases = (phrase.strip() for line in stripped_lines for phrase in line.split(" "))
    return '\n'.join(phrase for phrase in phrases if phrase)
#html_to_txt(url=url, outputDir=outputDir, outputFile=outputFile)
#converts pdf, returns its text content as a string
# modified from https://stanford.edu/~mgorkove/cgi-bin/rpython_tutorials/Using%20Python%20to%20Convert%20PDFs%20to%20Text%20Files.php#4
# using https://stackoverflow.com/questions/9751197/opening-pdf-urls-with-pypdf
def pdf_to_txt(url, pages=None):
    """Download the PDF at *url* and return its extracted text.

    :param url: URL of the PDF document.
    :param pages: Optional iterable of 0-based page numbers; all pages
        are processed when falsy.
    """
    if not pages:
        pagenums = set()
    else:
        pagenums = set(pages)
    output = StringIO()
    manager = PDFResourceManager()
    converter = TextConverter(manager, output, laparams=LAParams())
    interpreter = PDFPageInterpreter(manager, converter)
    # Fetch the whole document into memory and wrap it as a file-like object.
    remote_file = urlopen(url).read()
    infile = BytesIO(remote_file)
    for page in PDFPage.get_pages(infile, pagenums):
        interpreter.process_page(page)
    infile.close()
    converter.close()
    text = output.getvalue()
    # FIX: was `output.close` (bare attribute access, never actually called).
    output.close()
    return text
| true |
da415245283472a5db1320c646d370ec0623d865 | Python | ScribesZone/ModelScript | /modelscript/megamodels/dependencies/__init__.py | UTF-8 | 702 | 2.515625 | 3 | [
"MIT"
] | permissive | # coding=utf-8
"""Dependencies.
This module provides only one abstract class "Dependency",
the root of all concrete dependencies defined in separated modules.
"""
from abc import ABCMeta, abstractmethod
from modelscript.base.exceptions import (
MethodToBeDefined)
# Placeholder string used in the annotations below; presumably it stands in
# for the real MegamodelElement class without importing it here -- confirm.
MegamodelElement = 'MegamodelElement'


class Dependency(object, metaclass=ABCMeta):
    """Abstract root of all concrete dependency kinds.

    A dependency is a directed link from a source megamodel element to a
    target megamodel element; subclasses must implement both endpoints.
    """

    @property
    @abstractmethod
    def source(self) -> MegamodelElement:
        # Abstract fallback: fail loudly if a subclass forgot to override.
        raise MethodToBeDefined( # raise:OK
            'property .source is not implemented')

    @property
    @abstractmethod
    def target(self) -> MegamodelElement:
        raise MethodToBeDefined( # raise:OK
            'property .target is not implemented')
a5ea5fd94e30654d7002003f1701f8ba4d35d495 | Python | ngynmt/venmo-autopay | /pay_my_bills.py | UTF-8 | 3,207 | 2.703125 | 3 | [] | no_license | from splinter import Browser
# Automates paying/charging a list of bills through the Venmo web UI
# using a splinter-driven browser session.
with Browser() as browser:
    # visit url
    url = "https://www.venmo.com"
    browser.visit(url)
    browser.cookies.delete()

    # credentials
    my_name = "John Smith" # your name on venmo
    my_number = "5555555555" # phone number used to log in
    my_password = "YOUR_PASSWORD" # password

    # bills
    bills = [{
        "payment_type": "CHARGE", # or "PAY"
        "venmo_username": "venmo-user-007",
        "venmo_name": "Venmo Name",
        "amount": "0.01",
        "details": "this is a test"
    },
    {
        "payment_type": "PAY",
        "venmo_username": "friendo-numero-uno",
        "venmo_name": "Friend One",
        "amount": "0.01",
        "details": "this is another test"
    }]

    # sign in flow
    if browser.is_text_not_present(my_name):
        print("Not logged in, performing sign in flow ...")
        sign_in_link = browser.find_link_by_text("Sign In").first
        sign_in_link.click()
        browser.find_by_name("phoneEmailUsername").fill(my_number)
        # FIX: was my_password + "/n" -- a literal slash-n that corrupted the
        # password; "\n" presses Enter after typing it.
        browser.find_by_name("password").fill(my_password + "\n")
        buttons = browser.find_by_tag("button")
        sign_in_button = buttons.first.find_by_text("Sign In")
        sign_in_button.click()
        # DO LATER: handle 2FA
        # if MFA_TRIGGERED:
        # verification_code = raw_input("Please enter verification code")
        # browser.find_by_name("verify").fill(verification_code)
        # buttons = browser.find_by_tag("button")
        # verify_button = buttons.first.find_by_text("Enter")
        # verify_button.click()

    # create a charge for each bill
    for bill in bills:
        # select pay or charge
        if browser.is_element_present_by_id('onebox_pay_toggle', 20):
            if bill['payment_type'] == "PAY":
                print("Creating a payment to: " + bill['venmo_username'] + " (" + bill['venmo_name'] + ")")
                payment_type_button = browser.find_by_id("onebox_pay_toggle").first
            elif bill['payment_type'] == "CHARGE":
                print("Creating a charge to: " + bill['venmo_username'] + " (" + bill['venmo_name'] + ")")
                payment_type_button = browser.find_by_id("onebox_charge_toggle").first
            payment_type_button.click()
        # select user
        venmo_user_search = browser.find_by_id("onebox_typeahead").first
        venmo_user_search.fill(bill['venmo_username'] + "\n")
        user_dropdown = browser.find_by_css(".ac-renderer").first
        select_user = user_dropdown.find_by_text(bill['venmo_username']).first
        select_user.click()
        # fill out amount and details
        browser.find_by_id("onebox_details").fill(bill['amount'] + " " + bill['details'])
        print("$" + bill['amount'] + " - " + bill['details'])
        send_button = browser.find_by_id("onebox_send_button")
        send_button.click()
        # set text to check based on payment type
        if bill['payment_type'] == "PAY":
            text_to_check = "Paid $%s and notified %s of your payment."%(bill['amount'], bill['venmo_name'])
        elif bill['payment_type'] == "CHARGE":
            text_to_check = "Requested $%s from %s."%(bill['amount'], bill['venmo_name'])
        # if success message not present, print error and exit
        if not browser.is_text_present(text_to_check, 15):
            api_response = browser.find_by_id("js_messaging_message").first.text
            print(api_response)
            print("Something went wrong, please check Venmo account.")
            quit()
        else:
            print("Success!")
        # refresh
        browser.visit(url)

print("fin")
a68fcbc0c07f668327aa010d4fa593b4fb801a6d | Python | andiselvam77/example | /Tkinter.py | UTF-8 | 213 | 2.671875 | 3 | [] | no_license | from tkinter import *
root=Tk()
root.geometry('100x100')

def b1():
    """Evaluate the expression typed in the entry box and show the result."""
    s=e1.get()
    # HACK: eval() executes arbitrary code typed by the user -- unsafe for
    # untrusted input; consider ast.literal_eval or a restricted parser.
    d=eval(s)
    # NOTE(review): a fresh Label is packed on every click, so old results
    # stack up; also .pack() returns None, so label1 is always None.
    label1=Label(root,text=d).pack()

e1=Entry(root)
e1.pack()
but1=Button(root,text='click',command=b1).pack()
mainloop()
| true |
e9e4d7b43ad34d2f95e2d342edc7f068f4f63220 | Python | ctaylor4874/lat_lng_queue_loader | /app/__init__.py | UTF-8 | 1,719 | 2.59375 | 3 | [
"MIT"
] | permissive | import os
import logging
from json import dumps
from flask import Flask, render_template, request, redirect, url_for, flash
import sqs
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
# NOTE(review): os.getenv returns None when the variable is unset, so
# .encode() would raise AttributeError here -- confirm QUEUE_LOADER_SECRET
# is always provided at deploy time.
SECRET = os.getenv('QUEUE_LOADER_SECRET').encode()

app = Flask(__name__)
app.config['SECRET_KEY'] = bytes(SECRET)
def send_message(queue, data):
    """Enqueue *data* as the body of a new message on *queue*."""
    queue.send_message(MessageBody=data)
def get_data(result):
    """Parse the four bounding-box coordinates out of a form mapping.

    Returns a dict with the same four keys, each value converted to float.
    """
    keys = ('start_lat', 'start_lng', 'end_lat', 'end_lng')
    return {key: float(result.get(key)) for key in keys}
@app.route('/')
def index():
    """Render the map page, passing the Google Maps API key to the template."""
    return render_template("map.html", GOOGLE_API_KEY=GOOGLE_API_KEY)
@app.route('/handle_data', methods=['POST'])
def handle_data():
    """Validate the submitted bounding box and enqueue it on SQS.

    Start coordinates must be strictly less than end coordinates; an invalid
    box flashes an error and nothing is enqueued.  POSTs always redirect
    back to the index.
    """
    lat_lng_queue = sqs.get_queue('lat_lng_queue')
    if request.method == 'POST':
        result = request.form
        data = get_data(result)
        if data.get('start_lat') >= data.get('end_lat'):
            flash("Starting latitude must me less than ending latitude.", category='error')
        elif data.get('start_lng') >= data.get('end_lng'):
            flash("Starting longitude must me less than ending longitude.", category='error')
        else:
            send_message(lat_lng_queue, dumps(data))
        return redirect(url_for('index'))
    # NOTE(review): effectively unreachable -- the route only accepts POST,
    # so Flask rejects other methods before this function runs.
    return render_template('error.html')
if __name__ == '__main__':
    # level=20 is logging.INFO; prepend timestamps to the default format.
    logging.basicConfig(level=20, format='%(asctime)s:{}'.format(logging.BASIC_FORMAT))
    try:
        app.run(debug=False)
    except Exception as e:
        # Log the full traceback before letting the process die.
        logging.exception(e)
        raise
| true |
ed0cf769823d3f068a1757d7b6a31934950b0ae2 | Python | JC-Quil/Seizure_prediction_from_iEEG | /converter.py | UTF-8 | 14,550 | 2.78125 | 3 | [] | no_license | ### This script convert the sample signals into subsamples and calculate the features ###
# Import python libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Get specific functions from some other python libraries
from math import floor, log
from scipy.stats import skew, kurtosis
from scipy.io import loadmat # For loading MATLAB data (.dat) files
from scipy import signal
from pywt import wavedec
from time import time
# This code was adapted from a post from Deep to the Kaggle competition forum here:
# https://www.kaggle.com/deepcnn/melbourne-university-seizure-prediction/feature-extractor-matlab2python-translated/comments
# Convert .mat files into dictionaries
def convertMatToDictionary(path):
    """Load a MATLAB .mat file and unpack its 'dataStruct' fields into a dict."""
    mat = loadmat(path)
    struct = mat['dataStruct']
    # Each field comes back as a (1, 1) object array; [0, 0] unwraps it.
    return {field: struct[field][0, 0] for field in struct.dtype.names}
# Define the frequence bands
def defineEEGFreqs():
    '''
    EEG band edges in Hz.

    alpha 8-14 Hz (awake, eyes closed), beta 15-30 Hz (active thinking),
    gamma 30-45 Hz (perception/consciousness), theta 4-7 Hz (light sleep),
    delta < 4 Hz (deep sleep).
    '''
    band_edges_hz = [0.2, 4, 8, 15, 30, 45, 70, 180]  # Frequency levels in Hz
    return np.array(band_edges_hz)
# Calculates the FFT of the epoch signal. Removes the DC component and normalizes the area to 1
def calcNormalizedFFT(epoch, lvl, subsampLen, fs):
    """FFT of the epoch, DC component zeroed, divided by the complex total.

    NOTE(review): the division is by the sum over *all* bins and channels,
    not a per-channel normalization as the original comment suggested --
    confirm intent.
    """
    seg_bounds = np.round(subsampLen/fs*lvl).astype('int')
    spectrum = np.fft.fft(epoch, n=(seg_bounds[-1]), axis=0)
    spectrum[0, :] = 0  # zero out the DC component
    spectrum /= spectrum.sum()
    return spectrum
# Calculate the power spectral density from the FFT
def calcPSD(fft_epoch):
    """Power spectral density: squared magnitude of each FFT coefficient."""
    return np.power(np.abs(fft_epoch), 2)
# Compute the relative PSD for each frequency band
def calcPSD_band(psd_epoch, lvl, subsampLen, nc, fs):
    """Relative power of each frequency band, per channel.

    Each band's power is the sum of PSD bins inside the band divided by the
    per-channel total power, producing a (len(lvl)-1, nc) array.
    """
    total_power = sum(psd_epoch)  # builtin sum over rows -> per-channel totals
    bounds = np.round(subsampLen / fs * lvl).astype('int')
    relative = np.zeros((len(lvl) - 1, nc))
    for band in range(len(lvl) - 1):
        band_power = np.sum(psd_epoch[bounds[band]:bounds[band + 1], :], axis=0)
        relative[band, :] = band_power / total_power
    return relative
# Computes Shannon Entropy of segments corresponding to frequency bands
def calcShannonEntropy(psd_epoch, lvl, subsampLen, nc, fs):
    """Shannon entropy of the normalized PSD within each frequency band.

    For every band, computes -sum(p * log(p)) over the band's bins, where p is
    the PSD normalized by the per-channel total power.  Returns a
    (len(lvl)-1, nc) array.
    """
    total_power = sum(psd_epoch)  # builtin sum over rows -> per-channel totals
    p = psd_epoch / total_power
    bounds = np.round(subsampLen / fs * lvl).astype('int')
    entropy = np.zeros((len(lvl) - 1, nc))
    for band in range(len(lvl) - 1):
        segment = p[bounds[band]:bounds[band + 1], :]
        entropy[band, :] = -1 * np.sum(np.multiply(segment, np.log(segment)), axis=0)
    return entropy
# Compute spectral edge frequency
def calcSpectralEdgeFreq(psd_epoch, lvl, subsampLen, nc, fs):
    """Spectral edge frequency per channel: the frequency below which 50% of
    the power (within 0-50 Hz) lies.

    FIX: the original looped over a hard-coded 16 channels, ignoring the `nc`
    parameter and crashing (or mis-sizing the result) for any other channel
    count.  The loop now uses `nc`.

    Returns a length-`nc` array of edge frequencies in Hz.
    """
    sfreq = fs
    tfreq = 50    # only consider the spectrum up to 50 Hz
    ppow = 0.5    # edge = frequency below which this fraction of power lies
    topfreq = int(round(subsampLen / sfreq * tfreq)) + 1
    cumulative = np.cumsum(psd_epoch[:topfreq, :], axis=0)
    centered = cumulative - (cumulative.max(axis=0) * ppow)
    edge_bins = np.argmin(np.abs(centered), axis=0)
    spedge = np.zeros(nc)
    for ch in range(nc):
        spedge[ch] = edge_bins[ch] * sfreq / subsampLen
    return spedge
# Calculate cross-correlation matrix
def corr(data, type_corr):
    """Sorted real parts of the eigenvalues of data's correlation matrix.

    NaN and infinite correlation entries (e.g. from constant columns) are
    replaced with 0 before the eigendecomposition.
    """
    matrix = np.array(data.corr(type_corr))
    matrix[np.isnan(matrix)] = 0   # constant columns give NaN correlations
    matrix[np.isinf(matrix)] = 0
    eigenvalues, _ = np.linalg.eig(matrix)
    return np.real(np.sort(eigenvalues))
# Compute correlation matrix across channels
def calcCorrelationMatrixChan(epoch):
    """Eigenvalues (sorted, real) of the between-channel Spearman correlation
    matrix of the epoch."""
    frame = pd.DataFrame(data=epoch)
    return corr(frame, 'spearman')
# Compute correlation matrix across frequencies for each frequency bands
def calcCorrelationMatrixFreq(fft_epoch, lvl, subsampLen, nc, fs):
    """Per-band eigenvalues of the between-frequency Spearman correlation matrix.

    The input is converted to a PSD, truncated at the highest band edge, and
    for each frequency band the sorted eigenvalues of the band's correlation
    matrix are collected into a (len(lvl)-1, nc) array.
    """
    spectrum = calcPSD(fft_epoch)
    usable = spectrum[:int(round(lvl[-1] * subsampLen / fs)), :]
    bounds = np.round(subsampLen / fs * lvl).astype('int')
    eigen_bands = np.zeros((len(lvl) - 1, nc))
    for band in range(len(lvl) - 1):
        segment = pd.DataFrame(usable[bounds[band]:bounds[band + 1], :])
        eigen_bands[band, :] = corr(segment, 'spearman')
    return eigen_bands
# Calculate Hjorth activity over epoch
def calcActivity(epoch):
    """Hjorth activity: the per-channel variance of the epoch."""
    return np.var(epoch, axis=0)
# Calculate the Hjorth mobility parameter over epoch
def calcMobility(epoch):
    """Hjorth mobility: std of the first difference over the per-channel std.

    NOTE: the numerator's std is taken over the whole difference array
    (no axis argument), matching the original implementation.
    """
    diff_spread = np.std(np.diff(epoch, axis=0))
    signal_spread = np.std(epoch, axis=0)
    return np.divide(diff_spread, signal_spread)
# Calculate Hjorth complexity over epoch
def calcComplexity(epoch):
    """Hjorth complexity: mobility of the first difference divided by the
    mobility of the signal itself."""
    derivative = np.diff(epoch, axis=0)
    return np.divide(calcMobility(derivative), calcMobility(epoch))
# /feature removed from the feature set
def hjorthFD(X, Kmax=3):
    """ Compute Hjorth Fractal Dimension of a time series X, kmax
    is an HFD parameter. Kmax is basically the scale size or time offset.
    So you are going to create Kmax versions of your time series.
    The K-th series is every K-th time of the original series.
    This code was taken from pyEEG, 0.02 r1: http://pyeeg.sourceforge.net/
    """
    L = []   # log of the mean normalized curve length at each scale k
    x = []   # regression design matrix rows: [log(1/k), 1]
    N = len(X)
    for k in range(1,Kmax):   # NOTE(review): range(1, Kmax) never evaluates k == Kmax; same bound as pyEEG
        Lk = []
        for m in range(k):   # m is the starting offset of the k-decimated sub-series
            Lmk = 0
            for i in range(1,int(floor((N-m)/k))):
                Lmk += np.abs(X[m+i*k] - X[m+i*k-k])   # absolute increment at scale k
            Lmk = Lmk*(N - 1)/floor((N - m) / k) / k   # length normalization from the algorithm
            Lk.append(Lmk)
        L.append(np.log(np.mean(Lk))) # Using the mean value in this window to compare similarity to other windows
        x.append([np.log(float(1) / k), 1])
    (p, r1, r2, s)= np.linalg.lstsq(x, L) # Numpy least squares solution; NOTE(review): newer NumPy warns without rcond=
    return p[0]   # slope of the log-log fit is the fractal-dimension estimate
# /feature removed from the feature set
def petrosianFD(X, D=None):
    """Compute Petrosian Fractal Dimension of a time series.

    Either pass the series X alone (its first-order difference is computed
    here), or also pass D, the precomputed first-order difference, to avoid
    recomputing it.  Adapted from pyEEG, 0.02 r1:
    http://pyeeg.sourceforge.net/

    FIX: the original returned
        np.log10(n) / (np.log10(n) + np.log10(n/n + 0.4*N_delta))
    where `n/n` collapses to 1; the Petrosian formula requires
        np.log10(n / (n + 0.4*N_delta)).
    """
    if D is None:
        D = np.diff(X)  # difference between one data point and the next
    N_delta = 0  # number of sign changes in the derivative of the signal
    for i in range(1, len(D)):
        if D[i] * D[i - 1] < 0:
            N_delta += 1
    n = len(X)
    return np.log10(n) / (np.log10(n) + np.log10(n / (n + 0.4 * N_delta)))
# Calculate Katz fractal dimension
def katzFD(epoch):
    """Katz fractal dimension: log of the maximum excursion from the first
    sample, over the log of the series length."""
    max_excursion = np.abs(epoch - epoch[0]).max()
    length = len(epoch)
    return np.log(max_excursion) / np.log(length)
# Calculate Higuchi fractal dimension
def higuchiFD(epoch, Kmax = 8):
    '''
    Ported from https://www.mathworks.com/matlabcentral/fileexchange/30119-complete-higuchi-fractal-dimension-algorithm/content/hfd.m
    by Salai Selvam V
    Estimates the Higuchi fractal dimension of a 1-D signal by fitting
    log curve-length against log(1/k) over the scales k = 1..Kmax and
    returning the slope of that fit.
    '''
    N = len(epoch)
    Lmk = np.zeros((Kmax,Kmax))   # curve length for offset m (row) at scale k (column)
    for k in range(1, Kmax+1):
        for m in range(1, k+1):   # one decimated sub-series per starting offset m
            Lmki = 0
            maxI = floor((N-m)/k)   # number of usable increments at this offset/scale
            for i in range(1,int(maxI+1)):
                Lmki = Lmki + np.abs(epoch[m+i*k-1]-epoch[m+(i-1)*k-1])
            normFactor = (N-1)/(maxI*k)   # length normalization from the original algorithm
            Lmk[m-1,k-1] = normFactor * Lmki
    Lk = np.zeros((Kmax, 1))
    for k in range(1, Kmax+1):
        Lk[k-1,0] = np.sum(Lmk[range(k),k-1])/k/k   # mean over the k offsets, scaled by 1/k
    lnLk = np.log(Lk)
    lnk = np.log(np.divide(1., range(1, Kmax+1)))
    fit = np.polyfit(lnk,lnLk,1) # Fit a line to the curve
    return fit[0] # Grab the slope. It is the Higuchi FD
# Calculate Petrosian fractal dimension /feature removed
def calcPetrosianFD(epoch):
    """Petrosian fractal dimension of each channel, as a list.
    (Feature currently removed from the active feature set.)"""
    nt, n_channels = epoch.shape
    return [petrosianFD(epoch[:, ch]) for ch in range(n_channels)]
# Calculate Hjorth fractal dimension /feature removed
def calcHjorthFD(epoch):
    """Hjorth fractal dimension (Kmax=3) of each channel, as a list.
    (Feature currently removed from the active feature set.)"""
    nt, n_channels = epoch.shape
    return [hjorthFD(epoch[:, ch], 3) for ch in range(n_channels)]
# Calculate Higuchi fractal dimension
def calcHiguchiFD(epoch):
    """Higuchi fractal dimension of each channel, as a list."""
    nt, n_channels = epoch.shape
    return [higuchiFD(epoch[:, ch]) for ch in range(n_channels)]
# Calculate Katz fractal dimension
def calcKatzFD(epoch):
    """Katz fractal dimension of each channel, as a list."""
    nt, n_channels = epoch.shape
    return [katzFD(epoch[:, ch]) for ch in range(n_channels)]
# Calculate skewness of the signal
def calcSkewness(epoch):
    """Per-channel skewness of the signal (scipy default axis=0)."""
    return skew(epoch)
# Calculate kurtosis of the signal
def calcKurtosis(epoch):
    """Per-channel excess kurtosis of the signal (scipy default axis=0)."""
    return kurtosis(epoch)
# Compute the relative PSD for each frequency band from welch method
def calcPSDWavelet(epoch, nc):
    """Relative wavelet-band energy per channel from a 6-level db6 decomposition.

    For every channel the seven wavelet coefficient arrays (approximation +
    six detail levels, in wavedec order) are converted to energies and
    normalized so each channel's energies sum to 1.

    FIX: the original looped over a hard-coded ``range(16)`` channels,
    ignoring the ``nc`` parameter; the loop now uses ``nc``.
    """
    psd_wav = np.zeros((7, nc))
    for ch in range(nc):
        coeffs = wavedec(epoch[:, ch], wavelet='db6', level=6)
        for band, detail in enumerate(coeffs):
            psd_wav[band, ch] = np.sum(np.power(detail, 2))
        psd_wav[:, ch] /= psd_wav[:, ch].sum()
    return psd_wav
# Calculate mean of the signal
def calcMean(epoch, subsampLen):
    """Per-channel mean, computed as the column sum over the nominal epoch
    length (so a short final epoch is averaged over the full length)."""
    return np.sum(epoch, axis=0) / subsampLen
# Calculate standard deviation of the signal
def calcStdDev(epoch):
    """Per-channel (population) standard deviation of the signal."""
    return np.std(epoch, axis=0)
# Control wether an epoch contain more than 80% of non-zero signal
def check_epoch_validity(epoch, nt, nc):
    """An epoch is valid when at least 10000 rows contain no zero sample in
    any channel (i.e. the drop-out fraction is small enough).

    `nt` and `nc` are accepted for interface compatibility but unused.
    """
    nonzero_rows = epoch[np.all(epoch[:, :] != 0, axis=1), :]
    return nonzero_rows.shape[0] >= 10000
# Calculate all the features
def calculate_features(file_name):
    """Load one .mat EEG segment and compute per-epoch feature tables.

    The recording is cut into 30-second epochs; epochs with too much signal
    drop-out are skipped.  Each feature name maps to a source string that is
    evaluated with `eval` against the local epoch variables, and band-resolved
    features are fanned out into seven sub-keyed tables (one per band).
    Returns a dict of pandas DataFrames indexed by epoch number.
    """
    f = convertMatToDictionary(file_name)
    fs = f['iEEGsamplingRate'][0,0]   # sampling rate stored in the .mat struct
    eegData = f['data']
    [nt, nc] = eegData.shape
    print('EEG shape = ({} timepoints, {} channels)'.format(nt, nc))
    lvl = defineEEGFreqs()
    subsampLen = int(floor(fs * 30))  # Grabbing 30-second epochs from within the time series
    numSamps = int(floor(nt / subsampLen));  # Num of 30-sec samples
    sampIdx = range(0,(numSamps+1)*subsampLen,subsampLen)
    # Define a feature dictionary with the associated function for calculation.
    # The values are source strings evaluated with eval() inside the epoch loop.
    functions = { 'shannon entropy': 'calcShannonEntropy(psd_epoch, lvl, subsampLen, nc, fs)'
                 , 'power spectral density': 'calcPSD_band(psd_epoch, lvl, subsampLen, nc, fs)'
                 , 'spectral edge frequency': 'calcSpectralEdgeFreq(psd_epoch, lvl, subsampLen, nc, fs)'
                 , 'correlation matrix (channel)' : 'calcCorrelationMatrixChan(epoch)'
                 , 'correlation matrix (frequency)' : 'calcCorrelationMatrixFreq(epoch, lvl, subsampLen, nc, fs)'
                 , 'hjorth activity' : 'calcActivity(epoch)'
                 , 'hjorth mobility' : 'calcMobility(epoch)'
                 , 'hjorth complexity' : 'calcComplexity(epoch)'
                 , 'skewness' : 'calcSkewness(epoch)'
                 , 'kurtosis' : 'calcKurtosis(epoch)'
                 , 'Katz FD' : 'calcKatzFD(epoch)'
                 , 'Higuchi FD' : 'calcHiguchiFD(epoch)'
                 , 'PSD wavelet' : 'calcPSDWavelet(epoch, nc)'
                 , 'mean' : 'calcMean(epoch, subsampLen)'
                 , 'std dev' : 'calcStdDev(epoch)'
                 }
    # Initialize a dictionary of pandas dataframes with the features as keys
    feat = {key[0]: pd.DataFrame() for key in functions.items()}
    # Band-resolved features get one extra table per frequency band (0..6).
    for i in range(0,7,1):
        for key in ['shannon entropy', 'power spectral density', 'PSD wavelet', 'correlation matrix (frequency)']:
            sub_key = key + str(i)
            feat[sub_key] = pd.DataFrame()
    #print "feat", feat [debug]
    valid = True # initiate the variable recording the validity of an epoch
    for i in range(1, numSamps+1):
        #print('processing file {} epoch {}'.format(file_name,i)) [debug]
        epoch = eegData[sampIdx[i-1]:sampIdx[i], :]
        valid = check_epoch_validity(epoch, nt, nc) # Verify the extent of drop-off within the epoch
        if valid:
            fft_epoch = calcNormalizedFFT(epoch, lvl, subsampLen, fs)
            psd_epoch = calcPSD (fft_epoch)
            for key in functions.items():
                if key[0] in ['shannon entropy', 'power spectral density', 'PSD wavelet', 'correlation matrix (frequency)']:
                    temp_df = pd.DataFrame(eval(key[1])).T
                    # NOTE(review): this inner loop reuses the name `i` from the
                    # epoch loop; harmless because `i` is not read again after
                    # `epoch` is sliced, but worth renaming.
                    for i in range(0,7,1):
                        sub_key = key[0] + str(i)
                        feat[sub_key] = feat[sub_key].append(temp_df[:][i])
                else:
                    feat[key[0]] = feat[key[0]].append(pd.DataFrame(eval(key[1])).T)
        else:
            numSamps = int(numSamps - 1) # Adjust the length of the feature table in case of drop-off
    #print "feat", feat [debug]
    # Attribute the correct epoch number to the rows
    for key in feat.keys():
        feat[key]['Epoch #'] = range(numSamps)
        feat[key] = feat[key].set_index('Epoch #')
    return feat
| true |
# Tiny line calculator: read "a op b" per line until op is '?'.
# A '/' is mapped to Python's floor division '//' so integer division is used.
while True:
    left, operator, right = input().split()
    if operator == '?':
        break
    if operator == '/':
        operator = '//'
    print(eval(left + operator + right))
dfa99d339fbee70bd93d40b19028090439d91870 | Python | klehman-rally/pyral | /rallyfire.py | UTF-8 | 2,921 | 2.65625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
#################################################################################################
#
# rallyfire - Exemplar script to test the basic connectivity to a Rally server
# and obtain basic workspace and project information
#
#################################################################################################
import sys
from pyral import Rally, rallyWorkset
#################################################################################################
errout = sys.stderr.write  # convenience alias for writing error text to stderr
#################################################################################################
def main(args):
    """Connect to a Rally server using credentials parsed from the command
    line and print the effective workspace and project names.

    Options (``--...``) are handed to rallyWorkset; the API key is used when
    present, otherwise user/password BasicAuth applies.  Exits with status 0.
    """
    options = [item for item in args if item.startswith('--')]
    positional = [item for item in args if item not in options]  # kept for parity, unused
    server, user, password, apikey, workspace, project = rallyWorkset(options)
    # Rally() prefers the API key when it is non-None, otherwise BasicAuth.
    rally = Rally(server, user, password, apikey=apikey,
                  workspace=workspace, project=project,
                  debug=True, isolated_workspace=True)
    requested_workspace = workspace
    workspace = rally.getWorkspace()
    print("Workspace: %s " % workspace.Name)
    if requested_workspace != workspace.Name:
        print("  ** The workspace you specified: %s is not a valid workspace name for your account, using your default workspace instead" % requested_workspace)
    project = rally.getProject()
    print("Project : %s " % project.Name)
    sys.exit(0)
#################################################################################################
#################################################################################################
# Script entry point: forward the command-line arguments (minus the program name).
if __name__ == '__main__':
    main(sys.argv[1:])
| true |
def D(n):
    """Balance after n yearly cycles: start at 400000 with 5% interest applied,
    then each cycle apply 5% interest and withdraw 25000.

    Iterative form of the original recursion; the arithmetic is performed in
    the same order, so the float results are identical.
    """
    balance = 400000 * 1.05
    for _ in range(n):
        balance = balance * 1.05 - 25000
    return balance

print(D(37))
3279a9249325abe58e03cca6933bae3afce219d1 | Python | chui101/covid-vis | /flask/states.py | UTF-8 | 2,672 | 3.546875 | 4 | [] | no_license | import json
class state_info:
    """Static per-state facts (name, area, 2019 population) loaded from JSON."""

    def __init__(self, datafile="data/states.json"):
        # The file maps two-letter state codes to {'name', 'area', 'population'}.
        with open(datafile, 'r') as fp:
            self.data = json.load(fp)

    def get_states(self):
        """All two-letter state codes present in the data file."""
        return self.data.keys()

    def get_name(self, state):
        """Full state name for a two-letter code."""
        return self.data[state]['name']

    def get_area(self, state):
        """Land area of the state."""
        return self.data[state]['area']

    def get_population(self, state):
        """Population of the state (2019 estimate)."""
        return self.data[state]['population']

    def get_density(self, state):
        """Population per unit area."""
        record = self.data[state]
        return record['population'] / record['area']
class state_historic_data:
    """Per-state time series of COVID data points loaded from JSON.

    Each element of ``self.data`` is a dict with at least 'date', 'positive'
    and 'death' keys, ordered oldest-first.

    FIXES versus the original:
      * get_date_range filtered over ``self`` (not iterable) instead of
        ``self.data``.
      * get_three_day_case_average called ``map`` with the iterable argument
        missing entirely (TypeError).
      * get_three_day_death_average averaged 'positive' instead of 'death'.
    """

    def __init__(self, state):
        self.state = state
        with open("data/" + state + "_historic.json", 'r') as fp:
            self.data = json.load(fp)

    def get_latest(self):
        """Most recent data point."""
        return self.data[-1]

    def get_latest_n(self, n):
        """The n most recent data points."""
        return self.data[-n:]

    def get_date_range(self, begin, end):
        """Data points whose 'date' lies in [begin, end]."""
        return list(filter(lambda x: begin <= x['date'] <= end, self.data))

    def get_after_n_cases(self, n):
        """Get a set of data points after the state meets or exceeds the threshold number of cases.
        Searches backwards until it finds the last point positives meets or exceeds the threshold"""
        last_index = len(self.data) - 1
        for i in range(len(self.data) - 1, -1, -1):
            if self.data[i]['positive'] >= n:
                last_index = i
        return self.data[last_index:]

    def get_three_day_case_average(self):
        """Get a moving three day average of cases."""
        positives = [0 if x['positive'] is None else x['positive'] for x in self.data]
        return three_day_average(positives)

    def get_three_day_death_average(self):
        """Get a moving three day average of deaths."""
        deaths = [0 if x['death'] is None else x['death'] for x in self.data]
        return three_day_average(deaths)
def three_day_average(array):
    """Moving average over a window of up to three consecutive values.

    For each position, averages the previous, current and next elements,
    shrinking the window at the ends of the list.
    """
    averages = []
    last = len(array) - 1
    for i, value in enumerate(array):
        window = []
        if i > 0:
            window.append(array[i - 1])
        window.append(value)
        if i < last:
            window.append(array[i + 1])
        averages.append(sum(window) / len(window))
    return averages
# Read three digit strings r g b and report whether the three-digit
# number "rgb" is divisible by 4.
r, g, b = input().split(" ")
value = 100 * int(r) + 10 * int(g) + int(b)
if value % 4 == 0:
    print("YES")
else:
    print("NO")
d304898faa9df15b6314fd37e0d7fa5de3cfa65b | Python | XtmRebron20/learning_web_spider | /learning_urllib/urllib_URLError.py | UTF-8 | 673 | 3.28125 | 3 | [] | no_license | # 学习urllib标准库中error模块
# 这是一个打开一个未知页面的脚本
# 用于学习URLError类的reson属性
# 标准库:urllib 模块:error 类:URLError
# 继承自OSError类(error异常模块的基类)
# from urllib import request, error
try:
response = request.urlopen('https://cuiqingcai.com/index.html')
except error.URLError as e:
print(e.reason)
# 标准库:urllib 模块:error 类:HTTPError
# 继承自URLError类
# from urllib import request, error
# try:
# response = request.urlopen('https://cuiqingcai.com/index.html')
# except error.HTTPError as e:
# print(e.reason, e.code, e.headers, sep='\n')
| true |
d46b0979531cc2e952947852058b7ce24d908502 | Python | slash-todo/advent-of-code-2018 | /04/wizzardoo/day4.py | UTF-8 | 3,413 | 3.8125 | 4 | [
"MIT"
class Guard:
    """One guard's log: id, per-minute sleep tallies, and total minutes asleep."""

    def __init__(self, guard_id):
        self.id = guard_id
        self.activity = [0 for _ in range(60)]  # times asleep at each minute of the hour
        self.sleep_times = {}                   # Not in use yet
        self.time_asleep = 0                    # cumulative minutes asleep
# Advent of Code 2018 day 4: parse guard shift logs, tally the minutes each
# guard spends asleep, and answer both puzzle parts.
puzzleinput = []
with open("puzzleinput.txt", "r") as file:
    for line in file:
        puzzleinput += [line.replace("\n", "")]
# Entries arent in order -- sort by the timestamp embedded in columns 1:17.
ordered_puzzleinput = sorted(puzzleinput, key = lambda x: x[1:17])
guards = []
asleep_time = None # Let's assume a guard always goes to sleep before they wake up
asleep_minute = None # The minute they fell asleep.
import arrow
# Alright so python maintainers decided that pre 1900 dates should be dodgy so using arrow. Datetime is a pain in the arse.
for note in ordered_puzzleinput:
    time = arrow.get( note[1:17], 'YYYY-MM-DD HH:mm')
    minute = note[15:17]
    if "#" in note:
        # Bit of a faff here, but if there is a #, a new guard log is starting. We either make a new guard, or check if it is in our previously
        # created "guards" list.
        guard_id = note.split(" ")[-3][1:]
        guard = list(filter(lambda x: x.id == guard_id, guards)) # Return a list of guards where the id equals the current guard on duty.
        if len(guard) == 0: # new guard
            guard = Guard(guard_id) # create guard
        else:
            guard = guard[0] # Get old guard
        # NOTE(review): this appends the guard again on every shift, so `guards`
        # can contain duplicates -- confirm this is intended.
        guards += [guard]
    if "falls asleep" in note:
        asleep_time = time
        asleep_minute = int(minute)
    if "wakes up" in note:
        sleep_duration = time - asleep_time
        sleep_duration = int(sleep_duration.total_seconds() / 60)
        guard.time_asleep += sleep_duration
        # NOTE(review): range(0, sleep_duration-1) and index asleep_minute+n-1
        # look off by one versus the usual AoC convention -- verify.
        for n in range(0, sleep_duration-1 ): # for each minute asleep, add 1 to each guards 60 entry list
            guard.activity[asleep_minute+n-1] += 1
# Part 1: find the guard with the largest total sleep time.
most_asleep_guard = None
most_asleep_time = 0
for guard in guards:
    if most_asleep_time < guard.time_asleep:
        most_asleep_time = guard.time_asleep
        most_asleep_guard = guard
# Now that we have the most asleep guard....
print("Most asleep guard: #", most_asleep_guard.id)
print("time spent sleeping (total): ", most_asleep_guard.time_asleep)
print("Unreadable activity chart: ", most_asleep_guard.activity) # raw per-minute tallies
# ...find the minute that guard is most often asleep.
most_sleepy_minute = None
time_asleep_on_minute = 0
for n in range(0, len(most_asleep_guard.activity)):
    if time_asleep_on_minute < most_asleep_guard.activity[n]:
        print(n+1)
        most_sleepy_minute = n+1
        time_asleep_on_minute = most_asleep_guard.activity[n]
#39 x 2953
print("Part 1 | Minute most asleep: ", most_sleepy_minute, " | Time asleep on that minute : ", time_asleep_on_minute)
print("Answer: ", int(most_asleep_guard.id) * most_sleepy_minute)
# Part two answer: across all guards, the (guard, minute) pair with the
# highest asleep count.
most_sleepy_minute = None
most_times_asleep = 0
most_sleepy_guard = None
for guard in guards:
    for minute in range(0, len(guard.activity)):
        if most_times_asleep < guard.activity[minute]:
            most_sleepy_guard = guard
            most_times_asleep = guard.activity[minute]
            most_sleepy_minute = minute+1
print("Part 2. Guard: ", most_sleepy_guard.id, " | most times asleep: ", most_times_asleep, " | Most sleepy minute: " , most_sleepy_minute)
print("Answer : ", int(most_sleepy_guard.id) * most_sleepy_minute)
76b0c13e35a7d7064fb7dba74a776adc45144512 | Python | cdroulers/dr-who-vs-daleks | /DrWhoGame.py | UTF-8 | 28,891 | 2.6875 | 3 | [
"Unlicense"
] | permissive | # -*- encoding: utf-8 -*-
# Programme par Xtian Droulers et Pie-Hoes Leclapi
# Version 0.1 d�but� le 5 septembre 2006
# version 1.0 pr�te le 15 septembre 2006
# version 1.1 pr�te le 19 septembre 2006, voir "Docs/About.txt" pour les d�tails.
from Tkinter import *
import random
import winsound
from time import sleep
#from win32api import ShellExecute
import thread
class Controleur:
    # Game controller: owns the Tk root, the model (AireDeJeu) and the view
    # (Vue), and coordinates each turn (moves, zaps, teleports, round and
    # game-over transitions).  Original French comments translated to English.
    # Constructor: create the per-game state and start a new game.
    def __init__(self):
        self.isZapping = False
        self.difficulte = "normal"
        self.root = Tk()
        self.aireDeJeu = AireDeJeu(self)
        self.vue = Vue(self)
        self.newGame()
    # Invoked by File -> Exit.
    def quit(self):
        self.root.quit()
    # Called every turn to know whether Dr. Who has died.
    def isGameOver(self):
        if self.aireDeJeu.doc.isAlive:
            return False
        else:
            return True
    # Probably unused: hooks the model calls to draw the Doctor and Daleks.
    def drawDrWho(self, doc):
        pass
    def drawDalek(self, dal):
        pass
    # Relay from the view to the Doctor: was the click adjacent to him?
    def clickedCloseToDrWho(self, x, y):
        return self.aireDeJeu.doc.isClose(x, y)
    # Return the Doctor's position to the view.
    def getPosDoc(self):
        return self.aireDeJeu.doc.posX, self.aireDeJeu.doc.posY
    # Run turns until the end of the round (or the game).
    def run(self):
        while not self.isGameOver() and not self.isRoundOver():
            self.afterMove()
        # Then check whether the round is over, i.e. all Daleks are dead.
        if self.isRoundOver():
            self.nextRound()
    # Run turns until a Dalek gets adjacent to the Doctor ("run minus one").
    def runMoinsUn(self):
        # Continue while runMoinsUn() is True (no Dalek adjacent) and the round is not over.
        while not self.isGameOver() and self.aireDeJeu.runMoinsUn() and not self.isRoundOver():
            self.afterMove()
        # Then check whether the round is over, i.e. all Daleks are dead.
        if self.isRoundOver():
            self.nextRound()
    # Teleport Dr. Who to a random square.
    def teleport(self):
        if not self.isGameOver():
            self.aireDeJeu.doc.teleport()
            self.afterMove()
            self.vue.playWav("Sounds/Teleportation.wav")
        # Then check whether the round is over, i.e. all Daleks are dead.
        if self.isRoundOver():
            self.nextRound()
    # Zap the squares around Dr. Who.
    def zap(self):
        if not self.isGameOver() and not self.isZapping and self.aireDeJeu.doc.zap():
            self.isZapping = True
            self.vue.zap(self.aireDeJeu.doc.posX, self.aireDeJeu.doc.posY)
            self.afterMove()
            self.isZapping = False
        # Then check whether the round is over, i.e. all Daleks are dead.
        if self.isRoundOver():
            self.nextRound()
    # After a move / zap / teleport: advance the Daleks and run the checks.
    def afterMove(self):
        # Move every Dalek one step toward the Doctor.
        for i in self.aireDeJeu.listeDaleks:
            x, y = i.findMove()
            i.move(x, y)
        # Then detect Dalek-on-Dalek collisions and whether the Doctor died.
        self.aireDeJeu.verifierCollision()
        # If the Doctor is still alive, keep playing and refresh the status bar.
        if self.aireDeJeu.verifierVieDrWho():
            self.vue.updateBar("Ronde: " + str(self.aireDeJeu.ronde) + " ---- Cr�dits: " + str(self.aireDeJeu.doc.credits) + " --- Zaps: " + str(self.aireDeJeu.doc.nbZaps))
        # Refresh the canvas.
        self.updateAireDeJeu()
    # Move Dr. Who by (x, y).
    def moveDrWho(self, x, y):
        if not self.isZapping:
            posX = self.aireDeJeu.doc.posX
            posY = self.aireDeJeu.doc.posY
            if self.aireDeJeu.doc.move(x, y):
                self.afterMove()
                # Then check whether the round is over, i.e. all Daleks are dead.
                if self.isRoundOver():
                    self.nextRound()
            else:
                self.afficherMessage("Mouvement invalide")
    # Move the Daleks (unused: afterMove drives them directly).
    def moveDalek(self):
        pass
    # Make Dr. Who disappear and play the death sound.
    def killDrWho(self):
        self.aireDeJeu.killDrWho()
        self.vue.playWav("Sounds/DocMort.wav")
    # Make one Dalek disappear (unused: the model handles collisions itself).
    def killDalek(self, dalek):
        self.aireDeJeu.killDalek(dalek)
    # Start a new game with counts driven by the selected difficulty.
    def newGame(self):
        if not self.isZapping:
            self.vue.newGame()
            if self.difficulte == "easy":
                nbDaleks, nbZaps, nbCredits, dim = 3, 3, 3, 30
            elif self.difficulte == "normal":
                nbDaleks, nbZaps, nbCredits, dim = 5, 1, 5, 30
            elif self.difficulte == "hard":
                nbDaleks, nbZaps, nbCredits, dim = 10, 1, 10, 30
            self.aireDeJeu = AireDeJeu(self, nbDaleks, nbZaps, nbCredits, dim)
            self.updateAireDeJeu()
            self.vue.updateBar("D�but de la partie! Ronde: " + str(self.aireDeJeu.ronde) + " ---- Cr�dits: " + str(self.aireDeJeu.doc.credits) + " --- Zaps: " + str(self.aireDeJeu.doc.nbZaps))
    # When the game is lost.
    def gameOver(self):
        self.killDrWho()
        self.vue.gameOver(self.aireDeJeu.doc.credits)
    # Advance to the next round.
    def nextRound(self):
        self.aireDeJeu.nextRound()
        self.vue.updateBar("Ronde: " + str(self.aireDeJeu.ronde))
        self.updateAireDeJeu()
    # Whether every Dalek of the round is dead.
    def isRoundOver(self):
        return self.aireDeJeu.isRoundOver()
    # Redraw the play area.
    def updateAireDeJeu(self):
        self.vue.update()
    # Show a message on the status bar together with the game stats.
    def afficherMessage(self, texte):
        self.vue.updateBar(texte + " --- Ronde: " + str(self.aireDeJeu.ronde) + " ---- Cr�dits: " + str(self.aireDeJeu.doc.credits) + " --- Zaps: " + str(self.aireDeJeu.doc.nbZaps))
    # Read the high scores through the DB class.
    def readScore(self):
        return DB().readScore()
    # Read a text file's contents through the DB class.
    def getText(self, filePath):
        return DB().getText(filePath)
    # Write the player's name and score to the scores file.
    def writeNom(self, nom):
        DB().writeScore(nom, self.aireDeJeu.doc.credits)
class AireDeJeu:
    # Game-board model: holds the grid dimension, round number, the Doctor and
    # the list of Daleks.  Original French comments translated to English.
    def __init__(self, controleur = None, nbDaleksDePlus = 5, nbZapsDePlus = 1, nbCreditsParDalek = 5, dimension = 30, ronde = 1):
        self.controleur = controleur
        self.dimension = dimension
        self.ronde = ronde
        self.nbDaleksDePlus = nbDaleksDePlus          # extra Daleks per round
        self.nbZapsDePlus = nbZapsDePlus              # extra zaps granted per round
        self.nbCreditsParDalek = nbCreditsParDalek    # credits earned per destroyed Dalek
        self.doc = DrWho(self, nbZapsDePlus)
        self.listeDaleks = self.initialiserDaleks()
    # Remove a zapped Dalek from the list (matched by its number).
    def deleteDalek(self, leDalekCon):
        for i in range(len(self.listeDaleks)):
            if self.listeDaleks[i].numero == leDalekCon.numero:
                del self.listeDaleks[i]
                break
        # If everything was zapped, the controller's round check ends the
        # round (the direct call here was removed; it fired twice):
        #if len(self.listeDaleks) == 0:
        #    self.controleur.nextRound()
    # Detect Dalek-on-Dalek collisions: the Doctor earns credits, the Dalek dies.
    def verifierCollision(self):
        for i in self.listeDaleks:
            if i.isAlive and i.isOverOtherDalek():
                self.doc.giveCredits()
                self.killDalek(i)
    # Check whether any Dalek reached Dr. Who; ends the game if so.
    def verifierVieDrWho(self):
        for i in self.listeDaleks:
            if i.isOverDrWho(self.doc.posX, self.doc.posY):
                self.deleteDalek(i)
                self.controleur.gameOver()
                return False
        return True
    # For "run minus one": False as soon as a Dalek is adjacent to the Doctor.
    def runMoinsUn(self):
        for i in self.listeDaleks:
            if self.doc.isClose(i.posX, i.posY):
                return False
        return True
    # Kill the Doctor.
    def killDrWho(self):
        self.doc.die()
    # Mark one Dalek as dead (leaves a scrap heap) and play the sound.
    def killDalek(self, unDalekCon):
        for i in range(len(self.listeDaleks)):
            if self.listeDaleks[i].numero == unDalekCon.numero:
                self.listeDaleks[i].dieNormal()
                self.controleur.vue.playWav("Sounds/DalekMort.wav")
    # Reset the board for the next round.
    def nextRound(self):
        self.ronde += 1
        self.listeDaleks = self.initialiserDaleks()
        self.doc.ajouterZap()
        self.doc.generatePos()
    # Create the Daleks needed for the current round.
    def initialiserDaleks(self):
        liste = []
        for i in range(self.ronde * self.nbDaleksDePlus):
            liste.append(Dalek(self, i))
        return liste
    # Whether every Dalek is dead.
    def isRoundOver(self):
        for i in self.listeDaleks:
            if i.isAlive:
                return False
        return True
# classe du Dr. Who
class DrWho:
    """The Doctor: the player's avatar, with a grid position, a stock of zaps
    and a credit count."""

    def __init__(self, aireDeJeu, zaps):
        self.aireDeJeu = aireDeJeu
        self.isAlive = True
        self.nbZaps = zaps
        self.credits = 0
        self.generatePos()

    def zap(self):
        """Destroy every Dalek adjacent to the Doctor; costs one zap.
        Returns True when a zap was spent, otherwise shows a message."""
        if self.nbZaps > 0:
            victims = [d for d in self.aireDeJeu.listeDaleks if d.isZapped()]
            for victim in victims:
                victim.dieZap()
                self.aireDeJeu.controleur.vue.playWav("Sounds/DalekMort.wav")
            self.enleverZap()
            return True
        else:
            self.aireDeJeu.controleur.afficherMessage("Vous n'avez plus de zaps.")

    def generatePos(self):
        """Place the Doctor on a random square of the board."""
        limit = self.aireDeJeu.dimension - 1
        self.posX = random.randint(0, limit)
        self.posY = random.randint(0, limit)

    def isClose(self, posX, posY):
        """Whether (posX, posY) lies within the 3x3 neighbourhood of the
        Doctor (his own square included)."""
        return abs(posX - self.posX) <= 1 and abs(posY - self.posY) <= 1

    def giveCredits(self):
        """Award the per-Dalek credit bounty to the Doctor."""
        self.credits += self.aireDeJeu.nbCreditsParDalek

    def canMove(self, x, y):
        """Whether a step of (x, y) stays on the board and avoids scrap heaps
        (dead Daleks block movement)."""
        newX = self.posX + x
        newY = self.posY + y
        limit = self.aireDeJeu.dimension - 1
        if not (0 <= newX <= limit and 0 <= newY <= limit):
            return False
        for dalek in self.aireDeJeu.listeDaleks:
            if not dalek.isAlive and dalek.isOverDrWho(newX, newY):
                return False
        return True

    def move(self, x, y):
        """Apply the step when legal; report whether it happened."""
        if not self.canMove(x, y):
            return False
        self.posX += x
        self.posY += y
        return True

    def ajouterZap(self):
        """Grant the per-round zap bonus."""
        self.nbZaps += self.aireDeJeu.nbZapsDePlus

    def enleverZap(self):
        """Consume one zap."""
        self.nbZaps -= 1

    def die(self):
        """Mark the Doctor as dead."""
        self.isAlive = False

    def teleport(self):
        """Jump to a random square (same distribution as the initial placement)."""
        self.generatePos()

    def draw(self):
        """Ask the controller to draw the Doctor (legacy hook)."""
        self.aireDeJeu.controleur.drawDrWho(self)
class Dalek:
    """One enemy Dalek: spawns on a random square (never on the Doctor) and
    steps one cell toward him each turn."""

    def __init__(self, aireDeJeu, no):
        self.isAlive = True
        self.aireDeJeu = aireDeJeu
        self.numero = no
        limit = aireDeJeu.dimension - 1
        doc = aireDeJeu.doc
        self.posX = random.randint(0, limit)
        self.posY = random.randint(0, limit)
        # Re-roll until the Dalek does not start on the Doctor's square.
        while self.posX == doc.posX and self.posY == doc.posY:
            self.posX = random.randint(0, limit)
            self.posY = random.randint(0, limit)

    def draw(self):
        """Ask the controller to draw this Dalek (legacy hook)."""
        self.aireDeJeu.controleur.drawDalek(self)

    def move(self, x, y):
        """Apply a step of (x, y)."""
        self.posX += x
        self.posY += y

    def findMove(self):
        """Unit step (sign of the offset on each axis) toward the Doctor;
        (0, 0) when this Dalek is already dead."""
        if not self.isAlive:
            return 0, 0
        dx = self.aireDeJeu.doc.posX - self.posX
        dy = self.aireDeJeu.doc.posY - self.posY
        stepX = (dx > 0) - (dx < 0)
        stepY = (dy > 0) - (dy < 0)
        return stepX, stepY

    def dieZap(self):
        """Death by zap: the Dalek vanishes from the board entirely."""
        self.aireDeJeu.deleteDalek(self)

    def dieNormal(self):
        """Death by collision: the Dalek stays on the board as a scrap heap."""
        self.isAlive = False

    def isZapped(self):
        """Whether this Dalek sits in the Doctor's zap neighbourhood."""
        return self.aireDeJeu.doc.isClose(self.posX, self.posY)

    def isOverOtherDalek(self):
        """Whether another Dalek occupies the same square (a collision)."""
        for other in self.aireDeJeu.listeDaleks:
            if other.numero != self.numero and self.posX == other.posX and self.posY == other.posY:
                return True
        return False

    def isOverDrWho(self, x, y):
        """Whether this Dalek stands on (x, y), the Doctor's square."""
        return self.posX == x and self.posY == y
class Vue:
    """Tkinter view for the Dr. Who vs. Daleks game.

    Owns the main window, menu bar, buttons, canvas, images and sounds, and
    delegates every game action back to the controller (MVC style).
    NOTE(review): the file uses Python 2 syntax elsewhere (print statements);
    confirm the target interpreter before porting.
    """
    def __init__(self, controleur):
        # Back-reference to the controller; all game actions route through it.
        self.controleur = controleur
        # Pixel size of one board cell on the canvas.
        self.grandeurGrid = 30
        self.isPlayingMusic = False
        self.wantsGrid = True
        # Sprites are loaded once and kept as attributes: Tkinter PhotoImages
        # disappear from the canvas if no Python reference keeps them alive.
        self.imageDrWho = PhotoImage(file="Icones/Doc.gif")
        self.imageDeadDrWho = PhotoImage(file="Icones/DocMort.gif")
        self.imageDalek = PhotoImage(file="Icones/Dalek.gif")
        self.imageTas = PhotoImage(file="Icones/DalekMort.gif")
        self.imageYellowZap = PhotoImage(file="Icones/YellowZap.gif")
        self.imageBlueZap = PhotoImage(file="Icones/BlueZap.gif")
        self.imageGameOver = PhotoImage(file="Icones/GameOver.gif")
        self.imageInstructions = PhotoImage(file="Icones/Numpad.gif")
        self.creerWidgets()
    # Build the widgets and the main window.
    def creerWidgets(self):
        """Create the menu bar, buttons, canvas and status bar, and lay them out."""
        self.controleur.root.title("Dr. Who vs. Daleks!!!")
        self.controleur.root.wm_iconbitmap("Icones/LMAOSoft.ico")
        # Menu bar.
        self.menubar = Menu(self.controleur.root)
        # "File" menu.
        self.filemenu = Menu(self.menubar, tearoff=0)
        self.filemenu.add_command(label="Nouvelle partie", command=self.controleur.newGame)
        self.filemenu.add_command(label="Meilleurs scores", command=self.afficherScores)
        self.filemenu.add_separator()
        self.filemenu.add_command(label="Fermer", command=self.controleur.quit)
        self.menubar.add_cascade(label="Fichier", menu=self.filemenu)
        # "Options" menu.
        self.optionmenu = Menu(self.menubar, tearoff=0)
        self.optionmenu.add_command(label="Toggler la grid", command=self.toggleGrid)
        self.menubar.add_cascade(label="Options", menu=self.optionmenu)
        # Difficulty menu (radio buttons: easy / normal / hard).
        self.difficultymenu = Menu(self.menubar, tearoff=0)
        self.difficultymenu.insert_radiobutton(1, label="Facile", command=self.clickEasy)
        self.difficultymenu.insert_radiobutton(2, label="Normal", command=self.clickNormal)
        self.difficultymenu.insert_radiobutton(3, label="Difficile", command=self.clickHard)
        self.menubar.add_cascade(label="Difficult�", menu=self.difficultymenu)
        # Help menu.
        self.helpmenu = Menu(self.menubar, tearoff=0)
        self.helpmenu.add_command(label="Instructions", command=self.afficherInstructions)
        self.helpmenu.add_separator()
        self.helpmenu.add_command(label="� propos", command=self.afficherAPropos)
        self.menubar.add_cascade(label="Aide", menu=self.helpmenu)
        # Attach the menu bar to the root window.
        self.controleur.root.config(menu=self.menubar)
        # Status bar and action buttons.
        self.status = Label(self.controleur.root, bd=1, relief=SUNKEN, anchor=W)
        self.btnZap = Button(self.controleur.root, text="Zapper", command=self.hitZ)
        self.btnTeleport = Button(self.controleur.root, text="T�l�porter", command=self.hitT)
        self.btnRun = Button(self.controleur.root, text="Runner", command=self.hitRun)
        self.btnRunMoinsUn = Button(self.controleur.root, text="Runner moins un", command=self.hitRunMoinsUn)
        # Game canvas, sized to the board dimension times the cell size.
        self.canevas = Canvas(self.controleur.root, width=self.controleur.aireDeJeu.dimension*self.grandeurGrid, height=self.controleur.aireDeJeu.dimension*self.grandeurGrid, bg="white")
        # Keyboard shortcut bindings.
        self.controleur.root.bind("<F1>", self.hitF1)
        self.controleur.root.bind("<F2>", self.hitF2)
        self.controleur.root.bind("<F12>", self.hitF12)
        # Final grid layout.
        self.btnZap.grid(row=0, column=0)
        self.btnTeleport.grid(row=0, column=1)
        self.btnRun.grid(row=0, column=2)
        self.btnRunMoinsUn.grid(row=0, column=3)
        self.canevas.grid(columnspan=4)
        self.status.grid(columnspan=4, sticky=W+E)
        self.status.config(text="Aller dans Fichier -> Nouvelle partie pour d�buter.")
    # Change the status-bar text.
    def updateBar(self, texte):
        self.status.config(text=texte)
    # Toggle whether the grid lines are drawn, then redraw everything.
    def toggleGrid(self, *args):
        if self.wantsGrid:
            self.wantsGrid = False
        else:
            self.wantsGrid = True
        self.update()
    # Draw the Doctor on the canvas (alive or dead sprite as appropriate).
    def drawDrWho(self):
        x = self.controleur.aireDeJeu.doc.posX
        y = self.controleur.aireDeJeu.doc.posY
        if self.controleur.aireDeJeu.doc.isAlive:
            photo = self.imageDrWho
        else:
            photo = self.imageDeadDrWho
        # Images are anchored at their centre, hence the half-cell offset.
        self.canevas.create_image(x * self.grandeurGrid + self.grandeurGrid / 2, y * self.grandeurGrid + self.grandeurGrid / 2, image=photo)
    # Draw every Dalek (live sprite, or scrap-heap sprite when destroyed).
    def drawDaleks(self):
        for i in self.controleur.aireDeJeu.listeDaleks:
            if i.isAlive:
                photo = self.imageDalek
            else:
                photo = self.imageTas
            self.canevas.create_image(i.posX * self.grandeurGrid + self.grandeurGrid / 2, i.posY * self.grandeurGrid + self.grandeurGrid / 2, image=photo)
    # Callback for F1: show the instructions dialog.
    def hitF1(self, event):
        self.afficherInstructions()
    # Callback for F2: start a new game.
    def hitF2(self, event):
        self.controleur.newGame()
    # Callback for F12: show the About dialog.
    def hitF12(self, event):
        self.afficherAPropos()
    # Callback for numpad keys: 1-9 move the Doctor, '+'/'-' run the Daleks.
    def hitKey(self, event):
        key = event.char
        if key == "1":
            self.controleur.moveDrWho(-1, 1)
        elif key == "2":
            self.controleur.moveDrWho(0, 1)
        elif key == "3":
            self.controleur.moveDrWho(1, 1)
        elif key == "4":
            self.controleur.moveDrWho(-1, 0)
        elif key == "5":
            self.controleur.moveDrWho(0, 0)
        elif key == "6":
            self.controleur.moveDrWho(1, 0)
        elif key == "7":
            self.controleur.moveDrWho(-1, -1)
        elif key == "8":
            self.controleur.moveDrWho(0, -1)
        elif key == "9":
            self.controleur.moveDrWho(1, -1)
        elif key == "+":
            self.controleur.run()
        elif key == "-":
            self.controleur.runMoinsUn()
    # Mouse-click callback on the canvas: move if adjacent to the Doctor,
    # otherwise teleport.
    def click(self, event):
        # NOTE(review): under Python 2, int / int floors to grid coordinates;
        # under Python 3 this would yield floats -- confirm before porting.
        x, y = event.x / self.grandeurGrid, event.y / self.grandeurGrid
        if self.controleur.clickedCloseToDrWho(x, y):
            docX, docY = self.controleur.getPosDoc()
            self.controleur.moveDrWho(x - docX, y - docY)
        else:
            self.controleur.teleport()
    # Difficulty-menu callbacks: store the chosen level on the controller.
    def clickEasy(self):
        self.controleur.difficulte = "easy"
    def clickNormal(self):
        self.controleur.difficulte = "normal"
    def clickHard(self):
        self.controleur.difficulte = "hard"
    # Callback (keyboard or button) for teleporting the Doctor.
    def hitT(self, *args):
        self.controleur.teleport()
    # Callback (keyboard or button) for zapping nearby Daleks.
    def hitZ(self, *args):
        self.controleur.zap()
    # Button callback: run the Daleks until the end of the game.
    def hitRun(self):
        self.controleur.run()
    # Button callback: run the Daleks until one step before the end.
    def hitRunMoinsUn(self):
        self.controleur.runMoinsUn()
    # Flash the zap animation on the eight cells around (x, y) and play a sound.
    def zap(self, x, y):
        g = self.grandeurGrid
        self.playWav("Sounds/Zap.wav")
        for i in range(3):
            self.zappityZap(x+1, y+1, g)
            self.zappityZap(x+1, y, g)
            self.zappityZap(x+1, y-1, g)
            self.zappityZap(x-1, y+1, g)
            self.zappityZap(x-1, y, g)
            self.zappityZap(x-1, y-1, g)
            self.zappityZap(x, y+1, g)
            self.zappityZap(x, y-1, g)
        self.update()
    # Flash a single cell: blue frame then yellow frame, 10 ms apart.
    def zappityZap(self, x, y, g):
        #self.canevas.create_rectangle(x * g, y * g, x * g + g, y * g + g, fill="blue")
        self.canevas.create_image(x * g + g / 2, y * g + g / 2, image=self.imageBlueZap)
        sleep(0.01)
        self.canevas.update()
        #self.canevas.create_rectangle(x * g, y * g, x * g + g, y * g + g, fill="yellow")
        self.canevas.create_image(x * g + g / 2, y * g + g / 2, image=self.imageYellowZap)
        sleep(0.01)
        self.canevas.update()
    # Draw the board grid lines on the canvas.
    def drawGrid(self):
        grandeur = self.controleur.aireDeJeu.dimension
        for i in range(self.controleur.aireDeJeu.dimension):
            self.canevas.create_line(0, i*self.grandeurGrid, grandeur*self.grandeurGrid, i*self.grandeurGrid)
            self.canevas.create_line(i*self.grandeurGrid, 0, i*self.grandeurGrid, grandeur*self.grandeurGrid)
    # Redraw the whole canvas: clear, optional grid, Doctor, Daleks.
    def update(self):
        self.canevas.delete(ALL)
        if self.wantsGrid:
            self.drawGrid()
        self.drawDrWho()
        self.drawDaleks()
        self.canevas.update()
    # Start of a game: install every key/mouse binding needed during play.
    def newGame(self):
        if not self.isPlayingMusic:
            self.playMid("Sounds/BackgroundMusic.mid")
            #winsound.PlaySound("Sounds/TestMusic.wav", winsound.SND_ASYNC + winsound.SND_LOOP)
        self.canevas.config(bg="white")
        # Numeric-keypad movement keys.
        self.controleur.root.bind("<Key>", self.hitKey)
        # Teleport and zap shortcuts.
        self.controleur.root.bind("<t>", self.hitT)
        self.controleur.root.bind("<z>", self.hitZ)
        self.controleur.root.bind("<0>", self.hitT)
        self.controleur.root.bind("<.>", self.hitZ)
        self.controleur.root.bind("<g>", self.toggleGrid)
        # Mouse: left click moves/teleports the Doctor, right click zaps.
        self.canevas.bind("<Button-1>", self.click) # move the Doctor, or teleport
        self.canevas.bind("<Button-3>", self.hitZ) # zap
    # End of the game: remove the gameplay bindings, show the final score and
    # the game-over screen. (Original author's TODO: find the right unbind --
    # note "<k>" below is unbound although "<Key>" was the bound sequence.)
    def gameOver(self, credits):
        self.canevas.config(bg="yellow")
        self.controleur.root.unbind("<k>")
        self.controleur.root.unbind("<t>")
        self.controleur.root.unbind("<g>")
        self.controleur.root.unbind("<Key>")
        self.controleur.root.unbind("<0>")
        self.controleur.root.unbind("<.>")
        self.canevas.unbind("<Button-1>")
        self.canevas.unbind("<Button-3>")
        self.updateBar("FIN DE LA PARTIE!!!!! Vous avez fait " + str(credits) + " cr�dits!!!")
        self.getNom()
        self.canevas.config(bg="gray")
        self.canevas.create_image(self.controleur.aireDeJeu.dimension * self.grandeurGrid / 2, self.controleur.aireDeJeu.dimension * self.grandeurGrid / 2, image=self.imageGameOver)
        self.canevas.update()
    # Show the instructions dialog (text loaded from Docs/Instructions.txt).
    def afficherInstructions(self):
        texte = self.controleur.getText("Docs/Instructions.txt")
        self.top = Toplevel(self.controleur.root)
        self.top.title("Instructions du jeu Dr.Who Vs. Daleks...")
        self.top.wm_iconbitmap("Icones/About.ico")
        self.top.lblImage = Label(self.top, image=self.imageInstructions)
        self.top.lblImage.grid()
        self.top.lblInstructions = Label(self.top, text=texte)
        self.top.lblInstructions.grid()
        Button(self.top, text="OK", command=self.top.destroy).grid()
    # Show the About dialog (text loaded from Docs/About.txt).
    def afficherAPropos(self):
        texte = self.controleur.getText("Docs/About.txt")
        self.top = Toplevel(self.controleur.root)
        self.top.title("� propos du jeu Dr.Who Vs. Daleks...")
        self.top.wm_iconbitmap("Icones/LMAOSoft.ico")
        self.top.lblInstructions = Label(self.top, text=texte)
        self.top.lblInstructions.grid()
        Button(self.top, text="OK", command=self.top.destroy).grid()
    # Show the high-score table, one (name, score) row per entry.
    def afficherScores(self):
        listeScore = self.controleur.readScore()
        self.top = Toplevel(self.controleur.root)
        self.top.title("Les meilleurs scores")
        self.top.wm_iconbitmap("Icones/LMAOSoft.ico")
        for i in range(len(listeScore)):
            Label(self.top, text=str(listeScore[i][0]), width=50).grid(row=i, column=0)
            Label(self.top, text=str(listeScore[i][1]), width=50).grid(row=i, column=1)
        Button(self.top, text="OK", command=self.top.destroy).grid(columnspan=2, sticky=W+E)
    # Prompt the player for a name to record in the high-score table.
    def getNom(self):
        self.top = Toplevel(self.controleur.root)
        self.top.title("Veuillez entrer votre nom")
        self.top.wm_iconbitmap("Icones/LMAOSoft.ico")
        Label(self.top, text="Nom:").grid(row=0, column=0)
        self.top.entryNom = Entry(self.top)
        self.top.entryNom.grid(row=0, column=1)
        self.top.btnOK = Button(self.top, text="OK", command=self.sendNom)
        self.top.btnOK.grid(row=1, columnspan=2, sticky=W+E)
    # Destroy the name dialog, forward the name to the controller, then show scores.
    def sendNom(self):
        stringTempo = self.top.entryNom.get()
        self.top.destroy()
        self.controleur.writeNom(stringTempo)
        self.afficherScores()
    # Play a MIDI file. NOTE(review): the actual playback call is commented
    # out, so this currently only sets the isPlayingMusic flag.
    def playMid(self, filename):
        self.isPlayingMusic = True
        #ShellExecute(0, "open", filename, None, "", 0)
    # Play a WAV file asynchronously (Windows-only winsound).
    def playWav(self, filename):
        winsound.PlaySound(filename, winsound.SND_ASYNC)
class DB:
    """Flat-file persistence: high scores and static text documents."""

    def writeScore(self, name, score):
        """Append one "name,score" line to the high-score file.

        Uses `open` instead of the removed Python-2-only `file()` builtin,
        and a context manager so the handle is closed even on error.
        """
        with open("Saves/HighScores.txt", "a") as fichier:
            fichier.write("\n" + str(name) + "," + str(score))

    def readScore(self):
        """Return the high scores as a list of [name, score] string pairs."""
        with open("Saves/HighScores.txt") as fichier:
            contenu = fichier.read()
        # One "name,score" record per line.
        return [ligne.split(",") for ligne in contenu.split("\n")]

    def getText(self, filePath):
        """Return the full contents of the text file at `filePath`."""
        with open(filePath) as fichier:
            return fichier.read()
# Script entry point: build the controller (which owns the Tk root window)
# and hand control to the Tkinter event loop.
# NOTE(review): Python 2 print statements -- this file will not run under Python 3.
if __name__ == "__main__":
    c = Controleur()
    print "LOL LIMEWIRE"
    print "PIRATONS LES LANGUES!!!! C'EST COMME DES OCEN MAIS PAS DE O LOL!"
    c.root.mainloop()
| true |
1e2661f70cb7b97050204c8b6a0f8b7b9908e41e | Python | cowlove/winglevlr | /plot_on_map.py | UTF-8 | 1,740 | 2.9375 | 3 | [] | no_license | #!/usr/bin/python3
# Plot the files "out.plog" and "wpts.txt" on google maps, output file to "map.html"
# Display with "google-chrome ./map.html"
import gmplot
import re
import sys
# Input files: two track logs and one waypoint list, overridable from argv.
filePlog1 = "./out.plog" if len(sys.argv) < 2 else sys.argv[1]
filePlog2 = "./out.plog" if len(sys.argv) < 3 else sys.argv[2]
fileWpts = "./wpts.txt" if len(sys.argv) < 4 else sys.argv[3]
apikey = 'AIzaSyC4_GZpzLJsbb_XgHka26mQQTa-QaO9d3w'
gmap = gmplot.GoogleMapPlotter(47.45804489362055, -122.59075938584017, 13, apikey=apikey)

def _read_track(path, sample_every=100):
    """Parse a .plog file into a list of (lat, lon) tuples.

    Only every `sample_every`-th line is kept, to thin the plotted track.
    Each file gets its own line counter -- the original shared one counter
    across both tracks, which skewed the sampling offset of the second file.
    """
    track = []
    count = 0
    with open(path) as f:
        for line in f:
            count += 1
            if count % sample_every == 0:
                words = re.split(r"\s+", line)
                # Fields 30/31 are read as latitude/longitude -- TODO confirm
                # against the plog column layout.
                # Fixed off-by-one: the original tested len(words) > 30 but
                # also read words[31].
                if len(words) > 31:
                    track.append((float(words[30]), float(words[31])))
    return track

def _read_waypoints(path):
    """Parse a waypoint file of "lat, lon" lines into (lat, lon) tuples."""
    wpts = []
    with open(path) as f:
        for line in f:
            m = re.match(r"\s*([\d.+-]+)\s*,\s*([\d.+-]+)", line)
            if m:
                wpts.append((float(m.group(1)), float(m.group(2))))
    return wpts

track1 = _read_track(filePlog1)
track2 = _read_track(filePlog2)
waypoints = _read_waypoints(fileWpts)

if waypoints:
    # Close the waypoint loop by repeating the first point.
    waypoints.append(waypoints[0])
    gmap.scatter(*zip(*waypoints), color='yellow')
gmap.plot(*zip(*track1), edge_width=3, color='red')
gmap.plot(*zip(*track2), edge_width=2, color='green')
gmap.draw('map.html')
| true |
2d1e8c1ffeaccbe722350b58e1555629ebaf8c6d | Python | GayanSandaruwan/Concurrent-Matrix-Multiplication | /Submition/execute.py | UTF-8 | 3,334 | 2.609375 | 3 | [] | no_license | import subprocess
import math
import sys
import statistics
# Scratch lists for the raw timing samples of each implementation.
seq = []
parallel = []
parallelOpt = []
# NOTE(review): initialised as dicts but later re-assigned to scalar means
# inside the main loop -- the dict initialisers appear to be vestigial.
seqMean = {}
parallelMean = {}
parallelOptMean = {}
# Paths of the three executables under test, taken from the command line.
seqFile = sys.argv[1]
parallelFile = sys.argv[2]
parallelOptFile = sys.argv[3]
# Largest problem size to benchmark (the main loop steps by 200 up to here).
executeLimit = 2000
def numberOfSamples(dataSet):
    """Return the sample count needed for a 95% CI within 5% of the mean.

    Applies n = ((100 * z * s) / (5 * mean))^2 with z = 1.96, rounded up.
    """
    spread = statistics.stdev(dataSet)
    center = statistics.mean(dataSet)
    # Grouping kept identical to preserve floating-point results exactly.
    estimate = ((100 * 1.96 * spread) / (5 * center)) ** 2
    return math.ceil(estimate)
def rerun(fileName, size, times):
    """Execute `fileName` with `size` as its argument, `times` times.

    Each run is expected to print a single integer (its measured time);
    the integers are returned in execution order.
    """
    return [int(subprocess.check_output([fileName, str(size)]))
            for _ in range(times)]
def _measure(executable, size, label):
    """Benchmark `executable` at problem size `size`.

    Takes 10 samples, then -- if the 95%/5% sample-size formula demands
    more -- re-runs with the required count. Returns the mean of the final
    sample set. Replaces three copy-pasted sections of the original; the
    progress messages are normalised across the three implementations.
    """
    samples = rerun(executable, size, 10)
    print("std 10 samples:(" + label + ") " + str(statistics.stdev(samples)))
    print("mean 10 samples:(" + label + ") " + str(statistics.mean(samples)))
    required = numberOfSamples(samples)
    print("Required Sample Size: " + str(required))
    if required > 10:
        print("Running again")
        samples = rerun(executable, size, required)
        print("mean " + str(required) + " samples:(" + label + ") " + str(statistics.mean(samples)))
    else:
        print("Proceeding with sample size 10.")
    return statistics.mean(samples)

# Append so successive benchmark runs accumulate in one results file.
# (Renamed from `file`, which shadowed a builtin name.)
out_file = open("output.txt", "a")
for j in range(200, executeLimit + 1, 200):
    print("N = " + str(j))
    print("-" * 60)
    print("Sequential mean calculation started : 10 samples.")
    seqMean = _measure(seqFile, j, "seq")
    print("-" * 60)
    print("Parallel mean calculation started : 10 samples.")
    parallelMean = _measure(parallelFile, j, "par")
    print("-" * 60)
    print("Optimized mean calculation started : 10 samples.")
    parallelOptMean = _measure(parallelOptFile, j, "opt")
    print(str(j) + " complete")
    # Results row; field spacing kept byte-identical to the original file format.
    row = str(j) + ", " + str(seqMean) + ", " + str(parallelMean) + "," + str(parallelOptMean) + "\n"
    print(row)
    out_file.write(row)
    # Flush so partial results survive an interrupted benchmark run.
    out_file.flush()
out_file.close()
| true |
3f53d5510b8658ed50204587cb493e79a6778ea3 | Python | xinsec/py | /ts2.py | UTF-8 | 215 | 2.546875 | 3 | [] | no_license | import time
import progressbar
# Build a progress bar showing elapsed time, a fill bar, and an ETA estimate.
bar = progressbar.ProgressBar(widgets=[
    ' [', progressbar.Timer(), '] ',
    progressbar.Bar(),
    ' (', progressbar.ETA(), ') ',
])
# Drive the bar over 200 steps, sleeping 0.1 s each (~20 s total) so the
# bar visibly advances.
for i in bar(range(200)):
    time.sleep(0.1)
cef12213ed12f9b8f4bacbba09174559e702c317 | Python | rldaugherty/Python-Repo | /01. Syntax/08. Calculations.py | UTF-8 | 787 | 4.3125 | 4 | [] | no_license | # Using numbers in calculations means we have to define them in a very specific way. We can't define them as strings (surrounded by quote marks). They need to be defined as INT() or FLOAT() to be used in mathmatical equations.
# When building equations, remember the "Order of Operations", PEMDAS for short
# (Parentheses > Exponents > Multiplication/Division > Addition/Subtraction).
# Also note that when Python performs true division with `/`, the result is
# always a float, even when both operands are ints and the division is exact.
# Prints "500"
print(573 - 74 + 1)
# Prints "50"
print(25 * 2)
# Prints "2.0" -- `/` yields a float even though 10 divides evenly by 5
print(10 / 5)
# If you try to divide by zero, Python raises a special error for that:
# ZeroDivisionError. The line below therefore stops the script with a
# traceback -- an intentional demonstration.
print(115/0)
| true |