blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
af501afb70832d063e4e403f263fc9528f2c2632 | Python | llOldmenll/python_test | /tkinter_gui_test.py | UTF-8 | 239 | 3.0625 | 3 | [] | no_license | import tkinter
from tkinter import *
window = tkinter.Tk()
labelFrame = LabelFrame(window, text="It's LABEL!!!")
labelFrame.pack(fill=BOTH, expand="yes")
left = Label(labelFrame, text="Inside label!!!!")
left.pack()
window.mainloop()
| true |
48f5cfcf69f3b40c393213edca39a3ce8bac4acc | Python | CottageLabs/metadata-enhancement | /cleanup/cleanup/find_duplicates.py | UTF-8 | 658 | 2.96875 | 3 | [] | no_license | import csv
from csvwrapper import CSVWrapper, normalise_strings, denormalise_strings
import sys
def detect_and_strip_duplicates(values):
    """Remove duplicate values from a CSV cell, comparing normalised forms.

    values -- list of cell strings; they are normalised first so that
    near-identical entries compare equal, then restored afterwards.
    Returns the de-duplicated list in original (de-normalised) form.
    """
    norm_values, map = normalise_strings(values)
    new_values = []
    for value in norm_values:
        if value not in new_values:
            new_values.append(value)
        else:
            # Fixed: original used a Python 2 print statement, which is a
            # SyntaxError under Python 3.
            print(values, '. Duplicate was:', value)
    return denormalise_strings(new_values, map)
def main(argv=None):
    """Entry point: strip duplicate cell values from the CSV named in argv[1]."""
    args = argv if argv else sys.argv
    input_path = args[1]
    wrapper = CSVWrapper(input_path)
    wrapper.apply_global_cell_function(detect_and_strip_duplicates)
if __name__ == '__main__':
main() | true |
0c8865a5fc76823d384ac8ae26a8eb73c6e86e55 | Python | Createcafe3d/YXE3Dtools | /test/peachyprinter_test/infrastructure_test/simulator_test.py | UTF-8 | 8,295 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive | import unittest
import numpy as np
import math
from math import pi
import os
import sys
from mock import MagicMock, patch
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src'))
from peachyprinter.infrastructure.simulator import *
class TestHelp(object):
    """Mixin with assertion helpers for points in projective coordinates."""
    # Tolerance for floating-point comparison after perspective division.
    epsilon = 0.000001

    def assertProjectiveCoordinatesEqual(self, a, b):
        """Assert two homogeneous 1x4 row vectors describe the same 3D point."""
        for axis in range(3):
            # Divide each component by the homogeneous coordinate before comparing.
            delta = a[0, axis] / a[0, 3] - b[0, axis] / b[0, 3]
            self.assertTrue(abs(delta) < self.epsilon, 'Was out by : %s' % delta)
class MirrorTest(unittest.TestCase,TestHelp):
    """Unit tests for Mirror.reflect().

    Each test builds a Mirror from a base point, a surface normal, a
    rotation axis and a mocked galvo-position callback, reflects a point,
    and compares against a hand-computed image using the projective
    coordinate helper from TestHelp.
    """

    def test_reflect_refects_at_origin_when_perpendicular_to_origin(self):
        # Mirror at the origin facing +z: a point on the +z axis maps to -z.
        base_point = np.matrix([0.0,0.0,0.0,1.0])
        point = np.matrix([0.0,0.0,2.0,1.0])
        expected_point = np.matrix([0.0,0.0,-2.0,1.0])
        normal = np.matrix([0.0,0.0,1.0,0.0])
        axis = np.matrix([0.0,1.0,0.0,0.0])
        galvo_pos = MagicMock()
        galvo_pos.return_value = 0
        mirror = Mirror(base_point, normal, axis, galvo_pos)
        result = mirror.reflect(point, 0.0)
        self.assertProjectiveCoordinatesEqual(expected_point,result)

    def test_reflect_refects(self):
        # 45-degree mirror through the origin swaps/negates x and z.
        base_point = np.matrix([0.0,0.0,0.0,1.0])
        point = np.matrix([1.0,0.0,3.0,1.0])
        expected_point = np.matrix([-3.0,0.0,-1.0,1.0])
        normal = np.matrix([math.sqrt(2)/2,0.0,math.sqrt(2)/2,0.0])
        axis = np.matrix([-1.0,0.0,-1.0,0.0])
        galvo_pos = MagicMock()
        galvo_pos.return_value = 0
        mirror = Mirror(base_point, normal, axis, galvo_pos)
        result = mirror.reflect(point, 0.0)
        self.assertProjectiveCoordinatesEqual(expected_point,result)

    def test_normal_can_be_non_unit(self):
        # Same geometry as above but with an un-normalised normal vector:
        # Mirror must normalise internally and give the same reflection.
        base_point = np.matrix([0.0,0.0,0.0,1.0])
        point = np.matrix([1.0,0.0,3.0,1.0])
        expected_point = np.matrix([-3.0,0.0,-1.0,1.0])
        normal = np.matrix([1.0,0.0,1.0,0.0])
        axis = np.matrix([-1.0,0.0,-1.0,0.0])
        galvo_pos = MagicMock()
        galvo_pos.return_value = 0
        mirror = Mirror(base_point, normal, axis, galvo_pos)
        result = mirror.reflect(point, 0.0)
        self.assertProjectiveCoordinatesEqual(expected_point,result)

    def test_reflect_refects_not_at_origin_when_perpendicular_to_the_point(self):
        # Mirror plane offset to z=1: the point at z=2 reflects to z=0.
        base_point = np.matrix([0.0,0.0,1.0,1.0])
        point = np.matrix([0.0,0.0,2.0,1.0])
        expected_point = np.matrix([0.0,0.0,-0.0,1.0])
        normal = np.matrix([0.0,0.0,1.0,0.0])
        axis = np.matrix([1.0,0.0,0.0,0.0])
        galvo_pos = MagicMock()
        galvo_pos.return_value = 0
        mirror = Mirror(base_point, normal, axis, galvo_pos)
        result = mirror.reflect(point, 0.0)
        self.assertProjectiveCoordinatesEqual(expected_point,result)

    def test_reflect_refects_not_at_origin_when_not_perpendicular_to_the_point(self):
        # Off-axis point: only the z component is mirrored about the z=1 plane.
        base_point = np.matrix([0.0,0.0,1.0,1.0])
        point = np.matrix([0.0,10.0,2.0,1.0])
        expected_point = np.matrix([0.0,10.0,-0.0,1.0])
        normal = np.matrix([0.0,0.0,1.0,0.0])
        axis = np.matrix([0.0,1.0,0.0,0.0])
        galvo_pos = MagicMock()
        galvo_pos.return_value = 0
        mirror = Mirror(base_point, normal,axis, galvo_pos)
        result = mirror.reflect(point, 0.0)
        self.assertProjectiveCoordinatesEqual(expected_point,result)

    def test_mirror_calculates_angle_correctly(self):
        # Galvo rotates the mirror by pi/4, so the reflection direction
        # must account for the rotated normal.
        base_point = np.matrix([0.0,0.0,1.0,1.0])
        normal = np.matrix([1.0,0.0,0.0,0.0])
        axis = np.matrix([0.0,1.0,0.0,0.0])
        point = np.matrix([-1.0,0.0,1.0,1.0])
        expected_point = np.matrix([0.0,0.0,2.0,1.0])
        galvo_pos = MagicMock()
        galvo_pos.return_value = math.pi / 4.0
        mirror = Mirror(base_point, normal, axis, galvo_pos)
        result = mirror.reflect(point, 1.0)
        self.assertProjectiveCoordinatesEqual(expected_point,result)
class GalvoTest(unittest.TestCase):
    """Checks that galvo deflection input maps linearly onto mirror angle."""

    def test_galvo_pos(self):
        galvo = Galvo()
        # Full-scale input of +/-1.0 maps to +/-pi/8 radians, scaling linearly.
        cases = [
            (-pi / 8.0, -1.0),
            (pi / 8.0, 1.0),
            (0.0, 0.0),
            (-pi / 16.0, -0.5),
            (pi / 16.0, 0.5),
        ]
        for expected_angle, deflection in cases:
            self.assertAlmostEquals(expected_angle, galvo.pos(deflection), 6)
class LaserTest(unittest.TestCase,TestHelp):
    """Unit tests for Laser.fire(), which projects the beam from the laser's
    position through its point_at direction to a target at the given height.
    """

    def test_get_real_point_with_aimed_down(self):
        #Setup
        # Laser pointing straight down the z axis: fire(h) lands at z == h.
        expected_target_point1 = np.matrix([0.0,0.0,1.0,1.0])
        expected_target_point2 = np.matrix([0.0,0.0,2.0,1.0])
        position = np.matrix([0.0,0.0,10.0,1.0])
        point_at = np.matrix([0.0,0.0,1.0,1.0])
        laser = Laser(position, point_at)
        #Assert
        self.assertProjectiveCoordinatesEqual(expected_target_point1, laser.fire(1.0))
        self.assertProjectiveCoordinatesEqual(expected_target_point2, laser.fire(2.0))

    def test_get_real_point_aimed_somewhere(self):
        #Setup
        # Diagonal beam direction (-1,-1,-1): fired to height 1 it lands at (1,3,1).
        expected_target_point1 = np.matrix([1.0,3.0,1.0,1.0])
        position = np.matrix([4.0,6.0,4.0,1.0])
        point_at = np.matrix([3.0,5.0,3.0,1.0])
        laser = Laser(position, point_at)
        #Assert
        self.assertProjectiveCoordinatesEqual(expected_target_point1, laser.fire(1.0))

    def test_get_real_point_with_moved_laser(self):
        #Setup
        # NOTE(review): identical fixture to the previous test -- presumably
        # intended to move the laser between shots; confirm against intent.
        expected_target_point1 = np.matrix([1.0,3.0,1.0,1.0])
        position = np.matrix([4.0,6.0,4.0,1.0])
        point_at = np.matrix([3.0,5.0,3.0,1.0])
        laser = Laser(position, point_at)
        #Assert
        self.assertProjectiveCoordinatesEqual(expected_target_point1, laser.fire(1.0))

    def test_should_throw_exception_when_laser_axis_is_parallel_to_point_at(self):
        #Setup
        # Beam travelling horizontally can never reach a target height plane.
        position = np.matrix([0.0,0.0,1.0,1.0])
        point_at = np.matrix([1.0,0.0,1.0,1.0])
        laser = Laser(position, point_at)
        with self.assertRaises(Exception):
            laser.fire(0.0)
class PeachyPrinterTest(unittest.TestCase,TestHelp):
    """End-to-end test of PeachyPrinter.write() with mocked mirrors and laser.

    write() is expected to reflect the laser's position and aim through
    mirror1 (deflection1), construct a phantom laser, reflect that through
    mirror2 (deflection2), and finally fire the resulting laser at z_height.
    The assertions pin down the exact call order and arguments.
    """

    @patch('peachyprinter.infrastructure.simulator.Laser')
    def test_write(self, mock_Laser):
        #Setup
        deflection1 = 1.0
        deflection2 = -1.0
        z_height = 10
        position = np.matrix([0.0,0.0,0.0,1.0])
        laser_point_at = np.matrix([1.0,0.0,0.0,1.0])
        phantom_laser1_position = np.matrix([1.0,0.0,7.0,1.0])
        phantom_laser1_point_at = np.matrix([1.0,1.0,0.0,1.0])
        phantom_laser2_position = np.matrix([2.0,0.0,0.0,1.0])
        phantom_laser2_point_at = np.matrix([1.0,2.0,0.0,1.0])
        laser = MagicMock()
        laser.position = position
        laser.point_at = laser_point_at
        mock_Laser.return_value.position = phantom_laser1_position
        mock_Laser.return_value.point_at = phantom_laser1_point_at
        mock_Laser.return_value.fire.return_value = [1.0,2.0,3.0,1.0]
        mirror1 = MagicMock()
        # Each mirror returns its queued values in order: first the reflected
        # position, then the reflected point_at.
        mirror1_values = [phantom_laser1_position, phantom_laser1_point_at ]
        def mirror1_side_effect(a,b):
            return mirror1_values.pop(0)
        mirror1.reflect.side_effect = mirror1_side_effect
        mirror2 = MagicMock()
        mirror2_values = [phantom_laser2_position, phantom_laser2_point_at ]
        def mirror2_side_effect(a,b):
            return mirror2_values.pop(0)
        mirror2.reflect.side_effect = mirror2_side_effect
        pp = PeachyPrinter(mirror1, mirror2, laser)
        #Execute
        pp.write(deflection1,deflection2, z_height)
        #Assert
        # Phantom lasers are constructed from the mirror-reflected geometry...
        self.assertEquals((phantom_laser1_position, phantom_laser1_point_at), mock_Laser.mock_calls[0][1])
        self.assertEquals((phantom_laser2_position, phantom_laser2_point_at), mock_Laser.mock_calls[1][1])
        # ...mirror1 reflects the real laser, mirror2 reflects the phantom...
        self.assertEquals((position, deflection1), mirror1.mock_calls[0][1])
        self.assertEquals((laser_point_at, deflection1), mirror1.mock_calls[1][1])
        self.assertEquals((phantom_laser1_position, deflection2), mirror2.mock_calls[0][1])
        self.assertEquals((phantom_laser1_point_at, deflection2), mirror2.mock_calls[1][1])
        # ...and the final phantom laser fires exactly once at the target height.
        mock_Laser.return_value.fire.assert_called_once_with(z_height)
if __name__ == '__main__':
unittest.main() | true |
8e5ab727b05e96c153c6429bca97d6ee4befa910 | Python | mandychumt/LondonAirQuality | /London.py | UTF-8 | 17,535 | 3.34375 | 3 | [] | no_license | import requests
import time
import sqlite3
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
import pandas as pd
import matplotlib.pyplot as plt # use "pythonw" instead of "python" if there is ImportError
import sys
# a command which is either "local" or "remote" should be entered in command-line for obtaining data locally of remotely
try:
source = sys.argv[1]
except:
print('Please enter "local" or "remote" in command-line')
# grab, store and manipulate data from source 1
print('*** Outputing Data Source 1 ***')
print('''This data source from API is the carbon intensity data between specified datetimes for London.
I grab the carbon inensity forecast and index between May and December in 2018, and store them in several text files.
Then I use these data to obtain monthly average carbon intensity, the forecast times of both high index and very high index.''')
print('\nAPI Endpoint: \nBase URL: https://api.carbonintensity.org.uk/regional/intensity/{from}/{to}/regionid/13 \nParameter {from}: Start datetime in in ISO8601 format YYYY-MM-DDThh:mmZ \nParameter {to}: End datetime in in ISO8601 format YYYY-MM-DDThh:mmZ \ne.g. https://api.carbonintensity.org.uk/regional/intensity/2018-05-01T00:00Z/2018-05-31T23:59Z/regionid/13')
print('\nAPI Documentation: \nhttps://carbon-intensity.github.io/api-definitions/?python#carbon-intensity-api-v2-0-0')
print('\nFile Names: \n{month}forecast.txt or {month}index.txt \nParameter{month}: from 05 to 12')
print('\nSample Result for May 2018:')
# Write each item of a list into a text file, comma-separated (with a
# trailing comma after the last item, matching the cache-file format).
def write_text(file_name, ls):
    with open(file_name, 'w') as out:
        for entry in ls:
            out.write(str(entry))
            out.write(',')
# a function to obtain the average value of some forecast values in a list
def get_avg_forecast(ls):
    """Return the arithmetic mean of a list of forecast values.

    ls -- iterable of numbers or numeric strings.
    Returns the mean as a float, or None for an empty input (the sentinel
    the original callers rely on).
    """
    values = [float(forecast) for forecast in ls]
    if not values:
        return None
    # Idiomatic mean via sum/len instead of a manual count/total loop.
    return sum(values) / len(values)
# a function to obtain the count of very_high or high index in a list
def get_index_count(ls):
    """Return (very_high_count, high_count) for a list of index labels."""
    veryhigh_count = sum(1 for index in ls if index == 'very high')
    high_count = sum(1 for index in ls if index == 'high')
    return (veryhigh_count, high_count)
# a function to read the local txt file and store data into a list
def get_data_1_locally(file_name):
    """Load one cached month of source-1 data from a comma-separated file.

    file_name -- path written earlier by write_text(); files ending in
    'forecast.txt' hold integer forecasts, anything else holds index labels.
    Returns a list of ints (forecast files; blank/non-numeric tokens are
    skipped) or raw string tokens (index files; the trailing empty field
    from the trailing comma is kept, matching the original behaviour).
    """
    # Fixed: use a context manager so the file handle is always closed
    # (the original leaked it), and catch only ValueError instead of a
    # bare except.
    with open(file_name, 'r') as fh:
        tokens = fh.readline().split(',')
    if file_name.endswith('forecast.txt'):
        values = []
        for token in tokens:
            try:
                values.append(int(token))
            except ValueError:
                pass  # skip the empty trailing field and any garbage
        return values
    return tokens
# a function to grab data from source 1 remotely from API
def get_data_1_remotely(date):
    """Fetch one month of London carbon-intensity data from the API.

    date -- tuple like ('05-01', '05-31', 'may'): start day, end day, name.
    Returns (forecast_ls, index_ls): half-hourly forecast values and index
    labels. Also writes both lists to the month's cache files -- the
    module-level file_name_forecast / file_name_index set by the caller's
    loop -- when those files do not already exist.
    """
    headers = {
        'Accept': 'application/json'
    }
    # Regional endpoint; regionid 13 is London.
    url = 'https://api.carbonintensity.org.uk/regional/intensity/2018-' + date[0] + 'T00:00Z/2018-' + date[1] + 'T23:59Z/regionid/13'
    r = requests.get(url, params={}, headers = headers)
    data = r.json()
    # NOTE(review): the four counters below are never used in this function.
    count = 0 # the count of forecast values
    total = 0 # the total value of every forecast
    high_count = 0 # the forecast times of high index
    veryhigh_count = 0 # the forecast times of veryhigh index
    forecast_ls = []
    index_ls = []
    # e30m means every 30 minutes, the carbon intensity will be given for every half hour
    for e30m in data['data']['data']:
        # get the forecast value for every half hour
        forecast = e30m['intensity']['forecast']
        forecast_ls.append(forecast)
        index = e30m['intensity']['index']
        index_ls.append(index)
    # check if the file for storing forecast data exist, create a new one if not
    try: open(file_name_forecast,'r')
    except: write_text(file_name_forecast, forecast_ls)
    # check if the file for storing index data exist, create a new one if not
    try: open(file_name_index,'r')
    except: write_text(file_name_index, index_ls)
    return (forecast_ls, index_ls)
    # the API cannot work too fast so need to sleep for 3 seconds in every loop and more time in every 3 loops
    # however it's not unstable please wait for a short period of time and try again if there is an error
    # NOTE(review): in this flattened source the rate-limit sleeps below sit
    # after the return and are unreachable -- confirm their intended position
    # against the original file's indentation.
    if int(date[0][0:2]) == 7 : time.sleep(15)
    elif int(date[0][0:2]) == 10 : time.sleep(20)
    else: time.sleep(3)
# the carbon intensity will be obtained monthly, from 2018 May to December
date_ls = [('05-01','05-31','may'), ('06-01','06-30','june'), ('07-01','07-31','july'), ('08-01','08-31','august'), ('09-01','09-30','september'), ('10-01','10-31','october'), ('11-01','11-30','november'), ('12-01','12-31','december')]
carbon_intensity_ls = []
for date in date_ls:
file_name_forecast = date[0][0:2] + 'forecast.txt'
file_name_index = date[0][0:2] + 'index.txt'
# if the command is 'remote', get the data from API directly
if source == 'remote':
result = get_data_1_remotely(date)
forecast_ls = result[0]
index_ls = result[1]
# if the command is "local", get the data from local files
elif source == 'local':
try:
open(file_name_forecast,'r')
open(file_name_index,'r')
# if the files do not exist, get the data from API and store it at first
except:
get_data_1_remotely(date)
forecast_ls = get_data_1_locally(file_name_forecast)
index_ls = get_data_1_locally(file_name_index)
# obtain the monthly average forecast, the forecast times of both high index and very high index from functions and store them in a list as a tuple for each month
carbon_intensity_ls.append((date[2],get_avg_forecast(forecast_ls),get_index_count(index_ls)[0],get_index_count(index_ls)[1]))
# print the sample result for May
print('Average:', carbon_intensity_ls[0][1], ' Very_high index:', carbon_intensity_ls[0][2], 'times High index:', carbon_intensity_ls[0][3], 'times')
# grab, store and manipulate data from source 2
print('\n\n\n\n*** Outputing Data Source 2 ***')
print('''This data source from API is life quality scores for London including housing, cost of living, startups, venture capital and other 13 scores.
I grab London's scores, which are related to tourism, including the cost of living, travel connectivity, safety, environmental quality, economy and internet access, and store them into a sqlite file.
Then I use these scores to obtain a ranking from the highest score to the lowest score.''')
print('\nAPI Endpoint: \nhttps://api.teleport.org/api/urban_areas/slug:london/scores/')
print('\nAPI Documentation: \nhttps://developers.teleport.org/api/getting_started/#life_quality_ua')
print('\nFile Name: \nlife_quality_scores.sqlite')
print('\nSample Result for Top 3 scores:')
# a function to get data from source 2 remotely from API
def get_data_2_remotely():
    """Fetch London life-quality scores from the Teleport API.

    Returns score_ls_short: [(score, name), ...] for six tourism-related
    categories, and caches them in life_quality_scores.sqlite the first
    time it runs (the file's existence is used as the cache check).
    """
    headers = {
        'Accept': 'application/vnd.teleport.v1+json'
    }
    url = 'https://api.teleport.org/api/urban_areas/slug:london/scores/'
    r = requests.get(url, params={}, headers = headers)
    data = r.json()
    score_ls = [] # a list with all scores obtained from API
    score_ls_short = [] # a list with scores that are related to tourism
    # obtain all scores from API
    for score in data['categories']:
        score_ls.append((score['score_out_of_10'],score['name']))
    # selected scores needed
    # NOTE(review): indices 1,4,7,10,11,13 are assumed to be the six
    # tourism-related categories in the API's fixed ordering -- this will
    # silently pick the wrong rows if the API reorders; TODO confirm.
    for i in [1,4,7,10,11,13]:
        score_ls_short.append(score_ls[i])
    # check if the data is stored locally, store the data into a sqlite file if not
    try: open('life_quality_scores.sqlite','r')
    except:
        conn = sqlite3.connect('life_quality_scores.sqlite')
        cur = conn.cursor()
        for score in score_ls_short:
            cur.execute('''
            CREATE TABLE IF NOT EXISTS Life_Quality_Scores
            (score REAL, title TEXT UNIQUE)''')
            cur.execute('INSERT OR IGNORE INTO Life_Quality_Scores (title,score) VALUES (?, ?)', (score[1],score[0]))
            conn.commit()
    return score_ls_short
# get the data remotely if command is "remote"
if source == 'remote' :
score_ls_short = get_data_2_remotely()
# get the data locally if command is "local"
elif source == 'local' :
try: open('life_quality_scores.sqlite','r')
# if the local file does not exist, get the data from API and store the data locally before getting local data
except: get_data_2_remotely()
conn = sqlite3.connect('life_quality_scores.sqlite')
cur = conn.cursor()
score_ls_short = cur.execute('SELECT * FROM Life_Quality_Scores').fetchall()
# rank the short score list from the highest to the lowest
score_ls_short.sort(reverse=True)
# print the top 3 as sample result
print(score_ls_short[0][1], score_ls_short[0][0])
print(score_ls_short[1][1], score_ls_short[1][0])
print(score_ls_short[2][1], score_ls_short[2][0])
# grab, store and manipulate data from source 3
print('\n\n\n\n*** Outputing Data Source 3 ***')
print('''This data source from a website is the weather information for London in each month in 2019.
I grab the average, high and low temperatures, average sunshine hours, average rainfall, rainfall days from May to December and store them into a splite file.''')
print('\nURL: \nBase URL: https://www.holiday-weather.com/london/averages/{MONTH}/ \nParameter {MONTH}: from "May" to "December" \ne.g. https://www.holiday-weather.com/london/averages/may/')
print('\nFile Name: \nweather.sqlite' )
print('\nSample Result for May 2019:')
# a function to get data from source 3 remotely from a website
def get_data_3_remotely():
    """Scrape holiday-weather.com for London's monthly weather averages.

    Returns weather_ls: one tuple per month from May to December --
    (month, avg_temp, high_temp, low_temp, sun_hour, rainfall, raindays) --
    and caches the rows in weather.sqlite the first time it runs.
    """
    weather_ls = []
    for month in ['may', 'june', 'july', 'august', 'september', 'october', 'november', 'december']:
        # only grab the data of May 2019 for sample output
        url= "https://www.holiday-weather.com/london/averages/" + month + '/'
        # Ignore the HTTP Error 403: Forbidden
        hdr = {'User-Agent': 'Mozilla/5.0'}
        req = Request(url,headers=hdr)
        html = urlopen(req)
        soup = BeautifulSoup(html, 'lxml')
        # only grab the average, high and low temperatures, average sunshine hours, average rainfall, rainfall days in May for sample output
        # NOTE(review): the <li>/<span> positions below are hard-coded to the
        # current page layout and will break if the site changes.
        all_data = soup.find_all('li')
        avg_temp = int(all_data[0].find_all('span')[3].find('span').get_text())
        high_temp = int(all_data[1].find_all('span')[2].find('span').get_text())
        low_temp = int(all_data[2].find_all('span')[2].find('span').get_text())
        sun_hour = int(all_data[3].find_all('span')[2].find('span').get_text())
        rainfall = int(all_data[4].find_all('span')[2].find('span').get_text())
        raindays = int(all_data[5].find_all('span')[2].find('span').get_text())
        weather_ls.append((month, avg_temp, high_temp, low_temp, sun_hour, rainfall, raindays))
    # check if the data is stored locally, store the data into a SQL file if not
    try: open('weather.sqlite','r')
    except:
        conn = sqlite3.connect('weather.sqlite')
        cur = conn.cursor()
        for weather in weather_ls:
            cur.execute('''
            CREATE TABLE IF NOT EXISTS Weather
            (month TEXT UNIQUE, average_temperature_°C INTEGER, high_temperature_°C INTEGER, low_temperature_°C INTEGER,
            sunshine_hours INTEGER, rainfall_mm INTEGER, rainfall_days INTEGER)''')
            cur.execute('INSERT OR IGNORE INTO Weather (month, average_temperature_°C, high_temperature_°C, low_temperature_°C, sunshine_hours, rainfall_mm, rainfall_days) VALUES (?, ?, ?, ?, ?, ?, ?)', (weather[0], weather[1], weather[2], weather[3], weather[4], weather[5], weather[6]))
            conn.commit()
    return weather_ls
# get the data remotely if command is "remote"
if source == 'remote' :
weather_ls = get_data_3_remotely()
# get the data locally if command is "local"
elif source == 'local' :
try: open('weather.sqlite','r')
# if the local file does not exist, get the data remotely, store the data locally before getting local data
except: get_data_3_remotely()
conn = sqlite3.connect('weather.sqlite')
cur = conn.cursor()
weather_ls = cur.execute('SELECT * FROM Weather').fetchall()
# print the sample result for May
print('Average temperature:', weather_ls[0][1], '°C High temperature:', weather_ls[0][2], '°C Low temperature:', weather_ls[0][3], '°C')
print('Sunshine hours:', weather_ls[0][4], ' Rainfall:', weather_ls[0][5], 'mm Rainfall days:', weather_ls[0][6])
# combine all data from 3 sources in one sqlite file
print('\n\n\n\n*** Integrating 3 Sources ***')
conn = sqlite3.connect('weather.sqlite')
cur = conn.cursor()
# create a new table in weather.sqlite named CarbonIntensity to store culculated result from source 1
cur.execute('''
CREATE TABLE IF NOT EXISTS CarbonIntensity
(month TEXT UNIQUE, average_carbon_intensity REAL, times_of_veryhigh_index INTEGER, times_of_high_index INTEGER)''')
try:
for carbon_intensity in carbon_intensity_ls:
cur.execute('INSERT INTO CarbonIntensity (month, average_carbon_intensity, times_of_veryhigh_index, times_of_high_index) VALUES (?, ?, ?, ?)', (carbon_intensity[0], carbon_intensity[1], carbon_intensity[2], carbon_intensity[3]))
conn.commit()
except: pass
# create a new table in weather.sqlite named ScoreRanking to store culculated result from source 2
cur.execute('''
CREATE TABLE IF NOT EXISTS ScoreRanking
(title TEXT UNIQUE, score REAL)''')
try:
for score in score_ls_short:
cur.execute('INSERT INTO ScoreRanking (title, score) VALUES (?, ?)',(score[1], score[0]))
conn.commit()
except: pass
print('Integrating 3 sources into "weather.sqlite" completed!')
# combine manipulated data from source 1 and 3 into one dataframe
# also create a dataframe for manipulated data from source 2
print('\n\n\n\n*** Building Dataframes ***')
conn = sqlite3.connect('weather.sqlite')
weather_df = pd.read_sql('''SELECT Weather.month, Weather.average_temperature_°C, Weather.high_temperature_°C, Weather.low_temperature_°C,
Weather.sunshine_hours, Weather.rainfall_mm, Weather.rainfall_days,
CarbonIntensity.average_carbon_intensity, CarbonIntensity.times_of_veryhigh_index, CarbonIntensity.times_of_high_index
FROM Weather JOIN CarbonIntensity ON Weather.month=CarbonIntensity.month''', con=conn)
# make the index start from 5 so each index number can represent the corresponding month
weather_df.index = weather_df.index + 5
score_ranking_df = pd.read_sql('SELECT * FROM ScoreRanking', con=conn)
# make the index start from 1 so each index number can represent the ranking of corresponding score
score_ranking_df.index = score_ranking_df.index + 1
print("A Dataframe of London's Predicted Temperature from May to December in 2019 & Corresponding Carbon Intensity in 2018")
print(weather_df)
print("\nA Dataframe of London's Ranked Life Quality Scores of London")
print(score_ranking_df)
# use temperature and carbon intensity data to plot a graph to predict the trend from May to December in 2018
print('\n\n\n\n*** Plotting A Graph ***')
fig = plt.figure(figsize=(10.0,7.0))
fig.suptitle("A Graph of London's Predicted Temperature from May to December in 2019 & Corresponding Carbon Intensity in 2018", fontsize=15)
ax1 = fig.add_subplot(111)
ax1.plot(weather_df[['high_temperature_°C', 'average_temperature_°C', 'low_temperature_°C']])
ax1.set_ylabel('Temperature(°C)')
ax2 = ax1.twinx()
ax2.plot(weather_df[['average_carbon_intensity']], color='red')
ax2.set_ylabel('Carbon Intensity', color='r')
for tl in ax2.get_yticklabels():
tl.set_color('r')
ax1.legend(['Predicted High Temperature', 'Predicted Average Temperature', 'Predicted Low Temperature'])
ax2.legend(['Average Carbon Intensity'])
plt.savefig('weather_and_carbon_intensity_graph.png')
plt.show()
print('Plotting a graph completed! \nThe graph is saved as a png file called weather_and_carbon_intensity_graph.png')
# conclusion of results
print('''\n\nConclusion: \nFrom the data frames and graph, we can see that August may have the lowest possibility to have air pollution related to high carbon intensity while December may have the highest possibility.
Besides, in August, the average temperature will be 19°C, the predicted average sunshine hours (6 hours) will be the second most and the number of rainfall days (13 days) will be the least.
Therefore, August will be the best month for people to visit London this year for a comfortable environment and weather.
Although 19°C is comfortable enough but it’s still the highest among the average temperatures of all months. People can choose May which average temperature will be 14°C with more rainfall days (15 days), and had the second least carbon intensity last year if they want a cooler trip with higher humidity.
\nRegarding the life quality in London, it has the best Travel Connectivity. The scores of Safety and Internet Access rank the second and the third respectively. Cost of Living has the lowest score.
It can be seen that London is a nice tourism city and safe. Tourists don't need to worry about the internet but they may need more budget to travel in London.
''') | true |
e46dbdbfabdbb2bfefe5f46c556d49ee1847c9e2 | Python | voussoir/reddit | /_old/EightBall/eightball.py | UTF-8 | 3,346 | 2.65625 | 3 | [
"BSD-3-Clause"
] | permissive | #/u/GoldenSights
import praw # simple interface to the reddit API, also handles rate limiting of requests
import time
import sqlite3
import random
'''USER CONFIGURATION'''
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
SUBREDDIT = "GoldTesting"
#This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subreddits, use "sub1+sub2+sub3+..."
NAME = ""
TRIGGERSTRINGA = ["I summon you /u/" + NAME, "I summon you great /u/" + NAME, "I summon you " + NAME, "I summon you great " + NAME, "Dear great /u/" + NAME, "Dear great " + NAME, NAME + ", roll the 8-ball", NAME + " roll the 8-ball"]
#These will trigger a response from replystringa
REPLYSTRINGA = ["Yes.", "No.", "That's a stupid question.", "Maybe some day.", "Try asking again.", "You should ask the admins."]
#This is a list of potential replies. Will be randomized.
TRIGGERSTRINGB = [NAME + " is dumb"]
#A second set of triggers
REPLYSTRINGB = ["No you."]
#A second set of responses. Will be randomized
TRIGGERLIST = [TRIGGERSTRINGA, TRIGGERSTRINGB]
REPLYLIST = [REPLYSTRINGA, REPLYSTRINGB]
#You can also add a third or fourth set of triggers and responses. Just make sure to add them to TRIGGERLIST and REPLYLIST
#The first list in TRIGGERLIST goes to the first list in REPLYLIST. Keep them in order.
#Triggerlist and Replylist must have the same number of items
MAXPOSTS = 100
#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
WAIT = 30
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
'''All done!'''
WAITS = str(WAIT)
try:
import bot
USERAGENT = bot.getaG()
except ImportError:
pass
sql = sqlite3.connect('sql.db')
print('Loaded SQL Database')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS oldposts(ID TEXT)')
cur.execute('CREATE INDEX IF NOT EXISTS oldpost_index ON oldposts(id)')
print('Loaded Completed table')
sql.commit()
print("Logging in " + NAME)
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
def scanSub():
    """Scan the subreddit's newest comments and reply to trigger phrases.

    Uses the module-level reddit session (r), sqlite cursor (cur) and
    trigger/reply configuration. Every processed comment id is recorded in
    the oldposts table so a comment is never answered twice.
    """
    print('Scanning ' + SUBREDDIT)
    subreddit = r.get_subreddit(SUBREDDIT)
    comments = subreddit.get_comments(limit=MAXPOSTS)
    for comment in comments:
        cur.execute('SELECT * FROM oldposts WHERE ID=?', [comment.id])
        if not cur.fetchone():
            cbody = comment.body.lower()
            response = ''
            for m in range(len(TRIGGERLIST)):
                # First matching trigger set wins; response == '' guards
                # against later sets overwriting the chosen reply.
                if any(trigger.lower() in cbody for trigger in TRIGGERLIST[m]) and response == '':
                    print(comment.id)
                    # Shuffle so the reply is picked at random from the set.
                    random.shuffle(REPLYLIST[m])
                    response = REPLYLIST[m][0]
            if response != '':
                print('\tPosting response')
                comment.reply(response)
            cur.execute('INSERT INTO oldposts VALUES(?)',[comment.id])
    sql.commit()
# Main loop: scan forever, surviving transient API/network errors, and
# pause WAIT seconds between passes so the bot stays within rate limits.
while True:
    try:
        scanSub()
    except Exception as e:
        print('An error has occured:', e)
    print('Running again in ' + WAITS + ' seconds \n')
    sql.commit()
    time.sleep(WAIT)
9f160c16a374595ee904fe4d51c6dc75f7dc0981 | Python | aaguasvivas/CodeBreakersCode | /9.Dynamic_Programming/DP/fibonacciDP.py | UTF-8 | 208 | 2.9375 | 3 | [] | no_license | def fibonacci(n):
if n == 0:
return 0
dp = [0]*(n + 1)
d[1] = 1
for i in range(2, n + 1):
dp[i] = dp[i - 1] + dp[i - 2]
return dp[n]
if __name__ == '__main__':
pass
| true |
1ab4fa27d2b4c5f5ee6be15f8a279a26748131a7 | Python | muskanjindal24/18IT040_IT374_Python_Programming | /TASK14[Practice Assignments]/BirthdayJson.py | UTF-8 | 465 | 3.9375 | 4 | [] | no_license | import json
import time
birthday = {}
with open('birthdays.json', 'r') as f:
birthday = json.load(f)
print("Welcome to the Birthday dictionary! We know the birthdays of:")
time.sleep(1)
for x in birthday:
print(x)
time.sleep(0.7)
choice= input("Who's birthday do you want to look up?")
if birthday[choice]:
print('{} is born on {}\n'.format(choice, birthday[choice]))
else:
print('{} is not in the list\n'.format(choice)) | true |
3bcaef314063e810be5f2b5b5294792776b1d4a9 | Python | RustyBower/pdst | /tests/test_analysis.py | UTF-8 | 1,914 | 2.6875 | 3 | [
"MIT"
] | permissive | import unittest
from parameterized import parameterized
from pdst import analysis
class TestMetadata(unittest.TestCase):
    """Parameterised unit tests for the colour helpers in pdst.analysis.

    Colours are RGB(A) component lists; each @parameterized.expand row is
    one (inputs..., expected) case run as its own test.
    """

    @parameterized.expand([
        ([0, 0, 0, 0], [0, 0, 0, 0], True),
        ([0, 1, 0, 0], [1, 0, 0, 0], True),
        ([0, 0, 0, 255], [0, 0, 0, 0], False),
        ([0, 1, 0, 0], [1, 0, 1, 1], True),
        ([0, 0, 0, 255], [5, 5, 0, 255], False),
        ([0, 0, 0], [5, 5, 0], False),
        ([0, 0, 0], [0, 0, 0, 0], True),
    ])
    def test_nearlyEqual(self, arr1, arr2, expected):
        # Colours within a small per-component tolerance compare equal;
        # RGB and RGBA inputs may be mixed.
        self.assertEqual(expected, analysis.nearlyEqual(arr1, arr2))

    @parameterized.expand([
        ([[0, 0, 0, 0]], [100], [[0, 0, 0, 0]], [100]),
        ([[0, 0, 0, 0], [0, 0, 0, 0]], [100, 100],
         [[0, 0, 0, 0]], [200]),
        ([[0, 0, 0, 0], [255, 255, 255, 255], [1, 1, 1, 0]], [100, 100, 100],
         [[0, 0, 0, 0], [255, 255, 255, 255]], [200, 100]),
    ])
    def test_mergeSimilar(self, inColors, inCounts, expectedColors, expectedCounts):
        # Near-identical colours collapse into one entry whose count is the
        # sum of the merged entries' counts.
        outColors, outCounts = analysis.mergeSimilar(inColors, inCounts)
        self.assertEqual(expectedColors, outColors)
        self.assertEqual(expectedCounts, outCounts)

    @parameterized.expand([
        ([0, 0, 0, 0], False, False),
        ([0, 0, 0, 0], True, False),
        ([0, 0, 0, 255], False, True),
        ([0, 0, 0, 255], True, False),
        ([16, 16, 16, 255], True, False),
        ([17, 17, 17, 255], True, True),
        ([238, 238, 238, 255], True, True),
        ([239, 239, 239, 255], True, False),
        ([255, 255, 255, 255], True, False),
    ])
    def test_isColorAcceptable(self, color, noBlackWhite, expected):
        # Fully transparent colours are never acceptable; with noBlackWhite
        # set, near-black (<17) and near-white (>238) components are rejected.
        acceptable = analysis.isColorAcceptable(color, noBlackWhite)
        self.assertEqual(expected, acceptable)

    def test_isValidImage_nonImage(self):
        # A Python source file is not a decodable image.
        self.assertFalse(analysis.isValidImage(__file__))
| true |
1ae6a1c27da616f4c8b772c8e1b549542bb06796 | Python | bazhenovstepan/infa_2020_bazhenov | /Refactoring_Lab4_2.py | UTF-8 | 12,050 | 2.9375 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[2]:
import pygame
import pygame.gfxdraw
from pygame.draw import*
import numpy
pygame.init()
WHITE = (255,255,255) # Белый цвет
BLACK = (0,0,0) # Чёрный цвет
BLUE = (180,210,245) # Голубой цвет
GRAY = (180,180,180) # Серый цвет
LIME = (160,255,130) # Цвет зелёной поляны (оттенок зеленого)
GREEN = (140,235,70) # Цвет зеленых кустов (оттенок зеленого)
YELLOW = (255,255,0) # Жёлтый цвет
PURPLE = (230, 130, 200) # Фиолетовый цвет
RED = (255,0,0) # Красный цвет
sc = pygame.display.set_mode((620, 760))
# sc - главная плоскость, голубого цвета (задаёт цвет небу на рисунке)
sc.fill(BLUE)
# Рисуем серые горы
polygon(sc, GRAY, [(0,280), (80,80),(150,220), (230, 120), (370,360), (480, 100), (520, 140), (620, 20), (620, 430), (0,430), (0,280)], 0)
aalines(sc, BLACK, False, [(0,280), (80,80),(150,220), (230, 120), (370,360), (480, 100), (520, 140), (620, 20)])
back = pygame.Surface((620, 760))
back.fill(GRAY)
sc.blit(back, (0,430))
# Рисуем зеленую поляну
polygon(sc, LIME, [(0,455), (30,445), (60,440), (76,440), (136, 435), (300, 435), (307, 437), (310, 436), (312,440), (316,443), (316,460), (318,462), (318, 500), (325,502),(330,504),(620,504), (620,760), (0,760),(0,455)], 0)
aalines(sc, BLACK, False, [(0,455), (30,445), (60,440), (76,440), (136, 435), (300, 435), (307, 437), (310, 436), (312,440), (316,443), (316,460), (318,462), (318, 500), (325,502),(330,504),(620,504)])
flower = pygame.Surface((620,760))
flower.fill(RED)
flower.set_colorkey(RED)
circle(flower, GREEN, (520, 640), 90)
k = pygame.Surface((400,400))
k.fill(RED)
k.set_colorkey(RED)
a = 200
b = 200
d = a
t = b - 35
pygame.gfxdraw.filled_ellipse(k,d,t,50,20,WHITE)
pygame.gfxdraw.aaellipse(k,d,t,50,20,BLACK)
d = a + 55
t = b - 20
pygame.gfxdraw.filled_ellipse(k,d,t,50,20,WHITE)
pygame.gfxdraw.aaellipse(k,d,t,50,20,BLACK)
d = a - 55
t = b - 25
pygame.gfxdraw.filled_ellipse(k,d,t,50,20,WHITE)
pygame.gfxdraw.aaellipse(k,d,t,50,20,BLACK)
pygame.gfxdraw.filled_ellipse(k,a,b,50,20,YELLOW)
d = a - 90
t = b + 5
pygame.gfxdraw.filled_ellipse(k,d,t,50,20,WHITE)
pygame.gfxdraw.aaellipse(k,d,t,50,20,BLACK)
pygame.gfxdraw.filled_ellipse(k,a,b,50,20,YELLOW)
d = a + 80
t = b
pygame.gfxdraw.filled_ellipse(k,d,t,50,20,WHITE)
pygame.gfxdraw.aaellipse(k,d,t,50,20,BLACK)
d = a - 40
t = b + 20
pygame.gfxdraw.filled_ellipse(k,d,t,50,20,WHITE)
pygame.gfxdraw.aaellipse(k,d,t,50,20,BLACK)
d = a + 40
t = b + 25
pygame.gfxdraw.filled_ellipse(k,d,t,50,20,WHITE)
pygame.gfxdraw.aaellipse(k,d,t,50,20,BLACK)
flower1 = pygame.transform.scale(k, (k.get_width()//4,k.get_height()//4))
flower2 = pygame.transform.scale(k, (k.get_width()//5,k.get_height()//5))
def flower_1(x,y,g):
    '''
    Draws a large flower on the green bush surface.
    flower1 - module-level pygame.Surface holding the large flower image
    x,y - coordinates of the top-left corner of the blitted image
    g - rotation angle of the image plane, in degrees
    '''
    flower.blit(pygame.transform.rotate(flower1, g), (x,y))
flower_1(420,530,20)
flower_1(490,630,-10)
def flower_2(x,y,g):
    '''
    Draws a small flower on the green bush surface.
    flower2 - module-level pygame.Surface holding the small flower image
    x,y - coordinates of the top-left corner of the blitted image
    g - rotation angle of the image plane, in degrees
    '''
    flower.blit(pygame.transform.rotate(flower2, g), (x,y))
flower_2(510,550, -30)
flower_2(430,600,20)
flower_2(500,590,-40)
animal = pygame.Surface((620,760))
animal.fill(RED)
animal.set_colorkey(RED)
telo = pygame.Surface((620,760))
telo.fill(RED)
telo.set_colorkey(RED)
def risuem_telo(surface,x,y,width,height,color):
    '''
    Draws the animal's body as a filled ellipse.
    surface - pygame.Surface object to draw on
    x,y - coordinates of the ellipse centre (gfxdraw ellipses are centre-based)
    width,height - horizontal and vertical radii of the ellipse
    color - colour in a format accepted by pygame.Color
    '''
    pygame.gfxdraw.filled_ellipse(surface,x,y,width,height,color)
risuem_telo(telo,180,580,60,25,WHITE)
neck = pygame.Surface((500,500))
neck.fill(GREEN)
neck.set_colorkey(GREEN)
def risuem_sheyu(surface,x,y,width,height,color):
    '''
    Draws the animal's neck as a filled ellipse.
    surface - pygame.Surface object to draw on
    x,y - coordinates of the ellipse centre (gfxdraw ellipses are centre-based)
    width,height - horizontal and vertical radii of the ellipse
    color - colour in a format accepted by pygame.Color
    '''
    pygame.gfxdraw.filled_ellipse(surface,x,y,width,height,color)
risuem_sheyu(neck,122,130,50,18,WHITE)
sheya = pygame.transform.rotate(neck,90)
roga = pygame.Surface((60,80))
roga.set_colorkey(BLACK)
horn1 = pygame.Surface((600,600))
horn1.fill(GREEN)
horn1.set_colorkey(GREEN)
polygon(horn1, WHITE, [(50,50),(54,58),(60,62),(68,64),(74,74),(70,74),(60,68),(56,64),(54,60),(52,56),(50,50)],0)
horn2 = horn1.copy()
def risuem_rog(surface, x,y):
    '''
    Draws one horn of the animal by blitting `surface` onto the shared
    horn surface `roga`.
    x,y - coordinates of the top-left corner of the blitted image
    '''
    roga.blit(surface, (x,y))
risuem_rog(horn1,-30,-30)
risuem_rog(horn2, -40, -25)
golova = pygame.Surface((500,500))
golova.fill(GREEN)
golova.set_colorkey(GREEN)
def risuem_golovu(surface,x,y,width,height,color):
    '''
    Draws the animal's head as a filled ellipse.
    surface - pygame.Surface object to draw on
    x,y - coordinates of the ellipse centre (gfxdraw ellipses are centre-based)
    width,height - horizontal and vertical radii of the ellipse
    color - colour in a format accepted by pygame.Color
    '''
    pygame.gfxdraw.filled_ellipse(surface,x,y,width,height,color)
risuem_golovu(golova,142,319,22,14,WHITE)
def risuem_glaz(surface,x,y,radius,color):
    '''
    Draws the animal's eye as a filled circle.
    surface - pygame.Surface object to draw on
    x,y - coordinates of the circle centre
    radius - radius of the circle
    color - colour in a format accepted by pygame.Color
    '''
    pygame.gfxdraw.filled_circle(surface,x,y,radius,color)
risuem_glaz(golova,140,317,9,PURPLE)
risuem_glaz(golova,143,316,4,BLACK)
eye = pygame.Surface((1200,700))
eye.fill(GREEN)
eye.set_colorkey(GREEN)
pygame.gfxdraw.filled_ellipse(eye,885,665,70,30,WHITE)
eyeball = pygame.transform.scale(eye, (eye.get_width()//16,eye.get_height()//16))
glazok = pygame.transform.rotate(eyeball,-30)
leg = pygame.Surface((600,600))
leg.fill(GREEN)
leg.set_colorkey(GREEN)
def risuem_nogu(surface,x,y,width,height,color):
    '''
    Draws one leg segment of the animal as a filled ellipse.
    surface - pygame.Surface object to draw on
    x,y - coordinates of the ellipse centre (gfxdraw ellipses are centre-based)
    width,height - horizontal and vertical radii of the ellipse
    color - colour in a format accepted by pygame.Color
    '''
    pygame.gfxdraw.filled_ellipse(surface,x,y,width,height,color)
risuem_nogu(leg,88,63,24,10,WHITE)
risuem_nogu(leg,107,37,24,10,WHITE)
risuem_nogu(leg,88,120,24,10,WHITE)
risuem_nogu(leg,107,100,24,10,WHITE)
risuem_nogu(leg,46,63,20,10,WHITE)
risuem_nogu(leg,65,37,20,10,WHITE)
risuem_nogu(leg,46,120,20,10,WHITE)
risuem_nogu(leg,65,100,20,10,WHITE)
noga = pygame.transform.rotate(leg, 90)
stupnya = pygame.Surface((600,600))
stupnya.fill(GREEN)
stupnya.set_colorkey(GREEN)
def risuem_stupnyu(surface,x,y,width,height,color):
    '''
    Draws one foot of the animal as a filled ellipse.
    surface - pygame.Surface object to draw on
    x,y - coordinates of the ellipse centre (gfxdraw ellipses are centre-based)
    width,height - horizontal and vertical radii of the ellipse
    color - colour in a format accepted by pygame.Color
    '''
    pygame.gfxdraw.filled_ellipse(surface,x,y,width,height,color)
risuem_stupnyu(stupnya,21,160,10,7,WHITE)
risuem_stupnyu(stupnya,84,160,10,7,WHITE)
risuem_stupnyu(stupnya,47,179,10,7,WHITE)
risuem_stupnyu(stupnya,104,179,10,7,WHITE)
def risuem_zhivotnoe(surface, x,y):
    '''
    Assembles the animal by blitting a body-part surface onto the shared
    `animal` surface.
    surface - pygame.Surface object holding one body part
    x,y - coordinates of the top-left corner of the blitted image
    '''
    animal.blit(surface, (x,y))
risuem_zhivotnoe(roga,199,421)
risuem_zhivotnoe(sheya,100,150)
risuem_zhivotnoe(golova, 100,150)
risuem_zhivotnoe(glazok,190,400)
risuem_zhivotnoe(noga,100,100)
risuem_zhivotnoe(stupnya, 120,500)
risuem_zhivotnoe(telo, 0,0)
# Копируем изображения животных, отражаем их относительно вертикали и изменяем их размер
animal1 = pygame.transform.scale(animal, (animal.get_width()*2, animal.get_height()*2))
animal2 = pygame.transform.scale(animal, (animal.get_width()//2, animal.get_height()//2))
animal3 = pygame.transform.scale(animal, (animal.get_width()//2, animal.get_height()//2))
animal4 = pygame.transform.scale(animal, (animal.get_width()//2, animal.get_height()//2))
animal5 = pygame.transform.scale(animal, (720,860))
animal6 = pygame.transform.flip(animal5,True,False)
animal7 = pygame.transform.flip(animal4,True,False)
# Копируем изображения зеленых кустов и изменяем их размер
bush1 = pygame.transform.scale(flower, (flower.get_width()//3, flower.get_height()//3))
bush2 = pygame.transform.scale(flower, (flower.get_width()//3, flower.get_height()//3))
bush3 = pygame.transform.scale(flower, (flower.get_width()//3, flower.get_height()//3))
bush4 = pygame.transform.scale(flower, (360,450))
bush5 = pygame.transform.scale(flower, (280, 350))
bush6 = pygame.transform.scale(flower, (420, 560))
def kust(surface,x,y):
    '''
    Blits a green bush (with flowers) onto the main screen surface `sc`.
    surface - pygame.Surface object holding the bush image
    x,y - coordinates of the top-left corner of the blitted image
    '''
    sc.blit(surface, (x,y))
kust(bush1, -152,261)
kust(bush2, 270,323)
kust(bush3, 450,520)
kust(bush4, 180,300)
kust(bush5, 360,200)
kust(bush6, 260,150)
def zhivotnoe(surface,x,y):
    '''
    Blits an animal image onto the main screen surface `sc`.
    surface - pygame.Surface object holding the animal image
    x,y - coordinates of the top-left corner of the blitted image
    '''
    sc.blit(surface, (x,y))
zhivotnoe(animal1, -400,-350)
zhivotnoe(animal2, 160,130)
zhivotnoe(animal3, 80,220)
zhivotnoe(animal6, 115,-52)
zhivotnoe(animal7, 80,270)
help(flower_1)
help(flower_2)
help(risuem_telo)
help(risuem_sheyu)
help(risuem_rog)
help(risuem_golovu)
help(risuem_glaz)
help(risuem_nogu)
help(risuem_stupnyu)
help(risuem_zhivotnoe)
help(kust)
help(zhivotnoe)
pygame.display.update()
clock = pygame.time.Clock()
finished = False
while not finished:
clock.tick(30)
for event in pygame.event.get():
if event.type == pygame.QUIT:
finished = True
pygame.quit()
# In[ ]:
# In[ ]:
| true |
a56cad5d2bcb97e3b50edeeaeca5ef13a654fc36 | Python | daniel-reich/ubiquitous-fiesta | /dMcvdFzSvvqdLJBEC_12.py | UTF-8 | 229 | 3.125 | 3 | [] | no_license |
def num_of_days(cost, savings, start):
    """Return the number of whole days needed to cover `cost` given the
    current `savings` and a starting level `start`.

    Solves the quadratic 3.5*w**2 + (21 + 7*start - 3.5)*w = cost - savings
    for w (weeks) and rounds the resulting day count up to a full day.
    """
    remaining = cost - savings
    base = 21 + 7 * start
    quad_a = 7 / 2
    quad_b = base - 7 / 2
    weeks = ((quad_b ** 2 + 4 * quad_a * remaining) ** 0.5 - quad_b) / (2 * quad_a)
    whole_days = int(weeks * 7)
    # Round up when the week count is not an exact multiple.
    return whole_days + 1 if weeks % 1 != 0 else whole_days
| true |
78e520e8b9b938ef19c014e35cbc01a777dfd8ad | Python | qiuxiaoshuang/python-exercise100 | /python_exercise_100/36.py | UTF-8 | 359 | 3.75 | 4 | [] | no_license | # 题目:求100之内的素数。(用一个数分别去除2到sqrt(这个数),如果能被整除,则表明此数不是素数,反之是素数)
import math
def is_prime(n):
    """Return True if n is prime, False otherwise (trial division up to sqrt(n)).

    Fixes two defects in the original: it returned the number itself instead
    of True for primes, and it accepted 0 and negative numbers as "prime"
    because only n == 1 was rejected explicitly and the trial-division loop
    never runs for small n. Truthiness is preserved for existing callers
    (primes were truthy before, composites falsy).
    """
    if n < 2:
        return False
    for divisor in range(2, int(math.sqrt(n)) + 1):
        if n % divisor == 0:
            return False
    return True
# Check every integer from 1 to 99 and print the primality-test result.
A=range(1,100)
for a in A:
    print(is_prime(a))
| true |
42dadc962f8a0a3b2aa8a832fba27bcb7dedd4e1 | Python | JPHutchins/pyavreceiver | /pyavreceiver/error.py | UTF-8 | 496 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | """pyavreceiver errors."""
class AVReceiverError(Exception):
    """Base class for all pyavreceiver library errors."""
class AVReceiverInvalidArgumentError(AVReceiverError):
    """Raised when an invalid argument is supplied to the receiver API."""
class AVReceiverIncompatibleDeviceError(AVReceiverError):
    # Docstring fixed: it was copy-pasted from the invalid-argument error.
    """Raised when the connected device is not compatible with this library."""
class QosTooHigh(AVReceiverError):
    """Raised when a command requests the highest QoS level, which is
    reserved for internally resent commands."""

    def __init__(self):
        msg = "Highest QoS value is reserved for resent commands."
        super().__init__(msg)
        # Keep the message available as an attribute for callers.
        self.message = msg
| true |
cf7f587e416108f931f436ccd64da06ae9c4cdfe | Python | cmg-dev/fp_python | /01_getting_started/aufgabe2/loesung2.py | UTF-8 | 805 | 3.984375 | 4 | [
"MIT"
] | permissive | import unittest
"""
Aufgabe 2:
Zahlen-Strings in Integer konvertieren
Konkret:
Die Zahlen im String sollen in Integer konvertiert werden
Beispiel:
"78+67+65" = [78, 67, 65]
"""
class Testsuite(unittest.TestCase):
    """Checks konvertiere against the reference result and builtin int-mapping."""
    zahlen = "78+101+118+101+114+32+67+111+100+101+32+65+108+111+110+101"
    ergebnis = [78, 101, 118, 101, 114, 32, 67, 111, 100, 101, 32, 65, 108, 111, 110, 101]
    def test(self):
        self.assertListEqual(self.ergebnis, konvertiere(self.zahlen.split("+")))
        # Bug fix: in Python 3 map() returns an iterator, which can never be
        # list-equal to a list, so the original bare map() call always failed.
        self.assertListEqual(self.ergebnis, list(map(int, self.zahlen.split("+"))))
def konvertiere(zahlen):
    """Convert a sequence of numeric strings to a list of ints.

    Replaces the hand-rolled recursive map, which crashed with IndexError
    on an empty input, shadowed the builtin `list`, and would hit the
    recursion limit on long inputs.
    """
    return [int(zahl) for zahl in zahlen]
| true |
dad538342ec97a18b96982f4d50a4a3722352d46 | Python | chandanadasarii/CrackingTheCodingInterview | /CrackingTheCodingInterview/LinkedList/reverse.py | UTF-8 | 406 | 3.578125 | 4 | [] | no_license | from LinkedList import *
def reverseIterative(head):
    """Reverse a singly linked list in place and return the new head.

    Walks the chain once, repointing each node's `next` at the node that
    preceded it. O(n) time, O(1) extra space; returns None for an empty list.
    """
    reversed_head = None
    node = head
    while node:
        # Simultaneous assignment: RHS is evaluated before any rebinding.
        node.next, reversed_head, node = reversed_head, node, node.next
    return reversed_head
# Build a list 1..7, print it, reverse it recursively, and print it again.
inp = [1,2,3,4,5,6,7]
l = LinkedList(Node(inp[0]))
for i in range(1, len(inp)):
    l.append(Node(inp[i]))
l.display_list()
# l.head = reverseIterative(l.head)
l.reverseRecursive(l.head)
l.display_list()
6893c712893aa1a69f0ffb8a70d12786b2bad380 | Python | jaychoi12/FINE | /dividemix/svd_classifier.py | UTF-8 | 2,546 | 2.53125 | 3 | [
"MIT"
] | permissive | import torch
import numpy as np
from tqdm import tqdm
from sklearn import cluster
import torch.nn.functional as F
def get_loss_list(model, data_loader):
    """Compute per-sample cross-entropy losses over `data_loader`, cluster
    them into two groups with KMeans, and return the indices of samples in
    the lower-loss ("clean") cluster.

    Requires a CUDA device: batches and labels are moved to the GPU.
    """
    loss_list = np.empty((0,))
    with tqdm(data_loader) as progress:
        for batch_idx, (data, label, index) in enumerate(progress):
            data = data.cuda()
            label = label.long().cuda()
            prediction = model(data)
            # reduction='none' keeps one loss value per sample.
            loss = torch.nn.CrossEntropyLoss(reduction='none')(prediction, label)
            loss_list = np.concatenate((loss_list, loss.detach().cpu()))
    # Two-cluster split of the 1-D loss values.
    kmeans = cluster.KMeans(n_clusters=2, random_state=0).fit(loss_list.reshape(-1,1))
    # The cluster with the smaller mean loss is treated as "clean".
    if np.mean(loss_list[kmeans.labels_==0]) > np.mean(loss_list[kmeans.labels_==1]):
        clean_label = 1
    else:
        clean_label = 0
    output=[]
    for idx, value in enumerate(kmeans.labels_):
        if value==clean_label:
            output.append(idx)
    return output
def singular_label(v_ortho_dict, model_represents, label):
    """Return indices of samples whose feature vector projects more strongly
    on the first singular vector of its class than on the second.

    v_ortho_dict: class label -> top-2 right-singular vectors (torch tensors,
    assumed to already be on the CUDA device -- TODO confirm with caller).
    model_represents: numpy array of per-sample features; label: per-sample
    class labels (tensor-like, .item() is used).
    """
    # Start with every sample marked "clean" (True).
    sing_lbl = torch.zeros(model_represents.shape[0]) == 0.
    for i, data in enumerate(model_represents):
        data = torch.from_numpy(data).cuda()
        # Mark as noisy when the 2nd singular direction dominates.
        if torch.dot(v_ortho_dict[label[i].item()][0], data).abs() < torch.dot(v_ortho_dict[label[i].item()][1], data).abs():
            sing_lbl[i] = False
    output=[]
    for idx, value in enumerate(sing_lbl):
        if value:
            output.append(idx)
    return output
def get_out_list(model, data_loader):
    """Extract pooled intermediate features and labels for every sample.

    Runs the model in eval mode on CUDA, takes the layer-4 output
    (model.forward(..., lout=4)), average-pools and flattens it, and returns
    (label_list, out_list) as numpy-compatible arrays on the CPU.
    """
    label_list = np.empty((0,))
    model.eval()
    model.cuda()
    with tqdm(data_loader) as progress:
        for batch_idx, (data, label, index) in enumerate(progress):
            data = data.cuda()
            label = label.long().cuda()
            output = model.forward(data, lout=4)
            output = F.avg_pool2d(output, 4)
            # Flatten to (batch, features).
            output = output.view(output.size(0), -1)
            label_list = np.concatenate((label_list, label.cpu()))
            if batch_idx == 0:
                out_list = output.detach().cpu()
            else:
                out_list = np.concatenate((out_list, output.detach().cpu()), axis=0)
    return label_list, out_list
def get_singular_value_vector(label_list, out_list):
    """Per-class SVD summary of the feature matrix.

    For every distinct class in `label_list`, compute the gap between the
    two largest singular values (s0 - s1) of that class's rows of
    `out_list`, and keep the top-2 right-singular vectors as a torch tensor.

    Returns (singular_dict, v_ortho_dict), both keyed by class label.
    """
    singular_dict, v_ortho_dict = {}, {}
    for cls in np.unique(label_list):
        class_features = out_list[label_list == cls]
        _, sing_vals, right_vecs = np.linalg.svd(class_features)
        singular_dict[cls] = sing_vals[0] - sing_vals[1]
        v_ortho_dict[cls] = torch.from_numpy(right_vecs[:2])
    return singular_dict, v_ortho_dict
5fb413fbc9ef188a128eda110f8d6395f985ea6a | Python | DcortezMeleth/hts_programming | /task_1.py | UTF-8 | 488 | 3.09375 | 3 | [] | no_license | from itertools import permutations
result = ''
with open('entrywordlist.txt') as entries_file:
for line in entries_file:
permutation_list = [''.join(p) for p in permutations(line.strip())]
with open('wordlist.txt') as wordlist_file:
for line2 in wordlist_file:
word = line2.strip()
if word in permutation_list:
result += word
result += ','
break
print result[:-1] | true |
4a44ea2fc10d06b0ce9d04565a26ce5222690d2f | Python | grimley517/sampTask | /t1runner.py | UTF-8 | 2,525 | 3.75 | 4 | [] | no_license | '''
this is the runner for task 1 - this runner implements the tested functions in task1.py and wraps them in a menu system
'''
import task1 as t1
def menuChoice():
    """Display the main menu and return the selected option index (0-2).

    NOTE(review): relies on Python 2 input() evaluating the entry to an
    int; under Python 3 input() returns a str and this loop never ends.
    """
    choice = None
    choices = ['change exchange rates', 'convert a value into another currency', 'exit']
    print ('Please choose a choice from the menu:')
    for i,c in enumerate(choices):
        print ('press {0} to {1}'.format(i,c))
    # Re-prompt until a valid menu index is entered.
    while choice not in range(len(choices)):
        choice = input ('Please select from the list {0} '.format(range(len(choices))))
    print ('\n ------------------\n')
    return (choice)
def chgRates():
    """Prompt for a currency symbol (GBP excluded) and store its new
    exchange rate via t1.setRate.

    NOTE(review): Python 2 only -- dict.keys() must return a list that
    supports .remove(), and input() must evaluate to an int.
    """
    symbols = t1.rates.keys()
    symbols.remove('GBP')#Pounds should not be changed
    print ('which rate do you wish to change?')
    for i, symbol in enumerate(symbols):
        print ('Enter {0} for {1}'.format(i, symbol))
    choice = None
    # Re-prompt until a valid symbol index is entered.
    while choice not in range(len(symbols)):
        choice = input ('please enter your choice, Please select from the list {0} '.format(range(len(symbols))))
    newRate = float(input('Please enter the new rate '))
    t1.setRate(symbols[choice],newRate)
    print ('\n ------------------\n')
def cvrt():
    """Interactively convert an amount from one currency to another.

    Prompts for a source currency, a target currency and an amount, then
    prints the converted value via t1.convert.
    NOTE(review): Python 2 only (input() is expected to evaluate ints).
    """
    symbols = t1.rates.keys()
    print ('which Currency do you wish to change from? ')
    for i, symbol in enumerate(symbols):
        print ('Enter {0} for {1}'.format(i, symbol))
    fromSym = None
    while fromSym not in range(len(symbols)):
        fromSym = input ('please enter your choice, Please select from the list {0} '.format(range(len(symbols))))
    fromSym = int(fromSym)
    # Replace the index with the actual currency symbol.
    fromSym = symbols[fromSym]
    print ('\n ------------------\n')
    print ('which Currency do you wish to change to? ')
    for i, symbol in enumerate(symbols):
        print ('Enter {0} for {1}'.format(i, symbol))
    toSym = None
    while toSym not in range(len(symbols)):
        toSym = input ('please enter your choice, Please select from the list {0} '.format(range(len(symbols))))
    toSym = int(toSym)
    toSym = symbols[toSym]
    print ('\n ------------------\n')
    value = float(input('Please enter the ammount of {0} you wish to change into {1}'.format(fromSym, toSym)))
    value = t1.convert(fromSym, toSym, value)
    print ('Your converted ammount is {0} {1:.2f}'.format(toSym, value))
    print ('\n ------------------\n')
# Top-level menu loop: 0 -> change rates, 1 -> convert, anything else exits.
running = True
while running:
    selection = menuChoice()
    if selection == 0:
        chgRates()
    elif selection == 1:
        cvrt()
    else:
        running = False
| true |
fbd5382ba0fb2b00e37b907cede47249f9799561 | Python | gery2/FYS3150---Project-5 | /5aSim.py | UTF-8 | 1,734 | 3.359375 | 3 | [] | no_license | #5a): Simulation of Transactions, and 5b): Recongnizing the distribution.
import numpy as np
from numba import jit
import matplotlib.pyplot as plt
from tqdm import tqdm
plt.rcParams.update({'font.size': 14})
max = 7 #max money value
n = 10**6 #10**7
N = 500 #number of agents
m0 = 1
mc = 10**3 #10**3 - 10**4
dm = 7/500 #0.01 - 0.05 (7/500=0.014)
beta = 1/m0
#Transactions
@jit(nopython=True)
def trans(m, n, m_):
    """Perform n random pairwise money exchanges between agents.

    m  - array of agent balances (modified in place and returned)
    n  - number of transactions to simulate (shadows the module-level n)
    m_ - scratch array of the same length as m
    N (the number of agents) is a module-level constant.
    """
    for k in range(n):
        eps = np.random.uniform(0,1)
        i = np.random.randint(0,N)
        j = i
        # Draw a second, distinct agent.
        while i == j:
            j = np.random.randint(0,N)
        # Redistribute the pair's combined money by the random fraction eps.
        m_[i] = eps*(m[i] + m[j])
        m_[j] = (1 - eps)*(m[i]+ m[j])
        m[i] = m_[i]
        m[j] = m_[j]
    return m
#Monte Carlo cycles
def MC(n,N,m0,mc,dm,beta):
    """Run mc Monte Carlo cycles of the money-exchange model and return the
    accumulated histogram of agent balances.

    n  - transactions per cycle;  N  - number of agents
    m0 - initial money per agent; mc - number of Monte Carlo cycles
    dm - histogram bin width;     beta - unused here, kept for interface
         compatibility (the analytical comparison is done by the caller)
    """
    # Bin edges are loop-invariant; the original rebuilt them every cycle.
    # 'max' is the module-level money cap, not the builtin.
    bins = np.arange(0, max + dm, dm)
    M = np.zeros(len(bins) - 1)
    for l in tqdm(range(mc)):
        m = np.array([m0]*N, dtype=np.float64)
        m_ = np.zeros(N)
        m = trans(m, n, m_)
        counts, _ = np.histogram(m, bins=bins)
        M += counts
    # Removed a stray plt.show() that ran here: no figure had been drawn at
    # this point, so it displayed nothing and could block; plotting is done
    # by the caller.
    return M
M = MC(n,N,m0,mc,dm,beta)
M = M/np.sum(M)
x = np.linspace(0,max,len(M))
plt.figure(figsize=(10,6))
plt.plot(x, M)
plt.title('Money per agent. 10^%d transactions. MC = %d' %(int(np.log10(n)), mc))
plt.xlabel('money')
plt.ylabel('amount of agents')
plt.show()
wm_analytical = beta*np.exp(-beta*x)
plt.figure(figsize=(10,6))
plt.semilogy(x, M/dm)
plt.semilogy(x, wm_analytical)
plt.title('Logarithmic Gibbs distribution as function of money m')
plt.xlabel('money')
plt.ylabel('log(wm)')
plt.legend(('Numerical', 'Analytical'))
plt.show()
#
| true |
b62e0d0e17c9df505d19d2e2a5d1fe7a674ea67e | Python | 74ifa/74ifa | /Python/Pandas/SQL-Table.py | UTF-8 | 693 | 3.234375 | 3 | [] | no_license | import sqlite3 as sql
from pandas import DataFrame
# Sample data: two columns ('Name', 'Age') and six rows.
data = {
    'Name':['huzifa','ajmed','jaber','salim','sam','Ye'],
    'Age':[20,30,32,17,19,22]
}
con = sql.connect('data.db')
# Bug fix: drop the table only if it exists -- the unconditional
# DROP TABLE raised sqlite3.OperationalError on a fresh database.
con.execute('DROP TABLE IF EXISTS Data')
con.commit()
con.execute('CREATE TABLE Data (Name text, Age number)')
con.commit()
DF = DataFrame(data, columns=['Name','Age'])
# if_exists='replace': drop and recreate the table before inserting.
DF.to_sql('Data', con, if_exists='replace', index=False)
# Show all rows from the Data table.
t = con.execute("SELECT * FROM Data")
for x in t.fetchall():
    print(x)
| true |
b51d1f7d11c3638c1dcff48b6d7cb5957b12b42d | Python | giuseppe-crea/Lesson1 | /calculator_impl.py | UTF-8 | 653 | 3.5 | 4 | [] | no_license | # interfaces and stuff
def sum(m, n):
    """Add two integers using repeated unit increments.

    Raises ValueError if either argument is not an int.
    (Deliberately shadows the builtin sum -- this is the module's API.)
    """
    if not isinstance(m, int) or not isinstance(n, int):
        raise ValueError('Not an int')
    step = 1 if n >= 0 else -1
    for _ in range(abs(n)):
        m = m + step
    return m
def divide(m, n):
    """Integer division by repeated subtraction, truncating toward zero.

    Raises ValueError on division by zero or non-int arguments.

    Fixes an off-by-one in the original: the loop ran while m > 0, so any
    inexact quotient was rounded away from zero (divide(7, 2) returned 4).
    Subtracting only while a full |n| still fits yields truncation.
    """
    if n == 0:
        raise ValueError('Division by Zero.')
    if not isinstance(n, int) or not isinstance(m, int):
        raise ValueError('Not an int')
    sign = 1
    if n < 0:
        sign = -1
    if m < 0:
        sign = -sign
        m = -m
    quotient = 0
    step = abs(n)
    while m >= step:
        m -= step
        quotient += 1
    return quotient * sign
| true |
fd8065ac67031247f246f3d3ff3b15ecb1f13786 | Python | ranson/leetcode | /53_maximumsubarray/53_maximumsubarray.py | UTF-8 | 890 | 3.921875 | 4 | [] | no_license | """
题目大意:
求数组的最大子数组的和
使用动态规划方法:
1. 原问题可分解为Max为[a,b,c,d,e]的最优解,cur为包含e的最优解,则[a,b,c,d,e,f]的最优解为max(max(cur+f,f),Max)
2. 然后由子问题重新构建到原问题
"""
class Solution(object):
    def maxSubArray(self, nums):
        """Kadane's algorithm: track the best subarray sum ending at each
        position and the best seen overall.

        :type nums: List[int]
        :rtype: int
        """
        best = running = nums[0]
        for value in nums[1:]:
            # Either extend the current subarray or start fresh at `value`.
            running = max(value, running + value)
            best = max(best, running)
        return best
# Quick manual checks against a few inputs (expected output: 0, 6, -2, 1).
solution = Solution()
nums = [0]
print(solution.maxSubArray(nums))
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(solution.maxSubArray(nums))
nums = [-2]
print(solution.maxSubArray(nums))
nums = [-2,1]
print(solution.maxSubArray(nums))
77c927f2c856ca369f8190dc8b48edb5d5735df1 | Python | Andrew-C-Peterson/Project-Euler-Andrew | /PE045.py | UTF-8 | 850 | 4.0625 | 4 | [] | no_license | #Triangle, pentagonal, and hexagonal numbers
#T285 = P165 = H143 = 40755
#Find the next triangle number that is also a pentagonal and hexagonal number
import time as time
start = time.time()
#Function for determing if # is pentagonal
def is_pent(P):
    """Return True if P is a pentagonal number.

    P is pentagonal iff n = (1 + sqrt(24*P + 1)) / 6 is a positive integer,
    i.e. 1 + sqrt(24*P + 1) is exactly divisible by 6.
    """
    return (1 + (24 * P + 1) ** 0.5) % 6 == 0
#We are going to iterate through hexagonal numbers, since they have the largest
#Step size in between them. So we are starting after H143
i = 144
while True:
    #Find the ith hexagonal number
    hex_num = i*(2*i-1)
    #Check if it is also a pentagonal number
    #To note, all hexagonal numbers are triangle numbers so we don't need to check
    if is_pent(hex_num):
        #If it is, print the answer and break the loop
        print(hex_num)
        break
    i+=1
print(time.time()-start)
2b841f0974e480cbbfd277af4f94f7fa31e4338f | Python | hkjn/src | /infra/validate.py | UTF-8 | 2,253 | 2.75 | 3 | [
"Apache-2.0",
"MIT"
] | permissive | # This tool looks at bootstrap/ .json configs to verify that the specified URLs
# can be fetched and have matching checksums.
#
# TODO: use "download binaries and verify checksums"
# logic here to fetch" CA cert, client cert + key,
# and then call telemetry/install_client:
# curl -o /etc/ssl/mon_ca.pem https://admin1.hkjn.me/151f[...]/files/hkjninfra/1.5.5/certs/mon_ca.pem
#
import json
import logging
import os
import subprocess
import sys
import tempfile
import urllib2
def fetch(url, path):
    """Download `url` and write the response body to `path` (Python 2 /
    urllib2 API).

    NOTE(review): the HTTP response object is never closed explicitly.
    """
    response = urllib2.urlopen(url)
    html = response.read()
    with open(path, 'w+') as tmpfile:
        tmpfile.write(html)
    print(' Wrote {}.'.format(path))
def verify_digest(path, digest):
    """Check that the sha512 checksum of the file at `path` equals `digest`.

    Shells out to the sha512sum binary; raises RuntimeError if the command
    fails or the checksum does not match (only digest prefixes are shown).
    """
    child = subprocess.Popen(["sha512sum", path], stdout=subprocess.PIPE)
    failed = child.wait()
    if failed:
        raise RuntimeError('sha512sum {} call failed.'.format(path))
    # Output format is "<digest>  <filename>"; the first token is the digest.
    shaparts = child.stdout.read().split()
    if shaparts[0] != digest:
        raise RuntimeError('bad checksum for {}: {} vs {}'.format(
            path,
            shaparts[0][:5],
            digest[:5],
        ))
def run():
    """Validate the download URLs and checksums listed in bootstrap/<node>.json.

    Expects exactly one CLI argument (the node name). With the
    VALIDATE_DRYRUN environment variable set, only prints what would be
    checked; otherwise each file is fetched into a temp directory and its
    sha512 digest is verified.
    """
    if len(sys.argv) != 2:
        raise RuntimeError('Usage: {} node'.format(sys.argv[0]))
    dryrun = os.environ.get('VALIDATE_DRYRUN')
    data=json.loads(open("bootstrap/{}.json".format(sys.argv[1])).read())
    for f in data['storage']['files']:
        # Only entries with an http(s) source URL can be verified.
        if 'source' not in f['contents'] or 'http' not in f['contents']['source']:
            continue
        url = f['contents']['source']
        digest = f['contents']['verification']['hash'].split('sha512-')[1]
        if dryrun:
            print(' URL {} should have checksum {}'.format(url, digest))
        else:
            print('Verifying checksum of {}..'.format(f['path']))
            print(' Fetching {}, which should have checksum {}..'.format(url, digest[:5]))
            path = tempfile.mkdtemp(prefix='hkjninfra_checksums')
            tmppath = os.path.join(path, digest)
            fetch(url, tmppath)
            verify_digest(tmppath, digest)
            print(' Checksum matches!')
    if not dryrun:
        print('All checksums matched.')
if __name__ == '__main__':
run()
| true |
aaaa4ac624d3d2baeb8452e96404bcce8fa82e43 | Python | BookaiJin/analyzeTreasure-python | /gcLogAnalyzer.py | UTF-8 | 533 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" main """
# 传入一个月的gc日志文件夹/path/gcLogs解析出宕机了几次,每次宕机的相关信息,输出到文件/path/gcDownTimeResult.log
import os
__author__ = 'bokai'
def analyze_gc_log_path(gc_log_path):
    """Analyze the GC logs under `gc_log_path` and write the downtime report
    to gcDownTimeResult.log inside the same directory.

    Fixes the original open() call, which opened the *output* file for
    reading (raising FileNotFoundError on a fresh run) and never closed the
    handle. The actual log parsing is still unimplemented.
    """
    result_path = os.path.join(gc_log_path, 'gcDownTimeResult.log')
    with open(result_path, 'w', encoding='utf-8') as result_file:
        # TODO: parse the gc logs in gc_log_path, count crashes, and write
        # per-crash details to result_file.
        pass
if __name__ == '__main__':
    # Prompt (in Chinese) for the gc-log directory name, then analyze it.
    gc_log_path = input('输出gc日志的文件夹名:')
    analyze_gc_log_path(gc_log_path)
58df0f305fb6a1ba47a1847812d766d261bc74df | Python | asherif844/algorithms | /nextGreaterElement/exercise1.py | UTF-8 | 850 | 3.609375 | 4 | [] | no_license | def nextGreaterElement(array):
lengthOfArray = len(array)
finalList = []
for i in range(lengthOfArray):
# newList = []
positionInIndex = i
forwardRange = array[positionInIndex:lengthOfArray]
backwardRange = array[0: positionInIndex]
newArray = forwardRange + backwardRange
originalNumber = newArray[0]
nge = findNextNumber(newArray, idx=1)
# newList.append(nge)
# print(newArray)
# print(originalNumber)
finalList.append(nge)
return finalList
def findNextNumber(array, idx=1):
original = array[0]
while idx < len(array):
new = array[idx]
if original < new:
return new
else:
idx += 1
return -1
# return originalNumber
array = [2, 5, -3, -4, 6, 7, 2]
nextGreaterElement(array)
| true |
736ca93ce72544572dc0763e8d5fd7d573fc2e95 | Python | JoryAnderson/EMSE_DevInt | /notebook/tokenization.py | UTF-8 | 1,707 | 3.4375 | 3 | [] | no_license | import sklearn as sk
import numpy as np
import scipy
import nltk
from nltk.corpus import stopwords
import re
from nltk.stem import PorterStemmer
nltk.download('stopwords')
# replcae non alpha numeric charactor with space given a text
def remove_non_alphabetic_characters(text):
    """Replace every run of non-ASCII-letter characters in `text` with a
    single space, then collapse any remaining whitespace runs."""
    without_symbols = re.sub('[^a-zA-Z]+', ' ', text)
    return re.sub('[\s]+', ' ', without_symbols)
# tokenize from space given a text
def tockenize(text):
    """Split `text` on single spaces and drop the first empty token, if any.

    Note: only one empty string is removed, matching the original behaviour
    for inputs containing runs of spaces.
    """
    pieces = text.split(" ")
    try:
        pieces.remove("")
    except ValueError:
        pass
    return pieces
# remove words that contain non alpha numeric characters given a list of tokens
# this not used for processing
def remove_non_alphabetic_tokens(tokens):
    """Keep only tokens made entirely of alphabetic characters.

    (Kept for completeness; not used by the processing pipeline.)
    """
    return list(filter(str.isalpha, tokens))
# remove stop words such as to,... given a list of tokens
def remove_stop_words(tokens):
    """Drop English stop words (NLTK corpus) from a list of tokens."""
    stop_words = set(stopwords.words('english'))
    tokens = [w for w in tokens if not w in stop_words]
    return tokens
# stemming the words to original stem given a list of tokens
def stem(tokens):
    """Reduce each token to its Porter stem (NLTK PorterStemmer)."""
    ps = PorterStemmer()
    tokens = [ps.stem(w) for w in tokens]
    return tokens
# remove words less than a certain length givena list of tokens
def remove_short_tokens(tokens,length=1):
    """Return only the tokens strictly longer than `length` characters."""
    return list(filter(lambda token: len(token) > length, tokens))
# combining the methods defined to process a given text and return the processed text
def process (title):
    """Full text-normalisation pipeline: lowercase, strip non-letters,
    tokenize, remove stop words, stem, drop short tokens, and re-join the
    surviving tokens with single spaces."""
    title = title.lower()
    title = remove_non_alphabetic_characters(title)
    tokens = tockenize(title)
    tokens = remove_stop_words(tokens)
    tokens = stem(tokens)
    tokens = remove_short_tokens(tokens)
    processed_text = ' '.join(tokens)
    return processed_text
| true |
f60f2336f8eb062a32e2857bd3f33f53cd7474ec | Python | AlirezaSadeghi/reuters-21578 | /reuters/main.py | UTF-8 | 2,603 | 2.546875 | 3 | [] | no_license | import os
import logging
import pandas as pd
import apache_beam as beam
from apache_beam.io import WriteToText
from apache_beam.io.fileio import MatchFiles, ReadMatches
from reuters.models import RecurrentClassifier
from reuters.preprocessing import DataProcessor
from reuters.tokenizer import Tokenizer
from reuters.utils.preprocessors import (
string_processors,
NoStopWordPreprocessor,
StemmingPreprocessor,
)
from reuters.utils.utility import ReutersUtils
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
PROCESSED_FILE_PREFIX = "processed"
def transform_data(dataset_dir, name_pattern):
    """
    Spins up an apache beam runner (Here, DirectRunner is used since we're running it locally) to take care of
    pre-processing the dataset in a distributed manner.
    When complete, it writes out the processed data into a file prefixed "processed" (for now, not the best choice, but
    short on time and have to compromise)
    :param dataset_dir: The parent directory of the dataset
    :param name_pattern: Glob pattern of the input files in the dataset
    """
    # Text-normalisation chain: generic string processors plus stop-word
    # removal (including SGML control bytes \x01/\x03) and stemming.
    processors = string_processors + [
        NoStopWordPreprocessor(extra_words=["reuter", "\x01", "\x03"]),
        StemmingPreprocessor(),
    ]
    data_processor = DataProcessor(
        split_field="lewissplit",
        word_fields=["title", "body"],
        label_fields=["places", "topics"],
        document_root="reuters",
        processors=processors,
    )
    with beam.Pipeline() as pipeline:
        # match files -> read -> parse/clean each document -> write records.
        _ = (
            pipeline
            | "Find Files" >> MatchFiles(os.path.join(dataset_dir, name_pattern))
            | "Read Files" >> ReadMatches()
            | "Map" >> beam.FlatMap(data_processor.process)
            | "Write"
            >> WriteToText(os.path.join(dataset_dir, PROCESSED_FILE_PREFIX + ".txt"))
        )
def main(args):
    """Ensure the processed dataset exists, then train and evaluate the
    recurrent classifier.

    args.dataset_dir must point at the Reuters dataset directory.
    """
    ReutersUtils.ensure_dataset(args.dataset_dir)
    ds_path = ReutersUtils.find_file(args.dataset_dir, PROCESSED_FILE_PREFIX)
    # Preprocess until the "processed" output file appears.
    # NOTE(review): if transform_data never produces the file, this loops forever.
    while not ds_path:
        ReutersUtils.download_nltk_packages()
        transform_data(args.dataset_dir, "reut2-0*.sgm")
        ds_path = ReutersUtils.find_file(args.dataset_dir, PROCESSED_FILE_PREFIX)
    # One JSON record per line, as written by the beam pipeline.
    df = pd.read_json(ds_path, lines=True)
    (X_train, Y_train), (X_test, Y_test), vocab_size = Tokenizer(df).tokenize()
    classifier = RecurrentClassifier(X_train, Y_train, vocab_size)
    classifier.train()
    evaluation = classifier.evaluate(X_test, Y_test)
    print(
        f"LSTM Model: Loss: {evaluation[0]}, Accuracy: {evaluation[1]}, Top 5 Accuracy: {evaluation[2]}"
    )
| true |
e75c515eff501e8871c86334d4df22040aa53cc5 | Python | rogermurer/rpi-sensorlog | /WDE1/WDE1Values2sql.py | UTF-8 | 3,485 | 2.90625 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/usr/bin/env python3
""" This script reads value string from an usb-wde1 device (www.elv.de).
The usb-wde1 device can read different sensors and returns the values in a
csv-string.
pyserial is used to read from the usb-port and must therefore be installed.
The configuration is read from a file called config.json residing in the
scrpits path. A config-dist.jason is provided, copy it to config.json and
modify it as needed.
This script is intended to be called from a cron job.
"""
import os
import sys
import datetime
import json
import serial
import psycopg2
def open_db(db_name, user, host, port, pwd):
    """Open and return a psycopg2 connection to the PostgreSQL database.

    Exits the process with status 1 if the connection cannot be made.
    """
    connstring = "dbname='{}' user='{}' host='{}' port='{}' password='{}'"
    try:
        conn = psycopg2.connect(connstring.format(db_name,
                                                  user, host,
                                                  port, pwd))
    except psycopg2.OperationalError as error:
        # Bug fix: the original called .format() on print()'s return value
        # (None), which raised AttributeError and hid the real error message.
        print('Unable to connect!\n{}'.format(error))
        sys.exit(1)
    return conn
def write_to_db(db_conf, sensor_values):
    """Insert sensor values into sensor_values table"""
    # db_conf: dict with keys db/user/host/prt/pwd; sensor_values maps a
    # device id to the measured value.
    db_conn = open_db(db_conf["db"], db_conf["user"],
                      db_conf["host"], db_conf["prt"], db_conf["pwd"])
    sql_str = "INSERT INTO sensor_values (device_id,log_value,log_time) VALUES (%s,%s,%s)"
    cur = db_conn.cursor()
    for sensor_id, sensor_value in sensor_values.items():
        # Timestamp each row at insert time.
        cur.execute(sql_str, (sensor_id, sensor_value, datetime.datetime.now()))
    db_conn.commit()
    cur.close()
    db_conn.close()
def read_config_file():
    """Load and return the configuration dict from the config.json file
    located next to this script."""
    script_dir = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(script_dir, 'config.json')) as handle:
        return json.load(handle)
def line_is_valid(csv_line):
"""Checks if the USB-WDE1 line is in a valid format"""
csv_line = csv_line.strip()
fields = csv_line.split(';')
valid = (len(fields) == 25 and fields[0] == u'$1' and fields[24] == u'0')
return valid
def get_wde1_value_line(serial_port):
"""try to read sensors values from usb-wde1 dongle"""
is_valid = False
with serial.Serial(serial_port, 9600, timeout=180) as ser:
try:
ser.isOpen()
except:
sys.exit(1)
if ser.isOpen():
try:
while is_valid != True:
line = ser.readline()
line = line.decode('utf-8')
is_valid = line_is_valid(line)
except Exception:
sys.exit(1)
if is_valid != True:
line = ''
return line
def main():
"""Main routine to read sensor data and write the values to the database"""
config = read_config_file()
db_config = config["psql"]
sensors = config["sensors"]
usb_port = config["misc"]["usb-port"]
sensor_values = {}
value_line = get_wde1_value_line(usb_port)
if value_line != '':
data = value_line.split(';')
for sensor, sensor_setting in sensors.items():
if int(sensor_setting["enabled"]) == 1:
val = float(data[int(sensor_setting["csv-id"])].replace(',', '.'))
sensor_values.update({sensor_setting["db-id"]: round(val, 1)})
write_to_db(db_config, sensor_values)
if __name__ == '__main__':
main()
| true |
cf59d75a1907c73fbf430d42f652e30fdbb4f498 | Python | sfmqrb/ModernInformationRetrieval | /phase#3/MIR_project_3_97106187_sajad_faghfoor_maghrebi/project#3/MIR3_3.py | UTF-8 | 2,922 | 3.078125 | 3 | [] | no_license | import json
from operator import itemgetter
def get_crawled_file(file_rel_path):
with open(file_rel_path) as f:
s = json.load(f)
return s
def find_article_by_id(id_str, s):
for article in s:
if article['id'] == id_str:
return article
return None
def is_in_dic_and_initialize1(dic, key):
if key not in dic.keys():
dic[key] = 1.0
return False
return True
def find_degree_in(ls_author, to):
k = 0
for aut in ls_author:
if to == aut[1]:
k += 1
return k
def find_degree_out(ls_aut, fro):
k = 0
for aut in ls_aut:
if fro == aut[0]:
k += 1
return k
def find_normalizing_factor(dic):
_normalizing_factor = 0.0
for key in dic:
_normalizing_factor += dic[key]**2
return _normalizing_factor**0.5
def get_author_pair(s):
"""
s: crawled json loaded in s
"""
ls_author = set()
for article_from in s:
for ref in article_from['references']:
id_str = ref
article_to = find_article_by_id(id_str, s)
if article_to != None:
for author_to in article_to['authors']:
for author_from in article_from['authors']:
ls_author.add((author_from, author_to))
return list(ls_author)
def calc_top_hit(ls_author, N):
hub_dic = {}
aut_dic = {}
MAX_ITERATION = 5
for pair in ls_author:
fro = pair[0]
to = pair[1]
is_in_dic_and_initialize1(hub_dic, fro)
is_in_dic_and_initialize1(aut_dic, to)
is_in_dic_and_initialize1(hub_dic, to)
is_in_dic_and_initialize1(aut_dic, fro)
for _ in range(MAX_ITERATION):
visited_hub= set()
visited_aut = set()
for pair in ls_author:
fro = pair[0]
to = pair[1]
if to not in visited_aut:
visited_aut.add(to)
aut_dic[to] = 0.0
aut_dic[to] += hub_dic[fro]
_normalizing_factor_aut = find_normalizing_factor(aut_dic)
for key in aut_dic:
aut_dic[key] /= _normalizing_factor_aut
for pair in ls_author:
fro = pair[0]
to = pair[1]
if fro not in visited_hub:
visited_hub.add(fro)
hub_dic[fro] = 0.0
hub_dic[fro] += aut_dic[to]
_normalizing_factor_hub = find_normalizing_factor(hub_dic)
for key in hub_dic:
hub_dic[key] /= _normalizing_factor_hub
aut_top_dic = dict(sorted(aut_dic.items(), key = itemgetter(1), reverse = True)[:N])
return aut_top_dic
def get_top_hit(file_rel_path, N):
s = get_crawled_file(file_rel_path)
ls_author = get_author_pair(s)
top_hit = calc_top_hit(ls_author, N)
return list(top_hit.keys()) | true |
e93260ee7977d2c606964b3b7fa98f265204f871 | Python | FIGNN/FIGNN | /FI_GraphSAGE_SimpleGCN/simple_gcn_classification.py | UTF-8 | 12,208 | 2.578125 | 3 | [] | no_license | """
This code was modified from the GCN implementation in DGL examples.
Simplifying Graph Convolutional Networks
Paper: https://arxiv.org/abs/1902.07153
Code: https://github.com/Tiiiger/SGC
SGC implementation in DGL.
"""
import argparse, time, math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
import dgl
from dgl.data import register_data_args
from dgl.nn.pytorch.conv import SGConv
from sklearn.metrics import f1_score, confusion_matrix
from tqdm import tqdm
class AttentionalFactorizationMachine(torch.nn.Module):
def __init__(self, embed_dim, attn_size, dropouts):
super().__init__()
self.attention = torch.nn.Linear(embed_dim, attn_size)
self.projection = torch.nn.Linear(attn_size, 1)
self.fc = torch.nn.Linear(embed_dim, 1)
self.dropouts = dropouts
def interaction(self, fm_paris_feature, gnn_feature):
gnn_feature_expand = gnn_feature.unsqueeze(1)
gnn_feature_expand = gnn_feature_expand.unsqueeze(2)
feature_pair_count = fm_paris_feature.shape[1]
gnn_feature_expand = gnn_feature_expand.expand(-1, feature_pair_count, -1, -1)
# interaction
gnn_shape = gnn_feature_expand.shape
gnn_feature_expand = gnn_feature_expand.reshape(gnn_shape[0] * gnn_shape[1],
gnn_shape[2], gnn_shape[3])
fm_paris_feature = fm_paris_feature.reshape(fm_paris_feature.shape[0] * fm_paris_feature.shape[1],fm_paris_feature.shape[2])
fm_paris_feature = fm_paris_feature.unsqueeze(2)
att_score = torch.bmm(gnn_feature_expand, fm_paris_feature)
att_score = att_score.view(gnn_shape[0], gnn_shape[1], 1)
att_score = torch.softmax(att_score, dim=1)
return att_score
def forward(self, gnn_feature, x ):
"""
:param x: Float tensor of size ``(batch_size, num_fields, embed_dim)``
"""
num_fields = x.shape[1]
row, col = list(), list()
for i in range(num_fields - 1):
for j in range(i + 1, num_fields):
row.append(i), col.append(j)
p, q = x[:, row], x[:, col]
inner_product = p * q
fm_pairs_feature = F.relu(self.attention(inner_product))
attn_scores = self.interaction(fm_pairs_feature, gnn_feature)
attn_output = torch.sum(attn_scores * inner_product, dim=1) * 100
x_all = torch.cat((gnn_feature, attn_output), dim=1)
return x_all
class FMLayer(nn.Module):
def __init__(self, in_features, k_embedding):
'''
:param in_features: 输入特征维数
:param k: 单一特征embedding
:param bias:
'''
super(FMLayer, self).__init__()
self.in_features = in_features
self.k_embedding = k_embedding
self.embedding = nn.Embedding(in_features+1, k_embedding, padding_idx=0)
# self.weight = Parameter(torch.FloatTensor(in_features,k_embedding))
# self.reset_parameters()
self.init_embedding()
def init_embedding(self):
nn.init.xavier_uniform_(self.embedding.weight)
# print('embedding_init',self.embedding.weight)
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
def forward(self, nonzero_index, nonzero_value):
feature_embed = self.embedding(nonzero_index)
nonzero_value = nonzero_value.unsqueeze(-1)
feature_vector = feature_embed * nonzero_value
return feature_vector
# left = torch.sum(feature_vector, dim=1) ** 2
# right = torch.sum(feature_vector ** 2, dim=1)
#
# output = 0.5 * (left - right)
# return output
class SimpleGCN(nn.Module):
def __init__(self,
in_feats,
n_hidden,
n_classes, direct=False):
super().__init__()
self.n_hidden = n_hidden
self.n_classes = n_classes
self.layers = nn.ModuleList()
self.model = SGConv(in_feats,
n_hidden,
k=2,
cached=True,
bias=args.bias)
# self.layers.append(dglnn.SAGEConv(n_hidden, n_classes, 'mean'))
# self.dropout = nn.Dropout(dropout)
# self.activation = activation
self.fm = FMLayer(in_feats, n_hidden)
self.pa_attn = AttentionalFactorizationMachine(n_hidden, n_hidden, dropouts=[0.2, 0.2])
self.final_linear = nn.Linear(2 * n_hidden, n_classes)
self.final_linear_single = nn.Linear(n_hidden, n_classes)
self.direct = direct
def forward(self, g, x, nonzer_index, nonzer_value):
h = self.model(g, x)
# bmp
if self.direct:
return self.final_linear_single(h)
x_right = self.fm(nonzer_index, nonzer_value)
x_all = self.pa_attn(h, x_right)
x_all = self.final_linear(x_all)
# x_all = self.final_linear_single(x_right)
return x_all
def inference(self, g, x, batch_size, device):
"""
Inference with the GraphSAGE mod el on full neighbors (i.e. without neighbor sampling).
g : the entire graph.
x : the input of entire node set.
The inference code is written in a fashion that it could handle any number of nodes and
layers.
"""
# During inference with sampling, multi-layer blocks are very inefficient because
# lots of computations in the first few layers are repeated.
# Therefore, we compute the representation of all nodes layer by layer. The nodes
# on each layer are of course splitted in batches.
# TODO: can we standardize this?
nodes = torch.arange(g.number_of_nodes())
for l, layer in enumerate(self.layers):
y = torch.zeros(g.number_of_nodes(), self.n_hidden if l != len(self.layers) - 1 else self.n_classes)
for start in tqdm.trange(0, len(nodes), batch_size):
end = start + batch_size
batch_nodes = nodes[start:end]
block = dgl.to_block(dgl.in_subgraph(g, batch_nodes), batch_nodes)
input_nodes = block.srcdata[dgl.NID]
h = x[input_nodes].to(device)
h_dst = h[:block.number_of_dst_nodes()]
h = layer(block, (h, h_dst))
if l != len(self.layers) - 1:
h = self.activation(h)
h = self.dropout(h)
y[start:end] = h.cpu()
x = y
return y
def evaluate(model, g, features, non_zero_index, non_zero_value, labels, mask, args, t1):
model.eval()
with torch.no_grad():
logits = model(g, features, non_zero_index, non_zero_value)[mask] # only compute the evaluation set
labels = labels[mask]
# logits = model(g, features, non_zero_index, non_zero_value) # only compute the evaluation set
# labels = labels
_, indices = torch.max(logits, dim=1)
f1 = f1_score(y_pred=indices.cpu().numpy(), y_true=labels.cpu().numpy(), average="weighted")
c_m = confusion_matrix(y_pred=indices.cpu().numpy(), y_true=labels.cpu().numpy())
# print("MASK Length {}, True Count: {}, Type:{}".format(len(mask), sum(mask), type(mask)))
# with open(f"./{args.dataset}_hello_{args.seed}.result", 'a') as f1:
# f1.write("")
# f1.write(f"{type}-label:" + ",".join([str(i) for i in labels.cpu().numpy().tolist()]) + "\n")
# f1.write(f"{type}-logits:" + ",".join([str(i) for i in indices.cpu().numpy().tolist()]) + "\n")
# f1.write(f"{type}-all:" + ",".join([str(i) for i in raw_labels]) + "\n")
# f1.write("Val:" + ",".join([str(i) for i in labels[val_mask]]) + "\n")
# print(th.cpu().values().tolist())
correct = torch.sum(indices == labels)
return correct.item() * 1.0 / len(labels), f1
from dataset import OurDataset
import random
def main(args):
output_f1 = open(args.output_file, 'w+')
if args.gpu >= 0:
device = torch.device('cuda:%d' % args.gpu)
else:
device = torch.device('cpu')
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if args.gpu != -1:
torch.cuda.manual_seed(args.seed)
data = OurDataset(args.dataset,args.truncate_size, seed=args.seed)
non_zero_index = data.non_zero_index
non_zero_value = data.non_zero_value
g = data[0]
if args.gpu < 0:
cuda = False
else:
cuda = True
g = g.int().to(args.gpu)
train_mask = data.train_mask
val_mask = data.val_mask
test_mask = data.test_mask
train_mask = train_mask.tolist()
val_mask = val_mask.tolist()
test_mask = test_mask.tolist()
features = torch.Tensor(data.features)
in_feats = features.shape[1]
labels = torch.LongTensor(data.labels)
n_classes = data.num_labels
n_edges = g.number_of_edges()
# Construct graph
g = dgl.graph(data.graph.all_edges())
g.ndata['features'] = features
g.ndata['non_zero_index'] = non_zero_index
g.ndata['non_zero_value'] = non_zero_value
# create SGC model
n_hidden = 32
model = SimpleGCN(
in_feats,
n_hidden,
n_classes, direct=args.direct
)
if cuda:
model.cuda()
loss_fcn = torch.nn.CrossEntropyLoss()
# use optimizer
optimizer = torch.optim.Adam(model.parameters(),
lr=args.lr,
weight_decay=args.weight_decay)
# initialize graph
# non_zero_index = torch.tensor(non_zero_index)
# non_zero_value = torch.FloatTensor(non_zero_value)
if cuda:
non_zero_index = non_zero_index.cuda()
non_zero_value = non_zero_value.cuda()
dur = []
acc = 0
f1 = 0
for epoch in range(args.n_epochs):
model.train()
if epoch >= 3:
t0 = time.time()
# forward
logits = model(g, features, non_zero_index, non_zero_value) # only compute the train set
loss = loss_fcn(logits[train_mask], labels[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch >= 3:
dur.append(time.time() - t0)
acc_here, f1_here = evaluate(model, g, features, non_zero_index, non_zero_value, labels, test_mask, args, 'val')
if acc_here > acc:
acc = acc_here
f1 = f1_here
print("Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | F1 {:.4f} | "
"ETputs(KTEPS) {:.2f}". format(epoch, np.mean(dur), loss.item(),
acc,f1, n_edges / np.mean(dur) / 1000))
# model, g, features, non_zero_index, non_zero_value, labels, mask
# acc, f1 = evaluate(model, g, features, non_zero_index, non_zero_value, labels, test_mask, args, 'test')
print("Test Accuracy {:.4f}, F1 {:.4f}".format(acc,f1))
output_f1.write("Acc: {:.4f}, F1: {:.4f}".format(acc, f1))
output_f1.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='SGC')
# register_data_args(parser)
parser.add_argument("--gpu", type=int, default=-1,
help="gpu")
parser.add_argument("--seed", type=int, default=123,
help="seed")
parser.add_argument("--lr", type=float, default=0.2,
help="learning rate")
parser.add_argument("--bias", action='store_true', default=False,
help="flag to use bias")
parser.add_argument("--n_epochs", type=int, default=100,
help="number of training epochs")
parser.add_argument("--weight-decay", type=float, default=5e-6,
help="Weight for L2 loss")
parser.add_argument("--dataset", type=str, default="BlogCatalog")
parser.add_argument("--output_file", type=str, default="./simple_gcn.result")
parser.add_argument("--direct", action="store_true")
parser.add_argument("--truncate_size", type=int, default=200)
args = parser.parse_args()
print(args)
main(args)
| true |
94129ff9e3c9450005f468f9b630f1ffb4040522 | Python | robingirard/PopulationEtEnergie | /populationetenergie/emissions/pop_cc.py | UTF-8 | 4,375 | 2.671875 | 3 | [] | no_license | import numpy as np
'''
L'argument fichier de la première fonction est trouvé à l'adresse suivante : https://www.theshiftdataportal.org/energy/final-energy?chart-type=stacked&chart-types=stacked&chart-types=stacked-percent&chart-types=pie&chart-types=line&chart-types=ranking&disable-en=false&energy-families=Oil%20products&energy-families=Gas&energy-families=Electricity&energy-families=Coal&energy-families=Heat&energy-families=Geothermal&energy-families=Biofuels%20and%20waste&energy-families=Crude%20oil&energy-families=Others&energy-unit=KWh&group-names=Albania&is-range=true&gdp-unit=GDP%20(constant%202010%20US%24)§ors=Transport&dimension=byEnergyFamily&end=2015&start=2014&multi=false
'''
def conso_par_vect(fichier):
fic=open(fichier) # Selon l'endroit où le fichier est rangé, on mettra la lettre r devant le chemin d'accès, sinon open() n'aime pas
ch=fic.readline()
while ch!="": #dans le fichier dont nous disposons, on ne s'interesse qu'à la dernière ligne, qui est la donnée la plus récente
L=ch.split(",")
ch=fic.readline() #L est la liste [date, biofuels, charbon, pétrole brut, électricité, gaz, géothermique, thermique, produits pétroliers] en kWh (ou en l'unité qu'on choisit sur le site)
for i in range(1,len(L)-1):
L[i]=float(L[i]) #On convertit les valeurs de notre liste en flottants : attention, le premier élement est undate (donc pas un float) et le dernier a un vilain '\n' au bout
L[-1]=float(L[-1][:-1]) #On n'oiublie pas de supprimmer le \n
return L
'''
Le coût en CO2 équivalent de l'électricité dans chaque pays est disponible à l'adresse :
https://www.bilans-ges.ademe.fr/documentation/UPLOAD_DOC_FR/index.htm?moyenne_par_pays.htm
Le mix électrique précis est disponible ici :
https://www.theshiftdataportal.org/energy/electricity?chart-type=stacked&chart-types=stacked&chart-types=stacked-percent&chart-types=pie&chart-types=line&chart-types=ranking&disable-en=false&ef-generation=Oil&ef-generation=Coal&ef-generation=Gas&ef-generation=Nuclear&ef-generation=Hydro&ef-generation=Wind&ef-generation=Biomass&ef-generation=Waste&ef-generation=Solar%20PV&ef-generation=Geothermal&ef-generation=Solar%20Thermal&ef-generation=Tide&ef-capacity=Fossil%20Fuels&ef-capacity=Hydroelectricity&ef-capacity=Nuclear&ef-capacity=Hydroelectric%20Pumped%20Storage&ef-capacity=Wind&ef-capacity=Solar%2C%20Tide%2C%20Wave%2C%20Fuel%20Cell&ef-capacity=Biomass%20and%20Waste&ef-capacity=Geothermal&energy-unit=TWh&group-names=World&is-range=true&gdp-unit=GDP%20(constant%202010%20US%24)&type=Generation&dimension=byEnergyFamily&end=2015&start=1990&multi=false
'''
'''
Remarque sur la suite : je n'ai pas trouvéde base de donnée qui donne pour chaque pays les émissions de CO2 par kWh par source d'énergie, donc je n'écris pas encore la fonction associée (attention aux unités, c'est en cCO2eq/kWh)
'''
def calcul(consvect,elec,CO2vect): #En gCO2eq
L1=conso_par_vect(consvect)
L2=CO2_par_vect(CO2vect)
L3=L2[:3]+[elec]+L2[4:] #On propose bien le mix électrique de chaque pays.
conso_totale=np.sum([L1[i+1]*L3[i] for i in range(8)]) #On procède à une habile multiplication : conso_CO2_totale_dun_vecteur = conso_énergétique_par_vecteur * conso_en_CO2_du_vecteur, puis on somme pour chaque vecteur
return conso_totale
'''
Pour que l'algorithme tourne quand même, je vais utiliser les moyennes mondiales : https://fr.wikipedia.org/wiki/Empreinte_carbone#%C3%89missions_directes_en_CO2_des_combustibles pour le charbon et le gaz ou https://fr.wikipedia.org/wiki/Empreinte_carbone#%C3%89missions_directes_en_CO2_des_combustibles pour la géothermie et la thermique, https://www.ademe.fr/mediatheque/recherche?query=BIOCARBURANT%20%C3%A9mission&items_per_page=10&sort_by=field_resource_edition_date&sort_order=DESC pour les biocarburants (attention c'est en kg equivalent CO2 par MJ) ou https://convertlive.com/fr/u/convert/kilowatt-heures/a/gigajoules#73.6 pour les pétroles.
'''
CO2vect=[90,377,264,0,243,50,850,300]
def calcul2(consvect,elec,CO2vect):
L1=conso_par_vect(consvect)
L2=CO2vect
L3=L2[:3]+[elec]+L2[4:]
conso_totale=np.sum([L1[i+1]*L3[i] for i in range(8)])
return conso_totale | true |
860c823c8820599821e1e71b247089cef89c4b35 | Python | xix017/mlia | /chapter_3/trees.py | UTF-8 | 3,051 | 2.9375 | 3 | [] | no_license | from math import log
import operator
def createDataSet():
dataSet = [
[1, 1, 'yes'],
[1, 1, 'yes'],
[1, 0, 'no'],
[0, 1, 'no'],
[0, 1, 'no']
]
labels = ['no surfacing', 'flippers']
return dataSet, labels
def entropy(data):
entries = len(data)
counts = {}
for entry in data:
label = entry[-1]
if label not in counts:
counts[label] = 0
counts[label] += 1
ent = 0.0
for key in counts:
prob = float(counts[key])/entries
ent -= prob * log(prob, 2)
return ent
def split(data, label, value):
new_data = []
for entry in data:
if entry[label] == value:
reduced_entry = entry[:label]
reduced_entry.extend(entry[label+1:])
new_data.append(reduced_entry)
return new_data
def best_feature(data):
total = len(data[0]) - 1
base_entropy = entropy(data)
max_info = 0.0
best_feature = -1
for i in range(total):
features = [example[i] for example in data]
new_entropy = 0.0
for value in set(features):
sub_data = split(data, i, value)
prob = len(sub_data)/float(len(data))
new_entropy += prob * entropy(sub_data)
info_gain = base_entropy - new_entropy
if (info_gain > max_info):
max_info = info_gain
best_feature = i
return best_feature
def majority(class_list):
class_count = {}
for vote in class_list:
if vote not in class_count:
class_count[vote] = 0
class_count[vote] += 1
sorted_class_count = sorted(class_count.iteritems(), key=operator.itemgetter(1), reverse=True)
return sorted_class_count[0][0]
def create_tree(data, labels):
class_list = [example[-1] for example in data]
if class_list.count(class_list[0]) == len(class_list):
return class_list[0]
if len(data[0]) == 1:
return majority(class_list)
best_feat = best_feature(data)
best_label = labels[best_feat]
tree = {best_label: {}}
del(labels[best_feat])
values = [ex[best_feat] for ex in data]
unique_vals = set(values)
for value in unique_vals:
sub_labels = labels[:]
tree[best_label][value] = create_tree(split(data, best_feat, value), sub_labels)
return tree
def classify(tree, labels, test_feature):
first_label = tree.keys()[0]
first_decision = tree[first_label]
feat_index = labels.index(first_label)
for key in first_decision.keys():
if test_feature[feat_index] == key:
if type(first_decision[key]).__name__ == 'dict':
class_label = classify(first_decision[key], labels, test_feature)
else:
class_label = first_decision[key]
return class_label
def store_tree(tree, filename):
import pickle
fw = open(filename, 'w')
pickle.dump(tree, fw)
fw.close()
def restore_tree(filename):
import pickle
fr = open(filename)
return pickle.load(fr)
| true |
3dc533e67894a208c24a4dfdb836d5312e573fee | Python | MYREarth/secret-weapon | /2014-05-29/D.py | UTF-8 | 2,545 | 2.921875 | 3 | [] | no_license | from fractions import Fraction
from random import shuffle
def brute_force(n, m):
if n > m:
return brute_force(m, n)
board = [[False] * (n + m - 1) for _ in xrange(n + m - 1)]
x0, y0 = m - 1, 0
for i in xrange(n):
for j in xrange(m):
board[x0 - j][y0 + j] = True
x0 += 1
y0 += 1
for i in xrange(n + m - 1):
y0 = 0
while not board[i][y0]:
y0 += 1
y1 = n + m - 2
while not board[i][y1]:
y1 -= 1
for j in xrange(y0, y1 + 1):
board[i][j] = True
#print "".join(map(lambda flag : flag and '#' or '.', board[i]))
partial = [[0] * (n + m) for _ in xrange(n + m)]
for i in reversed(xrange(n + m - 1)):
for j in reversed(xrange(n + m - 1)):
partial[i][j] = partial[i + 1][j] + partial[i][j + 1] - partial[i + 1][j + 1]
if not board[i][j]:
partial[i][j] += 1
result = 0
for i in xrange(n + m - 1):
for ii in xrange(i, n + m - 1):
for j in xrange(n + m - 1):
for jj in xrange(j, n + m - 1):
if partial[i][j] - partial[ii + 1][j] - partial[i][jj + 1] + partial[ii + 1][jj + 1] == 0:
result += 1
return result
## 1, n, m, n * m, n * n, m * m, n * n * m, n * m * m, n * n * m * m
coefficient = []
for n in xrange(1, 15):
for m in xrange(n, 15):
rows = []
for i in xrange(7):
for j in xrange(7):
if i + j <= 6:
rows.append(pow(n, i) * pow(m, j))
rows.append(brute_force(n, m))
coefficient.append(rows)
shuffle(coefficient)
n = len(coefficient)
m = len(coefficient[0]) - 1
print n, m
for j in xrange(m):
pivot = j
while pivot < n and coefficient[pivot][j] == 0:
pivot += 1
assert pivot < n
coefficient[j], coefficient[pivot] = coefficient[pivot], coefficient[j]
for i in xrange(n):
if i != j:
t = Fraction(coefficient[i][j]) / coefficient[j][j]
for k in xrange(m + 1):
coefficient[i][k] -= coefficient[j][k] * t
for i in xrange(m, n):
assert all(map(lambda zero : zero == 0, coefficient[i]))
k = 0
for i in xrange(7):
for j in xrange(7):
if i + j <= 6:
rows.append(pow(n, i) * pow(m, j))
c = coefficient[k][m] / coefficient[k][k]
if c != 0:
print c, "n^%d m^%d" %(i, j)
k += 1
#for i in xrange(n):
# print coefficient
| true |
992ce7bcd08a754e7cbf78287798ed32c0844087 | Python | hauensteina/kifu-cam | /scripts/train_stones/01_json2sgf.py | UTF-8 | 3,356 | 2.53125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# /********************************************************************
# Filename: json2sgf.py
# Author: AHN
# Creation Date: Feb 28, 2018
# **********************************************************************/
#
# Copy intersection coords from wallstedt format json to sgf GC tag
#
from __future__ import division, print_function
from pdb import set_trace as BP
import os,sys,re,json,shutil
import numpy as np
from numpy.random import random
import argparse
import matplotlib as mpl
mpl.use('Agg') # This makes matplotlib work without a display
from matplotlib import pyplot as plt
# Look for modules in our pylib folder
SCRIPTPATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append( SCRIPTPATH + '/..')
import ahnutil as ut
#---------------------------
def usage(printmsg=False):
name = os.path.basename(__file__)
msg = '''
Name:
%s -- Copy intersection coords from wallstedt format json to sgf GC tag
Synopsis:
%s --folder <folder>
Description:
Finds all pairs (*_intersections.json, *.sgf) and cpoies the
intersections coords into the sgf GC tag.
The results go into a subfolder <folder>/json2sgf.
Example:
%s --folder ~/kc-trainingdata/andreas/mike_fixed_20180228
''' % (name,name,name)
if printmsg:
print(msg)
exit(1)
else:
return msg
# Collect matching jpeg and json in a dictionary
#----------------------------------------------------
def collect_files( infolder):
# Find images
jsons = ut.find( infolder, '[!.]*_intersections.json')
# Basenames
basenames = [os.path.basename(f) for f in jsons]
basenames = [re.sub( '_intersections.json','',x) for x in basenames]
sgfs = [ut.find( infolder, '%s.sgf' % f)[0] for f in basenames]
jpegs = [ut.find( infolder, '%s.jpeg' % f)[0] for f in basenames]
res = zip( jsons, sgfs, jpegs)
return res
# Read both files and return sgf with intersections in GC tag
#-------------------------------------------------------------
def fix( jsonfile, sgffile):
jobj = json.load( open( jsonfile))
sgf = open( sgffile).read()
boardsz = len(jobj)
coords = [0] * boardsz * boardsz
if boardsz != 19:
print ('Size %d is not a 19x19 board. Skipping' % boardsz)
return ''
for c,col in enumerate( jobj):
for r,isec in enumerate( col):
idx = r * boardsz + c
coords[idx] = (isec['x'],isec['y'])
tstr = json.dumps( coords)
tstr = re.sub( '\[','(',tstr)
tstr = re.sub( '\]',')',tstr)
tstr = 'GC[intersections:' + tstr + ']'
res = sgf
res = re.sub( '(SZ\[[^\]]*\])', r'\1' + tstr, res)
res = re.sub( r'\s*','', res)
return res
#-----------
def main():
if len(sys.argv) == 1:
usage(True)
parser = argparse.ArgumentParser(usage=usage())
parser.add_argument( "--folder", required=True)
args = parser.parse_args()
outfolder = args.folder + '/json2sgf/'
os.makedirs( outfolder)
file_triples = collect_files( args.folder)
for p in file_triples:
newsgf = fix( p[0], p[1])
if not newsgf: continue
fname = outfolder + os.path.basename( p[1])
shutil.copy( p[2], outfolder)
with open( fname, 'w') as f:
f.write( newsgf)
if __name__ == '__main__':
main()
| true |
17d6b6f4b17184780a2d7b2f44b2a05fea2519d7 | Python | Nostalogicwh/Leetcode | /m_单调递增的数字/738.py | UTF-8 | 933 | 3.234375 | 3 | [] | no_license | class Solution:
def monotoneIncreasingDigits(self, N: int) -> int:
nums = list(str(N))
length = len(nums)
begin = 0
# N 是否符合条件
is_result = True
max_num = float('-inf')
# 从前往后观察
for i in range(1, length):
num = int(nums[i])
pre_num = int(nums[i - 1])
# 记录最大值
if pre_num > max_num:
begin = i - 1
max_num = pre_num
if pre_num > num:
is_result = False
break
# 如果 N 本身符合条件,直接返回 N
if is_result:
return N
# begin 位置减去 1,后面全部替换为 9
nums[begin] = str(int(nums[begin]) - 1)
for i in range(begin + 1, length):
nums[i] = '9'
return int("".join(nums)) | true |
15182cda71eb15af7f2c9b2c217a70b1d314e589 | Python | programmerashishkumarverma/Web-Broswer-Automation-to-send-email | /.gitignore/Sending Email Automation.py | UTF-8 | 9,106 | 2.75 | 3 | [] | no_license | import tkinter as tk # python 3
from tkinter import font as tkfont # python 3
import tkinter.messagebox
import time
import pyautogui as py
class SampleApp(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
self.title_font = tkfont.Font(family='Helvetica', size=18, weight="bold", slant="italic")
# the container is where we'll stack a bunch of frames
# on top of each other, then the one we want visible
# will be raised above the others
container = tk.Frame(self)
container.pack(side="top", fill="both", expand=True)
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
menubar = tk.Menu(container)
filemenu = tk.Menu(menubar, tearoff=0)
filemenu.add_command(label="Save settings", command = lambda: popupmsg("Not supported just yet!"))
filemenu.add_separator()
filemenu.add_command(label="Exit", command=quit)
menubar.add_cascade(label="File", menu=filemenu)
self.frames = {}
for F in (StartPage, PageOne, PageTwo):
page_name = F.__name__
frame = F(parent=container, controller=self)
self.frames[page_name] = frame
# put all of the pages in the same location;
# the one on the top of the stacking order
# will be the one that is visible.
frame.grid(row=0, column=0, sticky="nsew")
self.show_frame("StartPage")
def show_frame(self, page_name):
'''Show a frame for the given page name'''
frame = self.frames[page_name]
frame.tkraise()
class StartPage(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.controller = controller
# label = tk.Label(self, text="This is the start page", font=controller.title_font)
# label.pack(side="top", fill="x", pady=10)
label = tk.Label(self, text='Welcome, To')
label.config(font=("Courier", 44))
label.place(x=20, y=20)
label.pack()
label2 = tk.Label(self, text='Ultimate Logger')
label2.config(font=("Courier", 44))
label2.place(x=20, y=50)
label2.pack()
label3 = tk.Label(self, text='Make Sure that your screen Resoulution is 1366x768')
label3.config(font=("Courier", 30))
label3.place(x=20, y=500)
label3.pack()
next_button = tk.Button(self, text='Next', command=lambda: controller.show_frame('PageOne'))
next_button.pack()
label4 = tk.Label(self, text='Made by Ashish', pady=500)
label4.config(font=("Courier", 20))
label4.pack()
# button1 = tk.Button(self, text="Go to Page One",
# command=lambda: controller.show_frame("PageOne"))
# button2 = tk.Button(self, text="Go to Page Two",
# command=lambda: controller.show_frame("PageTwo"))
# button1.pack()
# button2.pack()
def Run_fire():
pass
def askquestionchrome():
answer = tk.messagebox.askquestion('NOTE', 'Do You Have Any Attachments')
if answer == 'yes':
tk.messagebox.showinfo('READ', "when atuomation starts don't do anything to the computer when attachments folder open time span will be open for only 25 sec open your file and submit it and Make Sure your Window Has full size ")
Run_Chrome_attachment()
else:
tk.messagebox.showinfo('READ', "when atuomation starts don't do anything to the computer when attachments folder open time span will be open for only 25 sec open your file and submit it and Make Sure your Window Has full size")
Run_Chrome()
print(1366/2)
print(768/2)
class PageOne(tk.Frame):
def __init__(self, parent, controller):
global Name_entry
global password
global t
global s
global m
tk.Frame.__init__(self, parent)
self.controller = controller
name_label = tk.Label(self, text='Email')
name_label.config(font=("Courier", 30))
name_label.config(font=("Courier", 30))
name_label.grid(row=0, column=0)
Name_entry = tk.Entry(self)
Name_entry.grid(row=0, column=1)
passwordl= tk.Label(self, text='Password')
passwordl.config(font=("Courier", 30))
passwordl.grid(row=1, column=0)
password = tk.Entry(self)
password.grid(row=1, column=1)
to= tk.Label(self, text='To')
to.config(font=("Courier", 30))
to.grid(row=2, column=0)
t = tk.Entry(self)
t.grid(row=2, column=1)
subject= tk.Label(self, text='Subject')
subject.config(font=("Courier", 30))
subject.grid(row=3, column=0)
s = tk.Entry(self)
s.grid(row=3, column=1)
M= tk.Label(self, text='Message')
M.config(font=("Courier", 30))
M.grid(row=4, column=0)
m = tk.Entry(self)
m.grid(row=4, column=1)
button = tk.Button(self, text='Submit', command=lambda: controller.show_frame('PageTwo'))
button.grid(row=5, column=1)
button = tk.Button(self, text='Submit', command=lambda: controller.show_frame('PageTwo'))
button.grid(row=5, column=1)
# class Chrome:
# global Name_entry
#
# email = Name_entry.get()
#
def Run_Chrome_attachment():
    """Drive the desktop with pyautogui to send a Gmail message, pausing
    25 seconds for the user to pick an attachment.

    All coordinates are hard-coded for a maximised window on a 1366x768
    screen; the sleeps give each page time to load.  NOTE(review):
    coordinate-based automation is extremely fragile -- any layout or
    resolution change breaks it.
    """
    # https://accounts.google.com/signin/v2/identifier?hl=en&service=local&flowName=GlifWebSignIn&flowEntry=ServiceLogin
    global Name_entry
    global password
    global t
    global s
    global m
    # Read the values typed into the PageOne form.
    email = Name_entry.get()
    passw = password.get()
    To = t.get()
    subject = s.get()
    message = m.get()
    # Open the Start menu / search and launch Google Chrome.
    py.click(25, 738)
    time.sleep(1)
    py.click(88, 696)
    time.sleep(1)
    py.typewrite('Google Chrome')
    time.sleep(1)
    py.click(111, 226)
    time.sleep(20)
    # Maximise the window and focus the address bar.
    py.click(1349,46)
    py.click(1164, 121)
    time.sleep(5)
    py.click(443,47)
    # Navigate to the Google sign-in page.
    py.typewrite('https://accounts.google.com/signin/v2/identifier?hl=en&service=local&flowName=GlifWebSignIn&flowEntry=ServiceLogin')
    py.typewrite(['enter'])
    time.sleep(10)
    # Enter the e-mail address, then the password.
    py.click(559,364)
    py.typewrite(email)
    time.sleep(0.1)
    py.click(816,536)
    time.sleep(10)
    py.click(559,375)
    py.typewrite(passw)
    py.click(819,448)
    time.sleep(10)
    # Open Gmail from the Google apps menu.
    py.click(1217,94)
    time.sleep(0.1)
    py.click(1090,388)
    time.sleep(10)
    # Compose: recipient, subject, body.
    py.click(81, 189)
    time.sleep(4)
    py.click(909,316)
    py.typewrite(To)
    time.sleep(1)
    py.click(880,357)
    py.typewrite(subject)
    time.sleep(1)
    py.click(817,400)
    py.typewrite(message)
    time.sleep(1)
    # Open the attachment dialog, give the user 25 s to pick a file, then send.
    py.click(969,706)
    time.sleep(25)
    py.click(881,705)
def Run_Chrome():
    """Same coordinate-driven Gmail automation as Run_Chrome_attachment,
    but sends the message immediately without the attachment pause.

    NOTE(review): this duplicates Run_Chrome_attachment except for the last
    two steps; consider factoring the shared sequence into one helper.
    """
    # https://accounts.google.com/signin/v2/identifier?hl=en&service=local&flowName=GlifWebSignIn&flowEntry=ServiceLogin
    global Name_entry
    global password
    global t
    global s
    global m
    # Read the values typed into the PageOne form.
    email = Name_entry.get()
    passw = password.get()
    To = t.get()
    subject = s.get()
    message = m.get()
    # Launch Chrome via the Start menu search.
    py.click(25, 738)
    time.sleep(1)
    py.click(88, 696)
    time.sleep(1)
    py.typewrite('Google Chrome')
    time.sleep(1)
    py.click(111, 226)
    time.sleep(20)
    # Maximise, focus the address bar, and open the sign-in page.
    py.click(1349,46)
    py.click(1164, 121)
    time.sleep(5)
    py.click(443,47)
    py.typewrite('https://accounts.google.com/signin/v2/identifier?hl=en&service=local&flowName=GlifWebSignIn&flowEntry=ServiceLogin')
    py.typewrite(['enter'])
    time.sleep(10)
    # Credentials.
    py.click(559,364)
    py.typewrite(email)
    time.sleep(0.1)
    py.click(816,536)
    time.sleep(10)
    py.click(559,375)
    py.typewrite(passw)
    py.click(819,448)
    time.sleep(10)
    # Open Gmail and compose the message.
    py.click(1217,94)
    time.sleep(0.1)
    py.click(1090,388)
    time.sleep(10)
    py.click(81, 189)
    time.sleep(4)
    py.click(909,316)
    py.typewrite(To)
    time.sleep(1)
    py.click(880,357)
    py.typewrite(subject)
    time.sleep(1)
    py.click(817,400)
    py.typewrite(message)
    time.sleep(1)
    # Send.
    py.click(881,705)
# button = tk.Button(self, text="Go to the start page",
# command=lambda: controller.show_frame("StartPage"))
# button.pack()
class PageTwo(tk.Frame):
    """Browser-selection page; Chrome is currently the only option."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        self.controller = controller
        label = tk.Label(self, text="Choose Your WebBroswer")
        label.place(x=100, y=100)  # NOTE(review): pack() below overrides this place()
        label.config(font=("Courier", 30))
        label.pack()
        Chrome = tk.Button(self, text='Chrome', command=askquestionchrome)
        Chrome.pack()
if __name__ == "__main__":
app = SampleApp()
app.geometry('1366x768')
app.mainloop()
| true |
16efb42e59f286c4c33d2ef5035e7b59eadf884f | Python | YorkFish/learning_notes | /Souvenir/Python2/05_note.py | UTF-8 | 302 | 3.59375 | 4 | [] | no_license | # -*-coding:utf-8-*-
# 1. Raw strings: the r prefix disables backslash escape processing,
#    so s1 contains "\n" as a newline while s2 keeps it literally.
s1 = "C:\newPython\bin"
s2 = r"C:\newPython\bin"
print s1
print s2
# 2. Unicode literal (Python 2): the u prefix yields a unicode object.
s3 = u"spend"
print s3
print "type(s3) = %s" % type(s3)
# 3. Case-conversion methods on str.
s4 = "gooD oF boY"
print s4
print s4.lower()
print s4.upper()
print s4.swapcase() # swap the case of every letter
print s4.capitalize()
print s4.title()
db14430f195c94e3463fcfa49f6ab56c321b6633 | Python | MurthyVemuri/Solutions---While-SEF | /Problem28.py | UTF-8 | 226 | 3.203125 | 3 | [] | no_license | def compute(m):
ans = 1
ans = 4*(m ** 3) + 3*(m ** 2) + 8*m - 9;
return str(ans/6)
if __name__ == "__main__":
count = int(input())
for i in range(count):
size = int(input())
print(compute(size))
| true |
024220232371ae5bd19d53ec8412a2f3ba7b337d | Python | oliver-soeser/advent-of-code-2020-python-solutions | /4/4_p1.py | UTF-8 | 552 | 3.234375 | 3 | [] | no_license | f = open("input.txt", "r")
passports = [{}]
i = 0
# Parse the batch file: passports are separated by blank lines; every
# whitespace-separated token on a line is a key:value pair.
for line in f:
    if line != "\n":
        for item in line.split():
            passports[i][item.split(":")[0]] = item.split(":")[1]
    else:
        passports.append({})
        i += 1
n = 0
# A passport is valid when all seven required fields are present (cid is
# optional); any missing key raises KeyError and lands in the except arm.
for j in range(len(passports)):
    try:
        print(passports[j]["byr"])
        print(passports[j]["iyr"])
        print(passports[j]["eyr"])
        print(passports[j]["hgt"])
        print(passports[j]["hcl"])
        print(passports[j]["ecl"])
        print(passports[j]["pid"])
        #print(passports[j]["cid"])
        n += 1
        print("Valid!")
    # NOTE(review): bare except also swallows unrelated errors; KeyError
    # is what is actually expected here.
    except:
        print("Invalid!")
print(n)
e281530182b6d86ae8a0feaf7b1143b7a81f86e8 | Python | cnlong/everyday_python | /22_File-Backup/code/test1 (2).py | UTF-8 | 665 | 2.71875 | 3 | [] | no_license | from PIL import Image, ImageDraw, ImageFont, ImageFilter
import string
import random
# Create a gray 140x50 background and persist it as the captcha canvas.
back = Image.new("RGB", [140, 50], "gray")
back.save("vcode.jpg")
im = Image.open("vcode.jpg")
myfont = ImageFont.truetype(r'C:\Windows\Fonts\Arial.ttf', size=40)
draw = ImageDraw.Draw(im)
# Pick four random upper-case letters for the code.
num = list()
for i in range(4):
    word = random.choice(string.ascii_uppercase)
    num.append(word)
# print(num)
# Draw the letters 30 px apart, then blur the image to make OCR harder.
draw.text((10,5), str(num[0]), font=myfont, fill="red")
draw.text((40,5), str(num[1]), font=myfont, fill="red")
draw.text((70,5), str(num[2]), font=myfont, fill="red")
draw.text((100,5), str(num[3]), font=myfont, fill="red")
vcode = im.filter(ImageFilter.BLUR)
vcode.show()
61ad4f33dbee0b9c59b64b4fe4bab622d4891dff | Python | Marteinn-Vidir/Assignment-5 | /sequence.py | UTF-8 | 543 | 3.953125 | 4 | [] | no_license | # A program that askes for a length of sequence
# The longer the requested sequence, the more numbers are printed.
n = int(input("Enter the length of the sequence: ")) # Do not change this line
# Tribonacci-style sequence seeded with 1, 2, 3; every later term is the
# sum of the previous three terms.
tala_1 = 1
tala_2 = 2
tala_3 = 3
for i in range(1, n+1):
    if i == 1:
        print(tala_1)
    elif i == 2:
        print(tala_2)
    elif i == 3:
        print(tala_3)
    else:
        tala_summa = tala_1 + tala_2 + tala_3
        tala_1 = tala_2
        tala_2 = tala_3
        tala_3 = tala_summa
        print(tala_summa)
d89a051651d9ddbde1966c30a6a99fe25b256b4d | Python | RuchitaD1/CrawlerIndexer | /bs.py | UTF-8 | 863 | 2.671875 | 3 | [] | no_license | import urllib2
from bs4 import BeautifulSoup
import re
import requests
import sys
reload(sys)
rem=[]
sys.setdefaultencoding('utf8')
def scape(urlText):
    """Fetch *urlText* and return the href values of its <a> tags that do
    not match any of the filter patterns.

    Fixes over the original:
      * the six patterns were recompiled and appended to the module-level
        ``rem`` list for *every* link on *every* call, so the list grew
        without bound;
      * a link was appended once per NON-matching pattern, producing
        duplicate entries; it is now appended at most once.

    The pattern strings themselves are kept verbatim (including the
    ``\1`` escapes, which Python treats as octal escapes in these
    non-raw literals).
    """
    filters = [
        re.compile('^(file|ftp|mailto):'),
        re.compile('.*(/[^/]+)/[^/]+\1/[^/]+\1/'),
        re.compile('[?*!@=]'),
        re.compile("(?i)\.(gif|jpg|png|ico|css|sit|eps|wmf|zip|ppt|mpg|xls|gz|rpm|tgz|mov|exe|jpeg|bmp|js)$"),
        re.compile('(?::\d+)?(?:/|$)'),
        re.compile('(?:/|$)'),
    ]
    outlinks = []
    url = urllib2.urlopen(urlText).read()
    soup = BeautifulSoup(url, features="html.parser")
    for line in soup.find_all('a'):
        link = str(line.get('href'))
        if not any(re.match(exp, link) for exp in filters):
            outlinks.append(link)
    return outlinks
fe759cee2c358977bcf6268eb3485bfca0935fa7 | Python | lericson/irken | /irken/base.py | UTF-8 | 4,008 | 3.21875 | 3 | [] | no_license | import logging
from irken.nicks import Mask
from irken.parser import parse_line, build_line
logger = logging.getLogger("irken.base")
class BaseConnection(object):
    """Very basic connection.
    This essentially knows how to parse IRC data and build lines. It doesn't
    know /how/ to send, or how to dispatch, and so on, but it does know that
    it should send etc.
    """
    def __init__(self, nick):
        # make_io is supplied by an I/O mixin; see connect()/run()/send_cmd().
        self.io = self.make_io()
        self.nick = nick
        # Cache mapping prefix key -> source object, filled by lookup_prefix.
        self._prefix_cache = {}
    @property
    def mask(self):
        """This connection's own mask, built from its nick."""
        return Mask(self.nick)
    def __eq__(self, other):
        """Compare by nick against anything that has a ``nick`` attribute."""
        if hasattr(other, "nick"):
            return self.nick == other.nick
        return NotImplemented
    def connect(self, *args, **kwds):
        """Connect to something. This is outsourced to io."""
        return self.io.connect(*args, **kwds)
    def parse_line(self, line):
        """Split a raw IRC line into (prefix, command, args)."""
        return parse_line(line)
    def build_line(self, prefix, command, args):
        """Assemble (prefix, command, args) back into a raw IRC line."""
        return build_line(prefix, command, args)
    def send_cmd(self, prefix, command, args):
        """Send an IRC command."""
        line = self.build_line(prefix, command, args)
        logger.debug("send " + repr(line))
        self.io.deliver(line + "\r\n")
    def recv_cmd(self, prefix, command, args):
        """Receive an IRC command."""
        raise NotImplementedError("dispatch mixin")
    def run(self, *args, **kwds):
        """Run the I/O loop, feeding received data through consume()."""
        kwds.setdefault("consumer", self.consume)
        return self.io.run(*args, **kwds)
    def consume(self, data):
        """Consume every line in string *data*, returning any incomplete data
        found.
        This really just iterates through each line, parses it and calls
        `self.recv_cmd`.
        """
        lines = data.replace("\r\n", "\n").replace("\r", "\n").split("\n")
        # The final element is an incomplete trailing fragment (possibly "").
        trail = lines.pop()
        for line in lines:
            logger.debug("recv " + repr(line))
            self.recv_cmd(*self.parse_line(line))
        return trail
    def lookup_prefix(self, prefix):
        """Turn *prefix* into an actual source with similar behavior to this
        instance itself.
        This default implementation does nothing smart, it's more of a factory
        with a cache than anything else. (Which means it wastes memory, yeah.)
        RemoteSource is used for unknowns, which is created through
        self.make_source. (So look there if you want to change stuff.)
        >>> from irken.tests import TestConnection
        >>> bc = TestConnection("self")
        >>> bc.lookup_prefix(("other",))
        <RemoteSource ('other',)>
        Though, if the mask looked up matches the instance's own nickname, then
        self is returned:
        >>> bc.lookup_prefix(("self",)) is bc
        True
        This is actually used for channels and other things as well (server
        names most notably), and a "source" is a very vague term by intent.
        >>> bc.lookup_prefix(("#im.a.channel",))
        <RemoteSource ('#im.a.channel',)>
        Regularly, this will be running on mask instances:
        >>> from irken.nicks import Mask
        >>> bc.lookup_prefix(Mask.from_string("self!foo@bar")) is bc
        True
        """
        # TODO There really should be some eviction strategy for entries in the
        # cache, but hey... Realistically, I can leak that memory.
        cache = self._prefix_cache
        key = prefix[0] if prefix else prefix
        if key == self.nick:
            return self
        if key not in cache:
            value = cache[key] = self.make_source(prefix)
            return value
        return cache[key]
    def make_source(self, prefix):
        """Factory hook for unknown prefixes; override to customise."""
        return RemoteSource(prefix)
class RemoteSource(object):
    """Minimal stand-in for a peer we only know by its prefix/mask."""

    def __init__(self, mask):
        self.mask = mask
        # First component of the mask is the nick; an empty mask is kept as-is.
        if mask:
            self.nick = mask[0]
        else:
            self.nick = mask

    def __repr__(self):
        return "<RemoteSource {!r}>".format(self.mask)
if __name__ == "__main__":
import doctest
doctest.testmod()
| true |
da7dbf614aa62fbd6babdd67fb906a7a5858b7be | Python | ik486/kivy_examples | /basic/03.scatter/bad/z.py | UTF-8 | 1,099 | 2.890625 | 3 | [] | no_license |
from kivy.uix.image import Image
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.scatter import Scatter
from kivy.uix.scatterlayout import ScatterLayout
import random
class MyImage(Scatter):
    """A draggable rose image placed at a random offset around screen centre."""

    def __init__(self, no, **kwargs):
        super(MyImage, self).__init__(**kwargs)
        self.no = no  # index used to identify the widget when touched
        self.image = Image(source="bluerose.png")
        self.add_widget(self.image)
        self.size_hint = None, None
        self.width = 140
        self.height = 140
        # Random position around the centre of an 800x600 window.
        self.pos = 400-70+random.randint(-200,200), 300-70+random.randint(-150,150)
    def on_touch_down(self, touch):
        # NOTE(review): touch.pos is in parent coordinates while the child
        # image lives in scatter-local coordinates -- hit tests may be off
        # once the scatter is moved (hence the "bad/" directory name?).
        if self.image.collide_point( *touch.pos):
            print self.no
        else:
            print "failed", self.no
        return super(MyImage, self).on_touch_down(touch)
class MyBigImage(FloatLayout):
    """Layout that spawns 20 randomly-placed MyImage widgets."""
    def __init__(self, **kwargs):
        super(MyBigImage, self).__init__(**kwargs)
        for i in range(20):
            image = MyImage(i)
            self.add_widget(image)
if __name__ == '__main__':
    from kivy.app import App
    class TestApp(App):
        """Minimal app that shows the scatter-image demo as its root."""
        def build(self):
            self.root = MyBigImage()
            return self.root
    app = TestApp()
    app.run()
| true |
af5bc8abf5b251d324eedb171916e67a6fbe0eb8 | Python | huvers/whaleID | /move_files.py | UTF-8 | 1,306 | 3.015625 | 3 | [] | no_license | __author__ = 'Sean Huver'
import os
import sys
import csv
# train.csv should be in same directory as python file. User points to img directory.
def main():
if len(sys.argv) < 2:
print "No folder given to create image list."
print "Usage: python list_images.py [directory]"
sys.exit()
target_dir = sys.argv[1]
with open('train.csv') as csvfile:
reader = csv.DictReader(csvfile)
tempID = 'temp'
for row in reader:
print row['whaleID']
mypath = target_dir + '/' + row['whaleID']
if not os.path.isdir(mypath):
if tempID != row['whaleID']: # if new whaleID - Make whaleID folder
if not os.path.isdir(mypath): # if dir doesn't exist, make it
os.makedirs(mypath)
print row['Image'], mypath + '/' + row['Image']
os.rename(target_dir + '/' + row['Image'], mypath + '/' + row['Image'])
tempID = row['whaleID']
else: # else move image to folder that exists
if os.path.isfile(target_dir + '/' + row['Image']): # if image exists, move it
os.rename(target_dir + '/' + row['Image'], mypath + '/' + row['Image'])
if __name__ == "__main__":
main()
| true |
9886ee65d563d3314bdc8c5fd63736ede77deb29 | Python | mandgerohit/riot_predictor | /rfmodel.py | UTF-8 | 2,007 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 28 00:11:43 2016
@author: Annie Tran
"""
#random forest
from sklearn.ensemble import RandomForestClassifier
from numpy import genfromtxt, savetxt
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
# Load the riot dataset from the spreadsheet (absolute local path).
xl = pd.ExcelFile('/Users/isarasuntichotinun/Desktop/ANNIE/CSC 591/riot_predictor/completedata.xlsx')
xl.sheet_names
fulldata=xl.parse('Sheet1')
fulldata = pd.DataFrame(fulldata)
fulldata = fulldata.reset_index(drop=True)
# Drop columns that are not used as features.
fulldata = fulldata.drop('locality',1)
fulldata = fulldata.drop('country',1)
fulldata = fulldata.drop('Duration',1)
#fulldata['violentce rating']=map(lambda x: 2 if x < 0 else 1, fulldata['violentce rating'])
# Cast categorical / numeric columns to their proper dtypes.
fulldata['target'] = pd.Categorical(fulldata['target'])
fulldata['npart'] = pd.Categorical(fulldata['npart'])
fulldata['issue'] = pd.Categorical(fulldata['issue'])
fulldata['riot'] = pd.Categorical(fulldata['riot'])
fulldata['crime rate'] = pd.to_numeric(fulldata['crime rate'])
fulldata['deaths'] = pd.to_numeric(fulldata['deaths'])
fulldata['violentce rating'] = pd.to_numeric(fulldata['violentce rating'])
#fulldata['violentce rating'] = pd.Categorical(fulldata['violentce rating'])
#randomize the rows, frac tells it to return all rows
fulldata = fulldata.sample(frac=1).reset_index(drop=True)
# Train/test split.  NOTE(review): row 4303 is in neither set
# (train is 0..4302, test starts at 4304) -- verify this is intended.
train = fulldata.iloc[0:4303]
test = fulldata.iloc[4304:]
#fit random forest
rf = RandomForestClassifier(n_estimators=100)
rf_fit = rf.fit(train.iloc[0::,0:6],train.iloc[0::,6])
#test on test data
output = rf.predict(test.iloc[0::,0:6])
outputprob = rf.predict_proba(test.iloc[0::,0:6])
# Confusion matrix (row-normalised) and hold-out accuracy; results are
# computed but not stored or printed.
df_confusion = pd.crosstab(test.iloc[0::,6], output)
df_confusion / df_confusion.sum(axis=1)
accuracy_score(test.iloc[0::,6], output)
testvalues=[[16.2, 2, 0, 1, -2.94544,10]] #remeber to do double brackets
rf.predict(testvalues)
rf.predict_proba(testvalues)
#save model
import pickle
f = open('rfmodel.pickle','wb')
pickle.dump(rf_fit, f)
f.close()
| true |
7ec61ce074cb3e430ce04a21b1b688a9441e1b23 | Python | yuriks/OpenVikings | /tools/lvtools/world.py | UTF-8 | 1,592 | 3.015625 | 3 | [] | no_license | from util import read_word, take_n
class ObjectType(object):
    """One object-type record decoded from the world data blob.

    Each record is ENTRY_SIZE (0x15) bytes; multi-byte fields are decoded
    with read_word.  Field meanings follow the offsets in __init__;
    ``unk_*`` fields are not yet reverse-engineered.
    """
    ENTRY_SIZE = 0x15
    def __init__(self, data):
        self.gfx_chunk_id = read_word(data, 0x0)
        self.num_sprites = data[0x2]
        self.script_entry_point = read_word(data, 0x3)
        self.unk_5 = read_word(data, 0x5)
        self.unk_7 = read_word(data, 0x7)
        self.width = data[0x9]
        self.height = data[0xA]
        self.unk_B = read_word(data, 0xB)
        self.unk_D = read_word(data, 0xD)
        self.unk_F = read_word(data, 0xF)
        self.max_x_velocity = read_word(data, 0x11)
        self.max_y_velocity = read_word(data, 0x13)
    # Template used by __str__; {0} is the ObjectType instance.
    format_str = (
        """Graphics Chunk: {0.gfx_chunk_id:04X}h
Number of sprites: {0.num_sprites}
Script entry point: {0.script_entry_point:04X}h + 3
unk_5: {0.unk_5:X}h
unk_7: {0.unk_7:X}h
Width: {0.width}
Height: {0.height}
unk_B: {0.unk_B:X}h
unk_D: {0.unk_D:X}h
unk_F: {0.unk_F:X}h
Max X velocity: {0.max_x_velocity}
Max Y velocity: {0.max_y_velocity}""")
    def __str__(self):
        return self.format_str.format(self)
def parse_object(world_data, obj_idx):
    """Decode the *obj_idx*-th object-type entry from *world_data*."""
    size = ObjectType.ENTRY_SIZE
    start = obj_idx * size
    return ObjectType(world_data[start:start + size])
def parse_objects(world_data, num_objects):
    """Lazily yield the first *num_objects* ObjectType entries of *world_data*."""
    entry_size = ObjectType.ENTRY_SIZE
    relevant = world_data[:num_objects * entry_size]
    return (ObjectType(chunk) for chunk in take_n(relevant, entry_size))
def print_objects(world_data, num_objects):
    # Dump every object type to stdout (Python 2 print statement).
    for i, obj in enumerate(parse_objects(world_data, num_objects)):
        print "=== Object type %Xh\n%s\n" % (i, obj)
| true |
23542d7e5f8b071bb9d491737df984791a3f0e9e | Python | developyoun/AlgorithmSolve | /solved/10844.py | UTF-8 | 357 | 2.953125 | 3 | [] | no_license | mod = 1000000000
dp = [[0]*10 for _ in range(101)]
for i in range(1, 10):
dp[1][i] = 1
for i in range(2, 101):
for j in range(10):
if j + 1 < 10:
dp[i][j+1] = (dp[i][j+1] + dp[i-1][j]) % mod
for j in range(10):
if 0 <= j - 1:
dp[i][j-1] = (dp[i][j-1] + dp[i-1][j]) % mod
print(sum(dp[int(input())])%mod) | true |
2e132d5a0aa707708127379020e2835a921aa4e7 | Python | jacebrowning/gridcommand | /app/enums.py | UTF-8 | 754 | 2.890625 | 3 | [] | no_license | from enum import Enum
class Color(Enum):
    """Player colours mapped to their Bootstrap CSS class suffixes."""

    BLUE = "primary"
    RED = "danger"
    GREEN = "success"
    YELLOW = "warning"
    NONE = "dark"

    @property
    def key(self) -> str:
        """Lower-case member name, e.g. ``Color.BLUE.key == "blue"``."""
        return self.name.lower()

    @property
    def title(self) -> str:
        """Title-case member name, e.g. ``"Blue"``."""
        return self.name.title()

    @property
    def icon(self) -> str:
        """Coloured-square emoji for this colour."""
        by_name = {
            "BLUE": "🟦",
            "RED": "🟥",
            "GREEN": "🟩",
            "YELLOW": "🟨",
            "NONE": "⬜",
        }
        return by_name[self.name]
class State(Enum):
    """Per-player game state; UNKNOWN maps to the absent value ``None``."""

    UNKNOWN = None
    READY = "ready"
    PLANNING = "planning"
    WAITING = "waiting"

    @property
    def title(self) -> str:
        """Title-case member name, e.g. ``State.READY.title == "Ready"``."""
        return self.name.title()
| true |
a3b5f0e7f1c40910f096c634d5c9c6d3461a3194 | Python | ramuklihtnes/sk | /natural.py | UTF-8 | 71 | 3.5 | 4 | [] | no_license | a=input()
b=int(a)
# Accumulate 1 + 2 + ... + b.
sum=0  # NOTE(review): shadows the built-in sum()
for i in range(1,b+1):
    sum=sum+i
print(sum)
| true |
331791b7f64fea144389c42977f26cfb7fb51fb0 | Python | YB947624487/RBAC | /s9day88/s9day88/app01/tests.py | UTF-8 | 653 | 3.53125 | 4 | [] | no_license | from django.test import TestCase
# Create your tests here.
# class A(object):
#
# x=12
#
# def xxx(self):
# print(self.x)
#
#
# class B(A):
# y=5
#
# b=B()
# b.xxx()
#######################################
#
# class Person(object):
# def __init__(self,name):
# self.name=name
#
# alex=Person("alex")
#
# s="name"
#
#
# print(getattr(alex,s))
########################################
class Person(object):
    """Toy class used to demonstrate instance methods vs. plain functions."""

    def __init__(self, name):
        self.name = name

    def eat(self):
        # Show which object the method was invoked on, then the action.
        print(self)
        print("eat....")
# 实例方法
# egon=Person("egon")
# egon.eat()
# 函数
# Person.eat(123)
| true |
a9b11f238e06472141bc8d780dad7ddbbf91ffab | Python | wisest30/AlgoStudy | /source/leetcode/1632/hyoseong.py | UTF-8 | 1,038 | 2.609375 | 3 | [] | no_license | class Solution:
    def matrixRankTransform(self, matrix: List[List[int]]) -> List[List[int]]:
        """Replace each cell by its "rank" (LeetCode 1632): ranks start at 1,
        respect value order within every row and column, and are as small
        as possible.

        Strategy: process distinct values in ascending order.  Equal cells
        that share a row or column must receive the same rank, so rows
        (union-find indices 0..n-1) and columns (indices n..n+m-1) are
        merged; a group's new rank is one more than the largest rank any
        of its rows/columns already holds.
        """
        n = len(matrix)
        m = len(matrix[0])
        # G: union-find parent per row/column; G_max: best rank in the group.
        G = [0] * (n + m)
        G_max = [0] * (n + m)
        def get_group(x) :
            # Find with path compression.
            if G[x] == x :
                return x
            G[x] = get_group(G[x])
            return G[x]
        def merge_group(x, y) :
            # Union x's group into y's, keeping the maximum rank of either.
            gx, gy = get_group(x), get_group(y)
            G_max[gy] = max(G_max[gx], G_max[gy])
            G[gx] = gy
        # Bucket cell coordinates by value.
        D = defaultdict(list)
        for i in range(n) :
            for j in range(m) :
                D[matrix[i][j]].append([i, j])
        ret = [[0] * m for _ in range(n)]
        # rank[k]: current rank of row k (k < n) or column k-n (k >= n).
        rank = [0] * (n + m)
        for key in sorted(D.keys()) :
            # Fresh union-find for this value, seeded with the current ranks.
            G = list(range(n + m))
            G_max = rank[:]
            for i, j in D[key] :
                merge_group(i, n + j)
            for i, j in D[key] :
                rank[i] = rank[n + j] = ret[i][j] = G_max[get_group(i)] + 1
        return ret
| true |
0172630f88d2c4d5dca806b41db67d6248d5fa68 | Python | anorak6435/CStupid | /cstupid/cststatemachine.py | UTF-8 | 772 | 3.3125 | 3 | [
"LicenseRef-scancode-other-permissive",
"MIT"
] | permissive | # setting up a state machine that could handle parsing a token stream into a syntax tree. following a grammar
from typing import Tuple
from .statemachine import State
# the machine that uses the grammar to parse language rules
class CstMachine:
    """State machine that will parse a token stream according to a grammar."""

    def __init__(self, grammar : str = "") -> None:
        # Start in the Start state with the (possibly empty) grammar text.
        self.grammar = grammar
        self.state = Start()
    def on_event(self, event):
        """
        Incoming tokens from the grammar are given as events
        to the state machine to be handled.
        """
        self.state.on_event(event)
    def run_grammar(self):
        # TODO: run the grammar from the language to change the machine state.
        pass
# The states the machine can be in.
class Start(State):
    """Initial parser state; no behaviour implemented yet."""
    def __init__(self):
        pass
5f47195c624d8b84b2ae578b36323c45cefe58fe | Python | tanle2694/pattern_recognition | /python_script/fig_1_16.py | UTF-8 | 849 | 3.203125 | 3 | [] | no_license |
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
from utils import arrowed_spines
def main():
    """Reproduce PRML figure 1.16: the curve y = x**3 with the Gaussian
    predictive distribution p(t | x0, w, beta) drawn sideways at x0 = 1.5.
    """
    x = np.linspace(-2, 3, 1000)
    y = x**3
    plt.xlim(-2, 3)
    plt.ylim(-5, 20)
    # Vertical marker at x0 = 1.5 and horizontal dashed line at y(x0).
    plt.axvline(1.5, 0, 1, color='black')
    plt.axhline(y= 1.5**3, xmax=3.5/5, linestyle='--', color='g')
    # Gaussian pdf over t, rotated: pdf values offset by x0 on the x-axis.
    y_gaussian = np.linspace(-5, 20, 1000)
    x_gaussian = stats.norm(1.5**3, 1).pdf(y_gaussian) + 1.5
    plt.plot(x_gaussian, y_gaussian, color='b')
    plt.annotate('$y(x_0, w)$', xy=(-2.5, 1.5**3), annotation_clip=False)
    plt.annotate("y(x, w)", xy=(2, 18))
    plt.annotate("$p(t|x_0,w,\\beta)$", xy=(1.8, 2))
    # Hide the top/right spines and draw arrowed axes.
    plt.gca().spines['right'].set_color('none')
    plt.gca().spines['top'].set_color('none')
    arrowed_spines(plt.gca())
    plt.plot(x, y, color='r')
    plt.show()
if __name__ == "__main__":
main() | true |
c6c907906b960f4d6982168c553bc238cc65164a | Python | nwaggoner16/flask_planter | /PlanterPi/db.py | UTF-8 | 8,736 | 2.65625 | 3 | [] | no_license | import mysql.connector as mariadb
import configparser
import numpy
#from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
#from matplotlib.figure import Figure
#import io
#Initializing config object
config = configparser.ConfigParser()
config.read("/Users/nathanwaggoner/PlanterPi/config.ini")
#Getting db connection info
host = config.get('credentials','host')
user = config.get('credentials','user')
passwd = config.get('credentials','passwd')
db = config.get('credentials','db')
#Initializing db connection
mysql = mariadb.connect(user=user, password=passwd, database=db)
cursor = mysql.cursor()
class planter():
    """Planter looked up by name.

    Wraps the ``planter`` header row plus the planter's ``sensor_data``
    history and exposes rolling averages over the most recent readings.
    Sensor rows are ordered as (planter_id, log_id, log_time, temperature,
    humidity, light, soil_temperature, soil_moisture).
    """

    def __init__(self, planter_name):
        # Header row: (planter_id, planter_name, cons_days_watered,
        # run_speed, run_time, target_moisture) -- six columns.
        self.planter_header = self.get_planter_header_data(planter_name)
        self.planter_name = planter_name
        self.planter_id = self.planter_header[0]
        self.cons_days_watered = self.planter_header[2]
        self.planter_sensor_data = self.get_planter_sensor_data(self.planter_id)
        # Bug fix: the original read run_speed/run_time/target_moisture at
        # indices [4]/[5]/[6], but the SELECT returns only six columns, so
        # [6] raised IndexError.  Indices now match planter_by_id.
        self.run_speed = self.planter_header[3]
        self.run_time = self.planter_header[4]
        self.target_moisture = self.planter_header[5]

    def get_planter_header_data(self, planter_name):
        """Fetch the planter's header row by name as a numpy array."""
        temp_query = "SELECT planter_id, planter_name, cons_days_watered, run_speed, run_time, target_moisture FROM PlanterPi.planter WHERE planter_name = '{}';".format(planter_name)
        cursor.execute(temp_query)
        planter_header = []
        for arr in cursor:
            planter_header = arr
        return numpy.array(planter_header)

    def get_planter_sensor_data(self, planter_id):
        """Fetch all sensor rows for this planter as a numpy array."""
        sensor_data_query = "SELECT planter_id, log_id, log_time, temperature, humidity, light, soil_temperature, soil_moisture FROM PlanterPi.sensor_data WHERE planter_id = {};".format(planter_id)
        cursor.execute(sensor_data_query)
        rows = [row for row in cursor]
        return numpy.array(rows)

    def avg_moisture(self, hours=24):
        """Mean soil moisture (column 7) over the last *hours* readings."""
        recent = self.planter_sensor_data[-hours:]
        return sum(p[7] for p in recent) / hours

    def avg_temperature(self, hours=24):
        """Mean air temperature (column 3) over the last *hours* readings.

        Bug fix: the original summed column 7 (soil moisture) here;
        planter_by_id.avg_temperature correctly uses column 3.
        """
        recent = self.planter_sensor_data[-hours:]
        return sum(p[3] for p in recent) / hours

    def avg_humidity(self, hours=24):
        """Mean humidity (column 4) over the last *hours* readings.

        Bug fix: the original summed column 7 (soil moisture) here;
        planter_by_id.avg_humidity correctly uses column 4.
        """
        recent = self.planter_sensor_data[-hours:]
        return sum(p[4] for p in recent) / hours

    def update_cons_days_watered(self, days):
        """Persist a new consecutive-days-watered counter for this planter."""
        cursor.execute('update planter set cons_days_watered = {} where planter_id = {}'.format(days, self.planter_id))
        mysql.commit()

    def water_logging(self, run_time, run_speed):
        """Append a watering event to the water_log table."""
        cursor.execute('insert into PlanterPi.water_log (planter_id, run_time_sec, run_speed) values ({}, {}, {})'.format(self.planter_id, run_time, run_speed))
        mysql.commit()
class planter_by_id():
    """Planter looked up by numeric id.

    Sensor rows are ordered as (planter_id, log_id, log_time, temperature,
    humidity, light, soil_temperature, soil_moisture); the avg_* helpers
    average one of those columns over the most recent readings.
    """

    def __init__(self, planter_id):
        self.planter_header = self.get_planter_header_data(planter_id)
        self.planter_name = self.planter_header[1]
        self.planter_id = planter_id
        self.cons_days_watered = self.planter_header[2]
        self.planter_sensor_data = self.get_planter_sensor_data()
        self.run_speed = self.planter_header[3]
        self.run_time = self.planter_header[4]
        self.target_moisture = self.planter_header[5]

    def get_planter_header_data(self, planter_id):
        """Fetch this planter's header row as a numpy array."""
        temp_query = "SELECT planter_id, planter_name, cons_days_watered, run_speed, run_time, target_moisture FROM PlanterPi.planter WHERE planter_id = {};".format(planter_id)
        cursor.execute(temp_query)
        planter_header = []
        for arr in cursor:
            planter_header = arr
        return numpy.array(planter_header)

    def get_planter_sensor_data(self):
        """Fetch all sensor rows for this planter as a numpy array."""
        sensor_data_query = "SELECT planter_id, log_id, log_time, temperature, humidity, light, soil_temperature, soil_moisture FROM PlanterPi.sensor_data WHERE planter_id = {};".format(self.planter_id)
        cursor.execute(sensor_data_query)
        rows = [row for row in cursor]
        return numpy.array(rows)

    def _column_average(self, column, hours):
        # Mean of one sensor column over the last *hours* readings.
        recent = self.planter_sensor_data[-hours:]
        return sum(row[column] for row in recent) / hours

    def avg_moisture(self, hours=24):
        """Mean soil moisture over the last *hours* readings."""
        return self._column_average(7, hours)

    def avg_temperature(self, hours=24):
        """Mean air temperature over the last *hours* readings."""
        return self._column_average(3, hours)

    def avg_humidity(self, hours=24):
        """Mean humidity over the last *hours* readings."""
        return self._column_average(4, hours)

    def avg_light(self, hours=24):
        """Mean light level over the last *hours* readings."""
        return self._column_average(5, hours)

    def avg_soil_temperature(self, hours=24):
        """Mean soil temperature over the last *hours* readings."""
        return self._column_average(6, hours)

    def update_cons_days_watered(self, days):
        """Persist a new consecutive-days-watered counter."""
        cursor.execute('update planter set cons_days_watered = {} where planter_id = {}'.format(days, self.planter_id))
        mysql.commit()

    def water_logging(self, run_time, run_speed):
        """Append a watering event to the water_log table."""
        cursor.execute('insert into PlanterPi.water_log (planter_id, run_time_sec, run_speed) values ({}, {}, {})'.format(self.planter_id, run_time, run_speed))
        mysql.commit()

    def update_run_time(self, rt):
        """Persist and cache a new pump run time.

        Fix: the original never committed this UPDATE (unlike the other
        mutators), so the change was lost when the connection closed.
        """
        cursor.execute('update planter set run_time = {} where planter_id = {}'.format(rt, self.planter_id))
        mysql.commit()
        self.run_time = rt

    def update_run_speed(self, rs):
        """Persist and cache a new pump run speed.

        Fix: commit added, and the cached ``run_speed`` attribute is now
        kept in sync, matching update_run_time.
        """
        cursor.execute('update planter set run_speed = {} where planter_id = {}'.format(rs, self.planter_id))
        mysql.commit()
        self.run_speed = rs

    def plot_moisture_data(self, hours):
        """Return (log times, moistures) for the *hours* newest readings,
        queried from the database.

        Fix: the original SQL used ``limit={}``, which is not valid
        MySQL/MariaDB syntax -- ``LIMIT n`` takes no equals sign.
        """
        cursor.execute('select log_time from sensor_data where planter_id = {} order by log_time desc limit {};'.format(self.planter_id, hours))
        plot_array_time = [n[0] for n in cursor]
        cursor.execute('select soil_moisture from sensor_data where planter_id = {} order by log_time desc limit {};'.format(self.planter_id, hours))
        plot_array_moisture = [n[0] for n in cursor]
        return (plot_array_time), (plot_array_moisture)

    def plot_moisture_data_test(self, hours):
        """Like plot_moisture_data, but sliced from the cached sensor rows."""
        recent = self.planter_sensor_data[-hours:]
        times = [row[2] for row in recent]
        moistures = [row[7] for row in recent]
        return (times), (moistures)

    def get_log_times(self):
        """All log timestamps (column 2)."""
        return self.planter_sensor_data[:, 2]

    def get_soil_moisture(self):
        """All soil-moisture readings (column 7)."""
        return self.planter_sensor_data[:, 7]

    def get_temperature(self):
        """All air-temperature readings (column 3)."""
        return self.planter_sensor_data[:, 3]

    def get_soil_temperature(self):
        """All soil-temperature readings (column 6)."""
        return self.planter_sensor_data[:, 6]
class general_data():
    """Queries that are not tied to a single planter."""

    def get_planter_names(self):
        """Return (id-as-string, name) tuples for every planter, sorted by name."""
        planter_name_arr = []
        # NOTE(review): MySQL CAST normally targets CHAR rather than
        # varchar(50) -- verify this statement against the deployed server.
        cursor.execute('select distinct cast(planter_id as varchar(50)), planter_name from planter order by planter_name asc;')
        for n in cursor:
            planter_name_arr.append(n)
        return planter_name_arr
# DEBUG
# NOTE(review): these lines run at import time and require a live database
# connection; consider guarding with `if __name__ == "__main__":`.
rongo = planter_by_id(2)
print(rongo.get_soil_moisture())
#debug_ob = general_data()
#print(debug_ob.get_planter_names())
| true |
1a514b0f15aabc0a42456e18375a795e22a0971e | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2809/60724/277575.py | UTF-8 | 274 | 2.828125 | 3 | [] | no_license | n=int(input())
num=input().split()
num=[int(i) for i in num]
# chang holds the first i segment lengths, kuan the remaining ones; for
# every cut position the score is (sum of left)^2 + (sum of right)^2.
chang=[]
kuan=num.copy()
res=[]
for i in range(n-1):
    chang.append(num[i])
    kuan.remove(num[i])  # removes one occurrence equal to num[i]
    changlen=sum(chang)
    kuanlen=sum(kuan)
    res.append(changlen*changlen+kuanlen*kuanlen)
print(max(res))
4c256929ad71c406ee7deb76762ddc8ff6e3e0bd | Python | matthew-cheney/kattis-solutions | /solutions/chanukah.py | UTF-8 | 124 | 3.046875 | 3 | [] | no_license | P = int(input())
# Each case gives the case index I and the night number N; candles used
# through night N = 1 + 2 + ... + N plus one shamash per night (N total).
for p in range(P):
    I, N = [int(ea) for ea in input().split(' ')]
    print(I, sum(range(N+1)) + N)
1c7d448b9248c1cca80d8c551ccd51af4faef0d3 | Python | johnrest/lf | /lf.py | UTF-8 | 1,765 | 3.109375 | 3 | [] | no_license | import os
import glob
import sys
# Show help and exit on missing/extra arguments or an explicit --help.
if (len(sys.argv) < 2) or (len(sys.argv) > 4) or (sys.argv[1].lower() == "--help"):
    print("""
    List Files (lf) tool.
    Python script that recursively lists all
    subdirectories and files in a main directory.
    The representation is display in the console or written to a
    file
    Usage:
        python lf.py --help
        python lf.py dir [-f out_filename]
    """)
    sys.exit()
target_dir = os.path.normpath(os.path.abspath(sys.argv[1]))
# Recursively collect every node; nodes[0] is target_dir itself.
nodes = glob.glob(os.path.join(target_dir, "**"), recursive=True)
all_dirs = [target_dir]
folder_sym = "□ "
out = str()
out += folder_sym + target_dir + ":\n"
buffer_parent_dir = target_dir
for node in nodes[1:]:
    # Directories: register their parent (for indent depth) and print a branch.
    if os.path.isdir(node):
        parent_dir, child_dir = os.path.split(node)
        if len(buffer_parent_dir) < len(parent_dir):
            all_dirs.append(parent_dir)
            buffer_parent_dir = parent_dir
        # Indent depth = position of the parent in discovery order.
        indent_carry = all_dirs.index(parent_dir)+1
        out += (indent_carry-1)*"\t" + "└" + 3*"-" + folder_sym + child_dir + "\n"
    # Files: same treatment without the folder symbol.
    if os.path.isfile(node):
        parent_dir, child_file = os.path.split(node)
        if len(buffer_parent_dir) < len(parent_dir):
            all_dirs.append(parent_dir)
            buffer_parent_dir = parent_dir
        indent_carry = all_dirs.index(parent_dir)+1
        out += (indent_carry-1)*"\t" + "└" + 3*"-" + child_file + "\n"
# No -f option: print to the console.
if (len(sys.argv) == 2):
    print(out)
# -f option: write the listing into target_dir/<out_filename>.
if (len(sys.argv) > 2) and (sys.argv[2].lower() == "-f"):
    filename = sys.argv[3]
    with open( os.path.join(target_dir, filename), "w", encoding='utf-8') as f:
        f.write(out)
    print("Output to file: ", filename)
| true |
5f03ed2c9bffcde2b44188286d8b241e56dc6c97 | Python | emathian/Recuit | /recuit.py | UTF-8 | 19,708 | 2.5625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from __future__ import division
import matplotlib as mpl
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from math import sqrt
from math import exp, expm1 , log
from numpy import random
import pandas as pd
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import cm
import matplotlib.pyplot as plt
import matplotlib.colors
import numpy as np
import random
from scipy.stats import chisquare
from scipy.stats import ttest_ind
from scipy.stats import t
import statistics as s
np.random.seed()
random.seed()
def f_1 (x):
    """Evaluate the 1-D quartic test polynomial x**4 - x**3 - 20*x**2 + x + 1.

    Operates elementwise when *x* is a numpy array.
    """
    fourth_order = x ** 4
    third_order = x ** 3
    second_order = 20 * x ** 2
    return fourth_order - third_order - second_order + x + 1
def g (x,y):
    """Evaluate the separable 2-D quartic surface.

    Same quartic polynomial as f_1, applied to x and to y and summed.
    Operates elementwise when x and y are numpy arrays.
    """
    terms = (
        x ** 4, -(x ** 3), -20 * x ** 2, x, 1,
        y ** 4, -(y ** 3), -20 * y ** 2, y, 1,
    )
    return sum(terms)
def recuit_f1 ( xd,xp,t0, k, kp , tmax , A_rate_max ): # t => time and T=> Temperature
    """Simulated annealing minimisation of f_1 from a random start in [xd, xp].

    k scales the proposal step size, kp scales the acceptance probability,
    tmax caps the iteration count and A_rate_max is the lower bound on the
    acceptance rate used as a stopping criterion.
    Returns (x, f, t, prop_accep): visited positions, their costs, the final
    iteration count and the acceptance thresholds that were drawn.
    NOTE(review): np.random.normal(..., 1) returns a length-1 ndarray, so
    every position after x0 is a 1-element array rather than a scalar.
    """
    x0 =random.uniform( xd, xp )
    print(x0)
    x=[x0]
    f=[f_1(x0)]
    t=t0
    T = 1/t0
    A_rate = 1 # assume we start by decreasing the cost function
    prop_accep = []
    while t < tmax and A_rate > A_rate_max : # note: the iteration cap is an implicit condition on the cost function
        # Propose a Gaussian step whose spread shrinks with the temperature.
        xx = x[-1] + np.random.normal(0, sqrt(k* exp(-1/(1000*T))) ,1 )
        ff = f_1(xx)
        if ff < f[-1]:
            # Downhill move: always accepted.
            x.append(xx)
            f.append(ff)
        else :
            # Uphill move: accepted with a temperature-dependent probability.
            P = kp * exp( -1 / (1000*T))
            prop_accep.append(P)
            if random.uniform(0,1) < P:
                x.append(xx)
                f.append(ff)
        t+=1 # no convergence test; just advance the clock
        T = 1 / t
        A_rate = len(x)/t # moves actually accepted relative to the total number of iterations
    return x,f,t, prop_accep
def recuit_f1_p (xp, xd, t0, k, kp, tmax , A_rate_max, m ): # t => time and T=> Temperature
    """Simulated annealing of f_1 with a plateau: the acceptance probability
    uses an average energy change DE estimated over m probe steps.

    NOTE(review): the first two parameters are declared (xp, xd), the
    reverse of recuit_f1's (xd, xp); random.uniform handles either order,
    but callers should confirm which bound is which.
    Returns (x, f, t, prop_accep) as in recuit_f1.
    """
    x0 =random.uniform( xd, xp )
    prop_accep = []
    x=[x0]
    #print('x0 ', x0)
    f=[f_1(x0)]
    t=t0
    T = 1/t0
    A_rate = 1 # assume we start by decreasing the cost function
    while t < tmax : # note: the acceptance-rate stop was disabled here #and A_rate > A_rate_max
        ## plateau: estimate the mean cost change over m probe steps
        S = 0
        for i in range(m):
            xc= x[-1] + np.random.normal(0, sqrt(k* exp(-1/(1000*T))) ,1)
            #print('xc', xc, 'f_1(xc)', f_1(xc) ,'f_1(x[-1])', f_1(x[-1]) )
            S+= f_1(xc) - f_1(x[-1])
        #print('S', S )
        DE = 1/m * S
        #print('DE', DE)
        xx = x[-1] +np.random.normal(0, sqrt(k* exp(-1/(1000*T))) ,1 )
        ff = f_1(xx)
        if ff < f[-1]:
            x.append(xx)
            f.append(ff)
        else :
            # Metropolis-style acceptance driven by the estimated DE.
            P = kp * exp( -DE / (1000*T))
            prop_accep.append(P)
            if random.uniform(0,1) < P:
                x.append(xx)
                f.append(ff)
        t+=1 # no convergence test; just advance the clock
        T = 1 / t
        #A_rate = len(x)/t # moves actually accepted relative to total iterations (disabled)
    return x,f,t, prop_accep
def recuit_g (xd,xp, yd,yp, t0, k, kp , tmax , A_rate_max ): # t => time and T=> Temperature
    """Simulated annealing minimisation of the 2-D function g.

    The start point is drawn uniformly from [xd, xp] x [yd, yp].
    Returns (x, y, f, t): the visited coordinates, their costs and the
    final iteration count.
    NOTE(review): prop_accep and surface are initialised but never used
    or returned.
    """
    x0 = random.uniform( xd, xp )
    y0= random.uniform( yd, yp )
    x=[x0]
    y=[y0]
    f=[g(x0,y0)]
    t=t0
    prop_accep = []
    surface =[]
    T = 1/t0
    A_rate = 1 # assume we start by decreasing the cost function
    while t < tmax and A_rate > A_rate_max : # note: the iteration cap is an implicit condition on the cost function
        #print('DE', DE)
        # Independent Gaussian steps in x and y, shrinking with temperature.
        xx = x[-1] + np.random.normal(0, sqrt(k* exp(-1/(1000*T))) ,1 )
        yy = y[-1] + np.random.normal(0, sqrt(k* exp(-1/(1000*T))) ,1 )
        ff = g(xx,yy)
        if ff < f[-1]:
            x.append(xx)
            y.append(yy)
            f.append(ff)
        else :
            # Uphill move accepted with temperature-dependent probability.
            if random.uniform(0,1) < kp * exp( -1 / (1000*T)):
                x.append(xx)
                y.append(yy)
                f.append(ff)
        t+=1 # no convergence test; just advance the clock
        T = 1 / t
        A_rate = len(x)/t # moves actually accepted relative to the total number of iterations
    return x,y,f,t
def recuit_g_p (xd, xp , yd ,yp,t0, k, kp, tmax , A_rate_max, m ): # t => time and T=> Temperature
    """Simulated annealing of g with a plateau: the acceptance probability
    uses an average energy change DE estimated over m probe steps.

    Returns (x, y, f, t) as in recuit_g.
    """
    x0 = random.uniform( xd, xp )
    y0= random.uniform( yd, yp )
    x=[x0]
    y=[y0]
    f=[g(x0,y0)]
    t=t0
    T = 1/t0
    A_rate = 1 # assume we start by decreasing the cost function
    while t < tmax and A_rate > A_rate_max : # note: the iteration cap is an implicit condition on the cost function
        ## plateau: estimate the mean cost change over m probe steps
        S = 0
        for i in range(m):
            xc= x[-1] + np.random.normal(0, sqrt(k* exp(-1/(1000*T))) ,1)
            yc= y[-1] + np.random.normal(0, sqrt(k* exp(-1/(1000*T))) ,1)
            S+= g(xc, yc) - g(x[-1], y[-1])
        DE = 1/m * S
        xx = x[-1] + np.random.normal(0, sqrt(k* exp(-1/(1000*T))) ,1 )
        yy = y[-1] + np.random.normal(0, sqrt(k* exp(-1/(1000*T))) ,1 )
        ff = g(xx,yy)
        if ff < f[-1]:
            x.append(xx)
            y.append(yy)
            f.append(ff)
        else :
            # Metropolis-style acceptance driven by the estimated DE.
            if random.uniform(0,1) < kp * exp( -DE / (1000*T)):
                x.append(xx)
                y.append(yy)
                f.append(ff)
        t+=1 # no convergence test; just advance the clock
        T = 1 / t
        A_rate = len(x)/t # moves actually accepted relative to the total number of iterations
    return x,y,f,t
def stat(vp , F , n_rep , f , param , k_default, kp_default , tmax_default, sup_inf ):
    """Repeat the annealing n_rep times over the parameter grid vp and
    compare success rates between the lower and upper halves of vp with a
    one-sided Student t-test.

    f selects the objective ('f' -> recuit_f1, 'g' -> recuit_g), param
    selects which annealing parameter vp sweeps ('k' or 'kp'), F is the
    known global minimum, and sup_inf chooses the direction of the test
    (0: upper half > lower half, 1: lower half > upper half).
    Results are printed; nothing is returned on success.
    NOTE(review): if sup_inf is neither 0 nor 1, T_test is never assigned
    and the pvB computation raises NameError.
    """
    #n_rep = 15
    #kp = np.arange(0,1,0.02)
    lvp = len(vp)
    # E[i][j] = |final cost - true minimum| for repetition i, parameter vp[j].
    E =np.zeros((n_rep, lvp))
    Min_Inf =[]
    Min_sup =[]
    for i in range(n_rep):
        for j in range(lvp):
            if f=='f' and param=='k':
                #print('ok')
                S = recuit_f1 ( -5, 5, 1, vp[j] , kp_default , tmax_default, 0.0001 )
                E[i][j] = (abs(S[1][-1] -F))
            elif f=='f' and param=='kp':
                S = recuit_f1 ( -5, 5, 1, k_default , vp[j] , tmax_default, 0.0001 )
                E[i][j] = (abs(S[1][-1] -F))
            elif f=='g' and param=='k':
                #print('ok')
                S = recuit_g ( -3, 3, -3,3,1, vp[j], kp_default , tmax_default, 0.0001 )
                E[i][j] = (abs(S[2][-1] -F))
                #print(E[i][j])
            elif f=='g' and param=='kp':
                S = recuit_g ( -3, 3, -3,3,1, k_default, vp[j] , tmax_default, 0.0001 )
                E[i][j] = (abs(S[2][-1] -F))
            else :
                return ('unknown function')
    print(int(lvp/2))
    # Split the parameter grid in two halves and score a run as a success
    # (1) when the final cost is within 2 of the true minimum.
    n_col_score = int(lvp/2)
    Score_inf = np.zeros((n_rep, n_col_score))
    for i in range(n_rep):
        for j in range(n_col_score):
            if E[i][j] <2 and E[i][j]>-2 :
                Score_inf[i][j]=1
    Score_sup = np.zeros((n_rep, n_col_score))
    for i in range(n_rep):
        for j in range(n_col_score):
            if E[i][j+n_col_score] <2 and E[i][j+n_col_score]>-2 :
                Score_sup[i][j]=1
    print('Score inf shape', np.shape(Score_inf))
    # Success counts per parameter value (summed over the n_rep repetitions).
    Sum_score_inf = np.sum(Score_inf,axis=0)
    Sum_score_sup = np.sum(Score_sup,axis=0)
    # ## one-sided t-test
    # ## H0 : S_k>20 > S_k<20
    mInf = s.mean(Sum_score_inf)
    mSup = s.mean(Sum_score_sup)
    vinf = s.pvariance(Sum_score_inf)
    vsup = s.pvariance(Sum_score_sup)
    if sup_inf == 0:
        # sup > inf
        T_test = (mSup - mInf )/ (sqrt(vinf/len(Sum_score_inf) + vsup/len(Sum_score_sup)))
    if sup_inf == 1:
        # inf > sup
        T_test = (mInf - mSup )/ (sqrt(vinf/len(Sum_score_inf) + vsup/len(Sum_score_sup)))
    # Two-sided and one-sided p-values from the Student t distribution.
    pvB =2*( 1-(t.cdf(abs(T_test),len(Sum_score_inf) +len(Sum_score_sup) -2)) )
    pvU = 1-(t.cdf(T_test,len(Sum_score_inf) +len(Sum_score_sup) -2))
    print('Return Score inf' , Score_inf,
    'Return Score inf' , Score_sup,
    'Vecteur du nombre de succes sur n repetition partie inferieure : ', Sum_score_inf ,'\n',
    'Vecteur du nombre de succes sur n repetition partie superieure : ', Sum_score_sup ,'\n',
    'Moyenne de succes partie inferieure : ', mInf ,'\n',
    'Moyenne de succes partie superieur : ', mSup ,'\n',
    'Variance de succes partie inferieure : ', vinf ,'\n',
    'Variance de succes partie superieur : ', vsup ,'\n',
    'Statistique de student : ' , T_test,'\n',
    'Pvalue bilaterale :' , pvB , '\n',
    'Pvalue Unilateral :', pvU )
###############################################################################################
# MAIN #
###############################################################################################
Which_question = int(input('Which question ? '))
if Which_question==1:
X = np.arange(-6,6,0.1)
fig = plt.figure(1)
plt.plot(X, f_1(X))
plt.xlabel('x')
plt.ylabel('f(x)')
plt.show()
print('min de f1 : ',min(f_1(X)))
print('min de f1 : ',(f_1(-2.823)))
if Which_question==2:
S = recuit_f1 ( -6, 6, 1,10, 0.5 , 10000, 0.0001 )
X = np.arange(-6,6,0.1)
# fig = plt.figure(1)
# plt.plot(S[0], S[1] , '-x', c= 'lightgrey')
# plt.plot(S[0][-1], S[1][-1] , 'x', c= 'c')
# plt.plot(X, f_1(X))
# plt.xlabel('x')
# plt.ylabel('f(x)')
print(S[0][-1], S[1][-1])
F = min(f_1(X))
# k = np.arange(0,100,0.5)
# L_sol_ecart =[]
# for i in range(len(k)):
# S = recuit_f1 ( -6, 6, 1,k[i], 0.5 , 10000, 0.0001 )
# E = (abs(S[1][-1] -F))
# L_sol_ecart.append(E)
# fig = plt.figure(2)
# plt.plot(k ,L_sol_ecart, 'x')
# plt.xlabel('k')
# plt.ylabel('| f(x_min_calcule) - f(x_opt)|')
# print(S[0][-1], S[1][-1])
# T = np.arange(1*10**-6 ,1/1000,0.00001)
# k = [1,5,10,15]
# fig = plt.figure(3)
# for i in k:
# f= i * np.exp(-1/(1000*T))
# plt.plot(T, f, c=cm.hot(i/15), label=i)
# plt.legend()
# plt.xlabel('T')
# plt.ylabel(' k.e^{-1/(1000*T)}')
# kp = np.arange(0,1,0.01)
# L_sol_ecart =[]
# for i in range(len(kp)):
# S = recuit_f1 ( -6, 6, 1,10, kp[i] , 10000, 0.0001 )
# E = (abs(S[1][-1] -F))
# L_sol_ecart.append(E)
# fig = plt.figure(4)
# plt.plot(kp ,L_sol_ecart, 'x')
# plt.xlabel('kP')
# plt.ylabel('| f(x_min_calcule) - f(x_opt)|')
# Tmax = np.arange(0,30000,100)
# L_sol_ecart =[]
# for i in range(len(Tmax)):
# S = recuit_f1 ( -6, 6, 1,10, 0.5 , Tmax[i], 10**-6 )
# E = (abs(S[1][-1] -F))
# L_sol_ecart.append(E)
# fig = plt.figure(5)
# plt.plot(Tmax ,L_sol_ecart, 'x')
# plt.xlabel('tmax')
# plt.ylabel('| f(x_min_calcule) - f(x_opt)|')
# plt.ylim((0,100))
# plt.show()
# print(S[0][-1], S[1][-1])
#print('k = np.arange(0,15,0.5)')
#print('F est le min global')
k = np.arange(0,30,0.5)
#kp = np.arange(0,1,0.02)
print(stat( k, F , 40,'g', 'k' , 10 , 0.5, 10000 ,0 ) )
#print( stat(kp, F ,40, 'f','kp' , 10 , 0.5, 10000 , 1))
if Which_question==3:
S = recuit_f1 ( -5,5, 1, 0.1, 0 , 20000, 0.0001 )
X = np.arange(-6,6,0.1)
fig = plt.figure(1)
plt.plot(S[0][1], S[1][1] , '-o' ,c='green')
plt.plot(S[0], S[1] , '-x' ,c='lightgrey')
plt.plot(S[0][-1], S[1][-1] , '-o' ,c='red')
plt.plot(S[0][1], S[1][1] , '-o' ,c='green')
plt.plot(X, f_1(X))
plt.xlabel('x')
plt.ylabel('f(x)')
plt.show()
if Which_question==4:
X = np.arange(-5,5,0.2)
Y = np.arange(-5,5,0.2)
X, Y = np.meshgrid(X, Y)
# print(S)
Z= g(X,Y)
M1 = g(3.548, 3.548)
M2 = g(3.548, -2.823)
M3 = g(-2.823,3.548)
M4 = g(-2.823,-2.823)
F= Z.min()
# print(F)
# print(M1 , M2 , M3 , M4)
succes = 0
for i in range (40):
S = recuit_g ( -1, 1, -1,1 ,1, 15, 0.1 , 20000, 0.0001 )
if abs(S[2][-1] - F) < 2 :
succes += 1
print('NOmbre de succes ' , succes)
#S = recuit_g (0, 0, 1, 1, 0.001 , 10000 , 0.0001 )
# fig = plt.figure() #opens a figure environment
# ax = fig.gca(projection='3d') #to perform a 3D plot
# ax.scatter(3.548,3.548,M1, '-o',c='red', s=200,label='(x,y,z)=(3.548,3.548, %d)' %M1)
# ax.scatter(3.548,-2.823,M2,'-o' ,c='pink', s=200,label='(x,y,z)=(3.548,-2.823, %d)' %M2)
# ax.scatter(-2.823,3.548,M3,'-o' ,c='orange', s=200,label='(x,y,z)=(-2.823,3.548, %d)' %M3)
# ax.scatter(-2.823,-2.823,M4,'-o' ,c='yellow', s=200,label='(x,y,z)=(-2.823,-2.823, %d)' %M4)
# ax.scatter(S[0][-1], S[1][-1],S[2][-1], '-o' ,c='green', s=200,label='(x,y,z)=(-2.823,-2.823, %d)' %M4)
# surf = ax.plot_wireframe(X, Y, Z, rstride=5, cstride=5) #plot definition and options
# ax.plot(S[0],S[1],S[2] ,c='yellow')
# plt.legend()
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('z')
# plt.show()
##k = np.arange(0,30,0.5)
##kp = np.arange(0.5,1,0.02)
# print('kp = np.arange(0,1,0.02)')
########## print(stat( k, F , 14,'g', 'k' , 10 , 0.5, 10000 ,0 ) )
##print( stat(k, F ,40, 'f','k' , 10 , 0.5, 10000 , 1))
######### k test ########################################
# k = np.arange(0,30,0.5)
# E =np.zeros((15, len(k)))
# K = []
# Min_Inf =[]
# Min_sup =[]
# for i in range(15):
# for j in range(len(k)):
# S = recuit_g ( -3, 3, -3,3,1,k[j], 0.5 , 10000, 0.0001 )
# e = (abs(S[2][-1] -F))
# K.append(k[j])
# E[i][j] = e
# print('len de k',len(k))
# print('len de E', E, 'E shape',np.shape(E))
# print('len de K',len(K))
# Score_inf = np.zeros((15, 30))
# for i in range(np.shape(E)[0]):
# for j in range(30):
# if E[i][j] <2 and E[i][j]>-2 :
# Score_inf[i][j]=1
# Score_sup = np.zeros((15, 30))
# for i in range(np.shape(E)[0]):
# for j in range(30):
# if E[i][j+30] <2 and E[i][j+30]>-2 :
# Score_sup[i][j]=1
# print(Score_inf)
# print(Score_sup)
# Sum_score_inf = np.sum(Score_inf,axis=1)
# Sum_score_sup = np.sum(Score_sup,axis=1)
# # ## T test unilateral
# # ## H0 : S_k>20 > S_k<20
# print('Sum_score_inf',Sum_score_inf)
# print('Sum_score_sup',Sum_score_sup)
# mInf = s.mean(Sum_score_inf)
# print(mInf)
# mSup = s.mean(Sum_score_sup)
# print(mSup)
# vinf = s.pvariance(Sum_score_inf)
# vsup = s.pvariance(Sum_score_sup)
# print(vinf)
# print(vsup)
# T_stat = (mSup - mInf )/ (sqrt(vinf/len(Sum_score_inf) + vsup/len(Sum_score_sup)))
# pvU = 1-(t.cdf(T_stat,len(Sum_score_inf) +len(Sum_score_sup) -2))
# print('pvalue : ', pvU)
######### kp test ########################################
# ######################################### Tmax
# tmax = np.arange(0,20000,20)
# for i in range(len(tmax)):
# S = recuit_g ( -3, 3, -3,3,1,10, 0.5, tmax[i], 0.0001 )
# E = (abs(S[2][-1] -F))
# L_sol_ecart.append(E)
# fig = plt.figure(3)
# plt.xlabel('kp')
# plt.axhline(y=0, xmin=0, xmax=30, c='red', label="Min = -266")
# plt.axhline(y=59, xmin=0, xmax=30, c='pink', label="Min = -208")
# plt.axhline(y=116, xmin=0, xmax=30, c='yellow', label="Min = -150")
# plt.plot(tmax ,L_sol_ecart, 'x')
# plt.ylabel('| f(x_min_calcule) - f(x_opt)|')
# kp = np.arange(0,1.02,0.02)
# print(kp)
# Min_opt_kInf = 0
# Min_opt_kSup = 0-
# Min_Inf= []
# Min_sup =[]
# L_sol_ecart =[]
# for i in range(len(kp)):
# S = recuit_g ( -3, 3, -3,3,1,10, kp[i] , 10000, 0.0001 )
# E = (abs(S[2][-1] -F))
# L_sol_ecart.append(E)
# fig = plt.figure(3)
# plt.xlabel('kp')
# plt.axhline(y=0, xmin=0, xmax=30, c='red', label="Min = -266")
# plt.axhline(y=59, xmin=0, xmax=30, c='pink', label="Min = -208")
# plt.axhline(y=116, xmin=0, xmax=30, c='yellow', label="Min = -150")
# plt.plot(kp ,L_sol_ecart, 'x')
# plt.ylabel('| f(x_min_calcule) - f(x_opt)|')
# for j in range(15):
# for i in range(len(kp)):
# S = recuit_g ( -3, 3, -3,3,1,10, kp[i] , 10000, 0.0001 )
# E = (abs(S[2][-1] -F))
# L_sol_ecart.append(E)
# if E <1 and E>-1 and kp[i] <0.5 :
# Min_opt_kInf += 1
# elif E <1 and E>-1 and kp[i] >0.5 :
# Min_opt_kSup += 1
# Min_Inf.append(Min_opt_kInf)
# Min_sup.append(Min_opt_kSup)
# # ## T test unilateral
# # ## H0 : S_k>20 > S_k<20
# print(Min_Inf)
# print(Min_sup)
# mInf = s.mean(Min_Inf)
# print(mInf)
# mSup = s.mean(Min_sup)
# print(mSup)
# vinf = s.pvariance(Min_Inf)
# print(vinf)
# vsup = s.pvariance(Min_sup)
# print(vsup)
# T = (mSup - mInf) / (sqrt(vinf/len(Min_Inf)+ vsup/len(Min_sup)))
# print(T)
# pvU = 1-(t.cdf(T,len(Min_Inf)+ len(Min_sup)-2))
# print('pvalue : ', pvU)
# print(S[0][-1], S[1][-1])
# S1 = recuit_g ( 0,0, 1, 1, 0.5 , 10000, 0.0001 )
# strS1 = ('x0 = 0 ; y0 = 0 ;t0 = 1 ; k = 1 ; kp = 0.5 ; tmax = 100' )
# S2 = recuit_g ( 0,0, 1, 0.1 , 0.5 , 10000, 0.0001 )
# strS2 = ('x0 = 0 ; y0 = 0 ; t0 = 1 ; k = 0.1 ; kp = 0.5 ; tmax = 100' )
# S3 = recuit_g ( 0,0, 1, 10, 0.2 , 10000, 0.0001 )
# strS3 = ('x0 = 0 ; y0 = 0 ; t0 = 1 ; k = 10 ; kp = 0.2 ; tmax = 100' )
# S4 = recuit_g ( 0,0, 1, 1 , 0.1 , 10000, 0.0001 )
# strS4 = ('x0 = 0 ; y0 = 0 ; t0 = 1 ; k = 10 ; kp = 0.1 ; tmax = 100' )
# L_sol=[]
# L_title=[]
# L_sol.append(S1)
# L_sol.append(S2)
# L_sol.append(S3)
# L_sol.append(S4)
# L_title.append(strS1)
# L_title.append(strS2)
# L_title.append(strS3)
# L_title.append(strS4)
# for i in range(0,4) :
# fig = plt.figure(i+1)
# ax = fig.gca(projection='3d') #to perform a 3D plot
# surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, linewidth=0, antialiased=False) #plot definition and options
# ax.plot(L_sol[i][0][-1],L_sol[i][1][-1],L_sol[i][2][-1], '-o',c='red')
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('g(x,y)')
# plt.title(L_title[i])
plt.show()
if Which_question==5:
S = recuit_f1_p ( -5,5, 1, 10, 0.5 , 10000, 0.0001 , 5)
#S1 = recuit_f1( -5,1, 10, 0.5 , 10000, 0.0001 )
X = np.arange(-6,6,0.1)
fig = plt.figure(0)
plt.plot(S[0], S[1] , 'k-x')
#plt.plot(S1[0][-1], S1[1][-1] , 'c-x')
plt.plot(X, f_1(X), c='red')
plt.xlabel('x')
plt.ylabel('f(x)')
print(S[1][-1])
# k=[15,1,10,10]
# kp=[0.5,0.5,0.8,0.1]
# for i in range(len(k)):
# #S= recuit_f1 ( 0.5, 1,k[i], kp[i] , 10000, 0.0001 )
# S1= recuit_f1_p ( 0.5, 1,k[i], kp[i] , 10000, 0.0001 ,5)
# strS = 'x0 = 0.5 ; t0 = 1 ; k = (%f); kp = (%f) ; tmax = 10000' % (k[i] ,kp[i] )
# print('len S', len(S[0]), 'lenS1', len(S1[0]))
# fig = plt.figure(i+1)
# #plt.plot(S[0], S[1], 'k-x')
# plt.plot(S1[0], S1[1], 'c-x')
# plt.plot(X, f_1(X), c='red')
# plt.xlabel('x')
# plt.title(strS)
# plt.ylabel('f(x)')
# plt.show()
# for i in range(5):
# S = recuit_f1_p ( -5, 5,1, 10, 0.5 , 10000, 0.0001 , 5)
# fig = plt.figure(i+1)
# #plt.plot(S[0], S[1], 'k-x')
# plt.plot(S1[0], S1[1], 'c-x')
# plt.plot(X, f_1(X), c='red')
# plt.xlabel('x')
# #plt.title(strS)
# plt.ylabel('f(x)')
if Which_question==6:
X = np.arange(-5,5,0.2)
Y = np.arange(-5,5,0.2)
X, Y = np.meshgrid(X, Y)
Z= g(X,Y)
S = recuit_g_p ( 0,0,0,0, 1, 15, 0.2 , 1000, 0.0001 , 5)
S1 = recuit_g( 0,0,0,0, 1, 15, 0.2 , 1000, 0.0001 )
fig = plt.figure(0)
ax = fig.gca(projection='3d') #to perform a 3D plot
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, linewidth=0, antialiased=False) #plot definition and options
ax.plot(S1[0],S1[1],S1[2], '-o',c='gray')
ax.plot(S[0],S[1],S[2], '-o', c='cyan')
ax.set_zlim(-300,300)
ax.set_xlim(-5,5)
ax.set_ylim(-5,5)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
print(S[1][-1])
print(S1[1][-1])
plt.show()
if Which_question==7:
X = np.arange(-5,5,0.2)
Y= f_1(X)
S = recuit_f1_p ( 0, 0, 1, 10, 0.5 , 10000, 0.0001 , 5)
S1 = recuit_f1( 0, 0 , 1, 10, 0.5 , 10000, 0.0001 )
fig = plt.figure(0)
plt.plot(S1[0], S1[1], '-o',c='gray')
plt.plot(S[0], S[1], '-o', c='cyan')
plt.ylim((-200,300))
plt.xlim((-5,5))
plt.plot(X, f_1(X), c='black')
print(S[1][-1])
print(S1[1][-1])
plt.show()
if Which_question==8 :
X= np.arange(-6,6)
F = min(f_1 (X))
dist_max_sp =[]
dist_max_ap = []
Succes_sp = []
Succes_ap = []
for i in range(21):
dist_sp = recuit_f1(-6, 6, 1,10, 0.5 , 10000, 0.0001 )
f =max( abs(dist_sp[1]- F) )
dist_max_sp.append(f)
if dist_sp[1][-1]-F<2 :
Succes_sp.append(1)
dist_ap = recuit_f1_p(-6, 6, 1,10, 0.5 , 10000, 0.0001,5)
f2 =max( abs(dist_ap[1]- F) )
dist_max_ap.append(f2)
if dist_ap[1][-1]-F<2 :
Succes_ap.append(1)
print('distance max sans paliers', dist_max_sp)
print('distance max avec paliers', dist_max_ap)
print('nombre de succes sans paliers', Succes_sp)
print('nombre de succe7s avec paliers', Succes_ap)
if Which_question==9 :
# Variation de k et k' pour f
S1 = recuit_f1( 0, 0 , 1, 10, 0.5 , 10000, 0.0001 )
S2 = recuit_f1_p ( 0, 0, 1, 10, 0.5 , 10000, 0.0001 , 5)
fig = plt.figure(0)
plt.plot(np.arange(len(S2[3])), S2[3], '-o', c='skyblue')
plt.plot(np.arange(len(S1[3])), S1[3], '-o',c='navy')
plt.xlabel("Nb iteration")
plt.ylabel("seuil de proba d acceptation")
plt.ylim(-1,2)
fig = plt.figure(1)
plt.plot(np.arange(len(S2[3])), S2[3], '-o', c='skyblue')
plt.xlabel("Nb iteration")
plt.ylabel("seuil de proba d acceptation")
plt.show()
t =np.arange(0,10000)
plt.show()
# x,f,t, prop_accep, surface
| true |
2609878797e6a45264ff2348ba08cafd7e45950b | Python | bi0sblr/BSides2020Writeups | /Reversing/Discord Emoji Spammer/Admin/exploit.py | UTF-8 | 681 | 2.578125 | 3 | [] | no_license | import base64
from collections import deque
# print(flag)
flag_low = deque([7, 4, 14, 4, 3, 4, 15, 4, 0, 15, 0, 5, 4, 15, 4, 4, 15, 12, 3, 4, 5, 4, 15, 0, 14, 3, 15, 8, 4, 2, 4, 15, 3, 8, 4, 1, 1, 13, 2, 3, 4, 3, 4, 6, 11, 1, 15])
flag_up = deque([4, 5, 4, 4, 5, 4, 7, 3, 5, 7, 3, 6, 7, 3, 6, 5, 7, 3, 5, 7, 7, 7, 5, 3, 7, 5, 6, 3, 3, 3, 7, 5, 3, 6, 3, 5, 6, 3, 7, 6, 5, 6, 6, 3, 3, 3, 7])
for i in range(len(flag_low)):
temp_low = flag_low.copy()
temp_low.rotate(-i)
temp = list(temp_low)
possible_flag = ""
for j in range(len(flag_low)):
possible_flag += chr(flag_up[j]*16+temp[j])
if "BSDCTF" in possible_flag:
print(possible_flag)
| true |
d0f52d78f21ccc75c4ed886b9c95b31f0b978067 | Python | JosueOb/Taller1 | /Ejercicios/eje-7-8.py | UTF-8 | 801 | 3.625 | 4 | [] | no_license | print("Bryan Pilatuña")
print("Ejercicio 7")
from tkinter import *
master = Tk()
var1 = IntVar()
Checkbutton(master, text="Hombre",variable=var1).grid(row=0,sticky=W)
var2=IntVar()
Checkbutton(master, text="Mujer",variable=var2).grid(row=1,sticky=W)
mainloop()
print ("Ejercicio 8")
from tkinter import *
master = Tk()
def var_states():
print (" male: ",var1.get())
print ("Female: ", var2.get())
Label(master, text="Indicar el sexo: ").grid(row=0,sticky=W)
var1 = IntVar()
Checkbutton(master,text="male", variable=var1).grid(row=1,
sticky=W)
var2 = IntVar()
Checkbutton(master, text="Female", variable=var2).grid(row=2,
sticky=W)
Button(master, text='Quit', command=master.quit).grid(row=3,
sticky=W, padx=4)
Button(master, text='Show', command=var_states).grid(row=4,
sticky=W,pady=4)
mainloop()
| true |
5cf44e434c59a154a64b9a8bfd43bb643a5f0687 | Python | cycraig/YACSA | /api/sentiment_api.py | UTF-8 | 2,061 | 2.640625 | 3 | [] | no_license | from flask_restx import Namespace, Resource, fields
from .sentiment import predict_reddit_sentiment
from datetime import datetime, timedelta, timezone
import dateutil.parser as dt
api = Namespace('sentiment', description='Cryptocurrency social media sentiment')
# TODO: return aggregate sentiment _and_ volume, or split endpoints?
sentiment_request = api.model('SentimentRequest', {
'datetime': fields.DateTime(required=True, dt_format='iso8601', description='The datetime to analyse in UTC ISO 8601 format (will be truncated to the hour).'),
});
sentiment_response = api.model('SentimentResponse', {
'sentiment': fields.Fixed(decimals=4, description='Cryptocurrency sentiment [-1,1] for the requested time period.'),
});
@api.route('/')
class Sentiment(Resource):
    '''Compute aggregate cryptocurrency sentiment for a requested hour.'''
    @api.doc('post_sentiment')
    @api.expect(sentiment_request)
    @api.marshal_with(sentiment_response)
    def post(self):
        '''Request sentiment for a particular hour'''
        # TODO: production logging
        # truncate to the hour
        start_epoch = dt.parse(api.payload['datetime'])
        start_epoch = start_epoch.replace(minute=0, second=0, microsecond=0)
        # The analysis window is the single hour [start_epoch, end_epoch).
        end_epoch = start_epoch + timedelta(hours=1)
        # reject any time more than 24 hours in the past for now...
        now = datetime.now(timezone.utc)
        diff = now - start_epoch
        if diff.total_seconds() / 3600 >= 24:
            api.abort(400, "{} is more than 24 hours in the past".format(start_epoch))
        print(start_epoch)
        print(end_epoch)
        print(now)
        # TODO: store results in a database, use a put/post to update it (secured endpoint)
        # TODO: psaw isn't returning the most recent posts (posted 31 minutes ago etc.)
        # Either the UTC is wrong or it's delayed, so maybe make the time range more coarse like per day?
        sentiment = predict_reddit_sentiment(start_epoch, end_epoch)
        return {'sentiment': sentiment}
| true |
eb23a39063152862ab7009cf78181cae364594aa | Python | hray0903/AutomateTheBoringStuffWithPython | /belonging.py | UTF-8 | 628 | 3.4375 | 3 | [] | no_license | stuff = {"rope": 1, "torch": 6,
"gold coin": 42, "dirk": 1, "arrow": 12}
def display_inventory(inventory):
print("belongings list:")
item_total = 0
for item, num in inventory.items():
print(str(num) + " " + item)
item_total += num
print("total amount:" + " " + str(item_total))
display_inventory(stuff)
dragon_loot = ["gold coin", "dirk", "gold coin", "gold coin", "ruby"]
def add_to_inventory(inventory, added_items):
for item in added_items:
inventory.setdefault(item, 0)
inventory[item] += 1
add_to_inventory(stuff, dragon_loot)
display_inventory(stuff)
| true |
6d03e1b3e8b04b603e9edcba2d128e3d2c1abba0 | Python | PhysTom88/JetRes | /HistFileMaker/PYTHON/UTILS/ROOTUtils.py | UTF-8 | 581 | 2.6875 | 3 | [] | no_license | ##
## Generic ROOT functions
##
## Import Python functions
import PythonUtils
## Import ROOT functions
from ROOT import TFile, gDirectory, TObject
def saveToFile(h, fileName, directory, name):
    """Write a clone of ROOT object *h* into *fileName* under the ROOT
    directory path *directory*, stored as *name* (overwriting any
    previous cycle of the same name).
    """
    f = TFile(fileName, 'UPDATE')
    path = '/'
    ## Create the directory tree
    for dir in directory.split('/'):  # NOTE(review): 'dir' shadows the builtin
        path += dir + '/'
        # Only create the subdirectory if it does not already exist.
        if not gDirectory.Get(dir):
            gDirectory.mkdir(dir)
        gDirectory.cd(dir)
    ## Now the tree is written, move to the dir
    f.cd(path)
    hc = h.Clone()
    hc.Write(name, TObject.kOverwrite)
    f.Close()
| true |
3923cd77a2281ca3397724242d093a3e9e6b81e2 | Python | nhalverson0726/DataScripts | /BobDylanWorkup.py | UTF-8 | 1,117 | 2.8125 | 3 | [] | no_license | from pathlib import Path
import pprint, shelve
shelfFile = shelve.open('BobDylan')
hasYear = shelfFile['hasYear']
years = []
for key in hasYear:
if hasYear[key]['year'] not in years:
years.append(hasYear[key]['year'])
wordsYear = {}
for year in years:
wordsYear.setdefault(year, [])
for song in hasYear:
for year, lyrics in wordsYear.items():
#print(year)
#print(hasYear[song]['year'])
if hasYear[song]['year'] == year:
#print(lyrics)
#print(hasYear[song]['lyrics'])
lyrics.append(hasYear[song]['lyrics'])
countYear = {}
##for year in wordsYear:
## count = {}
## for lyrics in wordsYear[year]:
## print(lyrics)
## for word in lyrics:
## print(word)
## count.setdefault(word, 0)
## count[word] = count[word] + 1
for year, lyrics in wordsYear.items():
count = {}
for word in lyrics[0]:
count.setdefault(word, 0)
count[word] = count[word]+1
countYear[year] = count
| true |
54f7313282046a17eefe0e40a289d047d4bd5a9d | Python | csdali13/EmployeeManager | /empManager/emp_records/routes.py | UTF-8 | 3,414 | 2.703125 | 3 | [] | no_license | from empManager.emp_records import emp_record_blueprint
from empManager.emp_records.models import Employee, Department
from flask import render_template, request, redirect, flash, url_for
from empManager.emp_records.addEmployeeForm import AddEmployeeForm
from empManager.emp_records.searchEmployeeForm import SearchEmployeeForm
from empManager import empManager_db
import sys
@emp_record_blueprint.route('/')
def emp_list():
    """Render the home page with the full list of employees."""
    emps = Employee.query.all()
    return render_template('home.html', emps=emps)
@emp_record_blueprint.route('/add/addemployeeform', methods = ['GET', 'POST'])
def add_employee_popupform():
    """Show the add-employee form (GET) and create the employee (POST).

    NOTE(review): the POST branch reads form fields without calling any
    form validation, so invalid submissions are stored as-is.
    """
    form = AddEmployeeForm()
    # print('name: ' + form.name.data, file=sys.stdout)
    if (request.method == 'POST'):
        Employee.create_employee(
            name = form.name.data,
            designation = form.designation.data,
            address = form.address.data,
            phone = form.phone.data,
            department_id = form.department_id.data)
        return redirect(url_for('emp_record_blueprint.emp_list'))
    # else:
    #print('Error in adding employee', file=sys.stderr)
    return render_template('addEmployee.html', form=form)
@emp_record_blueprint.route('/delete/deleteEmployee/<employee_id>', methods = ['GET', 'POST'])
def delete_employee(employee_id):
    """Show a delete-confirmation page (GET) and delete the employee (POST)."""
    emp = Employee.query.get(employee_id)
    print('Inside the delete employee method')
    print(emp.id, file=sys.stderr )
    if (request.method == 'POST'):
        print('Inside the POST if')
        print(emp)
        Employee.delete_employee(emp.id)
        flash('Employee got deleted successfully')
        print('employee deleted')
        return redirect(url_for('emp_record_blueprint.emp_list'))
    else:
        # NOTE(review): message copy-pasted from the add route; this is
        # the GET branch of the delete view, not an add failure.
        print('Error in adding employee', file=sys.stderr)
    return render_template('deleteEmployee.html', emp = emp, employee_id = employee_id)
@emp_record_blueprint.route('/add/searchEmployeeform', methods = ['GET', 'POST'])
def search_employee():
    """Show the search form (GET) and render matching employees (POST)."""
    form = SearchEmployeeForm()
    # print('name: ' + form.name.data, file=sys.stdout)
    if (request.method == 'POST'):
        empResultSet = Employee.search_employee(
            searchName = form.name.data,
            searchDesignation = form.designation.data,
            searchPhone = form.phone.data)
        return render_template('searchResult.html', empResultSet = empResultSet )
    # else:
    #print('Error in adding employee', file=sys.stderr)
    return render_template('searchEmployee.html', form=form)
@emp_record_blueprint.route('/display/department/<department_id>')
def display_department(department_id):
    """Render one department together with all of its employees."""
    department = Department.query.filter_by(id=department_id).first()
    # NOTE(review): raises AttributeError if no department matches
    # department_id (department is None).
    department_employees = Employee.query.filter_by(department_id = department.id).all()
    return render_template('department.html', department=department, department_employees=department_employees)
# @emp_record_blueprint.route('/delete/deleteemployee/<emp_id>', methods=['GET', 'POST'])
# def delete_employee(emp_id):
# print(emp_id, file=sys.stdout)
# print('Inside delete employee method: emp id', file=sys.stdout)
# emp = Employee.query.get(emp_id)
# if request.method == 'POST':
# empManager_db.session.delete(emp)
# empManager_db.session.commit()
# print('Emp deleted', file=sys.stdout)
# return redirect(url_for('emp_record_blueprint.emp_list'))
| true |
59e5fd8287002d28a935040edb7a3f2f2b3508a7 | Python | yosef8234/test | /pp_admin/dns_relover.py | UTF-8 | 321 | 2.5625 | 3 | [] | no_license | import dns.resolver
hosts = ["ya.ru","google.com"]
def query(host_list=hosts):
collection= []
for host in host_list:
ip = dns.relosver.query(host,"A")
for i in ip:
collection.append(str(i))
return collection
if __name__ == '__main__':
for arec in query():
print arec | true |
633745f5b17c0b22110a5cbe4a82807067a06aaa | Python | rishavghosh605/Python-Scripting-And-Automation | /Personal Assistant/using_wolfram_alpha.py | UTF-8 | 197 | 2.640625 | 3 | [] | no_license | import wolframalpha
client = wolframalpha.Client('7QJ4YH-T5RP7VR292')
while True:
query=str(input('Query: '))
res= client.query(query)
output=next(res.results).text
print(output)
| true |
96e7188cdc173827ccc83707a39a94b45a4a1c08 | Python | graemephilipross/Angular2CardGame | /services/state_service.py | UTF-8 | 1,571 | 2.65625 | 3 | [] | no_license |
from palringo import *
from cah_bot.game import Game, GameStates
class StateService():
def __init__(self):
# map of state and actions
self.stateActions = {
GameStates.InitGame : self.initGame,
GameStates.InitRound : self.initNextRound,
GameStates.InRound : self.inRound,
GameStates.SubmittingCard : self.whiteCardsSubmitted,
GameStates.CardSubmitted : self.inCardSubmitted,
GameStates.SubmittedAllCards : self.submittedAllCards,
GameStates.InitRoundComplete : self.blackPlayerSelectsWinningCard,
GameStates.RoundComplete : self.roundComplete,
GameStates.InitGameComplete : self.initGameComplete
}
def initGame(self, game, payload):
game.initGame(payload)
def initNextRound(self, game, payload):
game.initNextRound(payload)
def inRound(self, game, payload):
game.inRound(payload)
def whiteCardsSubmitted(self, game, payload):
game.whiteCardsSubmitted(payload)
def inCardSubmitted(self, game, payload):
game.inCardSubmitted(payload)
def blackPlayerSelectsWinningCard(self, game, payload):
game.blackPlayerSelectsWinningCard(payload)
def submittedAllCards(self, game, payload):
game.inSubmittedAllCards(payload)
def roundComplete(self, game, payload):
game.inRoundCompleted(payload)
def initGameComplete(self, game, payload):
game.initGameComplete(payload)
def processState(self, game, payload):
if game.gameState == GameStates.Idle:
return
# run state action
self.stateActions[game.gameState](game, payload)
| true |
a516d607aed43ea1530c43ab0f263c91b99cb4e6 | Python | nicoleannhargrove/python-projects | /PythonFiles/string-info.py | UTF-8 | 244 | 4.15625 | 4 | [] | no_license | message = input("Enter a message: ")
print("First:" , message[0])
print ("Last:" , message[-1])
print("Middle:" , message[int(len(message) / 2)])
print("Even:", message[0::2])
print("Odd:" , message[1::2])
print("Reverse:", message[::-1]) | true |
d89b7fd6cfa839469c9236394ac7c51f03937301 | Python | AlikiZ/LMM-Lasso-on-large-scale-data | /AdaScreen/adascreen/adascreen.py | UTF-8 | 10,433 | 2.671875 | 3 | [
"MIT"
] | permissive | import numpy as np
import sklearn.linear_model as lm
from screening_rules import AbstractScreeningRule
class AdaScreen(AbstractScreeningRule):
""" Adaptive Lasso Screening with halfspace constraints. """
sphere_rule = None # screening rule that produces sphere center (o) and radius (rho)
local_hs_rules = None # list of local (=expensive) halfspace constraint returning screening rules
global_hs_rules = None # list of global halfspace constraint returning screening rules
A = None
b = None
normA = None
debug = False
def __init__(self, sphere_rule, tol=1e-9, debug=False):
#AbstractScreeningRule.__init__(self, 'AdaScreen (o){0}'.format(sphere_rule.name), tol)
AbstractScreeningRule.__init__(self, 'AdaScreen:(o){0}'.format(sphere_rule.name), tol)
self.sphere_rule = sphere_rule
self.local_hs_rules = []
self.global_hs_rules = []
self.debug = debug
def add_local_hs_rule(self, rule):
self.local_hs_rules.append(rule)
self.name = '{0}+(/){1}'.format(self.name, rule.name)
def add_global_hs_rule(self, rule):
self.global_hs_rules.append(rule)
self.name = '{0}+(/){1}'.format(self.name, rule.name)
def init(self, lmax, lmax_x, X, y, normX, normy, path):
print('AdaScreen initialize global halfspace constraints.')
(self.A, self.b, self.normA) = self.get_global_halfspaces(lmax, lmax_x, X, y, normX, normy)
def get_sphere(self, l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals):
return self.sphere_rule.get_sphere(l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals)
def screen(self, l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals):
(DIMS, EXMS) = X.shape
(o, rho) = self.sphere_rule.get_sphere(l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals)
# screening based on sphere constraint
theta = (y - X[nz,:].T.dot(beta[nz])) / l0
lhs = X.dot(o)
if len(normX.shape) == 1:
normX = normX.reshape(normX.shape[0], 1)
rhs = 1.0 - normX*rho
inds = np.where(np.abs(lhs) >= rhs-self.tol)[0]
#inds = np.unique(inds)
# if there are no constraints, then don't bother
(A, b, normA) = self.get_local_halfspaces(o, l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals)
# (a) only local constraints or ..
# (b) no constraints, no worries
if b.size==0 & self.b.size==0:
return (inds, intervals)
# (c) only global constraints
if b.size==0 & self.b.size>0:
A = self.A
b = self.b
normA = self.normA
else:
# (d) mixed constraints
if b.size>0 & self.b.size>0:
A = np.concatenate((A, self.A))
b = np.append(b, self.b)
normA = np.append(normA, self.normA)
# pre-calculations
prod_jk = X[inds,:].dot(A.T)
prod_ko = A.dot(o).flatten()
# distance to origin for each hyperplane r \in K x 1
r_k = (b - prod_ko) / normA # element-wise multiplication and division
# change sign according to case
r_inds = np.where(r_k >= 0.0)[0]
r_mul = np.ones(b.size)
r_mul[r_inds] = -1.0
r_k = np.abs(r_k)
#print 'Constraints x Datapoints {0}'.format(A.shape)
cosines_alpha = prod_jk / (normX[inds].reshape(len(inds),1) * normA) # J x K
sines_alpha = np.sqrt( np.maximum(1.0-cosines_alpha**2, 0.0) ) # J X K: the inner element-wise maximum(.) is due to numerics
rhos_plus = np.atleast_2d(self.screen_inner(r_k, r_mul, rho, cosines_alpha, sines_alpha)).T
#rhos_plus = self.screen_inner_dbg_icml(r_k, r_mul, rho, cosines_alpha, sines_alpha, prod_ko, b)
S_plus = lhs[inds] + normX[inds]*rhos_plus
rhos_minus = np.atleast_2d(self.screen_inner(r_k, r_mul, rho, -cosines_alpha, sines_alpha)).T
#rhos_minus = self.screen_inner_dbg_icml(r_k, r_mul, rho, -cosines_alpha, sines_alpha, prod_ko, b)
S_minus = -lhs[inds] + normX[inds]*rhos_minus
S = np.max((S_plus, S_minus), axis=0)
active = np.where(S >= 1.0 - self.tol)[0]
#print inds.size-active.size
if self.debug:
#print 'AdaScreen DEBUG START'
(prodjk_dbg, cos_dbg, sin_dbg) = self.cosines_dbg(X[inds,:], A, normX[inds], normA)
(rows, cols) = np.where(np.abs(prodjk_dbg-prod_jk)>1e-6)
if rows.size>0:
print 'PROD_JK:'
print (rows, cols)
(rows, cols) = np.where(np.abs(cos_dbg-cosines_alpha)>1e-6)
if rows.size>0:
print 'COS_ALPHA:'
print (rows, cols)
(rows, cols) = np.where(np.abs(sin_dbg-sines_alpha)>1e-6)
if rows.size>0:
print 'SIN_ALPHA:'
print (rows, cols)
print normX
print normy
rhos_plus_dbg = self.screen_inner_dbg(r_k, r_mul, rho, cos_dbg, sin_dbg)
rhos_minus_dbg = self.screen_inner_dbg(r_k, r_mul, rho, -cos_dbg, sin_dbg)
#print 'AdaScreen DEBUG END'
#raw_input("Press Enter to continue...")
#rhos_min = np.min((rhos_plus, rhos_minus), axis=0)
#active = np.where(np.abs(lhs[inds])>=1.0 - normX[inds]*rhos_min - self.tol)[0]
return (inds[active], intervals)
def cosines_dbg(self, X, A, normX, normA):
prod_jk = np.zeros((X.shape[0], A.shape[0]))
for j in range(X.shape[0]):
for k in range(A.shape[0]):
for n in range(A.shape[1]):
prod_jk[j,k] += X[j,n] * A[k,n]
cos_alpha = np.zeros(prod_jk.shape)
sin_alpha = np.zeros(prod_jk.shape)
for j in range(prod_jk.shape[0]):
for k in range(prod_jk.shape[1]):
cos_alpha[j,k] = prod_jk[j,k] / (normX[j]*normA[k])
sin_alpha[j,k] = np.sqrt( np.maximum(1.0 - cos_alpha[j,k]*cos_alpha[j,k], 0.))
return (prod_jk, cos_alpha, sin_alpha)
def screen_inner(self, r, r_mul, rho, cos_alpha, sin_alpha):
rhos_prime = rho*np.ones(sin_alpha.shape) # J x K
(rows, cols) = np.where(cos_alpha-r/rho>0.0)
if any(rho**2 - r[cols]**2)<0.0:
print 'dsdfgdggds'
values = np.maximum(rho**2 - (np.sqrt(rho**2 - r[cols]**2) * cos_alpha[rows, cols] + r_mul[cols]*r[cols]*sin_alpha[rows, cols])**2, 0.0)
rhos_prime[rows, cols] = np.sqrt(values)
return np.min(rhos_prime, axis=1) # J x 1
def screen_inner_dbg(self, r, r_mul, rho, cos_alpha, sin_alpha):
(J, K) = sin_alpha.shape
rhos_prime = rho*np.ones(sin_alpha.shape) # J x K
for j in range(J):
for k in range(K):
if cos_alpha[j,k]>r[k]/rho:
#print (j,k)
#print rho**2 - (np.sqrt(rho**2 - r[k]**2) * cos_alpha[j,k] + r_mul[k]*r[k]*sin_alpha[j,k])**2
value = rho**2 - (np.sqrt(rho**2 - r[k]**2) * cos_alpha[j,k] + r_mul[k]*r[k]*sin_alpha[j,k])**2
if value<0.0:
print value
value = 0.0
rhos_prime[j,k] = np.sqrt(value)
return np.min(rhos_prime, axis=1) # J x 1
def screen_inner_dbg_icml(self, r, r_mul, rho, cos_alpha, sin_alpha, prod_ko, b):
(J, K) = sin_alpha.shape
#print prod_ko.shape
#print b.shape
#print b
rhos_prime = rho*np.ones(sin_alpha.shape) # J x K
for j in range(J):
for k in range(K):
if (cos_alpha[j,k]>r[k]/rho and b[k]-prod_ko[k]>=0):
value = rho**2 - (np.sqrt(rho**2 - r[k]**2) * cos_alpha[j,k] + r_mul[k]*r[k]*sin_alpha[j,k])**2
if value<0.0:
print value
value = 0.0
rhos_prime[j,k] = np.sqrt(value)
if (sin_alpha[j,k]<=r[k]/rho and b[k]-prod_ko[k]<0):
value = rho**2 - (np.sqrt(rho**2 - r[k]**2) * cos_alpha[j,k] + r_mul[k]*r[k]*sin_alpha[j,k])**2
if value<0.0:
print value
value = 0.0
rhos_prime[j,k] = np.sqrt(value)
if (sin_alpha[j,k]>r[k]/rho and b[k]-prod_ko[k]<0):
value = np.sqrt(rho**2 - r[k]**2) * sin_alpha[j,k] - r[k]*cos_alpha[j,k]
rhos_prime[j,k] = value
return np.min(rhos_prime, axis=1) # J x 1
def get_local_halfspaces(self, o, l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals):
# gather halfspace constraints
A = None
b = np.array([])
normA = None
doInit = True
for rule in self.local_hs_rules:
#print('Getting halfspace constraints of {0}..'.format(rule))
(ak, bk, normak) = rule.get_local_halfspaces(o, l, l0, lmax, lmax_x, beta, X, y, normX, normy, nz, intervals)
if doInit and ak.size>0:
A = ak
b = bk
normA = normak
doInit = False
elif ak.size>0:
A = np.concatenate((A, ak))
b = np.append(b, bk)
normA = np.append(normA, normak)
# returns a_k, b_k and ||a_k||
# A \in R^(K x N)
# b \in R^K
# normA \in R_+^K
#print A.shape
return (A, b, normA)
def get_global_halfspaces(self, lmax, lmax_x, X, y, normX, normy):
# gather halfspace constraints
A = None
b = np.array([])
normA = None
doInit = True
for rule in self.global_hs_rules:
#print('Getting halfspace constraints of {0}..'.format(rule))
(ak, bk, normak) = rule.get_global_halfspaces(lmax, lmax_x, X, y, normX, normy)
if doInit:
A = ak
b = bk
normA = normak
doInit = False
else:
A = np.concatenate((A, ak))
b = np.append(b, bk)
normA = np.append(normA, normak)
# returns a_k, b_k and ||a_k||
# A \in R^(K x N)
# b \in R^K
# normA \in R_+^K
#print A.shape
return (A, b, normA)
| true |
e50bb70557abfc54ffa02c96d24e8e90a68a72a3 | Python | Siimon13/CodePlace | /Python/compound_interest.py | UTF-8 | 1,545 | 2.96875 | 3 | [] | no_license | import csv
def cal_return(prev_day_price, curr_day_price):
return (((curr_day_price - prev_day_price)/prev_day_price) * 100)
with open('Desktop/yahoo_hist_prices.csv') as csvfile:
lqd_reader = csv.reader(csvfile)
headers = next(lqd_reader)
headers.append("Return")
headers.append("Cumulative Interest")
lqd_list = list(lqd_reader)
price_ind = 1
return_ind = 3
start_ind = 0
for i,r in enumerate(lqd_list):
if r[0] == "2013-12-31":
start_ind = i
break
lqd_list[i].append(0)
lqd_list[i].append(0)
lqd_list[start_ind].append(0)
lqd_list[start_ind].append(100)
for i in range(start_ind+1,len(lqd_list)):
prev_day_price = float(lqd_list[i-1][price_ind])
curr_day_price = float(lqd_list[i][price_ind])
curr_return = cal_return(prev_day_price, curr_day_price)
curr_cumul_interest = lqd_list[i-1][return_ind]*(1+curr_return/100)
lqd_list[i].append(curr_return)
lqd_list[i].append(curr_cumul_interest)
with open('Desktop/yahoo_hist_lqd.csv', 'wb') as writefile:
csvwriter = csv.writer(writefile)
csvwriter.writerow(headers)
csvwriter.writerow(lqd_list[start_ind])
for r in lqd_list[start_ind+1:]:
period_ind = str(r[2]).index('.')
r[2] = str(r[2])[:period_ind+3] + "%"
period_ind = str(r[3]).index('.')
r[3] = str(r[3])[:period_ind+3] + "%"
csvwriter.writerow(r)
| true |
7e488edb7fbf524df5ac72dd4e4103640aba0410 | Python | drsphelps/PartIIProject | /src/classifier_rulebased.py | UTF-8 | 1,000 | 2.796875 | 3 | [] | no_license | from classifier import Classifier
import random
from sklearn import metrics
class RuleBasedClassifier(Classifier):
def __init__(self):
super().__init__(None)
self.words = [["ewhor", "e-whor"],
["stresser", "booter"], [" rat "], ["crypt", "fud"]]
def train(self, training_data: dict, params: dict):
pass
def pred(self, example: list):
max_mentions = 0
max_class = []
for classification in range(len(self.words)):
mentions = 0
for word in self.words[classification]:
if word in example:
mentions += 1
if mentions >= max_mentions:
max_mentions = mentions
max_class.append(classification)
if len(max_class) == 1:
return max_class[0]
else:
return random.choice(range(0, len(self.words)))
def test(self, data):
super().test(data)
self.present_results()
| true |
3a4401e2bebfbc054d8914d16ccde1ef2875a36d | Python | lakshmanboddoju/Deep_Learning_Prerequisites_The_Numpy_Stack_in_Python | /2/10-Matrix_operations.py | UTF-8 | 1,366 | 3.921875 | 4 | [] | no_license | #! python3
import numpy as np
A = np.array([[1, 2], [3, 4]])
AInv = np.linalg.inv(A)
print(AInv)
print(A.dot(AInv))
print(AInv.dot(A))
# Matrix Determinant
print(np.linalg.det(A))
# Diagonal
print(np.diag(A))
# Matrix with only diagonal elements
print(np.diag([1, 2, 3]))
# Outer product: used for covariance
# C(i, j) = A(i) * B(j)
# Differennt from the dot product / inner product i.e. C = sum_over_i(A(i) * B(i))
a = np.array([1, 2])
b = np.array([3, 4])
print(np.outer(a, b))
print(np.inner(a, b))
# Matrix Trace = sum of diagonals of the matrix
print(np.diag(A).sum())
print("Matrix Trace of A: " + str(np.trace(A)))
# Covariance
X = np.random.randn(100, 3)
covarianceX = np.cov(X)
print("Covariance of X: " + str(covarianceX))
print("Shape of covariance of X: " + str(covarianceX.shape))
print("Incorrect shape, needs to be 3x3.")
covarianceX = np.cov(X.T)
print("Correct covariance of X: " + str(covarianceX))
print("Correct shape: " + str(covarianceX.shape))
# EigenValues and EigenVectors
# Symmetrc Matrix == True when Matrix A == A.T i.e matrix is equal to its transpose
# Hermitian Matrix == True when A == A.H i.e. matrix is equal to its conjugate transpose
print(np.linalg.eigh(covarianceX))
eigenValues, eigenVectors = np.linalg.eig(covarianceX)
print(eigenValues)
print(eigenVectors)
| true |
37fb33c27a116d33e265cc8ced28897f81a75449 | Python | estraviz/codewars | /7_kyu/Array Mash/test_array_mash.py | UTF-8 | 747 | 3.234375 | 3 | [] | no_license | from array_mash import array_mash
def test_array_mash():
assert array_mash([1, 2, 3], ['a', 'b', 'c']) == [1, 'a', 2, 'b', 3, 'c']
assert array_mash([1, 2, 3, 4, 5], ['a', 'b', 'c', 'd', 'e']) == \
[1, 'a', 2, 'b', 3, 'c', 4, 'd', 5, 'e']
assert array_mash([1, 1, 1, 1], [2, 2, 2, 2]) == [1, 2, 1, 2, 1, 2, 1, 2]
assert array_mash([1, 8, 'hello', 'dog'], ['fish', '2', 9, 10]) == \
[1, "fish", 8, "2", "hello", 9, "dog", 10]
assert array_mash([None, 4], [None, 'hello']) == [None, None, 4, "hello"]
assert array_mash([1], [2]) == [1, 2]
assert array_mash(['h', 'l', 'o', 'o', 'l'],
['e', 'l', 'w', 'r', 'd']) == \
["h", "e", "l", "l", "o", "w", "o", "r", "l", "d"]
| true |
ea523225a06fd9ce51a37ec039a7dbc4095b1a47 | Python | ashutosh2329/Hackerrank-solution | /kangaroo.py | UTF-8 | 865 | 3.125 | 3 | [] | no_license | import math
import os
import random
import re
import sys
# Complete the kangaroo function below.
def kangaroo(x1, v1, x2, v2):
result = 'NO'
if(x1<=x2 and v1>v2):
while((x1+v1)<=(x2+v2)):
if(x1+v1 == x2+v2):
result = 'YES'
break
print(x2)
x1 += v1
x2 += v2
elif(x2<=x1 and v2>v1):
while((x1+v1)>=(x2+v2)):
if(x1+v1 == x2+v2):
result = 'YES'
break
x1 += v1
x2 += v2
return result
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
x1V1X2V2 = input().split()
x1 = int(x1V1X2V2[0])
v1 = int(x1V1X2V2[1])
x2 = int(x1V1X2V2[2])
v2 = int(x1V1X2V2[3])
result = kangaroo(x1, v1, x2, v2)
fptr.write(result + '\n')
fptr.close()
| true |
78f4ef34957556716be1ee764c0f26c1f8f17942 | Python | thanhtuan18/anthanhtuan-fundametal-c4e15 | /Session6/homework/ex11.py | UTF-8 | 206 | 3.46875 | 3 | [] | no_license | def is_inside(x1, y1, x2, y2, w, h):
if ((x2 + w) >= x1 >= x2 ) and ((y2 + h) >= y1 >= y2):
return(True)
else:
return(False)
# c = is_inside(100, 120, 140, 60, 100, 200)
# print(c)
| true |
38fa57944159674f7232a70bd08ef5ccd46ebdef | Python | TheMessik/didier | /functions/les.py | UTF-8 | 13,045 | 2.84375 | 3 | [] | no_license | import datetime
import discord
from functions import config, timeFormatters, stringFormatters
from functions.numbers import clamp
import json
# TODO use constants & enums instead of hardcoding platform names
# also make the naming in the jsons more consistent
def createCourseString(courses):
courseString = ""
for course in sorted(courses, key=lambda item: item["slot"]["time"][1]):
# Add a ":" to the hour + add a leading "0" if needed
start = timeFormatters.timeFromInt(course["slot"]["time"][1])
end = timeFormatters.timeFromInt(course["slot"]["time"][2])
courseString += "{} - {}: {} {}\n".format(start, end,
str(course["course"]), getLocation(course["slot"]))
return courseString
def createEmbed(day, dayDatetime, semester, year, schedule):
# Create a date object to check the current week
startDate = 1612224000
currentTime = dayDatetime.timestamp()
week = clamp(timeFormatters.timeIn(currentTime - startDate, "weeks")[0], 1, 13)
# Compensate for easter holidays
# Sorry but I don't have time to make a clean solution for this rn
# this will have to do
if currentTime > 1617377400:
week -= 2
title, week = getTitle(day, dayDatetime, week)
# Add all courses & their corresponding times + locations of today
courses, extras, prev, online = getCourses(schedule, day, week)
embed = discord.Embed(colour=discord.Colour.blue(), title=title)
embed.set_author(name="Lessenrooster voor {}{} Bachelor".format(year, "ste" if year == 1 else "de"))
if len(courses) == 0:
embed.add_field(name="Geen Les", value="Geen Les", inline=False)
else:
courseString = createCourseString(courses)
# TODO uncomment this when covid rules slow down
# courseString += "\nGroep {} heeft vandaag online les.".format(1 if week % 2 == 0 else 2)
embed.description = courseString
if prev:
embed.add_field(name="Vakken uit vorige jaren", value=createCourseString(prev), inline=False)
if extras:
embed.add_field(name="Extra", value="\n".join(getExtras(extra) for extra in extras), inline=False)
# Add online links - temporarily removed because everything is online right now
if online:
uniqueLinks: dict = getUniqueLinks(online)
embed.add_field(name="Online Links", value="\n".join(
sorted(getLinks(onlineClass, links) for onlineClass, links in uniqueLinks.items())))
embed.set_footer(text="Semester {} | Lesweek {}".format(semester, round(week)))
return embed
def findDate(targetWeekday):
"""
Function that finds the datetime object that corresponds to
the next occurence of [targetWeekday].
:param targetWeekday: The weekday to find
"""
now = timeFormatters.dateTimeNow()
while now.weekday() != targetWeekday:
now = now + datetime.timedelta(days=1)
return now
def getCourses(schedule, day, week):
"""
Function that creates a list of all courses of this day,
a list of all online links, and extra information for these courses.
:param schedule: A user's (customized) schedule
:param day: The current weekday
:param week: The current week
"""
# Add all courses & their corresponding times + locations of today
courses = []
extras = []
prev = []
onlineLinks = []
for course in schedule:
for slot in course["slots"]:
if day in slot["time"]:
# Basic dict containing the course name & the class' time slot
classDic = {"course": course["course"], "slot": slot}
# Class was canceled
if "canceled" in slot and "weeks" in slot and week in slot["weeks"]:
extras.append(classDic)
continue
# Add online links for those at home
# Check if link hasn't been added yet
if "online" in slot and not any(el["course"] == course["course"] and
# Avoid KeyErrors: if either of these don't have an online link yet,
# add it as well
("online" not in el or el["online"] == slot["online"])
for el in onlineLinks):
# Some courses have multiple links on the same day,
# add all of them
if "bongo" in slot["online"].lower():
onlineDic = {"course": course["course"], "online": "Bongo Virtual Classroom",
"link": course["bongo"]}
onlineLinks.append(onlineDic)
if "zoom" in slot["online"].lower():
onlineDic = {"course": course["course"], "online": "ZOOM", "link": course["zoom"]}
onlineLinks.append(onlineDic)
if "teams" in slot["online"].lower():
onlineDic = {"course": course["course"], "online": "MS Teams", "link": course["msteams"]}
onlineLinks.append(onlineDic)
# Add this class' bongo, msteams & zoom links
if "bongo" in course:
classDic["slot"]["bongo"] = course["bongo"]
if "msteams" in course:
classDic["slot"]["msteams"] = course["msteams"]
if "zoom" in course:
classDic["slot"]["zoom"] = course["zoom"]
if "custom" in course:
prev.append(classDic)
# Check for special classes
if "weeks" in slot and "online" not in slot:
if week in slot["weeks"]:
if "custom" not in course:
courses.append(classDic)
extras.append(classDic)
elif "weeks" in slot and "online" in slot and "group" not in slot:
# This class is only online for this week
if week in slot["weeks"]:
if "custom" not in course:
courses.append(classDic)
extras.append(classDic)
else:
# Nothing special happening, just add it to the list of courses
# in case this is a course for everyone in this year
if "custom" not in course:
courses.append(classDic)
# Filter out normal courses that are replaced with special courses
for extra in extras:
for course in courses:
if course["slot"]["time"] == extra["slot"]["time"] and course != extra:
courses.remove(course)
break
# Sort online links alphabetically
onlineLinks.sort(key=lambda x: x["course"])
# Remove links of canceled classes
for element in onlineLinks:
if not any(c["course"] == element["course"] for c in courses):
onlineLinks.remove(element)
return courses, extras, prev, onlineLinks
def getExtras(extra):
"""
Function that returns a formatted string giving clear info
when a course is happening somewhere else (or canceled).
"""
start = timeFormatters.timeFromInt(extra["slot"]["time"][1])
end = timeFormatters.timeFromInt(extra["slot"]["time"][2])
location = getLocation(extra["slot"])
if "canceled" in extra["slot"]:
return "De les **{}** van **{}** tot **{}** gaat vandaag uitzonderlijk **niet** door.".format(
extra["course"], start, end
)
if "group" in extra["slot"]:
return "**Groep {}** heeft vandaag uitzonderlijk **{}** **{}** van **{} tot {}**.".format(
extra["slot"]["group"], extra["course"], location,
start, end
)
elif "online" in extra["slot"]:
return "**{}** gaat vandaag uitzonderlijk **online** door {} van **{} tot {}**.".format(
extra["course"], location[7:],
start, end
)
else:
return "**{}** vindt vandaag uitzonderlijk plaats **{}** van **{} tot {}**.".format(
extra["course"], location,
start, end
)
def getUniqueLinks(onlineClasses):
"""
Function that returns a dict of all online unique online links for every class
in case some classes have multiple links on the same day.
"""
# Create a list of all unique course names
courseNames = list(set(oc["course"] for oc in onlineClasses))
uniqueLinks: dict = {}
# Add every link of every class into the dict
for name in courseNames:
uniqueLinks[name] = {}
for oc in onlineClasses:
if oc["course"] == name:
# Add the link for this platform
uniqueLinks[name][oc["online"]] = oc["link"]
return uniqueLinks
def getLinks(onlineClass, links):
"""
Function that returns a formatted string giving a hyperlink
to every online link for this class today.
"""
return "{}: {}".format(onlineClass,
" | ".join(
["**[{}]({})**".format(platform, url) for platform, url in
links.items()])
)
def getLocation(slot):
"""
Function that returns a formatted string indicating where this course
is happening.
"""
if "canceled" in slot:
return None
# TODO fix this because it's ugly
if "online" in slot:
return "online @ **[{}]({})**".format(slot["online"],
slot["zoom"] if slot["online"] == "ZOOM" else slot["msteams"] if slot[
"online"] == "MS Teams" else
slot["bongo"])
# Check for courses in multiple locations
if "locations" in slot:
# Language - 'en' for the last one
return ", ".join(getLocation(location) for location in slot["locations"][:-1]) \
+ " en " + getLocation(slot["locations"][-1])
return "in {} {} {}".format(slot["campus"], slot["building"], slot["room"])
def getSchedule(semester, year):
with open("files/schedules/{}{}.json".format(year, semester), "r") as fp:
schedule = json.load(fp)
return schedule
def getTitle(day, dayDT, week):
# now = timeFormatters.dateTimeNow()
# if timeFormatters.weekdayToInt(day) < now.weekday():
# week += 1
day = day[0].upper() + day[1:].lower()
titleString = "{} {}/{}/{}".format(day, stringFormatters.leadingZero(dayDT.day),
stringFormatters.leadingZero(dayDT.month), dayDT.year)
return titleString, week
# Returns the day of the week, while keeping track of weekends
def getWeekDay(day=None):
weekDays = ["maandag", "dinsdag", "woensdag", "donderdag", "vrijdag"]
# Get current day of the week
dayNumber = datetime.datetime.today().weekday()
# If a day or a modifier was passed, show that day instead
if day is not None:
if day[0] == "morgen":
dayNumber += 1
elif day[0] == "overmorgen":
dayNumber += 2
else:
for i in range(5):
if weekDays[i].startswith(day):
dayNumber = i
# Weekends should be skipped
dayNumber = dayNumber % 7
if dayNumber > 4:
dayNumber = 0
# Get daystring
return dayNumber, weekDays[dayNumber]
def parseArgs(day):
semester = int(config.get("semester"))
year = int(config.get("year"))
years_counter = int(config.get("years"))
# Check if a schedule or a day was called
if len(day) == 0:
day = []
else:
# Only either of them was passed
if len(day) == 1:
# Called a schedule
if day[0].isdigit():
if 0 < int(day[0]) < years_counter + 1:
year = int(day[0])
day = []
else:
return [False, "Dit is geen geldige jaargang."]
# elif: calling a weekday is automatically handled below,
# so checking is obsolete
else:
# TODO check other direction (di 1) in else
# Both were passed
if day[0].isdigit():
if 0 < int(day[0]) < years_counter + 1:
year = int(day[0])
# day = []
else:
return [False, "Dit is geen geldige jaargang."]
# Cut the schedule from the string
day = day[1:]
day = getWeekDay(None if len(day) == 0 else day)[1]
dayDatetime = findDate(timeFormatters.weekdayToInt(day))
return [True, day, dayDatetime, semester, year]
| true |
235c2bb7db6d1fab25a1ca4de4eaea54c28ad052 | Python | jmnel/simulated-annealing | /gradient_descent.py | UTF-8 | 2,032 | 2.65625 | 3 | [
"MIT"
] | permissive | from typing import Callable
import numpy as np
import scipy.optimize as optim
from step_size import armijo_step, gss
from utils import grad_approx
# def _defult_ls(f, x0, jac, ls_kwargs):
# return armijo_step(f, x0, jac, ls_kwargs)
def _defult_ls(f, x0, jac, ls_kwargs):
return gss(f, x0, **ls_kwargs)
def grad_descent(f: Callable,
x0: np.ndarray,
jac: Callable,
tol: float = 1e-14,
max_iterations=200,
ls_method: Callable = _defult_ls,
ls_kwargs={'a': 0.0, 'b': 1.0}):
x = x0
nfev = 0
njev = 0
# Do main algorithm loop.
for i in range(max_iterations):
g = jac(x)
njev += 1
# Exit if jacient is close to 0. We are likely at a local minimum.
if np.linalg.norm(g) <= tol:
return optim.OptimizeResult(x=x,
success=True,
status=0,
message='found optimal value',
fun=f(x),
jac=g,
nfev=nfev,
njev=njev,
nit=i)
ls_result = ls_method(f=lambda t: f(x - t * g),
jac=lambda t: np.dot(-jac(x - t * g), g),
**ls_kwargs)
t = ls_result.x
nfev += ls_result.nfev
njev += ls_result.njev
x = x - t * g
return optim.OptimizeResult(x=x,
success=False,
status=-1,
message='max iteratios exceeded',
fun=f(x),
jac=g,
nfev=nfev,
njev=njev,
nit=max_iterations)
| true |
7ad511c49a5566ea52759e1f2138e83723dc3eb0 | Python | KavinSub/project-euler | /51-75/p_72.py | UTF-8 | 1,734 | 3.53125 | 4 | [] | no_license | # Project Euler Problem 72: Counting Fractions
# Author: Kavin Subramanyam
from math import sqrt
import time
from functools import reduce
from operator import mul
def generate_sieve(n):
L = [True] * (n + 1)
L[0] = False
L[1] = False
prime = 2
k = prime
while prime < int(sqrt(n)) + 1:
while k <= n - prime:
k += prime
L[k] = False
prime += 1
while L[prime] == False:
prime += 1
k = prime
return L
max_value = 1000000
prime_sieve = generate_sieve(max_value)
def is_prime(n):
return prime_sieve[n]
def get_primes(sieve):
return [i for i in range(len(sieve)) if sieve[i] == True]
def prime_divisors(n):
pd = []
for i in range(1, int(sqrt(n)) + 1):
if is_prime(i):
if n % i == 0:
pd.append(i)
v = n//i
if is_prime(v) and v != i:
pd.append(v)
return pd
def phi(n):
if is_prime(n): return n - 1
den = prime_divisors(n)
num = [p - 1 for p in den]
return (n * reduce(mul, num, 1))//reduce(mul, den, 1)
if __name__ == '__main__':
begin = time.time()
count = 1 # {1/2}
memo = {}
primes = get_primes(prime_sieve)
for p in primes:
if p > int(sqrt(max_value)): memo[p] = p -1
x = p - 1
power = 1
while p**power < max_value:
t = p**power
v = x*(p**(power - 1))
memo[t] = v
power += 1
for p in primes:
k = 1
while k*p <= max_value:
if k not in memo:
k += 1
continue
if k % p == 0:
memo[k*p] = phi(k*p)
k += 1
continue
v = (p - 1)*memo[k]
memo[k*p] = v
power = 2
while (k*p)**power <= max_value:
t = (k*p)**power
r = v*((k*p)**(power - 1))
memo[t] = r
power += 1
k += 1
for i in range(3, max_value + 1):
count += memo[i]
end = time.time()
print("Time taken:", end - begin)
print("Solution:", count) | true |
dcb0134f0dc0605f13f107c13efc9ae4f5ee1002 | Python | Artherstock/SosiskaKiller | /POP.py | UTF-8 | 789 | 2.875 | 3 | [] | no_license | from bs4 import BeautifulSoup
import requests
def parse():
URL ='https://steampay.com/special'
HEADERS = {
'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36'
}
response = requests.get(URL, headers= HEADERS)
soup = BeautifulSoup(response.content, 'html.parser')
items = soup.findAll('a', class_= 'catalog-item')
comps = []
for item in items:
comps.append({
'title': item.find('div', class_= 'catalog-item__name').get_text(strip = True),
'price': item.find('span', calss_= 'ccatalog-item__price-span').get_text(strip = True)
})
for comp in comps:
print(comp['title'])
print(comp['price'])
parse() | true |
9be55e5392482c8dc82e12aa7674abec5453a95f | Python | KirstenSpeirs/AI-Assignment | /AI_script.py | UTF-8 | 1,797 | 3.671875 | 4 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import random
import os
# Reads the file of colours
# Returns the number of colours in the file and a list with the colours (RGB) values
def read_file(fname):
with open(fname, 'r') as afile:
lines = afile.readlines()
n = int(lines[3]) # number of colours in the file
col = []
lines = lines[4:] # colors as rgb values
for l in lines:
rgb = l.split()
col.append(rgb)
return n, col
# Display the colours in the order of the permutation in a pyplot window
# Input, list of colours, and ordering of colours.
# They need to be of the same length
def plot_colours(col, perm):
assert len(col) == len(perm)
ratio = 10 # ratio of line height/width, e.g. colour lines will have height 10 and width 1
img = np.zeros((ratio, len(col), 3))
for i in range(0, len(col)):
img[:, i, :] = colours[perm[i]]
fig, axes = plt.subplots(1, figsize=(8, 4)) # figsize=(width,height) handles window dimensions
axes.imshow(img, interpolation='nearest')
axes.axis('off')
plt.show()
#####_______main_____######
# Get the directory where the file is located
dir_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(dir_path) # Change the working directory so we can read the file
ncolors, colours = read_file('colours') # Total number of colours and list of colours
test_size = 100 # Size of the subset of colours for testing
test_colours = colours[0:test_size] # list of colours for testing
permutation = random.sample(range(test_size),
test_size) # produces random pemutation of lenght test_size, from the numbers 0 to test_size -1
plot_colours(test_colours, permutation)
| true |
8f7e65bf5a97a3331a0f4efa8ed545f4ad228805 | Python | jproddy/rosalind | /bioinformatics_textbook_track/ba10a.py | UTF-8 | 949 | 3.671875 | 4 | [] | permissive | '''
Compute the Probability of a Hidden Path
http://rosalind.info/problems/ba10a/
Given: A hidden path π followed by the states States and transition matrix Transition of an HMM (Σ, States, Transition, Emission).
Return: The probability of this path, Pr(π). You may assume that initial probabilities are equal.
'''
filename = 'rosalind_ba10a.txt'
def p_path(path, states, transition):
p = 1 / len(states) # initial probability dependant on number of states
for i in range(len(path)-1):
p *= transition[path[i:i+2]]
return p
def main():
with open(filename) as f:
path = f.readline().strip()
f.readline()
states = f.readline().strip().split()
f.readline()
f.readline()
matrix = [line.strip().split() for line in f.readlines()]
transition = {}
for row in matrix:
for i, prob in enumerate(row[1:]):
transition[row[0] + states[i]] = float(prob)
print(p_path(path, states, transition))
if __name__ == '__main__':
main()
| true |
69ef2aa579778808f81c1e7839050de542b3cedd | Python | tildabarth/pink-workshop-api-start | /services/runs.py | UTF-8 | 1,943 | 2.78125 | 3 | [] | no_license | import datetime as dt
import typing as t
from settings import get_settings
from data import RUNS_TARGET as RUNS_SOURCE
from services import (
create_item_dict,
delete_item_dict,
get_collection_dict,
get_item_dict,
update_item_dict
)
ItemDict = t.Dict[str, t.Any]
ItemCollection = t.Dict[str, ItemDict]
settings = get_settings()
def datetime_to_string(item_datetime: t.Optional[dt.datetime]) -> t.Optional[str]:
"""Convert datetime to string.
Any datetime must be converted to string before serializing.
"""
if item_datetime:
return item_datetime.strftime(settings.datetime_format)
def string_to_datetime(item_datetime: dt.datetime) -> str:
"""Convert string to datetime."""
return dt.datetime.strptime(item_datetime, settings.datetime_format)
def get_collection() -> ItemCollection:
"""Get runs from JSON source."""
return get_collection_dict(RUNS_SOURCE)
def get_dicts() -> t.List[ItemDict]:
"""Return item dicts sorted on start time in descending order."""
collection = get_collection()
return sorted(
collection.values(),
key=lambda item: string_to_datetime(item['start']), reverse=True)
def get_dict(item_id: int) -> t.Optional[ItemDict]:
"""Get run by id."""
return get_item_dict(item_id, RUNS_SOURCE)
def create_dict(item_dict: ItemDict) -> ItemDict:
"""Create run in collection."""
if 'start' in item_dict:
item_dict['start'] = datetime_to_string(item_dict['start'])
return create_item_dict(item_dict, RUNS_SOURCE)
def update_dict(item_id: int, item_dict: ItemDict) -> ItemDict:
"""Update run in collection."""
if 'start' in item_dict:
item_dict['start'] = datetime_to_string(item_dict['start'])
return update_item_dict(item_id, item_dict, RUNS_SOURCE)
def delete_dict(item_id: int) -> None:
"""Delete run from collection."""
return delete_item_dict(item_id, RUNS_SOURCE)
| true |
feaaab2eee93e5e675a5d5abdf9e02593998dbe5 | Python | Rogue05/SPD | /zad34/wyzazanie.py | UTF-8 | 1,610 | 3 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 6 13:41:32 2019
@author: Wojtek
"""
from Process import *
from copy import deepcopy
from math import exp
from random import random
def swap_noighbours_move(order):
norder = deepcopy(order)
i = int((random()*len(norder))%(len(norder)-1))
j = i+1
# print(i)
norder[i],norder[j] = norder[j],norder[i]
return norder
def swap_random_move(order):
    """Return a copy of *order* with two randomly chosen positions swapped.

    The two indices may coincide, in which case the copy is returned
    unchanged. The input sequence itself is never modified.
    """
    candidate = deepcopy(order)
    first = int((random() * len(candidate)) % (len(candidate)))
    second = int((random() * len(candidate)) % (len(candidate)))
    candidate[first], candidate[second] = candidate[second], candidate[first]
    return candidate
def insert_move(order):
    """Return a copy of *order* with one element moved to a random position.

    Note: removal is by value (first occurrence), which only matters when
    *order* contains duplicate values. The input itself is never modified.
    """
    candidate = deepcopy(order)
    src = int((random() * len(candidate)) % (len(candidate) - 1))
    dst = int((random() * len(candidate)) % (len(candidate) - 1))
    moved = candidate[src]
    candidate.remove(moved)
    candidate.insert(dst, moved)
    return candidate
def default_kryt(c1, c2, T):
    """Metropolis acceptance criterion for simulated annealing.

    A strictly better (lower-cost) candidate ``c2`` is always accepted; a
    worse one is accepted with probability ``exp((c1 - c2) / T)``, which
    shrinks as the temperature ``T`` drops.

    Fix: the original returned ``False`` for every worse candidate (the
    ``exp((c1-c2)/T)`` term was left commented out), degenerating the
    annealing into plain hill climbing.
    """
    if c2 < c1:
        return True
    # Worse candidate: accept probabilistically (Metropolis rule).
    return random() < exp((c1 - c2) / T)
def wyzarzanie(order,*,
               T=100,
               schemat_chlodzenia=lambda x,i:x*0.99,
               wykonaj_ruch=swap_noighbours_move,
               kryterium_stopu=lambda x:x>1,
               kryterium_ruchu=default_kryt):
    """Simulated-annealing driver.

    Repeatedly perturbs the schedule, accepting candidates via
    *kryterium_ruchu* and cooling ``T`` via *schemat_chlodzenia* until
    *kryterium_stopu* fails.

    NOTE: each candidate is generated from the *initial* ``order`` (not from
    the current best), faithfully preserving the original behaviour.
    """
    best_order, best_cost = order, get_cost(order)
    step = 1
    while kryterium_stopu(T):
        candidate = wykonaj_ruch(order)
        candidate_cost = get_cost(candidate)
        if kryterium_ruchu(best_cost, candidate_cost, T):
            best_order, best_cost = candidate, candidate_cost
        T = schemat_chlodzenia(T, step)
        step += 1
    return best_order
63f85f632b64825bb498444cb70bc19aa23b0187 | Python | Nibbetts/analyst | /experiments/litmus_small5_part4.py | UTF-8 | 1,924 | 2.59375 | 3 | [] | no_license | #from __future__ import print_function
#from __future__ import absolute_import
#from builtins import str, bytes
if __name__ == "__main__":
    # Install the latest Tensorflow version.
    #!pip3 install --quiet "tensorflow>=1.7"
    # Install TF-Hub.
    #!pip3 install --quiet tensorflow-hub
    #???? !pip3 install seaborn
    import analyst as an
    #from ...scholar.scholar import scholar as sch
    import numpy as np
    import scipy.spatial as sp
    from tqdm import tqdm
    import pickle as pkl
    import os.path
    import gensim
    import tensorflow as tf
    #import tensorflow_hub as hub
    # Cap on how many embedding rows to read from the GloVe file.
    MAX_LINES = 20000
    def normalize(vec):
        # Scale a vector to unit length (L2 norm).
        return vec/np.linalg.norm(vec)
    #metric = "cosine"
    # Distance metric handed to the Analyst (angle between vectors).
    metric = an.Analyst.angle
    def read_text_table(path, firstline=True, limit_lines=None):
        # Read a word-embedding text table: each row is "token v1 v2 ... vd".
        # When *firstline* is True the first line holds "<numvecs> <dim>";
        # otherwise both are inferred (optionally capped by *limit_lines*).
        lines = open(path, 'rt').readlines()
        if firstline:
            numvecs, dim = map(int, lines[0].split(" "))
        else:
            numvecs = len(lines) if limit_lines == None \
                else min(len(lines), limit_lines)
            dim = len(lines[0].split(" ")) - 1
        strings = []
        embeddings = np.empty(shape=(numvecs, dim))
        for i in tqdm(range(numvecs), desc="Reading " + path):
            # `i + firstline` skips the header row when present (bool as int).
            row = lines[i + firstline].split(" ")
            strings.append(row[0])#str(row[0]))
            embeddings[i] = row[1:]
        return strings, embeddings
    # GloVe:
    #   ordered by frequency, I think.
    #   non-normalized.
    str_g, embed_g = read_text_table(
        "embeddings/glove.6B.300d.txt", firstline=False, limit_lines=MAX_LINES)
    # Normalize every embedding before analysis.
    embed_g = [normalize(v) for v in embed_g]
    an_g = an.Analyst(embeddings=embed_g, strings=str_g, metric=metric,
        auto_print=True, desc="GloVe Normalized")
    print("Success at saving GloVe Normalized: " + str(an.Analyst.save(an_g,
        "saved_analyses/an" + str(MAX_LINES) + "_glove_normalized")))
8930651d831abc9215ec154e89963aae8524be20 | Python | pavelhouzva/ritetag-api-python | /tests/test_api.py | UTF-8 | 1,453 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | import ritetag
import pytest
from unittest import TestCase
class TestBasics(TestCase):
    """Unit tests for the RiteTagApi input sanitizers (hashtag, domain, url)."""
    def setUp(self) -> None:
        # An empty token suffices: sanitizers run without any network call.
        access_token = ''
        self.client = ritetag.RiteTagApi(access_token)
    def test_sanitize_hashtag(self):
        # The leading '#' is stripped from valid hashtags.
        assert self.client._sanitize_hashtag('#hello') == 'hello'
        assert self.client._sanitize_hashtag('#blob') == 'blob'
    def test_invalid_hashtag(self):
        # Embedded whitespace makes a hashtag invalid.
        with pytest.raises(ritetag.RiteTagException, match=r'Invalid hashtag'):
            self.client._sanitize_hashtag('#bl ob')
    def test_sanitize_domain(self):
        # An http(s) scheme, when present, is stripped down to the bare domain.
        assert self.client._sanitize_domain('google.com') == 'google.com'
        assert self.client._sanitize_domain('https://google.com') == 'google.com'
    def test_invalid_domain_1(self):
        # A path component is not allowed in a domain.
        with pytest.raises(ritetag.RiteTagException, match=r'Invalid domain'):
            self.client._sanitize_domain('ritetag.com/test')
    def test_invalid_domain_2(self):
        # Non-HTTP schemes are rejected.
        with pytest.raises(ritetag.RiteTagException, match=r'Invalid domain'):
            self.client._sanitize_domain('ftp://ritetag.com')
    def test_sanitize_url(self):
        # A missing scheme defaults to http://; an existing scheme is preserved.
        assert self.client._sanitize_url('google.com') == 'http://google.com'
        assert self.client._sanitize_url('https://google.com') == 'https://google.com'
    def test_invalid_url(self):
        # Non-HTTP schemes are rejected for URLs as well.
        with pytest.raises(ritetag.RiteTagException, match=r'Invalid url'):
            self.client._sanitize_url('ftp://ritetag.com')
e152a076b321b8ece8a494aca9ee2eb682c4cc10 | Python | kozer/pytorch-tutorial | /src/pytorch_simple_CNN.py | UTF-8 | 3,696 | 3.109375 | 3 | [] | no_license | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F # relu etc
from torch.utils.data import DataLoader
import torchvision.datasets as datasets
import torchvision.transforms as transforms
class CNN(nn.Module):
    """Small two-layer convolutional classifier for 28x28 images.

    Two 'same' 3x3 convolutions, each followed by 2x2 max pooling
    (28 -> 14 -> 7 spatially), then one fully connected layer mapping the
    flattened 16x7x7 features to class scores.
    """
    def __init__(self, in_channels=1, num_classes=10):
        super(CNN, self).__init__()
        # First conv keeps spatial size (padding 1 with a 3x3 kernel).
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=8,
                               kernel_size=(3, 3), stride=(1, 1),
                               padding=(1, 1))
        # Shared 2x2 pooling layer (stateless, so one instance is enough).
        self.pool = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        self.conv2 = nn.Conv2d(in_channels=8, out_channels=16,
                               kernel_size=(3, 3), stride=(1, 1),
                               padding=(1, 1))
        self.fc1 = nn.Linear(16 * 7 * 7, num_classes)

    def forward(self, x):
        """Return raw class scores of shape (batch, num_classes)."""
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        out = out.reshape(out.shape[0], -1)  # flatten all feature maps
        return self.fc1(out)
# Testing if returns correct shape
# model = CNN()
# x = torch.randn(64, 1, 28, 28)
# print(model(x).shape)
# exit()
# Set device
device = torch.device("cuda" if torch.cuda.is_available() else 'cpu')
# Hyperparameters
in_channels = 1
num_classes = 10
learning_rate = 0.001
batch_size = 64
num_epochs = 5
# Load data (downloads MNIST into dataset/ on first run)
train_dataset = datasets.MNIST(root='dataset/',
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          shuffle=True)
test_dataset = datasets.MNIST(root='dataset/',
                              train=False,
                              transform=transforms.ToTensor(),
                              download=True)
test_loader = DataLoader(dataset=test_dataset,
                         batch_size=batch_size,
                         shuffle=True)
# Initialize network
model = CNN(in_channels=in_channels, num_classes=num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Train network
for epoch in range(num_epochs):
    for idx, (data, targets) in enumerate(train_loader):
        # Move the batch to the selected device.
        data = data.to(device=device)
        targets = targets.to(device=device)
        # print(data.shape)
        # forward
        scores = model(data)
        loss = criterion(scores, targets)
        #backward
        optimizer.zero_grad()
        loss.backward()
        # gradient descent or adam step
        optimizer.step()
# Check accuracy on training & test to see how good our model is
def check_acc(loader, model):
    """Report *model*'s classification accuracy over *loader*.

    Switches the model to eval mode for the measurement (restored to train
    mode on exit) and runs without gradient tracking. Tensors are moved to
    the module-level ``device``.
    """
    split_name = 'training' if loader.dataset.train else 'testing'
    print('Checking accuracy on {} data'.format(split_name))

    correct = 0
    total = 0
    model.eval()
    with torch.no_grad():
        for inputs, labels in loader:
            inputs = inputs.to(device=device)
            labels = labels.to(device=device)
            # Predicted class is the argmax over the score dimension.
            _, predicted = model(inputs).max(1)
            correct += (predicted == labels).sum()
            total += predicted.size(0)
        print(
            f'Got {correct} / {total} with acc {float(correct)/float(total) * 100:.2f}'
        )
    model.train()
check_acc(test_loader, model)
| true |
ed16dcceec32508febc8546b718164a244c3695b | Python | amkall/Processamento-de-Imagens | /media.py | UTF-8 | 635 | 2.890625 | 3 | [] | no_license | import cv2 as cv
import numpy as np
def media(img):
    """3x3 mean (box) filter for a single-channel 8-bit image.

    Border pixels (a one-pixel frame) are left at 0; each interior pixel
    becomes the rounded mean of its 3x3 neighbourhood.

    Fix: the original accumulated uint8 pixels directly, so the running sum
    wrapped around at 255 and corrupted the mean in bright neighbourhoods.
    Sums are now taken over Python ints.
    """
    resultImage = np.zeros((img.shape[0], img.shape[1]), np.uint8)
    windowsSize = 3
    edge = 1
    rows, cols = img.shape
    for i in range(edge, rows - edge):
        for j in range(edge, cols - edge):
            value = 0
            for x in range(windowsSize):
                for y in range(windowsSize):
                    # int() promotes out of uint8 to avoid wraparound.
                    value += int(img[i - edge + x][j - edge + y])
            resultImage[i, j] = round(value / (windowsSize * windowsSize))
    return resultImage
# Load the source image as grayscale (flag 0) and display the filtered result.
img = cv.imread('soma2.jpg', 0)
cv.imshow('media', media(img))
cv.waitKey()  # block until a key press so the window stays open
d968e727b96756a63ffb1682d68eaa662f1df246 | Python | dh-trier/converse | /html2tei.py | UTF-8 | 3,195 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python3
"""
Script for turning FineReader HTML to simple XML-TEI.
See: https://lxml.de/
"""
# Imports
import os
import re
import glob
from os.path import join
# Parameters
# Hard-coded workstation paths: data root, TEI header template, input glob
# and output folder. Adjust these before running on another machine.
datadir = join("/", "home", "christof", "Dropbox", "6-Library", "0-Collections", "converse-data", "")
headerfile = join(os.path.curdir, "teiHeader-model.xml")
htmlfolder = join(datadir, "html", "*.htm")
xmlfolder = join(datadir, "tei", "")
# Functions
def read_html(file):
    """Read an HTML file as UTF-8 and return its full text content."""
    with open(file, "r", encoding="utf8") as infile:
        return infile.read()
def remove_htmlhead(text):
    """Strip the HTML preamble and convert body tags to TEI text/body/div."""
    substitutions = (
        ("<!DOCTYPE.*?</head>", "", re.DOTALL),
        ("</html>", "", 0),
        ("<body>", "<text>\n<body>\n<div>", 0),
        ("</body>", "</div>\n</body>\n</text>", 0),
    )
    for pattern, repl, flags in substitutions:
        text = re.sub(pattern, repl, text, flags=flags)
    return text
def replace_nbsp(text):
    # NOTE(review): the first argument appears to be a single space here, but
    # given the function name it is presumably a non-breaking space (U+00A0)
    # that was lost in transit -- verify the actual byte against the
    # repository source before relying on this normalisation.
    text = re.sub(" ", " ", text)
    return text
def remove_spans(text):
    """Unwrap plain FineReader font spans, keeping only their inner text."""
    span_re = re.compile(r"<span class=\"font\d+\">(.*?)</span>")
    return span_re.sub(r"\1", text)
def mark_italics(text):
    """Convert italic-styled FineReader spans to TEI <hi rend="italic">."""
    italic_re = re.compile(
        r"<span class=\"font\d+\" style=\"font-style:italic;\">(.*?)</span>",
        re.DOTALL,
    )
    return italic_re.sub(r'<hi rend="italic">\1</hi>', text)
def mark_sup(text):
    """Convert <sup> elements to TEI <hi rend="sup">."""
    return re.sub(r"<sup>(.*?)</sup>", r'<hi rend="sup">\1</hi>',
                  text, flags=re.DOTALL)
def mark_divs(text):
    """Turn caption <h1> headings into TEI division boundaries with <head>."""
    heading_re = re.compile(
        r"<h1><a name=\"caption1\"></a><a name=\"bookmark\d+\"></a>(.*?)</h1>",
        re.DOTALL,
    )
    return heading_re.sub(r"</div><div><head>\1</head>", text)
def mark_directspeech(text):
    """Wrap paragraphs opened by a dash (ASCII "--" or em dash) in <said>.

    Both dash styles are normalised to "-- " in the output.
    """
    for dash in ("--", "—"):
        text = re.sub(
            "<p>" + dash + " {1,10}(.*?)</p>",
            r"<p>-- <said>\1</said></p>",
            text,
        )
    return text
def mark_chapters(text):
    """Convert bookmark <h1>/<h2> headings into typed TEI divisions.

    Note: only single-digit bookmark anchors are matched (``\\d``, not
    ``\\d+``), mirroring the original expressions; h2 is handled first.
    """
    for level in ("h2", "h1"):
        text = re.sub(
            "<" + level + "><a name=\"bookmark\\d\"></a>(.*?)</" + level + ">",
            "\n</div>\n<div type=\"" + level + "\">\n<head>\\1</head>",
            text,
        )
    return text
def get_text(html):
    """Run the full HTML-to-TEI transformation pipeline over *html*.

    Order matters: structural cleanup first, then inline markup, then
    division/chapter segmentation.
    """
    pipeline = (
        remove_htmlhead,
        replace_nbsp,
        remove_spans,
        mark_italics,
        mark_sup,
        mark_divs,
        mark_directspeech,
        mark_chapters,
    )
    text = html
    for step in pipeline:
        text = step(text)
    return text
def get_header(headerfile):
    """Return the teiHeader template text read from *headerfile* (UTF-8)."""
    with open(headerfile, "r", encoding="utf8") as infile:
        return infile.read()
def merge(header, text):
    """Concatenate header and text and close the TEI root element."""
    return "".join((header, text, "\n</TEI>"))
def save_xmltei(text, xmlfolder, basename):
    """Write *text* to ``<xmlfolder>/<basename>_generated.xml`` as UTF-8.

    The target path is printed as progress feedback.
    """
    filename = join(xmlfolder, basename + "_generated.xml")
    print(filename)
    with open(filename, "w", encoding="utf8") as outfile:
        outfile.write(text)
def main(headerfile, htmlfolder, xmlfolder):
    """Convert every HTML file matching the *htmlfolder* glob to XML-TEI.

    The header template is (re)read for each input, matching the original
    per-file behaviour.
    """
    for path in glob.glob(htmlfolder):
        basename, ext = os.path.basename(path).split(".")
        xmltei = merge(get_header(headerfile), get_text(read_html(path)))
        save_xmltei(xmltei, xmlfolder, basename)
main(headerfile, htmlfolder, xmlfolder)
| true |
a40e666839eba8095bb34a265355c95906115a4e | Python | AlexPereverzyev/repack | /repack/filters/crypto.py | UTF-8 | 613 | 2.828125 | 3 | [
"MIT"
] | permissive |
import hashlib
import hmac
def md5(v):
    """Return the raw (16-byte) MD5 digest of the bytes *v*."""
    return hashlib.md5(v).digest()
def sha(v, shaName = 256):
    """Return the raw SHA digest of *v* for the given variant (e.g. 256, 512)."""
    digest = hashlib.new('SHA' + str(shaName))
    digest.update(v)
    return digest.digest()
def mac(v, secret, name = 'SHA256'):
    """Return the raw HMAC of *v* keyed with *secret*.

    A ``str`` secret is UTF-8 encoded; bytes are used as-is.
    """
    key = secret.encode('utf-8') if isinstance(secret, str) else secret
    return hmac.new(key, v, name).digest()
def derive(v, salt, shaName = 256, rounds = 100000):
    """Derive a key from *v* with PBKDF2-HMAC over the chosen SHA variant.

    A ``str`` salt is UTF-8 encoded; bytes are used as-is.
    """
    salt_bytes = salt.encode('utf-8') if isinstance(salt, str) else salt
    return hashlib.pbkdf2_hmac('SHA' + str(shaName), v, salt_bytes, rounds)
| true |
38cc5175e840fcc30c4cf87ce0e7b35a089c1593 | Python | saratonite/python-workshop | /basics/01-print.py | UTF-8 | 122 | 3 | 3 | [] | no_license | #! /usr/bin/python3
# This is a comment
print("Hello World");
name = "Sarath"
print("Hello",name)  # comma arguments: print inserts a separating space
print("Hello " + name)  # explicit string concatenation
| true |
1e7586f3111ce7074a2fa0a1968a227b27130833 | Python | MagorokuYohei/hoby | /pyqt/rov/draw.py | UTF-8 | 1,483 | 2.765625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import sys
import random
from PyQt4 import QtCore, QtGui
class magorock(QtGui.QWidget):
    """Demo widget that paints text, random points and simple shapes with QPainter."""
    def __init__(self):
        super(magorock, self).__init__()
        self.initUI()
    def initUI(self):
        """Set the text, geometry and title, then show the window."""
        self.text = 'HELLO MAGOROCK'
        self.setGeometry(300,300,300,300)
        self.setWindowTitle('Draw text')
        self.show()
    def paintEvent(self, event):
        """Qt paint hook: all drawing must happen between begin()/end()."""
        qp = QtGui.QPainter()
        qp.begin(self)
        self.drawText(event,qp)
        self.drawPoints(qp)
        qp.end()
    def drawText(self,event,qp):
        """Draw self.text centred within the widget rectangle."""
        qp.setPen(QtGui.QColor(168,34,3))
        qp.setFont(QtGui.QFont('Decorative', 10))
        qp.drawText(event.rect(), QtCore.Qt.AlignCenter,self.text)
    def drawPoints(self,qp):
        """Draw a rectangle outline, 1000 random points, a line and a filled rect."""
        qp.setPen(QtCore.Qt.green)
        size = self.size()
        qp.drawRect(80,140,140,20)
        qp.setPen(QtCore.Qt.blue)
        # Scatter random points across the current widget size.
        for i in range(1000):
            x = random.randint(1,size.width()-1)
            y = random.randint(1,size.height()-1)
            qp.drawPoint(x,y)
        pen = QtGui.QPen(QtCore.Qt.black, 2, QtCore.Qt.SolidLine)
        qp.setPen(pen)
        qp.drawLine(20, 40, 250, 40)
        color = QtGui.QColor(255,255,255)
        color.setNamedColor('#d4d4d4')  # overrides the white set just above
        qp.setPen(color)
        qp.setBrush(QtGui.QColor(200, 0, 0))
        qp.drawRect(10, 15, 90, 60)
def main():
    """Create the Qt application, show the demo widget and run the event loop."""
    app = QtGui.QApplication([])
    ex = magorock()  # keep a reference so the widget is not garbage collected
    sys.exit(app.exec_())
if __name__ == '__main__':
    main()
| true |
944f731570b0ba3c3a73a1a6c9458d8f7f42542d | Python | artisteY/py_workstation | /text.py | UTF-8 | 2,457 | 3.90625 | 4 | [] | no_license | # -*-coding:utf-8 -*-
# Converting between str and int
i = '34'
i = int(i)
print(type(i))
print(i.bit_length()) # bit_length gives the number of bits needed for the binary representation
# float type
j = 332.43
# j = str(j)
print(type(j))
# Values whose bool() result is False:
# None,bool(''),bool([]),bool(0),bool({}),bool(())
# String operations:
# indexing, slicing, length, iteration, deletion, splitting, stripping
# whitespace, case conversion, prefix/suffix tests
k = 'hahaa2345hahAAAAAhahahahaha'
print(k)
print(k[5]) # indices start at 0
print(k[0:-2]) # slicing
print(k.index("a", 4))
print(k.find("h", 3)) # returns an index; here index and find behave the same
print(k.find("c", 1)) # when the value is absent, find returns -1 while index raises an error
# print(k.index("c", 2))
print(len(k)) # len() also works on other types such as tuples, lists and dicts
# del k  -- the deletion statement
# isalnum() / isalpha() test whether all characters are alphanumeric/alphabetic; isdigit()
print(k.isdigit()) # True only when every character is a digit
# Case conversions
print(k.capitalize()) # capitalise the first letter
print(k.upper()) # all upper case
print(k.lower()) # all lower case
print(k.title()) # title-case the string
print(k.casefold()) # aggressive lower-casing (for caseless comparisons)
print(k.swapcase()) # swap upper and lower case
# Prefix/suffix tests; the result is a bool
print(k.endswith('4'))
print(k.startswith('h'))
info ="name\tage\temail\nlittlefive\t22\t994263539@qq.com\njames\t33\t66622334@qq.com"
print(info.expandtabs(10))
# Formatted output: format() and format_map()
# First approach: keyword arguments
info1 = "my name is {name},I'm {age} years old."
print(info1.format(name='xiaofei', age=21))
# Third approach (shown for reference only)
# print(info1.format(**{"name": "xiaofei", "age": 33}))
# Second approach: positional arguments
info2 = "my name is {0},I'm {1} years old. "
print(info2.format("xiaopang", 13))
info3 = "my name is {name},I'm {age} years old."
print(info3.format_map({"name": "xiaoming", "age": 16}))
# join() connects the elements of a string, tuple or list with the given
# separator into a new string
print("*".join(info1))
print(type(k))
# Tuple (NOTE: shadows the builtin name `tuple`)
tuple = (34, 42, 1)
print(type(tuple))
list = {1, 2, 34, 5, 6}  # NOTE: this is a set literal, and it shadows the builtin `list`
print(type(list))
dic = {'name': 'tom', 'age': 18, 'address': 'beijing',
       'name': 'tim', 'age': 19, 'address': 'tianji',
       }  # duplicate keys: the later values win
# print(dic.expandtabs())
# dict data cannot be tabulated with expandtabs()
print(type(dic))
# Using join with lists
l = [1, 2, 3, 4545]
l1 = ["wwee", "ssddd", "ss"]
print("--".join(l1))
print(type(l))
e259f267deacbfcf4cb8586bd9bc10968d7fad53 | Python | Lisolo/Python-Cookbook | /Numbers, Dates, and Times/Packing_and_Unpacking_Large_Integers_from_Bytes.py | UTF-8 | 2,896 | 4.15625 | 4 | [] | no_license | # coding=utf-8
"""
Problem
You have a byte string and you need to unpack it into an integer value. Alternatively, you need to convert
a large integer back into a byte string.
Solution
Suppose your program needs to work with a 16-element byte string that holds a 128-bit integer value.
For example:
"""
data = b'\x00\x124V\x00x\x90\xab\x00\xcd\xef\x01\x00#\x004'
"""
To interpret the bytes as an integer, use int.from_bytes(), and specify the byte ordering like this:
"""
len(data)
# Returns 16
int.from_bytes(data, 'little')
# Returns 69120565665751139577663547927094891008
int.form_bytes(data, 'big')
# Returns 94522842520747284487117727783387188
"""
To convert a large integer value back into a byte string, use the int.to_bytes() method, specifying the
number of bytes and the byte order. For example:
"""
x = 94522842520747284487117727783387188
x.to_bytes(16, 'big')
# Returns b'\x00\x124V\x00x\x90\xab\x00\xcd\xef\x01\x00#\x004'
x.to_bytes(16, 'little')
# Returns b'4\x00#\x00\x01\xef\xcd\x00\xab\x90x\x00V4\x12\x00'
"""
Discussion
Converting large integer values to and from byte strings is not a common operation. However, it sometimes
arises in certain application domains, such as cryptography or networking. For instance, IPv6 network
addresses are represented as 128-bit integers. If you are writing code that needs to pull such values
out of a data record, you might face this problem.
As an alternative to this recipe, you might be inclined to unpack values using the struct module, as
described in "Reading and Writing Binary Arrays of Structures". This works, but the size of integers
that can be unpacked with struct is limited. Thus, you would need to unpack multiple values and
combine them to create the final value. For example:
"""
import struct
hi, lo = struct.unpack('>QQ', data)
print((hi << 64) + lo)
# 94522842520747284487117727783387188
"""
The specification of the byte order (little or big) just indicates whether the bytes that make up the
integer value are listed from the least to most significant or the other way around. This is easy to
view using a carefully crafted hexadecimal value:
"""
x2 = 0x01020304
x2.to_bytes(4, 'big')
# Returns b'\x01\x02\x03\x04'
x.to_bytes(4, 'little')
# Returns b'\x04\x03\x02\x01'
"""
If you try to pack an integer into a byte string, but it won’t fit, you’ll get an error. You can use the
int.bit_length() method to determine how many bits are required to store a value if needed:
"""
x3 = 523 ** 23
print(x3)
# 335381300113661875107536852714019056160355655333978849017944067
x3.to_bytes(16, 'little')
# Traceback (most recent call last):
# File "<stdin>", line 73, in <module>
# OverflowError: int too big to convert
x3.bit_length()
# 208
nbytes, rem = divmod(x3.bit_length(), 8)
if rem:
nbytes += 1
x3.to_bytes(nbytes, 'little')
# Returns b'\x03X\xf1\x82iT\x96\xac\xc7c\x16\xf3\xb9\xcf...\xd0'
| true |
1bb4934755765268d71e14521a0b618df20e4c05 | Python | MontyThibault/Force-Sensor-Integration | /MayaIntegration/old/MayaReload.py | UTF-8 | 1,185 | 3 | 3 | [
"MIT"
] | permissive | # Put me as a button on the shelf
import sys
import os
# NOTE(review): Python 2 only -- relies on the statement form
# `exec ... in globals()` and the builtin reload(); both are gone in Python 3.
def psource(module):
    """Import (or re-import) the module at the given absolute file path into
    the global namespace and return its module name (filename sans extension).

    Note: the locals `file` and `dir` shadow builtins of the same name.
    """
    file = os.path.basename( module )
    dir = os.path.dirname( module )
    toks = file.split( '.' )
    modname = toks[0]
    # Check if dirrectory is really a directory
    if( os.path.exists( dir ) ):
        # Check if the file directory already exists in the sys.path array
        paths = sys.path
        pathfound = 0
        for path in paths:
            if(dir == path):
                pathfound = 1
        # If the dirrectory is not part of sys.path add it
        if not pathfound:
            sys.path.append( dir )
    # exec works like MEL's eval but you need to add in globals()
    # at the end to make sure the file is imported into the global
    # namespace else it will only be in the scope of this function
    exec ('import ' + modname) in globals()
    # reload the file to make sure its up to date
    exec( 'reload( ' + modname + ' )' ) in globals()
    # This returns the namespace of the file imported
    return modname
# When you import a file you must give it the full path
psource( 'C:/Users/Monty/Desktop/forcePlates/MayaIntegration/MayaScript.py' )
# psource() injected MayaScript into globals(), so the name resolves here.
MayaScript.main()
41330e95ee4631f577a4f16652003b06d4f9fcfc | Python | lucianogiannini/Intro-to-Programming-Python- | /Assignment 2/PS2_Part3.py | UTF-8 | 971 | 3.859375 | 4 | [] | no_license | beginningOdometerReading = int(input("Please enter the beginning odometer reading: "))
endingOdometerReading = int(input("Please enter the ending odometer reading: "))
gallonsToFillTank = float(input("Please enter the number of gallons to fill the tank: "))
costPerGallonOfGas = float(input("Please enter the cost per gallon of fuel: "))
numberMilesDrivenPerYear = int(input("Please enter the number of miles driven per year: "))
# Derived quantities for this trip and the projected year.
totalMilesDriven = endingOdometerReading - beginningOdometerReading
averageMilesPerGallon = totalMilesDriven / gallonsToFillTank
costOfGas = gallonsToFillTank * costPerGallonOfGas
fuelCostPerMile = costOfGas/totalMilesDriven
annualCostOfFuel = (numberMilesDrivenPerYear/averageMilesPerGallon) * costPerGallonOfGas
# Report: MPG to two decimals, money formatted as $x,xxx.xx.
print("Average Miles Per Gallon:",format(averageMilesPerGallon, "<2.2f"))
print("Fuel Cost Per Mile:", '${:,.2f}'.format(fuelCostPerMile))
print("Annual Cost of Fuel:", '${:,.2f}'.format(annualCostOfFuel))
| true |
0512e120e632970d9724a79afe3e66dd8e45e1ea | Python | FelipeDasr/Python | /EstruturasDeControle/For02.py | UTF-8 | 408 | 3.71875 | 4 | [] | no_license | #!python
palavra = "paralelepipedo"
for letra in palavra:
print(letra, end=",")
print("FIM\n\n")
aprovados = ["Rafaela", "Pedro", "Renato", "Maria"]
for nome in aprovados:
print(nome)
for posicao, nome in enumerate(aprovados):
print(posicao, nome)
dias_semana = ("Domingo", "Segunda", "Terca", "Quarta", "Quinta", "Sexta", "Sabado")
for dia in dias_semana:
print("Dia", dia) | true |
128b6ea0dc76b0412652f16bda5188c695f28f71 | Python | soylentdeen/Graffity | /src/SourceFinder/findSource.py | UTF-8 | 4,811 | 2.71875 | 3 | [
"MIT"
] | permissive | import scipy
import numpy
from numpy import sin, cos, arcsin, deg2rad, rad2deg
class Coordinate( object ):
    """Equatorial coordinate parsed from compact HHMMSS.s / +DDMMSS.s strings.

    Also derives the hour angle and altitude for a hard-coded site latitude
    and local sidereal time. Python 2 module (note the print statement below;
    numpy.int/numpy.float were removed in NumPy >= 1.24).
    """
    def __init__(self, ra, dec, pm=''):
        ra = ra.replace(' ', '')
        dec = dec.replace(' ', '')
        self.pm = pm  # proper-motion string, kept verbatim for display
        if dec[0] == '+':
            self.sign = 1.0
            self.dsign = ' '
        else:
            self.sign = -1.0
            self.dsign = '-'
        # Sexagesimal components kept as strings for __repr__.
        self.rah = ra[:2]
        self.ram = ra[2:4]
        self.ras = ra[4:]
        self.deg = dec[1:3]
        self.dm = dec[3:5]
        # NOTE(review): dec[5:-1] drops the final character while self.dec
        # below uses dec[5:] -- possibly trimming a trailing char; verify.
        self.ds = dec[5:-1]
        # Decimal degrees (RA hours converted at 15 degrees per hour).
        self.ra = numpy.int(ra[:2])*15 + numpy.float(ra[2:4])*15.0/60.0 + numpy.float(ra[4:])*15.0/3600.0
        self.dec = self.sign*(numpy.int(dec[1:3])+numpy.float(dec[3:5])/60.0 + numpy.float(dec[5:])/3600.0)
        # Site latitude -24d37m38.64s -- presumably the observatory site; confirm.
        self.lat = -1.0*(24. + 37.0/60.0 + 38.64/3600.0)
        self.LST = 105.0  # assumed local sidereal time in degrees -- TODO confirm
        self.ha = self.LST - self.ra
        if self.ha < 0:
            self.ha += 360.0
        # Standard altitude formula: sin(alt) = sin(dec)sin(lat) + cos(dec)cos(lat)cos(ha).
        self.altitude = rad2deg(arcsin(sin(deg2rad(self.dec)) * sin(deg2rad(self.lat)) +
                        cos(deg2rad(self.dec))*cos(deg2rad(self.lat))*cos(deg2rad(self.ha))))
        print self.altitude
    def __repr__(self):
        return "%s %s %s %s%s %s %s %.3f %s" % (self.rah, self.ram, self.ras, self.dsign, self.deg,
                self.dm, self.ds, self.altitude, self.pm)
class Star ( object ):
    """A (possibly double) star with visual magnitudes, spectral types and
    component separation, positioned by a Coordinate."""

    def __init__(self, coordinate='', vmag1=30.0, vmag2=30.0, spt1='', spt2='', separation=0.0, pm=''):
        self.coordinate = Coordinate(coordinate[:9], coordinate[9:], pm=pm)
        self.vmag1 = vmag1
        self.vmag2 = vmag2
        self.separation = separation
        self.spt1 = self._clean_spt(spt1)
        self.spt2 = self._clean_spt(spt2) if len(spt2) > 0 else None

    @staticmethod
    def _clean_spt(spt):
        """Strip luminosity-class letters and uncertainty marks from a
        spectral-type string (single-character deletions, order-independent)."""
        cleaned = spt.strip()
        for junk in ('I', 'V', ':', '/'):
            cleaned = cleaned.replace(junk, '')
        return cleaned

    def calcKmag(self, colors):
        """Estimate K magnitudes from V magnitudes via the V-K colour table.

        Spectral types missing from *colors* get the sentinel value 30.0,
        as does a nonexistent secondary component.
        """
        if self.spt1 in colors:
            self.kmag1 = self.vmag1 - colors[self.spt1]
        else:
            self.kmag1 = 30.0
        if self.spt2 is not None and self.spt2 in colors:
            self.kmag2 = self.vmag2 - colors[self.spt2]
        else:
            self.kmag2 = 30.0

    def inBox(self, c1, c2, c3, c4):
        """True when this star lies strictly inside the RA/dec rectangle.

        Only c1 (low RA/dec corner), c2 (high dec) and c3 (high RA) are
        consulted; c4 is accepted for call-site symmetry.
        """
        ra = self.coordinate.ra
        dec = self.coordinate.dec
        return (c1.ra < ra < c3.ra) and (c1.dec < dec < c2.dec)
# Build a spectral-type -> (V-K) colour lookup from the local data file.
colorsFile = open('starcolors.dat', 'r')
colors = {}
for line in colorsFile.readlines():
    if line[0] != '#':
        l = line.split()
        if l[11][0] != '.':
            colors[l[0][:-1]] = numpy.float(l[11])
# Parse the double-star summary table (fixed-width records) into Star objects.
df = 'wdsweb_summ2.txt'
data = open(df, 'r')
stars = []
for line in data.readlines():
    if line[0].isdigit():
        try:
            # Fixed-width column slices of each record.
            spt = line[70:79].split('+')
            spt1 = spt[0]
            if len(spt) > 1:
                spt2 = spt[1]
            else:
                spt2 = ''
            vmag1 = numpy.float(line[58:63])
            vmag2 = numpy.float(line[64:69])
            coord = line[112:]
            pm = line[80:97]
            separation = (numpy.float(line[46:51]) + numpy.float(line[52:57]))/2.0
            star = Star(coordinate=coord, vmag1=vmag1, vmag2=vmag2, spt1=spt1,
                spt2=spt2, separation=separation, pm=pm)
            star.calcKmag(colors)
            stars.append(star)
        # NOTE(review): bare except silently skips malformed rows (best-effort
        # parse) but also hides genuine errors.
        except:
            pass
# Search-box corners and selection thresholds (RA/dec as compact strings).
ra_min = '170000.0'
ra_max = '220000.0'
dec_min = '-600000.0'
dec_max = '+100000.0'
cutoff = 7.0
sep_min = 7.0
sep_max = 15.0
corner1 = Coordinate(ra_min, dec_min)
corner2 = Coordinate(ra_min, dec_max)
corner3 = Coordinate(ra_max, dec_min)
corner4 = Coordinate(ra_max, dec_max)
# Keep stars inside the box that pass the magnitude and separation cuts.
possible = []
for star in stars:
    if (star.inBox(corner1, corner2, corner3, corner4) and star.kmag1 < cutoff and
        star.separation > sep_min and star.separation < sep_max and star.kmag2 < 20.0):
        possible.append(star)
#for star in possible:
#    print("Kmag1: %.2f Kmag2: % 6.2f Sep:% 6.2f Coord: %s" % (star.kmag1, star.kmag2, star.separation, star.coordinate))
# Candidate orderings; the secondary-K sort is the one actually printed.
kmag1 = sorted(possible, key=lambda star: star.kmag1)
kmag2 = sorted(possible, key=lambda star: star.kmag2)
Sep = sorted(possible, key=lambda star: star.separation)
for star in kmag2:
    #print("Kmag1: %.2f Kmag2: % 6.2f Sep:% 6.2f Coord: %s" % (star.kmag1, star.kmag2, star.separation, star.coordinate))
    print("| %s | % 6.2f | % 6.2f | % 6.2f | |" % (star.coordinate, star.kmag1, star.kmag2, star.separation))
print("+-------------------------+--------+--------+--------+------------+")
| true |
19b1e431a0bd6eb4a8081da52411a2c35a0e03f9 | Python | juanjosegarciaripoll/seemps | /mps/test/test_mps.py | UTF-8 | 1,591 | 2.921875 | 3 | [
"MIT"
] | permissive |
import unittest
import numpy as np
from mps.state import TensorArray
class TestTensorArray(unittest.TestCase):
    """Behavioural tests for TensorArray copy and tensor-sharing semantics."""
    def setUp(self):
        # A three-site product state: each tensor has shape (1, 2, 1).
        self.product_state = [np.reshape([1.0, 2.0], (1, 2, 1)),
                              np.reshape([3.0, 5.0], (1, 2, 1)),
                              np.reshape([7.0, 11.0], (1, 2, 1))]
    def test_independence(self):
        #
        # If we create a TestArray, it can be destructively modified without
        # affecting it original list.
        #
        data = self.product_state.copy()
        A = TensorArray(data)
        for i in range(A.size):
            # Replacing an entry must not write through to `data`.
            A[i] = np.reshape([13, 15], (1, 2, 1))
            self.assertTrue(np.all(A[i] != data[i]))
            self.assertTrue(np.all(data[i] == self.product_state[i]))
    def test_copy_independence(self):
        #
        # If we clone a TestArray, it can be destructively modified without
        # affecting its sibling.
        #
        A = TensorArray(self.product_state.copy())
        B = A.copy()
        for i in range(A.size):
            # Rebinding an entry in A must leave the clone B untouched.
            A[i] = np.reshape([13, 15], (1, 2, 1))
            self.assertTrue(np.all(A[i] != B[i]))
            self.assertTrue(np.all(B[i] == self.product_state[i]))
    def test_sharing(self):
        #
        # The clone of a TensorArray shares the same tensors
        #
        data = [x.copy() for x in self.product_state]
        A = TensorArray(data)
        B = A.copy()
        for i in range(A.size):
            # In-place mutation is visible through both A and B (shared tensors).
            A[i][0,0,0] = 17.0
            self.assertTrue(np.all(A[i] == B[i]))
| true |