blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a619ae271b212713b51f2552e083878ae94920e2 | Python | igortereshchenko/amis_python | /km73/Ruban_Yehor/4/task3.py | UTF-8 | 385 | 3.96875 | 4 | [] | no_license | a = float(input("Введите первое число - "))
b = float(input("Введите второе число - "))
c = float(input("Введите третье число - "))
if a < b:
if a < c:
ans = a
else:
ans = c
else:
if b < c:
ans = b
else:
ans = c
print("Наименьшее число - ",ans)
input()
| true |
3f0688b0621e4130d5ba9c8f741d76b1c866f678 | Python | Aasthaengg/IBMdataset | /Python_codes/p02901/s296545330.py | UTF-8 | 428 | 2.8125 | 3 | [] | no_license |
N,M=map(int, input().split())
key=[]
for i in range(M):
a,b=map(int, input().split())
C=list(map(int, input().split()))
c=0
for d in C:
c+=2**(d-1)
key.append((a,b,c))
dp=[10**8+10]*(2**N)
dp[0]=0
for bi in range(0,2**N):
for i in range(M):
t=key[i]
a,b,c=t
dp[bi|c]=min(dp[bi]+a,dp[bi|c])
if dp[-1]<10**8+10:
print(int(dp[-1]))
else:
print(-1)
| true |
b28ba7e3dd809d7839b8d4c9e8839bea430ee520 | Python | DronMDF/vanadis-bot | /bot/IssueLocation.py | UTF-8 | 765 | 3.0625 | 3 | [
"Apache-2.0"
] | permissive | class IssueLocation:
def __init__(self, file, line, position=None):
self.file = file
self.line = line
self.position = position
def __eq__(self, other):
return all((self.file == other.file, self.line == other.line,
self.position == other.position))
def __repr__(self):
if self.position is None:
return 'IssueLocation("%s", %u)' % (self.file, self.line)
return 'IssueLocation("%s", %u, %u)' % (self.file, self.line, self.position)
def __str__(self):
if self.position is None:
return '%s:%u' % (self.file, self.line)
return '%s:%u:%u' % (self.file, self.line, self.position)
def print(self, stream):
stream.write(file=self.file)
stream.write(line=self.line)
if self.position is not None:
stream.write(position=self.position)
| true |
a5d39ca5f1fd4cc0c01e0b0011c4f20f742be5ad | Python | calmh/pre-git | /Unix/keeperv4/ObjStore.py | UTF-8 | 638 | 2.921875 | 3 | [
"BSD-3-Clause"
] | permissive | import base64, shelve
class Store:
def __init__(self, dir):
self.dir = dir
class Object:
def __init__(self, fname):
tmp = file(fname).read()
self.data = base64.encodestring(tmp)
self.attrs = {}
self.attrs["Filename"] = fname
self.attrs["Length"] = str(len(tmp))
def __setitem__(self, name, val):
self.attrs[name] = val
def save(self, fname):
f = file(fname, "w")
for k, v in self.attrs.items():
f.write(k + ": " + v + "\n")
f.write(".\n")
f.write(self.data)
f.close()
o = Object("ObjStore.py")
o.save("Obj");
| true |
c1c97b7387b719376a7c7ba22af0de8a3b0299cb | Python | ishanshinde/SMTPTool | /test2.py | UTF-8 | 227 | 3 | 3 | [] | no_license |
from textblob import TextBlob
text="Hello"
blob=TextBlob(text)
print(blob.translate(to='ja'))
print(blob.translate(to='fr'))
print(blob.translate(to='de'))
text2="Hel you"
blob=TextBlob(text2)
print(blob.translate(to='ja')) | true |
50dbb2cde750245d2c4003bbfb412c38846e26b0 | Python | masudurHimel/Problematic_Adventure | /Leetcode/1672. Richest Customer Wealth.py | UTF-8 | 385 | 3.234375 | 3 | [] | no_license | class Solution:
@staticmethod
def totalBalance(x):
sum_list = 0
for i in x:
sum_list += i
return sum_list
def maximumWealth(self, accounts):
res = []
for i in accounts:
res.append(self.totalBalance(i))
res.sort()
return res[-1]
s = Solution()
print(s.maximumWealth([[1, 2, 3], [3, 2, 1]]))
| true |
3b814a29f58737a7cca07190b044a0111be65dee | Python | danielmckeown/galpy | /galpy/potential_src/verticalPotential.py | UTF-8 | 2,506 | 3 | 3 | [
"BSD-2-Clause"
] | permissive | from galpy.potential_src.linearPotential import linearPotential
from galpy.potential_src.Potential import PotentialError, Potential
class verticalPotential(linearPotential):
"""Class that represents a vertical potential derived from a RZPotential:
phi(z;R)= phi(R,z)-phi(R,0.)"""
def __init__(self,RZPot,R=1.):
"""
NAME:
__init__
PURPOSE:
Initialize
INPUT:
RZPot - RZPotential instance
R - Galactocentric radius at which to create the vertical potential
OUTPUT:
verticalPotential instance
HISTORY:
2010-07-13 - Written - Bovy (NYU)
"""
linearPotential.__init__(self,amp=1.)
self._RZPot= RZPot
self._R= R
return None
def _evaluate(self,z,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential
INPUT:
z
t
OUTPUT:
Pot(z,t;R)
HISTORY:
2010-07-13 - Written - Bovy (NYU)
"""
return self._RZPot(self._R,z,t=t)-self._RZPot(self._R,0.,t=t)
def _force(self,z,t=0.):
"""
NAME:
_force
PURPOSE:
evaluate the force
INPUT:
z
t
OUTPUT:
F_z(z,t;R)
HISTORY:
2010-07-13 - Written - Bovy (NYU)
"""
return self._RZPot.zforce(self._R,z,t=t)\
-self._RZPot.zforce(self._R,0.,t=t)
def RZToverticalPotential(RZPot,R):
"""
NAME:
RZToverticalPotential
PURPOSE:
convert a RZPotential to a vertical potential at a given R
INPUT:
RZPot - RZPotential instance or list of such instances
R - Galactocentric radius at which to evaluate the vertical potential
OUTPUT:
(list of) linearPotential instance(s)
HISTORY:
2010-07-21 - Written - Bovy (NYU)
"""
if isinstance(RZPot,list):
out= []
for pot in RZPot:
if isinstance(pot,linearPotential):
out.append(pot)
else:
out.append(verticalPotential(pot,R))
return out
elif isinstance(RZPot,Potential):
return verticalPotential(RZPot,R)
elif isinstance(RZPot,linearPotential):
return RZPot
else: #pragma: no cover
raise PotentialError("Input to 'RZToverticalPotential' is neither an RZPotential-instance or a list of such instances")
| true |
06700793c5295ba347393feb75cbfbbd25fda2a8 | Python | paulrsmithjnr/info3180-project1 | /app/models.py | UTF-8 | 1,664 | 2.609375 | 3 | [] | no_license | from . import db
from datetime import datetime
# from werkzeug.security import generate_password_hash
class Profile(db.Model):
# You can use this to change the table name. The default convention is to use
# the class name. In this case a class name of UserProfile would create a
# user_profile (singular) table, but if we specify __tablename__ we can change it
# to `user_profiles` (plural) or some other name.
__tablename__ = 'userprofiles'
id = db.Column(db.Integer, primary_key=True)
first_name = db.Column(db.String(80))
last_name = db.Column(db.String(80))
gender = db.Column(db.String(10))
email = db.Column(db.String(80), unique=True)
location = db.Column(db.String(80))
biography = db.Column(db.String(255))
profile_picture = db.Column(db.String(80))
date_joined = db.Column(db.String(80))
def __init__(self, first_name, last_name, gender, email, location, biography, profile_picture):
self.first_name = first_name
self.last_name = last_name
self.gender = gender
self.email = email
self.location = location
self.biography = biography
self.profile_picture = profile_picture
self.date_joined = datetime.now().strftime("%B %d, %Y")
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
try:
return unicode(self.id) # python 2 support
except NameError:
return str(self.id) # python 3 support
def __repr__(self):
return '<User %r>' % (self.username)
| true |
a9f187bae2a2b2fb5ef5e5a01249417d8ea6d2e1 | Python | sharathreddyp/demo1 | /Homework1/ZyLabmain3.18.py | UTF-8 | 676 | 3.109375 | 3 | [] | no_license | #Laura Moreno
#PSID = 1763766
h = int(input('Enter wall height (feet):\n'))
w = int(input('Enter wall width (feet):\n'))
wall = h * w
print('Wall area:', wall, 'square feet')
paint = (wall / 350)
print('Paint needed:', '{:.2f}'.format(paint), 'gallons')
import math
print('Cans needed:', round(paint), 'can(s)\n')
color = input('Choose a color to paint the wall:\n')
colors = {
'red': 35,
'blue': 25,
'green': 23
}
colort = round(paint) * colors[color]
print('Cost of purchasing', color, 'paint: $''{}'.format(colort))
#add the {}.format because i keep getting a space
# need to * the cans * the colors for the last option since are different values
| true |
9890256a6addcb4498ba36aeffe44ed3a2339248 | Python | arhuaco/junkcode | /junk/euler/33.py | UTF-8 | 909 | 3.75 | 4 | [] | no_license | def DigitCancelling():
for num in range(10, 100):
for den in range(10, 100):
if num % 10 == 0 and den % 10 == 0:
continue # Trivial example.
if num / den >= 1.0:
continue # Restriction
num_first = num // 10
num_second = num % 10
den_first = den // 10
den_second = den % 10
if den_first != 0 and num_first == den_second and num_second / den_first == num / den:
print('{} / {} ?= {} / {}'.format(num, den, num_second, den_first))
if den_second != 0 and num_second == den_first and num_first / den_second == num / den:
print('{} / {} ?= {} / {}'.format(num, den, num_first, den_second))
DigitCancelling()
'''
Prints:
16 / 64 ?= 1 / 4
19 / 95 ?= 1 / 5
26 / 65 ?= 2 / 5
49 / 98 ?= 4 / 8
Answer: 8 / 800 == 1 / 100.
'''
| true |
ce504bdd9906d7688cfb17affccb189c876745df | Python | hack2024/movie_web_trailer | /app/app_model.py | UTF-8 | 625 | 3.328125 | 3 | [] | no_license | class Movie():
"""Class that represent the movie blueprint
Attributes:
title: the title of the movie
short_description: short description of a movie
poster_image_url: the poster image of the movie
trailer_youtube_url: the movie trailer of the movie
"""
def __init__(self, movie_data):
"""
Initialize the Movie instance
"""
self.title = str(movie_data["Title"]).upper()
self.short_description = movie_data["Plot"]
self.poster_image_url = movie_data["posterImage"]
self.trailer_youtube_url = movie_data["movieTrailer"]
| true |
bbf11575b392da3cede1f704691b13f5aad22643 | Python | camdeno/miniGolf | /Module/Main_MiniGolf.py | UTF-8 | 3,724 | 3.5 | 4 | [
"MIT"
] | permissive | # A simple mini golf game used to learn python
# Launch and Initialize
import pygame
import os
from pygame.constants import MOUSEBUTTONDOWN
from drawWindow import drawWindow
from getMouse import *
from Ball import Ball
pygame.init() # Initializes pygame
# Declare Constants
WIDTH = 800 # CONST that Sets the Width of the Window
HEIGHT = 600 # CONST that Sets the Height of the Window
FPS = 60 # CONST that Sets the Framerate of the Window
WHITE = (255,255,255) # CONST Color White
BLACK = (0,0,0) # CONST Color Black
RED = (255,0,0) # CONST Color Red
XPOS = 200 # CONST Starting XPOS for Ball
YPOS = 300 # CONST Starting YPOS for Ball
# Create Window and Main Game Loop
display = pygame.display.set_mode ((WIDTH,HEIGHT)) # Sets the display screen size to 800 x 600 -- Fullscreen can be set with 1920,1080
pygame.display.set_caption("Mini Golf") # Sets the display caption to the game name
pygame.display.update() # Pushes the change to the window
open = True # Sets open state
Clock = pygame.time.Clock() # Creates a clock to store framerate
#set ball's color, size, and pos
playerBall = Ball(XPOS,YPOS,WHITE) # Create playerBall
all_sprites_list = pygame.sprite.Group() # Create Sprite List
all_sprites_list.add(playerBall) # Add playerball to Sprite List
while open:
Clock.tick(FPS) # Sets the Framerate
for event in pygame.event.get(): # Checking for an event and storing all events in the event box
if event.type == pygame.QUIT: # If the X is pressed, Close the game -- This can also be used to handle events such as the close in the UI
open = False # Sets open state to False
elif event.type == pygame.MOUSEBUTTONDOWN:
# Check to see if the ball is under the mouse button
button = event.button
status = 0
down = clickPos
down = getMouse(button,playerBall.rect.x,playerBall.rect.y, status)
#isClicked = getMouse(button,playerBall.rect.x,playerBall.rect.y)
# If True, Draw Vector from where the mouse was to where it gets drug to
#if isClicked == True:
# playerBall.rect.x += 20 # Move the Ball if it gets touched.
# Set the velocity of the ball and the direction using Trig and a Set Velocity -- Call a function?
# If no, do nothing
elif event.type == pygame.MOUSEBUTTONUP:
button = event.button
status = 1
up = clickPos
up = getMouse(button,playerBall.rect.x,playerBall.rect.y, status)
#playerBall.rect.x += 1 # Moves the Rect
#print(playerBall.rect.x) # Prints X POS
drawWindow(display,BLACK,all_sprites_list) # Sets the window white
pygame.quit() # Quits pygame
quit() # Quits the module
| true |
6b394e0703c5caebb9998245a6be542f09a89501 | Python | JoshuaPedro/5E-Character | /CSM/character/models.py | UTF-8 | 11,955 | 2.875 | 3 | [] | no_license | """
These are the model definitions that deal with characters. The majority of the information that a user actually interacts
with is in these models or linked by these models.
"""
# Python Imports
import math
# Django Imports
from django.db import models
from django.utils.text import slugify
from gm2m import GM2MField
class IntegerMinMaxField(models.IntegerField):
"""
A field that only allows values between the specified minimum and maximum values.
"""
def __init__(self, min_value=None, max_value=None, *args, **kwargs):
self.min_value, self.max_value = min_value, max_value
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if (self.min_value is not None) and (self.max_value is not None):
kwargs['min_value'] = self.min_value
kwargs['max_value'] = self.max_value
return name, path, args, kwargs
def formfield(self, **kwargs):
defaults = {'min_value': self.min_value, 'max_value': self.max_value}
defaults.update(kwargs)
return super().formfield(**defaults)
class Character(models.Model):
"""
This is the information for a character.
***NOTE: This is the specific model a Member will be interacting with the most.***
"""
username = models.ForeignKey('accounts.Member', related_name='characters', editable=False)
accessed = models.DateTimeField(auto_now=True,)
# Flair
char_name = models.CharField(max_length=1024, blank=True, null=True,)
description = models.TextField(blank=True, null=True,)
portrait = models.ImageField(blank=True, null=True,)
char_age = models.SmallIntegerField(blank=True, null=True,)
char_height = models.SmallIntegerField(blank=True, null=True,)
char_weight = models.SmallIntegerField(blank=True, null=True,)
char_skin_color = models.CharField(max_length=128, blank=True, null=True,)
char_hair_color = models.CharField(max_length=128, blank=True, null=True,)
char_eye_color = models.CharField(max_length=128, blank=True, null=True,)
personality = models.TextField(blank=True, null=True,)
ideals = models.TextField(blank=True, null=True,)
bonds = models.TextField(blank=True, null=True,)
flaws = models.TextField(blank=True, null=True,)
allies = models.CharField(max_length=512, blank=True, null=True,)
organizations = models.CharField(max_length=512, blank=True, null=True,)
# General traits such as languages.
char_traits = GM2MField()
# Basics
char_classes = models.ManyToManyField('rules.Class', related_name='character_classes', through='ClassLevel', blank=True,)
char_prestige_classes = models.ManyToManyField('rules.PrestigeClass', related_name='character_prestiges', blank=True,)
char_race = models.ForeignKey('rules.Race', related_name='character_races', blank=True, null=True,)
char_subrace = models.ForeignKey('rules.Subrace', related_name='character_subraces', blank=True, null=True)
char_background = models.ForeignKey('rules.Background', related_name='character_backgrounds', blank=True, null=True)
alignment = models.ForeignKey('rules.Alignment', related_name='character_alignments', blank=True, null=True,)
char_xp = models.IntegerField(default=0, blank=True, null=True,)
# Ability Scores
STR_score = IntegerMinMaxField(min_value=1, max_value=20, blank=True, null=True,)
DEX_score = IntegerMinMaxField(min_value=1, max_value=20, blank=True, null=True,)
CON_score = IntegerMinMaxField(min_value=1, max_value=20, blank=True, null=True,)
INT_score = IntegerMinMaxField(min_value=1, max_value=20, blank=True, null=True,)
WIS_score = IntegerMinMaxField(min_value=1, max_value=20, blank=True, null=True,)
CHA_score = IntegerMinMaxField(min_value=1, max_value=20, blank=True, null=True,)
# Saving Throws
STR_saving_throw = models.BooleanField(default=False)
DEX_saving_throw = models.BooleanField(default=False)
CON_saving_throw = models.BooleanField(default=False)
INT_saving_throw = models.BooleanField(default=False)
WIS_saving_throw = models.BooleanField(default=False)
CHA_saving_throw = models.BooleanField(default=False)
# Actions >> May not need to use if just pulling through races and etc.
features = models.ManyToManyField('rules.Feature', related_name='character_features', blank=True,)
# Combat
conditions = models.ManyToManyField('rules.Condition', related_name='character_conditions', blank=True,)
death_fails = models.SmallIntegerField(default=0)
death_successes = models.SmallIntegerField(default=0)
max_health = models.SmallIntegerField(default=0)
current_health = models.SmallIntegerField(default=0)
temp_addtl_hp = models.SmallIntegerField(default=0)
hit_dice_current = models.SmallIntegerField(default=1)
speed = models.SmallIntegerField(default=30)
inspiration = models.SmallIntegerField(blank=True, null=True,)
# Spells
spell_casting = models.BooleanField(default=False)
spell_book = models.ManyToManyField('spells.Spell', related_name='character_spells', through='SpellsReady', blank=True,)
spell_slots_1_current = models.SmallIntegerField(blank=True, null=True,)
spell_slots_2_current = models.SmallIntegerField(blank=True, null=True,)
spell_slots_3_current = models.SmallIntegerField(blank=True, null=True,)
spell_slots_4_current = models.SmallIntegerField(blank=True, null=True,)
spell_slots_5_current = models.SmallIntegerField(blank=True, null=True,)
spell_slots_6_current = models.SmallIntegerField(blank=True, null=True,)
spell_slots_7_current = models.SmallIntegerField(blank=True, null=True,)
spell_slots_8_current = models.SmallIntegerField(blank=True, null=True,)
spell_slots_9_current = models.SmallIntegerField(blank=True, null=True,)
spell_slots_1_maximum = models.SmallIntegerField(blank=True, null=True, )
spell_slots_2_maximum = models.SmallIntegerField(blank=True, null=True, )
spell_slots_3_maximum = models.SmallIntegerField(blank=True, null=True, )
spell_slots_4_maximum = models.SmallIntegerField(blank=True, null=True, )
spell_slots_5_maximum = models.SmallIntegerField(blank=True, null=True, )
spell_slots_6_maximum = models.SmallIntegerField(blank=True, null=True, )
spell_slots_7_maximum = models.SmallIntegerField(blank=True, null=True, )
spell_slots_8_maximum = models.SmallIntegerField(blank=True, null=True, )
spell_slots_9_maximum = models.SmallIntegerField(blank=True, null=True, )
# Special Point Tracking (Rage, Inspiration, Etc.): # TODO: Add field to check which feature they have for points and base tracking off of that.
has_point_tracking = models.BooleanField(default=False)
max_points = models.SmallIntegerField(blank=True, null=True)
current_points = models.SmallIntegerField(blank=True, null=True)
# Inventory
tools_inv = models.ManyToManyField('equipment.Tool', related_name='character_tools_inv', blank=True,)
items_inv = models.ManyToManyField('equipment.Item', related_name='character_items_inv', blank=True,)
armor_inv = models.ManyToManyField('equipment.Armor', related_name='character_armor_inv', blank=True,)
weapons_inv = models.ManyToManyField('equipment.Weapon', related_name='character_weapons_inv', blank=True,)
char_copper = models.IntegerField(blank=True, null=True)
char_silver = models.IntegerField(blank=True, null=True)
char_gold = models.IntegerField(blank=True, null=True)
char_platinum = models.IntegerField(blank=True, null=True)
slug = models.SlugField(editable=False, blank=True, null=False)
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.char_name)
super().save(*args, **kwargs)
def get_prof_bonus(self):
"""
Gets the proficiency bonus for a character based on their character level.
:return: int()
"""
return int(math.ceil(self.get_char_level() / 4) + 1)
def get_ability_bonus(self, ability):
"""
Gets the bonus for a given ability score.
:return: int()
"""
score_conversion = {
'STR': self.STR_score,
'DEX': self.DEX_score,
'CON': self.CON_score,
'INT': self.INT_score,
'WIS': self.WIS_score,
'CHA': self.CHA_score,
}
return (score_conversion[ability] - 10) // 2
def get_passive_score(self, ability):
"""
Gets the passive check for a given ability score.
:return: int()
"""
return self.get_ability_bonus(ability) + 10
def get_char_level(self):
"""
Adds all class levels to get the character level.
:return: an int()
"""
class_levels = self.classlevels.all()
level = 0
for class_level in class_levels:
level += class_level.class_level
return level
def get_saving_throw_bonus(self, ability):
"""
Checks if character is proficient in saving throw and returns amount of bonus either way.
:return: int()
"""
bonus = 0
score_conversion = {
'STR': self.STR_saving_throw,
'DEX': self.DEX_saving_throw,
'CON': self.CON_saving_throw,
'INT': self.INT_saving_throw,
'WIS': self.WIS_saving_throw,
'CHA': self.CHA_saving_throw,
}
if score_conversion[ability]:
bonus += self.get_prof_bonus()
bonus += self.get_ability_bonus(ability)
else:
bonus += self.get_ability_bonus(ability)
return bonus
def get_initiative_bonus(self):
"""
Returns the total initiative bonus for a character.
:return: int()
"""
from rules.models import Feature
initiative = 0
alert = Feature.objects.get(name__iexact='Alert')
if alert in self.features.all():
initiative += 4 + self.get_ability_bonus('DEX')
else:
initiative += self.get_ability_bonus('DEX')
return initiative
def get_armor_class(self):
"""
Returns the total armor class for a character.
:return: int()
"""
armors = self.armor_inv.all()
armor_class = 0
if len(armors) > 0:
for armor in armors:
armor_class += armor.base_armor_class
if armor.dexterity_modifier is True and armor.dexterity_modifier_max == -1:
armor_class += self.get_ability_bonus('DEX')
elif armor.dexterity_modifier == True:
if self.get_ability_bonus('DEX') >= 2:
armor_class += 2
else:
armor_class += self.get_ability_bonus('DEX')
else:
armor_class += 10 + self.get_ability_bonus('DEX')
return armor_class
def __str__(self):
return self.char_name
class ClassLevel(models.Model):
"""Through table for class levels for a character."""
character = models.ForeignKey('Character', related_name='classlevels')
char_class = models.ForeignKey('rules.Class', related_name='classlevels')
class_level = IntegerMinMaxField(min_value=1, max_value=20)
def __str__(self):
return self.character.char_name
class SpellsReady(models.Model):
"""Through table to determine if a spell is just known, or known and ready."""
character = models.ForeignKey('Character', related_name='spellsready')
spells = models.ForeignKey('spells.Spell', related_name='spellsready')
spell_ready = models.BooleanField(default=False)
def __str__(self):
return self.spells.name
class Meta:
verbose_name = "Spell Ready"
verbose_name_plural = "Spells Ready"
| true |
b1b1bbabbc68d0ff14b30e62a8cd0d070aa0ab46 | Python | isabella232/moxie | /moxie/config.py | UTF-8 | 4,914 | 2.78125 | 3 | [
"MIT"
] | permissive | import yaml
import os
import logging
from contextlib import closing
from ordereddict import OrderedDict
from .route import Route
from .group import Group
def generate_address_from_index(index):
# we add 2 because:
# - 127.0.0.0 isn't a valid address
# - 127.0.0.1 is already taken
if 1 < index + 2 < 255:
return "127.0.0.{0}".format(index + 2)
else:
raise IndexError
class Config(object):
@classmethod
def load(cls, filename):
# generate blank config if file does not exist
if not os.path.exists(filename):
return Config()
with closing(open(filename, 'r')) as fp:
data = yaml.load(fp) or {}
defaults = data.get('defaults', {})
default_proxy = defaults.get('proxy')
default_ports = defaults.get('ports', [])
routes = []
for index, route in enumerate(data.get('routes', [])):
route = Route(
destination=route.get('destination'),
local_address=generate_address_from_index(index),
ports=route.get('ports', default_ports),
proxy=route.get('proxy', default_proxy),
)
if route.is_valid():
routes.append(route)
else:
logging.warn('Invalid route: destination=%s, ports=%s, proxy=%s' % (str(route.destination), str(route.ports), str(route.proxy)))
groups = {}
for index, group in enumerate(data.get('groups', [])):
if not group.get('name') in groups:
groups[group.get('name')] = group.get('destinations', [])
return Config(routes, groups, default_proxy, default_ports)
def __init__(self, routes=None, groups=None, default_proxy=None, default_ports=None):
routes = routes or []
groups = groups or {}
self.routes_by_destination = OrderedDict()
for route in routes:
self.routes_by_destination[route.destination] = route
self.groups_by_name = OrderedDict()
for name, destinations in groups.iteritems():
group = Group(
name=name,
routes=[self.routes_by_destination[destination] for destination in destinations]
)
if (group.is_valid()):
self.groups_by_name[group.name] = group
self.default_proxy = default_proxy
self.default_ports = default_ports
@property
def routes(self):
return self.routes_by_destination.values()
def add_route(self, destination, ports=None, proxy=None):
index = len(self.routes_by_destination)
if destination in self.routes_by_destination:
route = self.routes_by_destination[destination]
route.ports = list(set(route.ports).union(ports or []))
if proxy:
route.proxy = proxy
return route.is_valid()
# otherwise create
route = Route(
local_address=generate_address_from_index(index),
destination=destination,
ports=ports or self.default_ports,
proxy=proxy or self.default_proxy
)
if route.is_valid():
self.routes_by_destination[destination] = route
return True
else:
return False
def remove_route(self, destination):
if destination in self.routes_by_destination:
self.routes_by_destination[destination].stop()
del self.routes_by_destination[destination]
return True
return False
@property
def groups(self):
return self.groups_by_name.values()
def create_or_update_group(self, name, destinations):
if name not in self.groups_by_name:
self.groups_by_name[name] = Group(name)
routes = self.groups_by_name[name].routes
for destination in destinations:
if destination in self.routes_by_destination:
routes.append(self.routes_by_destination[destination])
return True
def remove_group(self, name):
if name in self.groups_by_name:
del self.groups_by_name[name]
return True
return False
def __getstate__(self):
output = {
'routes': [route.__getstate__() for route in self.routes],
'groups': [group.__getstate__() for group in self.groups]
}
defaults = {}
if self.default_proxy:
defaults['proxy'] = self.default_proxy
if self.default_ports:
defaults['ports'] = self.default_ports
if defaults:
output['defaults'] = defaults
return output
def save(self, filename):
with closing(open(filename, 'w')) as fp:
fp.write(yaml.dump(self.__getstate__()))
| true |
20af3517c0fcb0ac01b87d6fe1cc181becfc3b41 | Python | Muhammad-Ahmad-AI/Library-Management-System-in-Python | /Search_Student.py | UTF-8 | 4,396 | 2.671875 | 3 | [] | no_license | from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from PIL import ImageTk,Image
import os,glob
import mysql.connector
from mysql.connector import Error
class Search(Tk):
def __init__(self):
super().__init__()
f = StringVar()
g = StringVar()
self.title("Search Student")
self.maxsize(800,520)
self.canvas = Canvas(width=1366, height=768, bg='gray')
self.canvas.pack()
self.iconbitmap(r'libico.ico')
l1=Label(self,text="Search Student",bg='gray', font=("Courier new",20,'bold')).place(x=290,y=40)
l = Label(self, text="Search By",bg='gray', font=("Courier new", 15, 'bold')).place(x=180, y=100)
def insert(data):
self.listTree.delete(*self.listTree.get_children())
for row in data:
self.listTree.insert("","end",text = row[0], values = (row[1],row[2],row[3]))
def ge():
if (len(self.entry.get())) == 0:
messagebox.showinfo('Error', 'First select a item')
elif (len(self.combo.get())) == 0:
messagebox.showinfo('Error', 'Enter the '+self.combo.get())
elif self.combo.get() == 'Name':
try:
self.conn = mysql.connector.connect(host='localhost',
database='library',
user='root',
password='')
self.mycursor = self.conn.cursor()
name = self.entry.get()
self.mycursor.execute("Select * from student where name like %s",['%'+name+'%'])
pc = self.mycursor.fetchall()
if pc:
insert(pc)
else:
messagebox.showinfo("Oop's","Name not found")
except Error:
messagebox.showerror("Error", "Something goes wrong")
elif self.combo.get() == 'ID':
try:
self.conn = mysql.connector.connect(host='localhost',
database='library',
user='root',
password='')
self.mycursor = self.conn.cursor()
id = self.entry.get()
self.mycursor.execute("Select * from student where stud_id like %s", ['%' + id + '%'])
pc = self.mycursor.fetchall()
if pc:
insert(pc)
else:
messagebox.showinfo("Oop's", "Id not found")
except Error:
messagebox.showerror("Error", "Something goes wrong")
self.b= Button(self,text="Find",width=8,font=("Courier new",8,'bold'),command= ge )
self.b.place(x=400,y=170)
self.combo=ttk.Combobox(self,textvariable=g,values=["Name","ID"],width=40,state="readonly")
self.combo.place(x = 310, y = 105)
self.entry = Entry(self,textvariable=f,width=43)
self.entry.place(x=310,y=145)
self.la = Label(self, text="Enter",bg = 'gray', font=("Courier new", 15, 'bold')).place(x=180, y=140)
def handle(event):
if self.listTree.identify_region(event.x,event.y) == "separator":
return "break"
self.listTree = ttk.Treeview(self, height=13,columns=('Student Name', 'Phone Number', 'Address'))
self.vsb = ttk.Scrollbar(self,orient="vertical",command=self.listTree.yview)
self.listTree.configure(yscrollcommand=self.vsb.set)
self.listTree.heading("#0", text='Student ID', anchor='w')
self.listTree.column("#0", width=100, anchor='w')
self.listTree.heading("Student Name", text='Student Name')
self.listTree.column("Student Name", width=200, anchor='center')
self.listTree.heading("Phone Number", text='Phone Number')
self.listTree.column("Phone Number", width=200, anchor='center')
self.listTree.heading("Address", text='Address')
self.listTree.column("Address", width=200, anchor='center')
self.listTree.place(x=40, y=200)
self.vsb.place(x=743,y=200,height=287)
ttk.Style().configure("Treeview", font=('Times new Roman', 15))
Search().mainloop() | true |
d9d19bf76f045faa4e47ca4973fdab676771e04c | Python | monstertruck/ballnerds | /posts/andones/utils/plays.py | UTF-8 | 4,296 | 2.921875 | 3 | [] | no_license | import pandas as pd
import numpy as np
def get_and_ones(game_js: dict, pickteam: str) -> pd.DataFrame:
'''
For a given set of game_urls, return a dataframe with the collection of
and-one plays during those games.
'''
# A game is a list of plays
play_list = game_js['gameplaybyplay']['plays']['play']
# Currently only interested in plays that are shots or fouls
shot_or_foul = [play for play in play_list if (['fieldGoalAttempt', 'freeThrowAttempt', 'foul'] & play.keys())]
# 'flattening' the dict format to import into dataframe
for play in shot_or_foul:
fg_att = play.pop('fieldGoalAttempt', None)
if fg_att:
play = process_fg_att(play, fg_att, pickteam)
ft_att = play.pop('freeThrowAttempt', None)
if ft_att:
play = process_ft_att(play, ft_att, pickteam)
foul = play.pop('foul', None)
if foul:
# technical fouls cause problems (since they can be assigned to bench)
if foul['foulType'] == 'S.FOUL':
play = process_foul(play, foul)
play_df = pd.DataFrame(shot_or_foul)
# Cast ID variables as floats (since they have NaNs) so we can use them in our groupby
for id_var in [x for x in play_df.columns if 'ID' in x]:
play_df[id_var] = play_df[id_var].astype(float)
# Time is split into quarters - translate to one "seconds since beginning" measure
time_split = play_df['time'].str.split(':')
play_df['secs'] = (play_df['quarter'].astype(int) - 1)*720 + time_split.map(lambda x: x[0]).astype(int)*60 + time_split.map(lambda x: x[1]).astype(int)
# collect and-one candidates
plays_by_second = play_df.groupby('secs', as_index=False).sum()
and_one_plays = plays_by_second.loc[(plays_by_second['fg_made'] == 1) & (plays_by_second['ft_made'] == 1) & (plays_by_second['fl_type'] == 1) ]
and_one_plays = and_one_plays.assign( teammate = (and_one_plays['shooter_ID'] != and_one_plays['ft_shooter_ID']).astype(int))
return and_one_plays
def process_fg_att(play: dict, fg_att: dict, pickteam: str) -> dict:
    '''
    Process the field goal attempts.

    Flattens the raw 'fieldGoalAttempt' record into the play dict: point
    value, made flag, shot type, raw and translated court coordinates, a
    team-of-interest flag (only when pickteam is a string; compared
    case-insensitively), and one 'shooter_*' entry per shooter field.

    Args:
        play: play dict to enrich (mutated and returned)
        fg_att: raw field-goal-attempt record
        pickteam: team abbreviation of interest (ignored when not a str)

    Returns:
        The enriched play dict.
    '''
    play['fg_val'] = fg_att['Points']
    play['fg_made'] = int(fg_att['outcome'] == "SCORED")
    play['fg_type'] = fg_att['shotType']
    try:
        loc_x = int(fg_att['shotLocation']['x'])
        loc_y = int(fg_att['shotLocation']['y'])
        # we have to translate the coordinates to plot them correctly:
        # mirror shots past half court (x >= 470) onto one half, then center
        play['fg_loc_x_original'] = loc_x
        play['fg_loc_y_original'] = loc_y
        play['fg_loc_x'] = loc_x*(loc_x < 470) + (940 - loc_x)*(loc_x >= 470) - 470
        play['fg_loc_y'] = loc_y - 250
    except (KeyError, TypeError, ValueError):
        # Occasionally locations are missing or nonsense; record NaN instead.
        # FIX: narrowed from a bare `except:` (which also swallowed e.g.
        # KeyboardInterrupt); np.NaN -> np.nan for NumPy 2.x compatibility.
        play['fg_loc_x'] = np.nan
        play['fg_loc_y'] = np.nan
    if isinstance(pickteam, str):
        play['fg_team'] = int(fg_att['teamAbbreviation'].upper() == pickteam.upper())
    # get information on the shooter
    for key, value in fg_att['shootingPlayer'].items():
        play['shooter_' + key] = value
    return play
def process_ft_att(play:dict, ft_att: dict, pickteam: str) -> dict:
    '''
    Process the plays that are free throw attempts.

    Copies the attempt count and made flag into the play dict, marks whether
    the shooter's team matches pickteam (case-insensitive, only when pickteam
    is exactly a str), and flattens the shooting player's fields under
    'ft_shooter_*'.
    '''
    play['ft_att'] = ft_att['totalAttempts']
    scored = ft_att['outcome'] == "SCORED"
    play['ft_made'] = int(scored)
    if type(pickteam) is str:
        same_team = ft_att['teamAbbreviation'].upper() == pickteam.upper()
        play['ft_team'] = int(same_team)
    # get information on the shooter
    for key, value in ft_att['shootingPlayer'].items():
        play['ft_shooter_' + key] = value
    return play
def process_foul(play: dict, foul: dict) -> dict:
    '''
    Flatten a foul record into the play dict.

    Adds the fouling team, a shooting-foul flag (fl_type is 1 only for
    'S.FOUL'), 'fouled_*'/'fouler_*' entries for personal fouls, and the raw
    foul location when present.

    Args:
        play: play dict to enrich (mutated and returned)
        foul: raw 'foul' record

    Returns:
        The enriched play dict.
    '''
    play['fl_fouling_team'] = foul['teamAbbreviation']
    play['fl_type'] = int(foul['foulType'] == 'S.FOUL')
    if foul['isPersonal'] == 'true':
        for key, value in foul['drawnByPlayer'].items():
            play['fouled_' + key] = value
        for key, value in foul['penalizedPlayer'].items():
            play['fouler_' + key] = value
    try:
        play['fl_loc_x_original'] = int(foul['foulLocation']['x'])
        play['fl_loc_y_original'] = int(foul['foulLocation']['y'])
    except (KeyError, TypeError, ValueError):
        # Missing/nonsense location. FIX: narrowed from a bare `except:`;
        # np.NaN -> np.nan for NumPy 2.x compatibility.
        # NOTE(review): the success path fills 'fl_loc_*_original' while this
        # fallback fills 'fl_loc_x'/'fl_loc_y' -- looks inconsistent, but kept
        # as-is in case downstream code relies on these exact keys; confirm.
        play['fl_loc_x'] = np.nan
        play['fl_loc_y'] = np.nan
    return play
| true |
01be46f6dbb28fcff9201353099a48d650b9fcc7 | Python | bradymadden97/ncaamarchmadness | /scrape_tournament_rankings.py | UTF-8 | 1,780 | 3.0625 | 3 | [] | no_license | # Brady Madden
# Scraping Sports-Reference.com to find NCAA Men's Basketball Tournament rankings for each team
import urllib.request
import csv
from bs4 import BeautifulSoup
years = [2016, 2015, 2014, 2013, 2012, 2011]
for year in years:
# Write current tournment year in line before the teams/seeds
with open('ncaa_tournament_data2.csv', 'a', newline="") as f:
writer = csv.writer(f, delimiter=",")
writer.writerow([year])
link = str("http://www.sports-reference.com/cbb/postseason/" + str(year) + "-ncaa.html")
page = urllib.request.urlopen(link)
soup = BeautifulSoup(page, "html.parser")
# Determine names of four regions
brackets = soup.find('div', {'class': 'switcher filter'}).find_all('div')
i = 0
teams = []
while i < 4:
teams.append(brackets[i].a.string.lower())
i += 1
# Place each region's matchups into a seperate bracket variable
bONE = soup.find('div', {'id': teams[0]}).div.div.find_all('div')
bTWO = soup.find('div', {'id': teams[1]}).div.div.find_all('div')
bTHREE = soup.find('div', {'id': teams[2]}).div.div.find_all('div')
bFOUR = soup.find('div', {'id': teams[3]}).div.div.find_all('div')
quad = [bONE, bTWO, bTHREE, bFOUR]
# For each region loop through each matchup and record the team name and seed to CSV
for region in quad:
for matchup in region:
t = matchup.find_all('div')
for each in t:
seed = each.span.string
team = each.a['href'][13:][:-10].replace("-", " ").title()
with open('ncaa_tournament_data2.csv', 'a', newline="") as f:
writer = csv.writer(f, delimiter=",")
writer.writerow([str(team), seed])
print(year, "done")
| true |
2eac5eead4cdfa947c7c9801ca00b0c3ed576cdb | Python | npd15393/Gridworld-Q-Learning | /main.py | UTF-8 | 435 | 2.734375 | 3 | [] | no_license | import time
from env import Env
from agent import Agent
# Init
env=Env((5,6))
a1=Agent(env)
env.reset()
step_cnt=0
# Train
for ep in range(1000):
exp=env.step(a1)
step_cnt=step_cnt+1
a1.update(exp)
if step_cnt==10 or exp[-1]==True:
env.reset()
step_cnt=0
print("Episode: {}".format(ep))
# Test run trained policy
env.reset()
env.setTesting()
isDone=False
while not isDone:
_,_,_,_,isDone=env.step(a1)
time.sleep(0.3)
| true |
6ea31c0c6057520327e8c45d9bb5199e18c05781 | Python | vaibhavkrishna-bhosle/DataCamp-Data_Scientist_with_python | /26-Cluster Analysis in Python /K-Means Clustering /Elbow method on uniform data.py | UTF-8 | 1,277 | 3.828125 | 4 | [] | no_license | '''In the earlier exercise, you constructed an elbow plot on data with well-defined clusters. Let us now see how the elbow plot looks on a data set with uniformly distributed points. You may want to display the data points on the console before proceeding with the exercise.
The data is stored in a Pandas data frame, uniform_data. x_scaled and y_scaled are the column names of the standardized X and Y coordinates of points.
'''
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.cluster.vq import kmeans

# Standardized 2-D points with uniformly distributed coordinates
uniform_data = pd.read_csv('/Users/vaibhav/Desktop/Python Projects/DataCamp-Data Scientist with python/26-Cluster Analysis in Python /K-Means Clustering /uniform_data.csv')

distortions = []
num_clusters = range(2, 7)

# Create a list of distortions from the kmeans function
# (distortion = mean distance of the points to their assigned centers)
for i in num_clusters:
    cluster_centers, distortion = kmeans(uniform_data[['x_scaled', 'y_scaled']], i)
    distortions.append(distortion)

# Create a data frame with two lists - number of clusters and distortions
elbow_plot = pd.DataFrame({'num_clusters': num_clusters, 'distortions': distortions})

# Creat a line plot of num_clusters and distortions
sns.lineplot(x='num_clusters', y='distortions', data = elbow_plot)
plt.xticks(num_clusters)
plt.show()
1dc95fb30666f85e418fbc6751896e66967612de | Python | Avinash10000/SARBnet | /SARBnet_2.py | UTF-8 | 4,903 | 3.078125 | 3 | [] | no_license |
'''Open In Colab
Step 2: Running The CNN on the Raspberry Pi4B
Requirements
Run the list below and solve any dependency errors using sudo apt-get {module}'''

'''Constants'''
#These are the constant values needed for the whole code to function.
#name the temporary image path
TEMP_IMG_PATH = "img.jpg"
#directory to store images that have been classified
STORE_DIRECTORY = "Classified_Images"
#input dimensions for the images
WIDTH = 64
HEIGHT = 64
CHANNELS=3
#these are the list of classes that we have
CLASS_LIST = ['cardboard', 'compost', 'glass', 'metal', 'paper', 'plastic', 'trash']
''' need to change the next three'''
# Per-category membership lists used by predict_single_img to map a predicted
# class name onto a waste bin; each currently holds a single class.
COMPOST_LIST = ['compost']
TRASH_LIST = ['trash']
CARDBOARD_LIST = ['cardboard']
GLASS_LIST = ['glass']
METAL_LIST = ['metal']
PAPER_LIST = ['paper']
PLASTIC_LIST = ['plastic']
'''Taking the Picture and PiCamera/PIR Sensor/Servo Motor Code'''
from time import sleep
import cv2
import os
import tensorflow as tf
import keras
from keras.models import load_model
from glob import glob
# Import libraries
import RPi.GPIO as GPIO
import time
import numpy as np
import io
from picamera.array import PiRGBArray
from picamera import PiCamera
from PIL import Image as Img
''' need to add PIR part'''
import picamera
import picamera.array
# Hardware + model setup: camera stream, PIR motion sensor on pin 11 (input),
# and the pretrained Keras classifier.
camera = PiCamera()
stream=PiRGBArray(camera)
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
GPIO.setup(11,GPIO.IN)
num=1
#load the pretrained model
model = keras.models.load_model("trained_model.h5")
def scale_X(X):
    """Rescale raw 8-bit pixel intensities (0-255) to floats in [0.0, 1.0]."""
    scaled = X / 255.0
    return scaled
#preprocess the image given to be classified
def process_single_img(image):
    """Resize to WIDTH x HEIGHT, scale pixels to [0, 1], and reshape to the
    (WIDTH, HEIGHT, CHANNELS) tensor shape the model expects."""
    # img = cv2.imread(TEMP_IMG_PATH)
    #resize and normalize images
    if CHANNELS==1:
        # Grayscale mode: collapse the BGR channels first
        image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    image = cv2.resize(image, (WIDTH, HEIGHT))
    image = scale_X(image)
    image = image.reshape(WIDTH,HEIGHT,CHANNELS)
    return image
#predicts a single image given the numpy array of the image
def predict_single_img(image):
    """Classify one image with the preloaded model and map its class label
    onto a waste-bin category.

    Returns:
        (waste_type, pred_class): the bin name and the raw class label.
    """
    #preprocesss the image
    processed_img = process_single_img(image)
    #predict the image using the preloaded model
    prediction = model.predict(np.array([processed_img]))
    pred = np.argmax(prediction)
    print(pred)
    #match the numerica predicted class to the name
    pred_class = CLASS_LIST[pred]
    print(pred_class)
    #sort into trash, recycling or compost
    # Default bin; note TRASH_LIST is never tested -- 'trash' simply falls
    # through to this default.
    waste_type = "trash"
    if pred_class in COMPOST_LIST:
        waste_type = "compost"
    elif pred_class in METAL_LIST:
        waste_type= "metal"
    elif pred_class in CARDBOARD_LIST:
        waste_type = 'cardboard'
    elif pred_class in GLASS_LIST:
        waste_type = 'glass'
    elif pred_class in PAPER_LIST:
        waste_type = 'paper'
    elif pred_class in PLASTIC_LIST:
        waste_type = 'plastic'
    return (waste_type , pred_class)
#store the specific given waste type in the appropriate folder with
#an enumerated name
def store_in_folder(waste_type):
    """Move the temp capture into STORE_DIRECTORY/<waste_type>/ using the
    next sequential name, <waste_type><N+1>.jpg."""
    parent_dir = STORE_DIRECTORY+"/"+waste_type+"/"
    num = len(glob(parent_dir+"*.jpg"))
    print("current num images:",num)
    os.rename(TEMP_IMG_PATH, parent_dir+waste_type +str(num+1)+".jpg")
def pir_sensor():
    """Wait for the PIR motion sensor (pin 11), then capture and classify a frame.

    Polls every 0.5 s until motion is detected, grabs one camera frame, saves
    it to TEMP_IMG_PATH, and runs the classifier on it.

    Returns:
        (image, waste_type): the captured BGR frame and its waste-bin category.
    """
    while GPIO.input(11)==0:
        time.sleep(0.5)
    else:
        camera.capture(stream, format="bgr")
        image = stream.array
        cv2.imwrite(TEMP_IMG_PATH,image)
        sleep(1)
        camera.stop_preview()
        stream.truncate(0)
        # BUG FIX: the original discarded predict_single_img's result and then
        # returned the *undefined* local `waste_type`, raising NameError.
        waste_type, pred_class = predict_single_img(image)
        return (image, waste_type)
# Wait for motion and classify the first capture.
# NOTE(review): the returned (image, waste_type) is discarded here, yet the
# branching at the bottom of the file reads `pred_class` -- confirm wiring.
pir_sensor()
# Set GPIO numbering mode
GPIO.setmode(GPIO.BOARD)
# Set pin 12 as an output, and define as servo1 as PWM pin
GPIO.setup(12,GPIO.OUT)
servo1 = GPIO.PWM(12,50) # pin 11 for servo1, pulse 50Hz
# Start PWM running, with value of 0 (pulse off)
servo1.start(0)
def cleaning_things_at_the_end():
    """Stop the servo PWM signal and release all GPIO pins."""
    #Clean things up at the end
    servo1.stop()
    GPIO.cleanup()
    print("Goodbye!")
# defining the movement (90 degrees clockwise)
# time statment can be anything
# ChangeDutyCycle = 0 removes jitters by servo motor
# adding 2 is necessary
def moving_item_to_left():
    """Swing the servo platform one way (duty 12), pause, then return (duty 2)."""
    servo1.ChangeDutyCycle(12)
    time.sleep(0.5)
    servo1.ChangeDutyCycle(0)
    time.sleep(3.5)
    servo1.ChangeDutyCycle(2)
    time.sleep(0.5)
    servo1.ChangeDutyCycle(0)
'''defining the movement (90 degrees anti-clockwise)'''
# time statment can be anything
# ChangeDutyCycle = 0 removes jitters by servo motor
# adding 2 is necessary
def moving_item_to_right():
    """Same servo sweep as moving_item_to_left but with a short (0.5 s) hold."""
    servo1.ChangeDutyCycle(12)
    time.sleep(0.5)
    servo1.ChangeDutyCycle(0)
    time.sleep(0.5)
    servo1.ChangeDutyCycle(2)
    time.sleep(0.5)
    servo1.ChangeDutyCycle(0)
# Route the item based on the predicted class.
# BUG FIX: the original tested `pred_class == 'cardboard' or 'trash' or ...`,
# which is always truthy (non-empty string literals), so the left branch
# always ran regardless of the prediction.
# NOTE(review): `pred_class` is not defined at module scope -- it is
# presumably the label returned via pir_sensor()/predict_single_img();
# confirm the wiring before relying on this branch.
if pred_class in ('cardboard', 'trash', 'glass', 'paper', 'plastic'):
    moving_item_to_left()
    cleaning_things_at_the_end()
elif pred_class in ('compost', 'trash'):
    moving_item_to_right()
    cleaning_things_at_the_end()
| true |
db5a648c83c388392bf299a30b22666d62de1ab0 | Python | 4bhis1/Kate | /Senoritta 2.0.py | UTF-8 | 6,674 | 2.5625 | 3 | [] | no_license | import pyttsx3
import datetime
import socket
import win32api,win32con
import speech_recognition as sr
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
def speak(audio):
    """Speak the given text aloud through the pyttsx3 TTS engine (blocks
    until playback finishes)."""
    engine.say(audio)
    engine.runAndWait()
def wishMe():
    """Greet the user with a salutation matching the current local hour."""
    current_hour = datetime.datetime.now().hour
    if current_hour < 12:
        greeting = "Good Morning!"
    elif current_hour < 18:
        greeting = "Good Afternoon!"
    else:
        greeting = "Good Evening!"
    speak(greeting)
def wikipedia_search(nkb):
    """Look up `nkb` on Wikipedia and read a two-sentence summary aloud.

    On any failure (network error, missing page, disambiguation, ...) the
    original best-effort behavior is kept: announce an error and return.
    """
    try:
        # BUG FIX: the original summarized the *global* `query` instead of the
        # `nkb` argument, so callers passing a cleaned-up phrase were ignored.
        results = wikipedia.summary(nkb, sentences=2)
        speak("According to Wikipedia")
        print(results)
        speak(results)
    except Exception:
        # Narrowed from a bare `except:`; message text kept verbatim.
        speak("Bad request. Error 401")
def is_connected():
    """Probe internet connectivity by opening a TCP connection to google.com:80.

    Sets the module-level `internet` flag to True on success and announces the
    result via TTS. On failure the flag is left untouched (the caller
    initializes it to False beforehand).
    """
    try:
        # connect to the host -- tells us if the host is actually reachable.
        # FIX: close the probe socket instead of leaking it.
        conn = socket.create_connection(("www.google.com", 80))
        conn.close()
        speak("Connected to internet.")
        global internet
        internet = True
    except OSError:
        speak("Sorry sir system is offline.")
# Playback-state flag: set to 1 after 'play music' so the 'next' command works.
k=0
def news(urlo):
    """Fetch a NewsAPI endpoint and read each article title aloud.

    Args:
        urlo: full NewsAPI URL (category and API key already embedded).
    """
    news=requests.get(urlo).text
    news=json.loads(news)
    #print(news["articles"])
    #print(news)
    speak('fetching the news from server')
    print('fetching the news from server')
    arts=news['articles']
    for arti in arts:
        print(arti["title"])
        #tranlation=translator.translate(arti["title"])
        #print(tranlation)
        speak(arti["title"])
        # brief pause between headlines
        time.sleep(2)
    speak("Over")
    print("khtm")
def play_music():
    """Open a randomly chosen file from the music directory with its default app."""
    music_dir = 'E:\\Music'
    songs = os.listdir(music_dir)
    # BUG FIX: the original indexed songs[random.randint(0, 51)], which raises
    # IndexError unless the folder holds at least 52 files; pick uniformly
    # from whatever is actually there.
    os.startfile(os.path.join(music_dir, random.choice(songs)))
llopk=False
def takeCommand():
    #It takes microphone input from the user and returns string output
    # Returns the recognized phrase, or the literal string "None" on failure.
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening...")
        r.pause_threshold = 1
        audio = r.listen(source)
    try:
        print("Recognizing...")
        # Google Web Speech API, Indian English
        query = r.recognize_google(audio, language='en-in')
        print(f"User said: {query}\n")
    except Exception as e:
        # print(e)
        print("Say that again please")
        #speak("Say that again please...")
        return "None"
    return query
if __name__ == "__main__":
    # Outer loop: idle until the wake phrase ("senorita ... on") or the
    # shutdown phrase ("over") is heard.
    while True:
        jklm = takeCommand().lower()
        if 'senorita' in jklm and 'on' in jklm:
            #print("i am on");
            llopk=True
            wishMe()
            speak("Checking intenet connection")
            internet = False
            is_connected()
            speak("Connecting to server sir. Wait for a while")
            # Heavy imports deferred until the assistant is activated
            from win32com.client import Dispatch
            #from googlesearch import search
            import requests
            import json
            import time
            import speech_recognition as sr
            from datetime import date as dt
            from datetime import datetime as ddt
            import wikipedia
            import webbrowser
            import os
            import smtplib
            import random
            speak("Conection established.")
            # Inner command loop: one spoken command per iteration
            while llopk:
                query = takeCommand().lower()
                query=query.replace("senoritta","")
                if 'wikipedia' in query:
                    query = query.replace("search", "")
                    query = query.replace("wikipedia", "")
                    wikipedia_search(query)
                elif 'play music'in query:
                    play_music()
                    k=1
                # NOTE(review): if 'next' is spoken before 'play music' in this
                # session, the global k=0 applies and this branch is skipped.
                elif 'next' in query and k==1:
                    play_music()
                elif 'time' in query:
                    strTime = datetime.datetime.now().strftime("%H:%M:%S")
                    speak(f"Sir, the time is {strTime}")
                elif 'date' in query:
                    today=dt.today()
                    speak(f"todays date is {today}")
                elif 'day' in query:
                    gk=ddt.today().strftime("%A")
                    speak(f"todays date is {gk}")
                elif 'speak' in query:
                    query = query.replace("speak", "")
                    speak(query)
                elif 'write' in query and 'line' in query:
                    speak("Working on this project.. It will take some time")
                elif 'money' in query and 'write' in query:
                    speak("Working on this project... It will take some time")
                elif 'spell' in query:
                    # Spell the remaining phrase letter by letter
                    query=query.replace("spell","")
                    kl=""
                    for i in range(0,len(query)) :
                        kl=kl+query[i]+" "
                    speak(kl)
                    print(kl)
                elif 'news' in query:
                    try:
                        # Pick the NewsAPI endpoint by category keyword
                        if 'sports' in query:
                            url="https://newsapi.org/v2/top-headlines?country=in&category=sports&apiKey=d841d6dce86044d6bda182f600f91e73"
                        elif "techno" in query:
                            url="https://newsapi.org/v2/top-headlines?country=in&category=technology&apiKey=d841d6dce86044d6bda182f600f91e73"
                        elif "science" in query:
                            url="https://newsapi.org/v2/top-headlines?country=in&category=science&apiKey=d841d6dce86044d6bda182f600f91e73"
                        else:
                            url="https://newsapi.org/v2/top-headlines?sources=google-news-in&apiKey=d841d6dce86044d6bda182f600f91e73"
                        news(url)
                    except OSError:
                        speak("Sorry sir system is offline.")
                elif 'close' in query:
                    # Leave the command loop; fall back to idle listening
                    break
                elif query!='':
                    # Unknown command: offer a Wikipedia lookup instead
                    speak('should i search it on wikipedia sir?' )
                    bkl=takeCommand().lower()
                    if 'yes' in bkl:
                        if internet:
                            wikipedia_search(bkl)
                        else:
                            speak("System is offline")
                    #else:
                        #speak('not programmed to do this work, building up myself')
        elif "over" in jklm:
            # Shut down the host machine (Windows)
            os.system("shutdown /s /t 1")
| true |
89cadc7544329c3983ef6c660190e41398a58edc | Python | leungwingkwan/Statistics-Python-Programs | /Example_9.3.3.py | UTF-8 | 582 | 3.234375 | 3 | [] | no_license | import numpy as np
# Simple linear regression sums (Example 9.3.3): compute the corrected sums
# of squares/products for (x, y) and the residual variance estimate.
x = [100, 110, 120, 130, 140, 150, 160, 170, 180, 190]
y = [45, 51, 54, 61, 66, 70, 74, 78, 85, 89]

x_mean = np.mean(x)
y_mean = np.mean(y)

# S_xx: sum of squared deviations of x about its mean
S_xx = sum((xi - x_mean) ** 2 for xi in x)
print('S_xx = ', S_xx)

# S_yy: sum of squared deviations of y about its mean
S_yy = sum((yi - y_mean) ** 2 for yi in y)
print('S_yy = ', S_yy)

# S_xy: sum of cross products of the paired deviations
S_xy = sum((xi - x_mean) * (yi - y_mean) for xi, yi in zip(x, y))
print('S_xy = ', S_xy)

# Least-squares slope, residual sum of squares, and variance estimate
b = S_xy / S_xx
Q_e = S_yy - b * S_xy
n = len(x)
sigma_sqr = Q_e / (n - 2)
print('sigma_sqr =', sigma_sqr)
| true |
401d7527d2d2dc51d7d054c30890f8ff38f7857f | Python | CoREse/PocaCNV | /rmlt | UTF-8 | 594 | 2.765625 | 3 | [] | no_license | #!/usr/bin/python3
import subprocess
TestDirRoot="test"
run=subprocess.Popen(["ls",TestDirRoot],bufsize=1,stdout=subprocess.PIPE,universal_newlines=True)
Runs=[]
while True:
line=run.stdout.readline()
if line=="":
break
if len(line)<7:
continue
if line[:7]=="TestRun":
Runs.append(line.strip())
run.wait()
Runs.sort()
ToBeRemoved=TestDirRoot+"/"+Runs[-1]
print("%s is going to be removed?[y]/n"%(ToBeRemoved))
Ans=input()
if (Ans=="" or Ans.upper()=="Y"):
subprocess.Popen(["rm","-r",ToBeRemoved]).wait()
print("%s is removed."%(ToBeRemoved))
| true |
3c4e9f2c3588e0360de3c0da18fc2b6c327e7866 | Python | HageChaduke/PythonWebServer | /CGI-BIN/RssReader/RssReader1.py | UTF-8 | 814 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python3
# conding: utf-8
from RssParser import parse_rss
from HttpHandler import Request, Response
import cgitb; cgitb.enable() # (1)
form_body = u"""
<form method="POST" action="/cgi-bin/RssReader/RssReader1.py">
RSSのURL:
<input type="text" size="40" name="url" value="%s"/>
<input type="submit" />
</form>"""
rss_parts = u"""
<h3><a href="%(link)s">%(title)s</a></h3>
<p>%(description)s</p>
"""
content = u"URLを入力してください"
req = Request()
if 'url' in req.form:
try:
rss_list = parse_rss(req.form['url'].value)
content = ""
for d in rss_list:
content += rss_parts % d
except:
pass
res = Response()
body = form_body % req.form.getvalue('url', '')
body += content
res.set_body(Response.get_htmltemplate() % body)
print(res)
| true |
94699813c35b8d86b3e0ba3718d1644c72a161c2 | Python | mottaquikarim/pydev-psets | /pset_challenging_ext/exercises/solutions/p53.py | UTF-8 | 565 | 4.4375 | 4 | [
"MIT"
] | permissive | """
Define a class named Rectangle which can be constructed by a length and width. The Rectangle class has a method which can compute the area.
"""
"""Define a class named Rectangle which can be constructed by a length and width. The Rectangle class has a method which can compute the area.
Hints:
Use def methodName(self) to define a method.
"""
class Rectangle(object):
    """A rectangle defined by its length and width."""

    def __init__(self, l, w):
        self.length = l
        self.width = w

    def area(self):
        """Return the rectangle's area (length * width)."""
        return self.length * self.width


aRectangle = Rectangle(2, 10)
# BUG FIX: the original used Python 2 print-statement syntax
# (`print aRectangle.area()`), which is a SyntaxError under Python 3.
print(aRectangle.area())
| true |
ffed6bed02fc9eb01ddea8bdd991785bffabb049 | Python | Digital-Biobank/covid_variant_severity | /randomize.py | UTF-8 | 472 | 2.875 | 3 | [] | no_license | import pandas as pd
# %% Read in data random rows
df = pd.read_parquet("data/00_77142-vcf_wide.parquet")
# %% Randomize rows
random_rows = df.sample(
frac=1,
random_state=42,
axis="index"
).set_axis(df.index, axis="index")
# %% Randomize columns
random_cols = random_rows.sample(
frac=1,
random_state=42,
axis="columns"
).set_axis(df.columns, axis="columns")
# %% Save randomized data
random_cols.to_parquet("00_77142-vcf_wide_random.parquet")
| true |
edec3c1c9f569b43cee617d37a2ec219bf86f26e | Python | iuga-paula/Programarea-algorimilor | /lab4_2/my_module.py | UTF-8 | 316 | 3.140625 | 3 | [] | no_license | def citeste(n, v):
n = int(input("dati n= "))
s = input("dati lista= ")
for x in s.split():
v.append(int(x))
def afisare(v):
    """Print the elements of v on one line, each followed by a space,
    without a trailing newline."""
    if v:
        print(*v, sep=" ", end=" ")
def valpoz(v):
    """Return a new list holding only the strictly positive values of v."""
    return list(filter(lambda item: item > 0, v))
def semn(v):
    """Flip the sign of every element of v, in place.

    BUG FIX: the original rebound the local name `v` to a freshly negated
    list once per element, which never affected the caller's list (and the
    repeated negations cancelled out for even lengths). Slice assignment
    mutates the caller's list, matching how citeste() fills it in place.
    """
    v[:] = [-x for x in v]
| true |
2dfbcaacb9026ad389e1b072f28bc5151e127f84 | Python | jacksonpradolima/travistorrent-tools | /bin/travis_plot_repo_analysis.py | UTF-8 | 2,000 | 2.859375 | 3 | [] | no_license | import argparse
import json
import math
import matplotlib.pyplot as plt
import numpy as np
import operator
import pandas as pd
def generate_plot(data, filename, dataname, title):
    """Save a boxplot of `data` (minutes) next to the source JSON file.

    The output name is the JSON path with ".json" replaced by
    "-<dataname>.png"; the plot title is the repo directory name.
    """
    filename = filename.replace(".json", "") + "-" + dataname + ".png"
    # print("Mean:", np.mean(data), "\nMin:", min(data), "\nMax:", max(data))
    plt.figure()
    # Means shown, outliers hidden to keep the box readable
    plt.boxplot([data], showmeans=True, showfliers=False)
    plt.title(filename.split("/")[-2])
    plt.ylabel('delay (min)')
    plt.xticks([1], [title])
    plt.grid(True)
    plt.savefig(filename)
def pre_processing(filename):
    """Load a repo's Travis build log (JSON) and plot two boxplots:
    build durations and the intervals between consecutive build starts.

    Args:
        filename: path to the repo-data-travis.json file.
    """
    # Get the dataset in json format
    with open(filename, 'r') as f:
        r = json.loads(f.read().replace('\n', ''))
    # parse to pandas
    df = pd.DataFrame(r)
    # convert columns to datetime
    df.started_at = pd.to_datetime(df.started_at)
    df.finished_at = pd.to_datetime(df.finished_at)
    # If the build was canceled, we have only the finished_at value.
    # (Reassignment instead of chained inplace=True, which pandas deprecates.)
    df.started_at = df.started_at.fillna(df.finished_at)
    # Sort by build start. BUG FIX: reset the index so the positional access
    # below follows the *sorted* order -- the original kept the pre-sort
    # labels, so label-based indexing walked the rows in arrival order.
    df = df.sort_values(by=['started_at']).reset_index(drop=True)
    # Convert to minutes only valid build duration
    duration = [x / 60 for x in df.duration.tolist() if x > 0]
    # Gaps between consecutive build starts, in minutes.
    # BUG FIX: the original used (earlier - later).seconds, which wraps a
    # negative timedelta to its positive within-day remainder; subtract in
    # chronological order and use total_seconds() instead.
    starts = df.started_at
    diff_date = [(starts[i + 1] - starts[i]).total_seconds() / 60
                 for i in range(len(starts) - 1)]
    generate_plot(diff_date, filename, "diff-date", "Interval Between Dates")
    generate_plot(duration, filename, "build-duration", "Build Duration")
if __name__ == '__main__':
    # CLI entry point: -r/--repository points at a directory that holds
    # a repo-data-travis.json build log.
    ap = argparse.ArgumentParser(description='Extract commit details')
    ap.add_argument('-r', '--repository', dest='repository', type=str, required=True, help='Directory of the project to analyse')
    args = ap.parse_args()
    pre_processing("{}/repo-data-travis.json".format(args.repository))
    # to test
    # pre_processing("../build_logs/iluwatar@java-design-patterns/repo-data-travis.json")
| true |
a77e59bf2e809a3d0cde4f090bab1d4b524c2ef5 | Python | thebenjhoward/cpsc-322-project-proposal | /mysklearn/myutils.py | UTF-8 | 21,065 | 3.953125 | 4 | [] | no_license | ##############################################
# Programmer: Elizabeth Larson (starter code by Dr. Gina Sprint)
# Class: CPSC 322-01, Spring 2021
# Programming Assignment #6
# 04/14/21
# I didn't attempt the bonus.
#
# Sources:
# Checking if all list values are the same (case 2 decision trees): https://www.geeksforgeeks.org/python-check-if-all-elements-in-a-list-are-same/
#
# Description: This program computes reusable general-purpose functions
##############################################
import random # For majority voting leaf node "flip a coin" solution (if the clash result is 50%/50%)
import math # For log calculations
def compute_euclidean_distance(v1, v2):
    """Euclidean distance between the first two coordinates of two vectors.

    Only the leading (x, y) pair of each vector is compared; any trailing
    components are ignored.

    Args:
        v1(list of numeric vals): First vector (at least two components)
        v2(list of numeric vals): Second vector (at least two components)

    Returns:
        dist(float): The 2-D distance between the vectors
    """
    dx = v1[0] - v2[0]
    dy = v1[1] - v2[1]
    return (dx ** 2 + dy ** 2) ** (1 / 2)
def convert_to_DOE(values):
    """Convert a list of values (MPG, for this dataset) to the DOE values
    listed in the table in step 1's notebook/write-up.

    Bucket boundaries (upper bounds, inclusive): 13, 14, 16, 19, 23, 26, 30,
    36, 44; anything above 44 is rank 10.

    BUG FIXES vs the original chain:
      * values in (26, 27] previously skipped every bucket and fell into the
        final `value < 44` branch (rank 10); they now map to rank 7.
      * the final branch read `value < 44` instead of `> 44`, so values over
        44 appended *nothing*, silently misaligning the output list.

    Args:
        values (list of float): The values we are converting

    Returns:
        converted_values (list of int): Values on a scale of [1-10], one per input
    """
    converted_values = []
    for value in values:
        if value <= 13:        # <=13
            converted_values.append(1)
        elif value <= 14:      # 14
            converted_values.append(2)
        elif value <= 16:      # 15-16
            converted_values.append(3)
        elif value <= 19:      # 17-19
            converted_values.append(4)
        elif value <= 23:      # 20-23
            converted_values.append(5)
        elif value <= 26:      # 24-26
            converted_values.append(6)
        elif value <= 30:      # 27-30
            converted_values.append(7)
        elif value <= 36:      # 31-36
            converted_values.append(8)
        elif value <= 44:      # 37-44
            converted_values.append(9)
        else:                  # >=45
            converted_values.append(10)
    return converted_values
def normalize_data(values):
    """Min-max normalize a group of values to a 0.0-1.0 scale.

    Args:
        values(list of numeric): Data we want to normalize (non-empty)

    Returns:
        normalized_values(list of float): values shifted and scaled so the
        minimum maps to 0.0 and the maximum maps to 1.0
    """
    # Shift so the smallest value becomes 0
    min_value = min(values)
    shifted = [value - min_value for value in values]
    # Scale so the largest value becomes 1
    max_value = max(shifted)
    # BUG FIX: guard the constant-input case (all values equal), which
    # previously raised ZeroDivisionError; return all zeros instead.
    if max_value == 0:
        return [0.0 for _ in shifted]
    return [value / max_value for value in shifted]
def calculate_accuracy_and_error_rate(matrix):
    """Compute accuracy and error rate from a confusion matrix.

    Accuracy is the diagonal sum (correct predictions) over the total count;
    error rate is everything off the diagonal over the total, so the two
    always sum to 1.0.

    BUG FIX: the original stopped scanning each row at the diagonal entry, so
    misclassifications in columns *past* the diagonal were never counted and
    accuracy + error_rate could sum to less than 1.

    Args:
        matrix(list of list of obj): The (square) confusion matrix we're checking

    Returns:
        accuracy(float): How many guesses were correct (decimal form of %)
        error_rate(float): How many guesses were incorrect (decimal form of %)
    """
    total = float(sum(sum(row) for row in matrix))
    if total == 0.0:
        # Nothing was predicted at all; keep the original degenerate result
        return 0.0, 1.0
    correct = sum(matrix[i][i] for i in range(len(matrix)))
    incorrect = total - correct
    accuracy = correct / total
    error_rate = incorrect / total
    return accuracy, error_rate
def calculate_distance_categorical_atts(X, y, v):
    """Predict a label for a categorical vector via nearest-neighbor matching.

    The "closest" training row is the one sharing the most attribute values
    with v (ties broken by the earliest such row); its label in y is returned.

    Args:
        X(list of list of obj): X_train (the dataset)
        y(list of obj): y_train (col we're predicting on)
        v(list of obj): Vector values

    Returns:
        The predicted label
    """
    # Count per-row how many attribute positions agree with v
    match_counts = [
        sum(1 for col, value in enumerate(v) if value == X[row][col])
        for row in range(len(X))
    ]
    # First row achieving the maximum match count wins
    best_row = match_counts.index(max(match_counts))
    return y[best_row]
def all_same_class(instances):
    """Report whether every instance carries the same class label.

    The class label is taken to be the last element of each instance.

    Args:
        instances(list of lists): Instance set we're checking (non-empty)

    Returns:
        True when all labels match the first instance's label, else False
    """
    reference = instances[0][-1]
    return all(instance[-1] == reference for instance in instances)
def select_attribute(instances, available_attributes, domains, y_train_col_index):
    """Pick an attrubite to split on based on entropy calculations

    Args:
        instances(list of lists): Instance set we're calculating the entropy of
        available_attributes(list): Potential attributes we can split on
        domains(dict): Possible values for each col (e.g. "yes" and "no")
        y_train_col_index(int): Col index of the y_train attribute (not for splitting)

    Returns:
        available_attributes[att_to_split_on_index](string): The name of the attribute we're splitting on
    """
    # Calculate the attribute domains dictionary (e.g. standing can be 1 or 2)
    # NOTE(review): calculate_e_new is defined elsewhere in this module --
    # assumed to return the weighted-entropy (E_new) of splitting on `index`.
    e_news = []
    domains_list = list(domains.items())
    for index in range(len(available_attributes)):
        if y_train_col_index != index: # Skip the att we're trying to predict on (e.g. interviews_well)
            e_news.append(calculate_e_new(index, y_train_col_index, instances, domains_list[index], domains_list[y_train_col_index]))
    # Choose the smallest of the four and split on that, but also check for duplicate e_news calculations (occurs in the interview test dataset)
    try:
        att_to_split_on_index = e_news.index(min(e_news))
        # Tie-break quirk: when the chosen minimum equals its immediate
        # successor, shift the choice to the later attribute.
        for e_new_index in range(len(e_news)):
            if (e_new_index + 1) < len(e_news):
                if e_news[e_new_index] == e_news[e_new_index + 1] and e_new_index == att_to_split_on_index:
                    att_to_split_on_index = e_new_index + 1
    except ValueError: # For when e_news is empty
        att_to_split_on_index = 0
    return available_attributes[att_to_split_on_index]
def partition_instances(instances, split_attribute, headers, domains):
    """Split instances into partitions keyed by each value of split_attribute.

    Every value in the attribute's domain gets an entry (possibly an empty
    list), so the partitions are pairwise disjoint and cover the domain.

    Args:
        instances(list of lists): Instance set we're partitioning
        split_attribute(string): Attribute name we're going to be splitting on
        headers(list): Attribute names, corresponds with the instances
        domains(dict): Possible values for each col (e.g. "yes" and "no")

    Returns:
        partitions(dict): attribute value -> list of matching instances
    """
    attribute_index = headers.index(split_attribute)
    return {
        value: [instance for instance in instances
                if instance[attribute_index] == value]
        for value in domains[split_attribute]
    }
def tdidt(current_instances, available_attributes, headers, domains):
    """Create a tree given a set of instances
    Handles 3 cases (listed in the comments below)

    Args:
        current_instances(list of lists): Instances we're looking at
        available_attributes(list): Attribute names we can still split on
        headers(list): All attribute names
        domains(dict): Possible values for all atts

    Returns:
        A constructed tree (as a list of lists of lists...)
    """
    # Select an attribute to split on, then remove if from available attributes
    # NOTE(review): .remove() mutates the caller's list; recursion below passes
    # a .copy() so siblings are unaffected, but the top-level caller's list is
    # still consumed -- confirm callers pass a throwaway copy.
    split_attribute = select_attribute(current_instances, available_attributes, domains, (len(available_attributes) - 1))
    available_attributes.remove(split_attribute)
    tree = ["Attribute", split_attribute]
    # Group data by attribute domains (creates pairwise disjoint partitions)
    partitions = partition_instances(current_instances, split_attribute, headers, domains)
    # For each partition, repeat unless one of the following base cases occurs:
    # CASE 1: All class labels of the partition are the same => make a leaf node
    # CASE 2: No more attributes to select (clash) => handle clash w/majority vote leaf node
    # CASE 3: No more instances to partition (empty partition) => backtrack and replace attribute node with majority vote leaf node
    for attribute_value, partition in partitions.items():
        values_subtree = ["Value", attribute_value]
        if len(partition) > 0 and all_same_class(partition): # Case 1
            # Leaf stores: label, partition size, parent size
            leaf_subtree = ["Leaf", partition[-1][-1], len(partition), len(current_instances)]
            values_subtree.append(leaf_subtree)
        elif len(partition) > 0 and len(available_attributes) == 0: # Case 2
            leaf_value = perform_majority_voting(current_instances, domains) # Perform majority voting
            # Create a leaf node based on this
            leaf_subtree = ["Leaf", leaf_value, len(partition), len(current_instances)]
            values_subtree.append(leaf_subtree)
        elif len(partition) == 0: # Case 3
            leaf_value = perform_majority_voting(current_instances, domains) # Perform majority voting
            # Create a leaf node based on this
            leaf_subtree = ["Leaf", leaf_value, len(current_instances), len(current_instances)]
            values_subtree.append(leaf_subtree)
        else: # All base cases are false... time to recurse!
            subtree = tdidt(partition, available_attributes.copy(), headers, domains)
            values_subtree.append(subtree)
        tree.append(values_subtree)
    return tree
def perform_majority_voting(clashing_instances, domains):
    """Look for a leaf node value for clashing instances

    Picks the class label that occurs most often among the clashing
    instances; if every label occurs equally often, one label is chosen
    uniformly at random.

    Args:
        clashing_instances(list of lists): The instances we're looking for the leaf node value of
        domains(dict): Possible values for each col (class label domain is the last entry)

    Returns:
        The leaf node value
    """
    # The class-label domain is the last entry in the domains dict
    label_values = list(domains.items())[-1][1]

    # Tally how many clashing instances carry each possible label
    tallies = [sum(1 for instance in clashing_instances if instance[-1] == label)
               for label in label_values]

    # Perfect tie across every label -> flip a coin
    if all(tally == tallies[0] for tally in tallies):
        coin = random.randint(0, len(label_values) - 1)
        return label_values[coin]

    # Otherwise return the first label with the highest tally
    return label_values[tallies.index(max(tallies))]
def calculate_entropy(priors, entropy_values):
    """Calculate weighted average of partition entropies

    Priors and entropy values are parallel lists.

    Args:
        priors(list): Total occurances in the dataset
        entropy_values(list): Calculated entropy values

    Returns:
        avg(float): Weighted average of the entropy values
    """
    total = 0.0
    for prior, entropy in zip(priors, entropy_values):
        total += prior * entropy
    return total
def calulate_entropy_for_one_partition(values):
    """Calculate the entropy of a partition given values

    Computes -sum(v * log2(v)) over the given proportions. A value of 0
    raises ValueError (log of zero), which callers handle.

    Args:
        values(list): The values we're calculating the entropy of

    Returns:
        e(float): Calculated entropy
    """
    # Seed with the first term so an empty list fails loudly, as before
    entropy = -(values[0] * math.log(values[0], 2))
    for value in values[1:]:
        entropy -= value * math.log(value, 2)
    return entropy
def calculate_e_new(col_index, y_train_col_index, instances, domain, y_train_domain):
    """Calculate entropy stats for a domain (priors, entropy for each, total entropy)

    Args:
        col_index(int): Col index of the att we're calulating the entropy of
        y_train_col_index(int): y_train col index
        instances(list of lists): The data table
        domain: (attribute name, list of values) pair — e.g. one entry of domains.items()
        y_train_domain: (class name, list of values) pair for the class label

    Returns:
        e_new(float): Total entropy (weighted average of partition entropies)
    """
    # Find the total number of instances in the dataset
    # NOTE(review): assumes Python 3 true division for count/total below — confirm
    total = len(instances)
    # Load priors (aka how many times do Senior/Mid/Junior appear total?)
    priors = []
    for domain_value in domain[1]: # domain[1] gives a list of domain values
        count = 0
        for instance in instances:
            if instance[col_index] == domain_value:
                count += 1
        priors.append(count/total)
    # Entropy of the each domain value (e.g. e of Senior, Mid, and Junior for level)
    # Check for matches (e.g. all cases of Senior and False, then Senior and True...)
    entropy_values = []
    for domain_value in domain[1]:
        values_to_calc = []
        for y_train_domain_value in y_train_domain[1]:
            count = 0
            # NOTE: `total` is deliberately re-used here as the per-partition
            # denominator; the dataset-wide total above was already consumed.
            total = 0
            for instance in instances:
                if instance[col_index] == domain_value:
                    if instance[y_train_col_index] == y_train_domain_value:
                        count += 1 # Both values match! Incremeant the count (numerator)
                    total += 1 # Either way, incremeant the total (denominator)
            if total == 0:
                values_to_calc.append(0.0)
            else:
                values_to_calc.append(count/total)
        try:
            e = calulate_entropy_for_one_partition(values_to_calc)
        except ValueError: # For when the calc is undefined (log of 0)
            e = 0.0
        entropy_values.append(e)
    # Weighted average of its partition entropies
    e_new = calculate_entropy(priors, entropy_values)
    return e_new
def predict_recursive_helper(tree, X_test):
    """Predict the leaf node based on X_test values

    Walks the tree recursively: at each "Attribute" node the matching
    "Value" subtree for X_test is followed until a "Leaf" is reached.
    Handles cases where the y_test attribute is split on.

    Args:
        tree(list of lists): The tree we're checking
        X_test(list): Values we're predicting for (attribute values only)

    Returns:
        Either the leaf value, or None if no value branch matches
    """
    label = tree[0]  # e.g. Attribute, Value, or Leaf
    if label == "Attribute":
        # Attribute nodes are named "attN" where N is the column index.
        # BUG FIX: the old per-character scan kept only the LAST digit, so
        # "att10" resolved to index 0... parsing everything after "att"
        # handles multi-digit indexes while matching the old behavior for
        # "att0".."att9".
        att_index = int(tree[1][3:])
        # In case we split on the class label
        if att_index >= len(X_test):
            return tree[2][2][1]
        # Grab the value at this index and see if we have a match going down the tree
        instance_value = X_test[att_index]
        for value_list in tree[2:]:
            if value_list[1] == instance_value:  # Recurse when a match is found
                return predict_recursive_helper(value_list[2], X_test)
    else:
        return tree[1]  # Grab the value of the leaf
def make_rule_for_one_branch(tree, attribute_names, class_name, rule):
    """Grab a list of strings that represents one branch's rule (see args for formatting)

    Assumes that rule is already populated with the split attribute info
    (["IF", "att0", "=", "value"]) upon initial call.

    Args:
        tree(list of lists): The tree/subtree we're looking at
        attribute_names(list of str or None): A list of attribute names to use in the decision rules
            (None if a list is not provided and the default attribute names based on indexes (e.g. "att0", "att1", ...) should be used).
        class_name(str): A string to use for the class name in the decision rules
            ("class" if a string is not provided and the default name "class" should be used).
        rule(list of strings): A rule, formatted like IF att0 == value AND ... THEN class_label = True (but with each element in an encapsulating list)

    Returns:
        list of strings: The full branch's rule (or a list of such rules when
        the subtree splits on an attribute)
    """
    label = tree[0]  # e.g. Attribute, Value, or Leaf
    if label == "Leaf":  # We've hit the end of a branch
        rule.append("THEN")
        rule.append(class_name)
        rule.append("==")
        rule.append(tree[1])  # The value of the leaf
        return rule
    elif label == "Attribute":
        rule.append("AND")
        # NOTE(review): tree[1][-1] only captures the last character, so this
        # assumes at most 10 attributes ("att0".."att9") — confirm upstream.
        if attribute_names == None:
            rule.append(tree[1])
        else:  # [-1]st spot of att labels is the index
            att_index = int(tree[1][-1])
            rule.append(attribute_names[att_index])
        rule.append("==")
        # There will be more to the initial tree beyond the leaf we run into here, because of this attribute split (e.g. one rule where phd = yes and one where phd = no)
        index = 2  # Values start at index 2
        new_rules = []
        while index < len(tree):  # Go through the values on each partition (e.g. Junior, Mid and Senior)
            # Calculate the branch (initial attribute that's already in there is passed in as rule)
            new_rule = make_rule_for_one_branch(tree[index], attribute_names, class_name, rule)
            new_rules.append(new_rule)
            index += 1
            if index < len(tree):  # Check if we've hit the end of the tree (and if so, don't add any more rules)
                # Start a fresh rule prefix for the next value branch
                rule = []
                if attribute_names == None:
                    rule = [tree[1], "=="]
                else:  # [-1]st spot of att labels is the index
                    att_index = int(tree[1][-1])
                    rule = [attribute_names[att_index], "=="]
        return new_rules
    else:  # Otherwise, it's a value
        rule.append(tree[1])
        return make_rule_for_one_branch(tree[2], attribute_names, class_name, rule)  # Recurse on subtree
30a92a08694a500b5c218db4a77cb20dae691d15 | Python | valleyceo/code_journal | /1. Problems/l. Design/h. Application - Design a Food Rating System.py | UTF-8 | 2,074 | 4.34375 | 4 | [] | no_license | # 2353. Design a Food Rating System
'''
Design a food rating system that can do the following:
Modify the rating of a food item listed in the system.
Return the highest-rated food item for a type of cuisine in the system.
Implement the FoodRatings class:
FoodRatings(String[] foods, String[] cuisines, int[] ratings) Initializes the system. The food items are described by foods, cuisines and ratings, all of which have a length of n.
foods[i] is the name of the ith food,
cuisines[i] is the type of cuisine of the ith food, and
ratings[i] is the initial rating of the ith food.
void changeRating(String food, int newRating) Changes the rating of the food item with the name food.
String highestRated(String cuisine) Returns the name of the food item that has the highest rating for the given type of cuisine. If there is a tie, return the item with the lexicographically smaller name.
Note that a string x is lexicographically smaller than string y if x comes before y in dictionary order, that is, either x is a prefix of y, or if i is the first position such that x[i] != y[i], then x[i] comes before y[i] in alphabetic order.
'''
from sortedcontainers import SortedList
class FoodRatings:
    """Tracks food ratings and answers best-food-per-cuisine queries.

    foodMap:    food name -> (cuisine, current rating)
    cuisineMap: cuisine -> SortedList of (-rating, food name), so the best
                (highest rating, lexicographically smallest) food is always
                at index 0.
    """

    # O(n log n) time: one sorted insert per food
    def __init__(self, foods: List[str], cuisines: List[str], ratings: List[int]):
        self.foodMap = {}
        self.cuisineMap = defaultdict(SortedList)
        for food, cuisine, rating in zip(foods, cuisines, ratings):
            self.foodMap[food] = (cuisine, rating)
            self.cuisineMap[cuisine].add((-rating, food))

    # O(log n) time: one sorted remove plus one sorted insert
    def changeRating(self, food: str, newRating: int) -> None:
        cuisine, oldRating = self.foodMap[food]
        ranking = self.cuisineMap[cuisine]
        ranking.remove((-oldRating, food))
        ranking.add((-newRating, food))
        self.foodMap[food] = (cuisine, newRating)

    # O(1) time: the best entry sits at the front of the sorted list
    def highestRated(self, cuisine: str) -> str:
        best_entry = self.cuisineMap[cuisine][0]
        return best_entry[1]
# Your FoodRatings object will be instantiated and called as such:
# obj = FoodRatings(foods, cuisines, ratings)
# obj.changeRating(food,newRating)
# param_2 = obj.highestRated(cuisine)
| true |
31603e170bb6d18df4dcac6817d8e8cf26bca2e4 | Python | HuiZhangDB/LofiBox | /processing/lofiBox/lofiBox.pyde | UTF-8 | 4,436 | 2.5625 | 3 | [
"MIT"
] | permissive | add_library('serial')
add_library('sound')
# add_library('oscP5')
class capPad(object):
    """Position of a pad is the center position of its bottom side.

    One trapezoidal touch pad drawn around the octagon; holds its geometry,
    activation state, and (after addSound) the sound it triggers.
    Relies on Processing (Python mode) drawing builtins.
    """
    def __init__(self, posX, posY, rotate_ang=0):
        super(capPad, self).__init__()
        self.posX = posX
        self.posY = posY
        self.rotate_ang = rotate_ang
        # Trapezoid geometry in pixels
        self.upSide = 40
        self.downSide = 110
        self.height = 80
        self.activated = False
        self.soundType = 0  # falsy until addSound() assigns a real type
    def display(self):
        """Draw the pad; highlighted when activated."""
        noStroke()
        if self.activated:
            color = 0xFFFF968D  # salmon highlight while touched
        else:
            color = 255
        fill(color)
        pushMatrix()
        translate(self.posX, self.posY)
        rotate(self.rotate_ang)
        quad(-self.upSide/2,-self.height, self.upSide/2, -self.height, self.downSide/2, 0, -self.downSide/2, 0)
        popMatrix()
    def activate(self):
        """Mark the pad touched and start looping its sound (if any)."""
        self.activated = True
        # if self.soundType=='melody':
        #     if not self.soundfile.isPlaying():
        #         self.soundfile.play()
        # elif self.soundType=='drum':
        #     self.soundfile.play()
        if self.soundType:
            if not self.soundfile.isPlaying():
                self.soundfile.loop()
    def deactivate(self):
        """Mark the pad released and stop its sound.

        NOTE(review): unconditionally touches self.soundfile — raises
        AttributeError if called before addSound(); confirm callers always
        assign a sound first.
        """
        self.activated = False
        # if self.soundType=='melody':
        #     self.activated = self.soundfile.isPlaying()
        # else:
        #     self.activated = False
        if self.soundfile.isPlaying():
            self.soundfile.stop()
    def addSound(self, soundfile, soundType):
        """Attach a SoundFile and its type label to this pad."""
        self.soundfile = soundfile
        self.soundType = soundType
def read_touch_from_port(myPort):
    """Poll the serial port and toggle pads from the reported touch state.

    Reads one newline-terminated status string; a pad is active exactly when
    its pin label (capPins[i]) appears in the string. Uses the module-level
    capPins and capPads globals.
    """
    if myPort.available()>0:
        state_str = myPort.readStringUntil(10) #10==\n
        # Ignore short/partial reads; a valid report is tab-separated
        if state_str != None and len(state_str) >= 4 and '\t' in state_str:
            # usually state_str == (u'Cn\r\n', 6)
            print(state_str)
            for i in range(8):
                if capPins[i] in state_str:
                    capPads[i].activate()
                else:
                    capPads[i].deactivate()
def setup():
    """Processing setup: window, pads, serial port, sounds, and audio analysis.

    Populates the module globals (capPads, capPins, myPort, fft, amp, ...)
    used by draw() and the input handlers.
    """
    global myPort
    global IF_USE_ARDUINO
    global oct_img
    global capPads
    global capPins
    global fft
    global amp
    fullScreen()
    # for test:
    # size(640,640)
    background(0,0,0)
    IF_USE_ARDUINO = True
    # print(height, width)
    imageMode(CENTER)
    oct_img = loadImage("octagon.png")
    oct_diameter = 350
    oct_img.resize(0,oct_diameter)
    # Arrange the 8 pads evenly around the octagon's circumference
    capPads = []
    for i in range(8):
        rad = i*TWO_PI/8
        posX = width/2 + cos(rad) * oct_diameter/2
        posY = height/2 + sin(rad) * oct_diameter/2
        rotate_ang = 3*PI/2 + rad
        cap = capPad(posX, posY, rotate_ang)
        capPads.append(cap)
    if IF_USE_ARDUINO:
        # NOTE(review): assumes the Arduino is the last listed serial port — confirm
        portName = Serial.list()[len(Serial.list()) - 1]
        # portName = u'/dev/tty.usbmodem14101'
        myPort = Serial(this, portName, 9600)
    # Pin labels in the same order the pads were created
    capPins = ['C4', 'C2', 'C1', 'C3', 'C5', 'C8', 'C7', 'C6']
    # drums = [f for f in os.listdir('data/drum') if f[-4:]=='.wav']
    # melodys = [f for f in os.listdir('data/melody') if f[-4:]=='.wav']
    # for i in range(5):
    #     sf = SoundFile(this, os.path.join('data/drum', drums[i]))
    #     capPads[i].addSound(sf, 'drum')
    # for i in range(5,8):
    #     sf = SoundFile(this, os.path.join('data/melody', melodys[i-5]))
    #     capPads[i].addSound(sf, 'melody')
    wavs = ['bell_shaker.wav', 'top70.wav', 'melody70.wav', 'creek_with_snaps.wav', 'lofi-all-fx.wav', 'lofidrum.wav', 'lofichord.wav', 'lofiVoice.wav']
    for i in range(8):
        sf = SoundFile(this, wavs[i])
        capPads[i].addSound(sf, 'loop')
    # Spectrum + amplitude analysis of the microphone input
    bands = 64
    fft = FFT(this, bands)
    inSound = AudioIn(this)
    inSound.start()
    fft.input(inSound)
    amp = Amplitude(this)
    amp.input(inSound)
def mousePressed():
    # Debug aid: log where the user clicked
    print(mouseX, mouseY)
def keyPressed():
    """Keyboard fallback: number keys 1-8 activate the matching pad."""
    # 49~56 == 1~8 (ASCII codes)
    if keyCode in range(49,57):
        capPads[keyCode-49].activate()
def keyReleased():
    """Release the pad activated by the matching number key."""
    # 49~56 == 1~8 (ASCII codes)
    if keyCode in range(49,57):
        capPads[keyCode-49].deactivate()
def draw():
    """Per-frame loop: read input, then redraw octagon, pads, and spectrum."""
    background(0,0,0)
    if IF_USE_ARDUINO:
        read_touch_from_port(myPort)
    else:
        # Keyboard mode: release the melody pads when no key is held
        if not keyPressed:
            for i in range(5,8):
                capPads[i].deactivate()
    image(oct_img, width/2, height/2)
    for capP in capPads:
        capP.display()
    draw_spec_amp()
def draw_spec_amp():
    """Draw an FFT bar spectrum along the bottom and a volume circle in the center."""
    spectrum = fft.analyze()
    # Only show the lower half of the 64 bands
    spectrum_2_show = spectrum[:32]
    rectMode(CENTER)
    colorMode(HSB)
    num_bars = len(spectrum_2_show)
    for i in range(num_bars):
        barWidth = width / num_bars
        xPos = barWidth * i
        c = 255 / num_bars * i  # hue sweeps across the bars
        fill(c, 100, 100);
        rect(xPos, height, barWidth-5, spectrum[i]*(height/2*5))
    # Get Volume
    vol = amp.analyze()
    #volume indicator for center, left, right speaker
    noStroke()
    fill(40)
    ellipse(width/2,height/2, 200*vol, 200*vol)
| true |
5adb5d601bf42971e2531ac12b22a2072f044b96 | Python | Ziyear/python_learn | /day_08_数据结构和算法/3.数据结构_保存学生.py | UTF-8 | 471 | 3.078125 | 3 | [] | no_license | # 集合-元组
# Sample data: the same two student records modeled three different ways.
# list of tuples: positional fields (name, age, address)
student_list_tuple = [
    ("zhangsan", 18, "河南"),
    ("lisi", 19, "北京")
]
# list of dicts: each student is a dict of named fields
student_list_dic = [
    {
        "name": "zhangsan",
        "age": 18,
        "add": "河南"
    },
    {
        # NOTE(review): "lise" is probably a typo for "lisi" — confirm
        "name": "lise",
        "age": 19,
        "add": "北京"
    }
]
# dict of dicts: keyed by student name for direct lookup
student_dic_dic = {
    "zhangsan": {
        "age": 18,
        "add": "河南"
    },
    "lisi": {
        "age": 19,
        "add": "北京"
    }
}
| true |
9006ba80c7266e3551e2fcd295a0790b06098ab6 | Python | KonradChlupka/LeetCode | /solutions/bitwise_and_of_numbers_range/main.py | UTF-8 | 205 | 2.765625 | 3 | [] | no_license | class Solution:
def rangeBitwiseAnd(self, m: int, n: int) -> int:
for i in range(32):
if m == n:
break
m >>= 1
n >>= 1
return m << i
| true |
bc5a72524d89f7a8445f9a65239288a050006f82 | Python | dkothari777/Research | /diffApi2.0.py | UTF-8 | 1,100 | 2.84375 | 3 | [] | no_license | #!/usr/bin/python
# Python 2 script: reports which lines of file1 (the API list) are missing
# from file2 (the trace), using a resumable two-pointer scan.
from sys import *
if len(argv) < 3:
    print "Usage: " + argv[0] + " file1 file2"
    print "File1 : main api file (generally the smaller one)"
    print "File2 : the trace with hooks applied file (generally the larger one)"
    exit(0)
file1 = open(argv[1], "r")
file2 = open(argv[2], "r")
line1 = file1.readlines()
line2 = file2.readlines()
buffr = ""  # indented file2 lines skipped since the last match
x = 0  # cursor into file1
y = 0  # cursor into file2
s = 0  # file2 position to rewind to when a file1 line is not found
while(x != len(line1)):
    if( y == len(line2)):
        # Exhausted file2 without matching line1[x]: report it, rewind file2
        j = line1[x].rsplit()
        print "\t Did not find " + j[0] + " at line " + str(x)
        x = x+1
        y = s
        print "...Moving on file1 at line " +str(x) + " and file2 at line " + str(y)
        buffr = ""
    elif(line1[x] == line2[y]):
        # Match: flush any skipped file2 lines, then print the matched token
        if(buffr != ""):
            print buffr
        j = line1[x].rsplit()
        print j[0]
        buffr = ""
        x = x + 1
        y = y + 1
    else:
        # Mismatch: remember where to rewind and collect the skipped line
        if(buffr == ""):
            s = y+1
        j = line2[y].rsplit()
        if(buffr == ""):
            buffr = "\t\t\t" + j[0]
        else:
            buffr = buffr + "\n\t\t\t" + j[0]
        y = y + 1
print buffr
| true |
bd4a2b9d39a853d8653f2473133476cdb0d746da | Python | vlaksi/OsnovneRacunarstva-BMI | /Predavanja/01 Racunari i programi/primeri/primer1.py | UTF-8 | 44 | 2.953125 | 3 | [] | no_license | def saberi(x,y):
print(x+y)
saberi(3,4) | true |
91fd9175561c4638fc7d8ee5274ee2e44805bf88 | Python | KadeNethercott/JoustLandiaPygame | /JoustLandia/JoustL/TextBox.py | UTF-8 | 7,760 | 2.765625 | 3 | [] | no_license | #!usr/bin/python
import random, time, pygame, copy, sys, os
from pygame.locals import *
GRAY = (185, 185, 185)
BLACK = ( 0, 0, 0)
RED = (155, 0, 0)
WHITE = (255, 255, 255, 100)
class TextBox:
    """Modal pygame text-entry dialog (Python 2).

    startUp() runs its own event loop and blocks until the player submits
    a non-empty string (returned) or clicks the close button (returns False).
    """
    def __init__(self):
        self.string = ''  # text typed so far
    def startUp(self, width, height, surf, message):
        """Show the dialog on surf and run the input loop.

        Args:
            width, height: dimensions used to center the dialog
            surf: pygame surface to draw on
            message: title text rendered above the input field

        Returns:
            The entered string, or False if the player closed the box.
        """
        pygame.mixer.pre_init(44100, -16, 2, 2048)
        pygame.init()
        FPSCLOCK = pygame.time.Clock()
        COLOR1 = RED  # submit-button color (BLACK on hover)
        COLOR2 = RED  # close-button color (BLACK on hover)
        COLOR3 = BLACK  # caret color (alternates to blink)
        addToString = ''
        self.string=''
        blinkWaitTime = 0
        blinkXPos = width/2-250
        ##create fonts
        font = pygame.font.Font(os.path.join('graphics','FancyText.ttf'),55)
        font2 = pygame.font.Font(os.path.join('graphics','FancyText3.ttf'),40)
        nameSurfaceObj = font.render(message, True, RED)
        nameRectObj =nameSurfaceObj.get_rect()
        ## sound for button clicking
        BUTTONCLICKSOUND = pygame.mixer.Sound(os.path.join('sounds', 'buttonClick.wav'))
        closeRectObj =nameSurfaceObj.get_rect()
        ## Images and locations
        paper = pygame.image.load(os.path.join('graphics','oldPaperFullShadow.png'))
        paperRectObj = paper.get_rect()
        paperRectObj.center = (width/2, height/2)
        nameRectObj.center = (width/2, (height/2)-55)
        errorSurfaceObj = font.render("Not A Valid Key", True, RED)
        errorRectObj =errorSurfaceObj.get_rect()
        errorRectObj.center = (width/2, (height/2))
        nameErrorSurfaceObj = font.render('Please Enter a Name', True, COLOR1)
        nameErrorRectObj = nameErrorSurfaceObj.get_rect()
        nameErrorRectObj.center = ((width/2),( (height/2)))
        while(True): ## textbox loop
            ##images that change based on user input
            submitSurfaceObj = font.render('Submit', True, COLOR1)
            submitRectObj = submitSurfaceObj.get_rect()
            submitRectObj.center = ((width/2),( (height/2) +40))
            closeSurfaceObj = font.render('X', True, COLOR2)
            closeRectObj =closeSurfaceObj.get_rect()
            closeRectObj.center = ( ((width/2)+285), ((height/2)-45))
            blinkLineSurfaceObj = font2.render('|', True, COLOR3)
            blinkLineRectObj =blinkLineSurfaceObj.get_rect()
            blinkLineRectObj.center = ( blinkXPos, ((height/2)+5))
            stringSurfaceObj = font2.render(self.string, True, BLACK)
            stringRectObj =stringSurfaceObj.get_rect()
            stringRectObj.topleft = ( ((width/2)-250), ((height/2)-34))
            surf.blit(paper, paperRectObj)
            surf.blit(nameSurfaceObj, nameRectObj)
            surf.blit(closeSurfaceObj, closeRectObj)
            surf.blit(submitSurfaceObj, submitRectObj)
            surf.blit(blinkLineSurfaceObj, stringRectObj.topright)
            surf.blit(stringSurfaceObj, stringRectObj)
            ##highlight buttons
            mouseX, mouseY = pygame.mouse.get_pos()
            if(submitRectObj.collidepoint((mouseX, mouseY))):
                COLOR1 = BLACK
            else:
                COLOR1 = RED
            if(closeRectObj.collidepoint((mouseX, mouseY))):
                COLOR2 = BLACK
            else:
                COLOR2 = RED
            for event in pygame.event.get(): ## event loop
                if event.type == QUIT:
                    pygame.quit()
                    sys.exit()
                if event.type == MOUSEBUTTONDOWN:
                    mousex, mousey = event.pos
                    if submitRectObj.collidepoint( (mousex, mousey) ):
                        if(len(self.string)>0): ## if the player entered a string then return it
                            BUTTONCLICKSOUND.play()
                            pygame.time.wait(500)
                            return self.string
                        else: ## if nothing entered but still sumbited then show error
                            BUTTONCLICKSOUND.play()
                            pygame.time.wait(500)
                            surf.blit(nameErrorSurfaceObj, nameErrorRectObj)
                            pygame.display.update()
                            pygame.time.wait(1000)
                    elif closeRectObj.collidepoint( (mousex, mousey) ): ## if player clicks the x button, exits with false
                        BUTTONCLICKSOUND.play()
                        pygame.time.wait(500)
                        return False
                if event.type == KEYUP:
                    key = pygame.key.name(event.key) ## allow for upper case letters
                    if pygame.key.get_mods() & KMOD_SHIFT:
                        self.string+=key.upper()
                    elif(key == 'backspace' or key == 'delete'): ## allow deletion of characters
                        if(len(self.string)):
                            lastKey = self.string[-1]
                            self.string = self.string[:-1]
                    elif(key == 'return'): ## return works as submit
                        if(len(self.string)>0):
                            BUTTONCLICKSOUND.play()
                            pygame.time.wait(500)
                            return self.string
                        else:
                            surf.blit(nameErrorSurfaceObj, nameErrorRectObj)
                            pygame.display.update()
                            pygame.time.wait(1000)
                    elif(key == 'tab' or key == 'space'): ## don't allow tab or space keys
                        surf.blit(errorSurfaceObj, errorRectObj)
                        pygame.display.update()
                        pygame.time.wait(400)
                        print "not a valid key"
                    elif(key == "right shift" or key == "left shift"): ## don't throw an error on shift keys
                        pass
                    else:
                        try:
                            # chr() raises for non-printable key codes
                            self.string+= chr(event.key) ## add characters to the string
                        except:## show error if key not allowed as input
                            surf.blit(errorSurfaceObj, errorRectObj)
                            pygame.display.update()
                            pygame.time.wait(400)
                            print 'not a valid key'
            # Toggle the caret color roughly every 360 ms
            blinkWaitTime = blinkWaitTime + FPSCLOCK.get_time()
            if(blinkWaitTime > 360):
                if(COLOR3 == WHITE):
                    COLOR3 = BLACK
                else:
                    COLOR3=WHITE
                blinkWaitTime = 0
            pygame.display.update()
            FPSCLOCK.tick(25)
    def getInput(self):
        """Return the last entered string."""
        return self.string
def test():
    """Manual smoke test: open a window, run the dialog, then idle until quit."""
    pygame.init()
    surf = pygame.display.set_mode((1100, 700))
    surf.fill(GRAY)
    myBox = TextBox()
    myBox.startUp(800, 700, surf, 'Enter Name')
    surf.fill(GRAY)
    pygame.display.update()
    # Keep the window responsive after the dialog closes
    while(True):
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
if __name__ == '__main__':
test()
| true |
b3152a138ad883aba347eccfec517dc0d7161769 | Python | yeleman/bolibana | /auth/utils.py | UTF-8 | 3,336 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python
# encoding=utf-8
# maintainer: rgaudin
import unicodedata
import random
import re
from django.contrib.auth.models import User
PASSWORD_LENGTH = 8
PASSWORD_LENGTH_SAMPLE = 4
USERNAME_MIN_LENGTH = 4
USERNAME_MAX_LENGTH = 8
def random_password():
    """Return a PASSWORD_LENGTH-character lowercase alphanumeric password.

    Restricted to characters that are easy to type on a mobile keypad.
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyz1234567890'
    chars = [random.choice(alphabet) for _ in range(PASSWORD_LENGTH)]
    return ''.join(chars)
def random_sample_password():
    """ random sample password suitable for mobile typing

    Builds PASSWORD_LENGTH_SAMPLE consecutive alphabet letters starting at a
    random position (wrapping past 'z'), then appends one random digit —
    PASSWORD_LENGTH_SAMPLE + 1 characters total.
    """
    num_chars = PASSWORD_LENGTH_SAMPLE
    letters = 'abcdefghijklmnopqrstuvwxyz'
    index = random.randint(0, len(letters) - 1)
    password = letters[index]
    num_chars -= 1
    while num_chars:
        num_chars -= 1
        index += 1
        try:
            password += letters[index]
        except IndexError:
            # Wrap around from 'z' back to 'a'
            password += letters[index - 26]
    postfix = random.randint(0, 9)
    password += str(postfix)
    return(password)
def username_from_name(first_name, last_name):
    """ available username to use on User forged from first and last name

    Slugs both names to ASCII, builds a "jdoe"-style candidate, and appends /
    increments a numeric suffix until the name is free in the User table.
    Python 2 code (uses unicode()).
    """
    def new_slug(text, salt=None):
        """ assemble text and salt providing optimum length """
        if salt:
            username = text[:(USERNAME_MAX_LENGTH - salt.__len__())] + salt
        else:
            username = text[:USERNAME_MAX_LENGTH]
        if username.__len__() < USERNAME_MIN_LENGTH:
            # Right-pad with 'a' up to the minimum length
            username = "{0:{1}<{2}}".format(username, "a", USERNAME_MIN_LENGTH)
        return username
    def is_available(username):
        """ DB check for username use """
        return User.objects.filter(username=username).count() == 0
    def jdoe(first, last):
        """ first name initial followed by last name format """
        return u"%s%s" % (first[0], last)
    def johndoe(first, last):
        """ first name followed by last name format (currently unused) """
        return u"%s%s" % (first, last)
    def iterate(username):
        """ adds and increment a counter at end of username """
        # make sure username matches length requirements
        username = new_slug(username)
        if not is_available(username):
            # find the counter if any
            sp = re.split(r'([0-9]+)$', username)
            if sp.__len__() == 3:
                # increment existing counter
                username = sp[0]
                salt = unicode(int(sp[1]) + 1)
            else:
                # start counter at 1
                salt = '1'
            # bundle counter and username then loop
            return iterate(new_slug(username, salt))
        else:
            # username is available
            return username
    def string_to_slug(s):
        # Strip accents, then drop anything outside [a-z0-9-]
        raw_data = s
        try:
            raw_data = unicodedata.normalize('NFKD',
                                             raw_data.decode('utf-8',
                                                             'replace'))\
                                  .encode('ascii', 'ignore')
        except:
            # NOTE(review): bare except silently keeps the undecoded value —
            # probably for already-unicode input; confirm
            pass
        return re.sub(r'[^a-z0-9-]+', '', raw_data.lower()).strip()
    # normalize first and last name to ASCII only
    first_name = string_to_slug(first_name.lower())
    last_name = string_to_slug(last_name.lower())
    # iterate over a jdoe format
    return iterate(jdoe(first_name, last_name))
| true |
93d8b4f124c864f25ff0fb51586465abd1dfe224 | Python | fmder/chessmate | /chessmate/features/engine.py | UTF-8 | 3,632 | 2.921875 | 3 | [] | no_license | from typing import Union
import chess.engine
MATERIAL_VALUE = {
"p": -1, "n": -3, "b": -3, "r": -5, "q": -9, "k": 0,
"P": 1, "N": 3, "B": 3, "R": 5, "Q": 9, "K": 0
}
stockfish_path = "/usr/local/bin/stockfish"
def _start_engine(path) -> Union[chess.engine.SimpleEngine, None]:
    """Launch a UCI engine at path; return None if the binary is missing."""
    try:
        return chess.engine.SimpleEngine.popen_uci(path)
    except FileNotFoundError:
        return None
class EngineFeature:
    """Base class for features that query a shared Stockfish instance.

    NOTE(review): `engine` is a class attribute started at import time and
    shared by all subclasses/instances, yet __del__ closes it per instance —
    deleting one feature object kills the engine for every other; confirm
    intended lifetime.
    """
    # TODO: need config file
    engine = _start_engine(stockfish_path)
    def _get_line(self, board, depth) -> list[chess.Move]:
        """Return the engine's principal variation for board at the given depth."""
        if self.engine is None:
            raise RuntimeError
        analysis = self.engine.analyse(board, chess.engine.Limit(depth=depth))
        return analysis["pv"]
    def _get_pov_score(self, board, pov, depth, mate_score=1000):
        """Return the engine score for board from pov's point of view (mates mapped to mate_score)."""
        if self.engine is None:
            raise RuntimeError
        analysis = self.engine.analyse(board, chess.engine.Limit(depth=depth))
        pov_score = analysis["score"].pov(pov).score(mate_score=mate_score)
        return pov_score
    def __del__(self):
        if self.engine is not None:
            self.engine.close()
class RawMaterialScores(EngineFeature):
    """Computes the raw material scores at given plies following the best
    line for this move."""
    def __init__(self, *, depth=None, relative_plies=None):
        """Configure which plies of the best line to sample.

        Exactly one of depth (sample every ply up to depth) or
        relative_plies (sample only the listed ply offsets) must be given.
        """
        if (depth is None) == (relative_plies is None):
            raise ValueError("One and only one of depth and plies should be provided")
        if depth is not None:
            self.depth = depth
            self.use_ply = [True] * depth
        elif relative_plies is not None:
            self.depth = max(relative_plies) + 1
            self.use_ply = [i in relative_plies for i in range(self.depth)]
        self._feature_length = sum(self.use_ply) + 1
    def _raw_material_score(self, board, pov) -> float:
        """Sum of piece values on board from pov's point of view (pawn=1 ... queen=9)."""
        score = sum(MATERIAL_VALUE[p.symbol()] for _, p in board.piece_map().items())
        if pov == chess.BLACK:
            # Invert black score
            score *= -1
        return score
    def __call__(self, board: chess.Board) -> list[float]:
        """Return material scores along the engine's best line; NaN-padded
        to the configured feature length when the line is shorter."""
        # We are in a situation where we evaluate a given move (already played here).
        # So if we want to evaluate the value of a move it is the value as the
        # other player (which is ours!)
        us = not board.turn
        # BUG FIX: work on a copy — the moves pushed below used to stay on the
        # caller's board, corrupting its move stack.
        board = board.copy()
        # First move is already done, get current material score
        material_scores: list[float] = []
        if len(self.use_ply) > 1 and self.use_ply[0]:
            material_scores.append(self._raw_material_score(board, us))
        line = self._get_line(board, self.depth + 2)
        for move, use_ply in zip(line, self.use_ply):
            board.push(move)
            if not use_ply:
                continue
            material_scores.append(self._raw_material_score(board, us))
        if len(material_scores) < self._feature_length:
            material_scores += [float("nan")] * (self._feature_length - len(material_scores))
        return material_scores
class EngineEstimate(EngineFeature):
    """Computes the engine estimate score for this move """
    def __init__(self, depth, mate_score=318):
        # depth: engine search depth; mate_score: centipawn value substituted for mates
        self.depth = depth
        self.mate_score = mate_score
    def __call__(self, board) -> float:
        """Return the engine score of the position from the mover's point of view."""
        # We are in a situation where the move has already been played
        # So if we want to evaluate the value this move value it is seen as the
        # previous player point of view
        us = not board.turn
        return self._get_pov_score(board, us, self.depth, self.mate_score)
| true |
33adbc66f9bbba11e3ea41ecc2abe3946cd6bef5 | Python | lagrassa/rl-erase | /old/erase_controller_pkg/scripts/cv/masking.py | UTF-8 | 1,149 | 3.09375 | 3 | [] | no_license | #first case: marker and not white, set to 1
#second case, not marker and not white, set to prior
#third case: white and not marker, set to 0
#fourth case: white and marker: set to 0.5
import numpy as np
import cv2
import pdb
def p_erased_fast(prior, marker, white, lr =1):
    """Update the per-pixel probability that the board is erased.

    Combines three mutually exclusive cases per pixel (masks are 0/255 uint8):
      - neither marker nor white: keep the prior
      - white and not marker:     definitely erased (1)
      - marker and white:         conflicting evidence (0.5)
    (marker-and-not-white pixels contribute 0.) The result is blended with
    the prior using learning rate lr.
    """
    not_white = cv2.bitwise_not(white)
    not_marker = cv2.bitwise_not(marker)
    # Dividing by 255 turns each 0/255 mask into a 0/1 weight
    uncertain = prior*cv2.bitwise_and(not_white, not_marker)/255
    def_white = cv2.bitwise_and(white, not_marker)/255
    confused = 0.5*cv2.bitwise_and(marker, white)/255
    new = uncertain + def_white + confused
    return new*lr + prior*(1-lr)
if __name__ == "__main__":
#run test
#test case:
# prior 0.5
# marker white
marker = np.array([[0,255],[255,0]]).astype(np.uint8)
white = np.array([[0,255],[0,255]]).astype(np.uint8)
prior = np.array([[0.42,0],[0,0]])
result = p_marked(prior, marker,white)
correct_result = np.array([[0.42,0.5],[0,1]])
if np.array_equal(result, correct_result):
print("lr = 1 test passed!")
else:
print "Your array ",result
print "Correct array ",correct_result
| true |
64498ee9519bda12f033f8f92af8cd52656d03cb | Python | boredhero/BDW-LP-Kitchen-Sink | /downloads/scrape.py | UTF-8 | 592 | 2.765625 | 3 | [] | no_license | import cfscrape
import subprocess
def main():
    """Read URLs from dl_client.txt and curl each one with Cloudflare cookies."""
    links_file = open('dl_client.txt', 'r')
    lines = links_file.readlines()
    scraper = cfscrape.create_scraper()  # NOTE(review): created but never used — confirm
    count = 0
    # Strip newline char
    # NOTE(review): readlines() keeps the trailing '\n', so line != '' is
    # always true and the URL passed to curl still contains the newline — confirm
    for line in lines:
        count += 1
        print(line)
        if(line != ''):
            cookie_arg, user_agent = cfscrape.get_cookie_string(line)
            # WARNING: shell=True with an unescaped URL from the input file is
            # a shell-injection risk; only run against trusted input.
            cmd = "curl --cookie {cookie_arg} -A {user_agent} {url}"
            print(subprocess.check_output(cmd.format(cookie_arg=cookie_arg, user_agent=user_agent, url=line), shell=True))
if __name__ == "__main__":
main() | true |
ff1ab5e846fa005635d03afb11a7d1f8adfa8429 | Python | worksking/algrithm | /QuickSort.py | UTF-8 | 2,093 | 3.703125 | 4 | [] | no_license | import numpy as np
import time
# def QuickSort(ls, start, end):
# '''
# 快速排序
# 算法描述:
# 1.从数列中挑出一个元素,称为 “基准”(pivot);
# 2.重新排序数列,所有元素比基准值小的摆放在基准前面,所有元素比基准值大的摆在基准的后面(相同的数可以到任一边)。
# 在这个分区退出之后,该基准就处于数列的中间位置。这个称为分区(partition)操作;
# 3.递归地(recursive)把小于基准值元素的子数列和大于基准值元素的子数列排序。
# 参考资料:
# https://www.cnblogs.com/AlwinXu/p/5424905.html
# https://www.cnblogs.com/kunpengv5/p/7833361.html
# https://www.toutiao.com/a6622232676961108487/?app=news_article
# '''
# if start < end:
# left = start
# right = end
# base = ls[left]
# while left < right:
# while left < right and ls[right] >= base:
# right -= 1
# ls[left] = ls[right]
# while left < right and ls[left] <= base:
# left += 1
# ls[right] = ls[left]
# ls[left] = base
# QuickSort(ls, start, left-1)
# QuickSort(ls, left+1, end)
# return ls
def Partition(ls, start, end):
    """Partition ls[start:end+1] around ls[start] (the pivot), in place.

    After the call, elements <= pivot sit left of the returned index and
    elements >= pivot sit right of it, with the pivot at the returned index.
    """
    left = start
    right = end
    base = ls[left]  # pivot value
    while left < right:
        # Shrink from the right past elements >= pivot
        while left < right and ls[right] >= base:
            right -= 1
        # Advance from the left past elements <= pivot
        while left < right and ls[left] <= base:
            left += 1
        if left < right:
            ls[left], ls[right] = ls[right], ls[left]
    # Drop the pivot into its final slot (left == right here)
    ls[start]= ls[right]
    ls[right] = base
    return right
def QuickSort(ls, start, end):
    """Sort ls[start:end+1] in place with quicksort; returns ls for convenience."""
    if start < end:
        pivot_index = Partition(ls, start, end)
        # The pivot is final; recurse on the two remaining sides
        QuickSort(ls, start, pivot_index - 1)
        QuickSort(ls, pivot_index + 1, end)
    return ls
if __name__ == '__main__':
    # Demo: sort a random permutation of 0..9
    a = np.random.permutation(10)
    print(a,'\n')
    b = QuickSort(a, 0, len(a)-1)
    print(b)
| true |
baf87f1f0fe00fc3963a57a031c53397df02b3cf | Python | Loulou95/210CT | /Vowels.py | UTF-8 | 160 | 3.609375 | 4 | [] | no_license | mytext = 'beautiful!'
vowels = ['a', 'e', 'i', 'o', 'u']
# Keep every character of mytext that is not a vowel
result = ''.join(ch for ch in mytext if ch not in vowels)
print(result)
| true |
2090287aa781559f4927faf230b9fbc71f203de1 | Python | nayan-gujju/Django-Practice | /gs39CustomAndBuilt-inValidations/enroll/forms.py | UTF-8 | 467 | 2.5625 | 3 | [] | no_license | from django import forms
from django.core import validators
def start_with_s(value):
    """Custom validator: reject any value whose first character is not 's'."""
    if value[0] != 's':
        raise forms.ValidationError('Email should be start with s........')
class StudentInfo(forms.Form):
    """Demo form contrasting Django's built-in validators with a custom one."""
    #this is built in validators
    name = forms.CharField(validators=[validators.MaxLengthValidator(10), validators.MinLengthValidator(5)])
    #this is custom validators
    email = forms.EmailField(validators=[start_with_s])
| true |
049d23fd661a3c11c8c90f230d47d4f64aca9ab3 | Python | MakerIris/DesmondAI | /Cameron ai assitant v3.py | UTF-8 | 13,152 | 2.53125 | 3 | [] | no_license | import sys
import time
import os
import pyttsx
import webbrowser
import speech_recognition as sr
import Tkinter as tk
API_AI_CLIENT_ACCESS_TOKEN = "Your Api.ai key goes here or" # api.ai keys are 32-character lowercase hexadecimal strings
WIT_AI_KEY = "your wit.ai key goes here"
r = sr.Recognizer()
engine = pyttsx.init()
voices = engine.getProperty('voices')
def say(text):
    """Speak text aloud (and print it), using the voice chosen in gender.txt.

    "male" uses the default voice, "female" switches to voices[1]; if the
    file is missing, falls back to the default voice.
    NOTE(review): any other file content (e.g. "") speaks nothing — confirm.
    """
    try:
        genfile = open('gender.txt', 'r')
        gend = genfile.read()
        genfile.close()
        if gend == "male":
            engine.say(text)
            print text
            engine.runAndWait()
        elif gend == "female":
            engine.setProperty('voice', voices[1].id)
            engine.say(text)
            print text
            engine.runAndWait()
    except IOError:
        # No gender.txt yet: use whatever voice the engine currently has
        engine.say(text)
        print text
        engine.runAndWait()
def say2(text, text2):
try:
genfile = open('gender.txt', 'r')
gend = genfile.read()
genfile.close()
if gend == "male":
engine.say(text + text2)
print text + text2
engine.runAndWait()
elif gend == "female":
engine.setProperty('voice', voices[1].id)
engine.say(text + text2)
print text + text2
engine.runAndWait()
except IOError:
engine.say(text + text2)
print text + text2
engine.runAndWait()
comaskfile = 'comask.txt'
namefile = 'name.txt'
# OR subprocess.call([r"C:\Windows\system32\shutdown.exe", "/t", str(time)])
# specified the absolute path of the shutdown.exe
# The path may vary according to the installation.
app_list = "Unity Foldit MusicBee GW2 Arduino Chrome Firefox NXT Filezilla iTunes Steam OBS VLC Skype Python27 Word Dropbox"
command = ""
command_list = "leave help open (app name) applications Calculator web Command Line Update Settings"
try:
nafile = open(namefile, 'r')
name = nafile.read()
nafile.close()
say2("Welcome back, ", name)
except IOError:
say("Hello, I am Cameron, your Personal AI Assistant")
say("what is your name?")
name = raw_input('>> ')
say2("Hi ", name)
nafile = open(namefile, 'w')
nafile.write(name)
nafile.close()
try:
comask = open(comaskfile, 'r')
com = comask.read()
comask.close()
if com != "no":
while True:
say("Would you like me to open the command list?(yes or no)")
with sr.Microphone() as source:
print "Say command"
audio = r.listen(source)
try:
command = r.recognize_api(audio, client_access_token=API_AI_CLIENT_ACCESS_TOKEN)
except sr.RequestError as e:
command = r.recognize_wit(audio, key=WIT_AI_KEY)
except sr.RequestError as e:
command = r.recognize_google(audio)
time.sleep(1)
if command == "yes" :
print command_list
break
elif command == "no":
say("Would you like me to leave?(yes or no)")
with sr.Microphone() as source:
print "Say command"
audio = r.listen(source)
try:
command = r.recognize_api(audio, client_access_token=API_AI_CLIENT_ACCESS_TOKEN)
except sr.RequestError as e:
command = r.recognize_wit(audio, key=WIT_AI_KEY)
except sr.RequestError as e:
command = r.recognize_google(audio)
if commandA == "yes":
sys.exit()
else:
say("Ok")
break
else:
say("Please say yes or no")
say("I think you said")
say(command)
except IOError:
while True:
say("Would you like me to open the command list?(yes or no)")
with sr.Microphone() as source:
print "Say command"
audio = r.listen(source)
try:
command = r.recognize_api(audio, client_access_token=API_AI_CLIENT_ACCESS_TOKEN)
except sr.RequestError as e:
command = r.recognize_wit(audio, key=WIT_AI_KEY)
except sr.RequestError as e:
command = r.recognize_google(audio)
time.sleep(1)
if command == "yes" :
print command_list
break
elif command == "no":
say("Would you like me to leave?(yes or no)")
with sr.Microphone() as source:
print "Say command"
audio = r.listen(source)
try:
command = r.recognize_api(audio, client_access_token=API_AI_CLIENT_ACCESS_TOKEN)
except sr.RequestError as e:
command = r.recognize_wit(audio, key=WIT_AI_KEY)
except sr.RequestError as e:
command = r.recognize_google(audio)
if commandA == "yes":
sys.exit()
else:
say("Ok")
break
else:
say("Please say yes or no")
say("I think you said")
say(command)
while True:
command = ""
with sr.Microphone() as source:
print "say command"
audio = r.listen(source)
try:
command = r.recognize_api(audio, client_access_token=API_AI_CLIENT_ACCESS_TOKEN)
except sr.RequestError as e:
command = r.recognize_wit(audio, key=WIT_AI_KEY)
except sr.RequestError as e:
command = r.recognize_google(audio)
if command == "leave" or command == "live" or command == "leave it":
say("ok")
sys.exit()
elif command == "applications":
print app_list
elif command == "help":
print command_list
elif command == "open Unity" or command == "open unity" or command == "Open unity" or command == "Open Unity":
os.startfile('C:\Program Files\Unity\Editor\Unity')
elif command == "open foldit" or command == "open forget it" or command == "open phone app" or command == "open photos":
os.startfile('C://Foldit/Foldit')
elif command == "open guild wars 2":
os.startfile('C://Program Files (x86)/Guild Wars 2/Gw2')
elif command == "open Arduino" or command == "open we know" or command == "open agree now" or command == "open are doing now" or command == "open arduino" or command == "open or do you know":
os.startfile('C://Program Files (x86)/Arduino/arduino')
elif command == "open Chrome" or command == "open chrome":
os.system('start chrome')
elif command == "open iTunes" or command == "open itunes":
os.startfile('C://Program Files/iTunes/iTunes')
elif command == "open Steam" or command == "open steam":
os.system('start steam:')
elif command == "open Firefox" or command == "open firefox" or command == "open fire fox":
os.startfile('C://Program Files (x86)/Mozilla Firefox/firefox')
elif command == "open Filezilla" or command == "open file zelda" or command == "open file zilla" or command == "open filezilla" or command == "open files ella":
os.startfile('C://Program Files/FileZilla FTP Client')
elif command == "open NXT" or command == "open the t" or command == "open x t" or command == "open nxt" or command == "open n x t" or command == "open m a t" or command == "open a t":
os.startfile('C://Program Files (x86)/LEGO Software/LEGO MINDSTORMS Edu NXT/MINDSTORMSNXT')
elif command == "open OBS" or command == "open up yes" or command == "open o b s" or command == "open the yes":
os.startfile('C://Program Files (x86)/OBS/OBS')
elif command == "open VLC" or command == "open v l c" or command == "open v l v" or command == "open d l c" or command == "open we'll see" or command == "open v o c" or command == "open c" or command == "open the l c":
os.startfile('C://Program Files (x86)/VideoLAN/VLC/vlc')
elif command == "open Skype" or command == "open skype":
os.startfile('C://Program Files (x86)/Skype/Phone/Skype')
elif command == "Calculator" or command == "calculator":
os.system('calc.exe')
elif command == "open python 2.7" or command == "open place on two point seven" or command == "open play on two point seven" or command == "open python two point seven" or command == "open my phone two point seven" or command == "open play store two point seven":
os.startfile('C:\ProgramData\Microsoft\Windows\Start Menu\Programs\Python 2.7\IDLE (Python GUI)')
elif command == "command line" or command == "command":
say("Entering Command Line")
while True:
say("Enter a command for windows or exit")
command = raw_input(">> ")
if command == "exit":
say("exiting")
break
os.system(command)
elif command == "open word":
os.startfile('C:\ProgramData\Microsoft\Windows\Start Menu\Programs\Word 2016')
elif command == "web":
say("going into web mode")
while True:
say("say website with out https:// or exit")
with sr.Microphone() as source:
print "Say command"
audio = r.listen(source)
try:
website = r.recognize_api(audio, client_access_token=API_AI_CLIENT_ACCESS_TOKEN)
except sr.RequestError as e:
website = r.recognize_wit(audio, key=WIT_AI_KEY)
except sr.RequestError as e:
command = r.recognize_google(audio)
if website == "exit":
say("exiting")
break
webbrowser.open("https://" + website)
elif command == "open dropbox" or command == "open drop box":
os.startfile('C:\Program Files (x86)\Dropbox\DropboxOEM\DropboxOEM')
elif command == "open musicBee" or command == "open music bee" or command[:10] == "open music":
os.startfile('C:\Program Files (x86)\MusicBee\MusicBee')
elif command == "update":
webbrowser.open("https://github.com/Leocarson/DesmondAI")
say("If any, download the new python file")
elif command == "settings" or command == "set lights":
say("for easy changing of settings this section will not use voice")
while True:
say("here are settings you can change or type exit to exit")
print "Your Name Voice Gender Command List"
setting = raw_input(">> ")
if setting == "Your Name" or setting == "your name" or setting == "Your name" or setting == "your Name":
name = raw_input(">> ")
say ("ok your name is ")
say (name)
say ("I'll remember that")
try:
nafile = open(namefile, 'w')
nafile.write(name)
nafile.close()
break
except (NameError, ValueError, RuntimeError, IOError):
cats = "cats"
elif setting == "Command List" or command == "command list" or command == "Command list" or command == "command List":
say ("would you like to have me ask for the command list (y/n)")
CL = raw_input('>> ')
if CL == "y" or CL == "Y":
comask = open(comaskfile, 'w')
comask.write("y")
comask.close()
else:
say ("ok, i will not say the command list at the beginning of the program")
comask = open(comaskfile, 'w')
comask.write('no')
comask.close()
elif setting == "Voice Gender" or setting == "voice gender" or setting == "voice Gender" or setting == "Voice gender":
while 1:
say("would you like male or female")
gen = raw_input(">> ")
if gen == "female" or gen == "Female":
say("ok")
genfile = open('gender.txt', 'w')
genfile.write("female")
genfile.close()
break
else:
say("ok")
genfile = open('gender.txt', 'w')
genfile.write("male")
genfile.close()
break
elif command == "":
say("cats")
else:
say("searching for")
say(command)
webbrowser.open(command)
say("found it")
| true |
f32155e82b4979d3dd8bc36073f0e7161fcd705f | Python | Canyanwu/BattleGame | /battle.py | UTF-8 | 4,763 | 4.25 | 4 | [] | no_license | """ Implementation of the Battler class and its methods.
This method reads and creates an army for each player, say army1 and army1,
sets them in stack formation or Queue formation depending on the formation
and calls internal method __conduct_combat(self, army1, army2, 0) and returns
the winner
methods implemented:
gladiatorial_combat(self, player_one: str, player_two: str)
fairer_combat(self, player_one: str, player_two: str)
__conduct_combat(self, army1: Army, army2: Army, formation: int)
"""
__author__ = "Chukwuudi (Chuchu) Anyanwu"
from army import Army
class Battle:
"""creates two player with stack of armies and method to battle"""
def gladiatorial_combat(self, player_one: str, player_two: str) -> int:
""" This method reads and creates an army for each player, say army1
and army1, sets them in stack formation
@complexity: Best O(n) and worst O(n)
"""
FORMATION = 0 # constant
army_1 = Army() # creating an instance of Army
army_1.choose_army(player_one, FORMATION) # creating player stack
army_2 = Army() # creating an instance of Army
army_2.choose_army(player_two, FORMATION) # creating player stack
return self.__conduct_combat(army_1, army_2, FORMATION)
def fairer_combat(self, player_one: str, player_two: str) -> int:
""" Conducts a battle between two armies in queue formation and whenever a
fighter survives, it gets appended at the end of the queue and at the end it
return the winner from the conduct_combat method
@complexity: Best O(n) and worst O(n)
"""
FORMATION = 1
army_1 = Army() # creating an instance of Army
army_1.choose_army(player_one, FORMATION) # creating player stack
army_2 = Army() # creating an instance of Army
army_2.choose_army(player_two, FORMATION) # creating player stack
return self.__conduct_combat(army_1, army_2, FORMATION)
def __conduct_combat(self, army1: Army, army2: Army, formation: int) -> int:
""" conducts battle between two armies based on the formation and return the winner
the worst-case run-time complexity for queue and stack operations is O(1)
@complexity: Best O(n) and worst is O(n) for both formation 0 and formation 1
where n is length of the army force.
"""
if formation == 0:
while len(army1.force) != 0 and len(army2.force) != 0: # if both are stack are still not empty
pop_play1 = army1.force.pop() # pop the player one army
pop_play2 = army2.force.pop() # pop the player two army
x = pop_play1.attack_damage() # get player one attack damage
y = pop_play2.attack_damage() # get player two attack damage
pop_play1.defend(x) # decrease player one life based on the player 2 attack damage
pop_play2.defend(y) # decrease player two life based on the player 1 attack damage
if pop_play1.is_alive():
army1.force.push(pop_play1) # push back army if still alive
if pop_play2.is_alive():
army2.force.push(pop_play2)
if army1.force.is_empty() and army2.force.is_empty():
return 0 # draw
elif not army1.force.is_empty() and army2.force.is_empty():
return 1 # Player one win
else:
return 2 # Player two
if formation == 1:
while len(army1.force) != 0 and len(army2.force) != 0:
pop_play1 = army1.force.serve() # relax the player one army in the front
pop_play2 = army2.force.serve() # relax the player two army in the front
x = pop_play1.attack_damage()
y = pop_play2.attack_damage()
pop_play1.defend(x)
pop_play2.defend(y)
if pop_play1.is_alive():
army1.force.append(pop_play1) # return player one army at the rear of the queue
if pop_play2.is_alive():
army2.force.append(pop_play2) # return player two army at the rear of the queue
if army1.force.is_empty() and army2.force.is_empty():
return 0 # draw
elif not army1.force.is_empty() and army2.force.is_empty():
return 1 # Player one win
else:
return 2 # Player two win
# TESTING TESTING TESTING TESTING TESTING TESTING TESTING TESTING TESTING
# battle = Battle()
# print(battle.gladiatorial_combat("James", "John"))
#
# battle = Battle()
# print(battle.fairer_combat("Judas", "Joshua"))
| true |
b30ffb3d59fc7d223ec9384ac3bcdcc0e398b93b | Python | twcxjq/python_29 | /Day7_20210604/py_io.py | UTF-8 | 720 | 3.40625 | 3 | [] | no_license | # -*- coding:utf8 -*- #
# -----------------------------------------------------------------------------------
# ProjectName: python_29
# FileName: py_io
# Author: TianChangJun
# Datetime: 2021/6/4 11:28
# Description:
# -----------------------------------------------------------------------------------
import os
# 打印路径下所有的目录和文件(隐藏文件也列出)
for i in os.listdir(r"c:\\"):
print(i)
# 读取文件
f1 = open(r"c:\text11.txt", mode="r")
# 读取内容
data = f1.read()
print(type(data), data) # 字符串类型
f1.close()
# 创建文件mode=a(追加)或者mode=w(覆盖)表示创建文件
f2 = open(r"d:\text22.txt", mode="a")
f2.write(data + "\n")
f2.close()
| true |
0eeabcb0714d5af8d404c0f35f8c44e2ab2170bd | Python | ismelyanets/python_hw | /controltask_8.py | UTF-8 | 212 | 3.359375 | 3 | [] | no_license | lst = [1, 2, 3, 4, 5, 9, 6, 7]
print('-------------')
print('ID:', id(lst))
print(lst)
print('-------------')
a = lst.index(min(lst))
b = lst.index(max(lst))
lst[a], lst[b] = lst[b], lst[a]
print(lst, id(lst)) | true |
0d9b77cbc76a35345c31167d54fec5cf79df6856 | Python | typesupply/feaTools | /Lib/feaTools/writers/fdkSyntaxWriter.py | UTF-8 | 7,332 | 2.84375 | 3 | [
"MIT"
] | permissive | """
Basic FDK syntax feature writer.
"""
from __future__ import print_function, division, absolute_import, unicode_literals
from .baseWriter import AbstractFeatureWriter
class FDKSyntaxFeatureWriter(AbstractFeatureWriter):
def __init__(self, name=None, isFeature=False):
self._name = name
self._isFeature = isFeature
self._indentationLevel = 0
self._instructions = []
def write(self, linesep="\n"):
lines = []
if self._name:
lines.append("")
if self._isFeature:
t = self._whitespace(self._indentationLevel-1) + "feature %s {" % self._name
lines.append(t)
else:
t = self._whitespace(self._indentationLevel-1) + "lookup %s {" % self._name
lines.append(t)
for instrution in self._instructions:
if isinstance(instrution, FDKSyntaxFeatureWriter):
t = instrution.write(linesep)
lines.append(t)
else:
t = self._whitespace() + instrution
lines.append(t)
if self._name:
t = self._whitespace(self._indentationLevel-1) + "} %s;" % self._name
lines.append(t)
lines.append("")
return linesep.join(lines)
def _whitespace(self, level=None):
if level is None:
level = self._indentationLevel
return " " * level
def _list2String(self, aList):
final = []
for i in aList:
if isinstance(i, list):
i = "[%s]" % self._list2String(i)
final.append(i)
return " ".join(final)
def _subwriter(self, name, isFeature):
return FDKSyntaxFeatureWriter(name, isFeature=isFeature)
def feature(self, name):
feature = self._subwriter(name, True)
feature._indentationLevel = self._indentationLevel + 1
self._instructions.append(feature)
return feature
def lookup(self, name):
lookup = self._subwriter(name, False)
lookup._indentationLevel = self._indentationLevel + 1
self._instructions.append(lookup)
return lookup
def featureReference(self, name):
t = "feature %s;" % name
self._instructions.append(t)
def lookupReference(self, name):
t = "lookup %s;" % name
self._instructions.append(t)
def classDefinition(self, name, contents):
t = "%s = [%s];" % (name, self._list2String(contents))
self._instructions.append(t)
def lookupFlag(self, rightToLeft=False, ignoreBaseGlyphs=False, ignoreLigatures=False, ignoreMarks=False):
values = []
if rightToLeft:
values.append("RightToLeft")
if ignoreBaseGlyphs:
values.append("IgnoreBaseGlyphs")
if ignoreLigatures:
values.append("IgnoreLigatures")
if ignoreMarks:
values.append("IgnoreMarks")
if not values:
values = "0"
values = ", ".join(values)
t = "lookupflag %s;" % values
self._instructions.append(t)
def gsubType1(self, target, replacement):
if isinstance(target, list):
target = "[%s]" % self._list2String(target)
if isinstance(replacement, list):
replacement = "[%s]" % self._list2String(replacement)
t = "sub %s by %s;" % (target, replacement)
self._instructions.append(t)
def gsubType2(self, target, replacement):
gsubType4(target, replacement)
def gsubType3(self, target, replacement):
if isinstance(target, list):
target = "[%s]" % self._list2String(target)
if isinstance(replacement, list):
replacement = "[%s]" % self._list2String(replacement)
t = "sub %s from %s;" % (target, replacement)
self._instructions.append(t)
def gsubType4(self, target, replacement):
if isinstance(target, list):
target = self._list2String(target)
if isinstance(replacement, list):
replacement = self._list2String(replacement)
t = "sub %s by %s;" % (target, replacement)
self._instructions.append(t)
def gsubType6(self, precedingContext, target, trailingContext, replacement):
if isinstance(precedingContext, list):
precedingContext = self._list2String(precedingContext)
if isinstance(target, list):
finalTarget = []
for t in target:
if isinstance(t, list):
t = "[%s]" % self._list2String(t)
finalTarget.append(t + "'")
target = " ".join(finalTarget)
else:
target += "'"
if isinstance(trailingContext, list):
trailingContext = self._list2String(trailingContext)
if isinstance(replacement, list):
replacement = self._list2String(replacement)
# if the replacement is None, this is an "ignore"
if replacement is None:
if precedingContext and trailingContext:
t = "ignore sub %s %s %s;" % (precedingContext, target, trailingContext)
elif precedingContext:
t = "ignore sub %s %s;" % (precedingContext, target)
elif trailingContext:
t = "ignore sub %s %s;" % (target, trailingContext)
else:
t = "ignore sub %s;" % target
# otherwise it is a regular substitution
else:
if precedingContext and trailingContext:
t = "sub %s %s %s by %s;" % (precedingContext, target, trailingContext, replacement)
elif precedingContext:
t = "sub %s %s by %s;" % (precedingContext, target, replacement)
elif trailingContext:
t = "sub %s %s by %s;" % (target, trailingContext, replacement)
else:
t = "sub %s by %s;" % (target, replacement)
self._instructions.append(t)
def gposType1(self, target, value):
value = "%d %d %d %d" % value
t = "pos %s <%s>;" % (target, value)
self._instructions.append(t)
def gposType2(self, target, value, needEnum=False):
left, right = target
if isinstance(left, list):
left = "[%s]" % self._list2String(left)
if isinstance(right, list):
right = "[%s]" % self._list2String(right)
t = "pos %s %s %d;" % (left, right, value)
if needEnum:
t = "enum %s" % t
self._instructions.append(t)
def languageSystem(self, languageTag, scriptTag):
t = "languagesystem %s %s;" % (scriptTag, languageTag)
self._instructions.append(t)
def script(self, scriptTag):
t = "script %s;" % scriptTag
self._instructions.append(t)
def language(self, languageTag, includeDefault=True):
if not includeDefault and languageTag != "dflt":
t = "language %s exclude_dflt;" % languageTag
else:
t = "language %s;" % languageTag
self._instructions.append(t)
def include(self, path):
t = "include(%s)" % path
self._instructions.append(t)
def subtableBreak(self):
t = "subtable;"
self._instructions.append(t)
def rawText(self, text):
self._instructions.append(text)
| true |
c7d60ede9a2990487b7fab75a068bb56ac7faf3c | Python | Jenny-Jo/AI | /homework/DS_4_.py | UTF-8 | 2,980 | 3.390625 | 3 | [] | no_license | from typing import List
Vector = List[float]
height_weight_age = [70, 170, 40]
grades= [95,80,75,62]
def add(v: Vector, w: Vector) -> Vector:
assert len(v) == len(w), "vectors must be the same length"
return [ v_i + w_i for v_i, w_i in zip(v,w)]
assert add([1,2,3],[4,5,6]) == [5,7,9]
def subtract(v : Vector, w: Vector) -> Vector:
assert len(v) == len(w), "vectors must be the same length"
return [v_i - w_i for v_i, w_i in zip(v,w)]
assert subtract([5,7,9], [4,5,6]) == [1,2,3]
def vector_sum(vectors: List[Vector])-> Vector:
assert vectors, "no vectors provided!"
num_elements = len(vectors[0])
assert all(len(v) == num_elements for v in vectors), "different sizes!"
return [sum(vector[i] for vector in vectors)
for i in range(num_elements)]
assert vector_sum([[1,2],[3,4],[5,6],[7,8]]) == [16,20]
def scalar_multiply(c: float, v: Vector)->Vector:
return [c*v_i for v_i in v]
assert scalar_multiply(2,[1,2,3]) == [2,4,6]
def vector_mean(vectors : List[Vector]) -> Vector:
n = len(vectors)
return scalar_multiply(1/n, vector_sum(vectors))
assert vector_mean([[1, 2], [3, 4], [5, 6]]) == [3,4]
def dot(v: Vector, w: Vector) ->float:
assert len(v) == len(w), "vectors must be same length"
return sum(v_i*w_i for v_i, w_i in zip(v,w))
assert dot ([1,2,3], [4,5,6]) == 32
def sum_of_squares(v:Vector) -> float:
return dot(v,v)
assert sum_of_squares([1,2,3]) == 14
import math
def magnitude(v: Vector) ->float:
return math.sqrt(sum_of_squares(v))
assert magnitude([3, 4]) == 5
def squared_distance(v: Vector, w: Vector) ->float:
return math.sqrt(squared_distance(v,w))
def distance(v:Vector, w:Vector)->float:
return magnitude(subtract(v,w))
# 4.2
Matrix = List[List[float]]
A = [[1,2,3],
[4,5,6]]
B = [[1,2],
[3,4],
[5,6]]
from typing import Tuple
def shape(A: Matrix) ->Tuple [int,int]:
num_rows = len(A)
num_cols = len(A[0]) if A else 0
return num_rows, num_cols
assert shape([[1,2,3],[4,5,6]]) ==(2,3)
def get_row(A: Matrix, i:int) -> Vector:
return A[i]
def get_colomn(A:Matrix, j: int) -> Vector:
return [A_i[j]
for A_i in A]
from typing import Callable
def make_matrix(num_rows : int,
num_cols:int,
entry_fn: Callable[[int,int], float]) ->Matrix:
return [[entry_fn(k, j)]]
for j in range(num_cols)]
for i in range(num_rows)]
def identity_matrix(n:int) -> Matrix:
return make_matrix(n, n, lambda i,j: 1 if i == j else 0)
assert identity_matrix(5) == [[1,0,0,0,0],
[0,1,0,0,0]
[0,0,1,0,0]
[0,0,0,1,0]
[0,0,0,0,1]]
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4), (4, 5), (5, 6), (5, 7), (6, 8), (7, 8),(8, 9)]
friends_of_five = [ i
for i, is_friend in enumerate(friend_matrix[5])]
if is_friend]
| true |
4c7300511b405bbaf2fe79c6825726017222c1b6 | Python | NeerajaLanka/100daysofcode | /Day20 snake/score_board1.py | UTF-8 | 646 | 3.46875 | 3 | [] | no_license | from turtle import Turtle
class Scoreboard(Turtle):
def __init__(self):
super().__init__()
self.penup()
self.hideturtle()
self.goto(0,270)
self.color("white")
self.score = 0
self.update_score()
def update_score(self):
self.write(f"score_board :{self.score}",align="center",font=("Arial",12,"normal"))
def increment(self):
self.score+=1
self.clear()
self.update_score()
def prints(self):
self.color("white")
self.goto(0,0)
self.write("game over",align="center",font=("Arial",16,"normal"))
| true |
e10161981a64ef1f25600f9255b21943d81d52fe | Python | jllovet/devops-toolkit | /aws/cloudformation/manager.py | UTF-8 | 3,945 | 2.65625 | 3 | [] | no_license | import boto3
from botocore.exceptions import ClientError
import json
import sys
import argparse
#TODO: Change default behavior with no args to describe rather than create
#TODO: Add Create flag
def parse_cli():
try:
parser = argparse.ArgumentParser()
parser.add_argument(
"-n",
"--name",
default="cf-stack",
help="path to file containing parameters for cloudformation template",
)
parser.add_argument(
"-t",
"--template",
default="./template.yml",
help="path to file containing cloudformation template",
)
parser.add_argument(
"-p",
"--params",
help="path to file containing parameters for cloudformation template",
)
parser.add_argument(
"-del",
"--delete",
action="store_true",
help="optional flag indicating whether to delete the cloudformation stack",
)
describe_group = parser.add_mutually_exclusive_group()
describe_group.add_argument(
"-desc",
"--describe",
action="store_true",
help="optional flag to describe cloudformation stacks",
)
describe_group.add_argument(
"-da",
"--describe_all",
action="store_true",
help="optional flag to describe all cloudformation stacks",
)
describe_group.add_argument(
"-l",
"--list",
action="store_true",
help="optional flag to list all cloudformation stack names",
)
return parser.parse_args()
except argparse.ArgumentError as err:
print(str(err))
sys.exit(2)
def stack_exists(name, required_status='CREATE_COMPLETE'):
# See: https://stackoverflow.com/questions/23019166/boto-what-is-the-best-way-to-check-if-a-cloudformation-stack-is-exists
try:
data = client.describe_stacks(StackName=name)
except ClientError:
return False
return data['Stacks'][0]['StackName'] == name
# TODO: Add flag for using status
# return data['Stacks'][0]['StackStatus'] == required_status
def get_stack_name_list(data):
stack_ids = []
for stack in data['Stacks']:
stack_ids.append(stack['StackName'])
return stack_ids
if __name__ == "__main__":
client = boto3.client('cloudformation')
cli_args = parse_cli()
stack_name = cli_args.name
if cli_args.list:
cli_args.describe_all = True
if cli_args.describe_all:
cli_args.describe = True
describe_function = client.describe_stacks()
if cli_args.describe:
if not cli_args.describe_all:
describe_function = client.describe_stacks(StackName=stack_name)
try:
data = describe_function
if cli_args.list:
stack_names = get_stack_name_list(data)
result = stack_names
else:
result = data
print(result)
except ClientError:
print(f"Stack with id {stack_name} does not exist")
sys.exit()
if cli_args.delete:
result = client.delete_stack(
StackName=stack_name
)
print(result)
sys.exit()
if cli_args.params:
with open(cli_args.params, 'r') as f:
parameters = json.load(f)
else:
parameters = []
with open(cli_args.template, 'r') as f:
template = f.read()
if stack_exists(stack_name):
result = client.update_stack(
StackName=stack_name,
TemplateBody=template,
Parameters=parameters
)
print(result)
else:
result = client.create_stack(
StackName=stack_name,
TemplateBody=template,
Parameters=parameters
)
print(result)
| true |
197551b510427e88b20cf1e91a62c36da64e2b9b | Python | darshita1108/files | /threading.py | UTF-8 | 369 | 3.609375 | 4 | [] | no_license | import threading
class Messenger(threading.Thread):
def run(self):
for _ in range(10):#when we do not care of variable but just want to loop 10 times we use _
print(threading.currentThread().getName())
x = Messenger(name="send messages")
y = Messenger(name="received messages")
x.start()#goes to class and loooks for run
y.start() | true |
f8f5e99c72383f641e58dc0c56450d6a91e755dc | Python | tylerherrin/top-log-calculator | /calculate.py | UTF-8 | 2,272 | 2.71875 | 3 | [] | no_license | import sys
import re
# Given the output below, grab non-zero %CPU values then calculate average and max values.
#
# [30-07-21 16:36:50]
# top - 16:36:50 up 2 days, 23:29, 2 users, load average: 0.40, 0.
# Tasks: 188 total, 1 running, 119 sleeping, 0 stopped, 0 zomb
# %Cpu(s): 0.3 us, 0.1 sy, 0.1 ni, 99.6 id, 0.0 wa, 0.0 hi, 0.
# KiB Mem : 15922328 total, 1041508 free, 2796408 used, 12084412 b
# KiB Swap: 0 total, 0 free, 0 used. 12510164 a
#
# PID USER PR NI VIRT RES SHR S %CPU %MEM
# 3019 freeswi+ -2 19 3383308 155416 16908 S 13.3 1.0
# 28625 kurento 20 0 6841200 84516 28824 S 13.3 0.5
# 3122 bigblue+ 20 0 5967088 322028 21344 S 6.7 2.0
# 1 root 20 0 225540 9504 6824 S 0.0 0.1
# 2 root 20 0 0 0 0 S 0.0 0.0
# 3 root 0 -20 0 0 0 I 0.0 0.0
# 4 root 0 -20 0 0 0 I 0.0 0.0
# 6 root 0 -20 0 0 0 I 0.0 0.0
#
# Top Log is generated with:
# while ( sleep 60 ) ; do (printf "\n\n\n" && date +"[%d-%m-%y %T]" && /
# top -bn 1 -o %CPU | head -n 15 && printf "\n\n\n") >> top_log.txt ; done
try:
file_path = sys.argv[1]
try:
with open(sys.argv[1], "r") as top_log_file:
top_log_string = top_log_file.read()
except FileNotFoundError:
print(file_path + " does not exist! Exiting!")
exit(1)
except IndexError:
print("No filepath provided! Exiting!")
exit(1)
# Regex used to match relevant %CPU values.
line_regex = re.compile(r"(PID\sUSER.*)?\s*\d+\s+\w+\+?\s+-?\d+\s+-?\d+\s+\d+(?:\.\d+\D)?\s+\d+\s+\d+\s+\w\s+(?!0\.0)(\d+\.?\d*)")
cpu_values = line_regex.findall(top_log_string)
samples = []
sample_values = []
for index, values in enumerate(cpu_values):
if values[0] and index != 0:
samples.append(sample_values)
sample_values = []
sample_values.append(float(values[1]))
sample_averages = []
for values in samples:
sample_averages.append(sum(values) / len(values))
avg_cpu = format(sum(sample_averages) / len(sample_averages), ".2f")
max_cpu = format(max(sample_averages), ".2f")
print("Average CPU Usage: " + avg_cpu + "%")
print("Maximum CPU Usage: " + max_cpu + "%")
| true |
a59effa8d3527d1ae90febb19c440cfce6df4ba5 | Python | fenix3846/calculator | /calculator.py | UTF-8 | 873 | 4.4375 | 4 | [] | no_license | def add(x , y):
return x+y
def subtract(x , y):
return x-y
def divide(x , y):
return x/y
def multiply(x , y):
return x*y
print("select operation")
print("1.add")
print("2.subtract")
print("3.divide")
print("4.multiply")
while True:
choice = input("enter choice:")
if choice in ('1', '2', '3', '4'):
num1 = float(input("Enter first number: "))
num2 = float(input("Enter second number: "))
if choice == '1':
print(num1, "+", num2, "=", add(num1, num2))
elif choice == '2':
print(num1, "-", num2, "=", subtract(num1, num2))
elif choice == '3':
print(num1, "*", num2, "=", multiply(num1, num2))
elif choice == '4':
print(num1, "/", num2, "=", divide(num1, num2))
break
else:
print("Invalid Input") | true |
ee0aec3b8ab0cde437e44951b79558d002b29614 | Python | hzcodec/python_goodies | /fsm/fsm3.py | UTF-8 | 992 | 3.0625 | 3 | [] | no_license | # Auther : Heinz Samuelsson
# Date : 2017-03-30
# File : fsm3.py
# Reference : http://gnosis.cx/publish/programming/charming_python_4.html
# Description : FSM
# Python ver : 2.7.3 (gcc 4.6.3)
from time import sleep
from statemachine import *
DLY = 0.2
# statemachine m
def state1(val):
val += 1
print "m : STATE1 State:", val
newState = "STATE2";
sleep(DLY)
return (newState, val)
def state2(val):
print "m : STATE2 State:", val
newState = "STATE3";
sleep(DLY)
return (newState, val)
def state3(val):
print "m : STATE3 State:", val
if (val == 7):
newState = "Out_of_Range";
else:
newState = "STATE1";
sleep(DLY)
return (newState, val)
if __name__ == "__main__":
m = StateMachine()
m.add_state("STATE1", state1)
m.add_state("STATE2", state2)
m.add_state("STATE3", state3)
m.add_state("OUT_OF_RANGE", None, end_state=1)
m.set_start("STATE1")
m.run(0)
| true |
cd9b409c91f74c1096f16fb0bc67b5f684a8abdf | Python | shubhampachori12110095/shuca | /lib/DecodeGreedyCoverage.py | UTF-8 | 3,228 | 3.125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python
# -*- coding: utf-8 -*-
import copy
class DecodeGreedyCoverage:
def __init__(self, length, K, n, vectors, weight):
self.K = K
self.length = length
self.n = n
self.vectors = vectors
self.weight = weight
self.sentences = self.__MakeSentences(length, n, vectors, weight)
def __MakeSentences(self, length, n, vectors, weight):
sentences = []
for i in range(1, n + 1):
sentence = Sentence(id = i,
length = length[i],
term_vector = vectors[i],
weight = weight)
sentences.append(sentence)
return sentences
def __UpdateSentenceWeight(self, weight):
for sentence in self.sentences:
sentence.score = sentence.CalculateScore(sentence.term_vector,
weight)
def __UpdateWeight(self, weight):
for key, value in self.current_sentences.term_vector.items():
weight[key] = 0
return weight
def GetScore(self):
return self.current_sentences.GetScore()
def GetSolution(self):
solution = [False for i in range(0, self.n + 1)]
for i in self.current_sentences.id:
solution[i] = True
solution[0] = False
return solution
def Search(self):
self.current_sentences = Sentence(0, 0, {}, {})
weight = copy.deepcopy(self.weight)
while len(self.sentences) > 0:
self.sentences.sort()
sentence = self.sentences.pop()
if sentence.length + self.current_sentences.length <= self.K:
self.current_sentences.Update(sentence, self.weight)
weight = self.__UpdateWeight(weight)
self.__UpdateSentenceWeight(weight)
class Sentence:
    """A (possibly merged) sentence candidate for greedy summary decoding.

    Tracks the ids of the merged source sentences, their combined length,
    the union of their term vectors, and a coverage score computed from a
    term -> weight mapping.
    """

    def __init__(self, id, length, term_vector, weight):
        self.id = [id]                  # ids of all sentences merged into this one
        self.length = length            # total length (e.g. word count)
        self.term_vector = term_vector  # term -> count dict of covered terms
        self.score = self.CalculateScore(term_vector, weight)

    def __cmp__(self, other):
        # Python 2 ordering hook (kept for backward compatibility).
        if self.score > other.score:
            return 1
        elif self.score < other.score:
            return -1
        else:
            return 0

    def __lt__(self, other):
        # Rich comparison required on Python 3, where __cmp__ is ignored
        # and sorting Sentence objects would raise TypeError.
        return self.score < other.score

    def CalculateScore(self, term_vector, weight):
        """Return the sum of weights of every term in term_vector."""
        score = 0
        for key, value in term_vector.items():
            score = score + weight[key]
        return score

    def GetScore(self):
        """Return the cached coverage score."""
        return self.score

    def Update(self, sentence, weight):
        """Merge another Sentence into this one and recompute the score."""
        self.id.extend(sentence.id)
        self.length = self.length + sentence.length
        self.term_vector.update(sentence.term_vector)
        self.score = self.CalculateScore(self.term_vector, weight)
if __name__ == '__main__':
    # Tiny smoke test: 3 candidate sentences, budget K=9.
    length = [0, 6, 5, 4]
    K = 9
    n = 3
    vectors = [{},
               {'a':1, 'b':1},
               {'b':1},
               {'a':1, 'c':1}]
    # NOTE(review): weight['d'] is the string '7', not the int 7 -- harmless
    # here because no vector contains 'd', but looks like a typo.
    weight = {'a':10, 'b':5, 'c':2, 'd':'7'}
    dgc = DecodeGreedyCoverage(length, K, n, vectors, weight)
    dgc.Search()
    #solution = dgc.GetSolution()
    print dgc.GetScore()
    print dgc.GetSolution()
| true |
6825cb985efa2b0dfea0b685414912ecb5e91acb | Python | legenddaniel/python-practices | /Try8.py | UTF-8 | 4,201 | 4.03125 | 4 | [] | no_license | for letter in "Mom":
print(letter) #letter没有语法意义,可换成任何词
print()
for guy in ["Daniel","David","Bobby"]:
print(guy)
print(str(guy.upper().isupper()).upper()) #布尔算符必须转换成字符串
print("print后会发现首先按矩阵内元素排序,再按print列表排序")
matrix = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
]
print([[row[i] for row in matrix] for i in range(4)]) #利用排序顺序不同进行的矩阵行列交换
#相当于以下代码
new_matrix = []
for i in range(4):
inter = []
for row in matrix:
inter.append(row[i])
new_matrix.append(inter)
print(new_matrix)
print(list(zip(*matrix))) #注意新list以tuple组成
print()
print(range(3))
for index in range(3):
print(index)
for index in range(1,3):
print(index)
for index in range(1,10,3):
print(index)
print()
guys = ["Daniel","David","Bobby"]
for index in range(len(guys)):
print(index, guys[index])
print(guys[index])
knights = {'gallahad': 'the pure', 'robin': 'the brave'}
for k, v in knights.items(): # 提取key和value
print(k, v)
guy_enumerate = dict(enumerate(guys)) #将iterate结果变成dict
print((guy_enumerate)) # tuple和list都是逗号,元以小括号括起来
for guy in guy_enumerate:
print(guy) #打出来的是序号
guy_enumerate2 = list(enumerate(guys,start=1)) #将iterate结果变成list,1为起始计数
print((guy_enumerate2))
for guy in guy_enumerate2:
print(guy) #打出来的带括号
questions = ['name', 'quest', 'favorite color']
answers = ['lancelot', 'the holy grail', 'blue']
for q, a in zip(questions, answers):
print('What is your {0}? It is {1}.'.format(q, a))
print()
for index in range(3):
if index == 0:
print(00)
elif index == 1:
print(str(00))
else:
print("00")
print()
def powering(base, power):
    """Return base raised to the given integer power by repeated multiplication.

    A zero or negative power yields 1, because the loop body never runs.
    """
    product = 1
    for _ in range(power):
        product *= base
    return product
print(powering(2,3))
print()
file = open("For Try8", "r") #r w a r+
print(file.readable()) # T F F T
print(file.writable()) # F T T T 没有appendable
# print(file.read()) 这几行read只能运行一个,此行代码是原始还原
# print(file.readlines()[1])
for items in file.readlines(): #此行代码是分行显示,每行之间有空格
print(items)
file = open("For Try8","a")
print(file.write("\nCCC")) #每次运行都会多一个CCC,而且多个数字?
file = open("For Try8","w")
print(file.write("CCC")) #覆盖了
# file = open("For","w") #如果打开一个不存在的文件会自动创建然后写入
file.close()
print()
import tools
print(tools.roll_dice(10))
print()
from tools import Student
student1 = Student("Jim", "Business", 3.1)
print(student1.name)
print(student1.honor())
print()
from tools import parrot
parrot(1000) # 1 positional argument
print()
parrot(voltage=1000) # 1 keyword argument
print()
parrot(voltage=1000000, action='VOOOOOM') # 2 keyword arguments
print()
parrot(action='VOOOOOM', voltage=1000000) # 2 keyword arguments
print()
parrot('a million', 'bereft of life', 'jump') # 3 positional arguments
print()
parrot('a thousand', state='pushing up the daisies') # 1 positional, 1 keyword
'''
parrot() # required argument missing
parrot(voltage=5.0, 'dead') # non-keyword argument after a keyword argument
parrot(110, voltage=220) # duplicate value for the same argument
parrot(actor='John Cleese') # unknown keyword argument
'''
print()
from tools import Question
question_prompt = [
"What color are apples?\n(a) Red\n(b) Purple",
"What color are bananas?\n(a) Red\n(b) Yelllow",
"What color are strawberries?\n(a) Green\n(b) Red",
]
questions = [
Question(question_prompt[0], "a"),
Question(question_prompt[1], "b"),
Question(question_prompt[2], "b"),
]
def run(questions):
    # Interactive quiz loop: show each Question.prompt on stdin and count
    # answers that match Question.answer exactly (case-sensitive).
    score = 0
    for question in questions:
        answer = input(question.prompt)
        if answer == question.answer:
            score += 1
    print("You got "+ str(score) + "/" + str(len(questions)) + " correct")
run(questions)
d4d3c10079786b3baf6ea3c4f83797d8a58f2c16 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_59/329.py | UTF-8 | 1,153 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env python
# encoding: utf-8
"""
1BA.py
Created by Lisbeth Nilsen on 2010-05-22.
To run, download Python 2.6 interpreter from http://python.org/
"""
import sys
import os
def calc(f):
    """Count mkdir operations needed for one test case (Code Jam "File Fix-It").

    Reads one case from file object f: a line "n m", then n existing
    directory paths, then m paths to create. Returns how many new
    directories must be made (every missing ancestor counts).
    """
    result = 0
    (n, m) = f.readline().split()
    print n, m
    dirs = ["/"]
    #Initial directories
    for i in range(int(n)):
        dirs.append(f.readline().strip())
    #print dirs
    #New directories
    for i in range(int(m)):
        newDir = f.readline().strip()
        newDirs = newDir.split('/')
        #print newDirs
        #Skip first, empty string
        for d in range(1, len(newDirs) + 1):
            # Walk every prefix of the path; create (count) missing ones.
            #print "Checking:", d, "/".join(newDirs[1:d])
            if "/" + "/".join(newDirs[1:d]) in dirs:
                pass #print "/" + "/".join(newDirs[1:d]), "in dirs"
            else:
                result += 1
                dirs.append("/" + "/".join(newDirs[1:d]))
                #print "/" + "/".join(newDirs[1:d]), "added to dirs"
    #print dirs
    return result
# Driver: first line of the input file is the number of cases; write one
# "Case #i: answer" line per case.
f = open('A-large.in', 'r')
lines = f.readline()
c = int(lines.split()[0])
of = open('output_a_large.txt', 'w')
for idx in range(c):
    of.write('Case #%(idx)i: %(i)i\n' % {'idx': idx + 1, 'i': calc(f)})
f.close()
of.close()
of.close() | true |
9135302fa7b3031a2facff53dd340d05622298d5 | Python | IronMan61693/dummy_project | /adventure_threadsignal/Common_Functions.py | UTF-8 | 213 | 3.46875 | 3 | [] | no_license | import random
def dice_roller(quantity, die_sides, modifier):
    """Roll `quantity` dice with `die_sides` sides each and add `modifier`.

    Returns modifier plus the sum of `quantity` independent rolls, each
    uniform in [1, die_sides].
    """
    total_result = modifier
    for roll in range(0, quantity):
        # Lower bound must be 1: a physical die has no 0 face. The original
        # randint(0, die_sides) produced die_sides + 1 outcomes and could
        # roll a 0.
        die_roll = random.randint(1, die_sides)
        total_result += die_roll
    return total_result
c763426778dd6d6d49b1bd2a613c5211996b1cb1 | Python | ualberta-smr/PyMigBench | /code/core/Arguments.py | UTF-8 | 1,829 | 2.8125 | 3 | [
"MIT"
] | permissive | import argparse
class Arguments:
    """Parsed command-line options for a PyMigBench query."""

    def __init__(self, query: str,
                 data_type: str = None,
                 filters: list[str] = None,
                 output_format: str = None):
        # Raw "attr=value" filter strings are normalized into a dict here.
        self.query = query
        self.data_type = data_type
        self.filters = parse_filters(filters)
        self.output_format = output_format

    def __str__(self):
        # Render all instance attributes for debugging output.
        return str(vars(self))
def parse_filters(filter_list: list[str]) -> dict[str, str]:
    """Parse "attr=value" filter strings into an {attr: value} dict.

    Returns an empty dict for None or an empty list. Splits on the first
    '=' only, so values may themselves contain '=' (the original
    split("=") raised ValueError on such input). Also avoids shadowing
    the builtins `dict` and `filter`.
    """
    if not filter_list:
        return {}
    filters = {}
    for item in filter_list:
        attr, value = item.split("=", 1)
        filters[attr] = value
    return filters
def build_arguments() -> Arguments:
    """Parse sys.argv into an Arguments object for the PyMigBench CLI."""
    parser = argparse.ArgumentParser(description="query PyMigBench")
    # Positional query; defaults to "summary"; single-letter aliases allowed.
    parser.add_argument("query", nargs='?', default="summary",
                        choices=["summary", "count", "list", "detail", "s", "c", "l", "d"],
                        type=str.lower,
                        help="The query you want to run")
    parser.add_argument("-d", "-dt", "--data-type",
                        help="The type of data that you want to fetch. "
                             "Summary does not accept any data type."
                             "Other queries accept exactly one data type.",
                        choices=["lp", "mg"])
    parser.add_argument("-f", "--filters", required=False, nargs='+',
                        help="Additional filters. You can pass zero or more filters in <property>=<value>."
                             "Summary query ignores all filters")
    parser.add_argument("-o", "--output-format", required=False, default="yaml",
                        type=str.lower,
                        choices=["yaml", "json"],
                        help="Output format")
    # NOTE(review): the local name `dict` shadows the builtin here.
    dict = vars(parser.parse_args())
    return Arguments(**dict)
| true |
4ff9579af003c514ee9bda302bdaee900cbf05d7 | Python | vinodekbote/Cracking-the-Coding-Interview | /dictionary_of_words.py | UTF-8 | 190 | 2.75 | 3 | [] | no_license | __author__ = 'rakesh'
'''
Q2. Given a dictionary of words, find all possible sentences that can be formed from a string with no spaces.
Exp : Iamcoolguy
Answer: I am cool guy.
'''
| true |
627bbb959e5d917a6f7a6a9f45b296c1ca51560c | Python | jrschmiedl/CheckOutBot | /BestBuyCheckOutBot.py | UTF-8 | 915 | 2.671875 | 3 | [] | no_license | import time
from selenium import webdriver
# Poll a Best Buy product page until the add-to-cart button appears, then
# click it and proceed to the cart. Requires a local chromedriver.
# webdriver from Chrome
PATH = "C:\Program Files (x86)\chromedriver.exe"
chrome = webdriver.Chrome(PATH)
# Link to the page
PAGE = "https://www.bestbuy.com/site/madden-nfl-21-playstation-4-playstation-5/6407594.p?skuId=6407594"
chrome.get(PAGE)
purchaseButton = False
while not purchaseButton:
    try:
        # While the generic "btn-prim" placeholder is present the real
        # add-to-cart button is not available yet; refresh and retry.
        addToCart = addButton = chrome.find_element_by_class_name("btn-prim")
        print("Button is not ready at this time.")
        time.sleep(1)
        chrome.refresh()
    except:
        # Placeholder gone: locate and click the real add-to-cart button.
        addToCart = addButton = chrome.find_element_by_class_name("fulfillment-add-to-cart-button")
        print("Button was clicked.")
        addToCart.click()
        purchaseButton = True
goToCart = addToCartButton = chrome.find_element_by_class_name("btn btn-secondary btn-sm btn-block ")
goToCart.click()
print("Go to Cart was clicked")
f68ec2e4b875426ce7c73ce9988d7597b2a1b2a1 | Python | calannap/dsmicroservice | /train.py | UTF-8 | 1,067 | 2.625 | 3 | [] | no_license | from __future__ import absolute_import, division, print_function, unicode_literals
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
def train_model(train_images, train_labels, test_images, test_labels):
    """Train a small dense classifier on 28x28 images and save it to disk.

    Pixel values are assumed to be in [0, 255] and are scaled to [0, 1].
    The network flattens each image, applies one 128-unit ReLU layer and
    a 10-way softmax, trains for 10 epochs, evaluates on the test split,
    then writes the architecture to model.json and weights to model.h5.
    """
    train_images = train_images / 255.0
    test_images = test_images / 255.0
    model = keras.Sequential([
        keras.layers.Flatten(input_shape=(28, 28)),
        keras.layers.Dense(128, activation='relu'),
        keras.layers.Dense(10, activation='softmax')
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(train_images, train_labels, epochs=10)
    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    print('\nTest accuracy:', test_acc)
    # serialize model to JSON
    model_json = model.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("model.h5")
    print("Saved model to disk")
| true |
138e692d656a597274ff61d6f60bea9fafacfa02 | Python | agustin-mallea/redes | /LAB2/connection.py | UTF-8 | 8,357 | 2.953125 | 3 | [] | no_license | # encoding: utf-8
# Copyright 2014 Carlos Bederián
# $Id: connection.py 455 2011-05-01 00:32:09Z carlos $
import socket
from constants import *
import os
# import ipdb
class Connection(object):
    """
    Point-to-point connection between the server and one client.
    Serves the client's requests until the connection ends.
    """

    def __init__(self, socket, directory):
        """Create a connection associated with the descriptor fd."""
        self.socket = socket  # client socket
        self.input = ''  # input queue
        self.output = ''  # output queue
        self.dir = directory  # path of the shared folder
        self.remove = False  # flag telling the server
        # that the connection has finished

    def handle_output(self):
        # The program's only call to `socket.send`.
        # Drains the output queue; should be called by AsyncServer when
        # `poll` reports the socket is ready for writing.
        self.socket.send(self.output)
        self.output = ''

    def handle_input(self):
        # The program's only call to `socket.recv`.
        # Feeds the input queue; should be called by AsyncServer when
        # `poll` reports there is data to read.
        # ipdb.set_trace(context=6)
        eolatlast = False
        # NOTE(review): X4096X looks like a mangled literal 4096 (or a
        # constant expected from `constants`); confirm it is defined.
        message = self.socket.recv(X4096X)
        if len(message) == 0:
            # Peer closed the connection.
            self.remove = True
            return
        mlist = message.split(EOL)
        if (mlist[len(mlist) - 1]) == '':
            # The buffer ended exactly on EOL: no partial command remains.
            eolatlast = True
        for x in range(0, len(mlist)):
            if '\n' in mlist[x]:
                # A bare \n without \r is a protocol violation.
                self.kick(BAD_EOL)
                break
            if eolatlast is False and x == len(mlist) - 1:
                # Keep the trailing partial command for the next recv.
                self.input = self.input + (mlist[x])
            else:
                if self.checkcommad(mlist[x]) == KP_GOING:
                    self.execute(mlist[x])
                elif self.checkcommad(mlist[x]) == STOP:
                    # quit: execute it and stop processing further commands.
                    self.execute(mlist[x])
                    break
                else:
                    if (mlist[x]) != '':
                        self.getmessage(INVALID_COMMAND)

    def events(self):
        # Return the events (POLLIN, POLLOUT) this connection is
        # currently interested in. Not implemented yet.
        pass

    def kick(self, error_message):
        # Queue the error reply and mark the connection for removal.
        self.getmessage(error_message)
        self.remove = True

    def listing(self, cmlist):
        """
        Send a sequence of \r\n-terminated lines with the names of the
        files available in the shared directory.
        """
        # ipdb.set_trace(context=6)
        if (cmlist.count(' ') > 0):
            # The listing command takes no arguments.
            self.kick(BAD_REQUEST)
        else:
            cmlist = cmlist.split(' ')
            if len(cmlist) != 1:
                self.getmessage(INVALID_ARGUMENTS)
            else:
                try:
                    self.getmessage(CODE_OK)
                    for item in os.listdir(self.dir):
                        self.output += item + " " + EOL
                    self.output += EOL
                except OSError:
                    self.getmessage(FILE_NOT_FOUND)

    def metadata(self, cmlist):
        """
        Send the size in bytes of the requested file, if it exists in the
        shared directory.
        """
        if (cmlist.count(' ') > 0):
            # NOTE(review): a valid "get_metadata FILE" request contains a
            # space, so this kicks every request with an argument and the
            # branch below looks unreachable -- confirm intended behavior.
            self.kick(BAD_REQUEST)
        else:
            cmlist = cmlist.split(' ')
            if len(cmlist) != 2 or not self.checkfilename(cmlist[1]):
                self.getmessage(INVALID_ARGUMENTS)
            else:
                name = cmlist[1]
                try:
                    size = os.path.getsize(os.path.join(self.dir, name))
                    self.getmessage(CODE_OK)
                    self.output += str(size) + EOL
                except OSError:
                    self.getmessage(FILE_NOT_FOUND)

    def chunks(self, fpath, offset, size):
        # Generator yielding the requested byte range in blocks.
        # NOTE(review): when size >= X4096X the loop reads to end of file,
        # ignoring the requested size -- verify against the protocol spec.
        f = open(fpath, 'r')
        f.seek(offset)
        stop = False
        if size < X4096X:
            yield f.read(size)
        else:
            while (not stop):
                chunk = f.read(X4096X)
                yield chunk
                stop = len(chunk) < X4096X or chunk == ""
        f.close()

    def slice(self, cmlist):
        """
        Send a sequence of digits giving the length in bytes of the data
        chunk, a space, and the data itself, followed by \r\n. When done,
        a final \r\n terminator is sent.
        """
        if (cmlist.count(' ') > 0):
            # NOTE(review): same unreachable-argument pattern as metadata().
            self.kick(BAD_REQUEST)
            return
        cmlist = cmlist.split(' ')
        if len(cmlist) != 4 or not self.checkfilename(cmlist[1]):
            self.getmessage(INVALID_ARGUMENTS)
            return
        name = cmlist[1]
        if (not os.path.exists(self.dir + "/" + name)):
            self.getmessage(FILE_NOT_FOUND)
            return
        try:
            offset = int(cmlist[2])
            size = int(cmlist[3])
        except ValueError:
            self.getmessage(INVALID_ARGUMENTS)
            return
        filesize = os.path.getsize(os.path.join(self.dir, name))
        if (offset < 0 or size <= 0 or offset + size > filesize):
            # Requested range must lie entirely within the file.
            self.getmessage(BAD_OFFSET)
            return
        fpath = os.path.join(self.dir, name)
        self.getmessage(CODE_OK)
        for chunk in self.chunks(fpath, offset, size):
            self.output += str(len(chunk)) + ' ' + chunk + EOL
        # Zero-length chunk terminates the transfer.
        self.output += '0' + ' ' + EOL

    def quit(self, cmlist):
        """
        Close the connection with the client.
        """
        try:
            if (cmlist.count(' ') > 0):
                self.kick(BAD_REQUEST)
            else:
                cmlist = cmlist.split(' ')
                if len(cmlist) != 1:
                    self.getmessage(INVALID_ARGUMENTS)
                else:
                    # Acknowledge and schedule the connection for removal.
                    self.kick(CODE_OK)
        except Exception as e:
            print e

    def checkcommad(self, string):
        """
        Check whether the command is valid; returns KP_GOING for data
        commands, STOP for quit, False otherwise.
        """
        stringlist = string.split(' ')
        command = stringlist[0]
        if (command == 'get_file_listing' or command == 'get_metadata'
                or command == 'get_slice'):
            return KP_GOING
        elif command == 'quit':
            return STOP
        else:
            return False

    def execute(self, string):
        """
        Execute a command by dispatching on its first word.
        """
        stringlist = string.split(' ')
        command = stringlist[0]
        if command == 'get_file_listing':
            self.listing(string)
        elif command == 'get_metadata':
            self.metadata(string)
        elif command == 'get_slice':
            self.slice(string)
        elif command == 'quit':
            self.quit(string)

    def checkfilename(self, filename):
        """
        Check that the filename consists only of valid characters.
        """
        for c in filename:
            if c not in VALID_CHARS:
                return False
        return True

    def getmessage(self, error):
        # Queue a "CODE message\r\n" reply on the output queue.
        self.output += str(error) + ' ' + error_messages[error] + EOL
'''
def handle(self):
"""
Atiende eventos de la conexión hasta que termina.
"""
messagebuffer = ''
eolatlast = False
while self.connected:
message = self.socket.recv(X4096X)
if len(message) == 0:
self.connected = False
break
if messagebuffer != '':
message = messagebuffer + message
messagebuffer = ''
mlist = message.split(EOL)
if (mlist[len(mlist) - 1]) == '':
eolatlast = True
for x in range(0, len(mlist)):
if '\n' in mlist[x]:
self.kick(BAD_EOL)
elif eolatlast is False and x == len(mlist) - 1:
messagebuffer = messagebuffer + (mlist[x])
elif self.checkcommad(mlist[x]):
self.execute(mlist[x])
else:
if (mlist[x]) != '':
self.getmessage(INVALID_COMMAND)
eolatlast = False
'''
| true |
915d3f3689b60d5a79759845a3665c0fb51c105b | Python | LuChenyang3842/algorithm-question | /数组/moveString.py | UTF-8 | 259 | 2.828125 | 3 | [] | no_license | class Solution:
def LeftRotateString(self, s, n):
s_list = list(s)
move = len(s_list) % n
if move == 0:
return s
s_left = ''.join(s_list[])
s_left = ''.join(s_list[move:])
return s_left + s_right | true |
cfdb6d8d8e2113d4a08b59cc84e9f1796fe8c1c1 | Python | fa-me/gitlab-burndown | /burndown.py | UTF-8 | 4,266 | 2.78125 | 3 | [
"MIT"
] | permissive | import sys
import gitlab
import collections
import datetime
import dateutil.parser
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import scipy.interpolate as interpolate
import scipy.signal as signal
import pandas as pd
def get_issues(gitlab_url, gitlab_secret, project, since):
    """Fetch every issue of `project` from GitLab, newest first.

    NOTE(review): the `since` parameter is accepted but never used --
    confirm whether date filtering was intended.
    """
    gl = gitlab.Gitlab(gitlab_url, gitlab_secret)
    proj = gl.projects.get(project)
    done = False
    page = 1
    all_issues = []
    # Page through the issue list (20 per page) until an empty page.
    while not done:
        issues = proj.issues.list(order_by='created_at',
                                  sort='desc',
                                  page=page,
                                  per_page=20)
        if len(issues) == 0:
            break
        page += 1
        all_issues += issues
    return all_issues
milestone_lookup = dict()
def issue_to_dict(issue):
    """Flatten a python-gitlab issue into a plain dict of the fields we plot.

    Side effect: records the issue's milestone (title, start/due dates)
    in the module-level `milestone_lookup` cache.
    """
    # open time
    open_time = dateutil.parser.parse(issue.created_at)
    close_time_raw = issue.attributes['closed_at']
    if close_time_raw is not None:
        close_time = dateutil.parser.parse(close_time_raw)
    else:
        # Issue is still open.
        close_time = None
    # milestone
    if issue.milestone is not None:
        milestone_id = issue.milestone['iid']
        if issue.milestone['start_date'] is None:
            ms_start_date = None
        else:
            ms_start_date = dateutil.parser.parse(issue.milestone['start_date'])
        milestone_lookup.update({milestone_id: {
            'title': issue.milestone['title'],
            'start_date': ms_start_date,
            'due_date': dateutil.parser.parse(issue.milestone['due_date'])
        }})
    else:
        milestone_id = None
    return dict({
        'iid': issue.get_id(),
        'open_time': open_time,
        'close_time': close_time,
        'milestone_id': milestone_id
    })
def issues_to_dataframe(issues):
    """Build a DataFrame with one row per issue (see issue_to_dict)."""
    rows = [issue_to_dict(issue) for issue in issues]
    return pd.DataFrame(rows)
def get_timestamps(from_, to_, freq='D'):
    """Return the timestamps from from_ to to_ (inclusive) at `freq` as a list."""
    index = pd.date_range(from_, to_, freq=freq)
    return index.tolist()
def get_bracket_counter(df, col):
    """Return a callable counting df[col] values in a half-open bracket.

    The returned function counts rows with lower < value <= upper.
    """
    def in_time_bracket(lower, upper):
        values = df[col]
        mask = np.logical_and(values > lower, values <= upper)
        return np.count_nonzero(mask)

    return in_time_bracket
def get_start_due_date(df):
    """Look up the (start_date, due_date) of the milestone of df's rows.

    Reads the module-level `milestone_lookup` cache populated by
    issue_to_dict. Assumes all rows share one milestone; set.pop()
    picks an arbitrary id if several are present.
    """
    milestone_ids = set(df.milestone_id)
    milestone_id = milestone_ids.pop()
    milestone = milestone_lookup[milestone_id]
    start_date = milestone['start_date']
    due_date = milestone['due_date']
    return start_date, due_date
def accumulated_number_of_items(df, freq='D'):
    """Count issues opened and closed per time bucket.

    Buckets span from the earliest open time to the latest of
    (last open, last close, now). Returns (bucket_end_timestamps,
    opened_counts, closed_counts), one entry per (start, end] bucket.
    """
    earliest = df.open_time.min()
    tz = earliest.tz
    now = pd.Timestamp.now(tz=tz)
    latest = max(df.open_time.max(),
                 df.close_time.max(),
                 now)
    timestamps = get_timestamps(earliest, latest, freq)
    timestamps.append(latest)  # assert latest is explicitly added
    count_opened = get_bracket_counter(df, 'open_time')
    count_closed = get_bracket_counter(df, 'close_time')
    # Consecutive (start, end] brackets over the whole timeline.
    timestamp_brackets = list(zip(timestamps[:-1], timestamps[1:]))
    opened = [count_opened(a, b)
              for a, b in timestamp_brackets]
    closed = [count_closed(a, b)
              for a, b in timestamp_brackets]
    return timestamps[1:], opened, closed
def plot_data(df, freq='D', title=None):
    """Render a burndown chart: cumulative opened vs. closed issues."""
    t, opened, closed = accumulated_number_of_items(df, freq)
    opened_cum = np.cumsum(opened)
    plt.figure(figsize=(12,4))
    start_date, due_date = get_start_due_date(df)
    if start_date is None:
        start_date = min(t)
    # Ideal (linear) burn line from milestone start to due date.
    plt.plot([start_date, due_date], [0, opened_cum[-1]], '--', color='0.5')
    plt.fill_between(t, opened_cum, color='0.9')
    plt.fill_between(t, np.cumsum(closed))
    if title is not None:
        plt.title(title)
    # NOTE(review): xlim takes (left, right); the third positional argument
    # `due_date` looks accidental -- confirm intended axis limits.
    plt.xlim(min(t), max(t), due_date)
    plt.ylim(0, max(opened_cum))
    plt.tight_layout()
    plt.show()
def main(gitlab_url=None, gitlab_secret=None, project=None, since=None):
    """Fetch issues and plot one burndown chart per milestone."""
    issues = get_issues(gitlab_url, gitlab_secret, project, since)
    data = issues_to_dataframe(issues)
    for milestone_id, subdata in data.groupby('milestone_id'):
        milestone_name = milestone_lookup[milestone_id]['title']
        plot_data(subdata, freq='H', title=milestone_name)
if __name__ == "__main__":
pass
| true |
6207e496100226867b39f5308c7a294f9444867a | Python | UBC-Rocket/UBCRocketGroundStation | /tests/test_stream_filter.py | UTF-8 | 748 | 2.5625 | 3 | [] | no_license | from connections.sim.stream_filter import ReadFilter, WriteFilter, A
from io import BytesIO
def test_passthrough():
    """Round-trip check: bytes pushed through WriteFilter come back
    unchanged through ReadFilter, and the encoding doubles the size
    while keeping every encoded byte in [A, A + 16).

    NOTE(review): range(0, 255) skips byte value 255 -- confirm whether
    that omission is intentional.
    """
    test_data = [x for x in range(0, 255)]
    test_stream = BytesIO()
    # Write test data to stream
    write_filter = WriteFilter(test_stream)
    for b in test_data:
        write_filter.write(bytes([b]))
    # Check all were written (2 encoded bytes per input byte)
    num = test_stream.tell()
    assert num == len(test_data) * 2
    # Check all are within correct range
    test_stream.seek(0)
    encoded = test_stream.read(num)
    for b in encoded:
        assert A <= b < (A + 16)
    # Check reverse filter
    test_stream.seek(0)
    read_filter = ReadFilter(test_stream, 0)
    for b in test_data:
        assert b == read_filter.read(1)[0]
| true |
7290ebd5ddcb99fa3eaae03ac5594ceeedd6ca1d | Python | andrewrkeyes/artfario-importer | /artfario_importer/met_api/MetAPI.py | UTF-8 | 522 | 2.625 | 3 | [] | no_license |
import requests
class MetAPI(object):
    """Minimal client for the Metropolitan Museum of Art collection API."""

    FULL_COLLECTION_URL = '/public/collection/v1/objects'

    def __init__(self, met_api_url):
        # Base URL, e.g. https://collectionapi.metmuseum.org
        self.met_api_url = met_api_url

    def get_full_collection(self):
        """Fetch the full object collection; returns the requests Response.

        Network failures are logged and re-raised. Uses a bare `raise`
        so the original traceback is preserved (the original `raise e`
        reset the chain's origin to this frame).
        """
        full_collection_url = self.met_api_url + self.FULL_COLLECTION_URL
        try:
            data = requests.get(full_collection_url)
            return data
        except requests.RequestException as e:
            print(e)
            raise
| true |
d2e1514dce22395a858abbd9ec300ff579779780 | Python | Tianyi94/EC602-Assignments | /asgn7/sounds_example.py | UTF-8 | 784 | 2.765625 | 3 | [] | no_license | import scipy.io.wavfile as wavfile
import PyQt4.QtGui as qt
import time
import numpy
import matplotlib.pyplot as pyplot
def read_wave(fname, debug=False):
    """Load a WAV file and return (samples, frame_rate, nframes, nchannels).

    Mono files come back with nchannels == 1; multi-channel files keep
    their (nframes, nchannels) sample array shape.
    """
    frame_rate, music = wavfile.read(fname)
    if debug:
        print(frame_rate, type(music), music.shape, music.ndim)
    if music.ndim > 1:
        nframes, nchannels = music.shape
    else:
        nframes, nchannels = music.shape[0], 1
    return music, frame_rate, nframes, nchannels
def wavplay(fname):
    # Play the file via Qt, then block for the clip's duration
    # (QSound.play returns immediately, so sleep nframes/frame_rate seconds).
    qt.QSound.play(fname)
    music,frame_rate,nframes,nchannels = read_wave(fname)
    time.sleep(nframes/frame_rate)
# Demo: load the sample clip, play it, then plot the (mono-mixed) waveform.
fname = "classical.wav"
music,frame_rate,nframes,nchannels = read_wave(fname)
wavplay('classical.wav')
if nchannels > 1:
    # Mix all channels down to one for plotting.
    music = music.sum(axis=1)
pyplot.plot(music)
pyplot.show()
| true |
a6b8ec456a2b1e3499ea83b7c3565c4957a170a4 | Python | chapman-cpsc-230/hw1-agust105 | /exercises/interest_rate.py | UTF-8 | 356 | 3.4375 | 3 | [
"MIT"
] | permissive | """
File: <interest_rate.py>
Copyright (c) 2016 <Francis Agustin>
License: MIT
<How much money 1000 euros have grown to after
three years with 5 percent interest?>
"""
# Compound interest: final amount = A * (1 + P/100)**n.
A = 1000.0 #euros (initial amount)
P = 5.0 #percent interest per year
n = 3.0 #years
x = A*pow(1+P/100, n)
print "In", n, "years,", A, "euros with", P, "percent interest will grow to", x, "euros."
| true |
e0e197509c440e6459a1b88988668893b14ea48f | Python | TimoLin/pyScriptFoam | /hsFM2Cantera/transientFM.py | UTF-8 | 4,437 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env python
"""
Run FlameMaster transient flame solver for a series of chi values
in the upper branch with consideration of radiation.
"""
import sys, os, subprocess
def listTool(solFolder):
    """Run FlameMaster's ListTool over solFolder and return the upper-branch
    chi_st values, ordered from the hottest flamelet down to the maximum chi.

    NOTE(review): returns None implicitly when the ListTool call fails
    (status != 0) -- callers should handle that case.
    """
    # Call FlameMaster ListTool to get "chi_st" vs "Tmax"
    if 'syms' not in os.listdir("./"):
        # Create syms file
        with open('syms','w') as f:
            f.write("chi_st\nTmax")
    else:
        # Check syms file content
        with open('syms','r') as f:
            lines = f.readlines()
        if lines[0] != 'chi_st\n':
            # Create syms file
            with open('syms','w') as f:
                f.write("chi_st\nTmax")
    status = subprocess.call('$HOME/FlameMaster/Bin/bin/ListTool -M -s syms -r temp.dout '+solFolder+'/*', shell=True)
    chi_upper = []
    if status == 0:
        from operator import itemgetter
        chi_st = []
        Tmax = []
        # temp.dout: two header lines, then "chi_st Tmax" columns.
        with open('temp.dout', 'r') as f:
            lines = f.readlines()
        for line in lines[2:]:
            chi_st.append(float(line.split()[0]))
            Tmax.append(float(line.split()[1]))
        # Sort by Tmax descending; collect chi values until max(chi) is hit,
        # which marks the turning point of the S-curve (end of upper branch).
        indexT = sorted(enumerate(Tmax), key=itemgetter(1),reverse=True)
        for i,value in indexT:
            chi_upper.append(chi_st[i])
            if chi_st[i] == max(chi_st):
                break
        return chi_upper
def transFM(chiUpperBranch, startFile):
    """Run the FlameMaster transient solver for each upper-branch chi.

    Validates FMUnsteady-template.input (radiation must be enabled and
    NumberOfOutputs should be reasonable), then for every chi generates a
    per-chi input file via sed and invokes FlameMan, writing results into
    a Chi-<value> directory. Aborts the whole run on any failure.
    """
    # Run transient flame solver for upper branch with radiation
    # Prepare FM transient flame solver input file
    # Here we use "FMUnsteady.input"
    # Check settings in the input file
    with open('FMUnsteady-template.input') as f:
        lines = f.readlines()
    flagRad = False
    for line in lines:
        if 'NumberOfOutputs' in line:
            nOutputs = int(line.split('=')[-1])
            if nOutputs > 50:
                print(" Warning: NumberOfOutputs is too many. Consider reducing it to 20~50?")
        elif 'ComputeWithRadiation is' in line:
            # Radiation must be enabled and the line must not be commented out.
            if line.split()[-1] != "TRUE" or "#" in line:
                print(" Error: Radiation flag shall be 'TRUE'")
                print(" Setting in file: "+line)
                print(" Abort!")
                sys.exit()
            else:
                flagRad = True
    if not flagRad:
        print(" Error: Radiation flag shall be set like below")
        print(" ComputeWithRadiation is True")
        print(" Abort!")
        sys.exit()
    print(" FMUnsteay.input check passed! Hah")
    for i, chi in enumerate(chiUpperBranch):
        outDir = "Chi-"+str(chi)
        if not os.path.exists(outDir):
            os.makedirs(outDir)
        chi_st = str(chi)
        # Generate input file from template by substituting the @...@ markers.
        sedCmd = 'sed -e s#@Output@#'+outDir+'#'+ \
                 ' -e s#@Startfile@#'+startFile[i]+'#' \
                 ' -e s#@chi_st@#'+str(chi)+'#' \
                 ' FMUnsteady-template.input > FMUnsteady-'+str(chi)+'.input'
        #print(sedCmd)
        status = subprocess.call(sedCmd,shell=True)
        #print(status)
        if status != 0:
            print(" Error in command: "+sedCmd)
            print(" Abort!")
            sys.exit()
        # Run the solver on the generated input file.
        fmCmd = '$HOME/FlameMaster/Bin/bin/FlameMan -i'+' FMUnsteady-'+str(chi)+'.input'
        status = subprocess.call(fmCmd,shell=True)
        if status != 0:
            print(" Error in command: "+fmCmd)
            print(" Abort!")
            sys.exit()
def startfileList(chiUpperBranch, solFolder):
    """Locate the FlameMaster start file for each chi in chiUpperBranch.

    Matches files in solFolder whose name embeds the chi value between
    'chi' and 'tf'. Aborts if any chi has no corresponding file.
    """
    # Find start FM files for the transient flame solver
    fileList = os.listdir(solFolder)
    startFile = [""]*len(chiUpperBranch)
    for file in fileList:
        if 'chi' in file and 'Tst' in file:
            # Get chi value from file name
            # e.g.: CH4_p01_0chi00005tf0300to0300Tst1944
            chi = float(file[file.index('chi')+3:file.index('tf')])
            if chi in chiUpperBranch:
                startFile[chiUpperBranch.index(chi)] = solFolder+'/'+file
    for i in range(len(startFile)):
        if startFile[i] == "":
            print( " Can't find start file for Chi_st:",chiUpperBranch[i])
    if "" in startFile:
        print(" Abort!")
        sys.exit()
    return startFile
def main():
    # Usage: script -dir <solution_folder>
    solFolder = sys.argv[sys.argv.index('-dir')+1]
    chiUB = listTool(solFolder)
    sFiles = startfileList(chiUB, solFolder)
    transFM(chiUB,sFiles)
if __name__ == '__main__':
main()
| true |
b33a66552544cad13a8d5ba9fe77ec63ec56efc8 | Python | brein62/computing-stuff | /Practical Exam Papers/promo 2016/q1.py | UTF-8 | 1,442 | 3.34375 | 3 | [] | no_license | raceData = open("RACE.txt", "r")
raceArray = [] #store race data in a 2-D array
for eachRacer in raceData:
racerData = eachRacer[:-1].split(",")
runnerID = racerData[0]
countryCode = racerData[1]
runnerName = racerData[2]
raceTime = racerData[3]
raceArray.append(racerData)
print("|{0:^15}|{1:^10}|{2:^20}|{3:^15}".format("Runner ID", "Country", "Name", "Race Time"))
print("-"*73)
sortedRaceArray = raceArray #for bubble sorting
###a simple bubble sort algorithm (can use insertion sort, quick sort, etc.)
##i = 1
##while i < len(sortedRaceArray):
## for racer in range(len(sortedRaceArray) - 1):
## if sortedRaceArray[racer][3] > sortedRaceArray[racer + 1][3]: #if faster, swap.
## temp = sortedRaceArray[racer]
## sortedRaceArray[racer] = sortedRaceArray[racer + 1]
## sortedRaceArray[racer + 1] = temp
## i += 1
#insertion sort (thanks mr ng)
for i in range(len(sortedRaceArray)):
x = sortedRaceArray[i]
j = i
while j > 0 and sortedRaceArray[j - 1][3] > x[3]:
sortedRaceArray[j] = sortedRaceArray[j - 1]
j = j - 1
sortedRaceArray[j] = x
for eachRacer in range(10):
print("|{0:^15}|{1:^10}|{2:^20}|{3:^15}".format(sortedRaceArray[eachRacer][0], sortedRaceArray[eachRacer][1], sortedRaceArray[eachRacer][2], sortedRaceArray[eachRacer][3]))
raceData.close()
| true |
0e554f1e8b31cfe32e18357c8934027052ab4a87 | Python | GersonFeDutra/Python-exercises | /CursoemVideo/2020/world_3/ex079.py | UTF-8 | 571 | 4 | 4 | [] | no_license | numbers: list = []
def want_continue() -> bool:
    """Ask 'y'/'n' (case-insensitive) until valid; return True for 'y'."""
    answer: str = ''
    while answer not in ('y', 'n'):
        answer = input('Do you want to continue? (y/n) ').lower()
    return answer == 'y'
# Collect unique numbers from stdin until the user stops, then print them
# sorted in ascending order.
while True:
    number: float = float(input('Enter a number: '))
    if number not in numbers:
        numbers.append(number)
        print('Value added successfully!')
    else:
        print('Value duplicated, will not be added.')
    if not want_continue():
        break
numbers.sort()
print('\nAll the entered values are:', numbers)
| true |
78ffeef29c4071f92cbeed529c01fc33ccb97649 | Python | ConnorStarke/13DGT | /Connor's Comics.py | UTF-8 | 1,356 | 3.1875 | 3 | [] | no_license | #Imports
from tkinter import *
from tkinter import ttk
import random
#Class Code
#Functions and Setup
#GUI Code
root = Tk()
root.title("Comical Comics")
words = StringVar()
#Left Frame
left_frame = ttk.LabelFrame(root, text=" SELL ")
left_frame.grid(row=0, column=0, padx=10, pady=10, sticky="NSEW")
#Creating the Dropdown Menu
chosen_option = StringVar()
options = ['Super Dude', 'Lizard Man', 'Water Woman']
option_menu = ttk.OptionMenu(left_frame, chosen_option, options[0], *options)
option_menu.grid(row=1, column=0, padx=10, pady=10)
#Right Frame
right_frame = ttk.LabelFrame(root, text=" STOCK ")
right_frame.grid(row=0, column=1, padx=10, pady=10, sticky="NSEW")
#Setting the Message Text Variable
Stock_text = StringVar()
Stock_text.set("Da Comics")
#Packing the Message Label
message_label = ttk.Label(right_frame, textvariable=Stock_text, wraplength=250)
message_label.grid(row=4, column=0, columnspan=2, padx=10, pady=10)
#Bottom Frame
L_inside_frame = ttk.LabelFrame(root, text="TEXT")
L_inside_frame.grid(row=5, column=0, columnspan=2, padx=10, pady=10, sticky="SNEW")
#Settinging the Message Text Variable
bottom_text = StringVar()
bottom_text.set("Connor Starke 2021 CC")
#Pack the Message Label
message_label = ttk.Label(L_inside_frame, textvariable=bottom_text, wraplength=250)
message_label.grid(row=5, column=0, padx=10, pady=10)
| true |
8cfa4d81b5c3842d8510c65137449d77ec422d25 | Python | pergo88/pysynth-flask | /pysynth/lmx2594.py | UTF-8 | 2,593 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python
"""
======================================================
lmx2594.py
======================================================
:Author: Bobby Smith
:email: bobby@epiqsolutions.com
:Description:
Driver for controlling the LMX2594
"""
import cp2130
import os
import time
from data_registers import data_registers
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
class Lmx2594(object):
    """Driver for the TI LMX2594 synthesizer over a CP2130 USB-SPI bridge.

    Register values are mirrored in `self.reg` (a data_registers
    RegisterMap loaded from the XML register description).
    """
    def __init__(self, dev=None):
        """Open (or reuse) a CP2130 device and load the default register map.

        dev: an already-opened cp2130 device; when None, the first device
        found is used and its SPI clock is set to 1 MHz.
        """
        if not dev:
            self.dev = cp2130.find()
            time.sleep(0.1)
            self.dev.channel1.clock_frequency = 1000000
        else:
            self.dev = dev
        self.default_registers()
    def default_registers(self):
        """ load the default register set
        """
        self.reg = data_registers.RegisterMap(THIS_DIR + "/reg_maps/lmx2594.xml")
    def read_all_registers(self):
        """ read every single register
        """
        for addr in self.reg.addresses:
            val = self.read_reg(addr)
            self.reg[addr].value = val
            print("0x{:0>4x} = 0x{:>04x}".format(addr, val))
        # for field in self.reg.__dict__:
        #     self.__setattr__(field, field.value)
    def write_all_registers(self):
        """ write every register starting with the highest
            address, writing addr 0x00 last
        """
        lst = list(self.reg.addresses)
        lst.sort()
        lst.reverse()
        for addr in lst:
            self.write_reg(addr, self.reg[addr].value)
        # write R0 one last time
        self.write_reg(0x00, self.reg[0x00].value)
    def write_reg(self, reg_addr, reg_data_int):
        """
        write a single register
        :Parameters:
            reg_addr (int) - register address
            reg_data_int (int) - 16 bit data to write
        """
        # NOTE(review): r_w_n is assigned but never folded into the address
        # byte (unlike read_reg); the write bit is implicitly 0 -- confirm.
        r_w_n = 0
        b = bytearray(0)
        b.extend([reg_addr])
        # Data is sent MSB first.
        ms_byte = (reg_data_int & 0xFF00) >> 8
        ls_byte = (reg_data_int & 0x00FF)
        b.extend([ms_byte])
        b.extend([ls_byte])
        ret = self.dev.channel1.write(b)
        return ret
    def read_reg(self, reg_addr, num_bytes=3):
        """Read one 16-bit register and return it as an int.

        The first transfer byte carries the R/W bit (bit 7 set for read)
        plus the address; two dummy bytes clock out the data.
        NOTE(review): num_bytes is accepted but unused.
        """
        r_w_n = 1
        b = bytearray(0)
        b.extend([(r_w_n << 7) + reg_addr])
        b.extend([0x00])
        b.extend([0x00])
        ret = self.dev.channel1.write_read(b)
        # Reply bytes 1..2 hold the register value, MSB first.
        ret_int = int((ret[1] << 8) + ret[2])
        return ret_int
class Cp2130SpiDevice(object):
    """Convenience wrapper exposing channel1 of the first CP2130 bridge
    found as the `spi` attribute.
    """
    def __init__(self):
        self.dev = cp2130.find()
        self.spi = self.dev.channel1
| true |
323a72b928a9117fd45dfa43076836df299e79ce | Python | sommervold/fourier-rekker | /vector.py | UTF-8 | 739 | 3.3125 | 3 | [] | no_license | import cmath
class Vector:
    """One rotating arrow of a Fourier-series drawing.

    The arrow spins at ``speed`` revolutions per unit time; its length and
    starting phase are given by the complex coefficient ``constant``.
    """

    def __init__(self, speed, constant):
        self.speed = speed
        self.constant = constant
        self.id = -1                    # canvas item id, assigned by create()
        self.pos = complex(0, 0)        # tail of the arrow
        self.endpoint = complex(0, 0)   # tip of the arrow

    def calculate(self, t, pos):
        """Place the arrow tail at *pos*, rotate it to time *t*, return the tip."""
        rotation = cmath.exp(self.speed * 2 * cmath.pi * 1j * t)
        self.pos = pos
        self.endpoint = pos + self.constant * rotation
        return self.endpoint

    def draw(self, canvas):
        """Move the previously created canvas line onto the arrow's position."""
        tail_x, tail_y = self.pos.real, self.pos.imag
        tip_x, tip_y = self.endpoint.real, self.endpoint.imag
        canvas.coords(self.id, tail_x, tail_y, tip_x, tip_y)

    def create(self, canvas):
        """Create the canvas line item that draw() will keep updating."""
        self.id = canvas.create_line(0, 0, 0, 0, width=3)
| true |
b61e6480c14d510c5b82fbade1bb7d266ee24d08 | Python | wanyuks/ner_pytorch | /models.py | UTF-8 | 1,792 | 2.625 | 3 | [] | no_license | # coding: utf-8
import torch
from torch import nn
from torchcrf import CRF
# 定义使用字向量+词向量的lstm模型
class NER(nn.Module):
    """BiLSTM-CRF sequence labeller over concatenated character- and
    word-level embeddings.
    """
    def __init__(self, hidden_size, char_emb, word_emb, tags_num, dropout_rate):
        """
        Args:
            hidden_size: hidden size of each LSTM direction.
            char_emb: pretrained character embedding matrix (tensor).
            word_emb: pretrained word embedding matrix (tensor).
            tags_num: number of output tag labels.
            dropout_rate: dropout probability on LSTM outputs.
        """
        super(NER, self).__init__()
        self.hidden_size = hidden_size
        self.dropout_rate = dropout_rate
        self.tag_num = tags_num
        # index 0 is the padding index in both vocabularies
        self.char_emd = nn.Embedding.from_pretrained(char_emb, freeze=False, padding_idx=0)
        self.word_emd = nn.Embedding.from_pretrained(word_emb, freeze=False, padding_idx=0)
        self.lstm = nn.LSTM((self.char_emd.embedding_dim + self.word_emd.embedding_dim), self.hidden_size,
                            batch_first=True, bidirectional=True)
        self.dp = nn.Dropout(self.dropout_rate)
        self.hidden2tag = nn.Linear(self.hidden_size * 2, self.tag_num)
        self.crf = CRF(self.tag_num, batch_first=True)

    def forward(self, char, word, mask=None):
        """Viterbi-decode the best tag sequence; returns a LongTensor."""
        # BUG FIX: `if not mask:` raises "Boolean value of Tensor is
        # ambiguous" for a multi-element tensor mask; only build the default
        # mask when none was supplied.
        if mask is None:
            mask = torch.ne(char, 0)  # True on non-padding positions
        embedding = torch.cat((self.char_emd(char), self.word_emd(word)), dim=-1)
        outputs, hidden = self.lstm(embedding)
        outputs = self.dp(outputs)
        # emission scores fed to the CRF
        outputs = self.hidden2tag(outputs)
        return torch.LongTensor(self.crf.decode(outputs, mask))

    def log_likelihood(self, char, word, tags, mask=None):
        """Return the negative CRF log-likelihood of *tags* (training loss)."""
        if mask is None:
            mask = torch.ne(char, 0)
        embedding = torch.cat((self.char_emd(char), self.word_emd(word)), dim=-1)
        outputs, hidden = self.lstm(embedding)
        # NOTE(review): dropout is applied *after* the emission projection
        # here but *before* it in forward() — confirm which order is intended.
        outputs = self.hidden2tag(outputs)
        outputs = self.dp(outputs)
        return -self.crf(outputs, tags, mask)
| true |
97c31f220e174ae789c8efb81053e484f72aa463 | Python | SymmetricChaos/NumberTheory | /Rationals/FareySequence.py | UTF-8 | 432 | 3.1875 | 3 | [
"MIT"
] | permissive | from Rationals.RationalType import Rational
def farey_sequence(n):
    """Rationals less than one with denominator less than n"""
    assert type(n) == int
    assert n > 0
    # Seed with 0/1 and 1/n, then produce each successive term from the two
    # previous ones using the standard Farey next-term recurrence.
    seq = [Rational(0, 1), Rational(1, n)]
    while True:
        prev, curr = seq[-2], seq[-1]
        k = (n + prev.d) // curr.d
        num = k * curr.n - prev.n
        den = k * curr.d - prev.d
        # stop once the numerator overshoots the order of the sequence
        if num > n:
            break
        seq.append(Rational(num, den))
    return seq
| true |
745b6bf78b4d70794621565285dc4e44f7c270cd | Python | Gnanender-reddy/python_program | /AlgorithmPrograms/Bubblesort.py | UTF-8 | 732 | 3.6875 | 4 | [] | no_license | """
@Author : P.Gnanender Reddy
@Since : Dec'2019
@Description:This code is for bubbleSort.
"""
from com.bridgelabz.AlgorithmPrograms.util import bubblesort

# Read a list of integers from the user and hand it to bubblesort().
try:
    size = int(input("Enter the list size : "))
    nums = []
    for position in range(0, size):
        print("Enter number at location", position, ":")
        nums.append(int(input()))
    bubblesort(nums)
except ValueError:
    # Any non-integer entry lands here.
    print("enter valid data")
| true |
f6f29c170f4746049efd89a65c1d258068c05910 | Python | HeeZJee/saylaniPython | /assignment_2/answer_2.py | UTF-8 | 235 | 4.125 | 4 | [] | no_license | """Write a program which take input from user and identify that the given
number is even or odd?"""
# Read an integer and report its parity: even numbers leave no remainder
# when divided by two.
num = int(input("Enter any number: "))
parity_message = "You entered even number." if num % 2 == 0 else "You entered odd number."
print(parity_message)
3913ff492c0488bd136c6b2f5bcc126582b75296 | Python | neo20/pystudy | /scrolled_text/draw.py | UTF-8 | 564 | 2.921875 | 3 | [
"MIT"
] | permissive | import pyglet
# Create the application window and a large centered text label.
window = pyglet.window.Window()
label = pyglet.text.Label(u'你好 世界 你好 世界',
                          font_name = '宋体-简',
                          font_size = window.height*0.6,
                          x=window.width//2 , y=window.height//2+ 50,
                          anchor_x='center', anchor_y='center')
@window.event
def on_draw():
    # redraw handler: clear the frame, then paint the label
    window.clear()
    label.draw()
def update(dt):
    # scroll the label left at 200 px/s; wrap once it is far off-screen
    label.x -= dt * 200
    if (label.x) < -1000:
        label.x = 0
    print(dt)
# tick update() every 20 ms, then enter the pyglet main loop (blocks)
pyglet.clock.schedule_interval(update, 0.02)
pyglet.app.run()
| true |
a009839c80c737876e176943ef5e982c23b69a0a | Python | ZordoC/How-to-Think-Like-a-ComputerScientist-Learning-with-Pytho3-and-Data-Structures | /Chapter3_experimentation.py | UTF-8 | 733 | 3.328125 | 3 | [] | no_license | import turtle
# Set up the drawing window.
window = turtle.Screen()
window.bgcolor('lightgreen')
window.title('2nd GUI')
alex = turtle.Turtle()
alex.color("hotpink")
alex.speed(1)
# Draw a square, one color for all four sides.
for i in range(4):
    alex.forward(50)
    alex.left(90)
# Draw a second square with a different edge color per side.
colors = ["yellow", "red", "purple", "blue"]
for color in colors:
    alex.color(color)
    alex.forward(50)
    alex.left(90)
alex.shape("turtle")
alex.penup()
alex.forward(100)
alex.pendown()
alex.penup()
# Stamp a growing spiral of turtle shapes (pen stays up, so only stamps).
size=20
for _ in range(30):
    alex.stamp()
    size = size + 3
    alex.forward(size)
    alex.right(24)
alex.color('green')
# Blocks until the window is closed; the code below only runs afterwards.
window.mainloop()
# Collatz (3n + 1) sequence from a large seed: print each term,
# comma-separated, until the value reaches 1.
n = 1027371
while n != 1:
    print(n, end=", ")
    n = n // 2 if n % 2 == 0 else n * 3 + 1
print(n, end=".\n")
| true |
9681b7475b01848ca322a50f00a0e0ec5a307b79 | Python | bobmayuze/RPI_Education_Material | /CSCI_1100/Week_3/Lab_2/Check_Point_3.py | UTF-8 | 228 | 3.328125 | 3 | [] | no_license | Four_Letter_word = input('Enter a four letter word:')
# Frame the word read above in an asterisk box sized to its length.
Number_Of_Asterik = len(Four_Letter_word)
box_edge = '***' + '*' * Number_Of_Asterik + '***'
print(box_edge)
print('**', Four_Letter_word, '**')
print(box_edge)
58a0cad0c9df2ef660b2191d4611a82b3bbc857a | Python | ivanmilevtues/merinjei_bot | /merinjeiweb/merinjei_classification/preprocess/PreprocessQuestions.py | UTF-8 | 2,381 | 2.609375 | 3 | [
"MIT"
] | permissive | import re
import nltk
import pickle
import numpy as np
from collections import Counter
from merinjei_classification.preprocess.PreprocessData import PreprocessData
from merinjei_classification.preprocess.decorators import not_none
class PreprocessQuestions(PreprocessData):
    """Preprocessor for question-classification data.

    Builds a bag-of-words + POS-tag vocabulary from labelled question files
    and converts each question into a feature-count array whose label index
    comes from `self.labels`.
    """
    def __init__(self, sub_dirs: list, file_names: list,
                 main_dir='merinjei_classification/data'):
        super().__init__(sub_dirs, file_names, main_dir)
        # question classes, in label-index order
        self.labels = ['ABBR', 'DESC', 'PROCEDURE', 'HUM', 'LOC', 'NUM']

    def init_features(self):
        """Scan every input file and collect the token + POS-tag vocabulary."""
        files = self.open_files(self.paths)
        # split on runs of chars that are not alphanumeric, '_' or apostrophe
        pattern = r'([^a-zA-Z0-9_\'])+'
        features = set()
        for file in files:
            lines = file.readlines()
            for line in lines:
                tokens = re.split(pattern, line)[1:]  # we take everything without the label
                tokens = list(filter(None, tokens))
                tags = [pos for _, pos in nltk.pos_tag(tokens)]
                features.update(self._reduce_tokens(tokens))
                features.update(tags)
        self.close_files(files)
        self.features = list(features)
        return self.features

    @not_none('features')
    def init_dataset(self):
        """Turn every labelled question line into a feature-count array."""
        files = self.open_files(self.paths)
        pattern = r'([^a-zA-Z0-9_\'])+'
        dataset = []
        for file in files:
            lines = file.readlines()
            for line in lines:
                tokens = re.split(pattern, line)
                label, tokens = tokens[0], tokens[1:]
                label = self._pick_label(label)
                if label is None:
                    # line's label is not one of the known classes — skip it
                    continue
                tokens = list(filter(None, tokens))
                pos_tags = [pos for _, pos in nltk.pos_tag(tokens)]
                tokens += pos_tags
                tokens = Counter(tokens)
                dataset.append(self._words_to_array(tokens, label))
        self.close_files(files)
        self.dataset = np.array(dataset)
        return self.dataset

    @not_none('labels')
    def save_labels(self, file="data/processed_data/question_labels.pickle"):
        """Pickle the label list to *file*.

        BUG FIX: the original called open('file', 'wb') — the literal path
        'file' — instead of the value of the *file* argument.
        """
        with open(file, 'wb') as f:
            pickle.dump(self.labels, f)

    def _pick_label(self, label):
        """Return the index of the first known class contained in *label*,
        or None when no class matches (implicit)."""
        for indx in range(len(self.labels)):
            if self.labels[indx] in label:
                return indx
| true |
0103fdbb5d11b0c150d8acda2cc1b1373e7fa62d | Python | stefanosimao/ML-Project | /3/main_final.py | UTF-8 | 1,566 | 2.875 | 3 | [] | no_license | import numpy as np
import csv
from UtilityFunctions import ReadData_char
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
# Load raw character features / labels via the project helper.
train_features, train_labels, test_features = ReadData_char()
# Ordinal-encode the categorical features, then one-hot encode the ordinals.
ordinalEncoder = OrdinalEncoder(dtype= np.int64)
ordinalEncoder.fit(train_features)
train_enc = ordinalEncoder.transform(train_features)
test_enc = ordinalEncoder.transform(test_features)
one = OneHotEncoder()
# NOTE(review): the one-hot encoder is fit on training data only —
# transform() will raise on categories seen only in the test set; confirm
# the data guarantees this cannot happen.
one.fit(train_enc)
train_enc = one.transform(train_enc).toarray()
test_enc = one.transform(test_enc).toarray()
def neuralNet():
    """Standardize the encoded features and fit an MLP classifier.

    Reads the module-level globals train_enc / test_enc / train_labels and
    returns the predicted labels for the test set.
    """
    std_scaler = StandardScaler()
    std_scaler.fit(train_enc)
    X_train_trsf = std_scaler.transform(train_enc)
    X_test_trsf = std_scaler.transform(test_enc)
    # single hidden layer of 1000 units; fixed random_state for reproducibility
    mlpclass = MLPClassifier(hidden_layer_sizes=(1000), verbose=10, max_iter=1000, tol=0.000001, alpha=0.000001, batch_size='auto', random_state=10)
    mlpclass.fit(X_train_trsf, train_labels)
    y_pred = mlpclass.predict(X_test_trsf)
    return y_pred
y_pred = neuralNet()

# Collect the predictions as one-column rows for csv.writerows().
output_data = [[label] for label in y_pred]
#we return the data to the csv file
filename = "result_final_SGS.csv"
# writing to csv file
# BUG FIX: newline='' is required when handing a file to csv.writer;
# without it every row is followed by a blank line on Windows.
with open(filename, 'w', newline='') as csvfile:
    # creating a csv writer object
    csvwriter = csv.writer(csvfile)
    # writing the data rows
    csvwriter.writerows(output_data)
| true |
62ef359ef8782b637bc2726b1a041d03e13448f2 | Python | PaulSabia/Promesse_de_don | /app.py | UTF-8 | 3,537 | 2.59375 | 3 | [] | no_license | from flask import Flask, render_template, request, url_for, flash, redirect
from pymongo import MongoClient
import pymongo
import datetime
#from connecteur import Connecteur
class Connecteur:
    """Class-level helper wrapping the MongoDB 'Dons' donations collection.

    Every operation opens a fresh connection and closes it when done.
    """

    @classmethod
    def connection(cls):
        """Open the client and bind the PromesseDeDon.Dons collection."""
        # SECURITY: credentials are hard-coded in the URI — move them into
        # configuration / environment variables.
        cls.client = MongoClient("mongodb+srv://user:user@promessededon.sw4vx.mongodb.net/database?retryWrites=true&w=majority")
        cls.db = cls.client.PromesseDeDon
        cls.col = cls.db.Dons

    @classmethod
    def deconnection(cls):
        """Close the MongoDB client."""
        cls.client.close()

    @classmethod
    def insertion(cls, post):
        """Insert one donation document."""
        # TODO (from original, translated): also insert the date and time
        cls.connection()
        cls.col.insert_one(post)
        cls.deconnection()

    @classmethod
    def get_db(cls):
        """Return all donation documents as a list."""
        cls.connection()
        result = list(cls.col.find())
        # BUG FIX: the original referenced `cls.deconnection` without calling
        # it, so this path never closed the connection.
        cls.deconnection()
        return result

    @classmethod
    def somme_donation(cls):
        """Return the aggregate total of all donations (single document)."""
        cls.connection()
        somme = list(cls.col.aggregate([{'$group': {'_id':'null','montant':{'$sum': '$montant'}}}]))
        somme = somme[0]
        cls.deconnection()
        return somme

    @classmethod
    def get_info(cls, prenom, nom):
        """Return the first donation document for the given donor."""
        cls.connection()
        info = list(cls.col.find({'prenom': prenom, 'nom': nom}))
        info = info[0]
        cls.deconnection()
        return info

    @classmethod
    def get_donation_user(cls, prenom, nom):
        """Return the donation total for one donor."""
        cls.connection()
        dons = list(cls.col.aggregate([{'$match':{'$and':[{'prenom':prenom},{'nom':nom}]}},{'$group': {'_id':'null','montant':{'$sum': '$montant'}}}]))
        cls.deconnection()
        return dons[0]
# APPLICATION
app = Flask(__name__)
# SECURITY: secret key is hard-coded; load it from the environment instead.
app.config['SECRET_KEY'] = 'madriz'
@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/form', methods=('GET','POST'))
def form():
    """Donation form: validate required fields and insert the pledge."""
    if request.method == 'POST':
        prenom = request.form['prenom']
        nom = request.form['nom']
        mail = request.form['mail']
        telephone = request.form['telephone']
        montant = request.form['montant']
        # NOTE(review): `nom` is not part of the required-field check below —
        # confirm whether the last name is meant to be optional.
        if not prenom or not mail or not telephone or not montant:
            flash('Champs manquant requis !')
        else:
            # store the submission timestamp with the pledge
            date = datetime.datetime.now()
            # date = f"{date.day}/{date.month}/{date.year} - {date.hour}:{date.minute}"
            post = {'prenom': prenom, 'nom': nom, 'mail':mail, 'telephone':telephone, 'montant':int(montant), "date": date}
            Connecteur.insertion(post)
            return redirect(url_for('index'))
    return render_template('form.html')
@app.route('/historique')
def historique():
    """Show the full donation history plus the running total."""
    donnateurs = Connecteur.get_db()
    # donnateurs['date'] = f"{donnateurs['date'].day}/{donnateurs['date'].month}/{donnateurs['date'].year} - {donnateurs['date'].hour}:{donnateurs['date'].minute}"
    somme = Connecteur.somme_donation()
    return render_template('historique.html', donnateurs=donnateurs, somme=somme)
@app.route('/<prenom>/<nom>/admin', methods=('GET','POST'))
def admin(prenom, nom):
    """Admin login; on success show the donor's details and donation total."""
    if request.method == 'POST':
        user = request.form['user']
        passwd = request.form['passwd']
        # SECURITY: hard-coded plaintext credentials — replace with a real
        # authentication mechanism before deployment.
        if user=='admin' and passwd=='admin':
            info = Connecteur.get_info(prenom, nom)
            dons = Connecteur.get_donation_user(prenom, nom)
            return render_template('info.html', info=info, dons=dons)
    # GET requests and failed logins fall through to the login page
    return render_template('admin.html')
if __name__ == '__main__':
    # Development server only — disable debug in production.
    app.run(debug=True)
| true |
64bf1af55768513cec528655e46992255f188f67 | Python | AndreisSirlene/Python-Exercises-Curso-em-Video-World-1-2-and-3 | /World1/Challenge010.py | UTF-8 | 184 | 3.453125 | 3 | [] | no_license | n=int(input('This is how much I have in my wallet now in Real (R$): '))
# NOTE(review): the message states an exchange rate of $1 = 3.27 but the
# computation divides by 5.77 — confirm which rate is intended.
print('with this amount of R${} I can buy only ${:.2f}, as the exchange rate today is $1=3.27'.format(n,n/5.77))
b5a60cbdf924f5794fd97495f6555e121cec5374 | Python | kimberlynestor/MGH | /tnc_laminar/thick/vec_funcs.py | UTF-8 | 27,725 | 2.734375 | 3 | [] | no_license | """
Name: Kimberly Nestor
Date: 07/2021
Description: This module is to support plot.str_vec.py and
compute_strvec_thickcsv.py, where the goal is to plot straight line vectors.
Energy minimization is used to determine the optimal place for the vectors on
the pial and gwb surface. The function vec_coords is based on Bruce and Marty's
algorithm, found in the paper below. Edge detection is used to obtain all
coordinates along the labelled laminar boundaries.
(Fischl and Sereno, 2018)
https://www.sciencedirect.com/science/article/pii/S1053811918300363
"""
import os
import sys
import csv
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2 #opencv-python
from sklearn.cluster import KMeans
import sklearn.metrics as sk
from sklearn.preprocessing import StandardScaler, LabelEncoder
import matplotlib.transforms as transforms
import matplotlib.image as mpimg
from PIL import Image
import warnings
# pd.set_option("display.max_rows", None, "display.max_columns", None)
warnings.filterwarnings("ignore")
def layer_coords(label_lst): #full path
    """This function takes as input a list of paths to the manual layer labels.
    Uses edge detection to determine all coordinates along the line. kMeans
    to cluster our unwanted points. Returns dict of coords for each layer.

    Returns a dict mapping layer index to an array/list of [x, y] pixel
    coordinates taken from an intermediate matplotlib rendering of the
    label curves (per the inline comments: 0 == pial on the fundus branch,
    0 == GWB on the crown branch).
    """
    # If every label path mentions "fundus", take the fundus-specific branch.
    #if a fundus then do this block
    gyrus_check = all(i.__contains__("fundus") for i in label_lst)
    if gyrus_check:
        for layer in label_lst:
            #read data
            df_layer = pd.read_csv(layer)
            df_layer = df_layer.iloc[1:,0]
            # each row is a whitespace-separated record; keep only X, Y, Z
            df_layer = pd.DataFrame( [list(map(float, i)) for i in [list(i.split()) for i in \
                df_layer.values]], columns=['idk1', 'X', 'Y', 'Z', 'idk2'])[['X', 'Y', 'Z']]
            #compute slope
            yvals = [(y2 - y1) for y1, y2 in zip(df_layer['Y'], df_layer['Y'][1:])]
            xvals = [(x2 - x1) for x1, x2 in zip(df_layer['X'], df_layer['X'][1:])]
            layer_slope = [round(i,2) for i in np.divide(yvals, xvals)]
            #split lam label into three
            split = math.floor(len(df_layer['X'].values)/3)
            df_layer_right = df_layer[0:split]
            df_layer_left = df_layer[-split:]
            df_layer_middle = df_layer[split:-split]
            # accumulate every layer curve on the same figure
            plt.plot(df_layer['X'], df_layer['Y'], lw=3) #color='#000000'
            # plt.plot(df_layer['X'], df_layer['Y'], linewidth=1, marker='o', markersize=5)
        # save the composite figure so OpenCV can run edge detection on it
        plt.axis('off')
        plt.savefig('layer_contour.png')
        # plt.show()
        plt.close()
        #read, convert to grayscale, find edges
        layer_img = cv2.imread('layer_contour.png')
        layer_img_grey = cv2.cvtColor(layer_img, cv2.COLOR_BGR2GRAY)
        layer_edges = cv2.Canny(layer_img_grey, 30, 200)
        #find contours
        contours, hierachy = cv2.findContours(layer_edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        # cv2.imshow('contour', layer_edges)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()
        #order contours
        # hard-coded ordering of the detected contours into anatomical order
        contours = [np.squeeze(i) for i in contours]
        df_contours = pd.DataFrame(contours)
        contours_ord = df_contours.loc[6].values, df_contours.loc[4].values, \
            df_contours.loc[3].values, df_contours.loc[2].values, \
            df_contours.loc[1].values, df_contours.loc[0].values, \
            df_contours.loc[5].values
        contours_ord = np.squeeze(contours_ord)
        #plot all layers and add coordinate data to dict
        lay_coords_dict = {}
        for laycon, i in zip(contours_ord, list(range(len(contours)))):
            #split coordinates into top and bottom edge
            if i == 0: #0 == pial
                c_idx = int(np.floor(len(laycon)/2))
                coords_top = np.array(list(reversed(laycon[:c_idx])))
                # trim a few points off the contour end to drop artifacts
                lay_coords_dict[i] = coords_top[10:]
                # print(coords_top)
            else:
                c_idx = int(np.floor(len(laycon)/2))
                coords_top = np.array(list(reversed(laycon[c_idx:])))
                lay_coords_dict[i] = coords_top[5:-7]
        #plot coords
        # for key, val in lay_coords_dict.items():
        #     plt.plot([i[0] for i in val], [i[1] for i in val], lw=1.75)
        # plt.gca().invert_yaxis()
        # plt.show()
        # plt.close()
        # sys.exit()
        #delete edge detect image and return dict
        rm_img_cmd = "rm layer_contour.png"
        os.system(rm_img_cmd)
        return(lay_coords_dict)
    #for crown data do this block
    else:
        for layer in label_lst:
            #read data
            df_layer = pd.read_csv(layer)
            df_layer = df_layer.iloc[1:,0]
            df_layer = pd.DataFrame( [list(map(float, i)) for i in [list(i.split()) for i in \
                df_layer.values]], columns=['idk1', 'X', 'Y', 'Z', 'idk2'])[['X', 'Y', 'Z']]
            #compute slope
            yvals = [(y2 - y1) for y1, y2 in zip(df_layer['Y'], df_layer['Y'][1:])]
            xvals = [(x2 - x1) for x1, x2 in zip(df_layer['X'], df_layer['X'][1:])]
            layer_slope = [round(i,2) for i in np.divide(yvals, xvals)]
            #split lam label into three
            split = math.floor(len(df_layer['X'].values)/3)
            df_layer_right = df_layer[0:split]
            df_layer_left = df_layer[-split:]
            df_layer_middle = df_layer[split:-split]
            plt.plot(df_layer['X'], df_layer['Y'], lw=3) #color='#000000', lw=5
            # plt.plot(df_layer['X'], df_layer['Y'], linewidth=1, marker='o', markersize=5)
        plt.axis('off')
        plt.savefig('layer_contour.png')
        # plt.show()
        plt.close()
        #read, convert to grayscale, find edges
        layer_img = cv2.imread('layer_contour.png')
        layer_img_grey = cv2.cvtColor(layer_img, cv2.COLOR_BGR2GRAY)
        layer_edges = cv2.Canny(layer_img_grey, 30, 200)
        #find contours
        contours, hierachy = cv2.findContours(layer_edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        # cv2.imshow('contour', layer_edges)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()
        #plot all layers and add coordinate data to dict
        lay_coords_dict = {}
        for laycon, i in zip(contours, list(range( len(contours) ) )[::-1] ):#7
            #split coordinates into top and bottom edge
            # print(laycon)
            coords_lst = [list(ii) for i in laycon for ii in i] # 0 == GWB
            # print(coords_lst)
            # keep the first and last quarter of the contour, each reversed
            c_split = math.floor(len(coords_lst)/4)
            coords_top = coords_lst[:c_split][::-1] + coords_lst[-c_split:][::-1]
            lay_coords_dict[i] = coords_top
            df_coords = pd.DataFrame(coords_top, columns=['X', 'Y'])
            # print(df_coords)
            #plot using all coordinates
            plt.plot(df_coords['X'].values, df_coords['Y'].values, lw=3)
        plt.gca().invert_yaxis()
        # plt.show()
        plt.close()
        # use k means to get rid of extra coords on short lines
        for i in list(range(1,6)):
            # kMEANS clustering, separate short line bottom half
            df_short = pd.DataFrame(lay_coords_dict[i], columns=['X', 'Y']) #1=L1,
            # plt.scatter( df_short['X'].values, df_short['Y'].values, s=5 )
            # plt.gca().invert_yaxis()
            # plt.show()
            #scale data
            scaler = StandardScaler()
            scaler.fit( df_short[['X', 'Y']].values )
            short_scale = scaler.transform( df_short[['X', 'Y']].values )
            init = np.array([[0.514, -0.629], [-1.101, 1.344]])
            #predict
            # kmeans_classifier = KMeans(n_clusters=2, init=init) #fixed centroids
            kmeans_classifier = KMeans(n_clusters=2)
            y_kmeans = kmeans_classifier.fit_predict(short_scale)
            centroids = kmeans_classifier.cluster_centers_
            inertia = kmeans_classifier.inertia_
            #update df
            df_short.insert(2, column='kClass', value=y_kmeans)
            #df scaled
            df_short_scale = pd.DataFrame(short_scale, columns=['X', 'Y'])
            df_short_scale.insert(2, column='kClass', value=y_kmeans)
            """
            #plot data points for k means, clusters
            colmap = {0: '#029386', 1: '#D2691E', 2: '#A52A2A'}
            for i in range(2):
                new_df = df_short_scale[df_short_scale['kClass']==i]
                plt.scatter(new_df['X'].values, new_df['Y'].values, s=20, \
                    label='cluster' + str(i+1), color=colmap[i])
            #plot centroids
            for i in range (2):
                plt.scatter(centroids[i][0], centroids[i][1], marker='x', s=500, \
                    label='centroid' + str(i+1), color=colmap[i])
            plt.legend()
            plt.gca().invert_yaxis()
            plt.show()
            """
            #new df for clean data, take centroid with more data points
            num_class0 = len(df_short[df_short['kClass']==0])
            num_class1 = len(df_short[df_short['kClass']==1])
            if num_class0 > num_class1:
                df_short_clean = df_short[df_short['kClass']==0]
                lay_coords_dict[i] = [[i,j] for i,j in zip(df_short_clean['X'].values,\
                    df_short_clean['Y'].values)]
            else:
                df_short_clean = df_short[df_short['kClass']==1]
                lay_coords_dict[i] = [[i,j] for i,j in zip(df_short_clean['X'].values,\
                    df_short_clean['Y'].values)]
            #plot clean short line
            # plt.scatter(df_short_clean['X'].values, df_short_clean['Y'].values, s=20)
            # plt.gca().invert_yaxis()
            # plt.show()
        #delete edge detect image and return dict
        rm_img_cmd = "rm layer_contour.png"
        os.system(rm_img_cmd)
        return(lay_coords_dict)
def vec_coords(label_coords, LAMBDA=1, spacing=1):
    """ This function takes as input a dictionary of coordinates for each layer \
    (output from label_coords). The function uses the pial and gwb coordinates \
    to plot straight vectors using energy minimization to determine the optimal \
    point on the gwb to plot a vector from point x on the pial surface. This is \
    a 2D version of the algorithm Bruce and Marty published.
    LAMBDA: determines how clumped together the vectors are
    spacing: determines how far apart vectors are """
    #LAMBDA = 1
    #SPACING = 8
    SPACING = spacing
    # layer 0 = pial surface, layer 6 = grey/white boundary (GWB)
    coords_pial = np.array(label_coords[0])
    coords_gwb = np.array(label_coords[6]) #[::SPACING]
    ##### Normal Vector Pial
    #derivatives and velocity
    x_der = np.gradient(coords_pial[:,0])
    y_der = np.gradient(coords_pial[:,1]) #col slicing, R, np.array, [:,0]
    velo = np.array([[x_der[i], y_der[i]] for i in range(x_der.size)])
    #displacement, tangent
    displ = np.sqrt( x_der * x_der + y_der * y_der ) #speed, time
    tang = np.array([1/displ] *2 ).transpose() * velo
    #outward point surface normal, from tang flip, make first neg, opv
    pial_normal = [ [y*-1, x] for x, y in zip(tang[:,0], tang[:,1]) ]
    ##### Normal Vector GWB
    #derivatives and velocity
    x_der = np.gradient(coords_gwb[:,0])
    y_der = np.gradient(coords_gwb[:,1])
    velo = np.array([[x_der[i], y_der[i]] for i in range(x_der.size)])
    #displacement, tangent
    displ = np.sqrt( x_der * x_der + y_der * y_der )
    tang = np.array([1/displ] *2 ).transpose() * velo
    #outward point surface normal, owv
    gwb_normal = [ [y*-1, x] for x, y in zip(tang[:,0], tang[:,1]) ]
    plot_coords_lst = []
    used_energy_lst = []
    ##### FIND ENERGY
    # for each coord on the pial surface, x
    for x in range(len(coords_pial)):
        pial = coords_pial[x]
        #find vector pial to gwb, unit length, tv
        # the first pial point searches the whole GWB; later points resume
        # from the previously chosen GWB index (used_energy_lst)
        if x == 0:
            min_energy = []
            normal_term_lst = []
            vec_dist_lst = []
            parallel_term_lst = []
            vec_dist_lst = []
            for v in range(len(coords_gwb)):
                #find vector distance from pial to gwb
                gwb = coords_gwb[v]
                vec_pial_gwb = np.array(gwb) - np.array(pial)
                vec_mag = np.array(vec_pial_gwb[0]**2 + vec_pial_gwb[1]**2)
                unit_vec_dist = vec_pial_gwb/vec_mag
                vec_dist_lst.append(unit_vec_dist)
                #find dot product for tv and owhite, tv and opial
                dot_prod1 = np.dot(vec_dist_lst[v], gwb_normal[v])
                dot_prod2 = np.dot(vec_dist_lst[v], pial_normal[x])
                #normal term for each v
                normal_term_v = (1 - np.abs(dot_prod1)) + (1 - np.abs(dot_prod2))
                normal_term_lst.append(normal_term_v)
                #parallel term for each v
                # if x == 0:
                #find dot product, using self distance
                dot_prod3 = np.dot(vec_dist_lst[v], vec_dist_lst[v])
                parallel_term_v = (1 - np.abs(dot_prod3))
                parallel_term_lst.append(parallel_term_v)
            #energy, no summation
            ind_energy = list(enumerate(np.array([((1-LAMBDA)*n) + (LAMBDA*p) for n, p in \
                zip(normal_term_lst, parallel_term_lst)]).T))
            #find local minima energy
            # scan until the energy starts rising: first local minimum
            for i in range(len(ind_energy)):
                curr = ind_energy[i]
                fut = ind_energy[i+1]
                if fut[1] > curr[1]:
                    min_energy.append(curr)
                    used_energy_lst.append(curr)
                    break
            # append coordinates to plot straight vector from pial to gwb, min energy
            gwb_idx = min_energy.pop()[0]
            # gwb_idx = min_energy[-1][0]
            plot_coords_lst.append([pial, list(coords_gwb[gwb_idx])])
        elif x > 0:
            min_energy = []
            normal_term_lst = []
            vec_dist_lst = []
            parallel_term_lst = []
            vec_dist_lst = []
            # used_start = int(used_energy_lst[-1][0])+20
            used_start = used_energy_lst[-1][0]
            for v in list( range(used_start, len(coords_gwb)-1) ):
                #find vector distance from pial to gwb
                gwb = coords_gwb[v]
                vec_pial_gwb = np.array(gwb) - np.array(pial)
                vec_mag = np.array(vec_pial_gwb[0]**2 + vec_pial_gwb[1]**2)
                unit_vec_dist = vec_pial_gwb/vec_mag
                vec_dist_lst.append(unit_vec_dist)
                #find dot product for tv and owhite, tv and opial
                dot_prod1 = np.dot(vec_dist_lst[-1], gwb_normal[v])
                dot_prod2 = np.dot(vec_dist_lst[-1], pial_normal[x])
                #normal term for each v
                normal_term_v = (1 - np.abs(dot_prod1)) + (1 - np.abs(dot_prod2))
                normal_term_lst.append(normal_term_v)
                #parallel term for each v
                #find dot product, using neighbour vector distance
                knear_vec_dist = np.array(plot_coords_lst[-1][1]) - np.array(plot_coords_lst[-1][0])
                dot_prod3 = np.dot(vec_dist_lst[-1], knear_vec_dist)
                parallel_term_v = (1 - np.abs(dot_prod3))
                parallel_term_lst.append(parallel_term_v)
            #energy, no summation
            ind_energy = list( enumerate(np.array([ ((1-LAMBDA)*n) + (LAMBDA*p) for n, p in \
                zip(normal_term_lst, parallel_term_lst)]).T, used_energy_lst[-1][0])) #v
            #find local minima energy, and associated coordinate
            for i in range(len(ind_energy)):
                try:
                    curr = ind_energy[i]
                    fut = ind_energy[i+1]
                except(IndexError):
                    continue
                if fut[1] > curr[1]:
                    min_energy.append(curr)
                    used_energy_lst.append(curr)
                    # print("curr energy = ", curr)
                    break
            try:
                gwb_idx = min_energy.pop()[0] #+ 20 #atleast deltaX apart
                plot_coords_lst.append([pial, list(coords_gwb[gwb_idx])])
                # print("energy coordinates = ", list( map(list, [pial, coords_gwb[gwb_idx]])) )
            except(IndexError):
                continue
    """
    #encourage atleast one space between each end point coordinate
    energy_idx = [i[0] for i in used_energy_lst]
    new_energy_idx = []
    energy_idx_cp = energy_idx.copy()
    count = 0
    same_count = 0
    # loop to remove repeat indices, makes list two short
    while count < len(energy_idx):
        energy_concat = []
        i = count
        curr = energy_idx_cp[i]
        if energy_idx_cp[i] not in new_energy_idx:
            new_energy_idx.append(curr)
            same_count = 0
        else:
            energy_idx_cp = energy_idx_cp[:i] + list((np.array(energy_idx_cp[i:]) \
                + same_count))
            same_count+=1
        count+=1
    """
    #encourage even space between each end point coordinate
    # replace the chosen GWB indices with evenly spaced ones over the same span
    energy_idx = [i[0] for i in used_energy_lst]
    new_energy_idx = list(map(math.floor , np.linspace(energy_idx[0] , \
        len(coords_gwb[energy_idx[0]: len(coords_gwb)]), num=len(energy_idx))))
    # new_plot_coords_lst = [[list(i[0]), list(coords_gwb[j])] for i, j in \
    #     zip(plot_coords_lst, new_energy_idx)]
    new_plot_coords_lst = []
    for i, j in zip(plot_coords_lst, new_energy_idx):
        try:
            pial_gwb_plot = [list(i[0]), list(coords_gwb[j])]
            new_plot_coords_lst.append(pial_gwb_plot)
        except(IndexError):
            continue
    #space vectors according to SPACING var
    new_plot_coords_lst = new_plot_coords_lst[::SPACING]
    return(new_plot_coords_lst)
def vec_thick(layer_coords, vector_coords, tissue_img):
    """ This function takes as input coordinates for the straight vectors
    and a path to the original tissue image from the microscope the layers
    boundaries are based on. This image is used to determine the distance of
    the vectors and returns a list of distances in mm corresponding to the
    original input vector coordinates.

    Note: the *layer_coords* parameter shadows the module-level function of
    the same name; here it is the dict that function returns.
    """
    # print('vector_coords = ', vector_coords)
    vec_euc_lst = []
    #plot layers
    for layer in layer_coords:
        plt.plot([i[0] for i in layer_coords[layer]], [i[1] for i in layer_coords[layer]], \
            lw=2.5, c='#964000') #7F50CD
    #plot ind straight vectors
    for coords in vector_coords:
        try:
            fig = plt.plot([i[0] for i in coords], [i[1] for i in coords], c="#767676", \
                alpha=1, lw=1.2)#767676
        except(IndexError):
            continue
        # Euclidean length of the vector in plot units
        euc = np.linalg.norm(np.array(coords[0]) - np.array(coords[1]))
        vec_euc_lst.append(euc)
    # print('vec_euc_lst = ', vec_euc_lst)
    #find bbox euc distance
    # width of the current axes' x-range, used as the px->mm reference
    vec_xlims = plt.gca().get_xlim()
    vecx_euc_dist = np.linalg.norm( np.array(vec_xlims)[0] - np.array(vec_xlims)[1] )
    plt.gca().invert_yaxis()
    # plt.show()
    plt.close()
    #plot tissue img
    tiss_img = plt.imshow(mpimg.imread(tissue_img))
    plt.gca().invert_yaxis()
    # plt.show()
    plt.close()
    #get tiss_img dimensions, find distance in mm, 4x microscope 1px = 1.85um
    img_px_dim = Image.open(tissue_img).size
    img_mm_dim = (img_px_dim[0] * 1.85) / 1000
    """
    #size of axes in pixels *dpi, 4.96 3.696
    bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    print("bbox in inches = ", bbox.width, bbox.height)
    #grayscale
    img = Image.open(tissue_img).convert("L")
    arr = np.asarray(img)
    plt.imshow(arr, cmap='gray', vmin=0, vmax=255)
    plt.gca().invert_yaxis()
    plt.show()
    """
    #euc to mm conversion
    # scale each vector length by (image width in mm) / (axes width in plot units)
    euc_mm_unit = img_mm_dim / vecx_euc_dist
    vec_mm_dist_lst = [euc * euc_mm_unit for euc in vec_euc_lst]
    return(vec_mm_dist_lst)
def roi_vecs_v1(vec_coords, vec_thick, region):
    """This function takes as input the coordinates for straight vectors
    and the associated thickness values from the pial surface to the gwb.
    The function then returns the coordinates of the vectors only in our
    small ROI, where the crown and fundus are closest to a slope of 0.

    Note: the *vec_thick* parameter shadows the module-level function of the
    same name; here it is the list of mm distances that function returns.
    NOTE(review): if the thresholding loops below never match, `vec_start`
    or `vec_stop` is unbound and slicing raises NameError — confirm the
    input gradients always satisfy the thresholds.
    """
    if region =='fundus':
        #find gradients for thickness vectors
        vec_thick_gradients = list(np.gradient(vec_thick))
        min_val, min_idx = min(vec_thick_gradients), vec_thick_gradients.\
            index(min(vec_thick_gradients))
        vec_thick_grad_enum = list(enumerate(np.gradient(vec_thick)))
        """
        #plot gradient descent curve
        for i in vec_thick_grad_enum:
            print(i)
        plt.plot(vec_thick_gradients, marker='o', ms=2.5)
        plt.show()
        """
        #use gradient descent to find start and stop vectors
        # walk left from the minimum while the gradient stays near min_val
        for i in reversed(vec_thick_grad_enum[:min_idx]):
            if i[1] < min_val + 0.01:
                vec_start = i
            elif i[1] > min_val + 0.01:
                break
        # walk right from the minimum with a looser tolerance
        for i in vec_thick_grad_enum[min_idx:]:
            if i[1] < min_val + 0.08:
                vec_stop = i
            elif i[1] > min_val + 0.08:
                break
        #vector coordinates and thickness values in roi
        roi_vec_coords = vec_coords[vec_start[0]:vec_stop[0]+1]
        roi_vec_thick = vec_thick[vec_start[0]:vec_stop[0]+1]
        return roi_vec_coords, roi_vec_thick
    elif region =='crown':
        #find gradients for thickness vectors
        vec_thick_gradients = list(np.gradient(vec_thick))
        min_val, min_idx = min(vec_thick_gradients), vec_thick_gradients.\
            index(min(vec_thick_gradients))
        vec_thick_grad_enum = list(enumerate(np.gradient(vec_thick)))
        """
        #plot gradient descent curve
        for i in vec_thick_grad_enum:
            print(i)
        plt.plot(vec_thick_gradients, marker='o', ms=2.5)
        plt.show()
        """
        #threshold out lines outside of roi
        # keep vectors whose rounded gradient falls in [0.05, 0.3]
        roi_grad_lst = np.unique([round(i,2) for i in np.linspace(0.05, 0.3, 60)])
        roi_thres_lst = []
        for i in vec_thick_grad_enum:
            if round(i[1], 2) in roi_grad_lst:
                roi_thres_lst.append(i)
        #vector coordinates and thickness values in roi
        vec_start = roi_thres_lst[0]
        vec_stop = roi_thres_lst[-1]
        roi_vec_coords = vec_coords[vec_start[0]:vec_stop[0]+1]
        roi_vec_thick = vec_thick[vec_start[0]:vec_stop[0]+1]
        return roi_vec_coords, roi_vec_thick
def roi_vecs(layer_coords, vec_coords, region):
    """Restrict the straight pial-to-GWB vectors to the small ROI.

    Parameters
    ----------
    layer_coords : sequence
        Per-boundary coordinate lists; layer_coords[i][0][0] and
        layer_coords[i][-1][0] are the first/last x of boundary i.
        Boundaries 1-4 define the ROI window.
    vec_coords : sequence
        Vectors as (start, end) coordinate pairs; vec[0][0] is the
        starting x position of the vector.
    region : str
        Either 'crown' or 'fundus'.

    Returns
    -------
    list
        The vectors whose starting x falls inside the ROI window, or
        None for an unrecognised region (preserves original behaviour).
    """
    # Both regions bound the ROI with the same four layer boundaries, so
    # compute the shared window edges once instead of per-branch.
    start_x = max(layer_coords[i][0][0] for i in range(1, 5))
    stop_x = min(layer_coords[i][-1][0] for i in range(1, 5))
    if region == 'crown':
        window = range(start_x, stop_x + 5)
    elif region == 'fundus':
        # For a fundus the boundaries run the other way, so the window
        # bounds are reversed (may be empty when stop_x >= start_x).
        window = range(stop_x, start_x)
    else:
        return None
    return [vec for vec in vec_coords if vec[0][0] in window]
def ind_thick(layer_coords, roi_vec_coords, tissue_image):
    """This function takes as input a list of coordinates for each individual
    layer(roi_vec_coords) and returns a list of individual thickness values
    for each layer (one list of six per-layer thicknesses per vector)."""
    #find thickness for each ind layer
    # NOTE(review): each `vec` appears to be a (start, end) coordinate pair
    # spanning pial surface -> gray/white boundary -- confirm upstream.
    vecall_lall_lst = []
    for vec in roi_vec_coords:
        #coordinates for intersection of vector and layer
        # Sample 70 x positions along the vector, drop the two endpoints;
        # only the first five interior samples are used below to locate
        # where the vector crosses laminar boundaries 1-5.
        x_in_pts = list(map(math.floor, np.linspace(vec[0][0], vec[1][0], 70)))[1:-1] #or70
        # print(x_in_pts)
        # Chain six sub-vectors end-to-end: pial start -> boundary 1 ->
        # boundary 2 -> ... -> boundary 5 -> GWB end point.  Each boundary
        # point is the boundary coordinate whose x equals the sampled x.
        l1_vec = vec[0], list(np.squeeze([i for i in layer_coords[1] if i[0] == x_in_pts[0]]))
        l2_vec = l1_vec[1], list(np.squeeze([i for i in layer_coords[2] if i[0] == x_in_pts[1]]))
        l3_vec = l2_vec[1], list(np.squeeze([i for i in layer_coords[3] if i[0] == x_in_pts[2]]))
        l4_vec = l3_vec[1], list(np.squeeze([i for i in layer_coords[4] if i[0] == x_in_pts[3]]))
        l5_vec = l4_vec[1], list(np.squeeze([i for i in layer_coords[5] if i[0] == x_in_pts[4]]))
        l6_vec = l5_vec[1], vec[1]
        #thickness values for each layer
        # `vec_thick` here is the module-level thickness function (defined
        # elsewhere in this file), not the local variable of the same name
        # used by other functions.
        lall_vec = l1_vec, l2_vec, l3_vec, l4_vec, l5_vec, l6_vec
        lall_thick = vec_thick(layer_coords, lall_vec, tissue_image)
        # print(l5_vec, "\n", l6_vec)
        print(lall_thick)
        vecall_lall_lst.append(lall_thick)
    return vecall_lall_lst
def save_thick_csv(path, label_lst, org_tiss_img, save_name):
    """This function takes as input the names of the laminar boundary labels and
    saves an output csv file with thickness values (one row per vector, six
    per-layer thicknesses plus the total)."""
    #get label full path info
    org_tiss_img = path + org_tiss_img
    # Order the boundary label files pial -> GWB (LI ... LVI, then GWB).
    org_label_lst = [i for i in label_lst if 'LI_long' in i], \
        [i for i in label_lst if 'LII.' in i], [i for i in label_lst if 'LIII.' in i], \
        [i for i in label_lst if 'LIV.' in i], [i for i in label_lst if 'LV.' in i], \
        [i for i in label_lst if 'LVI.' in i], [i for i in label_lst if 'GWB_long' in i]
    org_label_lst = [ii for i in org_label_lst for ii in i]
    full_path_lst = [path+i for i in org_label_lst]
    #get coordinates and vectors for laminar boundaries
    lay_coords = layer_coords(full_path_lst)
    vector_coords = np.array(vec_coords(lay_coords))
    fun_check = [True for i in label_lst if 'fundus' in i]
    # samp_vec_lst = []
    # NOTE(review): `fun_check` only ever contains True values, and
    # all([]) is also True, so this branch is taken even when no label
    # contains 'fundus' -- the 'crown' branch below looks unreachable.
    # Probably `any(fun_check)` / a truthiness test was intended; confirm.
    if all(fun_check):
        samp_vecs = np.array(roi_vecs(lay_coords, vector_coords, 'fundus'))
        # samp_vec_lst = samp_vecs
        # print(samp_vecs)
    else:
        samp_vecs = np.array(roi_vecs(lay_coords, vector_coords, 'crown'))
    #get ind layer thickness
    header = ['Layer1', 'Layer2', 'Layer3', 'Layer4', 'Layer5', 'Layer6', 'Tot_thick']
    layer_thickness = ind_thick(lay_coords, samp_vecs, org_tiss_img)
    full_line = layer_thickness
    # print(layer_thickness)
    # layer_thickness.append(str(sum(layer_thickness)))
    #create file and save thickness data
    with open(os.path.join(path, save_name+'.csv'), mode='w') as thick_csv:
        thick_csv_writer = csv.writer(thick_csv)
        thick_csv_writer.writerow(header)
        for thickness in layer_thickness:
            tot_thick = sum(thickness)
            full_line = thickness + [tot_thick]
            thick_csv_writer.writerow(full_line)
| true |
280b248069015efdc0a307f5b7ef53271e5bf93d | Python | Flosac/crawler | /lib/SocketServer.lib.py | UTF-8 | 1,989 | 2.703125 | 3 | [] | no_license | import json
from multiprocessing import Process
import os
import socket
from threading import Thread
from time import sleep, time
import sys
__author__ = 'florian'
# Work queue of JSON messages received from clients; shared between the
# per-client socket threads (producers) and the writer thread (consumer).
QUEUE = []
# Run flag polled by the writer thread; cleared on shutdown.
RUNNING = True
def writing():
    """Writer thread: drain QUEUE and append each message's lines to a file.

    Each queue item is expected to be a JSON object of the form
    {"file": <path>, "data": [<lines>]}; malformed items are printed and
    skipped so one bad message cannot stop the writer.
    """
    global QUEUE, RUNNING
    while RUNNING:
        print 'Queue has items: ' + str(len(QUEUE))
        if len(QUEUE) > 0:
            while len(QUEUE)>0:
                item = QUEUE.pop(0).strip()
                if item == "":
                    continue
                try:
                    msg = json.loads(item)
                    f = open(msg['file'], 'a+')
                    for line in msg['data']:
                        f.write(line.encode('utf8').strip() + "\n")
                    f.close()
                    del f
                except Exception as e:
                    # Best-effort: report the bad item and keep draining.
                    print e, item
        else:
            # Queue empty: back off before polling again.
            sleep(5)
def talking(client, address):
    """Per-client thread: read newline-separated messages from `client`.

    Protocol (one message per line):
      ':quit'  -- close this connection
      ':flush' -- send the current queue contents back as JSON
      anything else is appended to the shared QUEUE for the writer thread.

    The connection is also dropped after 1800 s (30 min) without a message.
    Returns True once the connection is closed.
    """
    global QUEUE
    line = ''
    #client.send('Hello Ure address is {0}\n'.format(address))
    lastmsg = time()
    while line != ':quit':
        # Bug fix: this was `lastmsg - time() > 1800`, which is never
        # positive (time only moves forward), so idle connections were
        # never dropped.
        if time() - lastmsg > 1800: break
        data = client.recv(512*1024)
        data = str(data).strip()
        for line in data.split('\n'):
            if line == "": continue
            lastmsg = time()
            if line == ':quit':
                break
            elif line == ':flush':
                client.send(json.dumps(QUEUE))
            else:
                QUEUE.append(line)
        sleep(1)
    client.close()
    return True
# Entry point: bind a UNIX domain socket next to this script, start the
# writer thread, then spawn one `talking` thread per accepted client.
scriptpath = os.path.dirname(os.path.realpath(__file__))
socketPath = scriptpath +"/../socket007"
s = socket.socket(socket.AF_UNIX)
s.bind(socketPath)
try:
    wt = Thread(target=writing)
    wt.start()
    while True:
        s.listen(5)
        client, address = s.accept()
        t = Thread(target=talking, args=(client, address))
        t.start()
except (KeyboardInterrupt, SystemExit):
    # Clean shutdown: stop the writer thread and release the socket.
    RUNNING = False
    s.close()
except Exception as e:
    RUNNING = False
    s.close()
    print e
    exit()
| true |
55b200775def43675fd0396d653861e30daaed17 | Python | phargadeakshay/python-all | /akshay6 class.py | UTF-8 | 408 | 3.046875 | 3 | [] | no_license | class Employee:
no_of_leaves = 24
travelling_passes = 30
pass
sagar = Employee()
kishor = Employee()
kishor.name = 'kishor'
kishor.salary=15000
kishor.roll = 'maintenace'
sagar.name = 'sagar'
sagar.salary = 15500
sagar.roll = 'jr.engineer'
Employee.no_of_leaves = 10 # you can change vales in calss this way
print(kishor.salary)
print(Employee.no_of_leaves)
| true |
0f2537cb85c650f7b390caea85cf15f307b73fcc | Python | miketahani/SD-lobbyists | /old.py | UTF-8 | 1,349 | 2.765625 | 3 | [] | no_license | # grab south dakota lobbyist info - the ugly/hacky version
# mike tahani m.tahani at gmail
import urllib, re, os, csv
def getdocs(doc_dir):
""" get the raw documents from the gov site """
base = 'http://apps.sd.gov/applications/ST12ODRS/LobbyistViewlist.asp?start='
end = 8120
i = 1 # iteration
while i < end:
req = str(i)
urllib.urlretrieve(base + req, filename=doc_dir+req+'.html')
print "got", base + req
i += 20
return
def getfields(writer, data):
    """Extract every table row from raw HTML *data* and write it to *writer*.

    Rows are the contents of <tr bgcolor="#FFFFFF"> ... </tr> blocks; each
    <td> cell is stripped of tags and spaces, and empty cells are dropped.
    """
    row_pattern = re.compile('\<tr bgcolor=\"#FFFFFF\"\>(.+?)\</tr\>', re.DOTALL)
    for row_html in row_pattern.findall(data):
        cells = [re.sub('\<.+?\>|\ ', '', cell).strip()
                 for cell in row_html.split('<td>')]
        writer.writerow([cell for cell in cells if cell])
    return
def main():
    """Parse every saved listing page in docs/ into lobbyists_old.csv."""
    doc_dir = 'docs/'
    #get_docs(doc_dir) # run once
    hdrs = ['year','name','address','address2','employer','employer_address',
            'employer_address2']
    # 'wb' mode here is the Python 2 convention for csv output files.
    writer = csv.writer(open('lobbyists_old.csv', 'wb'), dialect='excel')
    writer.writerow(hdrs)
    for f in os.listdir(doc_dir):
        data = open(doc_dir + f).read()
        getfields(writer, data)
    return
if __name__ == '__main__':
main() | true |
3b81b6624373c78604c4f7fa25c71e96d295a62b | Python | brennagibbons/Side-Projects | /metallic glasses/exec/testPredictions.py | UTF-8 | 16,273 | 2.890625 | 3 | [] | no_license | # Script to classify the low-throughput data with a random forest
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import learning_curve
from sklearn import preprocessing
import sys
import itertools
from sklearn import metrics
import random
###### Tuning parameters ######
train_size = .8
###### Function definitiions ######
# Function to separate ternaries by name
def ternaryName(row,elements):
composition = row[elements].nonzero()[0]
return '-'.join(sorted(elements[composition].values))
def write_for_plotting(mydata,pred,filename,elems):
    """Write per-composition glass-forming probabilities to *filename*.

    Builds a 'formula' column by concatenating each element symbol in
    *elems* with its composition value, then writes one line per row:
    the space-separated formula tokens followed by pred[i][1] (the
    positive-class probability) to four decimals.
    """
    mydata['formula'] = ''
    for i,x in mydata.iterrows():
        tn = x['ternary_name']
        # elems = tn.split('-')
        # elems = ['Al','Ni','Zr']
        f = ''
        for e in elems:
            f = f+e
            f = f+str(x[e])
        # print f
        mydata.at[i,'formula'] = f
    with open(filename,'w') as writefile:
        # NOTE(review): reset_index() returns a new frame and its result
        # is discarded here, so this line is a no-op (the loop below uses
        # positional iloc, so behaviour is unaffected).
        mydata.reset_index()
        for i in np.arange(mydata.shape[0]):
        # for i,x in mydata.iterrows():
            x = mydata.iloc[[i]]
            p = pred[i][1]
            # f = mydata.at[i,'formula']
            f = x['formula'].values[0]
            # Split the formula back into alternating symbol/number runs;
            # this rebinds the `elems` parameter for the rest of the loop.
            elems = ["".join(x) for _, x in itertools.groupby(f, key=str.isalpha)]
            elemline = ' '.join(elems)
            writefile.write('{} {:.4f}\n'.format(elemline,p))
def load_prep_data(filename,allfeatures):
    """Load a featurized composition CSV and align it with the training data.

    Drops the class-equivalent 'gfa_measured' column, derives the
    ternary-system name per row, zero-fills any feature columns present
    in *allfeatures* but missing from this file, and scales the
    non-element features.  Relies on the module-level `not_elements`
    Index computed from the low-throughput data.
    """
    ###### Load data to predict from ../data/comp_and_features.csv ######
    mydata = pd.read_csv(filename,header=0)
    del mydata['gfa_measured'] #remove feature that is equivalent to class
    myfeatures = mydata.columns[:-2]
    myend_of_elem = np.where(myfeatures.values=='NComp')[0][0]
    # Extract list of elements (elements)
    myelements = myfeatures[0:myend_of_elem]
    mydata['is_train'] = 0
    mydata['ternary_name'] = mydata.apply(lambda row: ternaryName(row,myelements),axis=1)
    # populate elements that this data didn't have
    allmyFeatures = list(mydata)
    missing_features = [x for x in allfeatures if x not in allmyFeatures]
    for f in missing_features:
        mydata[f] = 0
    # NOTE(review): scaling uses this file's own statistics, not the
    # training set's -- confirm that is intentional.
    mydata[not_elements] = preprocessing.scale(mydata[not_elements])
    return mydata
def real_class_to_proba(mydata,elemlist):
    """Return measured class labels in the same shape as clf.predict_proba.

    Selects the rows of *mydata* whose ternary_name is in *elemlist* and
    returns an (n, 2) array of [c, 1 - c] per hard label c, mirroring the
    [P(class=1), P(class=0)] column layout used elsewhere in this script.
    """
    data_sel = mydata[mydata.ternary_name.isin(elemlist)]
    # .to_numpy() replaces the long-removed DataFrame.as_matrix(); the
    # explicit list comprehension also fixes np.array(zip(...)), which
    # on Python 3 wrapped the zip iterator instead of building the array.
    data_val = data_sel['Class'].to_numpy().flatten().tolist()
    return np.array([[c, 1 - c] for c in data_val])
###### Load low-throughput data from ../data/comp_and_features.csv ######
data = pd.read_csv('../data/comp_and_features.csv',header=0)
del data['gfa_measured'] #remove feature that is equivalent to class
# Extract list of feature names (features)
features = data.columns[:-2]
# Columns before 'NComp' are element fractions; the rest are derived features.
end_of_elem = np.where(features.values=='NComp')[0][0]
# Extract list of elements (elements)
elements = features[0:end_of_elem]
not_elements = features[end_of_elem:]
# Normalize data to have zero-mean and unit variance
data[not_elements] = preprocessing.scale(data[not_elements])
# Extract list of ternaries (ternaries)
data['ternary_name'] = data.apply(lambda row: ternaryName(row,elements),axis=1)
# np.unique with return_counts gives (ternary names, samples per ternary).
ternaries = np.unique(data['ternary_name'].values,return_counts=True)
#min_error = 1
#max_error = 0
#for j in range(100):
# Split data into training and test sets, based on train_size parameter
tern_shuf = list(zip(ternaries[0],ternaries[1]))
# plot ternary histogram
tern_n = [x[1] for x in tern_shuf]
bins = [0,4,10,25,50,100,205]
plt.figure()
plt.hist(tern_n,bins,edgecolor='black',facecolor = 'blue')
plt.title('Sparse Data Distribution')
plt.xlabel('Number of data points per ternary')
plt.ylabel('Number of ternaries')
plt.savefig('ternary_histogram.pdf')
plt.show()
print("Median ternary size is: {}\n".format(np.median(tern_n)))
np.random.shuffle(tern_shuf)
# Move ternaries of interest (the held-out systems and their binaries)
# to the end of the shuffled list so they land in the test set.
temp = [tern_shuf.index(item) for item in tern_shuf if
        (item[0] == 'Al-Ni-Zr' or item[0] == 'Co-V-Zr' or
         item[0] == 'Co-Fe-Zr' or item[0] == 'Fe-Nb-Ti' or
         item[0] == 'Al-Ni' or item[0] == 'Al-Zr' or item[0] == 'Ni-Zr' or
         item[0] == 'Co-V' or item[0] == 'Co-Fe' or item[0] == 'Co-Zr' or
         item[0] == 'V-Zr' or item[0] == 'Fe-Zr')]
for i in range(len(temp)):
    # The `- i` offset compensates for items already popped.
    tern_shuf.append(tern_shuf.pop(temp[i]-i))
# Split training and test sets
ternaries = [list(t) for t in zip(*tern_shuf)]
# Take ternaries until ~train_size of the total samples are covered.
tern_train = np.extract(np.cumsum(ternaries[1])/sum(ternaries[1])<=train_size,
                        ternaries)
data['is_train'] = data['ternary_name'].isin(tern_train)
# To use min/max error test and train sets, uncomment following lines
# min_train = np.loadtxt('min_error_train.txt',dtype='str')
# data['is_train'] = data['ternary_name'].isin(min_train)
# To use randomly chosen data, rather than data separated by ternaries,
# uncomment this line
#data['is_train'] = np.random.uniform(0,1,len(data))<=train_size
train, test = data[data['is_train']==True], data[data['is_train']==False]
print('Number of observations in the training data:',len(train))
print('Number of observations in the test data:',len(test))
# Build classification vector (y)
y = train['Class'].astype(int)
###### Train Random Forest classifier ######
sys.stdout.write('Building Random Forest classifier......... ')
# Create classifier
clf = RandomForestClassifier(n_estimators=500,n_jobs=-1) #n_jobs: -1 runs on all avail cores
# Train classifier on training data
clf.fit(train[features],y)
print('Done.')
# Determine feature importance (top-29 most important feature names)
imp_feat = sorted(zip(clf.feature_importances_,train[features]))
imp_feat = [list(t) for t in zip(*imp_feat)]
imp_feat = imp_feat[1][-30:-1]
###### Test classifier on test data ######
# Predict classifications of test data
test_pred = clf.predict(test[features])
# Create vector with validation values of test data
test_val = test['Class'].astype(int).values
# Output the number of incorrect classifications
print('Classifier generated ',np.sum(test_pred != test_val),
      ' misclassifications out of ',len(test_val),' resulting in ',
      np.sum(test_pred != test_val)/len(test_val),' classification error.')
#if (np.sum(test_pred != test_val)/len(test_val) < min_error):
# train['ternary_name'].to_csv('min_error_train.txt',sep=',',index=False)
# test['ternary_name'].to_csv('min_error_test.txt',sep=',',index=False)
# min_error = np.sum(test_pred != test_val)/len(test_val)
#elif (np.sum(test_pred != test_val)/len(test_val) > max_error):
# train['ternary_name'].to_csv('max_error_train.txt',sep=',',index=False)
# test['ternary_name'].to_csv('max_error_test.txt',sep=',',index=False)
# max_error = np.sum(test_pred != test_val)/len(test_val)
###### Output Al-Ni-Zr predictions ######
# print(test[test['ternary_name'].isin(['Al-Ni-Zr','Al-Ni','Al-Zr','Ni-Zr'])])
###### Plot learning curve ######
# features = imp_feat
# train_sizes = []
# train_scores = []
# test_scores = []
# for subset in [.01,.05,.1,.2,.3,.4,.5,.6,.7,.8,.9,1]:
# clf = RandomForestClassifier(n_estimators=500,n_jobs=-1,oob_score=True)
# train_subset = train[np.random.uniform(0,1,len(train))<=subset]
# clf.fit(train_subset[features],train_subset['Class'].astype(int))
# train_sizes.append(len(train_subset))
# train_scores.append(clf.oob_score_)
# test_pred = clf.predict(test[features])
# test_scores.append(clf.score(test[features],test_val))
# del train_subset
# del clf
# plt.plot(train_sizes,train_scores,'b-')
# plt.plot(train_sizes,test_scores,'r-')
# plt.xlabel('# of training samples')
# plt.ylabel('OOB score')
# plt.legend(['Training set','Test set'])
# plt.show()
allfeatures = list(data)
####### add hitp dev data and retrain
hitp_data = load_prep_data('../data/hitp_glass_data_featurized.csv',allfeatures)
# Hold out the three dense-data ternaries for evaluation.
hitp_train_data = hitp_data[~hitp_data['ternary_name'].isin(['Co-Fe-Zr','Co-V-Zr','Fe-Nb-Ti'])]
# select every nth row from the high throughput training data
# n = 2
hitp_train_data = hitp_train_data.reset_index()
# hitp_train_data = hitp_train_data.iloc[0::n,:]
# add the (maybe downsampled) hitp data to the whole LB train data set
all_train_data = pd.concat([train, hitp_train_data])
print('Number of observations in the training data:',len(all_train_data))
print('Number of observations in the test data:',len(test))
# Build classification vector (y)
yall = all_train_data['Class'].astype(int)
sys.stdout.write('\nBuilding Random Forest classifier......... \n')
# Create classifier
clf = RandomForestClassifier(n_estimators=500,n_jobs=-1) #n_jobs: -1 runs on all avail cores
# Train classifier on training data
clf.fit(all_train_data[features],yall)
print('Done.')
test_pred = clf.predict(test[features])
test_proba = clf.predict_proba(test[features])
# Create vector with validation values of test data
test_val = test['Class'].astype(int).values
# Output the number of incorrect classifications
print('Classifier generated ',np.sum(test_pred != test_val),
      ' misclassifications out of ',len(test_val),' resulting in ',
      np.sum(test_pred != test_val)/len(test_val),' classification error.')
logloss = metrics.log_loss(test_val,test_proba)
# ###########################
# #### Al Ni Zr ternary #####
tri_alnizr = load_prep_data('../data/triangles_alnizr_featurized.csv',allfeatures)
alnizr_pred = clf.predict_proba(tri_alnizr[features])
write_for_plotting(tri_alnizr,alnizr_pred,'../data/alnizr_pred.csv',['Al','Ni','Zr'])
alnizr_real = data[data.ternary_name.isin(['Al-Ni-Zr', 'Al-Ni','Ni-Zr','Al-Zr','Al','Ni','Zr'])]
# alnizr_real_val = alnizr_real.as_matrix(columns=['Class'])
# alnizr_real_val = list(alnizr_real_val.flatten())
# alnizr_real_val = np.array(zip(alnizr_real_val,[-(x-1) for x in alnizr_real_val]))
# write_for_plotting(alnizr_real,alnizr_real_val,'../data/alnizr_exp.csv',['Al','Ni','Zr'])
# #### all dev set ternaries for plotting #####
dev_plot_pred = load_prep_data('../data/triangles_glass_featurized.csv',allfeatures)
dev_plot_exp = load_prep_data('../data/hitp_glass_data_featurized.csv',allfeatures)
# # dev_plot_pred = clf.predict_proba(dev_plot_data)
# ###### Co Fe Zr ######
cofezr_list = ['Co-Fe-Zr','Co-Fe','Co-Zr','Fe-Zr','Co','Fe','Zr']
cofezr = dev_plot_pred[dev_plot_pred.ternary_name.isin(cofezr_list)]
cofezr_pred = clf.predict_proba(cofezr[features])
write_for_plotting(cofezr,cofezr_pred,'../data/cofezr_pred.csv',['Co','Fe','Zr'])
cofezr_real = dev_plot_exp[dev_plot_exp.ternary_name.isin(cofezr_list)]
# cofezr_real_val = real_class_to_proba(cofezr_real,cofezr_list)
# write_for_plotting(cofezr_real,cofezr_real_val,'../data/cofezr_exp.csv',['Co','Fe','Zr'])
# ###### Co Fe Zr ######
# covzr_list = ['Co-V-Zr','Co-V','Co-Zr','V-Zr','Co','V','Zr']
# covzr = dev_plot_pred[dev_plot_pred.ternary_name.isin(covzr_list)]
# covzr_pred = clf.predict_proba(covzr[features])
# write_for_plotting(covzr,covzr_pred,'../data/covzr_pred.csv',['Co','V','Zr'])
# covzr_real = dev_plot_exp[dev_plot_exp.ternary_name.isin(covzr_list)]
# covzr_real_val = real_class_to_proba(covzr_real,covzr_list)
# write_for_plotting(covzr_real,covzr_real_val,'../data/covzr_exp.csv',['Co','V','Zr'])
# ###### Fe Nb Ti ######
fenbti_list = ['Fe-Nb-Ti','Fe-Nb','Fe-Ti','Nb-Ti','Fe','Nb','Ti']
fenbti = dev_plot_pred[dev_plot_pred.ternary_name.isin(fenbti_list)]
fenbti_pred = clf.predict_proba(fenbti[features])
write_for_plotting(fenbti,fenbti_pred,'../data/fenbti_pred.csv',['Fe','Nb','Ti'])
fenbti_real = dev_plot_exp[dev_plot_exp.ternary_name.isin(fenbti_list)]
# fenbti_real_val = real_class_to_proba(fenbti_real,fenbti_list)
# write_for_plotting(fenbti_real,fenbti_real_val,'../data/fenbti_exp.csv',['Fe','Nb','Ti'])
################
# Plotting CoFeZr with only 5% (x = .05) of the HiTp training data
x = .05
hitp_indices = hitp_train_data.index.tolist()
random.shuffle(hitp_indices)
hitp_len = len(hitp_indices)
x_ind = int(x*hitp_len)
inds = hitp_indices[0:x_ind]
hitp_train_data_n = hitp_train_data.iloc[inds]
all_train_data = pd.concat([train, hitp_train_data_n])
yall = all_train_data['Class'].astype(int)
clf = RandomForestClassifier(n_estimators=500,n_jobs=-1) #n_jobs: -1 runs on all avail cores
clf.fit(all_train_data[features],yall)
cofezr_pred = clf.predict_proba(cofezr[features])
write_for_plotting(cofezr,cofezr_pred,'../data/cofezr_pred_5percent.csv',['Co','Fe','Zr'])
# ################
# ################
# print("\nTesting HiTP learning curve:\n")
# # x = np.arange(numll)+1
# # x = x[::-1]
# # x = [.1,.2,.3,.4,.5,.6,.7,.8,.9,1]
# x = [0,.025,.05,.075,.1,.2,.4,.6,.8,1]
# hitp_indices = hitp_train_data.index.tolist()
# random.shuffle(hitp_indices)
# hitp_len = len(hitp_indices)
# x_ind = [int(i*hitp_len) for i in x]
# numll = len(x)
# loglosses_test = np.zeros(numll)
# loglosses_train = np.zeros(numll)
# loglosses_alnizr = np.zeros(numll)
# loglosses_cofezr = np.zeros(numll)
# loglosses_fenbti = np.zeros(numll)
# acc_test = np.zeros(numll)
# acc_train = np.zeros(numll)
# acc_alnizr = np.zeros(numll)
# acc_cofezr = np.zeros(numll)
# acc_fenbti = np.zeros(numll)
# # hitp_train_data_n = hitp_train_data.iloc[x_ind[n]]
# for n in np.arange(len(x)):
# if x_ind[n] == 0:
# all_train_data = train
# else:
# # hitp_train_data_n = hitp_train_data.iloc[0::n,:]
# inds = hitp_indices[0:x_ind[n]]
# hitp_train_data_n = hitp_train_data.iloc[inds]
# all_train_data = pd.concat([train, hitp_train_data_n])
# yall = all_train_data['Class'].astype(int)
# clf = RandomForestClassifier(n_estimators=500,n_jobs=-1) #n_jobs: -1 runs on all avail cores
# clf.fit(all_train_data[features],yall)
# test_pred = clf.predict(test[features])
# test_proba = clf.predict_proba(test[features])
# test_val = test['Class'].astype(int).values
# logloss_test = metrics.log_loss(test_val,test_proba)
# loglosses_test[n] = logloss_test
# acc_test[n] = metrics.accuracy_score(test_val,test_pred)
# train_val = train['Class'].astype(int).values
# train_proba = clf.predict_proba(train[features])
# logloss_train = metrics.log_loss(train_val,train_proba)
# loglosses_train[n] = logloss_train
# alnizr_proba = clf.predict_proba(alnizr_real[features])
# alnizr_pred = clf.predict(alnizr_real[features])
# alnizr_val = alnizr_real['Class'].astype(int).values
# loglosses_alnizr[n] = metrics.log_loss(alnizr_val,alnizr_proba)
# acc_alnizr[n] = metrics.accuracy_score(alnizr_val,alnizr_pred)
# cofezr_proba = clf.predict_proba(cofezr_real[features])
# cofezr_pred = clf.predict(cofezr_real[features])
# cofezr_val = cofezr_real['Class'].astype(int).values
# loglosses_cofezr[n] = metrics.log_loss(cofezr_val,cofezr_proba)
# acc_cofezr[n] = metrics.accuracy_score(cofezr_val,cofezr_pred)
# fenbti_proba = clf.predict_proba(fenbti_real[features])
# fenbti_pred = clf.predict(fenbti_real[features])
# fenbti_val = fenbti_real['Class'].astype(int).values
# loglosses_fenbti[n] = metrics.log_loss(fenbti_val,fenbti_proba)
# acc_fenbti[n] = metrics.accuracy_score(fenbti_val,fenbti_pred)
# del clf
# plt.figure()
# plt.plot(x,loglosses_test,'r.-',label='Dev set')
# plt.plot(x,loglosses_alnizr,'b.-',label = 'AlNiZr')
# plt.plot(x,loglosses_cofezr,'g.-',label = 'CoFeZr')
# plt.plot(x,loglosses_fenbti,'m.-',label = 'FeNbTi')
# # plt.plot(1/x,loglosses_train,'b',label='train')
# plt.legend()
# plt.title('Log Loss: Dense Data Learning Curve')
# plt.ylabel('Log Loss')
# plt.xlabel('Fraction of Dense Training Data Included')
# plt.savefig('loglosses.pdf')
# plt.show()
# plt.figure()
# plt.plot(x,acc_test,'r.-',label='Dev set')
# plt.plot(x,acc_alnizr,'b.-',label = 'AlNiZr')
# plt.plot(x,acc_cofezr,'g.-',label = 'CoFeZr')
# plt.plot(x,acc_fenbti,'m.-',label = 'FeNbTi')
# # plt.plot(1/x,loglosses_train,'b',label='train')
# plt.legend()
# plt.title('Accuracy: Dense Data Learning Curve')
# plt.ylabel('Accuracy')
# plt.xlabel('Fraction of Dense Training Data Included')
# plt.savefig('acc.pdf')
# plt.show()
| true |
51a8afbe808709f80c0517ae8b28d3b1557c7014 | Python | lzxdale/Tiny-Study | /Scrapy/rental/rental/spiders/crawl_rental.py | UTF-8 | 1,625 | 2.59375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class CrawlRentalSpider(CrawlSpider):
    """CrawlSpider that walks ganji.com Shenzhen rental listings.

    The first rule extracts individual listing detail pages and hands them
    to ``parse_item``; the second rule follows the paginated index pages.
    """
    name = 'crawl_rental' #crawler name
    allowed_domains = ['ganji.com']
    start_urls = ['http://sz.ganji.com/zufang/pn1/']
    # Collected listing URLs (class-level, shared across the crawl).
    get_links = []
    rules = (
        Rule(LinkExtractor(allow=r'http://sz.ganji.com/zufang/\d+x.shtml'), callback='parse_item', follow=False),
        Rule(LinkExtractor(allow=r'http://sz.ganji.com/zufang/pn\d+')) #callback='', follow=True),
        # if callback is absent, follow must be true, or scrapy will auto-correct it to true
        # \d+ means any run of digits
    )
    #def parse(self, response): not allowed to use parse in crawl templates
        #pass
    def parse_item(self, response): #this is different from basic templates, where the callback is parse.
        """Record the listing URL; field-extraction XPaths are kept below
        (commented out) for future use."""
        item = {}
        self.get_links.append(response.url)
        print(response.url, len(self.get_links))
        # item['title'] = response.xpath(".//p[@class = 'card-title']/i/text()").get()
        # item['price'] = response.xpath(".//span[@class = 'price']/text()").get()
        # item['payment style'] = response.xpath(".//span[@class = 'unit']/text()").get()
        # item['orientation'] = response.xpath(".//*[@id='f_detail']/div[5]/div[2]/div[1]/ul[1]/li[3]/span[2]").get()
        # item['area'] = response.xpath(".//*[@id='f_detail']/div[5]/div[2]/div[1]/ul[1]/li[2]/span[2]/text()").get()
        # item['address'] = response.xpath(".//*[@id='f_detail']/div[5]/div[2]/div[1]/ul[2]/li[3]/span[2]/text()").get()
        # print(item)
        #return item
| true |
7149efdfb8ac996f4fe6e7d90a6e845d36fbe39a | Python | sjzyjc/leetcode | /269/269-0.py | UTF-8 | 2,134 | 3.21875 | 3 | [] | no_license | from collections import defaultdict, deque
class Solution:
    def alienOrder(self, words):
        """
        Derive a character ordering consistent with the sorted word list.

        :type words: List[str]
        :rtype: str -- any valid ordering, or "" if none exists
        """
        # Every character appearing in any word is a node; record them in
        # first-appearance order so BFS output is deterministic.
        in_degree = {c: 0 for word in words for c in word}
        graph = defaultdict(set)
        # Each adjacent word pair contributes at most one precedence edge,
        # taken from the first position where the words differ.
        for first, second in zip(words, words[1:]):
            for c1, c2 in zip(first, second):
                if c1 != c2:
                    if c2 not in graph[c1]:
                        graph[c1].add(c2)
                        in_degree[c2] += 1
                    break
            else:
                # Bug fix: no differing position was found.  If `first` is
                # longer, it is a proper extension of `second` listed
                # before it (e.g. ["abc", "ab"]), which is impossible in
                # any valid ordering -- the original code missed this case.
                if len(first) > len(second):
                    return ""
        # Kahn's algorithm: repeatedly emit nodes with no remaining
        # predecessors.
        queue = deque(c for c, deg in in_degree.items() if deg == 0)
        order = []
        while queue:
            c = queue.popleft()
            order.append(c)
            for nxt in graph[c]:
                in_degree[nxt] -= 1
                if in_degree[nxt] == 0:
                    queue.append(nxt)
        # A cycle leaves some nodes unemitted -> no valid ordering.
        return "".join(order) if len(order) == len(in_degree) else ""
| true |
fdd4c94aeda9d68d8e6a47c3844f31ff6625f870 | Python | LeonardoPereirajr/Curso_em_video_Python | /des089.py | UTF-8 | 838 | 3.53125 | 4 | [
"MIT"
] | permissive | consulta=[]
# Grade-book exercise: read student names and two grades each, print a
# summary table, then let the user look up report cards by index (999 exits).
tot=0
classe=[]
sala=[]
media=[]
while True:
    aluno = str(input('Nome : '))
    nota1 =float(input('Nota 1: '))
    nota2 = float(input('Nota 2 : '))
    # `media` starts as a list above but is rebound to the mean here.
    media = (nota1+nota2)/2
    classe.append([aluno, [nota1, nota2], media])
    tot+=1
    res = str(input('Quer continuar [S/N] : '))
    if res in 'Nn':
        break
# NOTE(review): leftover debug print of the first student's grades.
print(classe[0][1])
print('=+'*40)
print(f'{"No.":<4} {"NOME":>10} {"NOTAS":>8}')
print('--'*40)
for i, a in enumerate(classe):
    print(f' {i:<4} {a[0]:<10} {a[2]:>8.1f} ')
print('=+'*40)
while True:
    boletim = int(input('Deseja ver as notas de qual aluno : [999 interrompe] '))
    if boletim == 999:
        print('FINALIZANDO...')
        break
    # Out-of-range indices are silently ignored and the prompt repeats.
    if boletim <= len(classe)-1:
        print(f' Notas de {classe[boletim][0]} são {classe[boletim][1]}')
print('FIM') | true |
0f69131f46f833429f1d5eb27ab539e50d0b26d0 | Python | mrkazawa/algo | /prob_late.py | UTF-8 | 2,760 | 3.65625 | 4 | [] | no_license | """
Tony is not a morning person and is late for work with 0.5 probability. Andy is a generous
supervisor, so tries not to care for Tony's being late. But Andy scolds Tony if Tony is late
for work three times in a row. Calculate the probability that Tony is not being scolded at
all if he has attended for twenty days.
Ian is also not a morning person and is late for work with 2/3 probability. Calculate the
probability that Ian is not being scolded by Andy at all if he has attended for twenty days.
"""
import sys
sys.setrecursionlimit(100) # just a safeguard :)
def prob_of_consecutive_late(num_days, late_limit, late_prob, saved=None):
    """ Probability of at least one run of `late_limit` consecutive late
    days within `num_days` working days.

    Parameters:
        num_days    total number of days the employee works
        late_limit  length of the consecutive-late run that triggers
                    a scolding
        late_prob   per-day probability of being late
        saved       memoization dict reused across the recursion
    Returns:
        the probability of such a run occurring
    """
    if saved is None:
        saved = {}
    key = (num_days, late_limit, late_prob)
    if key not in saved:
        if late_limit > num_days or num_days <= 0:
            # Not enough days left for a qualifying run.
            prob = 0
        else:
            # Either the run starts on day 1 ...
            prob = late_prob ** late_limit
            # ... or the first on-time day is day `first_late`, after
            # which the problem recurs on the remaining days.
            for first_late in range(1, late_limit + 1):
                tail = prob_of_consecutive_late(
                    num_days - first_late, late_limit, late_prob, saved)
                prob += late_prob ** (first_late - 1) * (1 - late_prob) * tail
        saved[key] = prob
    return saved[key]
# test for employee Tony (late with probability 0.5); the scolding
# probability is the complement of getting 3 consecutive lates.
print('Tony work for n days, probability of not getting scolded at all')
scolded_prob = 1 - prob_of_consecutive_late(3,3,0.5)
print('n = 3; probability =', scolded_prob)
scolded_prob = 1 - prob_of_consecutive_late(7,3,0.5)
print('n = 7; probability =', scolded_prob)
scolded_prob = 1 - prob_of_consecutive_late(14,3,0.5)
print('n = 14; probability =', scolded_prob)
scolded_prob = 1 - prob_of_consecutive_late(21,3,0.5)
print('n = 21; probability =', scolded_prob)
# test for employee Ian (2/3 late probability, approximated as 0.666)
print()
print('Ian work for n days, probability of not getting scolded at all')
scolded_prob = 1 - prob_of_consecutive_late(3,3,0.666)
print('n = 3; probability =', scolded_prob)
scolded_prob = 1 - prob_of_consecutive_late(7,3,0.666)
print('n = 7; probability =', scolded_prob)
scolded_prob = 1 - prob_of_consecutive_late(14,3,0.666)
print('n = 14; probability =', scolded_prob)
scolded_prob = 1 - prob_of_consecutive_late(21,3,0.666)
print('n = 21; probability =', scolded_prob) | true |
947122d2ef8ee99ca1b7094ba11cf4baf3ad6018 | Python | mjhydri/sparse-analytic-filters | /scripts/model.py | UTF-8 | 2,641 | 2.625 | 3 | [
"MIT"
] | permissive | # Author: Frank Cwitkowitz <fcwitkow@ur.rochester.edu>n
# My imports
from amt_tools.models import OnsetsFrames2
import amt_tools.tools as tools
# Regular imports
import torch
def kld_scaling(iter):
    """Return the KL-divergence loss weight for training iteration *iter*.

    Parameters
    ----------
    iter : int
        Current training iteration.  Currently unused: the schedule is a
        constant, but the argument is kept so a warm-up or annealing
        schedule can be dropped in without touching call sites.

    Returns
    ----------
    scaling : float
        KL-divergence scaling factor for the current iteration.
    """
    return 0.01
class OnsetsFrames2LHVQT(OnsetsFrames2):
    """
    Implements the Onsets & Frames model (V2) with a learnable filterbank frontend.
    """
    def __init__(self, dim_in, profile, in_channels, lhvqt, model_complexity=2, detach_heads=True, device='cpu'):
        """
        Initialize the model and establish parameter defaults in function signature.
        Parameters
        ----------
        See OnsetsFrames2 class for others...
        lhvqt : LHVQT (Wrapper)
            Filterbank to use as frontend
        """
        super().__init__(dim_in, profile, in_channels, model_complexity, detach_heads, device)
        # Create a pointer to the wrapper
        self.lhvqt = lhvqt
        # Append the filterbank learning module to the front of the model,
        # followed by a ReLU so the downstream stack sees non-negative input.
        self.frontend.add_module('fb', self.lhvqt.lhvqt)
        self.frontend.add_module('rl', torch.nn.ReLU())
    def post_proc(self, batch):
        """
        Calculate KL-divergence for the 1D convolutional layer in the filterbank and append to the tracked loss.
        Parameters
        ----------
        batch : dict
            Dictionary including model output and potentially
            ground-truth for a group of tracks
        Returns
        ----------
        output : dict
            Dictionary containing multi pitch, onsets, and offsets output as well as loss
        """
        # Perform standard Onsets & Frames 2 steps
        output = super().post_proc(batch)
        # Obtain a pointer to the filterbank module
        fb_module = self.frontend.fb.get_modules()[0]
        # Check to see if loss is being tracked, and only add the KLD term
        # when the filterbank uses variational dropout (var_drop).
        if tools.KEY_LOSS in output.keys() and fb_module.var_drop:
            # Extract all of the losses
            loss = output[tools.KEY_LOSS]
            # Calculate the KL-divergence term
            loss[tools.KEY_LOSS_KLD] = fb_module.time_conv.kld()
            # Compute the total loss and add it back to the output dictionary.
            # NOTE(review): `self.iter` is presumably maintained by the
            # training loop / parent class -- confirm it is set before use.
            loss[tools.KEY_LOSS_TOTAL] += kld_scaling(self.iter) * loss[tools.KEY_LOSS_KLD]
            output[tools.KEY_LOSS] = loss
        return output
| true |
9ccabf2a1eb2198ef2e6d6a61113683841cef90e | Python | jadevaibhav/mnist_trial | /trial1.py | UTF-8 | 1,326 | 3.15625 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
# --- Training data ---
# x_train: 20 samples, each a length-10 row whose entries all equal the
# sample index i (row i is [i, i, ..., i]); shape (20, 10).
x_train = (np.arange(20).reshape((20,1))*np.ones((20,10))).reshape((20,10))
print(x_train[0].shape)
# y_train: target for sample i is 0.5 * i; shape (20, 1).
y_train = (np.arange(20)*0.5).reshape((20,1))
print(y_train.shape)
# --- Computation graph (TensorFlow 1.x, graph mode) ---
# Placeholders for a batch of inputs/targets; first dimension is the batch size.
x = tf.placeholder(tf.float32,shape=(None,10),name='x')
y = tf.placeholder(tf.float32,shape=(None,1),name='y')
# Trainable parameters of the linear model y_pred = x @ w + b, initialized to ones.
w = tf.Variable(np.ones((10,1),dtype=np.float32),name='w')
b = tf.Variable(np.ones((1),dtype=np.float32),name='b')
y_pred = tf.add(tf.matmul(x,w),b,name='y_pred')
# Mean squared error over the 20 training samples (denominator hard-coded to
# the dataset size).
cost = tf.reduce_sum(tf.pow(y-y_pred,2))/20
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
init = tf.global_variables_initializer()
# --- Run the graph in a session ---
with tf.Session() as sess:
    sess.run(init)
    # Dump the graph definition so it can be inspected in TensorBoard.
    writer = tf.summary.FileWriter("trial1")
    writer.add_graph(sess.graph)
    # Plain SGD: one update per sample (batch size 1), for 1000 epochs.
    for epoch in range(1000):
        for (X,Y) in zip(x_train,y_train):
            sess.run(optimizer,feed_dict={x:X.reshape((1,10)),y:Y.reshape(1,1)})
        # Report cost and current parameters every 100 epochs on the full set.
        if((epoch+1)%100 == 0):
            c = sess.run(cost,feed_dict={x:x_train,y:y_train})
            print("cost:",c,"weight:",sess.run(w),"bias:",sess.run(b))
    print("training done...\n")
    training_cost = sess.run(cost,feed_dict={x:x_train,y:y_train})
    print("training_cost:"+str(training_cost)+'\n')
| true |
0d4585443054218acb08f3a5e8350a050026c959 | Python | dariansk/iterators-generators-yeld | /generator.py | UTF-8 | 270 | 3.09375 | 3 | [] | no_license | import hashlib
def hash_file(filepath):
    """Yield the MD5 hex digest of each line in *filepath*.

    Each line is hashed including its trailing newline. If the file does
    not exist, the error is printed and the generator yields nothing
    (the check happens lazily, on first iteration).
    """
    try:
        # Open explicitly as UTF-8 so decoding matches the re-encoding
        # below regardless of the locale's default encoding.
        with open(filepath, 'r', encoding='utf-8') as datafile:
            # Iterate the file object directly instead of readlines():
            # readlines() loads the entire file into memory, defeating
            # the streaming purpose of this generator.
            for line in datafile:
                yield hashlib.md5(line.encode('utf-8')).hexdigest()
    except FileNotFoundError as error:
        # Preserve the original best-effort behavior: report and stop.
        print(error)
7c3a4218b133ac6fe0f049e2499a863e6da75b88 | Python | eamt/holbertonschool-higher_level_programming | /0x07-python-test_driven_development/4-print_square.py | UTF-8 | 484 | 4.125 | 4 | [] | no_license | #!/usr/bin/python3
"""module to define the print_square function
"""
def print_square(size):
    """Print a square of side length ``size`` with the ``#`` character.

    Args:
        size (int): side length of the square.

    Raises:
        TypeError: if ``size`` is not an integer.
        ValueError: if ``size`` is negative.
    """
    # Identity check on the type (not isinstance) so bool is rejected too.
    if type(size) is not int or size != size:
        raise TypeError("size must be an integer")
    if size < 0:
        raise ValueError("size must be >= 0")
    # One print per row; range(0) is empty, so size == 0 prints nothing.
    for _ in range(size):
        print("#" * size)
| true |