blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
f322108470d6efe2415a10438194a55832e60f33 | Python | jones139/mapbook | /mapbook.py | UTF-8 | 11,455 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python
class Page:
    """One map page: its map number, geographic bounds and (initially empty)
    links to the neighbouring pages in the grid."""

    # Neighbour slots, laid out around the page (mc is the page itself):
    #   ul uc ur
    #   ml mc mr
    #   dl dc dr
    _NEIGHBOUR_SLOTS = ('ul', 'uc', 'ur', 'ml', 'mr', 'dl', 'dc', 'dr')

    def __init__(self, mapnumber, minx, miny, width, ratio):
        """Store the map number and compute (minx, miny, maxx, maxy) from the
        south-west corner, the page width and the height/width ratio."""
        self.mapnumber = mapnumber
        self.bounds = (minx, miny, minx + width, miny + width * ratio)
        # All neighbours start unset; the grid-layout code fills them in.
        for slot in self._NEIGHBOUR_SLOTS:
            setattr(self, slot, None)
if __name__ == "__main__":
import argparse
class LineArgumentParser(argparse.ArgumentParser):
def convert_arg_line_to_args(self, arg_line):
if arg_line:
if arg_line.strip()[0] == '#':
return
for arg in ('--' + arg_line).split():
if not arg.strip():
continrue
yield arg
    # All options may also come from a file via @filename (see
    # LineArgumentParser above, one "key value" pair per line).
    parser = LineArgumentParser(description='Create a mapbook',fromfile_prefix_chars='@')
    # Location-based options
    parser.add_argument('--startx', type=float, help='West coordinate to map in mercator km',required=True)
    parser.add_argument('--starty', type=float, help='South coordinate to map in mercator km',required=True)
    parser.add_argument('--width', type=float, help='Width in mercator km of a map page',required=True)
    parser.add_argument('--overwidth', type=float, help='Width in mercator km to add to each side', default=0.)
    # Page layout options
    parser.add_argument('--pagewidth', type=float, help='Page width in points. Should be <= physical page width',required=True)
    parser.add_argument('--pageheight', type=float, help='Page height in points. Should be <= physical page height',required=True)
    parser.add_argument('--pagepadding', type=float, help='Padding around the edges of each map',default=15.)
    # File options
    parser.add_argument('--mapfile',help='Mapnik XML file',default='osm.xml')
    parser.add_argument('--outputfile',help='Name of PDF file to create',default='map.pdf')
    # Grid options
    parser.add_argument('--rows',type=int,help='Number of rows of map pages', default=1)
    parser.add_argument('--columns',type=int,help='Number of columns of map pages', default=1)
    parser.add_argument('--firstmap',type=int,help='Number of first map', default=1)
    # Page options
    parser.add_argument('--firstpage',type=int,help='Page number of first page', default=1)
    parser.add_argument('--blankfirst',action='store_true',help='Insert an empty page at the beginning of the PDF',default=False)
    opts=parser.parse_args()
    # Debug dump of the parsed options (Python 2 print statement).
    print opts
    # Rendering libraries are imported late so --help works without them.
    import mapnik2 as mapnik
    import cairo
    import pango
    import pangocairo
    # Initial mapnik setup
    merc = mapnik.Projection('+init=epsg:3857')
    m = mapnik.Map(int(opts.pagewidth),int(opts.pageheight))
    m.srs = merc.params()
    # Printable map area in points: one padding is reserved horizontally
    # (the gutter side), two paddings vertically (top and bottom).
    mapwidth=opts.pagewidth-opts.pagepadding
    mapheight=opts.pageheight-2*opts.pagepadding
    # Lay out the grid of pages
    # pagegrid
    #  [2,0] [2,1] [2,2] [2,3]
    #  [1,0] [1,1] [1,2] [1,3]
    #  [0,0] [0,1] [0,2] [0,3]
    # Row 0 is the southernmost row; each row holds consecutive map numbers.
    pagegrid = []
    for y in range(opts.rows):
        pagegrid.append(range(opts.firstmap+y*opts.columns,opts.firstmap+(1+y)*opts.columns))
    # Define the pages.  Neighbour slots store map NUMBERS (ints from
    # pagegrid), not Page objects; they are later printed as labels.
    pages = []
    for y, row in enumerate(pagegrid):
        for x, n in enumerate(row):
            thispage = Page(n,opts.startx+x*opts.width, opts.starty+y*opts.width*(mapheight/mapwidth),opts.width,(mapheight/mapwidth))
            # Skip over the corners
            if y+1<len(pagegrid):
                #if x-1>=0:
                #    thispage.ul=pagegrid[y+1][x-1]
                thispage.uc=pagegrid[y+1][x]
                #if x+1<len(pagegrid[y+1]):
                #    thispage.ur=pagegrid[y+1][x+1]
            if x-1>=0:
                thispage.ml=pagegrid[y][x-1]
            if x+1<len(pagegrid[y]):
                thispage.mr=pagegrid[y][x+1]
            if y-1>=0:
                #if x-1>=0:
                #    thispage.dl=pagegrid[y-1][x-1]
                thispage.dc=pagegrid[y-1][x]
                #if x+1<len(pagegrid[y-1]):
                #    thispage.dr=pagegrid[y-1][x+1]
            pages.append(thispage)
    # Start rendering pages
    print opts
    print 'Rendering a total of %d pages ' % (opts.rows * opts.columns)
    #print 'Rendering a total of {} pages'.format(opts.rows*opts.columns)
    # One PDF surface holds every page; show_page() starts the next sheet.
    book = cairo.PDFSurface(opts.outputfile,opts.pagewidth,opts.pageheight)
    pagecount = opts.firstpage
    ctx = pangocairo.CairoContext(cairo.Context(book))
    if opts.blankfirst:
        ctx.show_page()
        pagecount = pagecount + 1
for page in pages:
print 'Rendering map %d on page %d' % (page.mapnumber, pagecount)
#print 'Rendering map {} on page {}'.format(page.mapnumber, pagecount)
#pages[0].bounds[0] - overwidth - 0.5 * (mwidth-width)
# = . . . *(opts.pagewidth/mapwidth-1)*width
# minx, miny, maxx, maxy
bbox = (\
page.bounds[0] - 2*opts.overwidth - 0.5 * (opts.pagewidth/mapwidth - 1) * (pages[0].bounds[2] - pages[0].bounds[0]),\
page.bounds[1] - 2*opts.overwidth - 0.5 * (opts.pagewidth/mapwidth - 1) * (pages[0].bounds[3] - pages[0].bounds[1]),\
page.bounds[2] + 2*opts.overwidth + 0.5 * (opts.pagewidth/mapwidth - 1) * (pages[0].bounds[2] - pages[0].bounds[0]),\
page.bounds[1] + 2*opts.overwidth + 0.5 * (opts.pagewidth/mapwidth - 1) * (pages[0].bounds[3] - pages[0].bounds[1])\
)
m.zoom_to_box(mapnik.Box2d(*bbox))
mapnik.load_map(m,opts.mapfile)
# Save the current clip region
ctx.save()
if pagecount % 2 != 1:
ctx.rectangle(opts.pagepadding,opts.pagepadding,mapwidth,mapheight)
else:
ctx.rectangle(0,opts.pagepadding,mapwidth,mapheight)
ctx.clip()
mapnik.render(m,ctx,0,0)
# Restore the clip region
ctx.restore()
ctx.set_line_width(.25)
ctx.set_source_rgb(0, 0, 0)
if pagecount % 2 != 1:
ctx.rectangle(opts.pagepadding,opts.pagepadding,mapwidth,mapheight)
else:
ctx.rectangle(0,opts.pagepadding,mapwidth,mapheight)
ctx.stroke()
# Draw adjacent page arrows
ctx.set_source_rgb(0., 0., 0.)
if pagecount % 2 != 1:
if page.ul:
ctx.move_to(0,0)
ctx.rel_line_to(2*opts.pagepadding,0)
ctx.rel_line_to(-2*opts.pagepadding,2*opts.pagepadding)
ctx.close_path()
if page.ml:
ctx.move_to(0,opts.pageheight/2)
ctx.rel_line_to(opts.pagepadding,-opts.pagepadding)
ctx.rel_line_to(0,2*opts.pagepadding)
ctx.close_path()
if page.dl:
ctx.move_to(0,opts.pageheight)
ctx.rel_line_to(2*opts.pagepadding,0)
ctx.rel_line_to(-2*opts.pagepadding,-2*opts.pagepadding)
ctx.close_path()
else:
if page.dr:
ctx.move_to(opts.pagewidth,opts.pageheight)
ctx.rel_line_to(-2*opts.pagepadding,0)
ctx.rel_line_to(2*opts.pagepadding,-2*opts.pagepadding)
ctx.close_path
if page.mr:
ctx.move_to(opts.pagewidth, opts.pageheight/2)
ctx.rel_line_to(-opts.pagepadding,opts.pagepadding)
ctx.rel_line_to(0,-2*opts.pagepadding)
ctx.close_path()
if page.ur:
ctx.move_to(opts.pagewidth,0)
ctx.rel_line_to(0,2*opts.pagepadding)
ctx.rel_line_to(-2*opts.pagepadding,-2*opts.pagepadding)
ctx.close_path()
if page.uc:
ctx.move_to(opts.pagewidth/2,0.)
ctx.rel_line_to(opts.pagepadding,opts.pagepadding)
ctx.rel_line_to(-2*opts.pagepadding,0)
ctx.close_path()
if page.dc:
ctx.move_to(opts.pagewidth/2,opts.pageheight)
ctx.rel_line_to(opts.pagepadding,-opts.pagepadding)
ctx.rel_line_to(-2*opts.pagepadding,0)
ctx.close_path()
ctx.fill()
        # Draw adjacent page numbers (white, centred inside the arrows).
        ctx.set_source_rgb(1., 1., 1.)
        arrowfont = pango.FontDescription("Sans " + str(opts.pagepadding*.38))
        if pagecount % 2 != 1:
            # Even page numbers: labels near the right edge.
            if page.dr:
                layout=ctx.create_layout()
                layout.set_width(int(opts.pagepadding*2))
                layout.set_alignment(pango.ALIGN_CENTER)
                layout.set_font_description(arrowfont)
                layout.set_text(str(page.dr))
                # Vertically centre on the target point: pango sizes are in
                # pango units, hence the division by pango.SCALE.
                ctx.move_to(opts.pagewidth-opts.pagepadding*2/3, opts.pageheight-opts.pagepadding*2/3-0.5*layout.get_size()[1]/pango.SCALE)
                ctx.update_layout(layout)
                ctx.show_layout(layout)
            if page.mr:
                layout=ctx.create_layout()
                layout.set_width(int(opts.pagepadding*2))
                layout.set_alignment(pango.ALIGN_CENTER)
                layout.set_font_description(arrowfont)
                layout.set_text(str(page.mr))
                ctx.move_to(opts.pagewidth-opts.pagepadding*2/3, opts.pageheight/2-0.5*layout.get_size()[1]/pango.SCALE)
                ctx.update_layout(layout)
                ctx.show_layout(layout)
            if page.ur:
                layout=ctx.create_layout()
                layout.set_width(int(opts.pagepadding*2))
                layout.set_alignment(pango.ALIGN_CENTER)
                layout.set_font_description(arrowfont)
                layout.set_text(str(page.ur))
                ctx.move_to(opts.pagewidth-opts.pagepadding*2/3, opts.pagepadding*2/3-0.5*layout.get_size()[1]/pango.SCALE)
                ctx.update_layout(layout)
                ctx.show_layout(layout)
        else:
            # Odd page numbers: labels near the left edge, plus the
            # centre-top/centre-bottom labels.
            if page.ul:
                layout=ctx.create_layout()
                layout.set_width(int(opts.pagepadding*2))
                layout.set_alignment(pango.ALIGN_CENTER)
                layout.set_font_description(arrowfont)
                layout.set_text(str(page.ul))
                ctx.move_to(opts.pagepadding*2/3, opts.pagepadding*2/3-0.5*layout.get_size()[1]/pango.SCALE)
                ctx.update_layout(layout)
                ctx.show_layout(layout)
            if page.ml:
                layout=ctx.create_layout()
                layout.set_width(int(opts.pagepadding*2))
                layout.set_alignment(pango.ALIGN_CENTER)
                layout.set_font_description(arrowfont)
                layout.set_text(str(page.ml))
                ctx.move_to(opts.pagepadding*2/3, opts.pageheight/2-0.5*layout.get_size()[1]/pango.SCALE)
                ctx.update_layout(layout)
                ctx.show_layout(layout)
            if page.dl:
                layout=ctx.create_layout()
                layout.set_width(int(opts.pagepadding*2))
                layout.set_alignment(pango.ALIGN_CENTER)
                layout.set_font_description(arrowfont)
                layout.set_text(str(page.dl))
                ctx.move_to(opts.pagepadding*2/3, opts.pageheight-opts.pagepadding*2/3-0.5*layout.get_size()[1]/pango.SCALE)
                ctx.update_layout(layout)
                ctx.show_layout(layout)
            # NOTE(review): uc/dc labels only appear on odd pages (matching
            # the arrow section above) -- confirm this is intended.
            if page.uc:
                layout=ctx.create_layout()
                layout.set_width(int(opts.pagepadding*2))
                layout.set_alignment(pango.ALIGN_CENTER)
                layout.set_font_description(arrowfont)
                layout.set_text(str(page.uc))
                ctx.move_to(opts.pagewidth/2, opts.pagepadding*2/3-0.5*layout.get_size()[1]/pango.SCALE)
                ctx.update_layout(layout)
                ctx.show_layout(layout)
            if page.dc:
                layout=ctx.create_layout()
                layout.set_width(int(opts.pagepadding*2))
                layout.set_alignment(pango.ALIGN_CENTER)
                layout.set_font_description(arrowfont)
                layout.set_text(str(page.dc))
                ctx.move_to(opts.pagewidth/2, opts.pageheight-opts.pagepadding*2/3-0.5*layout.get_size()[1]/pango.SCALE)
                ctx.update_layout(layout)
                ctx.show_layout(layout)
        # Draw this page's own map number in a small grey box at the bottom,
        # on the gutter side (left for even pages, right for odd).
        if pagecount % 2 != 1:
            ctx.rectangle(opts.pagepadding*2.75,opts.pageheight-opts.pagepadding, opts.pagepadding*2, opts.pagepadding*.8)
        else:
            ctx.rectangle(opts.pagewidth-opts.pagepadding*4.75,opts.pageheight-opts.pagepadding, opts.pagepadding*2, opts.pagepadding*.8)
        ctx.set_source_rgb(0.95, 0.95, 0.95)
        ctx.fill_preserve()
        ctx.set_source_rgb(0., 0., 0.)
        ctx.stroke_preserve()
        ctx.set_source_rgb(0., 0., 0.)
        layout=ctx.create_layout()
        layout.set_width(int(opts.pagepadding*4))
        if pagecount % 2 != 1:
            layout.set_alignment(pango.ALIGN_LEFT)
        else:
            layout.set_alignment(pango.ALIGN_RIGHT)
        layout.set_font_description(pango.FontDescription("Sans " + str(opts.pagepadding*.5)))
        layout.set_text(str(page.mapnumber))
        if pagecount % 2 != 1:
            ctx.move_to(opts.pagepadding*3,opts.pageheight-opts.pagepadding)
        else:
            ctx.move_to(opts.pagewidth-opts.pagepadding*3,opts.pageheight-opts.pagepadding)
        ctx.update_layout(layout)
        ctx.show_layout(layout)
        # Emit the finished page and advance to the next sheet of the PDF.
        ctx.show_page()
        pagecount = pagecount + 1
    # Flush and close the PDF once all pages are rendered.
    book.finish()
| true |
00c12bdf1943ebe6cee196f3227f68cd53344f59 | Python | Iwomichu/probable-giggle | /space_game/managers/EventManager.py | UTF-8 | 2,363 | 2.578125 | 3 | [
"MIT"
] | permissive | from typing import Dict, Deque, DefaultDict, Any
from collections import deque, defaultdict
from space_game.domain_names import ObjectId
from space_game.events.EventProcessor import EventProcessor
from space_game.events.Event import Event
from space_game.managers.ObjectsManager import objects_manager
from space_game.events.creation_events.NewEventProcessorAddedEvent import NewEventProcessorAddedEvent
from space_game.events.creation_events.NewObjectCreatedEvent import NewObjectCreatedEvent
from space_game.events.ObjectDeletedEvent import ObjectDeletedEvent
class EventManager(EventProcessor):
    """Central event bus: queues Events and fans them out to the processors
    registered for each event type."""
    def __init__(self):
        # event type -> {id(processor) -> processor}
        self.event_processors: DefaultDict[Any, Dict[ObjectId, EventProcessor]] = defaultdict(dict)
        self.event_queue: Deque[Event] = deque()
        # The manager handles its own bookkeeping events...
        for bookkeeping_type in (ObjectDeletedEvent, NewEventProcessorAddedEvent):
            self.event_processors[bookkeeping_type][id(self)] = self
        # ...and the global objects_manager tracks object creation/deletion.
        for tracked_type in (NewObjectCreatedEvent, ObjectDeletedEvent):
            self.event_processors[tracked_type][id(objects_manager)] = objects_manager
        # Dispatch table used by process_event(); plain Events are ignored.
        self.event_resolver = {
            ObjectDeletedEvent: self.process_object_deleted_event,
            NewEventProcessorAddedEvent: self.process_new_event_processor_added_event,
            Event: lambda e: None
        }
    def process_events(self) -> None:
        """Drain the queue, delivering each event to its registered processors."""
        while self.event_queue:
            event = self.event_queue.popleft()
            for processor in self.event_processors[type(event)].values():
                processor.process_event(event)
    def process_event(self, event: Event):
        """Handle a bookkeeping event addressed to the manager itself."""
        handler = self.event_resolver[type(event)]
        handler(event)
    def process_object_deleted_event(self, event: ObjectDeletedEvent):
        """Drop every registration belonging to the deleted object."""
        doomed = event.object_id
        for registrations in self.event_processors.values():
            registrations.pop(doomed, None)
    def process_new_event_processor_added_event(self, event: NewEventProcessorAddedEvent):
        self.add_event_processor(event.processor_id, event.event_type)
    def add_event(self, event: Event):
        """Queue an event for the next process_events() pass."""
        self.event_queue.append(event)
    def add_event_processor(self, event_processor_id: ObjectId, event_type: Any):
        """Register an already-created object as a processor for event_type."""
        processor = objects_manager.get_by_id(event_processor_id)
        self.event_processors[event_type][event_processor_id] = processor
| true |
56682415dfa35c08f408fa9445611166a203f81c | Python | Seamonsters-2605/CompetitionBot2018 | /robotconfig.py | UTF-8 | 3,290 | 2.59375 | 3 | [] | no_license | import math
from ctre import ControlMode
theRobot = "2018 new encoders"
class DriveGear:
    """One drive configuration: control mode, joystick scale factors and
    PIDF gains."""
    def __init__(self, mode,
                 forwardScale=1.0, strafeScale=1.0, turnScale=1.0,
                 p=0.0, i=0.0, d=0.0, f=0.0):
        self.mode = mode
        self.forwardScale, self.strafeScale, self.turnScale = \
            forwardScale, strafeScale, turnScale
        self.p, self.i, self.d, self.f = p, i, d, f

    def __repr__(self):
        scales = (self.forwardScale, self.strafeScale, self.turnScale)
        gains = (self.p, self.i, self.d, self.f)
        return "%s fwd %f str %f trn %f (%f %f %f %f)" \
            % ((str(self.mode),) + scales + gains)
if theRobot == "Leviathan":
    # 6-inch wheels.
    wheelCircumference = 6 * math.pi
    # encoder has 100 raw ticks -- with a QuadEncoder that makes 400 ticks;
    # the motor gear has 12 teeth and the wheel has 85 teeth:
    # 85 / 12 * 400 = 2833.333 = ~2833
    ticksPerWheelRotation = 2833
    maxError = 1.5 * ticksPerWheelRotation
    maxVelocityPositionMode = 650
    maxVelocitySpeedMode = 5 * maxVelocityPositionMode
    # One (p, i, d, f) tuple per gear.
    positionModePIDs = (
        (30.0, 0.0009, 3.0, 0.0),
        (3.0, 0.0009, 3.0, 0.0),
        (1.0, 0.0009, 3.0, 0.0),
    )
    speedModePIDs = (
        (3.0, 0.0009, 3.0, 0.0),
        (1.0, 0.0009, 3.0, 0.0),
        (1.0, 0.0009, 3.0, 0.0),
    )
elif theRobot == "2018" or theRobot == "2018 new encoders":
    if theRobot == "2018 new encoders":
        # Measured: 10,767; 10,819; 10,832
        ticksPerWheelRotation = 10826
        maxVelocitySpeedMode = 12115
    else:
        ticksPerWheelRotation = 7149
        maxVelocitySpeedMode = 8000
    # 6-inch wheels.
    wheelCircumference = 6 * math.pi
    maxError = 1.5 * ticksPerWheelRotation
    maxVelocityPositionMode = maxVelocitySpeedMode / 5
    # Three gears each (slow/medium/fast).
    normalGears = (
        DriveGear(mode=ControlMode.Velocity, forwardScale=0.4,
                  strafeScale=0.15, turnScale=0.2, p=0.25, i=0.0, d=5.0),
        DriveGear(mode=ControlMode.Velocity, forwardScale=0.5,
                  strafeScale=0.2, turnScale=0.4, p=0.25, i=0.0, d=5.0),
        DriveGear(mode=ControlMode.Velocity, forwardScale=0.8,
                  strafeScale=0.2, turnScale=0.5, p=0.1, i=0.0009, d=3.0),
    )
    slowPIDGears = (
        DriveGear(mode=ControlMode.Velocity, forwardScale=0.4,
                  strafeScale=0.15, turnScale=0.3, p=0.1, i=0.0009, d=3.0),
        DriveGear(mode=ControlMode.Velocity, forwardScale=0.5,
                  strafeScale=0.2, turnScale=0.4, p=0.1, i=0.0009, d=3.0),
        DriveGear(mode=ControlMode.Velocity, forwardScale=0.8,
                  strafeScale=0.2, turnScale=0.5, p=0.1, i=0.0009, d=3.0),
    )
    # Open-loop fallback gears (no PID).
    voltageGears = (
        DriveGear(mode=ControlMode.PercentOutput, forwardScale=0.5,
                  strafeScale=0.6, turnScale=0.4),
        DriveGear(mode=ControlMode.PercentOutput, forwardScale=0.5,
                  strafeScale=0.6, turnScale=0.4),
        DriveGear(mode=ControlMode.PercentOutput, forwardScale=1.0,
                  strafeScale=0.6, turnScale=0.4),
    )
    # Gears used by autonomous routines.
    autoGear = DriveGear(mode=ControlMode.Velocity, p=0.3, i=0.0, d=5.0)
    autoGearVoltage = DriveGear(mode=ControlMode.PercentOutput)
| true |
f6769660191585dfe0501e6efb6d81d94d93af60 | Python | marceloamaro/Python-Mombaca | /Lista Aula03 Decisões e Repetições/08a.py | UTF-8 | 364 | 4.53125 | 5 | [] | no_license | """
Faça uma função que receba uma lista de números inteiros e retorne o maior e menor elemento desta lista. Utilize o for
"""
# Read ten integers from the user, then report the largest and smallest.
lista = [int(input(f"digite um valor para prosição {i}:")) for i in range(10)]
print(f"Voce digitou os valores da {lista}")
print("O maior elemento: ", max(lista))
print("O menor elemento: ", min(lista))
| true |
c73a0e95c6e341c792e8e95bc02c441ac6d65395 | Python | shilpisirohi12/db_api | /app.py | UTF-8 | 1,806 | 2.75 | 3 | [
"MIT"
] | permissive | import flask
import json
from flask import request, jsonify, render_template
from flask_sqlalchemy import SQLAlchemy
app =flask.Flask(__name__)
# NOTE(review): DB credentials are hard-coded; consider moving them to
# environment variables or a config file.
app.config['SQLALCHEMY_DATABASE_URI']='mysql://cutr:cutr@127.0.0.1:3306/world'
db=SQLAlchemy(app)
class WorldData(db.Model):
    """ORM model mapping the ``city`` table of the MySQL ``world`` sample DB."""
    __tablename__='city'
    id=db.Column('ID', db.INT, primary_key=True)
    name=db.Column('Name',db.CHAR(35))
    country_code=db.Column('CountryCode',db.CHAR(3))
    district=db.Column('District',db.CHAR(20))
    population=db.Column('Population',db.INT)
    def __repr__(self):
        # Fixed: __repr__ must return a string; returning the int primary
        # key raised "TypeError: __repr__ returned non-string".
        return str(self.id)
@app.route('/', methods=['GET'])
def initPoint():
    """Serve the landing page from templates/home.html."""
    return render_template('home.html')
@app.route("/api/v1/resources/city/all", methods=['GET'])
def api_AllData():
cities=WorldData.query.all()
cityList=[]
""""Creating List Of Distionaries"""
for city in cities:
cityList.append({'id':city.id,'name':city.name,'country_code':city.country_code,'district':city.district,'population':city.population})
#print(city.id,city.name,city.country_code,city.district,city.population)
#print(cityList)
return jsonify(cityList)
@app.route("/api/v1/resources/city", methods=['GET'])
def api_byID():
results=[]
if 'name' in request.args:
name=request.args['name']
cityData=WorldData.query.filter(WorldData.name==name).all()
cityList=[]
for city in cityData:
cityList.append({'id':city.id,'name':city.name,'country_code':city.country_code,'district':city.district,'population':city.population})
else:
return " No city name found in request URL. Please enter URL with parameter name"
return jsonify(cityList)
if __name__ == "__main__":
app.run(debug=True) | true |
5139c1758b4c7fd3ffbcaa6056dbee52a719f91b | Python | Jayesh97/programmming | /cicso/100_stocks.py | UTF-8 | 169 | 3.0625 | 3 | [] | no_license | a = [7,1,5,3,6,4]
# Max single-transaction profit ("best time to buy and sell stock"):
# sweep the prices once, remembering the cheapest price seen so far
# (`least`) and the best achievable spread above it (`maxx`).
least = float('inf')
maxx = 0
for i in a:
    least = min(least, i)
    maxx = max(maxx, i - least)
print(maxx)
| true |
71a28acaea0b6541563d8cc91ac9b40dca4a83bf | Python | kumcp/python-uwsgi-deploy | /source/base/common/dict_handle.py | UTF-8 | 2,204 | 3.53125 | 4 | [] | no_license | from .validate import has_key
def summary_list_dict(*dict_list):
    """Merge dicts by summing the values of shared keys.

    Ex:
        summary_list_dict(
            {"time": 1, "amount": 2},
            {"time": 2, "amount": 3},
            {"time": 3, "amount": 4},
        ) -> {"time": 6, "amount": 9}

    (The previous docstring example passed a single list, which would not
    work with the *dict_list signature.)

    Arguments:
        *dict_list -- any number of dicts with addable values
    Returns:
        dict -- one dict holding the per-key sums
    """
    def summary_compute(prev, current):
        # Fold `current` into the running totals in `prev`.
        for key in current:
            if prev.get(key) is not None:
                prev[key] = prev[key] + current[key]
            else:
                prev[key] = current[key]
        return prev
    return compute_list_dict(summary_compute, *dict_list)


def compute_list_dict(func, *dict_list):
    """Left-fold `func` over the given dicts, starting from an empty dict.

    Ex:
        compute_list_dict(lambda acc, d: {**acc, **d},
                          {"a": 1}, {"b": 2}) -> {"a": 1, "b": 2}

    Arguments:
        func {function} -- binary reducer: (accumulator, next_dict) -> accumulator
    Returns:
        dict -- the accumulator after folding every dict
    """
    result = {}
    for curr_dict in dict_list:
        result = func(result, curr_dict)
    return result
def map_dict(dict_object, mapping_table):
    """Re-key a dict: each new key pulls the value stored under its mapped
    old key.

    Arguments:
        dict_object {dict} -- Source dictionary
        mapping_table {dict} -- {new_key: old_key}, both strings
    Returns:
        dict -- New dict with the new key set
    """
    return {key: dict_object[val] for key, val in mapping_table.items()}


def map_list_dict(list_dict_object, mapping_table):
    """Apply map_dict-style re-keying to every element of a list.

    Elements may be plain dicts or objects whose attributes hold the data.
    (Previously every element went through vars(), so passing dicts raised
    TypeError even though the docstring promised dict support.)

    Arguments:
        list_dict_object {list} -- list of objects or dicts
        mapping_table {dict} -- {new_key: old_key}, both strings
    Returns:
        list -- list of new dicts with the new key set
    """
    return [
        map_dict(item if isinstance(item, dict) else vars(item), mapping_table)
        for item in list_dict_object
    ]
57038ddaa185944a833bdfda3e5e524bf417c535 | Python | EdwardTFS/raspi-examples | /lcd/lcd3.py | UTF-8 | 1,228 | 2.828125 | 3 | [] | no_license | #!/usr/bin/python3
from RPLCD.gpio import CharLCD, GPIO
import time
print("Start")
lcd = CharLCD(cols=16, rows=2, pin_rs=22, pin_e=18, pins_data=[16, 15, 13, 11],numbering_mode=GPIO.BOARD)
lcd.write_string('Czesc Lukasz!')
text =input ("Wypisz tekst wprowadzony>")
lcd.write_string(text)
input('Smiley')
#własne znaki
smiley = (
0b00000,
0b01010,
0b01010,
0b00000,
0b10001,
0b10001,
0b01110,
0b00000,
)
lcd.create_char(0, smiley)
lcd.write_string('\x00')
input('Test scrolla - ctrl+c exit')
framebuffer = [
'Test scrolla',
'',
]
def write_to_lcd(lcd, framebuffer, num_cols):
    """Push every framebuffer row to the LCD, padded/clipped to num_cols."""
    lcd.home()
    for line in framebuffer:
        padded = line.ljust(num_cols)[:num_cols]
        lcd.write_string(padded)
        lcd.write_string('\r\n')
def loop_string(string, lcd, framebuffer, row, num_cols, delay=0.2):
    """Scroll `string` horizontally through framebuffer[row], redrawing the
    whole display after each one-column shift, pausing `delay` seconds."""
    padding = ' ' * num_cols
    padded = padding + string + padding
    for start in range(len(padded) - num_cols + 1):
        framebuffer[row] = padded[start:start + num_cols]
        write_to_lcd(lcd, framebuffer, num_cols)
        time.sleep(delay)
try:
    # Scroll the demo string on row 1 forever; Ctrl+C stops cleanly.
    while True:
        loop_string('testowy dlugi napis abcd', lcd, framebuffer, 1, 16)
except KeyboardInterrupt:
    pass
# Release the GPIO pins and blank the display on exit.
lcd.close(clear=True)
print("End")
| true |
7b92dad4b582cbf9765f9601e774964ec8ec69b4 | Python | taeseunglee/hackerrank-Regex | /7.Applications/uk_and_us_part2.py | UTF-8 | 471 | 2.96875 | 3 | [] | no_license | import re
if __name__ == '__main__':
    # Read the text lines, then the query words, from stdin.
    lines = []
    num_lines = int(input())
    for nl in range(num_lines):
        lines.append(input())
    queries = []
    num_queries = int(input())
    for nq in range(num_queries):
        queries.append(input())
    for q in queries:
        # Whole-word match of either the UK spelling or its US variant
        # (every "our" replaced by "or", e.g. colour -> color).
        prog = re.compile("\\b(" + q + "|" + q.replace("our", "or") + ")\\b")
        cnt = 0
        for l in lines:
            cnt += len(prog.findall(l))
print(cnt) | true |
13d84b500aaf5d9dfdb5ef939909baea13602322 | Python | whogopu/ml_nlp_practice | /NLP/TFIDF_vectorize_string_similarity.py | UTF-8 | 2,338 | 3.109375 | 3 | [] | no_license | from nlpia.data.loaders import harry_docs as docs
from nltk.tokenize import TreebankWordTokenizer
tokenizer = TreebankWordTokenizer()
# Lower-case and tokenize each document.
doc_tokens = []
for doc in docs:
    doc_tokens += [sorted(tokenizer.tokenize(doc.lower()))]
# The bare len(...) expressions below are leftover REPL inspection; they
# compute and discard their value when run as a script.
len(doc_tokens)
all_doc_tokens = sum(doc_tokens, [])
len(all_doc_tokens)
# Sorted vocabulary over all documents.
lexicons = sorted(set(all_doc_tokens))
len(lexicons)
# create zero vector for comparison
from collections import OrderedDict
zero_vec = OrderedDict((token, 0) for token in lexicons)
# now make a copy of zero_vec and update the values for each doc
import copy
from collections import Counter
# Calculate one TF-IDF vector per document.
doc_tfidf_vector = []
for doc in docs:
    vec = copy.copy(zero_vec)
    tokens = tokenizer.tokenize(doc.lower())
    token_counts = Counter(tokens)
    for token, count in token_counts.items():
        # Document frequency by substring containment, not tokenization.
        docs_containing_token = 0
        for _doc in docs:
            if token in _doc.lower():
                docs_containing_token += 1
        # NOTE(review): TF is normalised by the vocabulary size here rather
        # than the document length, and IDF is a raw ratio (no log) --
        # confirm this matches the intended formulation.
        tf = count/len(lexicons)
        if docs_containing_token:
            idf = len(docs)/docs_containing_token
        else:
            idf = 0
        vec[token] = tf*idf
    doc_tfidf_vector.append(vec)
import math
def consine_sim(vec1, vec2):
    """Return the cosine similarity of two keyed vectors (dicts of numbers).

    The keys must be aligned the same way in both vectors; only the values
    are used.  (The misspelt name is kept for compatibility with callers.)
    """
    v1 = [val for val in vec1.values()]
    v2 = [val for val in vec2.values()]
    dot_prod = sum(a * v2[i] for i, a in enumerate(v1))
    mag1 = math.sqrt(sum(a ** 2 for a in v1))
    mag2 = math.sqrt(sum(b ** 2 for b in v2))
    return dot_prod / (mag1 * mag2)
# Build the query's TF-IDF vector the same way as the documents above.
#query = "why i am so hairy as harry"
query = "How long does it take to get to the store?"
query_vec = copy.copy(zero_vec)
query_tokens = tokenizer.tokenize(query)
query_tokens_counts = Counter(query_tokens)
for token, count in query_tokens_counts.items():
    doc_containing_token = 0
    for _doc in docs:
        if token in _doc.lower():
            doc_containing_token += 1
    # Tokens absent from every document are skipped entirely.
    if doc_containing_token == 0:
        continue
    tf = count/len(lexicons)
    # Note: after the `continue` above, this condition is always true, so
    # the else branch is unreachable.
    if(doc_containing_token):
        idf = len(docs)/doc_containing_token
    else:
        idf = 0
    query_vec[token] = tf*idf
# Check string similarity against the stored document TF-IDF vectors.
for tfidf in doc_tfidf_vector:
    print(consine_sim(query_vec, tfidf))
| true |
045a9b7d759c64f1665386fe7f6888fef813d513 | Python | Chronoes/project-euler | /euler_36.py | UTF-8 | 214 | 3.078125 | 3 | [] | no_license | # -*- coding:utf-8 -*-
# Project Euler 36: add up every number below one million that is a
# palindrome in base 10 and in base 2 (binary written without leading zeros,
# as bin() produces).  NOTE: the accumulator keeps the original script's
# name `sum`, which shadows the builtin.
sum = 0
for n in range(1, 1000000):
    decimal = str(n)
    binary = bin(n)[2:]
    if decimal == decimal[::-1] and binary == binary[::-1]:
        sum += n
4c2f4cf6993eaa306f482cfe39b4760ead1fbe4f | Python | thechargedneutron/First-ML-implementation | /predict.py | UTF-8 | 1,191 | 2.671875 | 3 | [] | no_license | import numpy
import pandas
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
# Fixed: LabelEncoder lives in sklearn.preprocessing, not
# sklearn.model_selection -- the previous import raised ImportError.
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
# Fix the RNG seed for reproducibility.
seed=7
numpy.random.seed(seed)
# iris.csv: four float features followed by the class label, no header row.
dataframe=pandas.read_csv("iris.csv",header=None)
dataset=dataframe.values
X=dataset[:,0:4].astype(float)
Y=dataset[:,4]
# Encode the class labels as integers, then expand to one-hot vectors.
encoder=LabelEncoder()
encoder.fit(Y)
encoded_Y=encoder.transform(Y)
dummy_y=np_utils.to_categorical(encoded_Y)
def baseline_model():
    """Build and compile the 4-8-3 softmax network used for the iris data."""
    network = Sequential()
    for layer in (Dense(8, input_dim=4, activation='relu'),
                  Dense(3, activation='softmax')):
        network.add(layer)
    network.compile(loss='categorical_crossentropy', optimizer='adam',
                    metrics=['accuracy'])
    return network
# Wrap the Keras model builder so scikit-learn's CV utilities can drive it.
estimator=KerasClassifier(build_fn=baseline_model, epochs=200, batch_size=5, verbose=0)
kfold=KFold(n_splits=10,shuffle=True, random_state=seed)
# 10-fold cross-validated accuracy on the one-hot targets.
results=cross_val_score(estimator, X, dummy_y, cv=kfold)
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
| true |
0c55ccf3ef5fa8fbea46c0175493bf23814fdb88 | Python | mazhitu/obs_noise.dir | /allsubs.py | UTF-8 | 5,729 | 2.625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 2 16:46:05 2018
@author: zhitu
"""
import numpy as np
from obspy.geodetics import locations2degrees
import matplotlib.pyplot as plt
from scipy.fftpack import fft,fftfreq
from scipy.fftpack import rfft,rfftfreq,irfft
def getsacname(network, station, starttime, endtime, channel):
    """Build the dotted SAC file name NET.STA.START.END.CHAN.SAC."""
    parts = (network, station, starttime, endtime, channel, 'SAC')
    return '.'.join(parts)
def Getwins(inventory,xlat1,xlon1,t1,U0=4.0):
    """Return start offsets (s) of earthquake-free windows in one day of data.

    Arguments:
        inventory -- iterable of obspy event objects (a catalog); the early
                     `break` below assumes it is sorted by origin time
        xlat1, xlon1 -- station latitude/longitude in degrees
        t1 -- UTCDateTime start of the 24 h record
        U0 -- assumed propagation speed in km/s used to predict arrival
              times (presumably a surface-wave group velocity -- confirm)
    Returns:
        (wins[idx], eq_labels) -- kept window start offsets, and the
        predicted arrival offsets (s after t1) of the excluded events
    """
    win_len=2000
    # Non-overlapping 2000 s windows covering the day; all kept initially.
    wins=np.arange(0,86400-2000,win_len)
    nwin=len(wins)
    idx=np.ones(nwin,dtype=bool)
    eq_labels=[]
    for ev in inventory:
        t0=ev.origins[0].time
        xlat0=ev.origins[0].latitude
        xlon0=ev.origins[0].longitude
        dist=locations2degrees(xlat0,xlon0,xlat1,xlon1)
        # Travel time: degrees * (111.1949 km/deg) / (U0 km/s) = seconds.
        time=dist*111.1949/U0
        if (t0+time < t1):
            continue
        if (t0+time >= t1+86400):
            break
        # Window index the predicted arrival falls into.
        ijk=np.floor((t0+time-t1)/win_len).astype(int)
        if (ijk <= nwin-1):
            # Drop that window and its immediate neighbours.
            # NOTE(review): np.maximum(1, ijk-1) never clears window 0's
            # left neighbour slot; 0 was probably intended -- confirm.
            idx[ijk]=False
            idx[np.maximum(1,ijk-1)]=False
            idx[np.minimum(nwin-1,ijk+1)]=False
            eq_labels.append(t0+time-t1)
            print(ev.origins[0].time,dist)
    return wins[idx],eq_labels
def Caltransfer(y1,y2,wins,nlen=2000,iopt=1):
    """Estimate the frequency-domain transfer function from y1 to y2.

    Cross- and auto-spectra are accumulated over the Hann-tapered segments
    starting at the sample offsets in `wins`, each `nlen` samples long
    (1 Hz sampling is assumed by the fftfreq calls below).

    Returns:
        ff -- positive-half frequency axis
        coh -- coherence between y1 and y2
        adm, phs -- admittance (gain) and phase of the transfer function
        adm_err, phs_err -- error estimates (all zeros if iopt == 0)
    """
    coh_debug=[]
    win_debug=[]
    for ijk,win in enumerate(wins):
        y1tmp=y1[win:win+nlen]
        y2tmp=y2[win:win+nlen]
        hann=np.hanning(nlen)
        # Keep only the positive-frequency half of each spectrum.
        y1_fft=np.split(fft(hann*y1tmp),2)[0]
        y2_fft=np.split(fft(hann*y2tmp),2)[0]
        if (ijk == 0):
            Gxy=np.conj(y1_fft)*y2_fft
            Gxx=np.conj(y1_fft)*y1_fft
            Gyy=np.conj(y2_fft)*y2_fft
        else:
            # Accumulate (Welch-style averaging without the final 1/N,
            # which cancels in the coherence/admittance ratios).
            Gxy=Gxy+np.conj(y1_fft)*y2_fft
            Gxx=Gxx+np.conj(y1_fft)*y1_fft
            Gyy=Gyy+np.conj(y2_fft)*y2_fft
        # Debug trace: mean coherence in the 5-10 mHz band using the
        # spectra accumulated so far.
        ff=np.split(fftfreq(nlen,1.0),2)[0]
        idx=(ff>0.005) & (ff<0.010)
        cohtmp=np.abs(Gxy)**2/np.real(Gxx)/np.real(Gyy)
        cohtmp=np.sqrt(cohtmp)
        coh_debug.append(np.mean(cohtmp[idx]))
        win_debug.append(win)
    coh=np.abs(Gxy)**2/np.real(Gxx)/np.real(Gyy)
    coh=np.sqrt(coh)
    if (iopt == 0):
        adm=0.
        phs=0.
        adm_err=0.
        phs_err=0.
    else:
        adm=np.abs(Gxy)/np.real(Gxx)
        phs=np.angle(Gxy)
        nd=len(wins)
        # Error estimate scales with sqrt((1-coh^2)) / (coh * sqrt(2*nd)).
        adm_err=np.sqrt(1.-coh**2)/coh/np.sqrt(2*nd)
        adm_err=adm*adm_err
        phs_err=adm_err
    ff=np.split(fftfreq(nlen,1.0),2)[0]
    # Side effect: plots the per-window debug coherence on the current figure.
    plt.plot(win_debug,coh_debug,'o')
    return ff,coh,adm,phs,adm_err,phs_err
def Remove(tr1,tr2,adm,adm_err,phs,phs_err,f1,f2,ff,iplot=0):
    """Fit quadratics (in frequency) to adm and phs over [f1, f2], use them
    to predict tr2 from tr1 in that band, and subtract the prediction.

    Returns:
        tr_pred -- trace holding the band-limited prediction
        tr_left -- tr2 with the prediction removed
    """
    # Weighted quadratic fits; weights are the inverse error estimates.
    idx=(ff>f1) & (ff<f2)
    ff_select=ff[idx]
    adm_select=adm[idx]
    adm_err_select=adm_err[idx]
    w=1./adm_err_select
    apol=np.polyfit(ff_select,adm_select,2,w=w)
    phs_select=phs[idx]
    phs_err_select=phs_err[idx]
    w=1./phs_err_select
    ppol=np.polyfit(ff_select,phs_select,2,w=w)
    if (iplot==1):
        # Diagnostic plots: data vs quadratic fit for admittance and phase.
        plt.subplot(1,2,1)
        adm_fit=apol[0]*ff_select**2+apol[1]*ff_select+apol[2]
        plt.plot(ff_select,adm_select)
        plt.plot(ff_select,adm_fit)
        plt.subplot(1,2,2)
        phs_fit=ppol[0]*ff_select**2+ppol[1]*ff_select+ppol[2]
        plt.plot(ff_select,phs_select)
        plt.plot(ff_select,phs_fit)
        plt.show()
        plt.close()
    ffr=rfftfreq(len(tr1.data),1.0)
    tr_pred=tr1.copy()
    tr_left=tr1.copy()
    # scipy.fftpack.rfft packs the real spectrum as [r0, r1, i1, r2, i2, ...],
    # so entries (i, i+1) below hold one complex coefficient.
    Htmp_spec=rfft(tr1.data)
    Htmp_spec[0]=0
    Htmp_spec[-1]=0
    for i in np.arange(1,len(ffr)-1,2):
        rp=Htmp_spec[i]
        ip=Htmp_spec[i+1]
        # Zero everything outside the fitted band.
        if(ffr[i]>f2 or ffr[i]<f1):
            Htmp_spec[i]=0.
            Htmp_spec[i+1]=0.
            continue
        # Evaluate the fitted transfer function at this frequency.
        # NOTE: `phs` (the parameter) is reused as a loop temporary here;
        # it is not read again afterwards, so this is harmless.
        amp=apol[0]*ffr[i]**2+apol[1]*ffr[i]+apol[2]
        phs=ppol[0]*ffr[i]**2+ppol[1]*ffr[i]+ppol[2]
        c=amp*np.cos(phs)
        d=amp*np.sin(phs)
        # Complex multiply (rp + j*ip) * (c + j*d).
        Htmp_spec[i]=rp*c-ip*d
        Htmp_spec[i+1]=ip*c+rp*d
    Htmp=irfft(Htmp_spec)
    tr_pred.data=Htmp
    tr_left.data=tr2.data-Htmp
    return tr_pred,tr_left
def Plot_Trace(tr_list,labels=[],eq_labels=[],title=[],outfile='test.ps'):
    """Plot each trace (bandpassed 0.01-0.05 Hz on a copy) in its own
    stacked subplot and save the figure to `outfile`.

    fac supplies a per-subplot amplitude scale factor; eq_labels are x
    positions marked with red triangles on subplots 1, 6 and 9.
    NOTE(review): the mutable defaults (labels=[], eq_labels=[], title=[])
    are never mutated here, but None defaults would be safer.
    """
    plt.figure(figsize=(7,9))
    ntr=len(tr_list)
    fac=[1e+6,1e+3,1e+3,1e+3,1e+6,1e+6,1,1e+6,1e+6]
    for itr,tr in enumerate(tr_list,1):
        tt=tr.times()
        tc=tr.copy()
        tc.filter('bandpass',freqmin=0.01,freqmax=0.05)
        ax=plt.subplot(ntr,1,itr)
        plt.plot(tt,tc.data*fac[itr-1]);
        ax.ticklabel_format(style='plain')
        # Only the bottom subplot keeps its x tick labels.
        if itr < len(tr_list):
            ax.tick_params(labelbottom=False)
#        if (itr in [1,5,6,8,9]):
#            plt.ylim((-0.00003,0.00003))
        if (len(labels)>0):
            plt.ylabel(labels[itr-1])
        if (title and itr == 1):
            plt.title(title)
        if (itr in [1,6,9]):
            # Mark predicted earthquake arrivals at the trace maximum level.
            ymax=np.max(tc.data)*fac[itr-1]
            for x in eq_labels:
                plt.plot(x,ymax,'rv')
    plt.savefig(outfile,orientation='landscape')
#    plt.show()
#    plt.close()
#    tr=tr_list[-1]
#    plt.plot(tr.times(),tr.data);plt.ylim((-0.0001,0.0001))
#    plt.show()
| true |
65170a89959c18911d25b44ec813888c71d35a00 | Python | Alonsovau/sketches | /chapter9/st23.py | UTF-8 | 622 | 3.78125 | 4 | [] | no_license | # 在局部变量域中执行代码
# a = 13
# exec('b = a + 1')
# print(b)
def test():
    # Demo: exec() can read the enclosing locals, and (on CPython <= 3.12)
    # its assignment to `b` lands in the dict previously returned by
    # locals(), so reading it back through that dict prints 14.
    # NOTE(review): PEP 667 (Python 3.13+) changes locals()/exec semantics
    # in optimized scopes -- confirm the target interpreter version.
    a = 13
    loc = locals()
    exec('b = a + 1')
    b = loc['b']
    print(b)
test()
def test2():
    # Demo: exec's write shows up in the locals() dict view (the "after"
    # print), but the real local slot `x` is untouched, so x is still 0.
    x = 0
    loc = locals()
    print('before:', loc)
    exec('x += 1')
    print('after:', loc)
    print('x = ', x)
test2()
def test3():
    # Demo: a fresh bare locals() call refreshes the dict from the real
    # frame slots, discarding the value exec wrote into it.
    x = 0
    loc = locals()
    print('test3', loc)
    exec('x += 1')
    print('test3', loc)
    locals()
    print('test3', loc)
test3()
def test4():
    # Passing explicit globals/locals dicts to exec() is the reliable,
    # version-independent way to get values back out: prints 14.
    a = 13
    loc = {'a': a}
    glb = {}
    exec('b = a + 1', glb, loc)
    b = loc['b']
    print('test4', b)
test4() | true |
24676dcf9b85918c07e138ad844ab0ce2796cab5 | Python | yangzhenkoxui/interface_ipi | /base/test_unittest.py | UTF-8 | 1,697 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-04-02 15:39
# @Author : Aries
# @Site :
# @File : test_unittest.py
# @Software: PyCharm
import unittest
from openpyxl.compat import file
from base.demo import RunMain
import HTMLTestRunner
class TestMethod(unittest.TestCase):
    """Sample HTTP-interface test case driven by the project's RunMain helper."""
    @classmethod
    def setUpClass(cls):
        # Runs once, before any test in this class.
        print('class 执行之前的方法')
    @classmethod
    def tearDownClass(cls):
        # Runs once, after all tests in this class.
        print('class 执行之后的方法')
    # Runs before every test method.
    def setUp(self):
        self.run = RunMain()
        print('test-->setup')
    # Runs after every test method.
    def tearDown(self):
        print('test-->teardown')
    def test_01(self):
        # Placeholder request: url/data are intentionally empty here.
        url = ''
        data = {
        }
        res = self.run.run_main(url,data)
        # When test parameters depend on each other, share values via a
        # global variable.
        globals()['name'] = ''
        print('this is a testcase ')
    # Skip this case unconditionally.
    @unittest.skip('')
    def test_02(self):
        url = ''
        data = {
        }
        res = self.run.run_main(url,data)
        print('this is a testcase2 ')
if __name__ == '__main__':
    # Path where the HTML report is written.
    filepath = "../report/htmlreport.html"
    # Binary stream HTMLTestRunner writes the report into.
    # Fix: use the builtin open() instead of openpyxl's deprecated 'file' shim.
    fp = open(filepath, 'wb')
    try:
        # Fix: TestSuite must be *instantiated* -- the original assigned the
        # class object itself, so addTest() would have failed at runtime.
        suite = unittest.TestSuite()
        # Add individual cases by (test-class, method name).
        suite.addTest(TestMethod('test_02'))
        # Run the suite through HTMLTestRunner to produce the report.
        # (The original also called unittest.TextTestRunner.run(suite) on the
        # *class*, which raises TypeError; that broken call is removed.)
        runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title='this is first report ')
        runner.run(suite)
    finally:
        # Always release the report file handle.
        fp.close()
5400f1b0883d4c66ebce8c90b111f37caf626471 | Python | Sandy4321/DataScienceFromScratch-9 | /probability/conditional_probability.py | UTF-8 | 661 | 3.71875 | 4 | [
"MIT"
] | permissive | import random
def random_kid():
    """Pick a newborn's sex uniformly at random ('boy' or 'girl')."""
    return random.choice(["boy", "girl"])
def main():
    """Monte-Carlo estimate of the classic two-children conditional probabilities.

    Simulates 10000 two-child families and prints
    P(both girls | older is a girl) and P(both girls | at least one girl).
    """
    both_girls = 0
    older_girl = 0
    either_girl = 0
    random.seed(42)
    for _ in range(10000):
        younger = random_kid()
        older = random_kid()
        if older == "girl":
            older_girl += 1
        if older == "girl" and younger == "girl":
            both_girls += 1
        if older == "girl" or younger == "girl":
            either_girl += 1
    print("P(both | older) = " + str(both_girls / older_girl))
    print("P(both | either) = " + str(both_girls / either_girl))


if __name__ == '__main__':
    main()
| true |
419395f596dec4decd405b7af1117650f96c59f0 | Python | shuxinzhang/nltk-learning | /exercises/Chapter 02/02-6.py | UTF-8 | 561 | 2.8125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('TkAgg')
import nltk
'''
☼ In the discussion of comparative wordlists, we created an object
called translate which you could look up using words in both German and Spanish
in order to get corresponding words in English.
What problem might arise with this approach?
Can you suggest a way to avoid this problem?
'''
'''
Some words have multiple meanings, so a single German or Spanish key may map to the wrong English sense, causing ambiguity.
To avoid this, compare the synsets of the candidate words and choose the translation whose synonyms and definition are closest to the intended meaning.
'''
| true |
bef2512e6250a3652f025db2a294fcc9be561529 | Python | avatar196kc/dev-mooc | /data-science/dataquest/dataquest-data-scientist-path/7-Advanced-Python-and-Computer-Science/1-data-structures-and-algorithms/guided-project/read.py | UTF-8 | 966 | 2.609375 | 3 | [] | no_license | import pandas as pd
import csv
# Read the aviation-incident dump, strip embedded commas, and rebuild each
# raw line as a single '-'-terminated string (one list element per line).
# Fix: the original left the file handle open; a with-statement closes it.
aviation_data = []
with open('AviationData.txt', 'r', encoding='utf-8') as f:
    reader = csv.reader(f)
    for line in reader:
        summed = ''
        for field in line:
            summed += field.replace(',', '')
            summed += '-'
        aviation_data.append([summed])

# Split every record on ' | ' and drop the trailing empty fragment.
# (The original special-cased row 0 but both branches did the same thing.)
aviation_list = []
for row in aviation_data:
    aviation_list.append(row[0].split(' | ')[:-1])

# Collect the row(s) containing the accident number LAX94LA336.
lax_code = []
for avl in aviation_list:
    for avl_component in avl:
        if avl_component == 'LAX94LA336':
            lax_code.append(avl)

# Turn the data rows into dicts keyed by the header row (row 0).
aviation_dict_list = []
colnames = []
for i, av_line in enumerate(aviation_list):
    if i == 0:
        colnames = av_line
    else:
        aviation_dict_list.append(dict(zip(colnames, av_line)))
2e09846c2bb05342943449ba5c0b257d05f3c4df | Python | zilani-09/Task_String_with_Python | /3.py | UTF-8 | 147 | 3.828125 | 4 | [] | no_license | input1 = input("Enter First Input\n")
input2 = input("Enter 2nd Input\n")
if (input1!=input2):
print("Not Same")
else:
print("same") | true |
44d6df0d664e7cd15eb45c6304dd412df77ad460 | Python | sathishsridhar/algorithms | /sortingalgos/quicksort.py | UTF-8 | 584 | 3.78125 | 4 | [] | no_license | def getPivot(arr,low,high):
pivot = arr[high]
i = low - 1
for j in range(low,high):
if arr[j] < pivot:
i+=1
arr[i],arr[j] = arr[j],arr[i]
arr[i+1],arr[high]=arr[high],arr[i+1]
return i+1
def quickSort(arr, low, high):
    """Sort arr[low..high] in place via recursive Lomuto quicksort."""
    if low >= high:
        return  # zero or one element: already sorted
    pivot_index = getPivot(arr, low, high)
    quickSort(arr, low, pivot_index - 1)
    quickSort(arr, pivot_index + 1, high)
if __name__=='__main__':
    # Demo: sort a sample list in place and print the result.
    arr = [24,32,5,7,93,60];
    quickSort(arr,0,len(arr)-1)
    print(arr)
'''
[24,32,5,7,60,93]
[5,32,24,7] [60,93]
[5,7,24,32] [60,93]
[5,7,24,32,60,93]
'''
| true |
6d3de4425d34754611e16b106719d5caacef5d71 | Python | iverson0201/Python100 | /水仙花数/sxhs.py | UTF-8 | 579 | 4.0625 | 4 | [
"Apache-2.0"
] | permissive | # -*- coding:utf-8 -*-
"""
描述:打印出所有的“水仙花数”,所谓“水仙花数”是指一个三位数,其各位数字立方和
等于该数本身。例如:153是一个“水仙花数”,因为153=1的三次方+5的三次方+3的三
次方。
标签:水仙花数
"""
def judge(num):
    """Return True if num equals the sum of the cubes of its decimal digits
    (i.e. num is a narcissistic / Armstrong number of order 3)."""
    total = 0
    remaining = num
    while remaining > 0:
        remaining, digit = divmod(remaining, 10)
        total += digit ** 3
    return total == num
if __name__ == "__main__" :
    # Print every three-digit narcissistic number, comma separated.
    for i in range(100,1000) :
        if judge(i) :
            print(i,end=',')
| true |
d66196860cc664b8624d92836ee6bc1338c5177a | Python | brainysmurf/pinned-timetable-cal | /timezone_info.py | UTF-8 | 1,459 | 3.09375 | 3 | [
"MIT"
] | permissive | """
Timezone assistant for pycal module
"""
from dateutil import tz as timezone_manager
from dateutil.parser import parse
import datetime, time
from app import TIMEZONE
local_timezone = timezone_manager.tzlocal()
target_timezone = timezone_manager.gettz(TIMEZONE)
def now_target_timezone():
    """Current time as a timezone-aware datetime in the configured TIMEZONE."""
    tz = timezone_manager.gettz(TIMEZONE)
    return datetime.datetime.now(tz)
def convert_to_target_timezone(the_date):
    """Convert a datetime to the target timezone; naive input is assumed local.

    BUG fixed: datetime.replace() returns a *new* object, so the original
    ``the_date.replace(tzinfo=local_timezone)`` discarded its result and
    naive datetimes were converted without the intended local offset.
    """
    if the_date.tzinfo is None:
        the_date = the_date.replace(tzinfo=local_timezone)
    return the_date.astimezone(target_timezone)
def convert_to_local_timezone(the_date):
    """Express the given datetime in the machine's local timezone."""
    localized = the_date.astimezone(local_timezone)
    return localized
def raw_string_with_timezone_to_target(raw_string, fmt=None):
    """Parse a human-readable timestamp string and convert it to the target timezone.

    NOTE(review): ``fmt`` is accepted for backward compatibility but is no
    longer used -- dateutil's parse() infers the format itself (see the
    superseded strptime code below).
    """
    if fmt is None:
        fmt = '%A, %B %d, %Y at %H:%M:%S %z'
    dt = convert_to_target_timezone(parse(raw_string))
    return dt
    # date_obj = datetime.datetime.strptime(raw_string[:-6], fmt).replace(tzinfo=utc_timezone)
    # return date_obj.astimezone(target_timezone)
def get_utc_offset_HH_MM():
    """Return the target timezone's current UTC offset as '+HHMM' / '-HHMM'.

    Fixes two defects in the original: negative offsets produced a doubled
    sign (e.g. '--530' instead of '-0530' because int(-5.5) kept its own
    minus sign), and only :00/:30 minute offsets were handled (e.g.
    Asia/Kathmandu's +0545 came out wrong).
    """
    offset = datetime.datetime.now(target_timezone).utcoffset()
    total_minutes = int(offset.total_seconds() // 60)
    sign = '+' if total_minutes >= 0 else '-'
    minutes = abs(total_minutes)
    return "{}{:02d}{:02d}".format(sign, minutes // 60, minutes % 60)
if __name__ == '__main__':
    # Quick manual check: parse a sample timestamp and print it converted
    # to the target timezone.
    from dateutil.parser import parse
    raw = "Friday, July 14, 2017 at 09:30:00 +0900"
    dt = convert_to_target_timezone(raw_string_with_timezone_to_target(raw))
    print(dt)
| true |
387958fd1d7075fe2308c13979395ea813650b27 | Python | JOHONGJU/johongju | /practice24_readfile.py | UTF-8 | 884 | 3.421875 | 3 | [] | no_license | # score_file = open("score.txt", "r", encoding="utf-8")
# print(score_file.read())
# score_file.close()
# #한줄한줄 열어서 표기하는 방식
# score_file = open("score.txt", "r", encoding="utf-8")
# print(score_file.readline(), end="") #줄별로 읽기, 한 줄 읽고 커서는 다음 줄로 이동
# print(score_file.readline(), end="")
# print(score_file.readline(), end="")
# print(score_file.readline(), end="")
# score_file.close()
#몇줄일지 모를 때
# score_file = open("score.txt", "r", encoding = "utf8")
# while True:
# line = score_file.readline()
# if not line:
# break
# print(linem, end=" "), #줄바꿈 안할려면 end 쓰면 됨
# score_file.close()
# Read the whole score file as a list of lines and echo it verbatim
# (end="" because each line already carries its newline).
with open("score.txt", "r", encoding="utf-8") as score_file:
    for row in score_file.readlines():
        print(row, end="")
| true |
937f77ebe3e48ea00d0b5915de1fbb30a25c1037 | Python | Suraj124/python_practice | /April2019/7-04-2019/collections_module_OrderedDict.py | UTF-8 | 746 | 3.9375 | 4 | [] | no_license | from collections import OrderedDict
# --- Plain dict: insertion-order demo --------------------------------------
d={}
d['A']=1
d['B']=2
d['C']=3
d['D']=4
d['E']=5
d['F']=6
d['G']=7
print(d)
for i,j in d.items(): # NOTE: since CPython 3.7 plain dicts also preserve insertion order; this demo reflects older behavior
    print(i,j)
#-------------------------------------------------#
#             OrderedDict                         #
print("OrderedDict")
dd=OrderedDict() # OrderedDict guarantees insertion order on all versions
dd['A']=1
dd['B']=2
dd['C']=3
dd['D']=4
dd['E']=5
dd['F']=6
dd['G']=7
for k,v in dd.items():
    print(k,v)
#--------------------------------------------------#
# Plain dicts compare equal regardless of insertion order.
d1={}
d1['A']=1
d1['B']=2
d2={}
d2['B']=2
d2['A']=1
print(d1==d2) #True
#-----------------------------------------------------#
# OrderedDicts additionally compare the *order*, so this prints False.
dd1=OrderedDict()
dd1['A']=1
dd1['B']=2
dd2=OrderedDict()
dd2['B']=2
dd2['A']=1
print(dd1==dd2)
868063d0e1110519c7a2678720ab756248d01a64 | Python | jeppefm1/Parkering | /Nummerpladegenkendelse/findChars.py | UTF-8 | 13,771 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | import os
import cv2
import numpy as np
import math
import numberplateRec
import imageProcess
import classPossibleChar
#Konstanter til at chekke et bogstav eller tal.
#Disse definerer dermed hvordan et char ser ud.
MIN_PIXEL_WIDTH = 2
MIN_PIXEL_HEIGHT = 8
MIN_ASPECT_RATIO = 0.25
MAX_ASPECT_RATIO = 1.0
MIN_PIXEL_AREA = 80
#Konstanter til sammenligning af to chars
MIN_DIAG_SIZE_MULTIPLE_AWAY = 0.3
MAX_DIAG_SIZE_MULTIPLE_AWAY = 5.0
MAX_CHANGE_IN_AREA = 0.5
MAX_CHANGE_IN_WIDTH = 0.8
MAX_CHANGE_IN_HEIGHT = 0.2
MAX_ANGLE_BETWEEN_CHARS = 12.0
#Andre konstanter
MIN_NUMBER_OF_MATCHING_CHARS = 3
RESIZED_CHAR_IMAGE_WIDTH = 30
RESIZED_CHAR_IMAGE_HEIGHT = 45
MIN_CONTOUR_AREA = 100
# Load the hand-classified training set produced by the labelling step.
try:
    labels = np.loadtxt("labelStor.txt", np.int32)
    flattenedImages = np.loadtxt("flattened_imagesStor.txt", np.float32)
except OSError:
    # Fix: the original used a bare 'except' and then fell through, which
    # crashed a few lines later with a NameError on 'labels'. Narrow the
    # exception to file-access errors and stop cleanly instead.
    print("Træningsdataen kunne ikke åbnes. Har du klassificeret chars inden?\n")
    os.system("pause")
    sys.exit(1)
# Build and train the KNN classifier on the flattened char images.
kNearest = cv2.ml.KNearest_create()
labels = labels.reshape((labels.size, 1))
kNearest.setDefaultK(3)
kNearest.train(flattenedImages, cv2.ml.ROW_SAMPLE, labels)
def detectCharsInPlates(listOfPossiblePlates):
    """For every candidate plate: find, group and classify its characters.

    Fills in possiblePlate.imgGrayscaled, .imgThressholded and .charsInPlate
    on each element and returns the (mutated) input list.
    """
    # No candidate plates -> nothing to do.
    if len(listOfPossiblePlates) == 0:
        return listOfPossiblePlates
    # Process every candidate plate in turn.
    for possiblePlate in listOfPossiblePlates:
        # Grayscale and threshold the candidate plate image.
        possiblePlate.imgGrayscaled, possiblePlate.imgThressholded = imageProcess.preprocessImg(possiblePlate.imgPlate)
        # Enlarge the image to make the chars easier to find.
        possiblePlate.imgThressholded = cv2.resize(possiblePlate.imgThressholded, (0, 0), fx = 1.6, fy = 1.6)
        # Threshold again after resizing (Otsu picks the level automatically).
        thresholdValue, possiblePlate.imgThressholded = cv2.threshold(possiblePlate.imgThressholded, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        # Find all possible chars in the plate: every contour whose geometry
        # satisfies the char constants counts as a candidate.
        listOfPossibleCharsInPlate = findPossibleCharsInPlate(possiblePlate.imgGrayscaled, possiblePlate.imgThressholded)
        # Group the candidates into lists of mutually matching chars.
        listOfListsOfMatchingCharsInPlate = findListOfListsOfMatchingChars(listOfPossibleCharsInPlate)
        # No group found -> probably not a plate; record it as empty and
        # move on to the next candidate.
        if (len(listOfListsOfMatchingCharsInPlate) == 0):
            possiblePlate.charsInPlate = ""
            continue
        # For every group of matching chars:
        for i in range(0, len(listOfListsOfMatchingCharsInPlate)):
            # Sort the group left-to-right by center x position,
            # using a lambda as the sort key.
            listOfListsOfMatchingCharsInPlate[i].sort(key = lambda matchingChar: matchingChar.centerX)
            # Drop the smaller of any two overlapping chars.
            listOfListsOfMatchingCharsInPlate[i] = removeElementOfOverlappingChars(listOfListsOfMatchingCharsInPlate[i])
        # Assume the group with the most chars is the actual plate text,
        # discarding the other groups.
        lenOfLongestListOfChars = 0
        indexOfLongestListOfChars = 0
        # Locate the longest group.
        for i in range(0, len(listOfListsOfMatchingCharsInPlate)):
            if len(listOfListsOfMatchingCharsInPlate[i]) > lenOfLongestListOfChars:
                lenOfLongestListOfChars = len(listOfListsOfMatchingCharsInPlate[i])
                indexOfLongestListOfChars = i
        longestListOfMatchingCharsInPlate = listOfListsOfMatchingCharsInPlate[indexOfLongestListOfChars]
        # Classify the chars of the winning group with the KNN model.
        possiblePlate.charsInPlate = recognizeCharsInPlate(possiblePlate.imgThressholded, longestListOfMatchingCharsInPlate)
    return listOfPossiblePlates
def findPossibleCharsInPlate(imgGrayscaled, imgThressholded):
    """Return all contours in the plate image that look like chars."""
    listOfPossibleChars = []
    contours = []
    # A copy is required because contour extraction modifies the image.
    imgThressholdedCopy = imgThressholded.copy()
    # Find all contours in the plate.
    contours, npaHierarchy = cv2.findContours(imgThressholdedCopy, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # Loop over every contour found.
    for contour in contours:
        # Wrap the contour in a PossibleChar object (computes its geometry).
        possibleChar = classPossibleChar.PossibleChar(contour)
        # Keep the object only if its geometry passes the char check.
        if checkIfPossibleChar(possibleChar):
            listOfPossibleChars.append(possibleChar)
    return listOfPossibleChars
def checkIfPossibleChar(possibleChar):
    """Rough first-pass test: does this contour have char-like geometry?"""
    big_enough = (possibleChar.boundingRectArea > MIN_PIXEL_AREA
                  and possibleChar.boundingRectWidth > MIN_PIXEL_WIDTH
                  and possibleChar.boundingRectHeight > MIN_PIXEL_HEIGHT)
    ratio_ok = MIN_ASPECT_RATIO < possibleChar.aspectRatio < MAX_ASPECT_RATIO
    return big_enough and ratio_ok
# Rearranges the single large pool of candidate chars into a list of lists,
# where each inner list holds chars that "match" each other: roughly the
# same shape and size, and located close together.
def findListOfListsOfMatchingChars(listOfPossibleChars):
    listOfListsOfMatchingChars = []
    # Loop over the chars in the pool.
    for possibleChar in listOfPossibleChars:
        listOfMatchingChars = findListOfMatchingChars(possibleChar, listOfPossibleChars)
        # Include the char itself in its own group.
        listOfMatchingChars.append(possibleChar)
        # If the group is below the configured minimum size, it is not a
        # plausible plate text -- try the next char instead.
        if len(listOfMatchingChars) < MIN_NUMBER_OF_MATCHING_CHARS:
            continue
        # Record the group.
        listOfListsOfMatchingChars.append(listOfMatchingChars)
        # Remove the matched chars from the pool so no char is matched twice.
        listOfPossibleCharsWithMatchesRemoved = []
        # set() difference removes them from the remaining pool.
        listOfPossibleCharsWithMatchesRemoved = list(set(listOfPossibleChars) - set(listOfMatchingChars))
        # Recurse on the remaining pool to collect the other groups.
        recursiveListOfListsOfMatchingChars = findListOfListsOfMatchingChars(listOfPossibleCharsWithMatchesRemoved)
        # Append every group found by the recursive call.
        for recursiveListOfMatchingChars in recursiveListOfListsOfMatchingChars:
            listOfListsOfMatchingChars.append(recursiveListOfMatchingChars)
        # The recursion already handled the rest of the pool, so stop here.
        break
    return listOfListsOfMatchingChars
# Same sorting purpose as above: given one char and the big pool, return a
# new list containing the pool chars that match it.
def findListOfMatchingChars(possibleChar, listOfChars):
    listOfMatchingChars = []
    # Loop over the candidate chars.
    for possibleMatchingChar in listOfChars:
        # Skip the char we are matching against -- it must not be
        # included in its own list of matches here.
        if possibleMatchingChar == possibleChar:
            continue
        # Compute the comparison metrics used to decide whether the two
        # chars match.
        # Distance between the two char centers.
        distanceBetweenChars = distanceBetweenCharsFunction(possibleChar, possibleMatchingChar)
        # Angle of the line between the two centers.
        angleBetweenChars = angleBetweenCharsFunction(possibleChar, possibleMatchingChar)
        # Relative change in area -- are they about the same size?
        changeInArea = float(abs(possibleMatchingChar.boundingRectArea - possibleChar.boundingRectArea)) / float(possibleChar.boundingRectArea)
        # Relative change in width and height.
        changeInWidth = float(abs(possibleMatchingChar.boundingRectWidth - possibleChar.boundingRectWidth)) / float(possibleChar.boundingRectWidth)
        changeInHeight = float(abs(possibleMatchingChar.boundingRectHeight - possibleChar.boundingRectHeight)) / float(possibleChar.boundingRectHeight)
        # If all metrics are within the configured limits, it is a match.
        if (distanceBetweenChars < (possibleChar.diagonalSize * MAX_DIAG_SIZE_MULTIPLE_AWAY) and
            angleBetweenChars < MAX_ANGLE_BETWEEN_CHARS and
            changeInArea < MAX_CHANGE_IN_AREA and
            changeInWidth < MAX_CHANGE_IN_WIDTH and
            changeInHeight < MAX_CHANGE_IN_HEIGHT):
            listOfMatchingChars.append(possibleMatchingChar)
    return listOfMatchingChars
def distanceBetweenCharsFunction(firstChar, secondChar):
    """Euclidean distance between the centers of two chars (Pythagoras)."""
    dx = abs(firstChar.centerX - secondChar.centerX)
    dy = abs(firstChar.centerY - secondChar.centerY)
    return math.sqrt(dx ** 2 + dy ** 2)
def angleBetweenCharsFunction(firstChar, secondChar):
    """Angle (degrees, 0..90) of the line between two char center positions.

    Improvements over the original: uses math.pi / 2 instead of the
    truncated magic constant 1.57 (which made a vertical pair come out as
    ~89.95 degrees instead of 90), and math.degrees() instead of a
    hand-rolled radian-to-degree conversion.
    """
    adjacent = float(abs(firstChar.centerX - secondChar.centerX))
    opposite = float(abs(firstChar.centerY - secondChar.centerY))
    # A zero horizontal distance means the chars are stacked vertically.
    if adjacent != 0.0:
        angle_in_rad = math.atan(opposite / adjacent)
    else:
        angle_in_rad = math.pi / 2
    return math.degrees(angle_in_rad)
# Handles overlap between chars: of any two chars that overlap, the smaller
# one is removed so the recognizer is not confused by duplicate shapes.
def removeElementOfOverlappingChars(listOfMatchingChars):
    listOfMatchingCharsOverlappingResolved = list(listOfMatchingChars)
    # Compare every pair of chars.
    for currentChar in listOfMatchingChars:
        for otherChar in listOfMatchingChars:
            if (currentChar != otherChar):
                # If the center distance is below the diagonal-size threshold,
                # the two chars are so close that they overlap.
                if distanceBetweenCharsFunction(currentChar, otherChar) < (currentChar.diagonalSize * MIN_DIAG_SIZE_MULTIPLE_AWAY):
                    # Remove the smaller of the two overlapping chars.
                    if currentChar.boundingRectArea < otherChar.boundingRectArea:
                        # Only remove it if it has not been removed already.
                        if currentChar in listOfMatchingCharsOverlappingResolved:
                            listOfMatchingCharsOverlappingResolved.remove(currentChar)
                    else:
                        # Only remove it if it has not been removed already.
                        if otherChar in listOfMatchingCharsOverlappingResolved:
                            listOfMatchingCharsOverlappingResolved.remove(otherChar)
    return listOfMatchingCharsOverlappingResolved
# Recognizes the chars in the plate image using the module-level KNN model.
def recognizeCharsInPlate(imgThressholded, listOfMatchingChars):
    charsCombined = ""
    # Build an empty color matrix of matching size: a color copy of the
    # plate is needed so rectangles can be drawn on it in green.
    height, width = imgThressholded.shape
    imgThresholdedColor = np.zeros((height, width, 3), np.uint8)
    # Sort the chars by center x position (via a lambda key) so the plate
    # text is assembled in reading order.
    listOfMatchingChars.sort(key = lambda matchingChar: matchingChar.centerX)
    # Make the color version of the plate image.
    cv2.cvtColor(imgThressholded, cv2.COLOR_GRAY2BGR, imgThresholdedColor)
    for currentChar in listOfMatchingChars:
        # Draw a rectangle around the current char.
        pt1 = (currentChar.boundingRectX, currentChar.boundingRectY)
        pt2 = ((currentChar.boundingRectX + currentChar.boundingRectWidth), (currentChar.boundingRectY + currentChar.boundingRectHeight))
        cv2.rectangle(imgThresholdedColor, pt1, pt2, numberplateRec.COLOR_GREEN, 2)
        # Crop the individual char out of the thresholded plate image.
        imgchar = imgThressholded[currentChar.boundingRectY : currentChar.boundingRectY + currentChar.boundingRectHeight,
                  currentChar.boundingRectX : currentChar.boundingRectX + currentChar.boundingRectWidth]
        # Resize the cropped char to the training-image dimensions.
        charResized = cv2.resize(imgchar, (RESIZED_CHAR_IMAGE_WIDTH, RESIZED_CHAR_IMAGE_HEIGHT))
        # Flatten the image into a single row vector.
        charResized = charResized.reshape((1, RESIZED_CHAR_IMAGE_WIDTH * RESIZED_CHAR_IMAGE_HEIGHT))
        # Convert to float as required by the KNN model.
        charResized = np.float32(charResized)
        # Run the KNN prediction; the label is the char's character code.
        retval, results, neigh_resp, dists = kNearest.findNearest(charResized, k = 3)
        result = str(chr(int(results[0][0])))
        charsCombined = charsCombined + result
    return charsCombined
| true |
124e471c3c1136ff3bee2a1fe3b81522299c4b51 | Python | gkkrtcby283782/Sequences-of-Objects | /sequence_classifier/models/deepLSTM.py | UTF-8 | 8,199 | 2.890625 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
import csv
import math
import sys
from tensorflow.contrib import rnn
class DeepLSTM:
    """Stacked-LSTM sequence classifier built on the TensorFlow 1.x graph API.

    The constructor builds the whole graph (placeholders, LSTM stack,
    softmax head, loss, optimizer and accuracy ops); training/evaluation
    happens inside the train_test* methods, each of which opens its own
    Session.
    """
    def __init__(self, input_dim, output_dim,
                 seq_size, hidden_dim, layer, learning_rate, dropout):
        # Hyperparameters
        self.input_dim = input_dim # input dim for each step
        self.output_dim = output_dim # output dim for last step, that is class number
        self.seq_size = seq_size # step number, that is, object number
        self.hidden_dim = hidden_dim # hidden dim in each cell, input gate, forget gate, output gate, hidden state, output, all weights and biases
        self.layer = layer # deep of lstm
        self.learning_rate = learning_rate
        self.dropout = dropout
        # Weight variables and placeholders
        self.W_out = tf.Variable(tf.random_normal([hidden_dim, output_dim]), name='W_out')
        self.b_out = tf.Variable(tf.random_normal([output_dim]), name='b_out')
        self.x = tf.placeholder(tf.float32, [None, seq_size, input_dim]) # input data
        self.y = tf.placeholder(tf.float32, [None, output_dim]) # ground truth class, one hot representation
        self.keep_prob = tf.placeholder(tf.float32)
        self.y_hat = self.model() # output class score, before softmax
        self.softmax = tf.nn.softmax(self.y_hat)
        # Cost optimizer
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=self.y_hat)
        self.loss = tf.reduce_mean(cross_entropy)
        self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.loss)
        # assess
        correct_pred = tf.equal(tf.argmax(self.softmax, 1), tf.argmax(self.y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        # saver
        self.saver = tf.train.Saver()
    def get_a_cell(self):
        """Build one LSTM cell, optionally wrapped with output dropout."""
        if self.dropout:
            lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(self.hidden_dim)
            lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=self.keep_prob) # only dropout input between layers, no dropout between memory
            return lstm_cell
        else:
            return rnn.BasicLSTMCell(self.hidden_dim)
    def model(self):
        """
        Build the stacked-LSTM forward pass and return the class logits.

        :param x: inputs of size [N, seq_size, input_size]
        :param W_out: matrix of fully-connected output layer weights
        :param b_out: vector of fully-connected output layer biases
        """
        # stack cell
        cell = rnn.MultiRNNCell([self.get_a_cell() for _ in range(self.layer)])
        # initial state with 0s
        batch_size = tf.shape(self.x)[0]
        h0 = cell.zero_state(batch_size, tf.float32)
        # outputs: all outputs of the last layer and in all time steps , [batch_size, seq_size, hidden_dim]
        # states: hidden states in the last time step,
        # layer * LSTMStateTuple(hidden_state, output), both are [batch_size, hidden_dim]
        if self.dropout:
            self.x_drop = tf.nn.dropout(self.x, self.keep_prob)
            outputs, states = tf.nn.dynamic_rnn(cell, self.x_drop, dtype=tf.float32, initial_state=h0)
        else:
            outputs, states = tf.nn.dynamic_rnn(cell, self.x, dtype=tf.float32, initial_state=h0)
        # Only the last time step feeds the fully-connected output layer.
        last_output = outputs[:, -1, :]
        out = tf.matmul(last_output, self.W_out) + self.b_out
        return out
    def get_batch(self, input, output, batch_size, mode):
        """Return one (input, output) batch; random for 'train', head slice for 'test'."""
        if mode == 'train':
            # random select index in np.arange(len(X))
            # length = batch_size
            # replace indicate whether choosing repeatedly, false means can not
            index = np.random.choice(len(input), batch_size, replace=False)
            return input[index], output[index]
        elif mode == 'test':
            return input[:batch_size], output[:batch_size]
        else:
            sys.exit()
    def train_test(self, training_input, training_output, training_name,
                   test_input, test_output, test_name,
                   batch_size_train, batch_size_test,
                   epoch):
        """Train for `epoch` epochs (no dropout) and print pairwise-averaged test accuracy."""
        iteration_train = int(len(training_input) / batch_size_train)
        iteration_test = int(len(test_input) / batch_size_test)
        with tf.Session() as sess:
            tf.get_variable_scope().reuse_variables()  # share variable between time steps
            sess.run(tf.global_variables_initializer())
            for i in range(epoch):
                # training
                for j in range(iteration_train):
                    input_batch, output_batch = self.get_batch(training_input, training_output, batch_size_train,
                                                               mode='train')
                    _, loss = sess.run([self.optimizer, self.loss], feed_dict={self.x: input_batch, self.y: output_batch})
                if i % 100 == 0:
                    print('epoch: {0}, training loss = {1}'.format(i, loss))
                    # test
                    accuracy = 0.
                    count = 0
                    for j in range(iteration_test):
                        input_batch, output_batch = self.get_batch(test_input, test_output, batch_size_test,
                                                                   mode='test')
                        count += 1
                        predictions = sess.run(self.softmax, feed_dict={self.x: input_batch, self.keep_prob: 1.0})
                        accuracy += self.average_test(predictions, output_batch)
                    print('test accuracy =', accuracy/count)
                    print('\n')
    def train_test_dropout(self, training_input, training_output, training_name,
                           test_input, test_output, test_name,
                           batch_size_train, batch_size_test,
                           epoch, keep_prob):
        """Same as train_test, but feeds keep_prob so dropout is active during training."""
        iteration_train = int(len(training_input) / batch_size_train)
        iteration_test = int(len(test_input) / batch_size_test)
        with tf.Session() as sess:
            tf.get_variable_scope().reuse_variables()  # share variable between time steps
            sess.run(tf.global_variables_initializer())
            for i in range(epoch):
                # training
                for j in range(iteration_train):
                    input_batch, output_batch = self.get_batch(training_input, training_output, batch_size_train,
                                                               mode='train')
                    _, loss = sess.run([self.optimizer, self.loss],
                                       feed_dict={self.x: input_batch, self.y: output_batch, self.keep_prob: keep_prob})
                if i % 100 == 0:
                    print('epoch: {0}, training loss = {1}'.format(i, loss))
                    # test (dropout disabled via keep_prob = 1.0)
                    accuracy = 0.
                    count = 0
                    for j in range(iteration_test):
                        input_batch, output_batch = self.get_batch(test_input, test_output, batch_size_test,
                                                                   mode='test')
                        count += 1
                        predictions = sess.run(self.softmax, feed_dict={self.x: input_batch, self.keep_prob: 1.0})
                        accuracy += self.average_test(predictions, output_batch)
                    print('test accuracy =', accuracy / count)
                    print('\n')
    def average_test(self, predictions, ground_truth):
        """Accuracy where consecutive prediction pairs are averaged before argmax.

        Assumes samples come in pairs (even batch size) that share a label --
        TODO confirm against the data pipeline.
        """
        accuracy = 0.
        # predictions: [None, class_number], confidence
        # ground_truth: [None, class_number], one-hot
        for i in np.arange(0, len(predictions), 2):
            average_predictions = (predictions[i] + predictions[i+1]) / 2.0
            if np.argmax(average_predictions) == np.argmax(ground_truth[i]):
                accuracy += 1.0
        return accuracy / (len(predictions) / 2.0)
# # test
# model = DeepLSTM(input_dim=8, output_dim=3,
# seq_size=20,
# hidden_dim=10, layer=2,
# learning_rate=0.01, dropout=True)
| true |
ff34a8e2f65b5a35d546a08f2f017ccd119ef88c | Python | kevinszuchet/itc-fellows-part-time | /lesson_3/copyspecial/copyspecial.py | UTF-8 | 4,336 | 3.65625 | 4 | [] | no_license | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import os
import re
import shutil
import subprocess
import sys
from collections import Counter
"""Copy Special exercise
The program takes one or more directories as its arguments and it can:
- List the absolute paths of the special files in all the directories
- Copy the files to the given directory, creating it if necessary
- Create a zipfile containing the files
(We'll say that a "special" file is one where the name contains the pattern __w__,
where the w is one or more word chars.)
"""
# Write functions and modify main() to call them
def get_special_paths(directories):
    """Return the absolute paths of all special files inside the given directories.

    Exits with status 1 if any directory is invalid or the same special
    filename appears in more than one directory.
    """
    absolute_paths = []
    try:
        for directory in directories:
            for filename in os.listdir(directory):
                if is_special(filename):
                    absolute_paths.append(absolute_path(directory, filename))
        check_repeated_special_files(absolute_paths)
    except (FileNotFoundError, NotADirectoryError):
        print(f"There are some invalid directories. Please check that all are valid.")
        sys.exit(1)
    return absolute_paths
def check_repeated_special_files(absolute_paths):
    """Exit with an error if the same filename appears in more than one directory.

    Performance fix: the original called list.count() inside a comprehension
    (O(n^2)); collections.Counter tallies every filename in one O(n) pass.
    """
    counts = Counter(os.path.basename(abs_path) for abs_path in absolute_paths)
    repeated_special_filenames = [name for name, n in counts.items() if n > 1]
    if repeated_special_filenames:
        print(
            f"The next files: {repeated_special_filenames}, are repeated in different directories. Please, fix it.")
        sys.exit(1)
def absolute_path(directory, filename):
    """Join directory and filename and resolve the result to an absolute path."""
    return os.path.abspath(os.path.join(directory, filename))
def is_special(filename):
    """Return True if the filename contains the special __word__ pattern.

    Normalizes the re.Match-or-None return of the original to a proper bool
    so the value matches the predicate name; truthiness for callers is
    unchanged.
    """
    return re.match(r'.*__\w+__.*', filename) is not None
def copy_to(directories, to_dir):
    """Copy every special file found under the given directories into to_dir,
    creating the destination directory if it does not exist."""
    paths = get_special_paths(directories)
    if not os.path.exists(to_dir):
        os.makedirs(to_dir)
    for path in paths:
        print(f"Copying {os.path.basename(path)} in {to_dir}...")
        shutil.copy(path, to_dir)
def zip_to(directories, to_zip):
    """
    Zip all special files under the given directories into to_zip.

    Fix: the original built a space-joined command string and re-split it,
    which silently broke any path containing a space; the argument list is
    now passed to subprocess directly, one element per path.
    """
    special_paths = get_special_paths(directories)
    command = ["zip", "-j", to_zip] + special_paths
    try:
        print(f"Command I'm going to do: {' '.join(command)}")
        subprocess.check_call(command, stdout=subprocess.DEVNULL)
    except subprocess.CalledProcessError:
        print("Cannot execute the command. Please check that all the arguments are valid.")
def main():
    """Parse command-line flags and dispatch to list / copy / zip mode."""
    # Make a list of command line arguments, omitting the [0] element
    # which is the script itself.
    args = sys.argv[1:]
    if not args:
        # Fix: the original left these as bare Python-2 'print' statements,
        # which evaluate silently under Python 3 and never show the message.
        print("usage: [--todir dir][--tozip zipfile] dir [dir ...]")
        sys.exit(1)

    # todir and tozip are either set from the command line or left as the
    # empty string; args ends up containing only the directories.
    todir = ''
    if args[0] == '--todir':
        todir = args[1]
        del args[0:2]

    tozip = ''
    # Guard added: args may already be empty after consuming --todir.
    if args and args[0] == '--tozip':
        tozip = args[1]
        del args[0:2]

    if len(args) == 0:
        print("error: must specify one or more dirs")
        sys.exit(1)

    if todir:
        copy_to(args, todir)
    elif tozip:
        zip_to(args, tozip)
    else:
        special_paths = get_special_paths(args)
        print("Special files:\n" + '\n'.join(special_paths))


if __name__ == "__main__":
    main()
| true |
f7c8533824c8f7afd6bb30a712f027c51644f21d | Python | neilpanchal/matplotlib | /branches/unit_support/users_guide/code/compare_with_matlab.py | UTF-8 | 292 | 2.578125 | 3 | [] | no_license | from pylab import *
dt = 0.01
t = arange(0,10,dt)
nse = randn(len(t))
r = exp(-t/0.05)
cnse = conv(nse, r)*dt
cnse = cnse[:len(t)]
s = 0.1*sin(2*pi*t) + cnse
subplot(211)
plot(t,s)
subplot(212)
psd(s, 512, 1/dt)
savefig('../figures/psd_py.eps')
savefig('../figures/psd_py.png')
show()
| true |
9b7b661cc7ba44595aba34a8f431621ab57f8007 | Python | Darius-sss/suduku | /main.py | UTF-8 | 4,425 | 2.828125 | 3 | [] | no_license | __time__ = '2021/8/1'
__author__ = 'ZhiYong Sun'
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox, QTableWidgetItem
from PyQt5.QtGui import QBrush, QColor
from sudoku import Ui_Form
from matplotlib.pyplot import rcParams, imread, figure, show, axis, imshow, title
import os
import csv
class game(QMainWindow, Ui_Form):
    """Main window of a PyQt5 sudoku game.

    Puzzles live in ziyuan/problem.txt (CSV): row 0 holds the current
    level index, each following row is one flattened 9x9 board.
    """
    def __init__(self):
        super(game, self).__init__()
        self.setupUi(self)
        self.path = self.getRealPath() # resource directory the exe unpacks into
    def load_problem(self): # load the puzzle for the current level
        # Read the whole CSV file; data[0][0] is the current level index.
        with open(file=self.path + r'./ziyuan/problem.txt', mode='r', encoding='utf-8') as fr:
            data = list(csv.reader(fr))
        curr = int(data[0][0])
        if curr >= len(data):
            # All levels cleared: show the congratulations dialog plus the
            # reward picture, and clamp to the last available level.
            QMessageBox.information(self, "通关证明", "恭喜玲兰姐姐完美通关!请点击获取福利~", QMessageBox.Yes)
            self.show_pic()
            curr = len(data) - 1
        # Reshape the flat 81-cell row into a 9x9 grid of strings.
        prob = [[data[curr][i * 9 + j] for j in range(9)] for i in range(9)]
        self.count.setText(str(curr))
        return prob
    def show_pic(self):
        # Display the reward image in a matplotlib window.
        rcParams['font.sans-serif'] = ['SimHei'] # render CJK text correctly
        img = imread(self.path + r'./ziyuan/fuli.jpg')
        figure("通关福利") # figure window name
        imshow(img)
        axis('off') # hide the axes
        title('情 人 节 快 乐 之 湿 身 诱 惑') # image caption
        show()
    def isvalid(self, board): # True iff the 9x9 board is a valid solved sudoku
        size = len(board)
        # One "seen digits" set per row, per column and per 3x3 box.
        rows, cols, subs = [set() for _ in range(size)], [set() for _ in range(size)], [set() for _ in range(size)]
        for i in range(size):
            for j in range(size):
                num = board[i][j]
                if num <= 0 or num > 9:
                    return False
                if 0 < num <= 9:
                    sub_index = 3 * (i // 3) + j // 3
                    # A digit may appear only once per row, column and box.
                    if num in rows[i] or num in cols[j] or num in subs[sub_index]:
                        return False
                    rows[i].add(num)
                    cols[j].add(num)
                    subs[sub_index].add(num)
        return True
    def getRealPath(self):
        # Absolute path of the directory the frozen exe unpacks into.
        p = os.path.realpath(sys.path[0])
        p = p.replace(r'\base_library.zip', '')
        return p
    def reset(self):
        # (Re)populate the table widget from the current puzzle.
        prob = self.load_problem()
        for i in range(9):
            for j in range(9):
                self.table.setItem(i, j, QTableWidgetItem(prob[i][j])) # place the digit
                self.table.item(i, j).setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter) # center it
                if prob[i][j] != '0':
                    self.table.item(i, j).setFlags(Qt.ItemIsEditable) # lock the given digits
                    self.table.item(i, j).setBackground(QBrush(QColor(0, 225, 0)))
    def read_table(self):
        board = [] # read the 9x9 grid of ints back out of the table widget
        for i in range(9):
            tmp = []
            for j in range(9):
                tmp.append(int(self.table.item(i, j).text()))
            board.append(tmp)
        return board
    def submit(self):
        # Validate the current board and tell the player the result.
        board = self.read_table()
        if self.isvalid(board):
            QMessageBox.information(self, "提交结果", "玲兰姐姐太棒了~", QMessageBox.Yes)
        else:
            QMessageBox.warning(self, "提交结果", "玲兰姐姐再看看~", QMessageBox.Yes)
    def next_(self): # advance the level counter by one, then reload the board
        board = self.read_table()
        if not self.isvalid(board):
            QMessageBox.critical(self, "过关失败", "当前结果有问题,无法进入下一关~", QMessageBox.Yes)
        else:
            # Bump the stored level index (row 0 of the CSV) on disk.
            with open(file=self.path + r'./ziyuan/problem.txt', mode='r') as fr:
                data = list(csv.reader(fr))
            data[0][0] = str(int(data[0][0]) + 1)
            with open(file=self.path + r'./ziyuan/problem.txt', mode='w', newline='') as fw:
                f_csv = csv.writer(fw)
                f_csv.writerows(data)
            self.reset()
    def exit(self):
        sys.exit()
if __name__ == '__main__':
    app = QApplication(sys.argv)
    MainWindow = game() # create the main window
    MainWindow.show() # show it
    sys.exit(app.exec_()) # exit the process when the Qt event loop ends
09cc0190554f92cd52141c92f8da1008db8b8bfe | Python | open-reblock/parcelization | /boundary_wkt.py | UTF-8 | 1,268 | 2.53125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 1 19:47:05 2018
@author: Annie
"""
file_name = "lagos_test.csv"
import pandas as pd
import_df = pd.read_csv(file_name)
columnhead = ""
if 'verification/A0_Boundary' in import_df:
columnhead = 'verification/A0_Boundary'
elif 'C2a. Settlement Boundary' in import_df:
columnhead = 'C2a. Settlement Boundary'
elif 'section_C/C2_Boundary' in import_df:
columnhead = 'section_C/C2_Boundary'
export_df = import_df[['device_id',columnhead]].copy()
export_df[columnhead] = export_df[columnhead].fillna("no bounds")
export_df
def switchcoords(text):
text= text.split(";")
if len(text) < 3: #invalid number of coords
return text
coords = [] # list of x y coords
for i in text:
line = i.split()
coord = line[1] +" " + line[0]
coords.append(coord)
fcoord = coords[0]
lcoord = coords.append(fcoord)
polygon = "POLYGON((" + ",".join(coords) + "))" #polygon string
return polygon
export_df[columnhead] = export_df[columnhead].apply(switchcoords)
#check output dataframe contains desired fields
print(export_df['device_id'])
print(export_df[columnhead])
export_df.to_csv('lagos_test_wkt.csv')
| true |
366183cf252547033b88b345eb9de6e4d5589c89 | Python | nawaz1774/restaurantmenuapp | /Restaurant.py | UTF-8 | 2,580 | 3.5625 | 4 | [] | no_license | """This module contains the definition of the class Restaurant.
This Module defines the Restaurant object and also contains functions to retrive data related to Restaurant class.
"""
import database_config as dc
class Restaurant():
"""This class defines the Restaurant class.
This Restaurant class is used to define the Restaurant objects and all its attributes.
Attributes:
name: A String which the anme of the Restaurant.
cuisine: A String which decribes the cuise being serverd at the Restaurant.
description: A String which contains a Brief description of the Restaurant.
"""
def __init__(self, name, cuisine, description):
"""Inits Restaurant with name, cuisine, description.
"""
self.name = name
self.cuisine = cuisine
self.description = description
def get_res_name_with_id(res_id):
"""This function returns the name of the Restaurant.
Args:
res_id: An integer which is a restaurant id that needs to be deleted.
Returns:
Returns a string that contain the restaurant name.
"""
params = (res_id,)
q = "select restaurant_id, name, cuisine, description from restaurant where restaurant_id = %s"
result = dc.selectOP(q, params)
res_name = result[0][1]
return res_name
def get_list_of_res(cnt):
"""This function returns a List of Restaurants.
Args:
cnt: Count of Restuarnts that need to be returned.
Returns:
Returns a List of Tuples that contain the restaurant details.
"""
q = "select restaurant_id, name, cuisine, description from restaurant order by restaurant_id limit "+str(cnt)
params = ()
result = dc.selectOP(q, params)
return result
def add_res(res):
"""This function adds a new Restaurant to the database.
Args:
res: A Restaurant object that need to be added to thr database
Returns:
Returns a the operation outcome 1 - Success, 0 - Failure
"""
params = (res.name, res.cuisine, res.description,)
q = "INSERT INTO restaurant (name, cuisine, description) VALUES (%s, %s, %s)"
result = dc.insertOP(q, params)
if result == 1:
return 1
else:
return 0
def delete_res(res_id):
"""This function deletes a Restaurant from the database.
Args:
res_id: An integer which is a restaurant id that needs to be deleted.
Returns:
Returns a the operation outcome 1 - Success, 0 - Failure
"""
params = (res_id,)
q1 = "DELETE FROM menuitem WHERE restaurant_id = %s"
q = "DELETE FROM restaurant WHERE restaurant_id = %s"
result1 = dc.deleteOP(q1, params)
result = dc.deleteOP(q, params)
print(result)
if result == 1 and result1 == 1:
return 1
else:
return 0
| true |
a2794ef9a8224a02bc27c4aa6826c22c07068378 | Python | PhamKhoa96/pandas | /Q8.py | UTF-8 | 705 | 3 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 26 14:38:03 2020
@author: phamk
"""
import numpy as np
import pandas as pd
df = pd.read_excel('Superstore.xls')
#print(df.dtypes)
table_sort = df.sort_values('Order Date')
table_sort['Year'] = table_sort['Order Date'].dt.year
table_sort['Month'] = table_sort['Order Date'].dt.month
#table_sort['Year'] == 2015
print(table_sort)
for y in range (0, 4):
print('In' , (2015+y))
for x in range (0, 4):
df3 = table_sort.loc[((table_sort['Month'] > (0+3*x)) & (table_sort['Month'] < (4+3*x)))
& (table_sort['Year'] == (2015+y))]
data = df3['Profit'].sum()
print ( 'Qtr' , x+1 , ' = ', data) | true |
5775eec5dddaea5228d728cc3d60087552b87bc6 | Python | alvarogatenorio/Machine-Learning | /LeastSquared/generar.py | UTF-8 | 1,406 | 3.078125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Editor de Spyder
Este es un archivo temporal
"""
import numpy as np
import matplotlib.pyplot as plt
N = 40
N0, N1, N2, N3 = 10, 10, 10, 10
K = 4
mu0 = np.array([10, 0])
X0 = np.random.randn(2, N0) + mu0[:, np.newaxis]
mu1 = np.array([-10, 10])
X1 = np.random.randn(2, N1) + mu1[:, np.newaxis]
mu2 = np.array([-10, -0])
X2 = np.random.randn(2, N2) + mu2[:, np.newaxis]
mu3 = np.array([10, 10])
X3 = np.random.randn(2, N3) + mu3[:, np.newaxis]
f = open('puntosGenerados.txt', 'a')
def escribePuntos(V,long):
for i in range(0,long):
f.write(str(V[0][i]) + ' ')
f.write(str(V[1][i]))
f.write('\n')
return
#escribimos la cabecera
f.write(str(N))
f.write('\n')
f.write(str(K))
f.write('\n')
f.write(str(N0))
f.write('\n')
f.write(str(N1))
f.write('\n')
f.write(str(N2))
f.write('\n')
f.write(str(N3))
f.write('\n')
#escribimos los putnos con formato
escribePuntos(X0,N0)
escribePuntos(X1,N1)
escribePuntos(X2,N2)
escribePuntos(X3,N3)
#Escribimos su clase
clase1 = np.array([1,0,0,0])
clase2 = np.array([0,1,0,0])
clase3 = np.array([0,0,1,0])
clase4 = np.array([0,0,0,1])
def escribeClase(V,long):
for i in range(0,long):
enStr = ' '.join(map(str, V))
f.write(enStr)
f.write('\n')
return
escribeClase(clase1, N0)
escribeClase(clase2, N1)
escribeClase(clase3, N2)
escribeClase(clase4, N3)
f.close()
| true |
656cc6a7517bbfcdc919eeda50fd3f635c7adc8a | Python | rphly/fastapi-workshop | /routers/items.py | UTF-8 | 839 | 2.515625 | 3 | [] | no_license | from database import db
from fastapi import APIRouter, Request, HTTPException, Depends
from typing import Optional
from models.item import Item
def authenticated(request: Request):
user = request.state.user
if user is None:
raise HTTPException(status_code=401, detail="Unauthorized access")
return user
router = APIRouter(
prefix="/items",
responses={404: {"description": "Not found"}},
dependencies=[Depends(authenticated)],
)
@router.get("")
def get_items():
res = db.child("items").get()
return res.val()
@router.get("/{item_id}")
def get_items(item_id: Optional[int]):
res = db.child("items").order_by_child("id").equal_to(item_id).get()
return res.val()
@router.post("")
async def create(request: Request, item: Item):
item = item.dict()
print(item)
return item
| true |
fe4820ccde19db656e18f230ceebdde5acaefd89 | Python | arhayrap/image_analysis | /main.py | UTF-8 | 1,714 | 2.984375 | 3 | [] | no_license | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
from read_images import get_data
from RNN import RNN
def main():
'''
tr_paths = ["./Datasets/Training_Data1.csv",
"./Datasets/Training_Data2.csv",
"./Datasets/Training_Data3.csv"]
ts_paths = ["./Datasets/Testing_Data.csv"]
train_valid_data = pd.DataFrame({"image": [], "label": []})
test_data = pd.DataFrame({"image": []})
for i in tr_paths:
train_valid_data = pd.concat([train_valid_data, pd.read_csv(i)], axis=0, sort=False)
for i in ts_paths:
test_data = pd.concat([test_data, pd.read_csv(i)], axis=0, sort=False)
'''
data = get_data()
train_valid_data = data[0]
test_data = data[1]
print(data[0]["image"].shape, type(data[0]["image"]))
print(data[1].shape, type(data[1]))
# print(pd.DataFrame(data[0]))
print("Data has been collected!")
x_train, x_valid, y_train, y_valid = train_test_split(np.array(train_valid_data["image"]),
np.array(train_valid_data["label"]),
test_size=0.25)
# x_train = x_train.to_numpy()
# x_valid = x_valid.to_numpy()
# y_train = y_train.to_numpy()
# y_valid = y_valid.to_numpy()
print(x_train)
print(x_train.shape, x_train[0].shape)
model = RNN(x_train, y_train, x_valid, y_valid, test_data)
print("Model training process")
results = model.fit_and_test()
print("The results are ready!")
return results
if __name__ == "__main__":
print(main())
| true |
dc0655f46f704aa9d70a4f39dc09fe37d661e5a8 | Python | satot/sort-algorithms | /counting.py | UTF-8 | 1,449 | 3.96875 | 4 | [] | no_license | import random
def counting_sort(array, k):
    """Stable counting sort.

    Sorts `array` of non-negative integers, each strictly less than `k`,
    in O(n + k) time.  Returns a new sorted list; `array` is unchanged.

    Fix over the original: the intermediate debug `print` calls were
    removed so the function is usable as a library routine.
    """
    result_array = [-1 for _ in array]
    # counting_array[i]: number of elements equal to i.
    # e.g. array [1, 4, 0, 2, 0] => counting_array [2, 1, 1, 0, 1]
    counting_array = [0 for _ in range(k)]
    for value in array:
        counting_array[value] += 1
    # Prefix sums: counting_array[i] becomes the number of elements <= i,
    # i.e. one past the last output slot a value i may occupy.
    # e.g. [2, 1, 1, 0, 1] => [2, 3, 4, 4, 5]
    for i in range(1, k):
        counting_array[i] += counting_array[i - 1]
    # Populate the output walking the input backwards so equal keys keep
    # their relative order — this is what makes the sort stable.
    for i in range(len(array), 0, -1):
        cur = array[i - 1]
        result_array[counting_array[cur] - 1] = cur
        counting_array[cur] -= 1
    return result_array
if __name__ == '__main__':
    # Smoke test: sort 20 random digits (fixed seed for reproducibility).
    random.seed(1)
    array = [random.randrange(10) for _ in range(20)]
    #array = [1,1,1,4,0,2,2,0]
    print("before", array)
    sorted_array = counting_sort(array, max(array)+1)
    print('after', sorted_array)
| true |
9f68bf9a65f68b05147e12c5bd21510d7faf23d1 | Python | Amanda-Dinitto/PHYS512-Homework- | /Homework_1/H1P2-Error.py | UTF-8 | 1,163 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 14 19:56:06 2020
@author: amanda
"""
import numpy as np
from matplotlib import pyplot as plt
## Same code as before, but the table is split into even/odd points so the
## interpolation error can be estimated against the held-out odd points.
# Load the Lakeshore diode calibration table: column 0 = temperature,
# column 1 = voltage.
list = np.loadtxt("./lakeshore2.txt", usecols=range(0,2))  # NOTE(review): shadows the builtin `list`
#
t = list[:,0]
temp = np.flip(t)   # reverse the table order (presumably so voltage ascends — confirm against lakeshore2.txt)
v =list[:,1]
volt = np.flip(v)
# Even-indexed points build the interpolant; odd-indexed points are the
# held-out "truth" used for the error estimate at the bottom.
V = np.array(volt[::2])
T = np.array(temp[::2])
V2=np.linspace(V[1],V[-2],47)   # evaluation grid, kept away from the endpoints
T_interp=np.zeros(len(V2))
for i in range (len(V2)):
    # Local cubic fit through the 4 calibration points bracketing V2[i].
    ind=np.max(np.where(V2[i]>=V)[0])
    V_good=V[ind-1:ind+3]
    T_good=T[ind-1:ind+3]
    pars=np.polyfit(V_good, T_good,3)
    predicted=np.polyval(pars,V2[i])
    T_interp[i]=predicted
plt.plot(V2, T_interp)
plt.plot(V,T, '.')
plt.xlabel('Voltage')
plt.ylabel('Temperature')
plt.savefig("H1P2_Error_Plot_Short.jpg")
plt.show()
#interpolation val for every other value and estimated against
no_interp = np.array(temp[1::2]) #taking the odd values from the lakeshore txt
estimate = (T_interp - no_interp)
trial = np.std(estimate)
print ("The standard deviation between interpolated point and actual value is", trial)
| true |
3a91d7a2beff5999f93aa487b5fe252621852fe4 | Python | Jtrue27/ADL2019-Homeworks | /HW3/agent_dir/agent_ppo.py | UTF-8 | 7,033 | 2.609375 | 3 | [
"MIT"
] | permissive | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from agent_dir.agent import Agent
from environment import Environment
from torch.distributions import Categorical
import matplotlib.pyplot as plt
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class PPO(nn.Module):
    """Actor-critic network used by the PPO agent.

    Exposes three submodules:
      * ``actor``  — obs -> softmax action probabilities,
      * ``critic`` — obs -> scalar state value,
      * ``affine`` — an extra linear layer (not referenced by AgentPPO,
        kept for interface compatibility).
    """
    def __init__(self, obs_shape, act_shape, hidden_size):
        super(PPO, self).__init__()
        self.affine = nn.Linear(obs_shape, hidden_size)
        # Policy head: two-layer MLP ending in a probability simplex.
        actor_layers = [
            nn.Linear(obs_shape, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, act_shape),
            nn.Softmax(dim=-1),
        ]
        self.actor = nn.Sequential(*actor_layers)
        # Value head: two-layer MLP with a single scalar output.
        critic_layers = [
            nn.Linear(obs_shape, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 1),
        ]
        self.critic = nn.Sequential(*critic_layers)

    def forward(self, state, action=None, evaluate=False):
        # The agent calls actor/critic directly; forward is unsupported.
        raise NotImplementedError
class AgentPPO(Agent):
    """PPO agent: collects trajectories with an old policy snapshot and
    updates the current policy with the clipped surrogate objective."""
    def __init__(self, env, args):
        self.env = env
        # Current policy (trained) and a frozen copy used when acting.
        self.model = PPO(obs_shape = self.env.observation_space.shape[0],
                         act_shape= self.env.action_space.n,
                         hidden_size=64).to(device)
        self.model_old = PPO(obs_shape = self.env.observation_space.shape[0],
                             act_shape= self.env.action_space.n,
                             hidden_size=64).to(device)
        if args.test_ppo:
            self.load('ppo.cpt')
        # discounted reward factor
        self.gamma = 0.99
        # Adam (beta1, beta2) coefficients
        self.betas = (0.9, 0.999)
        self.eps_clip = 0.2     # PPO clipping range epsilon
        self.K_epochs = 5       # optimization epochs per update
        self.steps = 0
        self.MseLoss = nn.MSELoss()
        # optimizer
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=3e-3,betas=self.betas)
        # Rollout buffers, cleared after every policy update.
        # self.rewards, self.saved_actions = [], []
        # self.saved_log_probs=[]
        self.actions = []
        self.states = []
        self.logprobs = []
        self.state_values = []
        self.rewards = []

    def save(self, save_path):
        print('save model to', save_path)
        torch.save(self.model.state_dict(), save_path)

    def load(self, load_path):
        # NOTE(review): loads into model_old (the acting policy), not model.
        print('load model from', load_path)
        self.model_old.load_state_dict(torch.load(load_path))

    def init_game_setting(self):
        # Reset per-episode bookkeeping before evaluation.
        self.rewards, self.saved_actions = [], []

    def clear_memory(self):
        # Drop the rollout buffers in place after an update.
        del self.actions[:]
        del self.states[:]
        del self.logprobs[:]
        del self.rewards[:]

    def make_action(self, state, action, test=False):
        # Dual-purpose method:
        #   test=False — act: sample an action for a single numpy state and
        #                record (state, action, logprob) in the buffers;
        #   test=True  — evaluate: score the given batched states/actions,
        #                returning (logprobs, state values, entropy).
        # HINT: google torch.distributions.Categorical
        if not test:
            state = torch.from_numpy(state).float().to(device)
        state_value = self.model.critic(state)
        action_probs = self.model.actor(state)
        action_distribution = Categorical(action_probs)
        if not test:
            action = action_distribution.sample()
            self.actions.append(action)
            self.states.append(state)
            self.logprobs.append(action_distribution.log_prob(action))
        action_logprobs = action_distribution.log_prob(action)
        dist_entropy=action_distribution.entropy()
        if test:
            return action_logprobs,torch.squeeze(state_value),dist_entropy
        if not test:
            return action.item()

    def update(self):
        # Monte-Carlo discounted returns, newest-to-oldest, then normalized.
        rewards = []
        discounted_reward = 0
        for reward in reversed(self.rewards):
            discounted_reward = reward + (self.gamma * discounted_reward)
            rewards.insert(0, discounted_reward)
        rewards = torch.tensor(rewards).to(device)
        rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-5)
        # Convert the rollout buffers into detached tensors.
        old_states = torch.stack(self.states).to(device).detach()
        old_actions = torch.stack(self.actions).to(device).detach()
        old_logprobs = torch.stack(self.logprobs).to(device).detach()
        for _ in range(self.K_epochs):
            # Evaluating old actions and values :
            logprobs, state_values, dist_entropy = self.make_action(old_states, old_actions, test=True)
            # Finding the ratio (pi_theta / pi_theta__old):
            ratios = torch.exp(logprobs - old_logprobs.detach())
            # Finding Surrogate Loss (clipped PPO objective + value loss
            # - entropy bonus):
            advantages = rewards - state_values.detach()
            surr1 = ratios * advantages
            surr2 = torch.clamp(ratios, 1-self.eps_clip, 1+self.eps_clip) * advantages
            loss = -torch.min(surr1, surr2) + 0.5*self.MseLoss(state_values, rewards) - 0.01*dist_entropy
            # take gradient step
            self.optimizer.zero_grad()
            loss.mean().backward()
            self.optimizer.step()
        self.clear_memory()
        # Copy new weights into old policy:
        self.model_old.load_state_dict(self.model.state_dict())

    def train(self):
        # Training loop: act for up to max_timesteps per episode, update the
        # policy every update_timestep environment steps, log every
        # log_interval episodes, and stop early once the running reward
        # clears the solve threshold.
        running_reward=0
        avg_length=0
        n_update = 10           # NOTE(review): unused
        log_interval = 10
        num_episodes=5000
        rewards=[]
        time_step=0
        steps=[]                # NOTE(review): unused
        max_timesteps=300
        time_step=0             # NOTE(review): duplicate initialization
        update_timestep = 2000
        for epoch in range(num_episodes):
            state = self.env.reset()
            for t in range(max_timesteps):
                time_step+=1
                action=self.make_action(state,None,test=False)
                state, reward, done, _ = self.env.step(action)
                self.rewards.append(reward)
                if time_step % update_timestep == 0:
                    self.update()
                    time_step = 0
                running_reward+=reward
                if done:
                    break
            avg_length+=t
            if running_reward >50*log_interval:
                self.save('ppo.cpt')
                print("########## Solved! ##########")
                break
            if epoch % log_interval == 0:
                avg_length = int(avg_length/log_interval)
                running_reward = int((running_reward/log_interval))
                rewards.append(running_reward)
                print('Episode {} \t avg length: {} \t reward: {}'.format(epoch, avg_length, running_reward))
                running_reward = 0
                avg_length = 0
        # Plot and save the logged running-reward curve.
        plt.plot(rewards)
        plt.ylabel('Moving average reward')
        plt.xlabel('Step')
        plt.savefig('./ppo50')
| true |
a1aa01acef1dae7a37bd8fbabc9ee3dfd8af5f02 | Python | zouzhuwen/PycharmProject | /mooc_selenium/case/keyword_case.py | UTF-8 | 3,090 | 2.8125 | 3 | [] | no_license | #coding=utf-8
from util.excel_util import ExcelUtil
from keywordselenium.actionMethod import ActionMethod
class KeyWordCase():
    """Keyword-driven Selenium test runner.

    Reads test steps from an Excel sheet (one row per step) and dispatches
    each step to a method of ActionMethod by name.  Column layout:
      3 = run flag ("yes"), 4 = method name, 5 = send value,
      6 = element locator, 7 = expected-result method, 8 = expected result,
      9 = pass/fail verdict written back.
    """
    def run_main(self):
        # Walk every data row of the keyword sheet and execute it.
        self.action_method = ActionMethod()
        handle_excel = ExcelUtil("D:\PycharmProject\mooc_selenium\config\keyword.xls")
        case_lines = handle_excel.get_lines()
        if case_lines:
            for i in range(1,case_lines):
                is_run = handle_excel.get_col_data(i,3)
                print(is_run)
                if is_run == 'yes': # should this row be executed?
                    method = handle_excel.get_col_data(i,4)
                    send_value = handle_excel.get_col_data(i, 5)
                    handle_value = handle_excel.get_col_data(i, 6)
                    # empty cells come back as '' rather than None
                    # if send_value
                    self.run_method(method, send_value, handle_value)
                    except_result_method = handle_excel.get_col_data(i,7)
                    except_result = handle_excel.get_col_data(i,8)
                    if except_result != '':
                        print("********"+except_result)
                        # Expected result encoded as "kind=value".
                        except_value = self.get_except_result_value(except_result)
                        if except_value[0] =="text":
                            # Compare expected text against the page text.
                            result = self.run_method(except_result_method)
                            print("###########" + result)
                            if except_value[1] in result:
                                handle_excel.writer_value(i,9,'pass')
                            else:
                                handle_excel.writer_value(i,9,'fail')
                        elif except_value[0] == "element":
                            # Check that the expected element is present.
                            result = self.run_method(except_result_method,except_value[1])
                            print(result)
                            if result:
                                handle_excel.writer_value(i,9,'pass')
                            else:
                                handle_excel.writer_value(i,9,'fail')
                        else:
                            print("没有else")
                    else:
                        print("没有预期结果值")
        # get the row count
        # loop over every row
        # if: should this row run
        # fetch the method to execute
        # fetch the operand value
        # if: is there input data
    def get_except_result_value(self,data):
        # Split "kind=value" into [kind, value].
        return data.split('=')
    def run_method(self,method,send_value='',handle_value=''):
        # Dispatch to ActionMethod.<method> with whichever of the two
        # optional arguments are non-empty.
        method_value = getattr(self.action_method,method)
        # print(method)
        # print(send_value+"---------->"+handle_value)
        if send_value == "" and handle_value != "":
            result = method_value(handle_value)
        elif send_value == "" and handle_value == "":
            result = method_value()
        elif send_value != "" and handle_value == "":
            result = method_value(send_value)
        else:
            result = method_value(handle_value,send_value)
        return result
# Run the keyword suite when executed directly.
if __name__ == '__main__':
    KeyWordCase().run_main()
| true |
0c531b77c832f509dc2988fda84cf328b72a106f | Python | londonhackspace/irccat-commands | /sugarwater.py | UTF-8 | 182 | 2.671875 | 3 | [] | no_license | #!/usr/bin/python3
from sys import argv
# irccat command handler.  argv[-1] carries the command name; argv[5] is
# presumably the caller's nick as passed by irccat — TODO confirm the
# argument layout (this crashes with IndexError if fewer args are given).
if argv[-1] == 'sugarwater': print ("you take a swig of refreshing sugarwater")
print("you sustain %s with nourishing sugarwater" % argv[5])
| true |
685caa0d624933d2a52c903532e4dd9b5bfd5cb0 | Python | GB255/code-tests | /codility/Python/MissingInteger_100.py | UTF-8 | 266 | 2.765625 | 3 | [] | no_license | # you can write to stdout for debugging purposes, e.g.
# print("this is a debug message")
def solution(A):
B=[0]*(max(A)+1)
for x in A:
if x>0:
B[x-1]=x
if len(B)<1:
B.append(0)
res = B.index(0)+1
return res
pass
| true |
050db2fd3b303f6d9ea9ab7dc9d417fb2df8b943 | Python | vperic/sympy | /sympy/stats/crv_types.py | UTF-8 | 43,509 | 3.296875 | 3 | [
"BSD-3-Clause"
] | permissive | """
Continuous Random Variables - Prebuilt variables
Contains
========
Arcsin
Benini
Beta
BetaPrime
Cauchy
Chi
Dagum
Exponential
Gamma
Laplace
Logistic
LogNormal
Maxwell
Nakagami
Normal
Pareto
Rayleigh
StudentT
Triangular
Uniform
UniformSum
Weibull
WignerSemicircle
"""
from sympy import (exp, log, sqrt, pi, S, Dummy, Interval, S, sympify, gamma,
Piecewise, And, Eq, binomial, factorial, Sum, floor, Abs,
Symbol, log)
from sympy import beta as beta_fn
from crv import SingleContinuousPSpace
from sympy.core.decorators import _sympifyit
import random
# Shorthand for positive infinity, used throughout as an interval bound.
oo = S.Infinity

# Public API: the generic ContinuousRV constructor plus one factory per
# prebuilt continuous distribution.
__all__ = ['ContinuousRV',
'Arcsin',
'Benini',
'Beta',
'BetaPrime',
'Cauchy',
'Chi',
'Dagum',
'Exponential',
'Gamma',
'Laplace',
'Logistic',
'LogNormal',
'Maxwell',
'Nakagami',
'Normal',
'Pareto',
'Rayleigh',
'StudentT',
'Triangular',
'Uniform',
'UniformSum',
'Weibull',
'WignerSemicircle'
]
def _value_check(condition, message):
"""
Check a condition on input value.
Raises ValueError with message if condition is not True
"""
if condition is not True:
raise ValueError(message)
def ContinuousRV(symbol, density, set=Interval(-oo,oo)):
    """
    Create a Continuous Random Variable given the following:

    -- a symbol
    -- a probability density function
    -- set on which the pdf is valid (defaults to entire real line)

    Returns a RandomSymbol.

    The density should integrate to one over `set`; presumably this is
    not validated here — see crv.SingleContinuousPSpace.

    Many common continuous random variable types are already implemented.
    This function should be necessary only very rarely.

    Examples
    ========

    >>> from sympy import Symbol, sqrt, exp, pi
    >>> from sympy.stats import ContinuousRV, P, E

    >>> x = Symbol("x")

    >>> pdf = sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)) # Normal distribution
    >>> X = ContinuousRV(x, pdf)

    >>> E(X)
    0
    >>> P(X>0)
    1/2
    """
    return SingleContinuousPSpace(symbol, density, set).value
########################################
# Continuous Probability Distributions #
########################################
#-------------------------------------------------------------------------------
# Arcsin distribution ----------------------------------------------------------
class ArcsinPSpace(SingleContinuousPSpace):
    """Probability space for the arcsin distribution on [a, b]."""
    def __new__(cls, name, a, b):
        # Coerce the endpoints to SymPy expressions before building the pdf.
        left = sympify(a)
        right = sympify(b)
        sym = Symbol(name)
        density = 1/(pi*sqrt((sym - left)*(right - sym)))
        return SingleContinuousPSpace.__new__(cls, sym, density,
                                              set=Interval(left, right))
def Arcsin(name, a=0, b=1):
    r"""
    Create a Continuous Random Variable with an arcsin distribution.

    The density of the arcsin distribution is given by

    .. math::
        f(x) := \frac{1}{\pi\sqrt{(x-a)(b-x)}}

    with :math:`x \in [a,b]`. It must hold that :math:`-\infty < a < b < \infty`.

    Parameters
    ==========

    a : Real number, the left interval boundary (default 0)
    b : Real number, the right interval boundary (default 1)

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Arcsin, density
    >>> from sympy import Symbol, simplify

    >>> a = Symbol("a", real=True)
    >>> b = Symbol("b", real=True)

    >>> X = Arcsin("x", a, b)

    >>> density(X)
    Lambda(_x, 1/(pi*sqrt((-_x + b)*(_x - a))))

    References
    ==========

    [1] http://en.wikipedia.org/wiki/Arcsine_distribution
    """
    return ArcsinPSpace(name, a, b).value
#-------------------------------------------------------------------------------
# Benini distribution ----------------------------------------------------------
class BeniniPSpace(SingleContinuousPSpace):
    """Probability space for the Benini distribution, supported on
    [sigma, oo)."""
    def __new__(cls, name, alpha, beta, sigma):
        alpha, beta, sigma = sympify(alpha), sympify(beta), sympify(sigma)
        # Consistency with BetaPSpace: reject invalid parameters up front
        # (the Benini shapes and scale are all required to be positive).
        _value_check(alpha > 0, "Alpha must be positive")
        _value_check(beta > 0, "Beta must be positive")
        _value_check(sigma > 0, "Sigma must be positive")
        x = Symbol(name)
        pdf = (exp(-alpha*log(x/sigma)-beta*log(x/sigma)**2)
               *(alpha/x+2*beta*log(x/sigma)/x))
        obj = SingleContinuousPSpace.__new__(cls, x, pdf,
                                             set = Interval(sigma, oo))
        return obj
def Benini(name, alpha, beta, sigma):
    r"""
    Create a Continuous Random Variable with a Benini distribution.

    The density of the Benini distribution is given by

    .. math::
        f(x) := e^{-\alpha\log{\frac{x}{\sigma}}
                -\beta\log\left[{\frac{x}{\sigma}}\right]^2}
                \left(\frac{\alpha}{x}+\frac{2\beta\log{\frac{x}{\sigma}}}{x}\right)

    with :math:`x \in [\sigma, \infty)`.

    Parameters
    ==========

    alpha : Real number, `alpha` > 0 a shape
    beta : Real number, `beta` > 0 a shape
    sigma : Real number, `sigma` > 0 a scale

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Benini, density
    >>> from sympy import Symbol, simplify, pprint

    >>> alpha = Symbol("alpha", positive=True)
    >>> beta = Symbol("beta", positive=True)
    >>> sigma = Symbol("sigma", positive=True)

    >>> X = Benini("x", alpha, beta, sigma)

    >>> D = density(X)
    >>> pprint(D, use_unicode=False)
           /    /                                                      2       \
           |    /                  /  x  \\            /  x  \        /  x  \|
           |    |        2*beta*log|-----||  - alpha*log|-----| - beta*log |-----||
           |    |alpha             \sigma/|             \sigma/            \sigma/|
    Lambda|x, |----- + -----------------|*e                                      |
           \    \  x             x       /                                        /

    References
    ==========

    [1] http://en.wikipedia.org/wiki/Benini_distribution
    """
    return BeniniPSpace(name, alpha, beta, sigma).value
#-------------------------------------------------------------------------------
# Beta distribution ------------------------------------------------------------
class BetaPSpace(SingleContinuousPSpace):
    """Probability space for the Beta distribution on [0, 1]."""
    def __new__(cls, name, alpha, beta):
        alpha, beta = sympify(alpha), sympify(beta)

        # Both shape parameters must be positive.
        _value_check(alpha > 0, "Alpha must be positive")
        _value_check(beta > 0, "Beta must be positive")

        x = Symbol(name)
        pdf = x**(alpha-1) * (1-x)**(beta-1) / beta_fn(alpha, beta)

        obj = SingleContinuousPSpace.__new__(cls, x, pdf, set=Interval(0, 1))
        # Keep the parameters around so sample() can draw with the stdlib RNG.
        obj.alpha = alpha
        obj.beta = beta
        return obj

    def sample(self):
        # NOTE(review): random.betavariate needs numeric parameters —
        # presumably fails for symbolic alpha/beta; confirm intended use.
        return {self.value: random.betavariate(self.alpha, self.beta)}
def Beta(name, alpha, beta):
    r"""
    Create a Continuous Random Variable with a Beta distribution.

    The density of the Beta distribution is given by

    .. math::
        f(x) := \frac{x^{\alpha-1}(1-x)^{\beta-1}} {\mathrm{B}(\alpha,\beta)}

    with :math:`x \in [0,1]`.

    Parameters
    ==========

    alpha : Real number, `alpha` > 0 a shape (validated at construction)
    beta : Real number, `beta` > 0 a shape (validated at construction)

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Beta, density, E, variance
    >>> from sympy import Symbol, simplify, pprint

    >>> alpha = Symbol("alpha", positive=True)
    >>> beta = Symbol("beta", positive=True)

    >>> X = Beta("x", alpha, beta)

    >>> D = density(X)
    >>> pprint(D, use_unicode=False)
           /     alpha - 1         beta - 1                      \
           |    x         *(-x + 1)        *gamma(alpha + beta)|
    Lambda|x, -----------------------------------------------|
           \              gamma(alpha)*gamma(beta)             /

    >>> simplify(E(X, meijerg=True))
    alpha/(alpha + beta)

    >>> simplify(variance(X, meijerg=True))
    alpha*beta/((alpha + beta)**2*(alpha + beta + 1))

    References
    ==========

    [1] http://en.wikipedia.org/wiki/Beta_distribution
    [2] http://mathworld.wolfram.com/BetaDistribution.html
    """
    return BetaPSpace(name, alpha, beta).value
#-------------------------------------------------------------------------------
# Beta prime distribution ------------------------------------------------------
class BetaPrimePSpace(SingleContinuousPSpace):
    """Probability space for the Beta prime distribution on (0, oo)."""
    def __new__(cls, name, alpha, beta):
        alpha, beta = sympify(alpha), sympify(beta)
        # Consistency with BetaPSpace: both shapes must be positive.
        _value_check(alpha > 0, "Alpha must be positive")
        _value_check(beta > 0, "Beta must be positive")
        x = Symbol(name)
        pdf = x**(alpha-1)*(1+x)**(-alpha-beta)/beta_fn(alpha, beta)
        obj = SingleContinuousPSpace.__new__(cls, x, pdf, set = Interval(0, oo))
        return obj
def BetaPrime(name, alpha, beta):
    r"""
    Create a continuous random variable with a Beta prime distribution.

    The density of the Beta prime distribution is given by

    .. math::
        f(x) := \frac{x^{\alpha-1} (1+x)^{-\alpha -\beta}}{B(\alpha,\beta)}

    with :math:`x > 0`.

    Parameters
    ==========

    alpha : Real number, `alpha` > 0 a shape
    beta : Real number, `beta` > 0 a shape

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import BetaPrime, density
    >>> from sympy import Symbol, pprint

    >>> alpha = Symbol("alpha", positive=True)
    >>> beta = Symbol("beta", positive=True)

    >>> X = BetaPrime("x", alpha, beta)

    >>> D = density(X)
    >>> pprint(D, use_unicode=False)
           /     alpha - 1         -alpha - beta                      \
           |    x         *(x + 1)             *gamma(alpha + beta)|
    Lambda|x, ---------------------------------------------------|
           \                gamma(alpha)*gamma(beta)               /

    References
    ==========

    [1] http://en.wikipedia.org/wiki/Beta_prime_distribution
    [2] http://mathworld.wolfram.com/BetaPrimeDistribution.html
    """
    return BetaPrimePSpace(name, alpha, beta).value
#-------------------------------------------------------------------------------
# Cauchy distribution ----------------------------------------------------------
class CauchyPSpace(SingleContinuousPSpace):
    """Probability space for the Cauchy distribution (whole real line)."""
    def __new__(cls, name, x0, gamma):
        x0, gamma = sympify(x0), sympify(gamma)
        # The scale must be positive (per the documented contract);
        # consistency with BetaPSpace's validation style.
        _value_check(gamma > 0, "Gamma must be positive")
        x = Symbol(name)
        pdf = 1/(pi*gamma*(1+((x-x0)/gamma)**2))
        obj = SingleContinuousPSpace.__new__(cls, x, pdf)
        return obj
def Cauchy(name, x0, gamma):
    r"""
    Create a continuous random variable with a Cauchy distribution.

    The density of the Cauchy distribution is given by

    .. math::
        f(x) := \frac{1}{\pi \gamma \left[1 +
                \left(\frac{x-x_0}{\gamma}\right)^2\right]}

    (The previous docstring showed the cumulative distribution function;
    the formula above matches the implemented density.)

    Parameters
    ==========

    x0 : Real number, the location
    gamma : Real number, `gamma` > 0 the scale

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Cauchy, density
    >>> from sympy import Symbol

    >>> x0 = Symbol("x0")
    >>> gamma = Symbol("gamma", positive=True)

    >>> X = Cauchy("x", x0, gamma)

    >>> density(X)
    Lambda(_x, 1/(pi*gamma*(1 + (_x - x0)**2/gamma**2)))

    References
    ==========

    [1] http://en.wikipedia.org/wiki/Cauchy_distribution
    [2] http://mathworld.wolfram.com/CauchyDistribution.html
    """
    return CauchyPSpace(name, x0, gamma).value
#-------------------------------------------------------------------------------
# Chi distribution -------------------------------------------------------------
class ChiPSpace(SingleContinuousPSpace):
    """Probability space for the Chi distribution on [0, oo).

    NOTE(review): k is not validated with _value_check here — the
    documented example uses Symbol("k", integer=True) (not positive),
    which such a check would reject.
    """
    def __new__(cls, name, k):
        k = sympify(k)
        x = Symbol(name)
        pdf = 2**(1-k/2)*x**(k-1)*exp(-x**2/2)/gamma(k/2)
        obj = SingleContinuousPSpace.__new__(cls, x, pdf, set = Interval(0, oo))
        return obj
def Chi(name, k):
r"""
Create a continuous random variable with a Chi distribution.
The density of the Chi distribution is given by
.. math::
f(x) := \frac{2^{1-k/2}x^{k-1}e^{-x^2/2}}{\Gamma(k/2)}
with :math:`x \geq 0`.
Parameters
==========
k : Integer, `k` > 0 the number of degrees of freedom
Returns
=======
A RandomSymbol.
Examples
========
>>> from sympy.stats import Chi, density, E, std
>>> from sympy import Symbol, simplify
>>> k = Symbol("k", integer=True)
>>> X = Chi("x", k)
>>> density(X)
Lambda(_x, 2**(-k/2 + 1)*_x**(k - 1)*exp(-_x**2/2)/gamma(k/2))
References
==========
[1] http://en.wikipedia.org/wiki/Chi_distribution
[2] http://mathworld.wolfram.com/ChiDistribution.html
"""
return ChiPSpace(name, k).value
#-------------------------------------------------------------------------------
# Dagum distribution -----------------------------------------------------------
class DagumPSpace(SingleContinuousPSpace):
    """Probability space for the Dagum distribution (shapes p, a; scale b)."""
    def __new__(cls, name, p, a, b):
        p, a, b = sympify(p), sympify(a), sympify(b)
        x = Symbol(name)
        pdf = a*p/x*((x/b)**(a*p)/(((x/b)**a+1)**(p+1)))
        obj = SingleContinuousPSpace.__new__(cls, x, pdf)
        return obj
def Dagum(name, p, a, b):
    r"""
    Create a continuous random variable with a Dagum distribution.
    The density of the Dagum distribution is given by
    .. math::
        f(x) := \frac{a p}{x} \left( \frac{(\tfrac{x}{b})^{a p}}
                {\left((\tfrac{x}{b})^a + 1 \right)^{p+1}} \right)
    with :math:`x > 0`.
    Parameters
    ==========
    p : Real number, `p` > 0 a shape
    a : Real number, `a` > 0 a shape
    b : Real number, `b` > 0 a scale
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Dagum, density
    >>> from sympy import Symbol, simplify
    >>> p = Symbol("p", positive=True)
    >>> b = Symbol("b", positive=True)
    >>> a = Symbol("a", positive=True)
    >>> X = Dagum("x", p, a, b)
    >>> density(X)
    Lambda(_x, a*p*(_x/b)**(a*p)*((_x/b)**a + 1)**(-p - 1)/_x)
    References
    ==========
    [1] http://en.wikipedia.org/wiki/Dagum_distribution
    """
    return DagumPSpace(name, p, a, b).value
#-------------------------------------------------------------------------------
# Exponential distribution -----------------------------------------------------
class ExponentialPSpace(SingleContinuousPSpace):
    """Probability space for the Exponential distribution (rate > 0)."""
    def __new__(cls, name, rate):
        rate = sympify(rate)
        _value_check(rate > 0, "Rate must be positive.")
        x = Symbol(name)
        pdf = rate * exp(-rate*x)
        obj = SingleContinuousPSpace.__new__(cls, x, pdf, set=Interval(0, oo))
        # Stored so sample() can reach the rate without re-deriving it.
        obj.rate = rate
        return obj
    def sample(self):
        """Draw one random value using Python's random.expovariate."""
        return {self.value: random.expovariate(self.rate)}
def Exponential(name, rate):
    r"""
    Create a continuous random variable with an Exponential distribution.
    The density of the exponential distribution is given by
    .. math::
        f(x) := \lambda \exp(-\lambda x)
    with :math:`x > 0`.
    Parameters
    ==========
    rate : Real number, `rate` > 0 the rate or inverse scale
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Exponential, density, cdf, E
    >>> from sympy.stats import variance, std, skewness
    >>> from sympy import Symbol
    >>> l = Symbol("lambda", positive=True)
    >>> X = Exponential("x", l)
    >>> density(X)
    Lambda(_x, lambda*exp(-_x*lambda))
    >>> cdf(X)
    Lambda(_z, Piecewise((0, _z < 0), (1 - exp(-_z*lambda), True)))
    >>> E(X)
    1/lambda
    >>> variance(X)
    lambda**(-2)
    >>> skewness(X)
    2
    >>> X = Exponential('x', 10)
    >>> density(X)
    Lambda(_x, 10*exp(-10*_x))
    >>> E(X)
    1/10
    >>> std(X)
    1/10
    References
    ==========
    [1] http://en.wikipedia.org/wiki/Exponential_distribution
    [2] http://mathworld.wolfram.com/ExponentialDistribution.html
    """
    return ExponentialPSpace(name, rate).value
#-------------------------------------------------------------------------------
# Gamma distribution -----------------------------------------------------------
class GammaPSpace(SingleContinuousPSpace):
    """Probability space for the Gamma distribution (shape k, scale theta)."""
    def __new__(cls, name, k, theta):
        k, theta = sympify(k), sympify(theta)
        _value_check(k > 0, "k must be positive")
        _value_check(theta > 0, "Theta must be positive")
        x = Symbol(name)
        pdf = x**(k-1) * exp(-x/theta) / (gamma(k)*theta**k)
        obj = SingleContinuousPSpace.__new__(cls, x, pdf, set=Interval(0, oo))
        # Stored so sample() can reach the parameters without re-deriving them.
        obj.k = k
        obj.theta = theta
        return obj
    def sample(self):
        """Draw one random value using Python's random.gammavariate."""
        return {self.value: random.gammavariate(self.k, self.theta)}
def Gamma(name, k, theta):
    r"""
    Create a continuous random variable with a Gamma distribution.
    The density of the Gamma distribution is given by
    .. math::
        f(x) := \frac{1}{\Gamma(k) \theta^k} x^{k - 1} e^{-\frac{x}{\theta}}
    with :math:`x \in [0, \infty)`.
    Parameters
    ==========
    k : Real number, `k` > 0 a shape
    theta : Real number, `theta` > 0 a scale
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Gamma, density, cdf, E, variance
    >>> from sympy import Symbol, pprint
    >>> k = Symbol("k", positive=True)
    >>> theta = Symbol("theta", positive=True)
    >>> X = Gamma("x", k, theta)
    >>> D = density(X)
    >>> pprint(D, use_unicode=False)
    /                  -x  \
    |                 -----|
    |     k - 1   -k  theta|
    |    x      *theta  *e |
    Lambda|x, ---------------------|
    \         gamma(k)     /
    >>> C = cdf(X, meijerg=True)
    >>> pprint(C, use_unicode=False)
    Lambda/z, /                                   0               for z < 0\
    |         |                                                            |
    |         |                           /     z  \                       |
    |         <             k*lowergamma|k, -----|                         |
    |         |  k*lowergamma(k, 0)      \  theta/                         |
    |         |- ------------------ + ----------------------      otherwise|
    \         \     gamma(k + 1)           gamma(k + 1)                    /
    >>> E(X)
    theta*gamma(k + 1)/gamma(k)
    >>> V = variance(X)
    >>> pprint(V, use_unicode=False)
    2      2                     -k      k + 1
    theta *gamma (k + 1)   theta*theta  *theta     *gamma(k + 2)
    - -------------------- + -------------------------------------
    2                         gamma(k)
    gamma (k)
    References
    ==========
    [1] http://en.wikipedia.org/wiki/Gamma_distribution
    [2] http://mathworld.wolfram.com/GammaDistribution.html
    """
    return GammaPSpace(name, k, theta).value
#-------------------------------------------------------------------------------
# Laplace distribution ---------------------------------------------------------
class LaplacePSpace(SingleContinuousPSpace):
    """Probability space for the Laplace distribution (location mu, scale b)."""
    def __new__(cls, name, mu, b):
        mu, b = sympify(mu), sympify(b)
        x = Symbol(name)
        pdf = 1/(2*b)*exp(-Abs(x-mu)/b)
        obj = SingleContinuousPSpace.__new__(cls, x, pdf)
        return obj
def Laplace(name, mu, b):
    r"""
    Create a continuous random variable with a Laplace distribution.
    The density of the Laplace distribution is given by
    .. math::
        f(x) := \frac{1}{2 b} \exp \left(-\frac{|x-\mu|}b \right)
    Parameters
    ==========
    mu : Real number, the location
    b : Real number, `b` > 0 a scale
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Laplace, density
    >>> from sympy import Symbol
    >>> mu = Symbol("mu")
    >>> b = Symbol("b", positive=True)
    >>> X = Laplace("x", mu, b)
    >>> density(X)
    Lambda(_x, exp(-Abs(_x - mu)/b)/(2*b))
    References
    ==========
    [1] http://en.wikipedia.org/wiki/Laplace_distribution
    [2] http://mathworld.wolfram.com/LaplaceDistribution.html
    """
    return LaplacePSpace(name, mu, b).value
#-------------------------------------------------------------------------------
# Logistic distribution --------------------------------------------------------
class LogisticPSpace(SingleContinuousPSpace):
    """Probability space for the logistic distribution (location mu, scale s)."""
    def __new__(cls, name, mu, s):
        mu, s = sympify(mu), sympify(s)
        x = Symbol(name)
        pdf = exp(-(x-mu)/s)/(s*(1+exp(-(x-mu)/s))**2)
        obj = SingleContinuousPSpace.__new__(cls, x, pdf)
        return obj
def Logistic(name, mu, s):
    r"""
    Create a continuous random variable with a logistic distribution.
    The density of the logistic distribution is given by
    .. math::
        f(x) := \frac{e^{-(x-\mu)/s}} {s\left(1+e^{-(x-\mu)/s}\right)^2}
    Parameters
    ==========
    mu : Real number, the location
    s : Real number, `s` > 0 a scale
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Logistic, density
    >>> from sympy import Symbol
    >>> mu = Symbol("mu", real=True)
    >>> s = Symbol("s", positive=True)
    >>> X = Logistic("x", mu, s)
    >>> density(X)
    Lambda(_x, exp((-_x + mu)/s)/(s*(exp((-_x + mu)/s) + 1)**2))
    References
    ==========
    [1] http://en.wikipedia.org/wiki/Logistic_distribution
    [2] http://mathworld.wolfram.com/LogisticDistribution.html
    """
    return LogisticPSpace(name, mu, s).value
#-------------------------------------------------------------------------------
# Log Normal distribution ------------------------------------------------------
class LogNormalPSpace(SingleContinuousPSpace):
    """Probability space for the log-normal distribution.

    `mean` and `std` are the mean and standard deviation of the variable's
    natural logarithm, not of the variable itself.
    """
    def __new__(cls, name, mean, std):
        mean, std = sympify(mean), sympify(std)
        x = Symbol(name)
        pdf = exp(-(log(x)-mean)**2 / (2*std**2)) / (x*sqrt(2*pi)*std)
        obj = SingleContinuousPSpace.__new__(cls, x, pdf, set=Interval(0, oo))
        # Stored so sample() can reach the parameters without re-deriving them.
        obj.mean = mean
        obj.std = std
        return obj
    def sample(self):
        """Draw one random value using Python's random.lognormvariate."""
        return {self.value: random.lognormvariate(self.mean, self.std)}
def LogNormal(name, mean, std):
    r"""
    Create a continuous random variable with a log-normal distribution.
    The density of the log-normal distribution is given by
    .. math::
        f(x) := \frac{1}{x\sqrt{2\pi\sigma^2}}
                e^{-\frac{\left(\ln x-\mu\right)^2}{2\sigma^2}}
    with :math:`x \geq 0`.
    Parameters
    ==========
    mu : Real number, the log-scale
    sigma : Real number, :math:`\sigma^2 > 0` a shape
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import LogNormal, density
    >>> from sympy import Symbol, simplify, pprint
    >>> mu = Symbol("mu", real=True)
    >>> sigma = Symbol("sigma", positive=True)
    >>> X = LogNormal("x", mu, sigma)
    >>> D = density(X)
    >>> pprint(D, use_unicode=False)
    /                         2\
    |           -(-mu + log(x)) |
    |           ----------------|
    |                      2    |
    |       ___      2*sigma    |
    |     \/ 2 *e               |
    Lambda|x, -----------------------|
    |          ____             |
    \     2*x*\/ pi *sigma      /
    >>> X = LogNormal('x', 0, 1) # Mean 0, standard deviation 1
    >>> density(X)
    Lambda(_x, sqrt(2)*exp(-log(_x)**2/2)/(2*_x*sqrt(pi)))
    References
    ==========
    [1] http://en.wikipedia.org/wiki/Lognormal
    [2] http://mathworld.wolfram.com/LogNormalDistribution.html
    """
    return LogNormalPSpace(name, mean, std).value
#-------------------------------------------------------------------------------
# Maxwell distribution ---------------------------------------------------------
class MaxwellPSpace(SingleContinuousPSpace):
    """Probability space for the Maxwell distribution (scale a > 0)."""
    def __new__(cls, name, a):
        a = sympify(a)
        x = Symbol(name)
        pdf = sqrt(2/pi)*x**2*exp(-x**2/(2*a**2))/a**3
        # Support is the non-negative half line.
        obj = SingleContinuousPSpace.__new__(cls, x, pdf, set = Interval(0, oo))
        return obj
def Maxwell(name, a):
    r"""
    Create a continuous random variable with a Maxwell distribution.
    The density of the Maxwell distribution is given by
    .. math::
        f(x) := \sqrt{\frac{2}{\pi}} \frac{x^2 e^{-x^2/(2a^2)}}{a^3}
    with :math:`x \geq 0`.
    Parameters
    ==========
    a : Real number, `a` > 0
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Maxwell, density, E, variance
    >>> from sympy import Symbol, simplify
    >>> a = Symbol("a", positive=True)
    >>> X = Maxwell("x", a)
    >>> density(X)
    Lambda(_x, sqrt(2)*_x**2*exp(-_x**2/(2*a**2))/(sqrt(pi)*a**3))
    >>> E(X)
    2*sqrt(2)*a/sqrt(pi)
    >>> simplify(variance(X))
    a**2*(-8 + 3*pi)/pi
    References
    ==========
    [1] http://en.wikipedia.org/wiki/Maxwell_distribution
    [2] http://mathworld.wolfram.com/MaxwellDistribution.html
    """
    return MaxwellPSpace(name, a).value
#-------------------------------------------------------------------------------
# Nakagami distribution --------------------------------------------------------
class NakagamiPSpace(SingleContinuousPSpace):
    """Probability space for the Nakagami distribution (shape mu, spread omega)."""
    def __new__(cls, name, mu, omega):
        mu, omega = sympify(mu), sympify(omega)
        x = Symbol(name)
        pdf = 2*mu**mu/(gamma(mu)*omega**mu)*x**(2*mu-1)*exp(-mu/omega*x**2)
        # Support is the non-negative half line.
        obj = SingleContinuousPSpace.__new__(cls, x, pdf, set = Interval(0, oo))
        return obj
def Nakagami(name, mu, omega):
    r"""
    Create a continuous random variable with a Nakagami distribution.
    The density of the Nakagami distribution is given by
    .. math::
        f(x) := \frac{2\mu^\mu}{\Gamma(\mu)\omega^\mu} x^{2\mu-1}
                \exp\left(-\frac{\mu}{\omega}x^2 \right)
    with :math:`x > 0`.
    Parameters
    ==========
    mu : Real number, :math:`mu \geq \frac{1}{2}` a shape
    omega : Real number, `omega` > 0 the spread
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Nakagami, density, E, variance
    >>> from sympy import Symbol, simplify, pprint
    >>> mu = Symbol("mu", positive=True)
    >>> omega = Symbol("omega", positive=True)
    >>> X = Nakagami("x", mu, omega)
    >>> D = density(X)
    >>> pprint(D, use_unicode=False)
    /                             2   \
    |                           -x *mu|
    |                           ------|
    |     2*mu - 1   mu    -mu  omega |
    |  2*x        *mu  *omega  *e     |
    Lambda|x, ---------------------------------|
    \               gamma(mu)         /
    >>> simplify(E(X, meijerg=True))
    sqrt(mu)*sqrt(omega)*gamma(mu + 1/2)/gamma(mu + 1)
    >>> V = simplify(variance(X, meijerg=True))
    >>> pprint(V, use_unicode=False)
    /                               2          \
    omega*\gamma(mu)*gamma(mu + 1) - gamma (mu + 1/2)/
    --------------------------------------------------
    gamma(mu)*gamma(mu + 1)
    References
    ==========
    [1] http://en.wikipedia.org/wiki/Nakagami_distribution
    """
    return NakagamiPSpace(name, mu, omega).value
#-------------------------------------------------------------------------------
# Normal distribution ----------------------------------------------------------
class NormalPSpace(SingleContinuousPSpace):
    """Probability space for the Normal distribution (mean, standard deviation)."""
    def __new__(cls, name, mean, std):
        mean, std = sympify(mean), sympify(std)
        _value_check(std > 0, "Standard deviation must be positive")
        x = Symbol(name)
        pdf = exp(-(x-mean)**2 / (2*std**2)) / (sqrt(2*pi)*std)
        obj = SingleContinuousPSpace.__new__(cls, x, pdf)
        # Stored so sample() and downstream code can reach the parameters.
        obj.mean = mean
        obj.std = std
        obj.variance = std**2
        return obj
    def sample(self):
        """Draw one random value using Python's random.normalvariate."""
        return {self.value: random.normalvariate(self.mean, self.std)}
def Normal(name, mean, std):
    r"""
    Create a continuous random variable with a Normal distribution.
    The density of the Normal distribution is given by
    .. math::
        f(x) := \frac{1}{\sigma\sqrt{2\pi}} e^{ -\frac{(x-\mu)^2}{2\sigma^2} }
    Parameters
    ==========
    mu : Real number, the mean
    sigma : Real number, :math:`\sigma > 0` the standard deviation
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Normal, density, E, std, cdf, skewness
    >>> from sympy import Symbol, simplify, pprint
    >>> mu = Symbol("mu")
    >>> sigma = Symbol("sigma", positive=True)
    >>> X = Normal("x", mu, sigma)
    >>> density(X)
    Lambda(_x, sqrt(2)*exp(-(_x - mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma))
    >>> C = simplify(cdf(X))
    >>> pprint(C, use_unicode=False)
    /       /  ___         \    \
    |       |\/ 2 *(z - mu)|    |
    |    erf|--------------|    |
    |       \   2*sigma    /   1|
    Lambda|z, ------------------- + -|
    \              2           2/
    >>> simplify(skewness(X))
    0
    >>> X = Normal("x", 0, 1) # Mean 0, standard deviation 1
    >>> density(X)
    Lambda(_x, sqrt(2)*exp(-_x**2/2)/(2*sqrt(pi)))
    >>> E(2*X + 1)
    1
    >>> simplify(std(2*X + 1))
    2
    References
    ==========
    [1] http://en.wikipedia.org/wiki/Normal_distribution
    [2] http://mathworld.wolfram.com/NormalDistributionFunction.html
    """
    return NormalPSpace(name, mean, std).value
#-------------------------------------------------------------------------------
# Pareto distribution ----------------------------------------------------------
class ParetoPSpace(SingleContinuousPSpace):
    """Probability space for the Pareto distribution (scale xm, shape alpha)."""
    def __new__(cls, name, xm, alpha):
        xm, alpha = sympify(xm), sympify(alpha)
        _value_check(xm > 0, "Xm must be positive")
        _value_check(alpha > 0, "Alpha must be positive")
        x = Symbol(name)
        pdf = alpha * xm**alpha / x**(alpha+1)
        # Support starts at the scale parameter xm, not at zero.
        obj = SingleContinuousPSpace.__new__(cls, x, pdf, set=Interval(xm, oo))
        obj.xm = xm
        obj.alpha = alpha
        return obj
    def sample(self):
        """Draw one random value using Python's random.paretovariate.

        NOTE(review): paretovariate samples a Pareto with xm == 1; the stored
        self.xm is not applied here — confirm whether scaling is intended.
        """
        return {self.value: random.paretovariate(self.alpha)}
def Pareto(name, xm, alpha):
    r"""
    Create a continuous random variable with the Pareto distribution.
    The density of the Pareto distribution is given by
    .. math::
        f(x) := \frac{\alpha\,x_\mathrm{m}^\alpha}{x^{\alpha+1}}
    with :math:`x \in [x_m,\infty]`.
    Parameters
    ==========
    xm : Real number, `xm` > 0 a scale
    alpha : Real number, `alpha` > 0 a shape
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Pareto, density
    >>> from sympy import Symbol
    >>> xm = Symbol("xm", positive=True)
    >>> beta = Symbol("beta", positive=True)
    >>> X = Pareto("x", xm, beta)
    >>> density(X)
    Lambda(_x, _x**(-beta - 1)*beta*xm**beta)
    References
    ==========
    [1] http://en.wikipedia.org/wiki/Pareto_distribution
    [2] http://mathworld.wolfram.com/ParetoDistribution.html
    """
    return ParetoPSpace(name, xm, alpha).value
#-------------------------------------------------------------------------------
# Rayleigh distribution --------------------------------------------------------
class RayleighPSpace(SingleContinuousPSpace):
    """Probability space for the Rayleigh distribution (scale sigma > 0)."""
    def __new__(cls, name, sigma):
        sigma = sympify(sigma)
        x = Symbol(name)
        pdf = x/sigma**2*exp(-x**2/(2*sigma**2))
        # Support is the non-negative half line.
        obj = SingleContinuousPSpace.__new__(cls, x, pdf, set = Interval(0, oo))
        return obj
def Rayleigh(name, sigma):
    r"""
    Create a continuous random variable with a Rayleigh distribution.
    The density of the Rayleigh distribution is given by
    .. math ::
        f(x) := \frac{x}{\sigma^2} e^{-x^2/2\sigma^2}
    with :math:`x > 0`.
    Parameters
    ==========
    sigma : Real number, `sigma` > 0
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Rayleigh, density, E, variance
    >>> from sympy import Symbol, simplify
    >>> sigma = Symbol("sigma", positive=True)
    >>> X = Rayleigh("x", sigma)
    >>> density(X)
    Lambda(_x, _x*exp(-_x**2/(2*sigma**2))/sigma**2)
    >>> E(X)
    sqrt(2)*sqrt(pi)*sigma/2
    >>> variance(X)
    -pi*sigma**2/2 + 2*sigma**2
    References
    ==========
    [1] http://en.wikipedia.org/wiki/Rayleigh_distribution
    [2] http://mathworld.wolfram.com/RayleighDistribution.html
    """
    return RayleighPSpace(name, sigma).value
#-------------------------------------------------------------------------------
# StudentT distribution --------------------------------------------------------
class StudentTPSpace(SingleContinuousPSpace):
    """Probability space for Student's t distribution (nu degrees of freedom)."""
    def __new__(cls, name, nu):
        nu = sympify(nu)
        x = Symbol(name)
        # Written via the Beta function: B(1/2, nu/2) == Gamma(1/2)Gamma(nu/2)/Gamma((nu+1)/2).
        pdf = 1/(sqrt(nu)*beta_fn(S(1)/2,nu/2))*(1+x**2/nu)**(-(nu+1)/2)
        obj = SingleContinuousPSpace.__new__(cls, x, pdf)
        return obj
def StudentT(name, nu):
    r"""
    Create a continuous random variable with a student's t distribution.
    The density of the student's t distribution is given by
    .. math::
        f(x) := \frac{\Gamma \left(\frac{\nu+1}{2} \right)}
                {\sqrt{\nu\pi}\Gamma \left(\frac{\nu}{2} \right)}
                \left(1+\frac{x^2}{\nu} \right)^{-\frac{\nu+1}{2}}
    Parameters
    ==========
    nu : Real number, `nu` > 0, the degrees of freedom
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import StudentT, density, E, variance
    >>> from sympy import Symbol, simplify, pprint
    >>> nu = Symbol("nu", positive=True)
    >>> X = StudentT("x", nu)
    >>> D = density(X)
    >>> pprint(D, use_unicode=False)
    /                        nu   1        \
    |                      - -- - -        |
    |                        2    2        |
    |          /     2    \                |
    |          |x        |      /nu   1\  |
    |          |-- + 1|     *gamma|-- + -| |
    |          \nu       /        \2    2/ |
    Lambda|x, ------------------------------|
    |        ____   ____      /nu\        |
    |      \/ pi *\/ nu *gamma|--|        |
    \                         \2 /        /
    References
    ==========
    [1] http://en.wikipedia.org/wiki/Student_t-distribution
    [2] http://mathworld.wolfram.com/Studentst-Distribution.html
    """
    return StudentTPSpace(name, nu).value
#-------------------------------------------------------------------------------
# Triangular distribution ------------------------------------------------------
class TriangularPSpace(SingleContinuousPSpace):
    """Probability space for the triangular distribution (left a, right b, mode c)."""
    def __new__(cls, name, a, b, c):
        a, b, c = sympify(a), sympify(b), sympify(c)
        x = Symbol(name)
        # Piecewise density: rising on [a, c), peak 2/(b-a) at c, falling on (c, b],
        # zero elsewhere.
        pdf = Piecewise(
                (2*(x-a)/((b-a)*(c-a)), And(a<=x, x<c)),
                (2/(b-a), Eq(x,c)),
                (2*(b-x)/((b-a)*(b-c)), And(c<x, x<=b)),
                (S.Zero, True))
        obj = SingleContinuousPSpace.__new__(cls, x, pdf)
        return obj
def Triangular(name, a, b, c):
    r"""
    Create a continuous random variable with a triangular distribution.
    The density of the triangular distribution is given by
    .. math::
        f(x) := \begin{cases}
                  0 & \mathrm{for\ } x < a, \\
                  \frac{2(x-a)}{(b-a)(c-a)} & \mathrm{for\ } a \le x < c, \\
                  \frac{2}{b-a} & \mathrm{for\ } x = c, \\
                  \frac{2(b-x)}{(b-a)(b-c)} & \mathrm{for\ } c < x \le b, \\
                  0 & \mathrm{for\ } b < x.
                \end{cases}
    Parameters
    ==========
    a : Real number, :math:`a \in \left(-\infty, \infty\right)`
    b : Real number, :math:`a < b`
    c : Real number, :math:`a \leq c \leq b`
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Triangular, density, E
    >>> from sympy import Symbol
    >>> a = Symbol("a")
    >>> b = Symbol("b")
    >>> c = Symbol("c")
    >>> X = Triangular("x", a,b,c)
    >>> density(X)
    Lambda(_x, Piecewise(((2*_x - 2*a)/((-a + b)*(-a + c)),
                        And(_x < c, a <= _x)),
                        (2/(-a + b), _x == c),
                        ((-2*_x + 2*b)/((-a + b)*(b - c)),
                        And(_x <= b, c < _x)),
                        (0, True)))
    References
    ==========
    [1] http://en.wikipedia.org/wiki/Triangular_distribution
    [2] http://mathworld.wolfram.com/TriangularDistribution.html
    """
    return TriangularPSpace(name, a, b, c).value
#-------------------------------------------------------------------------------
# Uniform distribution ---------------------------------------------------------
class UniformPSpace(SingleContinuousPSpace):
    """Probability space for the continuous uniform distribution on [left, right]."""
    def __new__(cls, name, left, right):
        left, right = sympify(left), sympify(right)
        x = Symbol(name)
        # Zero outside [left, right], constant 1/(right-left) inside.
        pdf = Piecewise(
                (S.Zero, x<left),
                (S.Zero, x>right),
                (S.One/(right-left), True))
        obj = SingleContinuousPSpace.__new__(cls, x, pdf)
        # Stored so sample() can reach the bounds without re-deriving them.
        obj.left = left
        obj.right = right
        return obj
    def sample(self):
        """Draw one random value using Python's random.uniform."""
        return {self.value: random.uniform(self.left, self.right)}
def Uniform(name, left, right):
    r"""
    Create a continuous random variable with a uniform distribution.
    The density of the uniform distribution is given by
    .. math::
        f(x) := \begin{cases}
                  \frac{1}{b - a} & \text{for } x \in [a,b]  \\
                  0               & \text{otherwise}
                \end{cases}
    with :math:`x \in [a,b]`.
    Parameters
    ==========
    a : Real number, :math:`-\infty < a` the left boundary
    b : Real number, :math:`a < b < \infty` the right boundary
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Uniform, density, cdf, E, variance, skewness
    >>> from sympy import Symbol, simplify
    >>> a = Symbol("a")
    >>> b = Symbol("b")
    >>> X = Uniform("x", a, b)
    >>> density(X)
    Lambda(_x, Piecewise((0, _x < a), (0, _x > b), (1/(-a + b), True)))
    >>> cdf(X)
    Lambda(_z, _z/(-a + b) - a/(-a + b))
    >>> simplify(E(X))
    a/2 + b/2
    >>> simplify(variance(X))
    a**2/12 - a*b/6 + b**2/12
    >>> simplify(skewness(X))
    0
    References
    ==========
    [1] http://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29
    [2] http://mathworld.wolfram.com/UniformDistribution.html
    """
    return UniformPSpace(name, left, right).value
#-------------------------------------------------------------------------------
# UniformSum distribution ------------------------------------------------------
class UniformSumPSpace(SingleContinuousPSpace):
    """Probability space for the Irwin-Hall (uniform-sum) distribution of n terms."""
    def __new__(cls, name, n):
        n = sympify(n)
        x = Symbol(name)
        # Dummy summation index so it cannot clash with user symbols.
        k = Dummy("k")
        pdf = 1/factorial(n-1)*Sum((-1)**k*binomial(n,k)*(x-k)**(n-1), (k,0,floor(x)))
        # The sum of n uniforms on [0, 1] is supported on [0, n].
        obj = SingleContinuousPSpace.__new__(cls, x, pdf, set=Interval(0,n))
        return obj
def UniformSum(name, n):
    r"""
    Create a continuous random variable with an Irwin-Hall distribution.
    The probability distribution function depends on a single parameter
    `n` which is an integer.
    The density of the Irwin-Hall distribution is given by
    .. math ::
        f(x) := \frac{1}{(n-1)!}\sum_{k=0}^{\lfloor x\rfloor}(-1)^k
                \binom{n}{k}(x-k)^{n-1}
    Parameters
    ==========
    n : Integral number, `n` > 0
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import UniformSum, density
    >>> from sympy import Symbol, pprint
    >>> n = Symbol("n", integer=True)
    >>> X = UniformSum("x", n)
    >>> D = density(X)
    >>> pprint(D, use_unicode=False)
    /    floor(x)                        \
    |      ___                           |
    |      \  `                          |
    |       \         k         n - 1 /n\|
    |        )    (-1) *(-k + x)     *| ||
    |       /                         \k/|
    |      /__,                          |
    |     k = 0                          |
    Lambda|x, --------------------------------|
    \             (n - 1)!               /
    References
    ==========
    [1] http://en.wikipedia.org/wiki/Uniform_sum_distribution
    [2] http://mathworld.wolfram.com/UniformSumDistribution.html
    """
    return UniformSumPSpace(name, n).value
#-------------------------------------------------------------------------------
# Weibull distribution ---------------------------------------------------------
class WeibullPSpace(SingleContinuousPSpace):
    """Probability space for the Weibull distribution (scale alpha, shape beta)."""
    def __new__(cls, name, alpha, beta):
        alpha, beta = sympify(alpha), sympify(beta)
        _value_check(alpha > 0, "Alpha must be positive")
        _value_check(beta > 0, "Beta must be positive")
        x = Symbol(name)
        pdf = beta * (x/alpha)**(beta-1) * exp(-(x/alpha)**beta) / alpha
        obj = SingleContinuousPSpace.__new__(cls, x, pdf, set=Interval(0, oo))
        # Stored so sample() can reach the parameters without re-deriving them.
        obj.alpha = alpha
        obj.beta = beta
        return obj
    def sample(self):
        """Draw one random value using Python's random.weibullvariate."""
        return {self.value: random.weibullvariate(self.alpha, self.beta)}
def Weibull(name, alpha, beta):
    r"""
    Create a continuous random variable with a Weibull distribution.
    The density of the Weibull distribution is given by
    .. math::
        f(x) := \begin{cases}
                  \frac{k}{\lambda}\left(\frac{x}{\lambda}\right)^{k-1}
                  e^{-(x/\lambda)^{k}} & x\geq0\\
                  0 & x<0
                \end{cases}
    Parameters
    ==========
    alpha : Real number, `alpha` > 0, the scale :math:`\lambda`
    beta : Real number, `beta` > 0, the shape :math:`k`
    Returns
    =======
    A RandomSymbol.
    Examples
    ========
    >>> from sympy.stats import Weibull, density, E, variance
    >>> from sympy import Symbol, simplify
    >>> l = Symbol("lambda", positive=True)
    >>> k = Symbol("k", positive=True)
    >>> X = Weibull("x", l, k)
    >>> density(X)
    Lambda(_x, k*(_x/lambda)**(k - 1)*exp(-(_x/lambda)**k)/lambda)
    >>> simplify(E(X))
    lambda*gamma(1 + 1/k)
    >>> simplify(variance(X))
    lambda**2*(-gamma(1 + 1/k)**2 + gamma(1 + 2/k))
    References
    ==========
    [1] http://en.wikipedia.org/wiki/Weibull_distribution
    [2] http://mathworld.wolfram.com/WeibullDistribution.html
    """
    return WeibullPSpace(name, alpha, beta).value
#-------------------------------------------------------------------------------
# Wigner semicircle distribution -----------------------------------------------
class WignerSemicirclePSpace(SingleContinuousPSpace):
    """Probability space for the Wigner semicircle distribution (radius R > 0)."""
    def __new__(cls, name, R):
        R = sympify(R)
        x = Symbol(name)
        pdf = 2/(pi*R**2)*sqrt(R**2-x**2)
        # Support is the symmetric interval [-R, R].
        obj = SingleContinuousPSpace.__new__(cls, x, pdf, set = Interval(-R, R))
        return obj
def WignerSemicircle(name, R):
    r"""
    Create a continuous random variable with a Wigner semicircle distribution.
    The density of the Wigner semicircle distribution is given by
    .. math::
        f(x) := \frac2{\pi R^2}\,\sqrt{R^2-x^2}
    with :math:`x \in [-R,R]`.
    Parameters
    ==========
    R : Real number, `R` > 0 the radius
    Returns
    =======
    A `RandomSymbol`.
    Examples
    ========
    >>> from sympy.stats import WignerSemicircle, density, E
    >>> from sympy import Symbol, simplify
    >>> R = Symbol("R", positive=True)
    >>> X = WignerSemicircle("x", R)
    >>> density(X)
    Lambda(_x, 2*sqrt(-_x**2 + R**2)/(pi*R**2))
    >>> E(X)
    0
    References
    ==========
    [1] http://en.wikipedia.org/wiki/Wigner_semicircle_distribution
    [2] http://mathworld.wolfram.com/WignersSemicircleLaw.html
    """
    return WignerSemicirclePSpace(name, R).value
| true |
6cda42f9c8a89c338a7390515b034531be02e908 | Python | kikofernandez/match-terminal | /match/backend/match.py | UTF-8 | 369 | 2.828125 | 3 | [] | no_license | from db import Database
class Matcher(object):
    """Selects a matching user for a request via a pluggable strategy."""

    def __init__(self, strategy):
        """Store the strategy and open a Database handle for lookups."""
        self._strategy = strategy
        self._db = Database()

    def select_match(self, request):
        """Delegate selection to the strategy, record *request* as pending
        on the chosen user, and return that user.
        """
        match = self._strategy.select_match(self._db, request)
        match.pending.append(request)  # append item to list, non-atomic op
        return match
| true |
60b8f00d36f9529ba96c265d49b2643772f49047 | Python | world9781/Sinamics_Testing_interface | /urwid_dummy.py | UTF-8 | 4,291 | 2.78125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
import urwid
import logging
# import pydevd
# pydevd.settrace('localhost', port=8000, stdoutToServer=True, stderrToServer=True)
class urwidHandler(logging.Handler):
    """
    A logging handler that writes formatted records into a urwid.Text widget.

    The widget always shows only the most recent record; embed it in a layout
    via get_log().
    """
    # NOTE: the previous class-level `_urwid_log = []` was removed — it was a
    # mutable class attribute that every instance immediately shadowed in
    # __init__, so it only invited accidental shared state.
    def __init__(self):
        logging.Handler.__init__(self)
        # Per-instance widget holding the last formatted record.
        self._urwid_log = urwid.Text('')

    def emit(self, record):
        """
        Update message to urwid logger field.
        """
        msg = self.format(record)
        self._urwid_log.set_text(msg)

    def get_log(self):
        """Return the urwid.Text widget backing this handler."""
        return self._urwid_log
main_choices = ['Toggle ON/OFF', 'Set Speed', 'Change V/F']
def menu(title, choices):
body = [urwid.Text(title), urwid.Divider()]
for c in choices:
button = urwid.Button(c)
urwid.connect_signal(button, 'click', item_chosen, c)
body.append(urwid.AttrMap(button, None, focus_map='reversed'))
# append quit option
quit_button = urwid.Button('Quit')
urwid.connect_signal(quit_button, 'click', exit_program)
body.append(urwid.AttrMap(quit_button, None, focus_map='reversed'))
return urwid.ListBox(urwid.SimpleFocusListWalker(body))
def return_main(button):
    """Button callback: swap the main menu widget back into view."""
    main_menu.original_widget = urwid.Padding(menu_render)
def set_seed(button, response):
    """'Ok' callback of the Set Speed dialog: parse the RPM input and update
    the speed display, then restore the main menu in all cases.

    NOTE(review): the name is presumably a typo for set_speed; kept because
    item_chosen connects to this exact name.
    """
    try:
        velocity = int(response.edit_text)
        body_speed.set_text('{0:+05d} RPM'.format(velocity))
    except ValueError:
        # Non-integer input: report via the on-screen logger, leave speed as-is.
        logging.info("Velocity value must be an integer")
    finally:
        main_menu.original_widget = urwid.Padding(menu_render)
def item_chosen(button, choice):
    """Dispatch a main-menu selection to the matching dialog.

    NOTE(review): the 'Change V/F' choice from main_choices has no branch
    here, so selecting it currently does nothing — confirm if intentional.
    """
    if choice == 'Toggle ON/OFF':
        # Confirmation screen only; Ok returns to the main menu.
        response = urwid.Text([u'You chose ', choice, u'\n'])
        done = urwid.Button(u'Ok')
        urwid.connect_signal(done, 'click', return_main)
        main_menu.original_widget = urwid.Filler(
            urwid.Pile([response, urwid.AttrMap(done, None, focus_map='reversed')]))
    elif choice == 'Set Speed':
        # Edit field for RPMs; Ok hands the widget to set_seed for parsing.
        response = urwid.Edit(caption='Enter RPMs\n', edit_text='0')
        done = urwid.Button(u'Ok')
        urwid.connect_signal(done, 'click', set_seed, response)
        main_menu.original_widget = urwid.Filler(
            urwid.Pile([response, urwid.AttrMap(done, None, focus_map='reversed')]))
def exit_program(button):
    """Quit-button callback: stop the urwid main loop."""
    raise urwid.ExitMainLoop()
def quit_on_q(key):
    """Unhandled-input callback: exit the main loop when 'q' is pressed.

    Consistency fix: raise an ExitMainLoop *instance*, matching exit_program;
    raising the bare class relied on Python's implicit instantiation.
    """
    if key == 'q':
        raise urwid.ExitMainLoop()
def trigger_log(loop=None, data=None):
    """urwid alarm callback: emit one demo record via the root logger.

    The alarm is not re-armed here, so this fires a single time.
    """
    logging.info("here is some text without meaning!")
# set up logging to file - see previous section for more details
logging.basicConfig(level=logging.DEBUG,
                    format='[%(asctime)s] [%(name)-12s] [%(levelname)-8s] %(message)s',
                    datefmt='%m-%d %H:%M',
                    filename='mylog.log',
                    filemode='w')
# create handler for logger
formatter = logging.Formatter('%(name)-20s: %(levelname)-8s %(message)s')
root_logger = logging.getLogger('')
# On-screen handler: INFO and above also appear in the urwid log widget.
body_logger = urwidHandler()
body_logger.setLevel(logging.INFO)
body_logger.setFormatter(formatter)
root_logger.addHandler(body_logger)
# create frame for speed report
body_speed = urwid.Text('{0:+05d} RPM'.format(0))
header_speed = urwid.Text(['Estimated Speed'])
# create frame for current report
body_current = urwid.Text('{0:+08.2f} Arms'.format(0))
header_current = urwid.Text(['Estimated Current smoothed'])
# create logger window
header_logger = urwid.Text('Last 3 log messages')
menu_render = menu(u'Sinamics options', main_choices)
main_menu = urwid.Padding(menu_render, align='center', left=1, width=20)
# Assemble the screen top-to-bottom: speed, current, log, then the menu.
rows = []
rows.append(header_speed)
rows.append(body_speed)
rows.append(urwid.Divider('-', top=1, bottom=1))
rows.append(header_current)
rows.append(body_current)
rows.append(urwid.Divider('-', top=2, bottom=2))
rows.append(header_logger)
# NOTE(review): reaches into the handler's private widget; get_log() exists
# for exactly this purpose.
rows.append(body_logger._urwid_log)
rows.append(urwid.Divider('-', top=2, bottom=2))
rows.append((6, main_menu))
pile = urwid.Pile(rows)
rows_filler = urwid.Filler(pile, valign='top', top=1, bottom=1)
v_padding = urwid.Padding(rows_filler, left=1, right=1)
rows_box = urwid.LineBox(v_padding)
# 'q' quits from anywhere; a one-shot demo log fires after 5 seconds.
main_loop = urwid.MainLoop(rows_box, unhandled_input=quit_on_q)
main_loop.set_alarm_in(5, trigger_log)
main_loop.run()
| true |
8db40c0a5d23ebe94e1127b0bb618a0451d8560c | Python | bigrobinson/Training-Data-Splitter | /org_data.py | UTF-8 | 4,951 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 21 14:31:37 2019
@author: Brian Robinson
"""
import os
import random
import shutil
def split_data(root_dir, sub_dirs, test_ratio=0.1, val_ratio=0.1):
    # Function to split data and labels into train, test, and validation sets
    # INPUTS:
    # root_dir = path to top level directory where data and labels are stored
    # sub_dirs = directories under root where data and labels for different classes
    # are kept--label directory must be named like <data_directory>_labels
    # test_ratio = proportion of data held out for testing
    # val_ratio = proportion of data held out for validation
    # OUTPUTS:
    # Returns None but creates images/ and labels/ directories with the splits.

    # BUG FIX: the original wrapped a bare boolean expression in
    # try/except ValueError -- expressions do not raise, so invalid
    # ratios silently slipped through.  Validate explicitly instead.
    if not (0 < test_ratio <= 1 and 0 < val_ratio <= 1
            and 0 < test_ratio + val_ratio <= 1):
        raise ValueError(
            'Test and validation ratios and their sum must lie between 0 and 1')

    # Recreate the images/ and labels/ trees (train/test/val under each),
    # wiping results from any previous run.
    for data_type in ('images', 'labels'):
        type_dir = os.path.join(root_dir, data_type)
        if os.path.isdir(type_dir):
            shutil.rmtree(type_dir)
        os.mkdir(type_dir)
        for dir_type in ('train', 'test', 'val'):
            os.mkdir(os.path.join(type_dir, dir_type))

    # One listing file per split; `with` guarantees they are closed even
    # if copying fails part-way ('w+' truncates files from earlier runs).
    with open(os.path.join(root_dir, 'train.txt'), 'w+') as train_txt, \
         open(os.path.join(root_dir, 'test.txt'), 'w+') as test_txt, \
         open(os.path.join(root_dir, 'val.txt'), 'w+') as val_txt:
        for images_dir in sub_dirs:
            images_files = os.listdir(os.path.join(root_dir, images_dir))
            num_images = len(images_files)
            print('The number of ' + images_dir + ' images is: ' + str(num_images))
            num_test = int(test_ratio * num_images)
            num_val = int(val_ratio * num_images)
            num_train = num_images - num_test - num_val
            # Copy the three contiguous slices of the listing into their splits.
            _copy_split(root_dir, images_dir,
                        images_files[0:num_train], 'train', train_txt)
            _copy_split(root_dir, images_dir,
                        images_files[num_train:num_train + num_test], 'test', test_txt)
            _copy_split(root_dir, images_dir,
                        images_files[num_train + num_test:num_images], 'val', val_txt)
    return


def _copy_split(root_dir, images_dir, files, split, txt_file):
    # Helper: copy each image (and its .txt label twin) into the `split`
    # folders and record the image path in the split's listing file.
    for file in files:
        # GENERALIZED: os.path.splitext handles extensions of any length;
        # the original file[:-3] + 'txt' broke for e.g. '.jpeg'.
        label_name = os.path.splitext(file)[0] + '.txt'
        label_path = os.path.join(root_dir, images_dir + '_labels', label_name)
        if os.path.isfile(label_path):
            txt_file.write(os.path.join(root_dir, 'images', split, file) + '\n')
            shutil.copy(os.path.join(root_dir, images_dir, file),
                        os.path.join(root_dir, 'images', split))
            shutil.copy(label_path, os.path.join(root_dir, 'labels', split))
        else:
            print('WARNING: Label file \n' + label_path +
                  '\n' + 'does not exist, go to next file')
# Example driver: split five class folders into train/test/val (80/10/10).
if __name__ == '__main__':
    test_ratio = 0.1  # fraction held out for testing
    val_ratio = 0.1   # fraction held out for validation
    # NOTE(review): Python does not expand '$HOME'; os.path.expanduser('~')
    # would be needed for the apparent intent -- confirm before running.
    root_dir = '$HOME/data/'
    sub_dirs = ['Class1',
                'Class2',
                'Class3',
                'Class4',
                'Negatives']
    split_data(root_dir, sub_dirs, test_ratio, val_ratio)
| true |
2bca29b6f2e9464caec8869960081f71f653eade | Python | chipperrip/IN1900 | /veke 6/water_wave_velocity.py | UTF-8 | 1,230 | 3.703125 | 4 | [] | no_license | """
Exercise 5.31: Explore a complicated function graphically
The wave speed c of water surface waves depends on the length lambda of the waves.
The following formula relates c to lambda:
c(lambda) = sqrt( (g*lambda)/(2*pi) * (1 + s*(4*pi**2)/(rho*g*lambda**2)) * tanh((2*pi*h)/lambda) )
"""
import numpy as np
import matplotlib.pyplot as plt
#wave speed c in m/s as function of the length lambda of the waves
def c(l):
    """Wave speed (m/s) of surface water waves of wavelength *l* (metres).

    Implements c(l) = sqrt( g*l/(2*pi) * (1 + s*4*pi**2/(rho*g*l**2))
                            * tanh(2*pi*h/l) ).
    Works elementwise on NumPy arrays as well as on scalars.
    """
    g = 9.81     # acceleration of gravity, m/s^2
    s = 7.9e-2   # air-water surface tension, N/m
    rho = 1000   # density of water, kg/m^3
    h = 50       # water depth, m
    gravity_term = (g * l) / (2 * np.pi)
    tension_term = 1 + s * ((4 * np.pi ** 2) / (rho * g * l ** 2))
    depth_term = np.tanh((2 * np.pi * h) / l)
    return np.sqrt(gravity_term * tension_term * depth_term)
# Short waves (capillary regime), sampled densely.
small_lambdas = np.linspace(0.001,0.1, 2001)
# Long waves (gravity regime).
large_lambdas = np.linspace(1, 2000)
small_c = c(small_lambdas)
large_c = c(large_lambdas)
plt.title('Water-wave velocity')
plt.plot(small_lambdas, small_c, label = 'l=[0.001,0.1]')
plt.legend()
# create a new figure (original Norwegian comment: "lag ny figur")
plt.figure()
plt.title('Water-wave velocity')
plt.plot(large_lambdas, large_c, 'r-', label = 'l=[1,2000]')
plt.legend()
plt.grid()
plt.show()
# (Norwegian: "Two windows with figures that look nice.")
"""
To vindauge med figurar som ser fine ut
"""
| true |
7e66349ea550a641678b158cd0b5339d67cef68a | Python | Litwilly/fitbit-python | /sample-get-sleep-data.py | UTF-8 | 1,163 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# https://dev.fitbit.com/docs
import requests
import json
import time
import datetime
import os
# Execute the shared token-refresh helper in this namespace (path is a
# placeholder).  NOTE(review): execfile() exists only in Python 2; under
# Python 3 this would need exec(open(filename).read()).
filename = "{path to]/refresh.py"
execfile(filename)
def get_sleep(datevar):
    """Fetch the Fitbit sleep log for one day.

    datevar: date string formatted "YYYY-MM-DD", e.g. "2016-03-23".
    Returns the decoded JSON response from the Fitbit API.
    """
    url = "https://api.fitbit.com/1/user/-/sleep/date/" + datevar + ".json"
    access_path = "{path declared in refresh.py}/access.txt"
    # Read the OAuth access token (first line of access.txt).  `with`
    # guarantees the file is closed even if the read raises; the original
    # leaked the handle in that case.
    with open(access_path, "r") as opr:
        token = opr.readline().strip()
    access_token = "Bearer %s" % (token)
    headers = {
        'authorization': access_token,
        'cache-control': "no-cache"
    }
    response = requests.request("GET", url, headers=headers)
    return response.json()
# Get today's date in the format the Fitbit API expects.
to = time.strftime("%Y-%m-%d")
# BUG FIX: the original discarded the result of get_sleep(), so the
# `todayvar` lookups below raised NameError; capture the returned JSON.
todayvar = get_sleep(to)
# If there is sleep data, print selected fields of the first record.
if len(todayvar['sleep']) >= 1:
    myvar = todayvar['sleep'][0]['isMainSleep']
    startTime = todayvar['sleep'][0]['startTime']
    print(myvar)
    print(startTime)
else:
    print("No Sleep Data")
| true |
0a9e1b9981cfc14a153fed7fbb158b5104d4fa8a | Python | TharunMohandoss/MNIST | /Generators/Generator.py | UTF-8 | 2,170 | 2.578125 | 3 | [] | no_license | import torch.nn as nn
import torch.nn.functional as F
import torch
from utils.custom_layers import EqualizedConv2d, NormalizationLayer, EqualizedLinear
import numpy as np
#in,out,kernel_size,stride,padding
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.random_vector_size = 50
self.no_of_upscales = 3
out_channels_final = 1
self.upscale_list = nn.ModuleList([])
self.to_rgb_list = nn.ModuleList([])
self.dense = EqualizedLinear(self.random_vector_size, 4*4*50)
self.conv1 = EqualizedConv2d(50,50,3,1)
self.to_rgb_4x4 = EqualizedConv2d(50,out_channels_final,1,0)
self.conv_1 = EqualizedConv2d(50,50,3,1)
self.conv_2 = EqualizedConv2d(50,50,3,1)
self.to_rgb1 = EqualizedConv2d(50,out_channels_final,1,0)
self.conv_3 = EqualizedConv2d(50,50,3,1)
self.conv_4 = EqualizedConv2d(50,50,3,1)
self.to_rgb2 = EqualizedConv2d(50,out_channels_final,1,0)
self.conv_5 = EqualizedConv2d(50,25,3,1)
self.conv_6 = EqualizedConv2d(25,25,3,1)
self.to_rgb3 = EqualizedConv2d(25,out_channels_final,1,0)
self.norm_layer = NormalizationLayer()
self.lr = nn.LeakyReLU(negative_slope=0.2)
def forward(self,batch_size):
# batch_size = len(one_hot)
out_images_list = []
z = torch.randn(batch_size, self.random_vector_size).cuda()
# z = torch.cat( (z,one_hot),1)
x = self.dense(z)
x = torch.reshape(x,(batch_size,50,4,4))
x = self.lr(self.norm_layer(self.conv1(x)))
image_4x4 = self.to_rgb_4x4(x)
out_images_list.append(image_4x4)
x = F.interpolate(x,scale_factor=2,mode='nearest')
x = self.lr(self.norm_layer(self.conv_1(x)))
x = self.lr(self.norm_layer(self.conv_2(x)))
image = self.to_rgb1(x)
out_images_list.append(image)
x = F.interpolate(x,scale_factor=2,mode='nearest')
x = self.lr(self.norm_layer(self.conv_3(x)))
x = self.lr(self.norm_layer(self.conv_4(x)))
image2 = self.to_rgb2(x)
out_images_list.append(image2)
x = F.interpolate(x,scale_factor=2,mode='nearest')
x = self.lr(self.norm_layer(self.conv_5(x)))
x = self.lr(self.norm_layer(self.conv_6(x)))
image3 = self.to_rgb3(x)
out_images_list.append(image3)
return out_images_list
| true |
bc0fb7f1bf4d27e0636bd59071be7b18cc80be51 | Python | Andrewnplus/leetCodeChallenges | /leetcode/editor/src/Stack/[155][Easy]Min Stack.py | UTF-8 | 1,895 | 3.84375 | 4 | [] | no_license | # Design a stack that supports push, pop, top, and retrieving the minimum elemen
# t in constant time.
#
# Implement the MinStack class:
#
#
# MinStack() initializes the stack object.
# void push(val) pushes the element val onto the stack.
# void pop() removes the element on the top of the stack.
# int top() gets the top element of the stack.
# int getMin() retrieves the minimum element in the stack.
#
#
#
# Example 1:
#
#
# Input
# ["MinStack","push","push","push","getMin","pop","top","getMin"]
# [[],[-2],[0],[-3],[],[],[],[]]
#
# Output
# [null,null,null,null,-3,null,0,-2]
#
# Explanation
# MinStack minStack = new MinStack();
# minStack.push(-2);
# minStack.push(0);
# minStack.push(-3);
# minStack.getMin(); // return -3
# minStack.pop();
# minStack.top(); // return 0
# minStack.getMin(); // return -2
#
#
#
# Constraints:
#
#
# -2^31 <= val <= 2^31 - 1
# Methods pop, top and getMin operations will always be called on non-empty sta
# cks.
# At most 3 * 10^4 calls will be made to push, pop, top, and getMin.
#
# Related Topics Stack Design
# 👍 4847 👎 453
# leetcode submit region begin(Prohibit modification and deletion)
import sys
import unittest
class MinStack:
    """Stack that can report its minimum; every operation is O(1).

    Each entry stores a (value, minimum-so-far) pair, so the current
    minimum always sits on top of the stack.
    """

    def __init__(self):
        # List of (value, running_min) tuples.
        self.stack = []

    def push(self, x):
        """Push x, remembering the smallest value seen so far."""
        running_min = self.getMin()
        if x < running_min:
            running_min = x
        self.stack.append((x, running_min))

    def pop(self):
        """Remove the top element (IndexError on an empty stack)."""
        del self.stack[-1]

    def top(self):
        """Return the top value, or None when the stack is empty."""
        return self.stack[-1][0] if self.stack else None

    def getMin(self):
        """Return the current minimum, or sys.maxsize when empty."""
        if not self.stack:
            return sys.maxsize
        return self.stack[-1][1]
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(val)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
# leetcode submit region end(Prohibit modification and deletion)
| true |
0fecbc2d1420871e7cf32ac418cf0fbb85a11492 | Python | bsmrvl/twitoff | /twitoff/twitter.py | UTF-8 | 1,497 | 2.875 | 3 | [
"MIT"
] | permissive | """Functions for connecting to Twitter API, retrieving tweets, and vectorizing them."""
from os import getenv
import pickle
import spacy
import tweepy
from .models import DB, Tweet, User
# OAuth handshake with the Twitter API; keys are read from the environment.
TWITTER_AUTH = tweepy.OAuthHandler(getenv('TWITTER_API_KEY'),
                                   getenv('TWITTER_API_KEY_SECRET'))
TWITTER = tweepy.API(TWITTER_AUTH)
# nlp = pickle.load(open('final_pickle', 'rb'))
# spaCy pipeline used by add_update_user() to vectorize tweet text.
nlp = spacy.load('nlp_model')
def add_update_user(username):
    """Add or refresh a Twitter user and their tweets in the database.

    Returns the number of newly stored tweets (0 when there are none),
    or -1 when fetching/storing fails for any reason -- not only when
    the user does not exist.
    """
    try:
        twit_user = TWITTER.get_user(username)
        db_user = User.query.get(twit_user.id) \
            or User(id=twit_user.id, name=username)
        DB.session.add(db_user)
        # Only fetch tweets newer than the last one already stored.
        tweets = twit_user.timeline(
            count=200,
            exclude_replies=True,
            include_rts=False,
            tweet_mode='extended',
            since_id=db_user.newest_tweet_id
        )
        if tweets:
            db_user.newest_tweet_id = tweets[0].id
        for tweet in tweets:
            t_text = tweet.full_text
            db_tweet = Tweet(
                id=tweet.id,
                text=t_text,
                vect=nlp(t_text).vector  # spaCy document vector
            )
            db_user.tweets.append(db_tweet)
            DB.session.add(db_tweet)
        DB.session.commit()
        # len([]) == 0, so the empty case needs no separate branch.
        return len(tweets)
    except Exception:
        # FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; catch real errors only.
        return -1
a16fae064eec6a78b83fe8a60220857ccb1a6c9b | Python | Shaunwei/Leetcode-python-1 | /String/SimplifyPath/simplifyPath.py | UTF-8 | 1,404 | 4.09375 | 4 | [] | no_license | #!/usr/bin/python
# Simplify Path
#Given an absolute path for a file (Unix-style), simplify it.
#For example,
#path = "/home/", => "/home"
#path = "/a/./b/../../c/", => "/c"
#click to show corner cases.
#Corner Cases:
#Did you consider the case where path = "/../"?
#In this case, you should return "/".
#Another corner case is the path might contain multiple slashes '/' together, such as "/home//foo/".
#In this case, you should ignore redundant slashes and return "/home/foo".
class Solution:
    # @param path, a string
    # @return a string
    def simplifyPath(self, path):
        """Collapse '.', '..' and repeated slashes in an absolute Unix path."""
        parts = []
        for segment in path.split('/'):
            if segment == '' or segment == '.':
                continue  # empty (from '//' or edges) and '.' are no-ops
            if segment == '..':
                if parts:
                    parts.pop()  # step up, but never above the root
            else:
                parts.append(segment)
        return '/' + '/'.join(parts)
if __name__=="__main__":
    # Smoke-test the four documented corner cases.
    # NOTE(review): Python 2 print statements -- this file predates py3.
    path1 = '/home/'
    path2 = '/a/./b/../../c/'
    path3 = '/../'
    path4 = '/home//foo/'
    print Solution().simplifyPath(path1)
    print Solution().simplifyPath(path2)
    print Solution().simplifyPath(path3)
    print Solution().simplifyPath(path4)
'''
(1) Use a stack to store the path.
(2) Use a int flag to store the '/' pair
(3) First remove the "//" in the path.
(4) meets ".", do nothing, meets ".." pop stack if not empty, other strings push into stack.
'''
| true |
a176e16468b8250f2e0bba3c6af56a20e1655c53 | Python | notesonartificialintelligence/07-01-20 | /chapter_9/my_electric_car.py | UTF-8 | 314 | 2.96875 | 3 | [] | no_license | #Gabriel Abraham
#notesonartificialintelligence
#Python Crash Course - Chapter 9
#Import the electricCar class from the file car
from electric_car import ElectricCar
# Build a 2019 Tesla Model S and exercise its battery API.
my_tesla = ElectricCar('tesla', 'model s', '2019')
print(my_tesla.get_descriptive_name())
my_tesla.battery.describe_battery()
# NOTE(review): called for its printed output; any return value is discarded.
my_tesla.battery.get_range()
81ddb6d489cd27d775084eec62b6569ed7117c25 | Python | orange9426/FOGs | /solver/pomcp/obs_node.py | UTF-8 | 1,086 | 3.0625 | 3 | [] | no_license | import numpy as np
class ObservationNode(object):
    """A node that represents the observation in the search tree."""
    def __init__(self, obs, depth=-1):
        self.obs = obs          # observation this node corresponds to
        self.depth = depth      # depth in the search tree (-1 = unset)
        self.visit_count = 0    # number of simulations through this node
        self.particle_bin = []  # sampled states approximating the belief
        self.children = []      # child action nodes
    def find_child(self, action):
        """Returns the child action node according to the given action."""
        # Collect all children matching the action, then pick one at random
        # (normally at most one child exists per action).
        candi = [c for c in self.children if c.action == action]
        if candi:
            return np.random.choice(candi)
        else:
            return None
    def find_child_by_uct(self, uct_c):
        """Randomly returns a child action node according to uct policy."""
        # NOTE(review): max() breaks ties by first occurrence, not randomly
        # as the docstring suggests.
        return max(self.children,
                   key=lambda c: c.uct_value(self.visit_count, uct_c))
    def best_child(self):
        """Returns the best child in order of the sort key."""
        # sort_key is applied to the *children* (action nodes); they must
        # expose visit_count and total_reward attributes.
        return max(self.children, key=ObservationNode.sort_key)
    def sort_key(self):
        """The key function for searching best child."""
        # NOTE(review): ObservationNode never sets total_reward itself; this
        # key is only valid for objects that define it (the action nodes).
        return (self.visit_count, self.total_reward)
| true |
954bb63d4afc35f4a970a89774cdc386842d11e2 | Python | davidlu2002/AID2002 | /dict.py | UTF-8 | 564 | 2.734375 | 3 | [] | no_license | import pymysql
import re
print("Github")

# Pre-compile the "word  definition" pattern once instead of per line.
WORD_RE = re.compile(r"(\S+)\s+(.*)")

# Connect to the local `dict` database.
db = pymysql.connect(host='localhost', port=3306, user='root',
                     password='123456', database='dict', charset='utf8')
# Cursor used to execute the insert statements.
cur = db.cursor()
sql = "insert into words values (%s,%s)"

try:
    # `with` guarantees the input file is closed even if a line fails;
    # the original only closed it on the happy path.
    with open("dict.txt", mode='r') as f:
        for line in f:
            tup = WORD_RE.findall(line)[0]
            try:
                cur.execute(sql, tup)
                db.commit()
            except Exception as e:
                # Report the bad row and undo the failed insert.
                print(e)
                db.rollback()
finally:
    # Close cursor and connection even when loading aborts part-way.
    cur.close()
    db.close()
| true |
1488bde50a8263fa77c0dc1e4b8a504594768ef8 | Python | ella-ballou/unit-testing | /unit_testing.py | UTF-8 | 3,045 | 4.03125 | 4 | [] | no_license | # ella ballou
# software development fundamentals
# programming lab 10
# github.com/ella-ballou/unit-testing <-- link to repository
import unittest
from ListManipulator import ListManipulator
class TestListManipulatorMin(unittest.TestCase):
    """Unit tests for ListManipulator.min().

    FIX: the local variable was named `list`, shadowing the builtin;
    renamed to `manipulator`.
    """

    def test_1(self):
        # Random mix of positive and negative integers; -9 is the smallest.
        manipulator = ListManipulator([-2, -7, -9, 0, 4, 6, -1, 7, 3, 2])
        self.assertEqual(manipulator.min(), -9)

    def test_2(self):
        # Empty list: min() is defined to return None.
        manipulator = ListManipulator([])
        self.assertEqual(manipulator.min(), None)

    def test_3(self):
        # All elements identical: min() should still return that value.
        manipulator = ListManipulator([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        self.assertEqual(manipulator.min(), 0)
class TestListManipulatorMax(unittest.TestCase):
    """Unit tests for ListManipulator.max().

    FIX: the local variable was named `list`, shadowing the builtin;
    renamed to `manipulator`.
    """

    def test_1(self):
        # Random mix of positive and negative integers; 10 is the largest.
        manipulator = ListManipulator([-5, 4, -2, 9, 8, -7, -4, -6, -9, 10])
        self.assertEqual(manipulator.max(), 10)

    def test_2(self):
        # Empty list: max() is defined to return None.
        manipulator = ListManipulator([])
        self.assertEqual(manipulator.max(), None)

    def test_3(self):
        # All elements identical: max() should still return that value.
        manipulator = ListManipulator([10, 10, 10, 10, 10, 10, 10, 10, 10, 10])
        self.assertEqual(manipulator.max(), 10)
class TestListManipulatorRemove(unittest.TestCase):
    """Unit tests for ListManipulator.remove().

    FIX: the local variable was named `list`, shadowing the builtin;
    renamed to `manipulator`.
    """

    def test_1(self):
        # Strip every occurrence of 1 from a mixed list.
        manipulator = ListManipulator([2, 1, 5, 1, 1, 10, -5, 10, 0, 8])
        manipulator.remove(1)
        self.assertEqual(manipulator.list, [2, 5, 10, -5, 10, 0, 8])

    def test_2(self):
        # Removing from an empty list is a no-op.
        manipulator = ListManipulator([])
        manipulator.remove(0)
        self.assertEqual(manipulator.list, [])

    def test_3(self):
        # Value not present: the list is unchanged.
        testlist = [-2, -3, -9, -8, -2, -9, 8, 10, -3, -8]
        manipulator = ListManipulator(testlist)
        manipulator.remove(6)
        self.assertEqual(manipulator.list, testlist)

    def test_4(self):
        # Every element matches: the list empties out.
        manipulator = ListManipulator([4, 4, 4, 4, 4, 4, 4, 4, 4, 4])
        manipulator.remove(4)
        self.assertEqual(manipulator.list, [])
# Discover and run all TestCase classes in this module (no __main__ guard).
unittest.main()
| true |
0c124734d56c5601023cf78b1702f868828c88ee | Python | congor/otus_task_3 | /code_analyzer/modules/clone_repository.py | UTF-8 | 923 | 2.75 | 3 | [
"MIT"
] | permissive | import os
from datetime import datetime
from urllib.parse import urlparse
from modules.remote_sources.git import git
def get_clone_function(source):
    """Map a hosting domain to its clone helper; None when unsupported."""
    return {'github.com': git}.get(source)
def determine_source(project_url):
    """Return the network host part of *project_url* (e.g. 'github.com')."""
    return urlparse(project_url).netloc
def clone_repository(project_url):
    """Clone *project_url* into cloned_repositories/<name>_<timestamp>.

    Returns the absolute clone path on success; returns None when the
    hosting service is unsupported, and also (implicitly) when the clone
    helper reports failure.
    """
    cloned_repositories_local_path = 'cloned_repositories'
    # Timestamp makes repeated clones unique; ':' is replaced so the
    # directory name is valid on Windows as well.
    project_name = project_url.split('/')[-1] + '_' + str(datetime.now()).replace(':', '-')
    cloned_path = os.path.abspath(os.path.join(cloned_repositories_local_path, project_name))
    source = determine_source(project_url)
    clone_function = get_clone_function(source)
    if clone_function is None:
        print('{} is not supported as an remote repository'.format(source))
        return None
    elif clone_function(project_url, cloned_path) is True:
        return cloned_path
10720bd6963eb0eed6fe7a0f634d1ce6f9da4050 | Python | bestchenwu/PythonStudy | /Numpy/study/seniorNumpy/Datetime.py | UTF-8 | 504 | 3.53125 | 4 | [] | no_license | import numpy as np
import datetime
date64 = np.datetime64('2018-02-04 23:10:10')
# print(date64)
# Truncate the timestamp to day precision.
dt64 = np.datetime64(date64, 'D')
print(dt64)
# Adding an integer advances by the datetime64 unit (here, days).
print("after ten days:", dt64 + 10)
# Offsets with an explicit unit, e.g. ten minutes.
tenminutes = np.timedelta64(10, 'm')
print("after ten minutes", date64 + tenminutes)
print(np.datetime_as_string(date64))
# Convert the NumPy datetime64 into a standard datetime.datetime object.
datetime64 = date64.tolist()
print(datetime64.day)
print(datetime64.month)
| true |
36e38456fffd216af5415b3a3e48a7cbe9c3c8f4 | Python | ngehlenborg/pandazzz | /pandazzz/views.py | UTF-8 | 894 | 2.921875 | 3 | [
"MIT"
] | permissive | # views.py
from rest_pandas import PandasSimpleView
import pandas as pd
class ItemView(PandasSimpleView):
    """Serve rows of the movie table, restricted to the requested columns."""

    def get_data(self, request, *args, **kwargs):
        # Replace this with a smarter way to load a data file.
        # FIX: pd.read_table() was deprecated and removed in pandas 2.0;
        # read_csv with an explicit separator is the supported equivalent.
        df = pd.read_csv('data/movies.csv', sep=';')
        # Return only the columns named in the comma-separated "attributes"
        # query parameter.
        return df.filter(items=request.query_params['attributes'].split(","))
class AttributeView(PandasSimpleView):
    """Serve the movie table's column names and dtypes as metadata."""

    def get_data(self, request, *args, **kwargs):
        # Replace this with a smarter way to load a data file.
        # FIX: pd.read_table() was deprecated and removed in pandas 2.0.
        df = pd.read_csv('data/movies.csv', sep=';')
        # zip pairs every column with its dtype, replacing the original
        # manual index counter (and dropping the stray debug print).
        return [
            {'name': str(column), 'type': str(dtype)}
            for column, dtype in zip(df.columns.values, df.dtypes)
        ]
d78d20733a0a714c0e8e4e7eef5bee388538d0e1 | Python | chandralegend/pid-controlled-dc-motor-model | /motor.py | UTF-8 | 793 | 2.828125 | 3 | [] | no_license | # motor object class
class Motor(object):
    """Discrete-time DC-motor model driven by a voltage input.

    R, L: armature resistance/inductance; B: viscous friction;
    Kt: torque constant; J: rotor inertia; Kb: back-EMF constant;
    dt: simulation time step in seconds.
    """

    def __init__(self, R, L, B, Kt, J, Kb, dt):
        self.R = R
        self.L = L
        self.B = B
        self.Kt = Kt
        self.J = J
        self.Kb = Kb
        self.dt = dt
        # Two zero samples seed the second-order difference equation.
        self.outputs = [0, 0]

    def update(self, v):
        """Advance the model one time step with applied voltage *v*."""
        prev = self.outputs[-1]
        prev2 = self.outputs[-2]
        numerator = (v
                     + (self.L * self.B * prev / self.dt / self.Kt)
                     + (self.L * self.J * (2 * prev - prev2) / self.Kt / (self.dt ** 2))
                     + (self.R * self.J * prev / self.Kt))
        denominator = ((self.L * self.B / self.dt / self.Kt)
                       + (self.L * self.J / self.Kt / (self.dt ** 2))
                       + (self.R * (self.B + self.J) / self.Kt)
                       - self.Kb)
        self.outputs.append(numerator / denominator)

    def get_outputs(self):
        """Return the simulated outputs, skipping the two seed zeros."""
        return self.outputs[2:]
| true |
fa71c4e4a36c33d11c4f9a9085f7d91d1824d296 | Python | skanin/NTNU | /Informatikk/Bachelor/H2017/ITGK/Øvinger/Øving 5/Generelt om funksjoner/b.py | UTF-8 | 105 | 3.4375 | 3 | [] | no_license | def arg(argument):
return print(argument)
# Prompt the user (prompt text is Norwegian: "Enter an argument:") and
# echo the value back through arg().
argument = input("Skriv inn et argument: ")
arg(argument)
cdc7cc254c047a212007d5e2c40f65821b7da427 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_118/2658.py | UTF-8 | 938 | 3.328125 | 3 | [] | no_license | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import math
def main():
    # Google Code Jam "Fair and Square" driver.
    # NOTE(review): Python 2 code (raw_input, print statement).
    raw_input() # Dummy line
    case = 1
    while True:
        pref = "Case #%d: " % case
        try:
            ran = raw_input()
            ran = map(int, ran.split())
            # Count fair-and-square numbers in the inclusive range [A, B].
            c = 0
            for n in range(ran[0], ran[1]+1):
                if fairsquare(n):
                    c += 1
            print pref + str(c)
        except Exception as e:
            # raw_input() raises EOFError at end of input -> stop the loop.
            break
        case += 1
def fairsquare(n):
    """True when n is a palindrome whose integer square root is also one."""
    if not fair(n):
        return False
    # square() hands back a non-palindromic sentinel (12) when n is not a
    # perfect square, so a second fair() check settles both conditions.
    return fair(square(n))
def square(n):
    """Return the integer square root of n if n is a perfect square.

    Otherwise return 12 -- a known non-palindrome -- so callers such as
    fairsquare() can simply test the result with fair().
    """
    r = int(math.sqrt(n))
    # FIX: float sqrt can be off by one ulp for large n, which made the
    # original int(s) == s test unreliable; probe the neighbouring integers.
    for candidate in (r - 1, r, r + 1):
        if candidate >= 0 and candidate * candidate == n:
            return candidate
    return 12  # sentinel: a non-fair number
def fair(n):
    """True when the decimal representation of n is a palindrome.

    FIX: the original used the Python 2-only builtin xrange and a manual
    mirrored-index loop; a slice reversal is equivalent and works on both
    Python 2 and 3.
    """
    digits = str(n)
    return digits == digits[::-1]
# Stream the Code Jam input from stdin when run as a script.
if __name__ == '__main__':
    main()
| true |
2b4a33529fbc57ee372510f3642d7386166bbec8 | Python | hantek/baby-ai-game | /model/sentenceEmbedder.py | UTF-8 | 2,614 | 2.8125 | 3 | [
"BSD-3-Clause"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 22 18:27:57 2017
@author: simon
"""
import sys
import traceback
import nltk
import torch
from torch.autograd import Variable
import os
# Make sure the local 'model' directory is importable, so the InferSent
# package can be found regardless of the working directory.
directory=os.getcwd()
if(not directory[-5:]=='model'):
    # NOTE(review): '\\model' is a Windows path separator -- this setup
    # appears Windows-specific; os.path.join would be portable.
    directory=directory+ '\\model'
sys.path.insert(0,directory)
print("new path added to sys.path : ", directory)
class Sentence2Vec(object):
    """Wrapper around a pre-trained InferSent encoder plus GloVe vocabulary.

    Loads the pickled InferSent model once at construction (slow, does
    file and network I/O via nltk.download) and exposes encodeSent() to
    turn sentences into embedding tensors.
    """
    def __init__(self,
                 glove_path=directory+"/InferSent/dataset/GloVe/glove.840B.300d.txt",
                 useCuda=False,
                 Nwords=10000,
                 pathToInferSentModel=directory+'/InferSent/infersent.allnli.pickle',
                 modelDirectory=directory+"/InferSent"):
        # glove_path: GloVe vectors used to build the word vocabulary.
        # useCuda: keep the model on GPU (~1000 sent/s) instead of CPU (~40/s).
        # Nwords: number of most-common words loaded into the vocabulary.
        print ("Loading Glove Model")
        #adding directory to the InferSent module
        if (not modelDirectory in sys.path):
            print("adding local directory to load the model")
            sys.path.append(modelDirectory)
        else:
            print("directory already in the sys.path")
        nltk.download('punkt')  # tokenizer data needed when tokenize=True
        #loading model
        if (useCuda):
            print("you are on GPU (encoding ~1000 sentences/s, default)")
            self.infersent = torch.load(pathToInferSentModel)
        else:
            print("you are on CPU (~40 sentences/s)")
            # map_location forces all tensors in the pickle onto the CPU.
            self.infersent = torch.load(pathToInferSentModel, map_location=lambda storage, loc: storage)
        self.infersent.set_glove_path(glove_path)
        print("loading the {} most common words".format(Nwords))
        try:
            self.infersent.build_vocab_k_words(K=Nwords)
            print("vocab trained")
        except Exception as e:
            # Vocabulary build failures are reported but not fatal.
            print("ERROR")
            print(e)
            print("\nPOSSIBLE SOLUTION")
            print("if you have an encoding error, specify encoder='utf8' in the models.py file line 111 " )
        print("done")
    def encodeSent(self,sentence):
        """Encode one sentence (str) or a list of sentences into a tensor."""
        if(type(sentence)==str):
            #print("processing one sentence")
            return(torch.from_numpy((self.infersent).encode([sentence],tokenize=True)))
        else:
            #print("processing {} sentences".format(len(sentence)))
            return(torch.from_numpy((self.infersent).encode(sentence,tokenize=True)))
#test code
#model=Sentence2Vec()
#sentence='Hello I am Simon'
#sentences=[sentence,'How are you ?']
#x=model.encodeSent(sentence)
#print(x.size())
#x=model.encodeSent(sentences)
#print(x.size())
#model.infersent.visualize(sentence)
#
#
| true |
5c0f45bbe1282f13fe292811ba47ac4da42fd375 | Python | osvaldohg/hacker_rank | /interview_preparation_kit/python/warm_up/jumping_on_the_clouds.py | UTF-8 | 732 | 3.1875 | 3 | [] | no_license | #!/bin/python
#https://www.hackerrank.com/challenges/jumping-on-the-clouds/problem
#by oz
import math
import os
import random
import re
import sys
# Complete the jumpingOnClouds function below.
def jumpingOnClouds(c):
    """Minimum jumps to cross the clouds `c` (0 = safe, 1 = thunderhead).

    Greedy: take a 2-jump whenever it lands on a safe cloud, otherwise
    step forward by one.
    """
    jumps = 0
    position = 0
    last = len(c) - 1
    while position != last:
        if position + 2 <= last and c[position + 2] != 1:
            position += 2
        else:
            position += 1
        jumps += 1
    return jumps
if __name__ == '__main__':
    # HackerRank harness: the answer goes to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # NOTE(review): raw_input is Python 2 only.
    n = int(raw_input())
    c = map(int, raw_input().rstrip().split())
    result = jumpingOnClouds(c)
    fptr.write(str(result) + '\n')
    fptr.close()
| true |
1a2cf7a2d2a68e7cd2d2dc7895d2b25d2dc063ce | Python | maria-kuruvilla/effective_computing | /ptagis.py | UTF-8 | 1,638 | 3.265625 | 3 | [
"MIT"
] | permissive | """
Code to retrieve data from ptagis.py
"""
# imports
import ftplib
import pandas as pd
"""
ftp://ftp.ptagis.org/RawDataFiles/Interrogation/Loaded/158/2011/
"""
#directory to save the data files
out_dir = '../../data/effective_computing/'
def retrieve_data(year = '2005', day_of_year = '001', extension = '.A1'):
try:
folder = 'BO1/' # name of the folder in which contains the file you want (also the name of the damn)
year = year + '/' #data from year 2014
path = 'RawDataFiles/Interrogation/Loaded/' + folder + year #latter part of the url (path to file in the website)
filename = folder[0:-1] + year[2:-1] + day_of_year + extension #'15811333.INT' #name of the file we want to download
# first three digits are name of the folder, next 2 indicated the year
#and the last three indicate day of year. This can be put in a loop.
ftp = ftplib.FTP("ftp.ptagis.org") #server IP of the website we want to download from
ftp.login() #we do not need username and password for this data
ftp.cwd(path) #change currect working path on the website to the location where the file is
ftp.retrbinary("RETR " + filename ,open(out_dir+filename, 'wb').write) #this will download the file into the same folder as your code
ftp.quit()
print(' Retrieved ' + filename)
except:
print(' -- Failed to retrieve' + filename)
pass
df = pd.read_csv(out_dir+filename,delim_whitespace=True, skiprows=4, skipfooter = 3,
engine = 'python')#last argument might not be required
| true |
22ecd4c52bc79bae8d008f2c3dad547dbf14ff51 | Python | jack-nikky/untitled1 | /pytorchtest/test1.py | UTF-8 | 682 | 2.953125 | 3 | [] | no_license | from __future__ import print_function
import torch
# Leaf tensor with gradient tracking enabled.
x = torch.ones(3, 3, requires_grad=True)
print(x)
y = x + 2
print(y)
# Every tensor has a .grad_fn attribute referencing the Function that
# created it (grad_fn is None for tensors created directly by the user).
print(y.grad_fn)
z = y * y * 3
out = z.mean()
print(z, out)
# a = torch.randn(2, 2)
# a = ((a * 3) / (a - 1))
# # requires_grad defaults to False for user-created tensors
# print(a.requires_grad)
# # .requires_grad_(True) switches the flag on in place
# a.requires_grad_(True)
# print(a.requires_grad)
# b = (a * a).sum()
# print(b.grad_fn)
'''
O = 1/4*(3(xi + 1)^2)
O' = 3/2*(xi + 1) xi = 1
x.grad = 4.5
'''
# Backpropagate the scalar `out`; each x.grad entry is d(out)/dx_i = 4.5.
out.backward()
print(x.grad)
| true |
ae3cf47f700bf78d9ab1ec72c0ced07314b5a8d0 | Python | Woooosz/StudentSystem | /api/dao/echart.py | UTF-8 | 5,442 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | import json
import pymysql
import config
def echart1():
    """ECharts payload for a bar chart of student counts per grade (nianji)."""
    nianji_list = []   # x-axis categories (grade names)
    nianji_data = []   # bar heights (student counts)
    # NOTE(review): `with pymysql.connect(...) as conn` then conn.execute()
    # matches pre-1.0 PyMySQL, where __enter__ returned a cursor; confirm
    # the pinned PyMySQL version.
    with pymysql.connect(host=config.MYSQL_HOST, port=config.MYSQL_PORT,user=config.MYSQL_USRT, password=config.MYSQL_PASSWD, db=config.MYSQL_DB) as conn:
        conn.execute("select nianji, count(*) as num from student group by nianji")
        results = conn.fetchall()
        for row in results:
            nianji_list.append(row[0])
            nianji_data.append(row[1])
    data = {'xAxis': {
        'data': nianji_list
    },
        'yAxis': {},
        'series': [{
            'name': '人数',
            'type': 'bar',
            'data': nianji_data
        }]}
    return data
def echart2():
    """ECharts payload for a rose chart of student counts per major (zhuanye)."""
    nianji_list = []   # despite the name: major names, used as legend entries
    nianji_data = []   # counts per major (collected but unused downstream)
    data_list = []     # {'value', 'name'} records for the pie series
    with pymysql.connect(host=config.MYSQL_HOST, port=config.MYSQL_PORT,user=config.MYSQL_USRT, password=config.MYSQL_PASSWD, db=config.MYSQL_DB) as conn:
        conn.execute("select zhuanye, count(*) as num from student group by zhuanye")
        results = conn.fetchall()
        for row in results:
            nianji_list.append(row[0])
            nianji_data.append(row[1])
            data_list.append({'value':row[1], 'name':row[0]})
    data = {
        'series': [{
            'name':'经济管理学院',
            'type':'pie',
            'radius' : [30, 110],
            'roseType' : 'area',
            'data':data_list}],
        'data':nianji_list
    }
    return data
def echart3():
    """ECharts payload for stacked bars: per-grade counts split by major.

    Returns {'series': [...], 'nianji': [...], 'zhuanye': [...]} with one
    bar series per major (zhuanye), stacked over the grade (nianji) axis
    sorted newest-first.
    """
    res_dict = {}       # zhuanye -> {nianji: count}
    zhuanye_set = set()
    nianji_set = set()
    with pymysql.connect(host=config.MYSQL_HOST, port=config.MYSQL_PORT,user=config.MYSQL_USRT, password=config.MYSQL_PASSWD, db=config.MYSQL_DB) as conn:
        conn.execute("select nianji, zhuanye, count(*) from student group by nianji, zhuanye")
        results = conn.fetchall()
        for row in results:
            zhuanye_set.add(row[1])
            # BUG FIX: the original did res_dict[row[1]] = {row[0]: row[2]},
            # replacing the whole per-major dict on every row, so only the
            # last grade of each major survived; accumulate instead.
            res_dict.setdefault(row[1], {})[row[0]] = row[2]
            nianji_set.add(row[0])
    # Fill missing grades with 0 and sort each major's counts by grade,
    # newest first, so every series aligns with nianji_list below.
    for k,v in res_dict.items():
        for nianji in nianji_set:
            if nianji not in v:
                v[nianji] = 0
        res_dict[k] = sorted(v.items(), key=lambda d:d[0], reverse = True)
    nianji_list = sorted(list(nianji_set), reverse=True)
    data = {}
    data['series'] = []
    for k,v in res_dict.items():
        sublist = [count for _, count in v]  # counts aligned with nianji_list
        subdata = {
            'name': k,
            'type': 'bar',
            'stack': '人数',
            'label': {
                'normal': {
                    'show': 'true',
                    'position': 'insideRight'
                }
            },
            'data': sublist
        }
        data['series'].append(subdata)
    data['nianji'] = nianji_list
    data['zhuanye'] = list(zhuanye_set)
    return data
def echart4():
    """ECharts payload: one pie per workroom showing used vs. free capacity.

    Pulls the (up to) 8 most-utilized rooms from vw_workroom and lays the
    pies out on a 4x2 grid of chart centers.
    """
    workroom_list = ['教研室']   # dataset header cell followed by room names
    used_list = ['已使用']       # header + used seats per room
    ununsed_list = ['未使用']    # header + free seats per room
    with pymysql.connect(host=config.MYSQL_HOST, port=config.MYSQL_PORT,user=config.MYSQL_USRT, password=config.MYSQL_PASSWD, db=config.MYSQL_DB) as conn:
        conn.execute("select roomname, capacity, used from vw_workroom order by used/capacity desc limit 8")
        results = conn.fetchall()
        for row in results:
            workroom_list.append(row[0])
            used_list.append(int(row[2]))
            ununsed_list.append(int(row[1]) - int(row[2]))
    data = {}
    data['source'] = [workroom_list, used_list, ununsed_list]
    data['series'] = []
    center_list = [['20%', '30%'],['40%', '30%'],['60%', '30%'],['80%', '30%'],
                   ['20%', '70%'],['40%', '70%'],['60%', '70%'],['80%', '70%']]
    # BUG FIX: the original iterated over all 8 grid centers and indexed
    # workroom_list[idx+1], raising IndexError whenever the view returned
    # fewer than 8 rooms; iterate over the rooms actually fetched instead.
    for idx, roomname in enumerate(workroom_list[1:]):
        subdata = {
            'type': 'pie',
            'radius': 70,
            'name': roomname,
            'center': center_list[idx],
            'encode': {
                'itemName': '教研室',
                'value': roomname
            }
        }
        data['series'].append(subdata)
    return data
def echart5():
    """ECharts payloads for two pies: degree type and training mode."""
    xueweileixing_list = []    # degree-type slices; the first is pre-selected
    peiyangfangshi_list = []   # training-mode slices
    with pymysql.connect(host=config.MYSQL_HOST, port=config.MYSQL_PORT,user=config.MYSQL_USRT, password=config.MYSQL_PASSWD, db=config.MYSQL_DB) as conn:
        conn.execute("select xueweileixing, count(*) as num from student group by xueweileixing")
        results = conn.fetchall()
        cnt = 0
        for row in results:
            if cnt != 0:
                xueweileixing_list.append({'value':row[1], 'name':row[0]})
            else:
                # Mark the first slice as selected in the rendered chart.
                xueweileixing_list.append({'value':row[1], 'name':row[0], 'selected':'true'})
            cnt += 1
        conn.execute("select peiyangfangshi, count(*) as num from student group by peiyangfangshi")
        results = conn.fetchall()
        for row in results:
            peiyangfangshi_list.append({'value':row[1], 'name':row[0]})
    data = {
        'xueweileixing': xueweileixing_list,
        'peiyangfangshi':peiyangfangshi_list
    }
    return data
def getechart():
    """Collect the payloads of all five dashboard charts in one dict."""
    return {
        'chart1': echart1(),
        'chart2': echart2(),
        'chart3': echart3(),
        'chart4': echart4(),
        'chart5': echart5(),
    }
bf27ce4f7f4899b0a6c3f2ef06bd09fc4a5a0e7c | Python | canonical/basic-auth-service | /dev/api-client | UTF-8 | 3,753 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python3
"""API client for the basic-auth service."""
import sys
import os
import argparse
import json
from urllib.parse import quote
import requests
# Maps each CLI action to the HTTP method it uses, its --help text, and
# whether it targets a single resource (and therefore needs an id argument).
ACTIONS = {
    'add': {'method': 'post', 'help': 'Add a resource', 'id': False},
    'list': {'method': 'get', 'help': 'List all resources', 'id': False},
    'remove': {'method': 'delete', 'help': 'Remove a resource', 'id': True},
    'get': {'method': 'get', 'help': 'Get a single resource', 'id': True},
    'update': {'method': 'put', 'help': 'Update a resource', 'id': True},
}
def detail_type(detail):
    """Parse a "key=value" command-line detail into a [key, value] pair.

    Splits on the *first* '=' only, so values may themselves contain '='
    (e.g. base64 payloads); previously such values were rejected.

    Raises argparse.ArgumentTypeError when the key or value is missing
    or empty.
    """
    split = detail.split('=', 1)
    if len(split) != 2 or not all(split):
        raise argparse.ArgumentTypeError(
            'Details must be in the form "key=value"')
    return split
def basic_auth_type(auth):
    """Parse "user:password" into a (user, password) tuple.

    Splits on the *first* ':' only: RFC 7617 forbids ':' in the user-id
    but allows it in the password, so "bob:pa:ss" yields ('bob', 'pa:ss')
    instead of being rejected.

    Raises argparse.ArgumentTypeError when either part is missing or empty.
    """
    split = auth.split(':', 1)
    if len(split) != 2 or not all(split):
        raise argparse.ArgumentTypeError(
            'Basic auth must be in the form "user:password"')
    return tuple(split)
class DetailsAction(argparse.Action):
    """argparse action collecting (key, value) pairs into a dict."""

    def __call__(self, parser, namespace, values, option_string=None):
        # `values` is a sequence of [key, value] pairs from detail_type().
        details = dict(values)
        setattr(namespace, self.dest, details)
def parse_args():
    """Build the CLI parser and parse sys.argv.

    Returns a Namespace with url, creds, debug, resource, action, and
    (for add/update) details.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--url', help='The API endpoint URL',
        default='http://localhost:8080/api')
    # NOTE: argparse runs string defaults through `type`, so the env value
    # is parsed by basic_auth_type too (verify against the argparse docs
    # for the pinned Python version).
    parser.add_argument(
        '--creds', type=basic_auth_type,
        help=('Basic-auth user for the API endpoint. Alternatively, the '
              'BASIC_AUTH_API_CREDS environment can be set.'),
        default=os.environ.get('BASIC_AUTH_API_CREDS'))
    parser.add_argument(
        '--debug', action='store_true', help='Print out debug information')
    parser.add_argument(
        'resource', help='The resource to operate on',
        choices=['credentials'])
    # One subcommand per entry in ACTIONS; the chosen one lands in
    # args.action.
    subparsers = parser.add_subparsers(
        help='The action to perform', dest='action', metavar='action')
    subparsers.required = True
    for action, info in ACTIONS.items():
        subparser = subparsers.add_parser(action, help=info['help'])
        if info['id']:
            subparser.add_argument('id', help='The resource identifier')
        if action in ('add', 'update'):
            # 'add' requires at least one detail; 'update' accepts none.
            nargs = '+' if action == 'add' else '*'
            subparser.add_argument(
                'details', help='Request details, in the "key=value" format',
                type=detail_type, action=DetailsAction, nargs=nargs)
    return parser.parse_args()
def main():
    """Entry point: parse arguments, perform the request, show the result."""
    args = parse_args()
    try:
        response = make_request(args)
    except requests.ConnectionError as error:
        # Exit with the connection failure as the process exit message.
        sys.exit(str(error))
    print_response(response)
def make_request(args):
    """Perform the HTTP call described by the parsed CLI arguments."""
    action_info = ACTIONS[args.action]
    http_method = action_info['method']
    # Single-resource actions address one item, so append its (quoted) id.
    parts = [args.url, args.resource]
    if action_info['id']:
        parts.append(quote(args.id, safe=''))
    url = '/'.join(parts)
    headers = {
        'Content-Type': 'application/json;profile=basic-auth.api;version=1.0'}
    details = getattr(args, 'details', None)
    if details is None:
        data = None
    else:
        data = json.dumps(details)
    if args.debug:
        print('{} {} - {}'.format(http_method.upper(), url, details or {}))
    return requests.request(
        http_method, url, headers=headers, data=data, auth=args.creds)
def print_response(response):
    """Print the response status line followed by its (decoded) body."""
    is_json = response.headers['Content-Type'].startswith('application/json')
    content = response.json() if is_json else response.text
    print('{} {} - {}'.format(response.status_code, response.reason, content))
if __name__ == '__main__':
main()
| true |
eabbe54a54312cb1e2c249f349c0fd8d7cfc37ec | Python | wojciech-wojcik/portfolio | /Rekomendacje/tests.py | UTF-8 | 11,450 | 2.703125 | 3 | [] | no_license | import findspark
findspark.init()
import logging
import pytest
from pyspark.sql import SparkSession, Row
import spark_functions
import pandas as pd
def quiet_py4j():
    """Silence noisy py4j INFO logging while the Spark tests run."""
    py4j_logger = logging.getLogger('py4j')
    py4j_logger.setLevel(logging.WARN)
@pytest.fixture(scope="session")
def spark_context(request):
spark = SparkSession.builder.appName("pytest-recommendations") \
.master("local[2]").getOrCreate()
sc = spark.sparkContext
request.addfinalizer(sc.stop)
quiet_py4j()
return sc
@pytest.mark.usefixtures('spark_context')
def test_split_genres(spark_context):
    """split_genres must emit one (movieId, lowercased genre) pair per
    pipe-separated genre of each input movie Row."""
    test_input = [Row(movieId=1, title='Toy Story (1995)', genres='Adventure|Animation|Children|Comedy|Fantasy'),
                  Row(movieId=2, title='Jumanji (1995)', genres='Adventure|Children|Fantasy'),
                  Row(movieId=3, title='Grumpier Old Men (1995)', genres='Comedy|Romance'),
                  Row(movieId=4, title='Waiting to Exhale (1995)', genres='Comedy|Drama|Romance'),
                  Row(movieId=5, title='Father of the Bride Part II (1995)', genres='Comedy')]
    input_rdd = spark_context.parallelize(test_input, 2)
    print(input_rdd.collect())
    results = spark_functions.split_genres(input_rdd).collect()
    expected_results = {(1, 'adventure'), (1, 'animation'), (1, 'children'), (1, 'comedy'), (1, 'fantasy'),
                        (2, 'adventure'), (2, 'children'), (2, 'fantasy'),
                        (3, 'comedy'), (3, 'romance'),
                        (4, 'comedy'), (4, 'romance'), (4, 'drama'),
                        (5, 'comedy')}
    # RDD ordering is nondeterministic, so compare as a set.
    assert set(results) == expected_results
@pytest.mark.usefixtures('spark_context')
def test_count_genres(spark_context):
    """count_genres must count how many movies carry each genre."""
    test_input = [(1, 'adventure'), (1, 'animation'), (1, 'children'), (1, 'comedy'), (1, 'fantasy'),
                  (2, 'adventure'), (2, 'children'), (2, 'fantasy'),
                  (3, 'comedy'), (3, 'romance'),
                  (4, 'comedy'), (4, 'romance'), (4, 'drama'),
                  (5, 'comedy')]
    input_rdd = spark_context.parallelize(test_input, 2)
    results = spark_functions.count_genres(input_rdd).collect()
    expected_results = {'adventure': 2, 'animation': 1, 'children': 2,
                        'comedy': 4, 'fantasy': 2, 'romance': 2, 'drama': 1}
    assert dict(results) == expected_results
@pytest.mark.usefixtures('spark_context')
def test_ratings_stats(spark_context):
    """ratings_stats must aggregate ratings per genre.

    Expected tuples are (genre, rating total, rated-movie count, average),
    e.g. comedy covers movies 1, 3, 4, 5 -> 1+3+4+5 = 13 over 4 movies.
    """
    test_input1 = [(1, 'adventure'), (1, 'animation'), (1, 'children'), (1, 'comedy'), (1, 'fantasy'),
                   (2, 'adventure'), (2, 'children'), (2, 'fantasy'),
                   (3, 'comedy'), (3, 'romance'),
                   (4, 'comedy'), (4, 'romance'), (4, 'drama'),
                   (5, 'comedy')]
    test_input2 = [Row(userId=1, movieId=1, rating=1, timestamp=1112486027),
                   Row(userId=1, movieId=2, rating=2, timestamp=1112484676),
                   Row(userId=1, movieId=3, rating=3, timestamp=1112484819),
                   Row(userId=1, movieId=4, rating=4, timestamp=1112484727),
                   Row(userId=1, movieId=5, rating=5, timestamp=1112484580)]
    input_rdd1 = spark_context.parallelize(test_input1, 2)
    input_rdd2 = spark_context.parallelize(test_input2, 2)
    results = spark_functions.ratings_stats(input_rdd1, input_rdd2).collect()
    expected_results = {('animation', 1, 1, 1.0),
                        ('comedy', 13, 4, 3.25),
                        ('children', 3, 2, 1.5),
                        ('fantasy', 3, 2, 1.5),
                        ('romance', 7, 2, 3.5),
                        ('adventure', 3, 2, 1.5),
                        ('drama', 4, 1, 4.0)}
    assert set(results) == expected_results
@pytest.mark.usefixtures('spark_context')
def test_get_films_ids(spark_context):
    """get_films_ids must resolve broadcast (lowercased title, rating)
    pairs to (movieId, rating), ignoring the year suffix in titles."""
    test_input = [Row(movieId=1, title='Toy Story (1995)', genres='Adventure|Animation|Children|Comedy|Fantasy'),
                  Row(movieId=2, title='Jumanji (1995)', genres='Adventure|Children|Fantasy'),
                  Row(movieId=3, title='Grumpier Old Men (1995)', genres='Comedy|Romance'),
                  Row(movieId=4, title='Waiting to Exhale (1995)', genres='Comedy|Drama|Romance'),
                  Row(movieId=5, title='Father of the Bride Part II (1995)', genres='Comedy')]
    input_df = spark_context.parallelize(test_input, 2).toDF()
    titles = spark_context.broadcast([('toy story', 5), ('jumanji', 5)])
    results = spark_functions.get_films_ids(input_df, titles).collect()
    # Release the broadcast variable once the lookup is done.
    titles.unpersist()
    expected_results = [(1, 5), (2, 5)]
    assert set(results) == set(expected_results)
@pytest.mark.usefixtures('spark_context')
def test_get_similarity_no_diff(spark_context):
    """A user whose ratings match the broadcast rating vector exactly
    must get distance 0."""
    test_input = [Row(userId=1, movieId=0, rating=5.0, timestamp=1094785698),
                  Row(userId=1, movieId=1, rating=5.0, timestamp=1011209096),
                  Row(userId=1, movieId=2, rating=5.0, timestamp=994020680),
                  Row(userId=1, movieId=3, rating=5.0, timestamp=1230857185),
                  Row(userId=1, movieId=4, rating=5.0, timestamp=1230788346)]
    input_rdd = spark_context.parallelize(test_input, 2)
    # Identity mapping movieId -> position, all reference ratings = 5.
    d = dict([(i, i) for i in range(5)])
    db = spark_context.broadcast(d)
    vb = spark_context.broadcast([5] * len(d))
    idsb = spark_context.broadcast([0, 1, 2, 3, 4])
    results = spark_functions.get_similarity(input_rdd, db, vb, idsb)
    idsb.unpersist()
    db.unpersist()
    vb.unpersist()
    expected_results = [(1, 0)]
    assert results == expected_results
@pytest.mark.usefixtures('spark_context')
def test_get_similarity_small_diff(spark_context):
    """Users who rated only part of the reference films accumulate a
    nonzero distance for the films they have not rated."""
    test_input = [Row(userId=1, movieId=0, rating=5.0, timestamp=1094785698),
                  Row(userId=1, movieId=1, rating=5.0, timestamp=1011209096),
                  Row(userId=1, movieId=2, rating=5.0, timestamp=994020680),
                  Row(userId=2, movieId=3, rating=5.0, timestamp=1230857185),
                  Row(userId=2, movieId=4, rating=5.0, timestamp=1230788346)]
    input_rdd = spark_context.parallelize(test_input, 2)
    d = dict([(i, i) for i in range(5)])
    db = spark_context.broadcast(d)
    vb = spark_context.broadcast([5] * len(d))
    idsb = spark_context.broadcast([0, 1, 2, 3, 4])
    results = spark_functions.get_similarity(input_rdd, db, vb, idsb)
    idsb.unpersist()
    db.unpersist()
    vb.unpersist()
    # User 1 misses 2 reference films, user 2 misses 3.
    expected_results = [(1, 10), (2, 15)]
    assert results == expected_results
@pytest.mark.usefixtures('spark_context')
def test_get_similarity_max_diff(spark_context):
    """Ratings maximally different from the reference vector (0 vs 5)
    must produce the largest distance for that user."""
    test_input = [Row(userId=1, movieId=0, rating=0, timestamp=1094785698),
                  Row(userId=1, movieId=1, rating=0, timestamp=1011209096),
                  Row(userId=1, movieId=2, rating=0, timestamp=994020680),
                  Row(userId=1, movieId=3, rating=0, timestamp=1230857185),
                  Row(userId=1, movieId=4, rating=0, timestamp=1230788346)]
    input_rdd = spark_context.parallelize(test_input, 2)
    d = dict([(i, i) for i in range(5)])
    db = spark_context.broadcast(d)
    vb = spark_context.broadcast([5] * len(d))
    # Only films 0-2 are considered for the comparison here.
    idsb = spark_context.broadcast([0, 1, 2])
    results = spark_functions.get_similarity(input_rdd, db, vb, idsb)
    idsb.unpersist()
    db.unpersist()
    vb.unpersist()
    expected_results = [(1, 25)]
    assert results == expected_results
@pytest.mark.usefixtures('spark_context')
def test_get_films_stats(spark_context):
    """get_films_stats must compute per-film statistics for films outside
    the excluded-ids broadcast.

    Each result is (movieId, (rating, users_seen, f_score)) — the same
    column order used by test_get_recommendations below.
    """
    # (userId, similarity) pairs and the raw ratings table.
    test_input1 = [(1, 0), (2, 0), (3, 1)]
    test_input2 = [Row(userId=1, movieId=0, rating=5, timestamp=1094785698),
                   Row(userId=1, movieId=1, rating=5, timestamp=1011209096),
                   Row(userId=1, movieId=2, rating=5, timestamp=994020680),
                   Row(userId=2, movieId=0, rating=5, timestamp=1230857185),
                   Row(userId=2, movieId=1, rating=5, timestamp=1230788346),
                   Row(userId=2, movieId=3, rating=5, timestamp=1230788346)]
    input_rdd1 = spark_context.parallelize(test_input1, 2)
    input_rdd2 = spark_context.parallelize(test_input2, 2)
    # Films 0 and 2 are excluded from the recommendations.
    ids = spark_context.broadcast([0, 2])
    results = spark_functions.get_films_stats(input_rdd1, input_rdd2, ids)
    ids.unpersist()
    expected_results = [(1, (5.0, 2, 2.857142857142857)),
                        (3, (5.0, 1, 1.6666666666666667))]
    assert results == expected_results
@pytest.mark.usefixtures('spark_context')
def test_get_recommendations(spark_context):
    """get_recommendations must join per-film stats with movie metadata
    into a DataFrame of (rating, users_seen, f_score, title, genres)."""
    test_input1 = [(2, (10, 4.7, 6.394557823129252)),
                   (3, (10, 4.65, 6.348122866894197)),
                   (4, (10, 4.5, 6.206896551724139))]
    test_input2 = [Row(movieId=1, title='Toy Story (1995)', genres='Adventure|Animation|Children|Comedy|Fantasy'),
                   Row(movieId=2, title='Jumanji (1995)', genres='Adventure|Children|Fantasy'),
                   Row(movieId=3, title='Grumpier Old Men (1995)', genres='Comedy|Romance'),
                   Row(movieId=4, title='Waiting to Exhale (1995)', genres='Comedy|Drama|Romance'),
                   Row(movieId=5, title='Father of the Bride Part II (1995)', genres='Comedy')]
    input_rdd1 = spark_context.parallelize(test_input1, 2)
    input_rdd2 = spark_context.parallelize(test_input2, 2)
    results = spark_functions.get_recommendations(input_rdd1, input_rdd2).toPandas()
    data = [[10, 4.7, 6.394557823129252, 'Jumanji (1995)',
             'Adventure|Children|Fantasy'],
            [10, 4.65, 6.348122866894197, 'Grumpier Old Men (1995)',
             'Comedy|Romance'],
            [10, 4.5, 6.206896551724139, 'Waiting to Exhale (1995)',
             'Comedy|Drama|Romance']]
    columns = ['rating', 'users_seen', 'f_score', 'title', 'genres']
    expected_results = pd.DataFrame(data, columns=columns)
    # Elementwise comparison: any True (mismatching) cell fails the test.
    assert not (results != expected_results).sum().sum()
@pytest.mark.usefixtures('spark_context')
def test_recommendations_pipe(spark_context):
    """End-to-end pipeline check: the films supplied in `titles` are
    excluded, and the remaining films come back ranked with stats."""
    test_input1 = [Row(movieId=1, title='Film 1', genres='Adventure|Animation|Children|Comedy|Fantasy'),
                   Row(movieId=2, title='Film 2', genres='Adventure|Children|Fantasy'),
                   Row(movieId=3, title='Film 3', genres='Comedy|Romance'),
                   Row(movieId=4, title='Film 4', genres='Comedy|Drama|Romance'),
                   Row(movieId=5, title='Film 5', genres='Comedy')]
    test_input2 = [Row(userId=1, movieId=1, rating=3.5, timestamp=1112486027),
                   Row(userId=1, movieId=2, rating=3.5, timestamp=1112484676),
                   Row(userId=1, movieId=3, rating=5., timestamp=1112484819),
                   Row(userId=2, movieId=1, rating=3.5, timestamp=1112484727),
                   Row(userId=2, movieId=2, rating=3.5, timestamp=1112484580),
                   Row(userId=2, movieId=4, rating=4., timestamp=1112484580)]
    input_df1 = spark_context.parallelize(test_input1, 2).toDF()
    input_df2 = spark_context.parallelize(test_input2, 2).toDF()
    titles = [('film 1', 5), ('film 2', 5)]
    results = spark_functions.recommendations_pipe(spark_context, input_df1, input_df2, titles).toPandas()
    data = [[5.0, 1, 1.6666666666666667, 'Film 3', 'Comedy|Romance'],
            [4.0, 1, 1.6, 'Film 4', 'Comedy|Drama|Romance']]
    columns = ['rating', 'users_seen', 'f_score', 'title', 'genres']
    expected_results = pd.DataFrame(data, columns=columns)
    # Elementwise comparison: any True (mismatching) cell fails the test.
    assert not (results != expected_results).sum().sum()
| true |
97ed6d3f65c47e23df9f59f9bee4d3b761978b8b | Python | showonlady/ui_test | /game.py | UTF-8 | 320 | 3.03125 | 3 | [] | no_license | #!/user/bin/env python
#coding utf-8
# NOTE(review): the declaration above lacks the "coding: utf-8" form
# required by PEP 263, so it has no effect (Python 2 will assume ASCII).
# Python 2 script (raw_input / print statements): a best-of-3
# rock-paper-scissors-style game over the symbols a, b, c.
import random

# The three possible moves.
x = ['a', 'b', 'c']
# (winning move, losing move) pairs: the player scores a point when
# (player, computer) appears in this list.
y = [('a', 'b'),('b', 'c'),('c', 'a')]
i = 0;
count = 0;
# Three rounds: read the player's move, draw the computer's at random.
while i<3:
    h = raw_input("input:")
    l = random.choice(x)
    print l
    if (h, l) in y:
        count += 1
    i += 1
# Winning at least 2 of 3 rounds wins the game.
# NOTE(review): "successfull" is a typo in the user-facing message.
if count>=2:
    print "successfull"
else:
    print "Fail"
| true |
4b381eb3dbd62edd3122052cc7ff19944db00d44 | Python | zhulf0804/Coding.Python | /codeforces/977E_Cyclic_Components.py | UTF-8 | 813 | 2.8125 | 3 | [] | no_license | n, m = list(map(int, input().strip().split()))
visited = [0] * (n + 1)
edges = [[] for _ in range(n + 1)]
#print(edges)
#
for i in range(m):
x, y = list(map(int, input().strip().split()))
edges[x].append(y)
edges[y].append(x)
res = 0
for i in range(1, n + 1):
if visited[i]:
continue
visited[i] = 1
if len(edges[i]) <= 1 or len(edges[i]) > 2:
continue
next = edges[i][0]
cur = i
while next != i and not visited[next]:
visited[next] = 1
if len(edges[next]) <= 1 or len(edges[next]) > 2:
visited[next] = 1
break
if edges[next][0] == cur:
cur = next
next = edges[next][1]
else:
cur = next
next = edges[next][0]
if next == i:
res += 1
print(res) | true |
531cee5e6af79be237c4c890f2769a9dd134cbea | Python | draghicivlad/SAT-solver-Python | /BDD_SAT.py | UTF-8 | 3,677 | 3.390625 | 3 | [] | no_license | import datetime
class Tree:
    """Node of the binary decision diagram built by the SAT search:
    `right` is the variable=True branch, `left` the variable=False one."""

    def __init__(self):
        # No children and no payload until the solver expands this node.
        self.left = self.right = self.data = None
def _reduceEquation(equation, variable, value):
    """Simplify `equation` (list of {variable: polarity} clauses) under
    the assignment variable=value, where value is +1 (True) or -1 (False).

    Returns (simplified, conflict): clauses satisfied by the assignment
    are dropped, the falsified literal is removed from the others, and
    `conflict` is True when some clause becomes empty (the branch is
    unsatisfiable).  Input clauses are never mutated.
    """
    simplified = []
    conflict = False
    for clause in equation:
        polarity = clause.get(variable)
        if polarity == value:
            # The assignment satisfies this clause; drop it entirely.
            continue
        reduced = clause.copy()
        if polarity is not None:
            # Opposite polarity: the literal is false, remove it.
            del reduced[variable]
            if not reduced:
                conflict = True
        simplified.append(reduced)
    return simplified, conflict

def evaluateLevel(tree, level, variables):
    """Expand `tree` by assigning variables[level] both ways and recursing.

    Returns True as soon as a satisfying assignment is found, False when
    this subtree is exhausted.  Bug fix versus the original: a branch
    whose reduced equation contains an empty clause is now pruned; the
    original still recursed into it with `data` set to the bool False and
    crashed on `len(False)` one level down.
    """
    if level == len(variables):
        return False
    variable = variables[level]
    # Try variable=True first (right child), then variable=False (left),
    # matching the original search order.
    for value, side in ((1, 'right'), (-1, 'left')):
        child = Tree()
        setattr(tree, side, child)
        simplified, conflict = _reduceEquation(tree.data, variable, value)
        if not simplified:
            # Every clause satisfied: the formula is satisfiable.
            child.data = True
            return True
        if conflict:
            # Dead branch: an empty clause can never be satisfied.
            child.data = False
            continue
        child.data = simplified
        if evaluateLevel(child, level + 1, variables):
            return True
    return False
def BDD_SAT(equation):
    """Parse a CNF string like "(aVb)^(~bVc)" and test satisfiability.

    Clauses are separated by '^', literals inside the parentheses by 'V',
    and '~' negates a variable.  Each clause becomes a dict mapping a
    variable name to +1 (positive) or -1 (negated).  Prints the variable
    count (tab-terminated, no newline) as a side effect, then returns the
    verdict from evaluateLevel.
    """
    variables = []
    formatted = []
    for clause_text in equation.split("^"):
        # Keep only the text between the parentheses.
        body = clause_text.split("(")[1].split(")")[0]
        clause = {}
        # filter(None, ...) drops empty strings from stray 'V' separators.
        for literal in filter(None, body.split("V")):
            if literal[0] == "~":
                name, value = literal[1:], -1
            else:
                name, value = literal, 1
            # Idiom fix: membership test replaces the original manual
            # index-scan loop over `variables`.
            if name not in variables:
                variables.append(name)
            clause[name] = value
        formatted.append(clause)
    print(str(len(variables)) + "\t", end="")
    root = Tree()
    root.data = formatted
    return evaluateLevel(root, 0, variables)
def printTree(tree, level):
    """Depth-first dump of the tree: one node per line, prefixed with
    "level" and indented by `level` tab characters."""
    if tree is None:
        return
    print("level" + "\t" * level + str(tree.data))
    printTree(tree.left, level + 1)
    printTree(tree.right, level + 1)
# Read one CNF formula from stdin, solve it, and report the wall-clock
# time the solver took (in seconds).
equation = input()
start_time = datetime.datetime.now()
ans = BDD_SAT(equation)
end_time = datetime.datetime.now()
time_diff = (end_time - start_time)
print(time_diff.total_seconds())
2b163dea0a10d4fca2eb2963cec1e8a11604734c | Python | team19hackathon2021/ChRIS_ultron_backEnd | /chris_backend/pacsfiles/models.py | UTF-8 | 4,241 | 2.625 | 3 | [
"MIT"
] | permissive |
from django.db import models
import django_filters
from django_filters.rest_framework import FilterSet
from core.utils import filter_files_by_n_slashes
class PACS(models.Model):
    """A PACS (Picture Archiving and Communication System) source."""
    # Short unique name identifying the PACS that files came from.
    identifier = models.CharField(max_length=20, unique=True)

    def __str__(self):
        return self.identifier
class PACSFile(models.Model):
    """A file received from a PACS, with selected metadata extracted from
    it (field names appear to follow the DICOM attribute keywords)."""
    creation_date = models.DateTimeField(auto_now_add=True)
    # Path of the stored file; doubles as the default sort key (see Meta).
    fname = models.FileField(max_length=512, unique=True)
    PatientID = models.CharField(max_length=100, db_index=True)
    PatientName = models.CharField(max_length=150, blank=True)
    PatientBirthDate = models.DateField(blank=True, null=True)
    PatientAge = models.IntegerField(blank=True, null=True)
    PatientSex = models.CharField(max_length=1, choices=[('M', 'Male'), ('F', 'Female')],
                                  blank=True)
    StudyDate = models.DateField(db_index=True)
    AccessionNumber = models.CharField(max_length=100, blank=True, db_index=True)
    Modality = models.CharField(max_length=15, blank=True)
    ProtocolName = models.CharField(max_length=64, blank=True)
    StudyInstanceUID = models.CharField(max_length=100)
    StudyDescription = models.CharField(max_length=400, blank=True)
    SeriesInstanceUID = models.CharField(max_length=100)
    SeriesDescription = models.CharField(max_length=400, blank=True)
    # The PACS this file was pulled from; deleting it removes its files.
    pacs = models.ForeignKey(PACS, on_delete=models.CASCADE)

    class Meta:
        ordering = ('-fname',)

    def __str__(self):
        return self.fname.name
class PACSFileFilter(FilterSet):
    """django-filter FilterSet exposing query parameters for PACSFile
    listings (date ranges, fname matching, DICOM metadata lookups)."""
    min_creation_date = django_filters.IsoDateTimeFilter(field_name='creation_date',
                                                         lookup_expr='gte')
    max_creation_date = django_filters.IsoDateTimeFilter(field_name='creation_date',
                                                         lookup_expr='lte')
    # Three flavours of fname matching: prefix, exact and substring.
    fname = django_filters.CharFilter(field_name='fname', lookup_expr='startswith')
    fname_exact = django_filters.CharFilter(field_name='fname', lookup_expr='exact')
    fname_icontains = django_filters.CharFilter(field_name='fname',
                                                lookup_expr='icontains')
    # Custom filter: match on the number of '/' in fname (see method below).
    fname_nslashes = django_filters.CharFilter(method='filter_by_n_slashes')
    PatientName = django_filters.CharFilter(field_name='PatientName',
                                            lookup_expr='icontains')
    ProtocolName = django_filters.CharFilter(field_name='ProtocolName',
                                             lookup_expr='icontains')
    StudyDescription = django_filters.CharFilter(field_name='StudyDescription',
                                                 lookup_expr='icontains')
    SeriesDescription = django_filters.CharFilter(field_name='SeriesDescription',
                                                  lookup_expr='icontains')
    pacs_identifier = django_filters.CharFilter(field_name='pacs__identifier',
                                                lookup_expr='exact')
    min_PatientAge = django_filters.NumberFilter(field_name='PatientAge',
                                                 lookup_expr='gte')
    max_PatientAge = django_filters.NumberFilter(field_name='PatientAge',
                                                 lookup_expr='lte')

    class Meta:
        model = PACSFile
        fields = ['id', 'min_creation_date', 'max_creation_date', 'fname', 'fname_exact',
                  'fname_icontains', 'fname_nslashes', 'PatientID', 'PatientName',
                  'PatientSex', 'PatientAge', 'min_PatientAge', 'max_PatientAge',
                  'PatientBirthDate', 'StudyDate', 'AccessionNumber', 'ProtocolName',
                  'StudyInstanceUID', 'StudyDescription', 'SeriesInstanceUID',
                  'SeriesDescription', 'pacs_identifier']

    def filter_by_n_slashes(self, queryset, name, value):
        """
        Custom method to return the files that have the queried number of slashes in
        their fname property. If the queried number ends in 'u' or 'U' then only one
        file per each last "folder" in the path is returned (useful to efficiently get
        the list of immediate folders under the path).
        """
        return filter_files_by_n_slashes(queryset, value)
| true |
a1afae8c693d1029dfb3d9ac9c326a2c0bb6910d | Python | meehawk/speechmix | /models/envnet.py | UTF-8 | 1,147 | 2.671875 | 3 | [] | no_license | """
Implementation of EnvNet [Tokozume and Harada, 2017]
opt.fs = 16000
opt.inputLength = 24014
"""
import chainer
import chainer.functions as F
import chainer.links as L
from models.convbnrelu import ConvBNReLU
class EnvNet(chainer.Chain):
    """EnvNet audio classifier (see the module docstring for the expected
    sampling rate and input length)."""

    def __init__(self, n_classes):
        # Two 1-D (temporal) conv layers, two 2-D conv layers on the
        # learned representation, then a 3-layer MLP classifier.
        super(EnvNet, self).__init__(
            conv1=ConvBNReLU(1, 40, (1, 8)),
            conv2=ConvBNReLU(40, 40, (1, 8)),
            conv3=ConvBNReLU(1, 50, (8, 13)),
            conv4=ConvBNReLU(50, 50, (1, 5)),
            # 50 channels * 11 * 14 spatial positions after the poolings
            # below — TODO confirm against the documented input length.
            fc5=L.Linear(50 * 11 * 14, 4096),
            fc6=L.Linear(4096, 4096),
            fc7=L.Linear(4096, n_classes)
        )
        # Toggled externally to switch batch-norm/dropout to eval mode.
        self.train = True

    def __call__(self, x):
        h = self.conv1(x, self.train)
        h = self.conv2(h, self.train)
        # Pool heavily along the last (time) axis.
        h = F.max_pooling_2d(h, (1, 160))
        # Treat the learned channels as a 2-D "image" for conv3/conv4.
        h = F.swapaxes(h, 1, 2)
        h = self.conv3(h, self.train)
        h = F.max_pooling_2d(h, 3)
        h = self.conv4(h, self.train)
        h = F.max_pooling_2d(h, (1, 3))
        h = F.dropout(F.relu(self.fc5(h)), train=self.train)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        return self.fc7(h)
| true |
658f27f7428fcde71a2e8762709b81a58c49ef74 | Python | sskimdev/cvmfs-docker-worker | /webhook.py | UTF-8 | 2,168 | 2.53125 | 3 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | import cvmfs
import re
def job(payload):
    """Dispatch a webhook payload to the CVMFS publisher.

    GitLab payloads carry an 'events' list; DockerHub payloads carry a
    'repository' key.  Returns True when the payload was recognised,
    None otherwise.
    """
    if "events" in payload:
        # GitLab registry notifications: publish every accepted tag push.
        rootdir = ''
        for event in payload['events']:
            if not is_tag_event(event):
                continue
            image_info = get_image_info(event)
            if is_accepted_tag(image_info.tag):
                cvmfs.publish_docker_image(image_info,
                        'ligo-containers.opensciencegrid.org', rootdir)
        return True
    if "repository" in payload:
        # DockerHub push notification: a single image, no digest available.
        rootdir = 'dockerhub'
        repository = payload['repository']
        image_info = cvmfs.ImageInfo('', repository['namespace'],
                                     repository['name'], None,
                                     payload['push_data']['tag'])
        cvmfs.publish_docker_image(image_info,
                'ligo-containers.opensciencegrid.org', rootdir)
        return True
    return None
def is_tag_event(event):
    """Return True when `event` is a registry push of a v2 manifest tag.

    Malformed events (wrong type, missing keys) yield False rather than
    raising.
    """
    try:
        target = event['target']
        return (event['action'] == "push" and "tag" in target and
                target['mediaType'] == "application/vnd.docker.distribution.manifest.v2+json")
    # Narrowed from a bare `except:` so genuine bugs (NameError) and
    # KeyboardInterrupt/SystemExit are no longer swallowed.
    except (KeyError, TypeError):
        return False
def is_accepted_tag(tag):
    """Decide whether a Docker tag should be published."""
    # Fixed tag names that are always accepted.
    if tag in ('latest', 'nightly', 'master', 'production'):
        return True
    # (1) semver-style version tags (optional leading 'v'),
    # (2) ISO dates (no time) with an optional trailing letter.
    patterns = ['^v?([0-9]+)\.([0-9]+)\.([0-9]+)(?:-([0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?(?:\+[0-9A-Za-z-]+)?$',
                '^\d{4}\-\d\d\-\d\d[a-zA-Z]?$']
    for pattern in patterns:
        if re.compile(pattern).match(tag):
            return True
    return False
def get_image_info(event):
    """Build a cvmfs.ImageInfo from a registry event, or None if malformed.

    The repository "namespace/project" string is split on its last '/':
    everything before it is the namespace, everything after the project.
    """
    try:
        repository = event['target']['repository']
        return cvmfs.ImageInfo(event['request']['host'],
                               repository.rpartition("/")[0],
                               repository.rpartition("/")[2],
                               event['target']['digest'],
                               event['target']['tag'])
    # Narrowed from a bare `except:`; malformed events yield None while
    # genuine bugs and KeyboardInterrupt now surface.
    except (KeyError, TypeError, AttributeError):
        return None
| true |
cb0388cd5da5f112e4d7ba0aff32900d1e9cb34d | Python | kfiramar/Homework | /1.a.py | UTF-8 | 200 | 3.390625 | 3 | [] | no_license |
# NOTE(review): this function shadows the builtin sum(); not renamed here
# because the call below (and any importer) uses this name.
def sum():
    # Read numbers until the user types 'stop', then print their total.
    reshima = input("enter num ")
    sum2 = 0
    while(reshima != 'stop'):
        sum2 += int(reshima)
        # NOTE(review): prompt "enter num 3" looks like a typo — confirm.
        reshima = input("enter num 3")
    print("the sum is:" + str(sum2))
sum()
5d8be7e02270138ba79d97530673815e1dedc96d | Python | Raghurambsv/16April2019Questcode | /16Apr2019/[spyder]Validation_Script_for_TextClassficationModel.py | UTF-8 | 5,537 | 2.734375 | 3 | [] | no_license | #IMPORTING THE PACKAGES
import re
import glob
import pickle
import pandas as pd
import os
# Filename prefix for the pickle files; uncomment the line below to load
# the "Final_Model_" variants instead.
#part='Final_Model_'
part=''
# NOTE(review): files are opened without being closed (no `with`), and
# pickle.load must only ever be fed trusted files — unpickling can
# execute arbitrary code.
#Load the Pickled Model to respective Target variables
loaded_model_origin = pickle.load(open('./PickleFiles/'+part+'Originator.pkl', 'rb'))
loaded_model_disc = pickle.load(open('./PickleFiles/'+part+'Discipline.pkl', 'rb'))
loaded_model_doc_type_and_subtype = pickle.load(open('./PickleFiles/'+part+'combi.pkl', 'rb'))
#Load the Pickled LabelEncoder files & Decode them respectively
Label_Org=pickle.load(open('./PickleFiles/labelencoder/'+part+'Orginator_LableEncoder.pkl','rb'))
Label_Disc=pickle.load(open('./PickleFiles/labelencoder/'+part+'Discipline_LableEncoder.pkl','rb'))
Label_DTST=pickle.load(open('./PickleFiles/labelencoder/'+part+'Type_and_Subtype_LableEncoder.pkl','rb'))
#Cleaning/Preprocessing the Data
def clean_text(text):
    """Normalise raw document text: lowercase, keep only alphanumerics,
    drop one-character words, and collapse whitespace."""
    cleaned = str(text).lower()
    cleaned = re.sub(r'[^a-zA-Z0-9 \/]*', '', cleaned)
    # Dots/dashes/underscores were already stripped by the substitution
    # above; these passes are kept for parity with the original pipeline.
    for run_pattern in ('[\.]{2,}', '[\-]{2,}', '[\_]{2,}'):
        cleaned = re.sub(run_pattern, '', cleaned)
    cleaned = re.sub(r'[\s]+', ' ', cleaned)
    words = [word for word in cleaned.split(' ') if not len(word) == 1]
    # The list-repr round trip plus the \W substitution re-joins the
    # surviving words with spaces (and drops the remaining slashes).
    cleaned = re.sub('\W', ' ', str(words))
    cleaned = re.sub('\s+', ' ', cleaned)
    return cleaned.strip(' ')
#Predicting the TextFile Category for the Fresh Textfiles
# Accumulator frame (df) and one-row scratch frame (df1) for predictions.
df = pd.DataFrame(columns=['Originator', 'Discipline', 'Doc_Type_and_SubType', 'Originator Predicted','Discipline Predicted','TypeSubtype Predicted'])
df1 = pd.DataFrame(columns=['Originator', 'Discipline', 'Doc_Type_and_SubType', 'Originator Predicted','Discipline Predicted','TypeSubtype Predicted'])
print("\nThe text files considered for this run are as below:")
print("------------------------------------------------------")
count=0
# Score each .txt file with the three classifiers and collect the results.
for txtfile in os.listdir("./Fresh_Text_Files/"):
    if txtfile.endswith(".txt"):
        txtfile=os.path.join("./Fresh_Text_Files/", txtfile)
        print(txtfile.split('/')[-1])
        with open(txtfile,'r',encoding="utf-8") as file:
            data=file.read().replace('\n', '')
        data=clean_text(data)
        data=pd.Series(data)
        df1['Originator']=loaded_model_origin.predict(data)
        df1['Discipline']=loaded_model_disc.predict(data)
        df1['Doc_Type_and_SubType']=loaded_model_doc_type_and_subtype.predict(data)
        # Highest class probability = the model's confidence in its pick.
        df1['Originator_prob']= max(loaded_model_origin.predict_proba(data).round(2).tolist()[0])
        df1['Discipline_prob']= max(loaded_model_disc.predict_proba(data).round(2).tolist()[0])
        df1['Doc_Type_and_SubType_prob']= max(loaded_model_doc_type_and_subtype.predict_proba(data).round(2).tolist()[0])
        # NOTE(review): the lambdas below ignore `x` and inverse-transform
        # the whole column; labels above a cutoff are bucketed as 'Others'.
        df1['Originator Predicted']=df1['Originator'].apply(lambda x: Label_Org.inverse_transform(df1['Originator']))
        df1['Discipline Predicted']=df1['Discipline'].apply(lambda x: 'Others' if x > 7 else Label_Disc.inverse_transform(df1['Discipline']))
        df1['TypeSubtype Predicted']=df1['Doc_Type_and_SubType'].apply(lambda x: 'Others' if x > 7 else Label_DTST.inverse_transform(df1['Doc_Type_and_SubType']))
        df1['FileName']=txtfile.split('/')[-1]
        # NOTE(review): DataFrame.append is removed in pandas 2.x —
        # migrate to pd.concat when upgrading.
        df=df.append(df1,sort=None)
        count=count+1
print("##################################")
print("Total No of files processed :",count)
print("##################################")
#Reset the index
df.reset_index(drop=True,inplace=True)
dfinal = pd.DataFrame(columns=['Original_FileName','Originator Predicted','Discipline Predicted','TypeSubtype Predicted','Filename predicted',' <Originator-Confidence> ',' <Discipline-Confidence> ',' <TypeSubtype-Confidence> '])
def output_extract(text):
text=str(text)
text=text.replace("[","")
text=text.replace("]","")
text=text.replace("'","")
return str(text)
# Assemble the final report: cleaned predictions, a suggested filename,
# and "<label>- <probability>" confidence columns, then write the CSV.
dfinal['Original_FileName']=df['FileName']
dfinal['Originator Predicted']=df['Originator Predicted'].apply(output_extract)
dfinal['Discipline Predicted']=df['Discipline Predicted'].apply(output_extract)
dfinal['TypeSubtype Predicted']=df['TypeSubtype Predicted'].apply(output_extract)
dfinal[' <Originator-Confidence> ']=df['Originator_prob']
dfinal[' <Discipline-Confidence> ']=df['Discipline_prob']
dfinal[' <TypeSubtype-Confidence> ']=df['Doc_Type_and_SubType_prob']
dfinal['Filename predicted']=""
# Suggested name: BIRF-<originator>-<discipline>-<type/subtype>-<row>.pdf
for i, row in df.iterrows():
    index=str(i)
    dfinal.at[i,'Filename predicted'] = 'BIRF-'+ dfinal.at[i,'Originator Predicted'] +'-'+ dfinal.at[i,'Discipline Predicted'] +'-'+ dfinal.at[i,'TypeSubtype Predicted']+'-'+index+'.pdf'
# NOTE(review): the result of dropna is discarded — assign it back (or
# pass inplace=True) if dropping incomplete rows was intended.
dfinal.dropna(how='any')
dfinal[' <Originator-Confidence> ']=dfinal[' <Originator-Confidence> '].map(str)
dfinal[' <Originator-Confidence> '] = dfinal[['Originator Predicted', ' <Originator-Confidence> ']].apply(lambda x: '- '.join(x), axis=1)
dfinal[' <Discipline-Confidence> ']=dfinal[' <Discipline-Confidence> '].map(str)
dfinal[' <Discipline-Confidence> '] = dfinal[['Discipline Predicted', ' <Discipline-Confidence> ']].apply(lambda x: '- '.join(x), axis=1)
dfinal[' <TypeSubtype-Confidence> ']=dfinal[' <TypeSubtype-Confidence> '].map(str)
dfinal[' <TypeSubtype-Confidence> '] = dfinal[['TypeSubtype Predicted', ' <TypeSubtype-Confidence> ']].apply(lambda x: '- '.join(x), axis=1)
dfinal.to_csv('./Fresh_Text_Files/'+part+'Prediction_output.csv',index=False,encoding="utf-8")
| true |
84632829971bbd4f36ff7358a13efb0c55a2aff5 | Python | luyuehm/scrapy_v1 | /venv/lib/python3.7/site-packages/xlwings/pro/tables.py | UTF-8 | 964 | 2.625 | 3 | [] | no_license | try:
import pandas as pd
except ImportError:
pd = None
def update(self, data):
type_error_msg = 'Currently, only pandas DataFrames are supported by update'
if pd:
if not isinstance(data, pd.DataFrame):
raise TypeError(type_error_msg)
col_diff = len(self.range.columns) - len(data.columns) - len(data.index.names)
nrows = len(self.data_body_range.rows) if self.data_body_range else 1
row_diff = nrows - len(data.index)
if col_diff > 0:
self.range[:, len(self.range.columns) - col_diff:].delete()
if row_diff > 0 and self.data_body_range:
self.data_body_range[len(self.data_body_range.rows) - row_diff:, :].delete()
self.header_row_range.value = list(data.index.names) + list(data.columns)
self.range[1:, :].options(index=True, header=False).value = data
return self
else:
raise TypeError(type_error_msg)
| true |
8f41f4aa60c2e26f88ef9bba5758781a53d7184c | Python | hitechparadigm/Programming-for-Everybody | /10_00/10.py | UTF-8 | 503 | 3.109375 | 3 | [] | no_license | fhand = open('romeo.txt')
counts = dict()
for line in fhand:
words = line.split()
for word in words:
counts[word] = counts.get(word, 0) + 1
#print(counts)
lst = list()
for key, val in counts.items():
newtup = (val, key)
lst.append(newtup)
#print(lst)
lst = sorted(lst, reverse=True)
#print(lst)
#print(sorted([(v,k) for k,v in counts.items()], reverse=True))
for val, key in lst[:10]:
print(key, val)
days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
print(days[2])
| true |
95be5e0983ff1c4d4a9e20109308607e793e3c85 | Python | bundy-dns/bundy | /src/lib/python/bundy/server_common/datasrc_clients_mgr.py | UTF-8 | 7,864 | 2.59375 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"BSL-1.0"
] | permissive | # Copyright (C) 2013 Internet Systems Consortium, Inc. ("ISC")
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import bundy.dns
import bundy.datasrc
import threading
import json
class ConfigError(Exception):
    """Raised when the data source configuration is invalid."""
class DataSrcClientsMgr:
    """A thread-safe container of per-RR-class data source client lists.

    Holds one bundy.datasrc.ConfigurableClientList per RR class.  A
    configuration callback can rebuild the whole set via reconfigure()
    while other threads keep reading it through get_client_list() or
    get_clients_map().
    """
    def __init__(self, use_cache=False):
        """Constructor.

        Parameter:
          use_cache (bool): if True, enable in-memory (zone) cache when the
            lists are (re)configured.  Most applications leave this False;
            the memory (cache) manager, which by definition deals with
            in-memory data, is the expected exception.
        """
        self.__use_cache = use_cache
        # Map from RRClass to ConfigurableClientList.  Resetting this map is
        # protected by __map_lock.  Note that the lock does not protect
        # "updates" of the map content (not a problem today, but operations
        # such as reloading particular in-memory zones would need an
        # additional layer of protection).
        self.__clients_map = {}
        self.__map_lock = threading.Lock()
        # The generation ID of the configuration corresponding to the
        # current __clients_map.
        self.__gen_id = None
    def get_clients_map(self):
        """Return a (generation_id, dict) snapshot of the client lists.

        The dict maps RRClass to ConfigurableClientList as of the call; it
        is safe to call while another thread runs reconfigure().  The
        returned mapping must be treated as frozen by the caller (the
        mapped objects themselves may be used normally).
        """
        with self.__map_lock:
            return (self.__gen_id, self.__clients_map)
    def get_client_list(self, rrclass):
        """Return the configured ConfigurableClientList for *rrclass*.

        Returns None when no list is configured for the class.  Safe to
        call concurrently with reconfigure(), and the returned list stays
        usable even if reconfigure() runs afterwards.  Further access to
        the returned list is NOT protected here: it is the caller's
        responsibility to synchronize find() and ZoneFinder usage across
        threads (ZoneUpdater/ZoneIterator/ZoneJournalReader may be used in
        parallel).

        Parameter:
          rrclass (bundy.dns.RRClass): RR class of the list to return.
        """
        with self.__map_lock:
            client_list = self.__clients_map.get(rrclass)
            return client_list
    def reconfigure(self, new_config, config_data):
        """(Re)build the entire set of client lists from *config_data*.

        Builds a fresh set of ConfigurableClientList objects for the
        'data_sources' module configuration and atomically swaps it in.
        Offers strong exception safety: the old set stays intact unless the
        new one is fully built.  Any error is re-raised as ConfigError.
        Multiple concurrent callers are data-safe but ordering is then
        unspecified.

        Parameters:
          new_config (dict): the configuration diff (unused here).
          config_data (bundy.config.ConfigData): the latest full config
            data for the data_sources module.
        """
        try:
            new_map = {}
            # We only refer to config_data, not new_config (the diff from
            # the previous config): the latter may be empty for the initial
            # default configuration while the former works for all cases.
            for rrclass_cfg, class_cfg in \
                    config_data.get_value('classes')[0].items():
                rrclass = bundy.dns.RRClass(rrclass_cfg)
                new_client_list = bundy.datasrc.ConfigurableClientList(rrclass)
                new_client_list.configure(json.dumps(class_cfg),
                                          self.__use_cache)
                new_map[rrclass] = new_client_list
            with self.__map_lock:
                self.__clients_map = new_map
                self.__gen_id = config_data.get_value('_generation_id')[0]
        except Exception as ex:
            # Catch all types of exceptions as a whole: there won't be much
            # granularity for exceptions raised from the C++ module anyway.
            raise ConfigError(ex)
| true |
c705ea009d61cf50191cabf5d84aa2dc52da8bf8 | Python | m-mohsin-zafar/mr-chef | /mr-chef-pi/mr-chef/RecipeLoader/__init__.py | UTF-8 | 539 | 2.671875 | 3 | [] | no_license | from RecipeLoader import Loader
if __name__ == '__main__':
    # Smoke test: load a recipe and print the servo angles for each step.
    # NOTE(review): Recipe_Loader's API is defined in the RecipeLoader
    # package; the attribute semantics below are inferred from usage.
    recipe = Loader.Recipe_Loader()
    recipe.load_recipe('test_recipe')
    # instructions[0] is presumably a comma-separated step list - confirm.
    instructions = recipe.instructions[0].split(",")
    for x in range(instructions.__len__()):
        # Each step looks like "<verb> <name>"; 'add' uses ingredient
        # angles, 'switch'/'place' use utensil angles.
        if instructions[x].split(" ")[0] == "add":
            print(recipe.ing_angles[instructions[x].split(" ")[1]].split(":"))
        elif instructions[x].split(" ")[0] == "switch" or instructions[x].split(" ")[0] == "place":
            print(recipe.utn_angles[instructions[x].split(" ")[1]].split(":"))
| true |
af8bf999286e7ca5dd64e1b60c9863f640bc7104 | Python | jiaziming/new-old | /day4/re正则表达式.py | UTF-8 | 460 | 2.671875 | 3 | [] | no_license | #!/usr/bin/python
# -*-coding:utf-8-*-
import re
# re.match/re.findall signature: (pattern, data_source)
# pattern = rule, second argument = the data to scan
# NOTE(review): every assignment below overwrites `m`; only the result of
# the LAST findall() call is printed at the end.
m = re.match('ab','abasdqwe12easd')
#print(m.group())
m = re.match('[0-9]','1asdnio12a')
m = re.match('[0-9]{0,15}','12893ndmao12')
m = re.match('[0-9]{10}','12893ndmao12')
m = re.findall('[0-9]{1,10}','12893ndmao12')
m = re.findall('[a-zA-Z]{1,10}','12893ndmao12')
m = re.findall(".*",'12893ndmao12')
m = re.findall(".+",'12893ndmao12')
# findall returns a (possibly empty) list, so this prints it when non-empty.
if m:
    print(m)
7630fe39517ea75e3a72fda5b71bb01045f513e1 | Python | andyhou2000/exercises | /chapter-5/ex-5-2.py | UTF-8 | 2,023 | 4.34375 | 4 | [] | no_license | # Programming Exercise 5-2
#
# Program to calculate final purchase details.
# This program takes a purchase amount from a user,
# then calculates state tax, county tax and total tax,
# and passes them to a function to be totaled
# and displayed
# Global constants for the state and county tax rates
# define the main function
# Define local float variables for purchase, state tax and county tax
# Get the purchase amount from the user
# Calculate the state tax using the global constant for state tax rate
# Calculate the county tax using the global constant for county tax rate
# Call the sale details function, passing the purchase, state tax and county tax
# define a function to display purchase details
# this function accepts purchase, stateTax, and countyTax as arguments,
# calculates the total tax and sale total,
# then displays the purchase details
# Define local float variables for total tax and sale total
# Calculate the total tax
# Calculate the total sale
# Display the purchase details, including purchase, state tax, county tax,
# total tax, and sale total, each on a line. Format floats to 2 decimal places.
# Call the main function to start the program.
# Global constants for the state and county tax rates.
# 0.06 and 0.02 are already float literals, so the original
# `x = float(x)` round-trips were redundant and have been removed.
stateTaxRate = 0.06
countyTaxRate = 0.02
def main():
    """Ask the user for a purchase amount and show the tax breakdown."""
    purchase = float(input("Input purchase amount: $"))
    sale_details(purchase)
def sale_details(purchase):
    """Compute the taxes on *purchase* and print the full sale breakdown.

    Uses the module-level stateTaxRate / countyTaxRate constants; all
    amounts are printed with two decimal places.
    """
    state_tax = purchase * stateTaxRate
    county_tax = purchase * countyTaxRate
    combined_tax = state_tax + county_tax
    grand_total = combined_tax + purchase
    for label, amount in (("State tax: $", state_tax),
                          ("County tax: $", county_tax),
                          ("Total tax: $", combined_tax),
                          ("Sale total: $", grand_total)):
        print(label, format(amount, '.2f'))
main() | true |
6a831000fd96dc19cc264f553fa441b396092704 | Python | ZhiyuSun/leetcode-practice | /1001-/1254_统计封闭岛屿的数目.py | UTF-8 | 1,468 | 3.46875 | 3 | [] | no_license | """
有一个二维矩阵 grid ,每个位置要么是陆地(记号为 0 )要么是水域(记号为 1 )。
我们从一块陆地出发,每次可以往上下左右 4 个方向相邻区域走,能走到的所有陆地区域,我们将其称为一座「岛屿」。
如果一座岛屿 完全 由水域包围,即陆地边缘上下左右所有相邻区域都是水域,那么我们将其称为 「封闭岛屿」。
请返回封闭岛屿的数目。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/number-of-closed-islands
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
from typing import List
# 2021.04.19 直接抄了题解,先处理边界,后处理内部
class Solution:
    def closedIsland(self, grid: List[List[int]]) -> int:
        """Count land regions (0-cells) that never touch the grid border."""
        rows, cols = len(grid), len(grid[0])

        def flood(r: int, c: int) -> None:
            # Iterative flood fill: turn one whole land region into water.
            stack = [(r, c)]
            while stack:
                cr, cc = stack.pop()
                if not (0 <= cr < rows and 0 <= cc < cols):
                    continue
                if grid[cr][cc] == 1:
                    continue
                grid[cr][cc] = 1
                stack.extend(((cr - 1, cc), (cr + 1, cc),
                              (cr, cc - 1), (cr, cc + 1)))

        # First drown every region that touches the border ...
        for r in range(rows):
            flood(r, 0)
            flood(r, cols - 1)
        for c in range(cols):
            flood(0, c)
            flood(rows - 1, c)
        # ... so every region that is left must be fully enclosed.
        closed = 0
        for r in range(rows):
            for c in range(cols):
                if grid[r][c] == 0:
                    flood(r, c)
                    closed += 1
        return closed
| true |
21eed12ac77a48a101b5d9ce17b11a37bb5b89f5 | Python | tuepsky/BackPropLab | /src/BplTraining.py | UTF-8 | 13,411 | 2.609375 | 3 | [] | no_license | import tkinter as tk
import tkinter.scrolledtext as tkscrolled
import numpy as np
import time
import matplotlib.pyplot as plt
import BplGlobal as g
from BplNeuroNet import NeuronNet
class Training(tk.Frame):
column_1 = 20
column_2 = column_1 + 150
column_3 = column_2 + 110
column_4 = column_3 + 150
column_5 = column_4 + 110
column_6 = column_5 + 140
column_7 = column_6 + 60
lineSpace = 40
header_line = 10
row_1 = 60
def run_training(self):
g.gui.update_status('Training in progress, please wait!')
# Check parameter
try:
alpha = float(self.alpha.get())
except ValueError:
tk.messagebox.showinfo('Setup error:', 'Alpha has an invalid value')
return
try:
epochs = int(self.epochs.get())
except ValueError:
tk.messagebox.showinfo('Setup error:', 'Epochs has an invalid value')
return
try:
hidden_layer_size = int(self.hiddenLayerSize.get())
except ValueError:
tk.messagebox.showinfo('Setup error:', 'Hidden layer size has an invalid value')
return
try:
output_layer_size = int(g.outputLayerSize.get())
except ValueError:
tk.messagebox.showinfo('Setup error:', 'Output layer size has an invalid value')
return
try:
random_seed = int(self.randomSeed.get())
except ValueError:
tk.messagebox.showinfo('Setup error:', 'Random seed has an invalid value')
return
if not g.neuronNet:
num_rows = int(g.numRows.get())
num_cols = int(g.numCols.get())
g.neuronNet = NeuronNet(
num_rows * num_cols, # = input_layer_size \
hidden_layer_size,
output_layer_size,
random_seed)
all_pattern = g.allTrainingPattern
self.all_errors = []
start_time = time.clock()
for e in range(epochs):
self.current_epoch.set(str(e+1))
self.update()
errors = g.neuronNet.train(all_pattern, alpha)
self.all_errors.append(errors)
elapsed = time.clock() - start_time
self.training_time.set(int(elapsed))
self.update()
self.last_error.set("%6.4f" % self.all_errors[-1])
g.gui.update_status('')
    def run_test(self):
        """Run all loaded test patterns through the trained network.

        Updates the performance / failure-rate read-outs and lists the
        1-based indexes of the failing patterns in the scrolled text box.
        Aborts with a message box when no test data is loaded.
        """
        if len(g.allTestPattern) == 0:
            tk.messagebox.showinfo('Cannot run test:', 'No test data loaded')
            return
        all_pattern = g.allTestPattern
        pattern_index = 1
        failing_pattern_indexes = []
        for p in all_pattern:
            # NeuronNet.test() presumably returns a truthy pass/fail flag
            # - confirm against BplNeuroNet.
            passed = g.neuronNet.test(p)
            if not passed:
                failing_pattern_indexes.append(str(pattern_index))
            pattern_index += 1
        failure_rate = len(failing_pattern_indexes) / len(all_pattern) * 100
        self.performance.set("%4.2f" % (100 - failure_rate) + "%")
        self.failure_rate.set("%4.2f" % failure_rate + "%")
        failing_pattern = ", ".join(failing_pattern_indexes)
        # The text widget is kept DISABLED so the user cannot edit it; it
        # must be toggled to NORMAL to replace its content.
        self.text_value_failing_records.config(state=tk.NORMAL)
        self.text_value_failing_records.delete(1.0, tk.END)
        self.text_value_failing_records.insert(tk.END, failing_pattern)
        self.text_value_failing_records.config(state=tk.DISABLED)
def show_error_curve(self):
X = np.linspace(1, len(self.all_errors), len(self.all_errors))
plt.plot(X, self.all_errors)
plt.show()
def reset(self):
g.neuronNet = None
self.all_errors = None
self.current_epoch.set("")
self.last_error.set("")
self.training_time.set("")
    def __init__(self, notebook):
        """Build the Training tab: Tk variables plus the three-column layout.

        Left column: network/topology setup entries.  Middle column:
        training controls and progress read-outs.  Right column: test
        controls and results.  Widget coordinates come from the class-level
        column_*/row layout constants.
        """
        # Shared Tk variables published through the global module `g` so
        # other tabs can read/write them.
        g.numRows = tk.StringVar()
        g.numCols = tk.StringVar()
        g.outputLayerSize = tk.StringVar()
        g.numberTestRecords = tk.StringVar()
        g.numberTrainingRecords = tk.StringVar()
        # Tab-local parameters with their defaults.
        self.alpha = tk.StringVar()
        self.alpha.set("1")
        self.epochs = tk.StringVar()
        self.epochs.set("50")
        self.randomSeed = tk.StringVar()
        self.randomSeed.set("1")
        self.hiddenLayerSize = tk.StringVar()
        self.hiddenLayerSize.set("20")
        self.neuron_net_initialized = False
        self.all_errors = None
        # Read-only result displays.
        self.current_epoch = tk.StringVar()
        self.last_error = tk.StringVar()
        self.performance = tk.StringVar()
        self.failure_rate = tk.StringVar()
        self.training_time = tk.StringVar()
        super(Training, self).__init__(notebook, background=g.bgDark)
        # y-coordinates of the 10 layout rows.
        self.rows = [self.row_1 + n * self.lineSpace for n in range(10)]
        # Left Column
        lbl_header_left = tk.Label(self, text='Setup', font=g.fontTitle, background=g.bgDark)
        lbl_header_left.place(x=self.column_1, y=self.header_line)
        # Input Layer Width
        lbl_input_layer_width = tk.Label(self, text='Input Layer Width', font=g.fontLabel, background=g.bgDark)
        lbl_input_layer_width.place(x=self.column_1, y=self.rows[0])
        lbl_value_input_layer_width = tk.Label(self, textvariable=g.numCols,
                                               font=g.fontLabel, width=5, background=g.bgLight)
        lbl_value_input_layer_width.place(x=self.column_2, y=self.rows[0])
        # Input Layer Height
        lbl_input_layer_height = tk.Label(self, text='Input Layer Height', font=g.fontLabel, background=g.bgDark)
        lbl_input_layer_height.place(x=self.column_1, y=self.rows[1])
        lbl_value_input_layer_height = tk.Label(self, textvariable=g.numRows,
                                                font=g.fontLabel, width=5, background=g.bgLight)
        lbl_value_input_layer_height.place(x=self.column_2, y=self.rows[1])
        # Output layer size
        lbl_output_layer_size = tk.Label(self, text='Output Layer Size', font=g.fontLabel, background=g.bgDark)
        lbl_output_layer_size.place(x=self.column_1, y=self.rows[2])
        val_output_layer_size = tk.Label(self, textvariable=g.outputLayerSize, justify=tk.CENTER,
                                         font=g.fontLabel, width=5, background=g.bgLight)
        val_output_layer_size.place(x=self.column_2, y=self.rows[2])
        # Hidden layer size
        lbl_hidden_layer_size = tk.Label(self, text='Hidden Layer Size', font=g.fontLabel, background=g.bgDark)
        lbl_hidden_layer_size.place(x=self.column_1, y=self.rows[3])
        e_hidden_layer_size = tk.Entry(self, textvariable=self.hiddenLayerSize, justify=tk.CENTER,
                                       font=g.fontLabel, width=5, background=g.bgBlue)
        e_hidden_layer_size.place(x=self.column_2, y=self.rows[3])
        # Alpha
        lbl_step_width = tk.Label(self, text='Step Width (alpha)', font=g.fontLabel, background=g.bgDark)
        lbl_step_width.place(x=self.column_1, y=self.rows[4])
        e_alpha = tk.Entry(self, textvariable=self.alpha, justify=tk.CENTER,
                           font=g.fontLabel, width=5, background=g.bgBlue)
        e_alpha.place(x=self.column_2, y=self.rows[4])
        # Epochs
        lbl_epochs = tk.Label(self, text='Epochs', font=g.fontLabel, background=g.bgDark)
        lbl_epochs.place(x=self.column_1, y=self.rows[5])
        e_epochs = tk.Entry(self, textvariable=self.epochs, justify=tk.CENTER,
                            font=g.fontLabel, width=5, background=g.bgBlue)
        e_epochs.place(x=self.column_2, y=self.rows[5])
        # Random Seed
        lbl_random = tk.Label(self, text='Random Seed', font=g.fontLabel, background=g.bgDark)
        lbl_random.place(x=self.column_1, y=self.rows[6])
        e_random = tk.Entry(self, textvariable=self.randomSeed, justify=tk.CENTER,
                            font=g.fontLabel, width=5, background=g.bgBlue)
        e_random.place(x=self.column_2, y=self.rows[6])
        # Middle Column ==============================================================
        lbl_header_middle = tk.Label(self, text='Train', font=g.fontTitle, background=g.bgDark)
        lbl_header_middle.place(x=self.column_3, y=self.header_line)
        # Run button
        button_run = tk.Button(self, text="Run Training", width=21,
                               font=g.fontLabel, background=g.bgDark, command=self.run_training)
        button_run.place(x=self.column_3, y=self.rows[0] - 5)
        # Number of training records
        # NOTE(review): the local names `lbl_failure_rate`,
        # `lbl_value_failure_rate` and `button_run` are reused several times
        # below for unrelated widgets; harmless (the widgets stay alive via
        # Tk), but confusing to read.
        lbl_failure_rate = tk.Label(self, text='Training Records', font=g.fontLabel, background=g.bgDark)
        lbl_failure_rate.place(x=self.column_3, y=self.rows[1])
        lbl_value_failure_rate = tk.Label(self, textvariable=g.numberTrainingRecords,
                                          font=g.fontLabel, width=5, background=g.bgLight)
        lbl_value_failure_rate.place(x=self.column_4, y=self.rows[1])
        # Current Epoch
        lbl_current_epoch = tk.Label(self, text='Current Epoch', font=g.fontLabel, background=g.bgDark)
        lbl_current_epoch.place(x=self.column_3, y=self.rows[2])
        lbl_value_current_epoch = tk.Label(self, textvariable=self.current_epoch,
                                           font=g.fontLabel, width=5, background=g.bgLight)
        lbl_value_current_epoch.place(x=self.column_4, y=self.rows[2])
        # Last error
        lbl_last_error = tk.Label(self, text='Last Error', font=g.fontLabel, background=g.bgDark)
        lbl_last_error.place(x=self.column_3, y=self.rows[3])
        lbl_value_last_error = tk.Label(self, textvariable=self.last_error,
                                        font=g.fontLabel, width=5, background=g.bgLight)
        lbl_value_last_error.place(x=self.column_4, y=self.rows[3])
        # Training duration
        lbl_training_duration = tk.Label(self, text='Time Spent [sec]', font=g.fontLabel, background=g.bgDark)
        lbl_training_duration.place(x=self.column_3, y=self.rows[4])
        lbl_value_training_duration = tk.Label(self, textvariable=self.training_time,
                                               font=g.fontLabel, width=5, background=g.bgLight)
        lbl_value_training_duration.place(x=self.column_4, y=self.rows[4])
        # Please stand by
        self.lbl_please_stand_by = tk.Label(self, text='', fg='yellow', font=g.fontLabel, background=g.bgDark)
        self.lbl_please_stand_by.place(x=self.column_3, y=self.rows[5])
        # Show Error Curve
        button_error_curve = tk.Button(self, text="Show Error Curve", width=21, font=g.fontLabel, background=g.bgDark,
                                       command=self.show_error_curve)
        button_error_curve.place(x=self.column_3, y=self.rows[6])
        # Reset button
        button_reset = tk.Button(self, text="Reset Neural Network", width=21,
                                 font=g.fontLabel, background=g.bgDark, command=self.reset)
        button_reset.place(x=self.column_3, y=self.rows[7])
        # Right Column ==============================================================
        lbl_header_right = tk.Label(self, text='Test', font=g.fontTitle, background=g.bgDark)
        lbl_header_right.place(x=self.column_5, y=self.header_line)
        # Run button
        button_run = tk.Button(self, text="Run Test", width=21,
                               font=g.fontLabel, background=g.bgDark, command=self.run_test)
        button_run.place(x=self.column_5, y=self.rows[0] - 5)
        # Number of test records
        lbl_failure_rate = tk.Label(self, text='Test Records', font=g.fontLabel, background=g.bgDark)
        lbl_failure_rate.place(x=self.column_5, y=self.rows[1])
        lbl_value_failure_rate = tk.Label(self, textvariable=g.numberTestRecords,
                                          font=g.fontLabel, width=6, background=g.bgLight)
        lbl_value_failure_rate.place(x=self.column_6, y=self.rows[1])
        # Performance
        lbl_performance = tk.Label(self, text='Performance', font=g.fontLabel, background=g.bgDark)
        lbl_performance.place(x=self.column_5, y=self.rows[2])
        lbl_value_performance = tk.Label(self, textvariable=self.performance,
                                         font=g.fontLabel, width=6, background=g.bgLight)
        lbl_value_performance.place(x=self.column_6, y=self.rows[2])
        # Failure Rate
        lbl_failure_rate = tk.Label(self, text='Failure Rate', font=g.fontLabel, background=g.bgDark)
        lbl_failure_rate.place(x=self.column_5, y=self.rows[3])
        lbl_value_failure_rate = tk.Label(self, textvariable=self.failure_rate,
                                          font=g.fontLabel, width=6, background=g.bgLight)
        lbl_value_failure_rate.place(x=self.column_6, y=self.rows[3])
        # Failing Records
        lbl_failing_records = tk.Label(self, text='Failing Records:', font=g.fontLabel, background=g.bgDark)
        lbl_failing_records.place(x=self.column_5, y=self.rows[4])
        #scrollbar = tk.Scrollbar(self)
        #scrollbar.place(x=self.column_7, y=self.rows[5])
        self.text_value_failing_records = \
            tkscrolled.ScrolledText(self, font=g.fontLabel, height=20, width=22, background=g.bgLight,
                                    relief=tk.FLAT, state=tk.DISABLED, wrap=tk.WORD)
        self.text_value_failing_records.place(x=self.column_5, y=self.rows[5])
        #scrollbar.config(command=self.text_value_failing_records.yview)
| true |
53952c8d9be5b051a4e3584736fe606c8f4abc95 | Python | niemitee/mooc-ohjelmointi-21 | /osa06-03_matriisi/src/matriisi.py | UTF-8 | 900 | 3.1875 | 3 | [] | no_license | # tee ratkaisu tänne
def lue_luvut():
    """Read 'matriisi.txt' and return its contents as a matrix of ints."""
    with open('matriisi.txt') as tiedosto:
        return [[int(alkio) for alkio in rivi.split(',')] for rivi in tiedosto]

def yhdista(luvut: list):
    """Flatten a matrix (list of rows) into a single list of numbers."""
    return [alkio for rivi in luvut for alkio in rivi]

def rivisummat():
    """Return the sum of each row of the matrix in 'matriisi.txt'."""
    return [sum(rivi) for rivi in lue_luvut()]

def summa():
    """Return the sum of all numbers in 'matriisi.txt'."""
    return sum(yhdista(lue_luvut()))

def maksimi():
    """Return the largest number in 'matriisi.txt'."""
    return max(yhdista(lue_luvut()))

if __name__ == '__main__':
    print(summa())
    print('#################################')
    print(rivisummat())
    print('#################################')
    print(maksimi())
| true |
ea2f8204cb62b9ddd873d00031e8a519c00fdb0b | Python | andriisoldatenko/fan | /aoc22/day_013/main.py | UTF-8 | 834 | 3.03125 | 3 | [
"MIT"
] | permissive | import json
FILE = open("input.txt")
def compare(x, y):
    """Pop paired items off the fronts of *x* and *y*; report the first
    int pair where the left item is smaller.

    NOTE: destructively consumes both input lists, and raises IndexError
    if *y* runs out before *x* (kept from the original).
    """
    while x:
        left_item = x.pop(0)
        right_item = y.pop(0)
        if isinstance(left_item, int) and isinstance(right_item, int) and left_item < right_item:
            print("in the right order")
            break
# NOTE(review): this function is INCOMPLETE and the file does not parse -
# the trailing `if isinstance(ll, list)` has no colon or body.  It appears
# to be an unfinished attempt at Advent of Code 2022 day 13 (packet
# comparison); `index` and `results` are set up but never used.
def main(file):
    # Pair up the non-blank lines two at a time: (left packet, right packet).
    lines = [line.strip() for line in file if line != "\n"]
    index = 1
    results = []
    for left, right in [lines[i:i + 2] for i in range(0, len(lines), 2)]:
        left_p, right_p = json.loads(left), json.loads(right)
        while len(left_p) > 0:
            ll = left_p.pop(0)
            rr = right_p.pop(0)
            if isinstance(ll, int) and isinstance(rr, int) and ll < rr:
                print("in the right order")
                break
        # Dangling statement: list-vs-list comparison was never written.
        if isinstance(ll, list)
print(main(FILE))
# end = time.time()
# print(end - start)
| true |
5e80eb798552e820e9deef739cbee1986b723b46 | Python | lincolnge/parsing | /parse.py | UTF-8 | 5,529 | 2.671875 | 3 | [] | no_license | # coding:utf8
dictFIRST = {}
# read test case file and grammer file, the return value is content of file
def readFile(fileName):
    """Read *fileName* completely and return its contents as one string.

    Each line is echoed to stdout while reading (debug aid kept from the
    original; print(one_arg) behaves identically under Python 2 and 3).
    """
    lines = []
    # Fix: use a 'with' block so the handle is closed even if reading
    # raises (the original leaked the handle on error), and join once
    # instead of quadratic string concatenation.
    with open(fileName, "r") as text_file:
        for line in text_file:
            print(line)
            lines.append(line)
    return "".join(lines)
# split string
def splitString(strSplit):
    """Insert spaces around every token boundary in *strSplit*.

    Splits on newlines, blanks, and the grammar's operator symbols while
    KEEPING the separators (the whole pattern is one capture group), then
    glues the pieces back together with single spaces so a later .split()
    yields one token per element.  '\xa1\xfa' is presumably the GBK
    encoding of the arrow used in the grammar file - confirm.
    """
    import re
    pieces = re.split('(\n| |,|==|<=|>=|=|;|<|>|\(|\)|\*|\|\||{|}|\[|\]|\||!=|!|/|-|\+|\xa1\xfa)', strSplit)
    return " ".join(pieces)
# first part====================
def scanner():
    """Tokenize the first test case file and print the token list."""
    testCase_Str = readFile("testCase1.txt")  # read first test case
    # testCase_Str = readFile("testCase2.txt")  # read second test case
    # Bug fix: the original discarded splitString()'s return value and then
    # called .split() on the FUNCTION object itself, which raises
    # AttributeError at runtime.
    splitSpace = splitString(testCase_Str).split()
    print(splitSpace)  # the result of the first part
# first part=====================
def readCFG():
    """Parse 'testGrammer.txt' into a dict of CFG productions.

    Returns a dict mapping each non-terminal (a string) to a list of
    alternatives, each alternative being a list of symbol strings.
    '\xa1\xfa' appears to be the GBK-encoded production arrow and '|'
    separates alternatives - confirm against the grammar file.
    NOTE(review): the continuation-line handling below indexes
    strGram_allLine by len(dictCFG.keys()) - 1, i.e. it relies on dict
    insertion order matching line order (Python 2 dicts are unordered, so
    this is fragile).
    """
    print "="*25+" First LL(1) " + "="*25
    strGram = readFile("testGrammer.txt")
    # strGram = splitString("testGrammer.txt")
    non_terminateStr = []
    terminateStr = []
    CFG_eachLine = []
    lastCFG_eachLine = []
    dictCFG = {}
    singleton = 1
    start_non_ter = ""
    strGram_allLine = strGram.split('\n')
    print "="*50
    # print len(strGram_allLine)
    for x_strGram_allLine in xrange(0, len(strGram_allLine)-1):
        strGram_allLine[x_strGram_allLine] = splitString(strGram_allLine[x_strGram_allLine])
        CFG_eachLine = strGram_allLine[x_strGram_allLine].split()
        if start_non_ter == "": # to initialize start
            start_non_ter = CFG_eachLine[0]
        # One sub-list per alternative of the current production.
        terminateStr = [[]]
        for x_CFG_eachLine in xrange(1, len(CFG_eachLine)): # put eachLine into terminate
            if CFG_eachLine[x_CFG_eachLine] == '\xa1\xfa':
                pass
            else:
                if CFG_eachLine[x_CFG_eachLine] == '|':
                    terminateStr.append([])
                else:
                    terminateStr[len(terminateStr)-1].append(CFG_eachLine[x_CFG_eachLine])
        # print terminateStr # it has something beautiful
        # A line starting with '|' continues the previous non-terminal's
        # alternatives.
        if CFG_eachLine[0] == '|':
            lastCFG_eachLine = strGram_allLine[len(dictCFG.keys())-1].split()
            if singleton == 1: # only one time lock
                dictCFG[lastCFG_eachLine[0]] = dictCFG[lastCFG_eachLine[0]]
                singleton = 0
            dictCFG[lastCFG_eachLine[0]] += terminateStr
            # print dictCFG[lastCFG_eachLine[0]] # output
            dictCFG.update({lastCFG_eachLine[0]: dictCFG[lastCFG_eachLine[0]]})
        else:
            dictCFG.update({CFG_eachLine[0]: terminateStr})
    non_terminateStr = dictCFG.keys()
    # print non_terminateStr
    # print dictCFG
    # print start_non_ter
    return dictCFG
# NOTE(review): computes (an approximation of) the FIRST set of
# input_Nonterm by walking the leading symbols of insert_First's
# alternatives; results accumulate in `listFirst` and are stored in the
# module-global `dictFIRST`.  '\xa6\xc5' is presumably the GBK encoding
# of epsilon - confirm.  The author's own comment admits the epsilon rule
# (condition 4 of FIRST) is incomplete, and the bare `except: pass` below
# silently swallows IndexErrors from the `[eps]` lookup.
def determingFIRST(dictCFG, input_Nonterm, insert_First, listFirst): # input_Nonterm is FIRST(X), insert_First is FIRST(Y1)
    # the fourth condition of FIRST is not complete
    # listFirst = []
    eps = 0
    non_terminateStr = dictCFG.keys()
    # for x_len_Nonter in xrange(0, len(dictCFG.keys())):
    # for x_len_orStr in xrange(0, len(dictCFG[dictCFG.keys()[x_len_Nonter]])):
    # First pass: count alternatives that can derive epsilon.
    for x_len_orStr in xrange(0, len(dictCFG[insert_First])):
        if dictCFG[insert_First][x_len_orStr][0] == '\xa6\xc5':
            eps += 1
    # Second pass: collect the first symbol of each alternative, recursing
    # into non-terminals.
    for x_len_orStr in xrange(0, len(dictCFG[insert_First])):
        checkTerminateStr = dictCFG[insert_First][x_len_orStr][0]
        if eps:
            try:
                determingFIRST(dictCFG, input_Nonterm, dictCFG[insert_First][x_len_orStr][eps], listFirst)
            except:
                pass
        if checkTerminateStr == insert_First:
            pass
        else:
            # if dictCFG[insert_First][x_len_orStr][0] == '\xa6\xc5':
            # determingFIRST(dictCFG, input_Nonterm, checkTerminateStr, listFirst)
            if checkTerminateStr not in non_terminateStr:
                # listFirst += [dictCFG[dictCFG.keys()[x_len_Nonter]][x_len_orStr][0]]
                if (input_Nonterm != insert_First) & (dictCFG[insert_First][x_len_orStr][0] == '\xa6\xc5'): # deal with epsilon
                    pass
                else:
                    listFirst += [dictCFG[insert_First][x_len_orStr][0]]
            else:
                # print checkTerminateStr
                # print listFirst
                determingFIRST(dictCFG, input_Nonterm, checkTerminateStr, listFirst)
    # print dictCFG.keys()[x_len_Nonter]
    listFirst = list(set(listFirst))
    # dictFIRST.update({dictCFG.keys()[x_len_Nonter]: listFirst})
    dictFIRST.update({input_Nonterm: listFirst})
    listFirst = []
    # print listFirst
    eps = 0
    return dictFIRST
def firstLL1():
    """Read the grammar and compute the FIRST set of every non-terminal.

    Drives determingFIRST() once per non-terminal, accumulating into the
    module-global dictFIRST, then prints the result.
    """
    dictCFG = readCFG()
    # dictFIRST = {}
    non_terminateStr = dictCFG.keys()
    listFirst = []
    print dictCFG
    # print len(dictCFG.keys())
    # print dictCFG.values()[0][0][0] # first [] is non_ter, second[] is |, third is FIRST
    # print non_terminateStr
    # print dictCFG[dictCFG.keys()[5]]
    print "="*50
    # determingFIRST(dictCFG[dictCFG.keys()[0]])
    for x_len_Nonter in xrange(0, len(dictCFG.keys())):
        # print dictCFG.keys()[x_len_Nonter]
        dictFIRST = determingFIRST(dictCFG, dictCFG.keys()[x_len_Nonter], dictCFG.keys()[x_len_Nonter], listFirst)
        listFirst = []
    print dictFIRST
def followLL1():
    """TODO: compute FOLLOW sets (not implemented yet)."""
    pass
def parse_table():
    """TODO: build the LL(1) parse table (not implemented yet)."""
    pass
def parseLL1():
    """TODO: run the LL(1) table-driven parse (not implemented yet)."""
    pass
if __name__ == '__main__':
# scanner()
firstLL1()
# print splitString("testGrammer.txt")
| true |
2a0983c6d7f7cb54c2b9029b3df702109d66f0ad | Python | kagomesakura/range | /range.py | UTF-8 | 93 | 3.59375 | 4 | [] | no_license | #division makes float, not int.
# In Python 3, "/" is true division, so each printed value is a float.
divisor = 2
for num in range(0, 10, 2):
    quotient = num / divisor
    print(quotient)
| true |
d82b56290fa98687940e049c85e5489d181ac617 | Python | rahulc97/final-project | /Gps/server_socket.py | UTF-8 | 1,171 | 2.53125 | 3 | [] | no_license | ### first run cli_soc.py progrm in p3 thonny
### next run server_socket.py in p2 terminal
import binascii
import socket
import struct
import sys
import serial
import time
import string
import pynmea2
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('localhost', 10000)
sock.connect(server_address)
# Forever: read one NMEA sentence from the GPS serial port, and stream the
# parsed (lat, lng) pair over the socket as two packed 32-bit floats.
while True:
    port="/dev/ttyAMA0"
    # NOTE(review): the serial port is re-opened on every iteration and
    # never closed - presumably this should be hoisted above the loop.
    ser=serial.Serial(port, baudrate=9600, timeout=0.5)
    dataout = pynmea2.NMEAStreamReader()
    newdata=ser.readline()
    if newdata[0:6] == "$GPRMC":
        print("hasi")
        newmsg=pynmea2.parse(newdata)
        lat=newmsg.latitude
        lng=newmsg.longitude
        gps = "Latitude=" + str(lat) + "and Longitude=" + str(lng)
        print(gps)
    # Busy-wait delay (burns CPU; time.sleep would be the usual choice).
    i=0
    while i<1000000:
        i+=1
    # NOTE(review): if no $GPRMC sentence has been seen yet, lat/lng are
    # unbound here and this raises NameError - confirm against the caller.
    values = (lat,lng)
    packer = struct.Struct('f f')
    packed_data = packer.pack(*values)
    #try:
    # Send data
    # NOTE(review): "print >>sys.stderr" is Python 2 syntax while print(...)
    # above is Python 3 style - this script only runs under Python 2.
    print >>sys.stderr, 'sending "%s"' % binascii.hexlify(packed_data), values
    sock.sendall(packed_data)
    # finally:
    # print >>sys.stderr, 'closing socket'
    #sock.close()
| true |
2b71a5e72b9067c4d3f3730a8b9acbdb1c83f8b3 | Python | CliffordFung/Algorithms-Questions | /DP - 2. RodCutting.py | UTF-8 | 1,316 | 3.390625 | 3 | [] | no_license | def main():
print(rodCuttingTopDown([1, 2, 3, 4, 5], [2, 6, 7, 10, 13], 5))
print(rodCuttingBottomUp([1, 2, 3, 4, 5], [2, 6, 7, 10, 13], 5))
def rodCuttingTopDown(lengths, prices, n):
    """Return the maximum value obtainable by cutting a rod of length n.

    lengths[i] is the length of piece i and prices[i] its selling price;
    each piece may be used any number of times (unbounded rod cutting).
    """
    # Memo table indexed by [piece index][remaining length].  Width must be
    # n + 1 (the original used len(lengths) + 1, which overflows whenever
    # n exceeds the number of piece sizes).
    dp = [[-1] * (n + 1) for _ in prices]
    return rodCuttingTopDownRecursive(dp, lengths, prices, n, 0)

def rodCuttingTopDownRecursive(dp, lengths, prices, n, i):
    """Best value using pieces i.. with remaining rod length n (memoized)."""
    # Guard on i >= len(prices): i indexes pieces, n is a length (the
    # original's `i >= n` conflated the two and only worked because the
    # sample data happened to have lengths == [1, 2, 3, ...]).
    if len(prices) != len(lengths) or n == 0 or i >= len(prices):
        return 0
    if dp[i][n] == -1:
        profit_take = 0
        if lengths[i] <= n:
            # Consume lengths[i] of the rod (the original subtracted
            # i + 1, which is only correct when lengths[i] == i + 1).
            profit_take = prices[i] + rodCuttingTopDownRecursive(dp, lengths, prices, n - lengths[i], i)
        # Always consider skipping piece i, even when it does not fit
        # (the original only explored the skip branch inside the if).
        profit_skip = rodCuttingTopDownRecursive(dp, lengths, prices, n, i + 1)
        dp[i][n] = max(profit_take, profit_skip)
    return dp[i][n]

def rodCuttingBottomUp(lengths, prices, n):
    """Iterative (tabulated) version of the same unbounded rod cutting."""
    if len(prices) != len(lengths) or n == 0:
        return 0
    dp = [[0 for _ in range(n + 1)] for _ in prices]
    # Iterate over pieces (the original used range(n), assuming
    # len(prices) == n).
    for i in range(len(prices)):
        for l in range(n + 1):
            profit1, profit2 = 0, 0
            if lengths[i] <= l:
                # Same row -> piece i may be reused (unbounded).
                profit1 = prices[i] + dp[i][l - lengths[i]]
            if i > 0:
                profit2 = dp[i - 1][l]
            dp[i][l] = max(profit1, profit2)
    # The answer lives in the last piece row (the original returned
    # dp[n - 1][n], again assuming len(prices) == n).
    return dp[len(prices) - 1][n]
if __name__ == "__main__":
main()
| true |
283bff8b92c1dd78be91ee56706b9b0516e62498 | Python | harcel/PyDataScienceIntroNL | /uitwerkingen/4-decisiontree.py | UTF-8 | 754 | 2.828125 | 3 | [
"MIT"
] | permissive | titanic = pd.read_csv(os.path.join('data', 'titanic3.csv'))
print(titanic.head())
# Target: 1 = survived, 0 = did not.
labels = titanic.survived.values
# One-hot encode the categorical columns; numeric columns pass through.
features = titanic[['pclass', 'sex', 'age', 'sibsp', 'parch', 'fare', 'embarked']]
features_dummies = pd.get_dummies(features, columns=['pclass', 'sex', 'embarked'])
features_dummies.head()
data = features_dummies.values
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.preprocessing import Imputer
# Imputer fills the missing values (e.g. unknown ages) with column means.
imp = Imputer()
# Fitting and transforming can be done in one step!
decent_data = imp.fit_transform(data)
# What also works:
# imp.fit(data)
# decent_data = imp.transform(data)
tree = DecisionTreeClassifier()
tree.fit(decent_data, labels)
# NOTE(review): DecisionTreeClassifier.score is mean ACCURACY on the
# training data (not R**2 as the label says), so it is near 1.0 here.
print("R**2 van de decision tree:", tree.score(decent_data, labels))
| true |
880ec4e78bfd37aa6eac0dda4c202edd56f3f5c2 | Python | soumen29dec/Soumen-s-work | /accidents_2017_barca_rev.py | UTF-8 | 26,210 | 2.765625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Sat May 18 15:48:10 2019
@author: Soumen Sarkar
"""
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
import warnings
warnings.filterwarnings("ignore")
import io
import plotly.offline as py#visualization
py.init_notebook_mode(connected=True)#visulatization
import plotly.graph_objs as go#visualization
import plotly.tools as tls#visualization
import plotly.figure_factory as ff
accident=pd.read_csv('D:/Kaggle-DataSet/Barcelona-Dataset/accidents_2017/accidents_2017.csv')
accident.head()
#accident.columns=[col.replace(' ', '_').lower() for col in accident.columns]
accident.columns=[col.replace(' ', '_') for col in accident.columns]
print("Rows :" ,accident.shape[0])
print("Columns :" ,accident.shape[1])
print("\nFeatures : \n", accident.columns.tolist())
print("\nMissing Values: ", accident.isnull().sum().values.sum())
print("\nUnique Values: \n", accident.nunique())
df_jan=accident[accident.Month=='January']
df_feb=accident[accident.Month=='February']
df_mar=accident[accident.Month=='March']
df_apr=accident[accident.Month=='April']
df_may=accident[accident.Month=='May']
df_jun=accident[accident.Month=='June']
df_jul=accident[accident.Month=='July']
df_aug=accident[accident.Month=='August']
df_sep=accident[accident.Month=='September']
df_oct=accident[accident.Month=='October']
df_nov=accident[accident.Month=='November']
df_dec=accident[accident.Month=='December']
ID_col = ["Id"]
cat_cols = accident.nunique()[accident.nunique() < 6].keys().tolist()
#cat_cols = [x for x in cat_cols if x not in target_col]
num_cols = [x for x in accident.columns if x not in cat_cols+ID_col]
lab = accident['Month'].value_counts().keys().tolist()
val = accident['Month'].value_counts().values.tolist()
trace = go.Pie(labels=lab,
values=val,
marker = dict(colors=[ 'royalblue', 'lime'],
line = dict(color='white',
width=1.3)
),
rotation=90,
hoverinfo="label+value+text")
layout=go.Layout(dict(title="Accidents by Month",
plot_bgcolor="rgb(243,243,243)",
paper_bgcolor="rgb(243,243,243)",
)
)
data=[trace]
fig = go.Figure(data=data, layout = layout)
py.iplot(fig, filename="Basic Pie Chart")
target_col=["Month"]
cat_cols_jan=df_dec.nunique()[df_dec.nunique()<6].keys().tolist()
cat_cols_jan=[x for x in cat_cols_jan if x not in target_col]
num_cols_jan = [x for x in df_dec.columns if x not in cat_cols_jan+ID_col+target_col]
def plot_pie(column):
    """Render a donut chart of December accident counts broken down by `column`.

    Reads the module-level ``df_dec`` frame and plots inline via ``py.iplot``.
    """
    counts = df_dec[column].value_counts()
    trace = go.Pie(
        values=counts.values.tolist(),
        labels=counts.keys().tolist(),
        domain=dict(x=[0, .48]),
        marker=dict(line=dict(width=2, color="rgb(243,243,243)")),
        hole=.6,
    )
    layout = go.Layout(dict(
        title="Distribution of Accidents by" + " " + column,
        plot_bgcolor="rgb(243,243,243)",
        paper_bgcolor="rgb(243,243,243)",
        # Centre label inside the donut hole.
        annotations=[dict(text="December Accidents",
                          font=dict(size=13),
                          showarrow=False,
                          x=.15, y=.5)],
    ))
    py.iplot(go.Figure(data=[trace], layout=layout))
for i in cat_cols_jan:
plot_pie(i)
def histogram(column):
    """Plot a percent-normalised histogram of December accidents over `column`.

    Uses the module-level ``df_dec`` frame and renders inline via ``py.iplot``.
    """
    # Both axes share the same grid styling; only the title differs.
    axis_style = dict(gridcolor='rgb(255,255,255)', zerolinewidth=1,
                      ticklen=5, gridwidth=2)
    trace = go.Histogram(
        x=df_dec[column],
        histnorm="percent",
        name="Accident in December",
        marker=dict(line=dict(width=0.5, color="black",)),
        opacity=0.9,
    )
    # NOTE: the "Distirbution" typo is kept so the rendered title is unchanged.
    layout = go.Layout(dict(
        title="Distirbution of December Accidents by" + " " + column,
        plot_bgcolor="rgb(243,243,243)",
        paper_bgcolor="rgb(243,243,243)",
        xaxis=dict(title=column, **axis_style),
        yaxis=dict(title="percent", **axis_style),
    ))
    py.iplot(go.Figure(data=[trace], layout=layout))
for i in num_cols_jan:
histogram(i)
#determine coefficients between features
header=['Id','Mild_injuries','Serious_injuries', 'Victims', 'Vehicles_involved',
'Longitude','Latitude']
new_df=pd.DataFrame()
new_df['Mild_injuries']=accident['Mild_injuries'].values
new_df['Serious_injuries']=accident['Serious_injuries'].values
new_df['Victims']=accident['Victims'].values
new_df['Vehicles_involved']=accident['Vehicles_involved'].values
new_df['Longitude']=accident['Longitude'].values
new_df['Latitude']=accident['Latitude'].values
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
Id_col = ["Id"]
#Target Columns
target_col = ["Victims"]
def plot_month_scatter(month_group, color):
    """Build a scatter trace of Victims vs Vehicles_involved for one month.

    Filters the module-level ``accident`` frame to the given month and
    returns (does not plot) the ``go.Scatter`` trace.
    """
    month_rows = accident[accident["Month"] == month_group]
    marker_style = dict(line=dict(color="black", width=.2),
                        size=4, color=color, symbol="diamond-dot")
    return go.Scatter(x=month_rows["Victims"],
                      y=month_rows["Vehicles_involved"],
                      mode="markers",
                      marker=marker_style,
                      name=month_group,
                      opacity=.9)
trace1 = plot_month_scatter("January","#FF3300")
trace2 = plot_month_scatter("February", "#6666FF")
trace3 = plot_month_scatter("March", "#99FF00")
trace4 = plot_month_scatter("April", "#996600")
trace5 = plot_month_scatter("May", "grey")
trace6 = plot_month_scatter("June","purple")
trace7 = plot_month_scatter("July", "brown")
trace8 = plot_month_scatter("August", "yellow")
trace9 = plot_month_scatter("September", "orange")
trace10 = plot_month_scatter("October", "red")
trace11= plot_month_scatter("November", "green")
trace12= plot_month_scatter("December", "blue")
data=[trace1,trace2,trace3,trace4,trace5,trace6,trace7,trace8,trace9,trace10,trace11,trace12]
def layout_title(title):
    """Return the shared scatter-plot layout with the given chart title."""
    def _axis(label):
        # Common axis styling; only the label differs between the two axes.
        return dict(gridcolor='rgb(255,255,255)', title=label,
                    zerolinewidth=1, ticklen=5, gridwidth=2)
    return go.Layout(dict(title=title,
                          plot_bgcolor='rgb(243,243,243)',
                          paper_bgcolor='rgb(243,243,243)',
                          xaxis=_axis("# Victims"),
                          yaxis=_axis("# Vehicles Involved"),
                          height=600))
layout = layout_title("No. of Victims & Vehicles involved by Months")
#layout2 = layout_title("Monthly Charges & Total Charges by Churn Group")
fig = go.Figure(data=data, layout=layout)
#fig2 = go.Figure(data=data2, layout=layout2)
py.iplot(fig)
#py.iplot(fig2)
avg_acc=accident.groupby(["Month"])[['Victims','Vehicles_involved']].mean().reset_index()
def mean_charges(column):
    """Bar trace of the monthly mean of `column`, read from ``avg_acc``."""
    return go.Bar(x=avg_acc["Month"],
                  y=avg_acc[column],
                  marker=dict(line=dict(width=1)))
def layout_plot(title, xaxis_lab, yaxis_lab):
    """Bar-chart layout with a title and labels for both axes."""
    def _axis(label):
        # Identical grid styling for both axes.
        return dict(gridcolor="rgb(255,255,255)", title=label,
                    zerolinewidth=1, ticklen=5, gridwidth=2)
    return go.Layout(dict(title=title,
                          plot_bgcolor="rgb(243,243,243)",
                          paper_bgcolor="rgb(243,243,243)",
                          xaxis=_axis(xaxis_lab),
                          yaxis=_axis(yaxis_lab)))
trace1 = mean_charges("Victims")
layout1 = layout_plot("Average No of Victims by Month",
"Month", '# Victims')
data1 = [trace1]
fig1 = go.Figure(data=data1, layout=layout1)
trace2 = mean_charges("Vehicles_involved")
layout2 = layout_plot("Average No of Vechicles by Month",
"Month", '# Vechicles')
data2 = [trace2]
fig2 = go.Figure(data=data2, layout=layout2)
py.iplot(fig1)
py.iplot(fig2)
#RUN IT FROM THIS POINT EVERYTIME YOU START SYSTEM FOR PREDICTIONS USING DIFFERENT REGRESSIONS
accident=pd.read_csv('D:/Kaggle-DataSet/Barcelona-Dataset/accidents_2017/accidents_2017.csv')
df_acc=accident.copy()
df_acc.columns=[col.replace(' ', '_') for col in df_acc.columns]
import sklearn
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
Id_col=["Id"]
target_col=['Victims']
cat_cols=df_acc.nunique()[df_acc.nunique()<6].keys().tolist()
cat_cols=[x for x in cat_cols if x not in target_col]
num_cols = [x for x in df_acc.columns if x not in cat_cols+target_col+Id_col]
bin_cols = df_acc.nunique()[df_acc.nunique()==2].keys().tolist()
multi_cols = [s for s in cat_cols if s not in bin_cols]
le = LabelEncoder()
for i in bin_cols:
df_acc[i] = le.fit_transform(df_acc[i])
df_acc=pd.get_dummies(data=df_acc, columns=multi_cols)
std=StandardScaler()
num_cols_scaled=num_cols[5:]
scaled=std.fit_transform(df_acc[num_cols_scaled])
scaled=pd.DataFrame(scaled, columns=num_cols_scaled)
#scaled=std.fit_transform(accident[num_cols])
#scaled=pd.DataFrame(scaled, columns=num_cols)
df_acc=df_acc.drop(columns=num_cols_scaled, axis=1)
df_acc=df_acc.merge(scaled, left_index=True, right_index=True, how='left')
Id_col=['Id']
summary=(df_acc[[i for i in df_acc.columns if i not in Id_col]].
describe().transpose().reset_index())
summary=summary.rename(columns={"index":"feature"})
summary=np.around(summary,3)
val_lst=[summary['feature'], summary['count'],
summary['mean'], summary['std'],
summary['min'], summary['25%'],
summary['50%'], summary['75%'], summary['max']]
trace=go.Table(header=dict(values=summary.columns.tolist(),
line=dict(color=['#506784']),
fill=dict(color=['#119DFF']),
),
cells=dict(values=val_lst,
line=dict(color=['#506784']),
fill=dict(color=["lightgrey",'#119DFF']),
),
columnwidth=[200,60,100,100,60,60,80,80,80])
layout=go.Layout(dict(title="Variable Summary"))
figure=go.Figure(data=[trace],layout=layout)
py.iplot(figure)
correlation=df_acc.corr()
matrix_cols=correlation.columns.tolist()
corr_array=np.array(correlation)
trace=go.Heatmap(z=corr_array,
x=matrix_cols,
y=matrix_cols,
colorscale='Viridis',
colorbar=dict(title="Pearson Correlation Coefficient",
titleside='right'),
)
layout=go.Layout(dict(title="Correlation Matrix for variables",
autosize=False,
height=720,
width=800,
margin=dict(r=0, l=210,
t=25, b=210),
yaxis=dict(tickfont=dict(size=9)),
xaxis=dict(tickfont=dict(size=9))))
fig=go.Figure(data=[trace], layout=layout)
py.iplot(fig)
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from sklearn.metrics import roc_auc_score, roc_curve, scorer
from sklearn.metrics import f1_score
import statsmodels.api as sm
from sklearn.metrics import precision_score, recall_score
from yellowbrick.classifier import DiscriminationThreshold
import sklearn
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression
from sklearn.metrics import classification_report
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score
id_col=['Id']
target_col=['Victims']
cols=[i for i in df_acc.columns if i not in Id_col + target_col]
cols=cols[5:]
x=df_acc[cols]
X=np.array(x)
y=df_acc[target_col]
train_x,test_x,train_y,test_y = train_test_split(X,y,test_size=0.2,random_state=100)
reg=LinearRegression()
reg.fit(train_x, train_y)
y_pred = reg.predict(test_x)
print(y_pred)
np.savetxt("D:/Kaggle-DataSet/Barcelona-Dataset/accidents_2017/victim_prediction.csv",y_pred,delimiter=',')
np.savetxt("D:/Kaggle-DataSet/Barcelona-Dataset/accidents_2017/test_data.csv",y_pred,delimiter=',')
train_x, ValData, train_y, ValLabel = train_test_split(X, y, test_size=0.2, random_state=100)
kvals=range(1,40,2)
accuracies=[]
for k in kvals:
model=KNeighborsRegressor(n_neighbors=k)
model.fit(train_x, train_y)
score=model.score(ValData, ValLabel)
print('k=%d, accuracy=%.2f%%' % (k, score * 100))
accuracies.append(score)
i=np.argmax(accuracies)
print("k=%d, achieved highest accuracy of %.2f%%" %(kvals[i], accuracies[i]*100))
KNN=KNeighborsRegressor(n_neighbors=kvals[i])
KNN.fit(train_x, train_y)
y_pred_knn = KNN.predict(test_x)
y_pred_knn=pd.DataFrame(y_pred_knn)
np.savetxt("D:/Kaggle-DataSet/Barcelona-Dataset/accidents_2017/2017_pred_KNN.csv",y_pred_knn,delimiter=',')
plt.rc("font", size = 14)
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
'''logreg = LogisticRegression()
logreg.fit(train_x, train_y)
y_pred = logreg.predict(test_x)
print('Accuracy of logistic regression classifier on test set: {:2f}'.format(logreg.score(test_x,test_y)))
from sklearn import model_selection
from sklearn.model_selection import cross_val_score
kfold=model_selection.KFold(n_splits=10, random_state=7)
modelCV = LogisticRegression()
scoring='accuracy'
results = model_selection.cross_val_score(modelCV, train_x, train_y, cv=kfold, scoring=scoring)
print('10-fold cross validation average accuracy:%0.3f' %(results.mean()))'''
from sklearn.metrics import confusion_matrix
confusion_matrix = confusion_matrix(test_y, y_pred)
print(confusion_matrix)
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from numpy import array
from numpy import argmax
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
values1 = array(y_pred)
y_pred_label_encoder=LabelEncoder()
y_pred_Integer_encoded=y_pred_label_encoder.fit_transform(values1)
y_pred_onehot_encoder=OneHotEncoder(sparse=False)
y_pred_Integer_encoded=y_pred_Integer_encoded.reshape(len(y_pred_Integer_encoded),1)
y_pred_onehot_encoded=y_pred_onehot_encoder.fit_transform(y_pred_Integer_encoded)
values = array(test_y)
y_test_label_encoder=LabelEncoder()
y_test_Integer_encoded=y_test_label_encoder.fit_transform(values)
y_test_onehot_encoder=OneHotEncoder(sparse=False)
y_test_Integer_encoded=y_test_Integer_encoded.reshape(len(y_test_Integer_encoded),1)
y_test_onehot_encoded=y_test_onehot_encoder.fit_transform(y_test_Integer_encoded)
'''logit_roc_auc = roc_auc_score(y_test_onehot_encoded,y_pred_onehot_encoded)
fpr, tpr, thresholds = roc_curve(test_y, logreg.predict_proba(test_x)[:,1], pos_label='yes')
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' %logit_roc_auc)'''
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
clf_gini = DecisionTreeClassifier(criterion="gini", random_state=100, max_depth=3, min_samples_leaf=5)
clf_gini.fit(train_x, train_y)
y_pred_gini = clf_gini.predict(test_x)
print("Predictions using GINI index:")
print("Predicted Values:")
print(y_pred_gini)
print("Confusion Matrix: ")
print(confusion_matrix(test_y, y_pred_gini))
print("Accuracy: ")
print(accuracy_score(test_y, y_pred_gini)*100)
print("Detailed Report using GINI Index: ")
print(classification_report(test_y, y_pred_gini))
np.savetxt("D:/Kaggle-DataSet/Barcelona-Dataset/accidents_2017/2017_pred_GINI.csv",y_pred_gini,delimiter=',')
from sklearn import tree
tree.export_graphviz(clf_gini,out_file='D:/Kaggle-DataSet/Barcelona-Dataset/accidents_2017/Gini.dot')
clf_entropy=DecisionTreeClassifier(criterion="entropy", random_state=100, max_depth=3, min_samples_leaf=5)
clf_entropy.fit(train_x,train_y)
y_pred_entropy=clf_entropy.predict(test_x)
print("Predictions using ENTROPY index:")
print("Predicted Values:")
print(y_pred_entropy)
print("Confusion Matrix: ")
print(confusion_matrix(test_y, y_pred_entropy))
print("Accuracy: ")
print(accuracy_score(test_y, y_pred_entropy)*100)
print("Detailed Report using ENTROPY Index: ")
print(classification_report(test_y, y_pred_entropy))
np.savetxt("D:/Kaggle-DataSet/Barcelona-Dataset/accidents_2017/2017_pred_ENTROPY.csv",y_pred_entropy,delimiter=',')
from sklearn import tree
tree.export_graphviz(clf_entropy,out_file='D:/Kaggle-DataSet/Barcelona-Dataset/accidents_2017/Entropy.dot')
from imblearn.over_sampling import SMOTE
cols = [i for i in df_acc.columns if i not in Id_col+target_col]
cols=cols[5:]
smote_X=df_acc[cols]
smote_Y=df_acc[target_col]
smote_train_x,smote_test_x,smote_train_y,smote_test_y=train_test_split(smote_X,smote_Y,test_size=.20,
random_state=100)
'''os=SMOTE(random_state=0)
os_smote_X,os_smote_Y=os.fit_sample(smote_train_x,smote_train_y)
os_smote_X=pd.DataFrame(data=os_smote_X,columns=cols)
os_smote_Y=pd.DataFrame(data=os_smote_Y,columns=target_col)'''
from sklearn.feature_selection import chi2
from sklearn.feature_selection import SelectKBest
select=SelectKBest(score_func=chi2,k=3)
fit=select.fit(smote_X,smote_Y)
score=pd.DataFrame({"features":cols,"scores":fit.scores_,"p-values":fit.pvalues_})
score=score.sort_values(by="scores", ascending = False)
#Adding new columne "Feature Type in score dataframe
score["feature_type"]=np.where(score["features"].isin(num_cols),"Numerical","Categorical")
trace=go.Scatter(x=score[score["feature_type"]=="Categorical"]["features"],
y=score[score["feature_type"]=="Categorical"]["scores"],
name='Categorical', mode="lines+markers",
marker=dict(color='red',
line=dict(width=1))
)
trace1=go.Bar(x=score[score["feature_type"]=="Numerical"]["features"],
y=score[score["feature_type"]=="Numerical"]["scores"],name='Numerical',
marker=dict(color='royalblue',
line=dict(width=1)),
xaxis='x2',yaxis='y2')
layout=go.Layout(dict(title="Scores of Importance for Categorical & Numerical features",
plot_bgcolor='rgb(243,243,243)',
paper_bgcolor='rgb(243,243,243)',
xaxis=dict(gridcolor='rgb(255,255,255)',
tickfont=dict(size=10),
domain=[0,0.7],
tickangle=90, zerolinewidth=1,
ticklen=5, gridwidth=2),
yaxis=dict(gridcolor='rgb(255,255,255)',
title="scores",
zerolinewidth=1, ticklen=5, gridwidth=2),
margin=dict(b=200),
xaxis2=dict(domain=[0.8,1], tickangle=90,
gridcolor='rgb(255,255,255)'),
yaxis2=dict(anchor="x2",gridcolor='rgb(255,255,255)')))
data=[trace, trace1]
fig=go.Figure(data=data, layout=layout)
py.iplot(fig)
id_col=['Id']
target_col=['Victims']
cols=[i for i in df_acc.columns if i not in Id_col + target_col]
cols=cols[5:]
x=df_acc[cols]
X=np.array(x)
y=df_acc[target_col]
train_x,test_x,train_y,test_y = train_test_split(X,y,test_size=0.2,random_state=100)
#Random Forest Estimator
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import export_graphviz
import matplotlib.pyplot as plt
#import pydot
rf = RandomForestRegressor(n_estimators = 1000)
#Train the model on training data
rf.fit(train_x, train_y)
pred_rf = rf.predict(test_x)
#pred_rf = pd.DataFrame(pred_rf)
errors = abs(pred_rf - test_y)
print('Mean Absolute Error: ', round(np.mean(errors), 2), "degrees")
tree=rf.estimators_[100]
np.savetxt("D:/Kaggle-DataSet/Barcelona-Dataset/accidents_2017/2017_pred_RF.csv",pred_rf,delimiter=',')
from sklearn import tree
tree.export_graphviz(tree,out_file='D:/Kaggle-DataSet/Barcelona-Dataset/accidents_2017/Random_Forest.dot')
np.savetxt("D:/Kaggle-DataSet/Barcelona-Dataset/accidents_2017/2017_predicted.csv",test_y,delimiter=',')
#Predictive resutls for all Regressions:
#For Linear Regression - model reg and y_pred
#For KNN Regression - model KNN and y_pred_KNN
#For Decision Tree (GINI) - model clf_gini and y_pred_gini
#For Decision Tree (ENTROPY) - model clf_entropy and y_pred_entropy
#For Random Forest - model rf and pred_rf
#Model Report
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from sklearn.metrics import roc_auc_score, roc_curve, scorer
from sklearn.metrics import f1_score
import statsmodels.api as sm
from sklearn.metrics import precision_score, recall_score
from yellowbrick.classifier import DiscriminationThreshold
import sklearn
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression
from sklearn.metrics import classification_report
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score
def model_report(model, training_x,testing_x,training_y,testing_y,name):
    """Fit `model` on the training split and summarise its regression metrics.

    Returns a one-row DataFrame with MSE, R-square, MAE and explained
    variance computed on the test split, labelled with `name`.
    """
    model.fit(training_x, training_y)
    preds = model.predict(testing_x)
    # Column order matters downstream (concatenated into a comparison table).
    summary = {
        "Model":           [name],
        "Mean Sq.Error":   [mean_squared_error(testing_y, preds)],
        "R-Square":        [r2_score(testing_y, preds)],
        "Mean Abs.Error":  [mean_absolute_error(testing_y, preds)],
        "Variance Score":  [explained_variance_score(testing_y, preds)],
    }
    return pd.DataFrame(summary)
model1=model_report(reg,train_x,test_x,train_y,test_y,
"Linear Regression")
model2=model_report(KNN,train_x,test_x,train_y,test_y,"KNN Regression")
model3=model_report(clf_gini,train_x,test_x,train_y,test_y,"Decision Tree (GINI)")
model4=model_report(clf_entropy,train_x,test_x,train_y,test_y,"Decision Tree(Entropy)")
model5=model_report(rf,train_x,test_x,train_y,test_y,"Random Forest Regression")
model_performance=pd.concat([model1,model2,model3,model4,model5],axis=0).reset_index()
model_performance=model_performance.drop(columns="index",axis=1)
table=ff.create_table(np.round(model_performance,4))
py.iplot(table)
def output_tracer(metric, color):
    """Horizontal bar trace comparing all models on one metric column.

    Reads the module-level ``model_performance`` comparison table.
    """
    return go.Bar(y=model_performance["Model"],
                  x=model_performance[metric],
                  orientation='h',
                  name=metric,
                  marker=dict(line=dict(width=.7), color=color))
layout=go.Layout(dict(title="Model Performances",
plot_bgcolor='rgb(243,243,243)',
paper_bgcolor='rgb(243,243,243)',
xaxis=dict(gridcolor='rgb(255,255,255)',
title='metric',
zerolinewidth=1,
ticklen=5, gridwidth=2),
yaxis=dict(gridcolor='rgb(255,255,255)',
zerolinewidth=1,
ticklen=5, gridwidth=2),
margin=dict(l=250),
height=700))
trace1=output_tracer("Mean Sq.Error",'#6699FF')
trace2=output_tracer('R-Square', 'red')
trace3=output_tracer('Mean Abs.Error','#33CC99')
trace4=output_tracer('Variance Score', 'lightgrey')
data=[trace1,trace2,trace3,trace4]
fig=go.Figure(data=data, layout=layout)
py.iplot(fig)
| true |
3cda99d7bbf49edde66e527686b6c5601a1eb479 | Python | Cromlech/cromlech.content | /src/cromlech/content/tests/test_preserve.py | UTF-8 | 867 | 3 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from cromlech.content import schema
from zope.interface import Interface
from zope.schema import Choice
# zope.interface schema describing a Norseman; `rank` is a constrained choice.
class IViking(Interface):
    """Defines a Norseman
    """
    # Vocabulary of allowed ranks; "Jarl" is the schema-level default.
    rank = Choice(
        title="Rank of the viking warrior",
        default="Jarl",
        values=["Bondi", "Hersir", "Jarl", "Einherjar"])
# Content type with no class-level value: the schema default should apply.
@schema(IViking)
class Ynglingar(object):
    pass
# Content type that overrides the schema default at class level.
@schema(IViking)
class JomsWarrior(object):
    rank = "Bondi"
# Plain subclass of a decorated class, overriding `rank` once more.
class Slave(JomsWarrior):
    rank = "Thraell"
def test_preserve():
    """
    A `cromlech.content` content type can provide values described in the
    schema at the class level. These values are thus preserved::
    """
    # No class-level value -> falls back to the schema default ("Jarl").
    harfagri = Ynglingar()
    assert harfagri.rank == "Jarl"
    # A class-level override wins over the schema default.
    gormsson = JomsWarrior()
    assert gormsson.rank == "Bondi"
    # Subclass of a decorated class: its own override is still preserved.
    gunnar = Slave()
    assert gunnar.rank == "Thraell"
| true |
4d5ff1bad37cae3d112e5562d685fa80161caecd | Python | nikit2121/Bidirectional-LSTM-for-text-classification | /keras_toxic_comments.py | UTF-8 | 3,272 | 2.515625 | 3 | [] | no_license | from keras.models import Sequential,Model
from keras.preprocessing import sequence,text
from keras.layers import Embedding, Dense, LSTM,Bidirectional,Dropout,Input,GlobalMaxPool1D
from keras.callbacks import EarlyStopping,ModelCheckpoint
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
""" Load Data and sample submission file """
train_data = pd.read_csv('/home/nikit/Desktop/Kaggle/toxic_comments/data/train/train.csv')
test_data = pd.read_csv('/home/nikit/Desktop/Kaggle/toxic_comments/data/test/test.csv')
submission = pd.read_csv('/home/nikit/Desktop/Kaggle/toxic_comments/data/sample_submission.csv')
Max_features = 30000
maxlen = 200
embed_size = 50
list_train_data = train_data.comment_text.fillna('missing text').values
list_test_data = test_data.comment_text.fillna('missing text').values
label = train_data.columns[2:]
y = train_data[label].values
tokenizer = text.Tokenizer(num_words=Max_features)
tokenizer.fit_on_texts(list(list_train_data))
list_tokenized_train = tokenizer.texts_to_sequences(list_train_data)
x_train = sequence.pad_sequences(list_tokenized_train,maxlen=maxlen)
list_tokenized_test = tokenizer.texts_to_sequences(list_test_data)
x_test = sequence.pad_sequences(list_tokenized_test,maxlen=maxlen)
vocab_size = len(tokenizer.word_index)+1
with open('/home/nikit/Desktop/Glove_word_vectos/glove.twitter.27B.50d.txt') as glove_twitter:
embedding_index = dict()
for line in glove_twitter:
value = line.split()
word = value[0]
vector = np.asarray(value[1:],dtype="float32")
embedding_index[word] = vector
glove_twitter.close()
#embeddings = np.stack(embedding_index.values())
nb_words = min(Max_features,vocab_size)
embedding_matrix = np.random.normal(0.0209404, 0.6441043, (nb_words, embed_size))
for word,i in tokenizer.word_index.items():
if i>= Max_features: continue
embedding_vector = embedding_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
def get_model():
    """Build and compile the bidirectional-LSTM toxic-comment classifier."""
    embed_size = 50
    comment_input = Input(shape=(maxlen, ))
    # Embedding layer initialised from the pre-trained GloVe matrix built above.
    hidden = Embedding(Max_features, embed_size, weights=[embedding_matrix])(comment_input)
    hidden = Bidirectional(LSTM(50, return_sequences=True, dropout=0.1, recurrent_dropout=0.1))(hidden)
    hidden = GlobalMaxPool1D()(hidden)
    hidden = Dense(50, activation="relu")(hidden)
    hidden = Dropout(0.1)(hidden)
    # Six independent sigmoid outputs — presumably one per label column in
    # `label`; verify against the training targets.
    outputs = Dense(6, activation="sigmoid")(hidden)
    model = Model(inputs=comment_input, outputs=outputs)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
model = get_model()
batch_size = 32
epochs = 1
save_parameter_file_path = '/home/nikit/Desktop/Kaggle/toxic_comments/weights.best.hdf5'
checkpoint = ModelCheckpoint(save_parameter_file_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
early = EarlyStopping(monitor="val_loss", mode="min", patience=20)
callbacks_list = [checkpoint, early] #early
model.fit(x_train, y, batch_size=batch_size, epochs=epochs, validation_split=0.1, callbacks=callbacks_list)
model.load_weights(save_parameter_file_path)
y_test = model.predict(x_test)
submission[label] = y_test
submission.to_csv('/home/nikit/Desktop/Kaggle/toxic_comments/result_keras1.csv',index=False)
| true |
de769ffd46e3f663c8ce6d2f82fd8bb5ca05599c | Python | Jump1556/codecademy | /print_dictionary.py | UTF-8 | 739 | 3.828125 | 4 | [] | no_license | # print key and value of dictionary
# Walk two parallel dictionaries and show each product's price and stock.
prices = {
    "banana": 4,
    "apple": 2,
    "orange": 1.5,
    "pear": 3
}

stock = {
    "banana": 6,
    "apple": 0,
    "orange": 32,
    "pear": 15
}

for product in prices:
    print(product)
    print("Prices: %s" % prices[product])
    print("Stock: %s" % stock[product])

# Demonstrate the three dictionary views, then iterate key/value pairs.
my_dict = {
    'Mom' : 64,
    'papa' : 113,
    'weight' : True
}
print(my_dict.items())
print(my_dict.keys())
print(my_dict.values())

for name in my_dict:
    print(name, my_dict[name])

# Total inventory value: price * stock, summed over all products.
total = 0
for product in prices:
    i = prices[product] * stock[product]
    total += i
    print(i)

print(total)
| true |
117c14756577f867b499a4a8002f8a3524d1ca7e | Python | hongsungheejin/Algo-Study | /ganta/divide_conqure/1629_곱셈.py | UTF-8 | 267 | 3.25 | 3 | [] | no_license | a = 0
b = 0
c = 0
def func(a, b, mod=None):
    """Fast modular exponentiation: return a**b % mod (divide and conquer).

    :param a: base
    :param b: exponent, must be >= 1
    :param mod: modulus; when None, falls back to the module-level global
        ``c`` (kept for backward compatibility with the __main__ driver)
    :return: a**b modulo the modulus
    """
    m = c if mod is None else mod
    if b == 1:
        return a % m
    # Square the half-power once instead of recursing twice.
    half = func(a, b // 2, m)
    result = (half * half) % m
    if b % 2 == 1:
        # Odd exponent: one extra factor of the base.
        result = (result * a) % m
    return result
# BOJ 1629 driver: read "a b c" from stdin and print a**b mod c.
if __name__ == "__main__":
    a, b, c = map(int, input().split())
    print(func(a,b))
c1311dac6882c84d783764a4ce31e4c30edc52fe | Python | bhup99/shiny-octo-bear | /Python Programs/assignment5im.py | UTF-8 | 2,507 | 2.71875 | 3 | [] | no_license | def fun(a,b):
x=a
y=a
for i in range(a,b):
if line[0][i]==' ' or i==len(line[0])-1:
if i==len(line[0])-1:
y=y+1
if sub[y-x]==1:
for k in range(0,n):
if len(inp[k])==y-x:
break
print k,y,x,i
for j in range(x,y):
dic[line[0][j]]=inp[k][j-x]
x=y+1
y=y+1
else:
y=y+1
print "Enter the value of n"
n=input()
inp=[]
print "Enter the dictionary words"
ma=0
for i in range(0,n):
inp.append(raw_input())
if ma<len(inp[i]):
ma=len(inp[i])
line=[]
line.append(raw_input())
dic={}
sub=[]
ma=ma+1
for i in range(0,ma):
sub.append(0)
for i in range(0,n):
sub[len(inp[i])]=sub[len(inp[i])]+1
x=0
y=0
print sub
fun(0,len(line[0]))
print dic
fin=[]
for i in dic.keys():
fin.append(i)
print fin
space=0
out={}
for i in range(0,len(fin)):
x=0
y=0
space=0
print out
print "Smart"
print fin
print y
for j in range(0,len(line[0])):
star=[]
if line[0][j]==' ' or j==len(line[0])-1:
if j==len(line[0])-1:
y+=1
sco=0
for k in range(x,y):
if line[0][k]==fin[i]:
sco=1
break
print x,y,fin[i]
print k
if sco==1:
for l in range(0,n):
if len(inp[l])>k-x:
if inp[l][k-x]==dic[fin[i]]:
star.append(l)
if len(star)==1:
out[space]=inp[star[0]]
else:
for l in range(x,y):
for m in range(0,len(fin)):
if fin[m]==line[0][l]:
break
if m!=len(fin):
for m in range(0,len(star)):
if dic[line[0][l]]!=inp[m][l-x]:
star.remove(star[m])
if len(star)==1:
out[space]=star[0]
break
space+=1
y+=1
x=y
else:
y+=1
print out
| true |
bba2fbc63be54e3926760584e5b6421f7136dc3a | Python | JenniferWang/projectEuler | /CountingSummations.py | UTF-8 | 909 | 3.671875 | 4 | [] | no_license | # problem 76
class Count:
    """Project Euler 76: count the ways to write a target as a sum of parts.

    ``count(target, maxCoin)`` counts partitions of `target` using parts no
    larger than `maxCoin`; with maxCoin = target - 1 this excludes the
    trivial single-term sum, as the problem requires.
    """

    def __init__(self):
        # Memo table keyed by (target, maxCoin).
        self.cache = {}

    def memoSearch(self, target, maxCoin):
        """Recursively count partitions of `target` with parts <= maxCoin."""
        key = (target, maxCoin)
        if key in self.cache:
            return self.cache[key]
        if target == 0:
            return 1
        if maxCoin < 1:
            return 0
        if maxCoin > target:
            # Parts larger than the target can never be used.
            result = self.memoSearch(target, maxCoin - 1)
        else:
            # Either use one part of size maxCoin, or forbid that size.
            result = (self.memoSearch(target - maxCoin, maxCoin)
                      + self.memoSearch(target, maxCoin - 1))
        self.cache[key] = result
        return result

    def count(self, target, maxCoin):
        """Public entry point for the memoised top-down count."""
        return self.memoSearch(target, maxCoin)

    # Method name (with its original typo) kept: it is called below.
    def count_another_verison(self, target):
        """Bottom-up equivalent using parts 1 .. target-1."""
        ways = [1] + [0] * target
        for part in range(1, target):
            for amount in range(part, target + 1):
                ways[amount] += ways[amount - part]
        return ways[target]
# Problem 76 answer: partitions of 100 into at least two positive integers.
# NOTE(review): Python 2 print statements — the class above is 2/3-compatible,
# but these two lines require Python 2.
sol = Count()
print sol.count(100, 99)
print sol.count_another_verison(100)
| true |
a03e6d9a637a76c3c2afe0f4573a8aa4a000ad5f | Python | mihudec/nuaal | /nuaal/Parsers/PatternsLib_Old.py | UTF-8 | 13,710 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | import re
class Patterns:
"""
This class holds all the necessary regex patterns, which are used by `ParserModule`. This class returns compiled regex patterns based on specified device
type.
"""
def __init__(self, device_type, DEBUG=False):
"""
:param str device_type: String representation of device type, such as `cisco_ios`
:param bool DEBUG: Enables/disables debugging output
"""
raise DeprecationWarning("This class is deprecated. Please use nuaal.Parsers.PatternsLib instead.")
self.device_type = device_type
self.DEBUG = DEBUG
self.map = {
"cisco_ios": self.cisco_ios_patterns()
}
def get_patterns(self):
"""
Function used for retrieving compiled regex patterns.
:return: Dictionary of compiled regex patterns
"""
return self.map[self.device_type]
def cisco_ios_patterns(self):
"""
This function holds regex patterns for `cisco_ios` device type.
:return:
"""
patterns = {
"level0": {
"show inventory": [
re.compile(
pattern=r"^NAME:\s\"(?P<name>[\w\s()/]+)\",\s+DESCR:\s\"(?P<desc>(?:[\w\s(),.\-_+/#:&]+))\"\s*PID:\s(?P<pid>\S+)\s*,\s+VID:\s(?P<vid>\S+)?\s*,\s+SN:\s+(?P<sn>\S+)",
flags=re.MULTILINE
)
],
"show vlan brief": [
re.compile(
pattern=r"^(?P<id>\d+)\s+(?P<name>\S+)\s+(?P<status>\S+)\s+(?P<access_ports>(?:[A-Za-z]+\d+(?:\/\d+){0,2},?\s+)+)?",
flags=re.MULTILINE
)
],
"show interfaces": [
re.compile(
pattern=r"^\S.*(?:$\s+^\s.*)+",
flags=re.MULTILINE
)
],
"show etherchannel summary": [
re.compile(
pattern="^(?P<group>\d+)\s+(?P<portchannel>Po\d{1,3})\((?P<status>[DIHRUPsSfMuwd]{1,2})\)\s+(?P<protocol>\S+)\s+(?P<ports>(?:(?:\w+\d+(?:\/\d+)*)\(\S\)\s*)+)",
flags=re.MULTILINE
)
],
"show cdp neighbors detail": [
re.compile(
pattern=r"(?<=-{25}\n).*?(?=-{25}|$)",
flags=re.DOTALL
)
],
"show version": [
re.compile(
pattern=r"^Cisco.*Configuration\sregister\sis\s\S+",
flags=re.DOTALL
)
],
"show mac address-table": [
re.compile(
pattern=r"^\s+(?P<vlan>\S+)\s+(?P<mac>(?:[\da-f]{4}\.?){3})\s+(?P<type>\S+)\s+(?P<ports>\S+)",
flags=re.MULTILINE
),
re.compile(
pattern=r"^(?P<mac>(?:[\da-f]{4}\.?){3})\s+(?P<type>\S+)\s+(?P<vlan>\S+)\s+(?P<ports>\S+)",
flags=re.MULTILINE
)
],
"show ip arp": [
re.compile(
pattern=r"^(?P<protocol>\S+)\s+(?P<ipAddress>((?:\d{1,3}.?){4}))\s+(?P<age>(?:\d+|-))\s+(?P<mac>(?:[\da-f]{4}\.?){3})\s+(?P<type>\S+)\s+(?P<interface>\S+)",
flags=re.MULTILINE
)
],
"show license": [
re.compile(
pattern=r"^Index.*(?:(?:$\s+^\s.*)+)?",
flags=re.MULTILINE
)
]
},
"level1": {
"show vlan brief": {
"access_ports": [
re.compile(
pattern=r"[A-Za-z]+\d+(?:\/\d+){0,2}"
)
]
},
"show version": {
"version": [
re.compile(
pattern=r"^(?P<vendor>[Cc]isco)\s(?P<software>IOS(?:\sXE)?)\sSoftware,.*Version\s(?P<version>[\w\.\(\)\-]+)(?:,)?",
flags=re.MULTILINE
),
re.compile(
pattern=r"^(?P<vendor>[Cc]isco)\s(?P<platform>[\w-]+).*with\s(?P<systemMemory>\d+K/\d+K)\sbytes\sof\smemory.",
flags=re.MULTILINE
),
re.compile(
pattern=r"^(?P<vendor>[Cc]isco)\s(?P<platform>[\w-]+).*with\s(?P<systemMemory>\d+K)\sbytes\sof\smemory.",
flags=re.MULTILINE
),
re.compile(
pattern=r"^$\s^(?P<hostname>[\w\-_]+)\suptime\sis\s(?P<uptime>.*)$",
flags=re.MULTILINE
),
re.compile(
pattern=r"^(?P<hostname>[\w\-_]+)\suptime\sis\s(?P<uptime>.*)$",
flags=re.MULTILINE
),
re.compile(
pattern=r"^System\simage\sfile\sis\s\"(?P<imageFile>.*)\"",
flags=re.MULTILINE
),
re.compile(
pattern=r"Experimental\sVersion\s(?P<experimental_version>\S+)",
flags=re.MULTILINE
)
]
},
"show interfaces": {
"name": [
re.compile(
pattern=r"^(?P<name>\S+)\sis\s(?P<status>.*),\sline\sprotocol\sis\s(?P<lineProtocol>\S+)",
flags=re.MULTILINE
),
re.compile(
pattern=r"^(?P<name>\S+)",
flags=re.MULTILINE
)
],
"address": [
re.compile(
pattern=r"^\s+Hardware\sis\s(?P<hardware>.*),\saddress\sis\s(?P<mac>\S+)\s\(bia\s(?P<bia>\S+)\)",
flags=re.MULTILINE
)
],
"description": [
re.compile(
pattern=r"^\s+Description:\s(?P<description>.*)",
flags=re.MULTILINE
)
],
"ipv4Address": [
re.compile(
pattern=r"^\s+Internet\saddress\sis\s(?P<ipv4Address>[\d\.]+)\/(?P<ipv4Mask>\d+)",
flags=re.MULTILINE
)
],
"rates": [
re.compile(
pattern=r"^\s+(?P<loadInterval>\d+\s\S+)\sinput\srate\s(?P<inputRate>\d+).*,\s(?P<inputPacketsInterval>\d+).*$"
r"\s+.*output\srate\s(?P<outputRate>\d+).*,\s(?P<outputPacketsInterval>\d+)",
flags=re.MULTILINE
)
],
"duplex": [
re.compile(
pattern=r"^\s+(?P<duplex>\S+)-duplex,\s(?P<speed>(?:\d+)?\S+)(?:,\s+link\stype\sis\s(?P<linkType>\S+))?,\smedia\stype\sis\s(?P<mediaType>.*)",
flags=re.MULTILINE
),
re.compile(
pattern=r"^\s+(?P<duplex>\S+)-duplex,\s(?P<sped>\S+)$",
flags=re.MULTILINE
)
],
"mtu": [re.compile(pattern=r"^\s+MTU\s(?P<mtu>\d+).*BW\s(?P<bandwidth>\d+)\sKbit(?:/sec)?,\sDLY\s(?P<delay>\d+).*$"
r"\s+reliability\s(?P<reliability>\S+),\stxload\s(?P<txLoad>\S+),\srxload\s(?P<rxLoad>\S+)",
flags=re.MULTILINE),
],
"pseudowire": [
re.compile(
pattern=r"^\s+Encapsulation\s(?P<encapsulation>\w+)",
flags=re.MULTILINE
),
re.compile(
pattern=r"^\s+RX\s+(?P<rxPackets>\d+)\spackets\s(?P<rxBytes>\d+)\sbytes\s(?P<rxDrops>\d+)\sdrops\s+TX\s+(?P<txPackets>\d+)\spackets\s(?P<txBytes>\d+)\sbytes\s(?P<txDrops>\d+)\sdrops",
flags=re.MULTILINE
),
re.compile(
pattern=r"^\s+Peer\sIP\s(?P<peerIP>[\d\.]+),\sVC\sID\s(?P<virtualCircuitID>\d+)",
flags=re.MULTILINE
),
re.compile(
pattern=r"^\s+MTU\s(?P<mtu>\d+)\sbytes",
flags=re.MULTILINE
)
],
"input_counters": [
re.compile(pattern=r"^\s+(?P<rxPackets>\d+)\spackets\sinput,\s(?P<rxBytes>\d+)\sbytes,\s(?P<noBuffer>\d+)\sno\sbuffer$",
flags=re.MULTILINE),
re.compile(pattern=r"\s+Received\s(?P<rxBroadcasts>\d+).*\((?P<rxMulticasts>\d+).*$",
flags=re.MULTILINE),
re.compile(pattern=r"\s+(?P<runts>\d+)\srunts,\s(?P<giants>\d+)\sgiants,\s(?P<throttles>\d+).*$",
flags=re.MULTILINE),
re.compile(pattern=r"\s+(?P<inputErrors>\d+)\sinput\serrors,\s(?P<crc>\d+)\sCRC,\s(?P<frame>\d)\sframe,\s(?P<overrun>\d+)\soverrun,\s(?P<ignored>\d+).*$",
flags=re.MULTILINE),
re.compile(pattern=r"(?:\s+(?P<watchdog>\d+)\swatchdog,\s(?P<multicasts>\d+)\smulticast,\s(?P<pauseInput>\d+)\spause\sinput$\s+(?P<inputPacketsWithDribbleCondition>\d+)\sinput.*)?",
flags=re.MULTILINE)
],
"output_counters": [
re.compile(pattern=r"^\s+(?P<txPackets>\d+)\spackets\soutput,\s(?P<txBytes>\d+)\sbytes,\s(?P<underruns>\d+)\sunderruns$",
flags=re.MULTILINE),
re.compile(pattern=r"\s+(?P<outputErrors>\d+)\soutput\serrors,\s(?:(?P<collision>\d+)\scollisions,\s)?(?P<interfaceResets>\d+)\sinterface\sresets$",
flags=re.MULTILINE),
re.compile(pattern=r"\s+(?P<babbles>\d+)\sbabbles,\s(?P<lateCollision>\d+)\slate\scollision,\s(?P<deferred>\d+)\sdeferred$",
flags=re.MULTILINE),
re.compile(pattern=r"\s+(?P<lostCarrier>\d+)\slost\scarrier,\s(?P<noCarrier>\d+)\sno\scarrier,\s(?P<pauseOutput>\d+)\sPAUSE\soutput$",
flags=re.MULTILINE),
re.compile(pattern=r"\s+(?P<outputBufferFailures>\d+)\soutput\sbuffer\sfailures,\s(?P<outputBufferSwappedOut>\d+)\soutput buffers swapped out$",
flags=re.MULTILINE)
]
},
"show etherchannel summary": {
"ports": [
re.compile(
pattern=r"(?P<port>\w+\d+(?:\/\d+)*)\((?P<status>[A-Za-z]+)\)"
)
]
},
"show cdp neighbors detail": {
"hostname": [
re.compile(pattern=r"^Device\sID:\s(?P<hostname>[\w\_\-\(\)]+)(?:.\S+)?", flags=re.MULTILINE),
re.compile(pattern=r"^Device\sID:\s(?P<hostname>\S+)", flags=re.MULTILINE)
],
"ipAddress": [re.compile(pattern=r"IP\saddress:\s(?P<ipAddress>(?:\d{1,3}\.?){4})", flags=re.MULTILINE)],
"platform": [re.compile(pattern=r"^Platform:\s(?:(?:Cisco|cisco\s)?(?P<platform>(?:\S+\s?)+))", flags=re.MULTILINE)],
"capabilities": [re.compile(pattern=r"Capabilities:\s(?P<capabilities>(?:\S+\s)+)")],
"localInterface": [re.compile(pattern=r"^Interface:\s(?P<localInterface>[A-Za-z]+\d+(?:\/\d+)*)", flags=re.MULTILINE)],
"remoteInterface": [re.compile(pattern=r"Port\sID\s\(outgoing\sport\):\s(?P<remoteInterface>[A-Za-z]+\d+(?:\/\d+)*)")]
},
"show license": {
"index": [re.compile(pattern=r"^Index\s(?P<index>\d+)")],
"feature": [re.compile(pattern=r"Feature:\s(?P<feature>\S+)")],
"period_left": [re.compile(pattern=r"Period\sleft:\s(?P<period_left>.*)")],
"period_used": [re.compile(pattern=r"Period\sUsed:\s(?P<period_used>.*)")],
"license_type": [re.compile(pattern=r"License\sType:\s(?P<license_type>.*)")],
"license_state": [re.compile(pattern=r"License\sState:\s(?P<license_state>.*)")],
"license_count": [re.compile(pattern=r"License\sCount:\s(?P<license_count>.*)")],
"license_priority": [re.compile(pattern=r"License\sPriority:\s(?P<license_priority>.*)")],
}
}
}
return patterns
| true |
786fe6f58195e1d7e8285812751b7f1b77b6837e | Python | zfha/youkeda-python-case | /lessonTest/util.py | UTF-8 | 1,671 | 3.125 | 3 | [] | no_license | def getLeftRightPoint(points):
leftPoint = points[0]
rightPoint = points[0]
for point in points:
if point[0] < leftPoint[0]:
leftPoint = point
if point[0] > rightPoint[0]:
rightPoint = point
return leftPoint, rightPoint
def getCenter(leftPoint, rightPoint):
return (leftPoint[0] + rightPoint[0]) / 2, (leftPoint[1] + rightPoint[1]) / 2
# 获取小女孩的左眼信息
def getBrowPoint(face_landmarks):
left_eye = face_landmarks['left_eye']
right_eye = face_landmarks['right_eye']
leftPoint, rightPoint = getLeftRightPoint(left_eye)
leftCenterPoint = getCenter(leftPoint, rightPoint)
leftPoint, rightPoint = getLeftRightPoint(right_eye)
rightCenterPoint = getCenter(leftPoint, rightPoint)
centerPoint = getCenter(leftCenterPoint, rightCenterPoint)
browPoint = (centerPoint[0], centerPoint[1] - (rightCenterPoint[0] - leftCenterPoint[0]) * 1.2)
return browPoint
def premultiply(im):
pixels = im.load()
for y in range(im.size[1]):
for x in range(im.size[0]):
r, g, b, a = pixels[x, y]
if a != 255:
r = r * a // 255
g = g * a // 255
b = b * a // 255
pixels[x, y] = (r, g, b, a)
def unmultiply(im):
pixels = im.load()
for y in range(im.size[1]):
for x in range(im.size[0]):
r, g, b, a = pixels[x, y]
if a != 255 and a != 0:
r = 255 if r >= a else 255 * r // a
g = 255 if g >= a else 255 * g // a
b = 255 if b >= a else 255 * b // a
pixels[x, y] = (r, g, b, a)
| true |