blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4f8ab2d16d766b93ef722579cf9e12c8853cfe63 | Python | wncbb/prepare_meeting | /kruskal/main.py | UTF-8 | 1,229 | 3.21875 | 3 | [] | no_license |
class UnionFind:
    """Disjoint-set (union-find) with path compression and union by rank."""

    def __init__(self, nodes):
        """Create one singleton set per node in *nodes*."""
        self.parents = {}
        self.ranks = {}
        for n in nodes:
            self.parents[n] = n
            self.ranks[n] = 0

    def find(self, node):
        """Return the representative of *node*'s set, compressing the path."""
        if node != self.parents[node]:
            self.parents[node] = self.find(self.parents[node])
        return self.parents[node]

    def union(self, a, b):
        """Merge the sets containing *a* and *b* (union by rank)."""
        pa = self.find(a)
        pb = self.find(b)
        if pa == pb:
            return
        # The three rank cases are mutually exclusive; the original used
        # three independent ``if`` statements, which only worked because
        # the first two branches never modify the ranks.  An elif chain
        # states the intent explicitly.
        if self.ranks[pa] > self.ranks[pb]:
            self.parents[pb] = pa
        elif self.ranks[pa] < self.ranks[pb]:
            self.parents[pa] = pb
        else:
            self.parents[pb] = pa
            self.ranks[pa] += 1
def prim(edges):
    """Return a minimum spanning forest of *edges*.

    Despite the name, this is Kruskal's algorithm: edges are taken in
    increasing cost order and accepted when they join two different
    components.  *edges* is an iterable of (cost, src, dst) tuples; the
    result is a list of [cost, src, dst] lists.
    """
    ordered = sorted(edges, key=lambda e: e[0])
    vertices = {v for _, src, dst in ordered for v in (src, dst)}
    components = UnionFind(vertices)
    chosen = []
    for cost, src, dst in ordered:
        # Skip edges whose endpoints are already connected.
        if components.find(src) != components.find(dst):
            components.union(src, dst)
            chosen.append([cost, src, dst])
    return chosen
# Sample graph as (cost, src, dst) edges; prints the chosen MST edges.
edges = [
    (8, 'A', 'B'),
    (6, 'A', 'C'),
    (3, 'C', 'D'),
    (4, 'B', 'D'),
    (5, 'A', 'D')
]
rst = prim(edges)
for v in rst:
    # Fixed for Python 3: the original used the statement form ``print v``.
    print(v)
04ab9f31a8e40dbf496f20e49729a99b58579422 | Python | Kewenjing1020/method_numeric | /pc1/PC1_finiteDiff.py | UTF-8 | 1,063 | 3.515625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 30 15:57:26 2016
@author: kewenjing
PC1: Finite Differences & ODE
"""
from pylab import *

# ex1.1: finite-difference approximations of f'(t) for f(t) = sin(t)/t^3.
def f(t):
    return sin(t)/(t**3)

def df_num(t, h):
    # First-order forward difference.
    return (f(t+h)-f(t))/h

def df_exacte(t):
    # Exact first derivative of sin(t)/t^3.
    return (t*cos(t)-3*sin(t))/(t**4)

def d2f_exacte(x):
    # Exact second derivative of sin(x)/x^3.
    return (-(x**2)*sin(x)-6*x*cos(x)+12*sin(x))/(x**5)

def error_1st(t, h):
    return abs(df_num(t,h) - df_exacte(t))

def df_num_2nd(t, h):
    # Second-order centered difference.
    return (f(t+h)-f(t-h))/(2*h)

def error_2nd(x, h):
    # BUG FIX: the original called the undefined name ``ddf_num_2nd``.
    return abs(df_num_2nd(x,h)-df_exacte(x))

def df_num_4th(x, h):
    # Fourth-order centered difference for f' (the original expression
    # was left unfinished at ``f(x-2*h)-8*f(x-h)``).
    return (f(x-2*h)-8*f(x-h)+8*f(x+h)-f(x+2*h))/(12*h)

h = linspace(10**-4, 1)
err1 = error_1st(4, h)
err2 = error_2nd(4, h)
plot(h, err1)
plot(h, err2)
xscale('log')
yscale('log')

# ex1.2: approximations of the second derivative.
def dev_2nd_2h(x, h):
    # Second-order centered difference for f'' with step h.
    return (f(x+h)-2*f(x)+f(x-h))/(h**2)

def dev_2nd_4h(x, h):
    # Centered difference for f'' with step 2h.  The original used
    # f(x+h) and divided by 4*h**4, which is dimensionally wrong.
    return (f(x+2*h)-2*f(x)+f(x-2*h))/(4*h**2)

def dev_4th(x, h):
    # Fourth-order centered difference for f''.  The original had the
    # wrong sign on f(x-2h) and omitted the 12*h**2 divisor.
    return (-f(x-2*h)+16*f(x-h)-30*f(x)+16*f(x+h)-f(x+2*h))/(12*h**2)

fx = df_exacte(4)
# Completed the unfinished plot expression: error of dev_2nd_2h against
# the exact second derivative at t = 4 (the original line ended in
# ``-)`` and did not parse).
plot(h, abs(dev_2nd_2h(4, h) - d2f_exacte(4)))
# ex2
#ex2
| true |
ba74e13b725f84942df0b1a2a4bbb87796a8fa0d | Python | stevenpclark/aoc2019 | /08/08.py | UTF-8 | 608 | 2.9375 | 3 | [
"MIT"
] | permissive | import numpy as np
# Advent of Code 2019 day 8: decode the Space Image Format.
with open('input.txt', 'r') as handle:
    raw = handle.read().strip()
#raw = '0222112222120000'

nr = 6   # image height in pixels
nc = 25  # image width in pixels

digits = [int(ch) for ch in raw]
d = np.reshape(np.array(digits), (-1, nr, nc))
nb = d.shape[0]  # number of layers

# Part 1: per-layer digit counts, sorted so the layer with the fewest
# zeros comes first; the answer is (#1s * #2s) on that layer.
layer_counts = []
for b in range(nb):
    values, counts = np.unique(d[b, :, :], return_counts=True)
    layer_counts.append(list(counts))
layer_counts.sort()
print(layer_counts[0][1] * layer_counts[0][2])

# Part 2: front-to-back, the first non-transparent (!= 2) pixel wins.
for r in range(nr):
    for c in range(nc):
        for b in range(nb):
            v = d[b, r, c]
            if v != 2:
                break
        print({0: ' ', 1: 'X', 2: '.'}[v], end='')
    print('\n', end='')
| true |
76aae7e17884aaaa683d9c897c957965be698ff9 | Python | davidpsplace-pc/PythonPlay | /PyOne.py | UTF-8 | 111 | 3.578125 | 4 | [] | no_license | import requests
print("Hi!")
# Get name and greet the user
name = input("Your name? ")
print("Hello,", name)
| true |
d2d790d35d52d6f4229ab04f5e7f87a424f1199a | Python | squishykid/solax | /solax/__init__.py | UTF-8 | 1,370 | 2.578125 | 3 | [
"MIT"
] | permissive | """Support for Solax inverter via local API."""
import asyncio
import logging
from async_timeout import timeout
from solax.discovery import discover
from solax.inverter import Inverter, InverterResponse
_LOGGER = logging.getLogger(__name__)
REQUEST_TIMEOUT = 5
async def rt_request(inv: Inverter, retry, t_wait=0) -> InverterResponse:
    """Make call to inverter endpoint.

    Retries with increasing backoff: on a timeout the call recurses with
    one fewer retry and a wait of (t_wait * 2) + 5 seconds.  Re-raises
    asyncio.TimeoutError once the retries are exhausted.
    """
    if t_wait > 0:
        # A non-zero wait means this call is a retry after a timeout.
        msg = "Timeout connecting to Solax inverter, waiting %d to retry."
        _LOGGER.error(msg, t_wait)
        await asyncio.sleep(t_wait)
    new_wait = (t_wait * 2) + 5  # backoff used by the *next* retry, if any
    retry = retry - 1
    try:
        async with timeout(REQUEST_TIMEOUT):
            return await inv.get_data()
    except asyncio.TimeoutError:
        if retry > 0:
            return await rt_request(inv, retry, new_wait)
        _LOGGER.error("Too many timeouts connecting to Solax.")
        raise
async def real_time_api(ip_address, port=80, pwd=""):
i = await discover(ip_address, port, pwd)
return RealTimeAPI(i)
class RealTimeAPI:
    """Solax inverter real time API"""
    # pylint: disable=too-few-public-methods
    def __init__(self, inv: Inverter):
        """Initialize the API client."""
        self.inverter = inv
    async def get_data(self) -> InverterResponse:
        """Query the real time API (delegates to rt_request with 3 attempts)."""
        return await rt_request(self.inverter, 3)
| true |
671b2ac683ab908d5096b8ca607d8cfd6a37e38f | Python | jim58/moose | /archive.py | UTF-8 | 2,707 | 2.53125 | 3 | [] | no_license | #!/usr/bin/python
'''
archive.py - archive a picture file into a camera/date directory tree.

Exit codes: 0 success or dry run, 1 empty input file or duplicate
destination, 2 no usable EXIF data.
'''
import os
import sys
import time
import re
import psycopg2
import string
import argparse
import datetime
import shutil
from PIL import Image
from PIL.ExifTags import TAGS

# Root of the photo archive; files land under <model>/<year>/<month>/<ymd>/.
rootdir = '/mnt/Local/photosDB'

parser = argparse.ArgumentParser()
parser.add_argument('infile', help='name of file to add to pictures table')
parser.add_argument('-q', '--quiet', action="store_true", help='no output')
parser.add_argument('-v', '--verbose', action="store_true", help='verbose output')
parser.add_argument('-x', '--execute', action="store_true", help='copy file to data store')
args = parser.parse_args()
if args.verbose:
    args.quiet = False

infile = args.infile
filename = os.path.basename(infile)
abspath = os.path.abspath(infile)
dirname = os.path.dirname(abspath)
thing = os.stat(abspath)
filesize = thing.st_size
filetime = datetime.datetime.fromtimestamp(int(round(thing.st_mtime)))
if filesize == 0:
    exit(1)

try:
    items = Image.open(abspath)._getexif().items()
except Exception:
    # BUG FIX: narrowed from a bare ``except:`` so KeyboardInterrupt and
    # SystemExit are no longer swallowed.  Covers unreadable images and
    # files whose _getexif() returns None.
    if args.verbose:
        print(" ... no exif data")
    exit(2)

# Collect only the EXIF tags we care about, defaulting to ''.
value = {}
myTags = [
    'DateTime', 'DateTimeOriginal', 'DateTimeDigitized',
    'ExifImageWidth', 'ExifImageHeight', 'ImageWidth', 'ImageLength',
    'Make', 'Model', 'Software' ]
for key in myTags:
    value[key] = ''
if args.verbose:
    print("{:20} = {}".format('Filename', infile))
for (k, v) in items:
    keyname = TAGS.get(k)
    if keyname in myTags:
        value[keyname] = v
        if args.verbose:
            print("{:20} = {}".format(keyname, value[keyname]))

# Pick the best available date, falling back to the file's mtime.
if value['DateTime']:
    ymd = value['DateTime'].split(" ")[0]
elif value['DateTimeOriginal']:
    ymd = value['DateTimeOriginal'].split(" ")[0]
elif value['DateTimeDigitized']:
    ymd = value['DateTimeDigitized'].split(" ")[0]
else:
    ymd = filetime.strftime("%Y-%m-%d")
# EXIF dates use ':' separators; normalize to '-'.
ymd = ymd.replace(":", "-")

model = value['Model']
if model:
    # Make the camera model safe to use as a directory name.
    model = model.replace(' ', '_').replace('/', '_')
else:
    model = "unknown"
year = ymd[0:4]
month = ymd[0:7]
targetdir = "{}/{}/{}/{}/{}".format(rootdir, model, year, month, ymd)
destination = "{}/{}/{}/{}/{}/{}".format(rootdir, model, year, month, ymd, filename)
if args.verbose:
    print('os.mkdir("{}")'.format(targetdir))
    print('copy to "{}"'.format(destination))

# Without -x this is a dry run: report what would happen and stop.
if not args.execute:
    exit(0)
if args.verbose:
    print("executing copy")
if not os.path.exists(targetdir):
    os.makedirs(targetdir)
if os.path.exists(destination):
    if not args.quiet:
        print(" ... file already exists ({})".format(destination))
    exit(1)
else:
    shutil.copy2(abspath, destination)
    if not args.quiet:
        print(" ... file inserted")
| true |
c5206f5e96d7bde0eace822cdef2473d518e724d | Python | mepix/DataProcessING | /russell/countmotif.py | UTF-8 | 2,727 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 21 14:28:50 2019
@author: merrickcampbell
"""
#==============================================================================
# Derivative of RODEO2, modified just to count the number of sequence motifs
# for Chayanid Ongpipattanakul
#==============================================================================
# Copyright (C) 2017 Bryce L. Kille
# University of Illinois
# Department of Chemistry
#
# Copyright (C) 2017 Christopher J. Schwalen
# University of Illinois
# Department of Chemistry
#
# Copyright (C) 2017 Douglas A. Mitchell
# University of Illinois
# Department of Chemistry
#
# License: GNU Affero General Public License v3 or later
# Complete license availabel in the accompanying LICENSE.txt.
# or <http://www.gnu.org/licenses/>.
#
# This file is part of RODEO2.
#
# RODEO2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RODEO2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
#==============================================================================
# Special thanks goes to AntiSmash team, whose antiSMASH-rodeo repository at
# https://bitbucket.org/mmedema/antismash-rodeo/ provided the backbone code for
# a great deal of the heuristic calculations.
#==============================================================================
import re
class Ripp:
    """Running tally of Y..P, W..P and E..P sequence motifs.

    Counts accumulate across successive count_sequence() calls.  Matches
    are non-overlapping, as produced by re.findall.
    """

    def __init__(self):
        # One counter per motif family.
        self.countY = 0
        self.countW = 0
        self.countE = 0

    def count_sequence(self, sequence):
        """Add this sequence's motif hits to the running counters."""
        self.countY += len(re.findall("(Y..P)", sequence))
        self.countW += len(re.findall("(W..P)", sequence))
        self.countE += len(re.findall("(E..P)", sequence))
file = open("countedfile.txt","r")
# myRipp.sequence = "YAAPALAAAGAAAAATYAAPALAAAGBBBBBTYCCPCLCACGDDDDDT"
myRipp = Ripp()
for line in file:
if '>' in line:
continue
else:
# line = myRipp = Ripp(-1,-1,'FOO','BAR',-1)
myRipp.count_sequence(line)
print(myRipp.countY, myRipp.countW, myRipp.countE) | true |
8ce306335bea81f80d627a157d144db2f92c4469 | Python | Aasthaengg/IBMdataset | /Python_codes/p03523/s194541675.py | UTF-8 | 271 | 2.9375 | 3 | [] | no_license | S = input()
# Decide whether S (read by input() on the preceding line) can be turned
# into the template by inserting characters -- apparently the AtCoder
# "Akihabara" problem (insert 'A's to obtain AKIHABARA).
# NOTE(review): the template lacks the final 'A' of AKIHABARA, and any
# characters of S left over after the loop finishes are never re-checked
# (e.g. a trailing junk character could still print YES) -- confirm
# against the original judge before reusing this.
akb = 'AKIHABAR'
if len(S) <= len(akb)+1:
	i = 0
	for a in akb:
		if i >= len(S):
			# S exhausted before the template: remaining template chars
			# would all have to be optional, so reject.
			print('NO')
			exit()
		if S[i] == a: i+=1
		elif S[i] != a and a == 'A': continue  # 'A's in the template are optional
		else:
			print('NO')
			exit()
	print('YES')
else:
	print('NO')
| true |
883723523f58d40a0b9912bdd56b666dbe38652f | Python | alpha-kwhn/Baekjun | /powerful104/2960.py | UTF-8 | 415 | 2.875 | 3 | [] | no_license | a,b= map(int, input().split())
# Baekjoon 2960: run the sieve of Eratosthenes on 2..a and print the b-th
# number erased (a and b were read on the line above).
# Rewritten: the original deleted elements from the list it was iterating
# (``del li[li.index(i)]`` inside ``for i in li``), which silently skips
# the element following every deletion and only happens to work because
# consecutive survivors are never both multiples of the current prime.
erased = [False] * (a + 1)
mi = 0    # how many numbers have been erased so far
ans = 0
for p in range(2, a + 1):
    if erased[p]:
        continue
    # Erase p and its not-yet-erased multiples in increasing order,
    # matching the problem's required erase order.
    for m in range(p, a + 1, p):
        if not erased[m]:
            erased[m] = True
            mi += 1
            if mi == b:
                ans = m
                break
    if mi == b:
        break
print(ans)
| true |
3e036588752751d25b44c9beb316329d4010e32d | Python | stevenc49/project_euler | /problem16/16.py | UTF-8 | 92 | 3.21875 | 3 | [] | no_license | sum = 0
# Project Euler 16: sum of the decimal digits of 2**1000.
total = 0  # renamed from ``sum`` to stop shadowing the builtin
for c in str(pow(2, 1000)):
    total += int(c)
# Fixed for Python 3: the original used the statement form ``print sum``.
print(total)
| true |
c80079d7566f0150996b6be08ad1631ee2aaf4eb | Python | zcavic/RL_EnergyStorageScheduling | /obsolete/power_algorithms/network_management.py | UTF-8 | 3,625 | 2.625 | 3 | [] | no_license | import obsolete.power_algorithms.network_definition as grid
import pandapower as pp
import pandas as pd
class NetworkManagement:
    """Thin wrapper around the CIGRE MV pandapower test network.

    Provides helpers to toggle capacitor switches and to scale loads,
    static generators and storage units in the underlying network.
    """

    def __init__(self):
        self.power_grid = grid.create_cigre_network_mv()

    def get_power_grid(self):
        """Return the underlying pandapower network object."""
        return self.power_grid

    # For given capacitor switch name (CapSwitch1, CapSwitch2...) status is changed.
    def change_capacitor_status(self, capSwitchName, closed):
        switchIndex = pp.get_element_index(self.power_grid, "switch", capSwitchName)
        self.power_grid.switch.closed.loc[switchIndex] = closed

    def toogle_capacitor_status(self, capSwitchName):
        """Invert the closed state of the named switch (name kept as-is)."""
        switchIndex = pp.get_element_index(self.power_grid, "switch", capSwitchName)
        currentState = self.power_grid.switch.closed.loc[switchIndex]
        self.power_grid.switch.closed.loc[switchIndex] = not currentState

    def get_all_capacitor_switch_names(self):
        return self.power_grid.switch['name'].tolist()

    def get_all_capacitors(self):
        """Return a {switch_name: closed?} mapping for every switch."""
        return pd.Series(self.power_grid.switch.closed.values, index=self.power_grid.switch.name).to_dict()

    def set_scaling_to_all_load(self, scaling_factor):
        for index in self.power_grid.load.index:
            self.power_grid.load.scaling.loc[index] = scaling_factor

    def set_scaling_to_all_generation(self, scaling_factor):
        for index in self.power_grid.sgen.index:
            self.power_grid.sgen.scaling.loc[index] = scaling_factor

    def set_load_scaling(self, scaling_factors):
        """Apply one scaling factor per load; no-op with a message on length mismatch."""
        if len(scaling_factors) != len(self.power_grid.load.index):
            print("(ERROR) Input list of scaling factors {} is not the same length as number of loads {}".format(
                len(scaling_factors), len(self.power_grid.load.index)))
            return
        for index, load in self.power_grid.load.iterrows():
            self.power_grid.load.scaling.loc[index] = scaling_factors[index]

    def set_generation_scaling(self, scaling_factors):
        """Apply one scaling factor per static generator."""
        if len(scaling_factors) != len(self.power_grid.sgen.index):
            # BUG FIX: the message previously formatted the *load* count
            # while talking about generators.
            print("(ERROR) Input list of scaling factors {} is not the same length as number of generators {}".format(
                len(scaling_factors), len(self.power_grid.sgen.index)))
            return
        for index, sgen in self.power_grid.sgen.iterrows():
            self.power_grid.sgen.scaling.loc[index] = scaling_factors[index]

    def set_storage_scaling(self, scaling_value, index):
        self.power_grid.storage.scaling.loc[index] = scaling_value

    def set_capacitors_initial_status(self, capacitors_statuses):
        """Set the closed state of every switch from *capacitors_statuses*."""
        capacitor_indices = self.get_capacitor_indices_from_shunts()
        if len(capacitors_statuses) != len(capacitor_indices):
            # BUG FIX: report the quantity actually compared (capacitor
            # shunts), not the total shunt count.
            print(
                "(ERROR) Input list of capacitor statuses {} is not the same length as number of capacitors {}".format(
                    len(capacitors_statuses), len(capacitor_indices)))
            return
        capacitor_switches = self.power_grid.switch.index.tolist()
        input_status_index = 0
        for switch_index in capacitor_switches:
            self.power_grid.switch.closed.loc[switch_index] = capacitors_statuses[input_status_index]
            input_status_index += 1

    def get_capacitor_indices_from_shunts(self):
        """Indices of shunt elements whose name marks them as capacitors."""
        capacitors = []
        for index, row in self.power_grid.shunt.iterrows():
            if 'Cap' in row['name']:
                capacitors.append(index)
        return capacitors

    def print_cap_status(self):
        print(self.power_grid.switch)

    def get_es_indexes(self):
        """Indices of all energy-storage units in the network."""
        return self.power_grid.storage.index
| true |
ac2ca5353d9794beee718a81ede94e878a29ad0b | Python | datpham2001/CSTTNT-Project1 | /19120297-19120473/maze_without_reward/uninformed_search.py | UTF-8 | 3,760 | 3.671875 | 4 | [] | no_license | import create_and_visualize as support
# Search-tree node: a state plus the parent/action that produced it.
class Node():
    """Lightweight container used by the maze search frontier."""

    def __init__(self, state, parent, action):
        self.state, self.parent, self.action = state, parent, action
# LIFO frontier used for depth-first search.
class Stack():
    """Stack frontier; Queue subclasses this and overrides remove()."""

    def __init__(self):
        self.frontier = []

    def add(self, node):
        """Push a node onto the frontier."""
        self.frontier.append(node)

    def contains_state(self, state):
        """Return True if any frontier node carries *state*."""
        return any(node.state == state for node in self.frontier)

    def empty(self):
        """Return True when the frontier holds no nodes."""
        return len(self.frontier) == 0

    def remove(self):
        """Pop and return the most recently added node (LIFO).

        Raises Exception when the frontier is empty.
        """
        if self.empty():
            raise Exception('Frontier is empty!')
        # list.pop() is O(1); the original rebuilt the list with an
        # O(n) slice copy on every removal.
        return self.frontier.pop()
# FIFO frontier used for breadth-first search.
class Queue(Stack):
    """Queue frontier: inherits add/empty/contains_state from Stack."""

    def remove(self):
        """Pop and return the oldest node (FIFO).

        Raises Exception when the frontier is empty.
        """
        if self.empty():
            raise Exception('Frontier is empty!')
        # list.pop(0) avoids the original's O(n) slice copy; a
        # collections.deque would be O(1) but would change the
        # ``frontier`` attribute type shared with Stack's methods.
        return self.frontier.pop(0)
# Make wall from cell equal "x"
def makeWall(maze):
wall = []
row, col = len(maze), len(maze[0])
for i in range(row):
row_val = []
for j in range(col):
if maze[i][j] == 'x':
row_val.append(True)
else:
row_val.append(False)
wall.append(row_val)
return wall
# Expand a node into its reachable neighbor nodes.
def neighbors(node):
    """Return Node objects for in-bounds, non-wall cells adjacent to *node*.

    Relies on the module-level globals ``row``, ``col`` and ``wall`` set
    by the driver code below.  Candidate order (right, up, left, down)
    determines the search's tie-breaking and is preserved.
    """
    x, y = node.state
    moves = [
        ('right', (x, y + 1)),
        ('up', (x - 1, y)),
        ('left', (x, y - 1)),
        ('down', (x + 1, y)),
    ]
    result = []
    for action, (r, c) in moves:
        if 0 <= r < row and 0 <= c < col and not wall[r][c]:
            result.append(Node((r, c), node, action))
    return result
def solveMaze(maze, start, end, type):
    """Search *maze* from *start* to *end*.

    type == 1 selects BFS (Queue frontier); type == 2 selects DFS (Stack).
    Any other value falls back to the Stack created as the default.
    Returns (states, num_explored): the path as a list of coordinates from
    start to end, and the number of nodes removed from the frontier.
    Raises Exception('No solution') when the frontier empties out.
    """
    num_explored = 0 # get the numbers of cell have visited
    start_node = Node(state=start, parent=None, action=None)
    fringe = Stack()
    if type == 1:
        fringe = Queue()
    elif type == 2:
        fringe = Stack()
    fringe.add(start_node)
    visited = set()
    while True:
        if fringe.empty():
            raise Exception('No solution')
        node = fringe.remove()
        num_explored += 1
        if node.state == end:
            # Goal reached: walk the parent links back to the start.
            actions = []
            states = []
            while node is not None:
                actions.append(node.action)
                states.append(node.state)
                node = node.parent
            states.reverse()
            return states, num_explored
        visited.add(node.state)
        for neighbor_node in neighbors(node):
            # Enqueue only states neither expanded nor already waiting.
            if neighbor_node.state not in visited and not fringe.contains_state(neighbor_node.state):
                fringe.add(neighbor_node)
'''Calling functions in support functions
writeFile: create maze from text file
readFile: return bonus_points and maze read from file (can change filename to get another maze)
findStartAndExitPosition: return coordinate of start and exit point in the maze
'''
bonus, maze = support.readFile('./maze_without_reward1.txt')
(start, end) = support.findStartAndExitPosition(maze)
# These three globals are consumed by neighbors() above.
wall = makeWall(maze)
row = len(maze)
col = len(maze[0])
# Choose 1 or 2 in order to perform BFS or DFS algorithm
type_solve = int((input('BFS-->1 or DFS-->2 ? ')))
# Solution
path, num_explored = solveMaze(maze, start, end, type_solve)
# visualize maze and path (change type = 1(normal size) or 2(big size))
support.visualize_maze(maze, bonus, start, end, path, type=1)
# The two prints below are Vietnamese: "total states visited" and
# "path cost" respectively.
print('Tong so cac trang thai da di: ', num_explored)
print('Chi phi duong di: ', len(path) - 1)
a2175e1f6c586499d3b48515c2811a3ad016429f | Python | wan-catherine/Leetcode | /test/test_N418_Sentence_Screen_Fitting.py | UTF-8 | 677 | 3.421875 | 3 | [] | no_license | from unittest import TestCase
from problems.N418_Sentence_Screen_Fitting import Solution
class TestSolution(TestCase):
    """LeetCode 418 (Sentence Screen Fitting) regression tests.

    NOTE(review): the first three tests call wordsTyping with keyword
    arguments (rows, cols, sentence) while the last passes them
    positionally as (sentence, rows, cols) -- confirm the Solution
    signature accepts both orderings.
    """
    def test_words_typing(self):
        self.assertEqual(1, Solution().wordsTyping(rows = 2, cols = 8, sentence = ["hello", "world"]))
    def test_words_typing_1(self):
        self.assertEqual(2, Solution().wordsTyping(rows = 3, cols = 6, sentence = ["a", "bcd", "e"]))
    def test_words_typing_2(self):
        self.assertEqual(1, Solution().wordsTyping(rows = 4, cols = 5, sentence = ["I", "had", "apple", "pie"]))
    def test_words_typing_3(self):
        self.assertEqual(5293333, Solution().wordsTyping(["try","to","be","better"], 10000, 9001))
147a3912c8a847f4db23572c95e01a746dddb0a5 | Python | will214/noisyduck | /tests/test_annulus.py | UTF-8 | 5,643 | 2.984375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Testing decomposition results for cylindrical annulus.
Fixtures:
uniform_state: returns dictionary containing the state for a uniform axial
mean flow.
uniform_numerical: returns eigenvalues and eigenvectors from the numerical
decomposition of the uniform axial mean flow.
uniform_analytical: returns eigenvalues and eigenvectors from the analytical
decomposition of the uniform axial mean flow.
The fixtures here essentially precompute results, then these results are
used in tests for various purposes.
"""
from __future__ import division
import pytest
import numpy as np
import noisyduck as nd
@pytest.fixture
def uniform_state():
    """Return the mean state for uniform axial flow.

    Keys: ratio of specific heats, density, radial/tangential/axial
    velocity, pressure, temporal frequency, circumferential mode number,
    and a 50-point radial grid from the inner to the outer radius.
    """
    ri=0.25
    ro=1.0
    res=50
    r=np.linspace(ri,ro,res)
    return {'gam':1.4, 'rho':1.2, 'vr' :0., 'vt' :0., 'vz' :100., 'p':100000., 'omega':3000., 'm':2, 'r':r}
@pytest.fixture
def uniform_numerical(uniform_state):
    """Compute the numerical eigendecomposition of a uniform axial
    mean flow that can be used by tests.

    Returns (eigenvalues, left eigenvectors, right eigenvectors),
    filtered to the acoustic modes.
    """
    evals, evecs_l, evecs_r = nd.annulus.numerical.decomposition(uniform_state['omega'],
                                                                 uniform_state['m'],
                                                                 uniform_state['r'],
                                                                 uniform_state['rho'],
                                                                 uniform_state['vr'],
                                                                 uniform_state['vt'],
                                                                 uniform_state['vz'],
                                                                 uniform_state['p'],
                                                                 uniform_state['gam'],
                                                                 filter='acoustic',alpha=0.0000001,perturb_omega=False)
    return evals, evecs_l, evecs_r
@pytest.fixture
def uniform_analytical(uniform_state,uniform_numerical):
    """ Compute the analytical eigendecomposition of a uniform axial
    mean flow that can be used by tests.

    Frequency is nondimensionalized by the speed of sound; the mode
    count is taken from the numerical decomposition so the two sets
    are directly comparable.
    """
    gam = uniform_state['gam']
    p = uniform_state['p']
    rho = uniform_state['rho']
    c = np.sqrt(gam*p/rho)
    mach = uniform_state['vz']/c
    # Access the results of the numerical decomposition so we know how many
    # eigenvalues to find.
    evals_n, evecs_ln, evecs_rn = uniform_numerical
    n = len(evals_n)
    evals, evecs_r = nd.annulus.analytical.decomposition(uniform_state['omega']/c,
                                                         uniform_state['m'],
                                                         mach,
                                                         uniform_state['r'],
                                                         n)
    return evals, evecs_r
def test_uniformaxialflow_analytical_unique(uniform_analytical):
    """ Test the eigenvalues from the analytical decomposition are unique """
    evals, evecs_r = uniform_analytical
    # set() drops exact duplicates; lengths match iff all values differ.
    assert len(evals) == len(set(evals))
def test_uniformaxialflow_analytical_matches_reference(uniform_analytical):
    """ Test the analytical eigenvalues match previously computed reference values. """
    evals_a, evecs_ra = uniform_analytical
    # Previously computed reference eigenvalues for the analytical
    # eigendecomposition of a uniform axial mean flow in an annular
    # duct. rho=1.2, vz=100, p=100000, omega=3000/c, m=2, ri/ro=0.25
    evals_ref = np.array([(11.888859931191348+0j),
                          (-6.263859931191352+0j),
                          (9.746593068700136+0j),
                          (-4.121593068700137+0j),
                          (2.8124999999999987+3.0004194877589363j),
                          (2.8124999999999987-3.0004194877589363j),
                          (2.812499999999999+10.162007091553026j),
                          (2.812499999999999-10.162007091553026j),
                          (2.8124999999999982+15.38697739666744j),
                          (2.8124999999999982-15.38697739666744j)])
    # Order-insensitive comparison: each analytical eigenvalue must be
    # close (atol=1e-6) to at least one reference value.
    eval_matches = np.zeros(len(evals_a), dtype=bool)
    for i in range(len(evals_a)):
        for j in range(len(evals_ref)):
            if np.isclose(evals_a[i],evals_ref[j], atol=1.e-6):
                eval_matches[i]=True
                break
    # Assert that we found a matching reference eigenvalue for each
    # analytical eigenvalue.
    assert eval_matches.all()
def test_uniformaxialflow_numerical_matches_analytical(uniform_analytical,uniform_numerical):
    """ Test the annular cylindrical duct decomposition for the
    case of uniform axial mean flow.

    Looser tolerance (atol=1e-2) than the reference test, since the
    numerical decomposition carries discretization error.
    """
    # Unpack results from analytical and numerical decompositions
    evals_a, evecs_ra = uniform_analytical
    evals_n, evecs_ln, evecs_rn = uniform_numerical
    # For each numerical eigenvalue, test that it matches closely
    # with one of the analytical eigenvalues.
    eval_matches = np.zeros(len(evals_n), dtype=bool)
    for i in range(len(evals_n)):
        # Search each entry in evals_a to try and find a close match
        for j in range(len(evals_a)):
            if np.isclose(evals_n[i],evals_a[j],atol=1.e-2):
                eval_matches[i]=True
                break
    # Assert that we found a matching analytical eigenvalue for each
    # numerical eigenvalue
    assert eval_matches.all()
| true |
a9bbbbe2c3403eb71d0d3a4531c38b750a34170a | Python | cmartinezcab/per19-ejercicios | /persona.py | UTF-8 | 747 | 3.640625 | 4 | [] | no_license | class NIF():
def __init__(self, num1, num2, num3, num4, num5, num6, num7, num8):
self.num1 = num1
self.num2 = num2
self.num3 = num3
self.num4 = num4
self.num5 = num5
self.num6 = num6
self.num7 = num7
self.num8 = num8
def Calcular_Letra(self):
if self.num1 == 1 or self.num1== 3:
return 'A'
if self.num1 == 2 or self.num1== 4:
return 'M'
if self.num1 == 5 or self.num1== 7:
return 'K'
if self.num1 == 6 or self.num1== 8 or self.num1==9:
return 'L'
n=NIF(4,5,6,7,8,2,1,9)
# Print the eight digits followed by the computed check letter.
print('El NIF es: ',n.num1, n.num2, n.num3, n.num4, n.num5, n.num6, n.num7, n.num8, n.Calcular_Letra())
| true |
5495970abbd07b389962b286ce5d4f3133a7588d | Python | afonsocrg/portfolio | /static_analyser/flow.py | UTF-8 | 6,023 | 2.640625 | 3 | [] | no_license | from vulnerability import Vulnerability
from itertools import product
from copy import deepcopy
from util import sort_dict
class Flow:
    """Tracks taint-analysis state: per-pattern sources, sinks and
    sanitizers, across all feasible combinations of predecessor flows.

    ``tracked_patterns`` is a list of alternatives (one per combination
    of incoming flows); each alternative maps a pattern name to its
    {'sources', 'sinks', 'sanitizers'} lists.  ``reported_vuls`` keeps
    every Vulnerability already emitted so duplicates are suppressed.
    """

    def __init__(self, previous_flows):
        """Build this flow's alternatives as the cross product of the
        non-empty alternatives of *previous_flows*."""
        # List of tracked Sources, Sinks and Sanitizers per pattern
        # {
        #   pattern_name : {
        #       pattern: Pattern,
        #       sources : [source, ...],
        #       sinks: [sink, ...],
        #       sanitizers: [sanitizer, ...]
        #   }
        # }
        self.tracked_patterns = []
        self.reported_vuls = []
        # previous_flows = deepcopy(previous_flows)
        # remove redundant flows
        all_flows = []
        for flow in previous_flows:
            tp = flow.get_tracked_patterns()
            flow_patterns = []
            for possible_pattern in tp: # for every {} in [ {} , ... ]
                copy = {}
                for pat_name, tracked in possible_pattern.items():
                    src = tracked['sources']
                    snk = tracked['sinks']
                    snt = tracked['sanitizers']
                    # Keep only patterns that actually carry something.
                    if len(src + snk + snt) > 0:
                        copy[pat_name] = deepcopy(tracked)
                if len(copy.keys()) > 0:
                    flow_patterns.append(copy)
            if len(flow_patterns) > 0:
                all_flows.append(flow_patterns)
        if len(all_flows) == 0:
            # No predecessor carried state: a single empty alternative.
            self.tracked_patterns = [{}]
            return
        # all_flows = [flow.get_tracked_patterns() for flow in previous_flows if len(flow.get_tracked_patterns()) > 0]
        # WARNING: These combinations point to the same lists!
        all_combs = product(*all_flows)
        for combination in all_combs:
            # Merge the per-flow patterns of this combination into one
            # alternative, deduplicating sources/sinks/sanitizers.
            comb_pattern = {}
            for pat in combination:
                for pat_name, tracked in pat.items():
                    if pat_name not in comb_pattern:
                        comb_pattern[pat_name] = deepcopy(tracked)
                    else:
                        # Pattern already exists. Adding unique sources/sinks/sanitizers
                        known_sources = comb_pattern[pat_name]['sources']
                        known_sinks = comb_pattern[pat_name]['sinks']
                        known_sanitizers = comb_pattern[pat_name]['sanitizers']
                        for source in tracked['sources']:
                            if source not in known_sources:
                                known_sources.append(source)
                        for sink in tracked['sinks']:
                            if sink not in known_sinks:
                                known_sinks.append(sink)
                        for sanitizer in tracked['sanitizers']:
                            if sanitizer not in known_sanitizers:
                                known_sanitizers.append(sanitizer)
            self.tracked_patterns.append(comb_pattern)

    def get_tracked_patterns(self):
        """Return the list of tracked-pattern alternatives (not a copy)."""
        return self.tracked_patterns

    def remove_sanitizers(self):
        """Clear the sanitizer lists of every pattern in every alternative."""
        for possible_flow in self.tracked_patterns:
            for tracked in possible_flow.values():
                tracked['sanitizers'] = []

    def remove_sinks(self):
        """Clear the sink lists of every pattern in every alternative."""
        for possible_flow in self.tracked_patterns:
            for tracked in possible_flow.values():
                tracked['sinks'] = []

    def remove_sources(self):
        """Clear the source lists of every pattern in every alternative."""
        for possible_flow in self.tracked_patterns:
            for tracked in possible_flow.values():
                tracked['sources'] = []

    def check_vulns(self):
        """Emit a Vulnerability per (pattern, sink) that has sources.

        Already-reported vulnerabilities (compared via str()) are
        skipped; the sinks consumed here are cleared so each sink is
        reported at most once per alternative.
        """
        vulns = []
        for possible_flow in self.tracked_patterns:
            for pat_name, tracked in possible_flow.items():
                if len(tracked['sources']) > 0 and len(tracked['sinks']) > 0:
                    for sink in tracked['sinks']:
                        # [:] makes a copy of the array, so the reported vuln isn't changed
                        # after being reported
                        vuln_name = pat_name
                        src = tracked['sources'][:]
                        san = tracked['sanitizers'][:]
                        snk = [sink][:]
                        vuln = Vulnerability(vuln_name, src, san, snk)
                        duplicated = False
                        for rv in self.reported_vuls:
                            # NOTE(review): this reset inside the loop is
                            # redundant -- the break below makes it harmless.
                            duplicated = False
                            if str(rv) == str(vuln):
                                duplicated = True
                                break
                        if not duplicated:
                            self.reported_vuls.append(vuln)
                            vulns.append(vuln)
                    # clear already reported sinks
                    tracked['sinks'] = []
        return vulns

    def merge(self, other_flow):
        """Union *other_flow*'s alternatives into ours.

        Returns True when this flow changed (used as a fixed-point test
        by the caller).  Alternatives are compared structurally via
        sort_dict + str so key order does not matter.
        """
        changed = False
        incoming_patterns = deepcopy(other_flow.get_tracked_patterns())
        if len(incoming_patterns) == 1 and incoming_patterns[0] == {}:
            # if incoming is empty, did not change
            return False
        if len(self.tracked_patterns) == 1 and self.tracked_patterns[0] == {}:
            # empty patterns: new patterns = incoming!
            self.tracked_patterns = incoming_patterns
            return True
        # TODO: avoid duplicate patterns
        for pattern in incoming_patterns:
            matches_any = False
            sorted_incoming_pattern = sort_dict(pattern)
            for our_pattern in self.tracked_patterns:
                sorted_our_pattern = sort_dict(our_pattern)
                # since they are sorted, the str will produce the same string
                if str(sorted_incoming_pattern) == str(sorted_our_pattern):
                    matches_any = True
                    break
            if not matches_any:
                self.tracked_patterns.append(pattern)
                changed = True
        return changed

    def __repr__(self):
        return f"<Flow {self.tracked_patterns}>"
| true |
f2abff4a06dc1ce010b266484030d75454a12e74 | Python | comet-ml/comet-examples | /fastai/train-example.py | UTF-8 | 1,465 | 2.640625 | 3 | [] | no_license | ## MNIST Example in fastai
## Note: this uses fastai version 1.0.38
## pip install fastai==1.0.38
from comet_ml import Experiment
import fastai
import fastai.vision
import glob
import os
# The model, also known as wrn_22:
model = fastai.vision.models.WideResNet(num_groups=3,
                                        N=3,
                                        num_classes=10,
                                        k=6,
                                        drop_p=0.)
## Get the MNIST_TINY dataset:
path = fastai.datasets.untar_data(fastai.datasets.URLs.MNIST_TINY)
print("data path:", path)
## Still too many for a CPU, so we trim it down to 10 in each category:
# NOTE: this deletes image files on disk beyond the first 10 per class.
dirname = os.path.dirname(path)
for group in ["mnist_tiny/train/3/*.png",
              "mnist_tiny/train/7/*.png",
              "mnist_tiny/valid/3/*.png",
              "mnist_tiny/valid/7/*.png"]:
    for filename in glob.glob(os.path.join(dirname, group))[10:]:
        os.remove(filename)
# Log the training run to Comet.
experiment = Experiment(project_name="fastai")
## Now we get the image data from the folder:
data = fastai.vision.ImageDataBunch.from_folder(path, bs=10) # bs: batch size
if data.device.type == 'cpu':
    learn = fastai.basic_train.Learner(data, model, metrics=fastai.metrics.accuracy)
else: # GPU: use mixed precision
    learn = fastai.basic_train.Learner(data, model, metrics=fastai.metrics.accuracy).to_fp16()
with experiment.train():
    learn.fit_one_cycle(10, 3e-3, wd=0.4, div_factor=10, pct_start=0.5)
| true |
a5041058ecc98d1d9ad4b7e7bd8bdc1f5ca38697 | Python | Aasthaengg/IBMdataset | /Python_codes/p02912/s299400252.py | UTF-8 | 239 | 2.9375 | 3 | [] | no_license | import heapq
# Halve the largest element M times, then print the sum of all elements.
# heapq is a min-heap, so a max-heap is simulated with negated values.
N, M = map(int, input().split())
A = list(map(int, input().split()))
heap = [-value for value in A]
heapq.heapify(heap)
for _ in range(M):
    largest = -heapq.heappop(heap)
    heapq.heappush(heap, -(largest // 2))
print(-sum(heap))
| true |
f80ddf81d57f2ff0e6018bce6a38aa1b8c08fe0c | Python | eareyan/noisyce | /unit_tests.py | UTF-8 | 711 | 2.796875 | 3 | [] | no_license |
num_consu = 2
num_goods = 5
scale = 10
# the_V = uniform_random_distribution(num_consu, num_goods, scale)
# the_V = preferred_subset_distribution(num_consu, num_goods, scale)
the_V = preferred_good_distribution(num_consu, num_goods, scale)
print(f'the_V = \n{the_V}')
the_matching, the_welfare = get_maximum_welfare(V=the_V, flag_print_matching=True)
# Test solve for highest and lowest CE prices.
# Lowest CE
lowest_prices = solve_ce_prices_lp(V=the_V, matching=the_matching, minimize=True)
compute_market_regret(the_V, the_matching, lowest_prices)
# Highest CE
highest_prices = solve_ce_prices_lp(V=the_V, matching=the_matching, minimize=False)
compute_market_regret(the_V, the_matching, highest_prices) | true |
723930308638a7585ff3027586b421b1c370df56 | Python | stonkgs/stonkgs | /src/stonkgs/data/fix_broken_pretraining_dataset.py | UTF-8 | 1,903 | 2.90625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""Fixes wrongly preprocessed lists of ints in the preprocessed pandas dataframes.
Run with:
python -m src.stonkgs.data.fix_broken_pretraining_dataset
"""
import os
import click
import pandas as pd
from tqdm import tqdm
from stonkgs.constants import (
PRETRAINING_DIR,
)
@click.command()
@click.option(
    "--chunk_size",
    default=100000,
    help="Size of the chunks used for processing the corrupted file",
    type=int,
)
@click.option(
    "--input_path",
    default=os.path.join(PRETRAINING_DIR, "pretraining_transe_preprocessed.tsv"),
    help="Path to the corrupted file (a .tsv file)",
    type=str,
)
@click.option(
    "--output_path",
    default=os.path.join(PRETRAINING_DIR, "pretraining_transe_preprocessed_fixed.pkl"),
    help="Output file path specification (ending in .pkl)",
    type=str,
)
def convert_tsv_to_pkl(
    chunk_size: int,
    input_path: str,
    output_path: str,
):
    """Converts strings that look like lists into actual lists of ints and saves a pickle of the repaired dataframe."""

    def _parse_int_list(x: str) -> list:
        # BUG FIX: for the string "[]" the original produced [''] and
        # int('') raised ValueError; the ``if y`` guard maps it to [].
        return [int(y) for y in x.strip("[]").split(", ") if y]

    # One converter per stringified-list column (all behave identically).
    converter_dict = {
        col: _parse_int_list
        for col in [
            "input_ids",
            "attention_mask",
            "token_type_ids",
            "masked_lm_labels",
            "ent_masked_lm_labels",
        ]
    }
    converter_dict["next_sentence_labels"] = lambda x: int(x)  # type: ignore # noqa
    chunks = []
    # Process dataframe in bits with a progress bar
    for chunk in tqdm(
        pd.read_csv(
            input_path,
            sep="\t",
            chunksize=chunk_size,
            converters=converter_dict,
        )
    ):
        chunks.append(chunk)
    complete_df = pd.concat(chunks, axis=0)
    # Pickle the complete dataframe
    complete_df.to_pickle(output_path)


if __name__ == "__main__":
    # Fixing the dataset
    convert_tsv_to_pkl()
| true |
63519f79e51f006be4dbc95bbf44c2d876ddd807 | Python | GuHan99/472Deliverable1 | /ReadFile.py | UTF-8 | 3,226 | 2.765625 | 3 | [] | no_license | import cmd
from Box import Box
from Tools import output_file
class ReadFile(cmd.Cmd):
    """Interactive shell that reads puzzle files and runs them through Box."""

    prompt = 'Candy '

    @staticmethod
    def _read_orders(path):
        """Read one puzzle description per line, split on single blanks."""
        orders = []
        with open(path, 'rt') as f:
            for line in f:
                orders.append(line.strip('\n').split(' '))
        return orders

    @staticmethod
    def _append_checksum(output_str):
        """Append the summed length of every even-numbered output line.

        Uses '==' for the parity test; the original compared ints with
        'is', which only works by accident for small CPython ints.
        """
        counter = 0
        for index, line in enumerate(output_str.splitlines()):
            if index % 2 == 0:
                counter += len(line)
        return output_str + str(counter)

    @staticmethod
    def do_manual(self):
        """Run every puzzle from ./input.txt in manual mode."""
        output_str = ''
        # enumerate from 1 so the progress message can show the puzzle number;
        # the original printed the literal '%dth puzzle complete.' because the
        # format argument was missing
        for index, line in enumerate(ReadFile._read_orders('./input.txt'), 1):
            box = Box(line)
            box.manual_run()
            output_str += box.output_string()
            print('%dth puzzle complete.' % index)
        output_str = ReadFile._append_checksum(output_str)
        output_file(output_str)

    @staticmethod
    def do_auto(self):
        """Run the puzzles of a chosen level file in auto mode."""
        level_num = int(input('choose level '))
        # levels 1-4 map to ./input<level>.txt, level 0 to ./input.txt;
        # any other level leaves the order list empty (same as before)
        if level_num == 0:
            input_order = ReadFile._read_orders('./input.txt')
        elif level_num in (1, 2, 3, 4):
            input_order = ReadFile._read_orders('./input%d.txt' % level_num)
        else:
            input_order = []
        index = 0
        output_str = ''
        for line in input_order:
            box = Box(line)
            box.auto_run()
            output_str += box.output_string()
            index += 1
            if box.time_out:
                print('%dth puzzle time-out.' % index)
            else:
                print('%dth puzzle complete.' % index)
        output_str = ReadFile._append_checksum(output_str)
        output_str += '\n'
        output_file(output_str, level_num)

    @staticmethod
    def do_exit(self):
        """Leave the command loop."""
        return True
if __name__ == '__main__':
ReadFile().cmdloop() | true |
e8c84b95784c28887d51f0d982fc93d3ab39147d | Python | ffelipegupe/holbertonschool-interview | /0x06-log_parsing/0-stats.py | UTF-8 | 1,060 | 3.203125 | 3 | [] | no_license | #!/usr/bin/python3
""" Log parsing """
from sys import stdin
# Counters for every HTTP status code we track (keys are strings because
# the log fields arrive as text).
codes_dic = {
    "200": 0,
    "301": 0,
    "400": 0,
    "401": 0,
    "403": 0,
    "404": 0,
    "405": 0,
    "500": 0
}
# Running total of all file sizes seen on stdin.
size = 0
def printing():
    """Print the accumulated file size and every status code seen so far.

    Reads the module-level ``size`` total and ``codes_dic`` counters and
    prints the codes in ascending order, skipping codes with a zero count.
    """
    print("File size: {}".format(size))
    for key in sorted(codes_dic.keys()):
        # the original additionally checked isinstance(int(key), int), which
        # is always True (and would raise, not filter, for non-numeric keys)
        if codes_dic[key]:
            print("{}: {}".format(key, codes_dic[key]))
if __name__ == "__main__":
    # number of log lines consumed so far
    count = 0
    try:
        for line in stdin:
            count += 1
            data = line.split()
            # last field is the file size
            size += int(data[-1])
            if len(data) > 2:
                # second-to-last field is the status code; the isinstance
                # check is always True for the keys in codes_dic (it would
                # raise ValueError, not filter, for non-numeric fields)
                if data[-2] in codes_dic and isinstance(int(data[-2]),
                                                        int):
                    codes_dic[data[-2]] += 1
            # report every 10 lines
            if count % 10 == 0:
                printing()
    except Exception:
        pass
    finally:
        # always print a final report, even on interrupt/parse failure
        printing()
| true |
46ac6409fdffc3f865c2b7398fe1fbccb4d271b3 | Python | yaovct/0008.String2Integer | /index.py | UTF-8 | 1,170 | 3.1875 | 3 | [
"MIT"
] | permissive | class Solution(object):
def myAtoi(self, str):
"""
:type str: str
:rtype: int
"""
p = 0
ans = 0
found = 0
sign = 1
while p < len(str):
if found == 0:
if str[p] == " ":
p += 1
continue
elif str[p] == "-":
sign = -1
found = 1
p += 1
continue
elif str[p] == "+":
found = 1
p += 1
continue
k = ord(str[p]) - 0x30
if k >= 0 and k < 10:
found = 1
if ans > 0:
ans *= 10
ans += k
if ans > 0x7FFFFFFF:
if sign == 1:
return 0x7FFFFFFF
else:
return -1<<31
else:
return ans*sign
p += 1
return ans*sign
# Ad-hoc smoke test driver.
my_test = Solution()
sample = ["42", " -42", "4193 with words", "words and 987", "-91283472332", "3.14159", " 2147483648", "", "+1", " ", "0 98", "+-1a"]
# NOTE(review): 'answer' only covers the first six samples and is never
# compared against -- the loop below just prints the conversions.
answer = [42, -42, 4193, 0, -2147483648, 3]
for m in sample:
    print("%s => %d" % (m, my_test.myAtoi(m)))
| true |
7a34a9f1a51cb3f659d8be0706be6dd717006416 | Python | jackyxuebin/cs5234 | /datagen.py | UTF-8 | 1,650 | 2.546875 | 3 | [] | no_license | import random
import numpy as np
import uuid
import socket
import struct
import time
import numpy.random
def uniform_input_gen(n, m):
    """Write n integers drawn uniformly from [0, m-1] to uniform_input.txt,
    one per line."""
    # context manager guarantees the handle is closed even on error
    with open('uniform_input.txt', 'w') as f:
        for _ in range(n):
            f.write(str(random.randint(0, m - 1)) + '\n')
def exp_input_gen(n, m):
    """Write n integers in [0, m-1] to exp_input.txt, one per line, drawn
    from a geometric-like distribution where value i has (normalized)
    weight 1 / 2**(i+1)."""
    weights = [1 / (2 ** (i + 1)) for i in range(m)]
    probs = np.asarray(weights) / np.sum(weights)
    # context manager guarantees the handle is closed even on error
    with open('exp_input.txt', 'w') as f:
        for _ in range(n):
            f.write(str(np.random.choice(m, p=probs)) + '\n')
def uuid_gen(n):
    """Write n random (version 4) UUIDs to uuid.txt, one per line."""
    # context manager guarantees the handle is closed even on error
    with open('uuid.txt', 'w') as f:
        for _ in range(n):
            f.write(str(uuid.uuid4()) + '\n')
def ip_gen(n):
    """Write n random dotted-quad IPv4 addresses to ip.txt, one per line."""
    # context manager guarantees the handle is closed even on error
    with open('ip.txt', 'w') as f:
        for _ in range(n):
            # pack a random 32-bit value big-endian and render it dotted-quad
            ip = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))
            f.write(ip + '\n')
def ip_stream_gen(f, n, d, t):
    """Write a burst of timestamped random IPs into open file f.

    Emits d one-second buckets centred on timestamp t; the per-bucket count
    ramps up exponentially towards the middle bucket and decays afterwards
    (peak ~n around i == d/2).

    NOTE(review): under Python 3 'd/2' is a float, so t_i is written as a
    float (e.g. '123.5') and the exponents are floats too; under Python 2
    everything stays integral -- confirm which interpreter is intended.
    """
    for i in range(d):
        # bucket timestamp, offset around t
        t_i = t - d/2 + i
        if i < d/2:
            # ramp-up: count doubles each bucket until the middle
            num_gen = int(n * 2 ** (-d/2+i))
        else:
            # ramp-down: count halves each bucket after the middle
            num_gen = int(n * 2 ** (d/2-i))
        for j in range(num_gen):
            ip = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))
            f.write(str(t_i)+' '+ip+'\n')
def ip_stream_gen_1(n, d):
    """Generate a ramp-up/ramp-down IP stream into ip_stream_3.txt.

    Delegates the actual generation to ip_stream_gen with 'now' as the
    centre timestamp.
    """
    # the original leaked the file handle; 'with' closes it deterministically
    with open('ip_stream_3.txt', 'w') as f:
        t = int(time.time())
        ip_stream_gen(f, n, d, t)
# uniform
def ip_stream_gen_2(n, d):
    """Write ~n timestamped random IPs, spread uniformly over d one-second
    buckets centred on 'now', into ip_stream_2.txt (n // d per bucket)."""
    per_bucket = n // d
    now = int(time.time())
    # 'with' fixes the leaked file handle of the original; '//' keeps the
    # Python-2 integer semantics of 'd/2' so timestamps stay integral
    # under Python 3 as well
    with open('ip_stream_2.txt', 'w') as f:
        for i in range(d):
            t_i = now - d // 2 + i
            for _ in range(per_bucket):
                ip = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))
                f.write(str(t_i) + ' ' + ip + '\n')
| true |
61d93762dcf88537ec0af5dfc16e4256039d2bcd | Python | Linda-Stadter/AdventOfCode2020 | /Day 11/solution.py | UTF-8 | 2,601 | 3.59375 | 4 | [] | no_license | input_path = "input.txt"
def read_input():
    """Read the seat grid from input_path as a list of lists of characters."""
    # 'with' closes the file even if reading fails (the original left it open)
    with open(input_path, "r") as input_file:
        return [list(line.strip()) for line in input_file]
def get_neighbors_adjacent(input_lines, y, x):
    """Return the in-bounds coordinates of the up-to-8 cells adjacent to
    (y, x), row-major, excluding (y, x) itself."""
    height = len(input_lines)
    width = len(input_lines[0])
    neighbors = []
    for row in range(y - 1, y + 2):
        for column in range(x - 1, x + 2):
            if row == y and column == x:
                continue
            if 0 <= row < height and 0 <= column < width:
                neighbors.append((row, column))
    return neighbors
def get_neighbors_in_direction(input_lines, y, x):
    """Return the coordinates of the first non-floor cell visible from (y, x)
    in each of the 8 directions (part two's line-of-sight rule).

    Floor cells '.' are skipped; a direction contributes nothing if the ray
    leaves the grid before hitting a seat.
    """
    max_y = len(input_lines)
    max_x = len(input_lines[0])
    neighbors = []
    # all 8 unit direction vectors, excluding (0, 0)
    directions = [(y1, x1) for x1 in range(-1, 2) for y1 in range(-1, 2) if not (x1 == 0 and y1 == 0)]
    for y1, x1 in directions:
        # (y2, x2) is the cumulative offset along the ray
        y2 = y1
        x2 = x1
        # advance while the NEXT cell is in bounds and the current one is floor
        while 0 <= y2 + y + y1 < max_y and 0 <= x2 + x + x1 < max_x and input_lines[y2+y][x2+x] == '.':
            y2 += y1
            x2 += x1
        # keep the stopping cell if it is still inside the grid
        if 0 <= y + y2 < max_y and 0 <= x + x2 < max_x:
            neighbors.append((y+y2, x+x2))
    return neighbors
def model_seats(input_lines, get_neighbors, occupied_limit):
    """Run the seating cellular automaton until it stabilises.

    Rules per round: an empty seat 'L' with no occupied neighbour becomes
    '#'; an occupied seat with >= occupied_limit occupied neighbours becomes
    'L'; everything else is unchanged.  Neighbourhood is defined by the
    injected get_neighbors function.  Returns the occupied-seat count of the
    stable configuration.

    NOTE(review): the loop terminates when the occupied COUNT repeats, not
    when the grid itself is unchanged -- works for this puzzle input, but is
    a weaker fixpoint test than comparing grids.
    """
    max_y = len(input_lines)
    max_x = len(input_lines[0])
    occupied = 0
    old_occupied = -1
    while old_occupied != occupied:
        # next-generation grid, filled cell by cell
        new_seats = [['.']*max_x for y in range(max_y)]
        changes = 0  # tracked but never used
        old_occupied = occupied
        occupied = 0
        for row in range(len(input_lines)):
            for column in range(len(input_lines[row])):
                neighbors = get_neighbors(input_lines, row, column)
                current_seat = input_lines[row][column]
                if current_seat == 'L' and all(input_lines[y][x] != '#' for y, x in neighbors):
                    new_seats[row][column] = '#'
                    changes += 1
                    occupied += 1
                elif current_seat == '#' and len([(y, x) for y, x in neighbors if input_lines[y][x] == '#']) >= occupied_limit:
                    new_seats[row][column] = 'L'
                    changes += 1
                else:
                    new_seats[row][column] = current_seat
                    if current_seat == '#':
                        occupied += 1
        input_lines = new_seats
    return occupied
def part_one(input_lines):
    # adjacent-neighbour rule, tolerance of 4 occupied neighbours
    print(model_seats(input_lines, get_neighbors_adjacent, 4))

def part_two(input_lines):
    # line-of-sight rule, tolerance of 5 visible occupied seats
    print(model_seats(input_lines, get_neighbors_in_direction, 5))

# script entry: read the puzzle grid once and solve both parts
input_lines = read_input()
part_one(input_lines)
part_two(input_lines)
| true |
36f391cee5ea876273fae37d811a909dd8353f04 | Python | IreneGabutti/SISTEMI | /python/vacanzeNatale/es02_punto13.py | UTF-8 | 208 | 3.8125 | 4 | [] | no_license | #PUNTO 3 EXERCISE 13
from sys import argv
script, animal1 = argv
print("The script is called:", script)
animal1 = input("What is your favorite animal?")
print(f"Your favorite animal is: {animal1}") | true |
7be1622f4836a700a392f9a721e2309008b3dbd0 | Python | ChuanleiGuo/AlgorithmsPlayground | /LeetCodeSolutions/python/57_Insert_Interval.py | UTF-8 | 1,293 | 3.71875 | 4 | [
"MIT"
] | permissive | class Interval(object):
def __init__(self, s=0, e=0):
self.start = s
self.end = e
def __repr__(self):
return "[%d, %d]" % (self.start, self.end)
class Solution(object):
    def insert(self, intervals, newInterval):
        """Insert newInterval into intervals and merge all overlaps.

        :type intervals: List[Interval]
        :type newInterval: Interval
        :rtype: List[Interval]

        Note: the input list is mutated (newInterval is appended); the
        merge works on a copy sorted by start.
        """
        intervals.append(newInterval)
        # after the append the list can never be empty, so the original
        # "if len(intervals) < 1: return []" guard was unreachable and
        # has been removed
        merged = []
        ordered = sorted(intervals, key=lambda inter: inter.start)
        left = ordered[0].start
        right = ordered[0].end
        for interval in ordered:
            if interval.start <= right:
                # overlaps (or touches) the current run -- extend it
                right = max(right, interval.end)
            else:
                # gap found: flush the current run and start a new one
                merged.append(Interval(left, right))
                left = interval.start
                right = interval.end
        merged.append(Interval(left, right))
        return merged
# Ad-hoc driver: raw [start, end] pairs for the smoke test below.
inters = [[1, 2], [3, 5], [6, 7], [8, 10], [12, 16]]
def make_intervals(intervals):
    # wrap each [start, end] pair in an Interval object
    result = []
    for l in intervals:
        inter = Interval(l[0], l[1])
        result.append(inter)
    return result
intervals = make_intervals(inters)
# NOTE(review): Python-2-only print statement -- this file targets Python 2
print Solution().insert(intervals, Interval(4, 9))
| true |
2b715fb624968726756bd282fc46db6f6b1c3543 | Python | golam-saroar/Python_Learning | /SentDex/Intermediate/enumerate.py | UTF-8 | 393 | 4.09375 | 4 | [] | no_license |
'''
The enumerate function returns a tuple containing the count,
and then the actual value from the iterable.
'''
example=['left','right','up','down']
for i,j in enumerate(example):
    print(i,j)
#That iterable can be a dict:
# note: iterating a dict enumerates its KEYS, so the values
# ('<', '>', '^', 'v') are never printed here
example_dict = {'left':'<','right':'>','up':'^','down':'v',}
for i,j in enumerate(example_dict):
    print(i,j)
# [print(i,j) for i,j in enumerate(example_dict)]
a16468bee17bdb54e0055a9c9cdbcc1a14574a25 | Python | honey-dew/report_automation | /report.py | UTF-8 | 4,378 | 3 | 3 | [] | no_license | import pandas as pd
from openpyxl import Workbook
from openpyxl.styles import Border, Side, Font, PatternFill, Alignment, numbers
from openpyxl.utils import column_index_from_string, get_column_letter
from openpyxl.formatting.rule import IconSet, FormatObject, Rule
df = pd.read_excel("data/data_input.xlsx")
# Add column Order Year
df["Order Year"] = df["Order Date"].dt.year.astype("str")
# Pivoting by Country as index, Order Year as column, and Sales as value
pv_country = pd.pivot_table(
data = df,
index = "Country",
columns = "Order Year",
values = "Sales",
aggfunc = "sum"
).reset_index()
def add_column_growth(input_df, periods=4, end_col="2014", start_col="2011"):
    """Add a 'Growth' column holding the compound annual growth rate (CAGR).

    Growth = (end / start) ** (1 / periods) - 1, computed row-wise.
    The dataframe is modified in place and also returned for chaining
    (the original aliased it to a second name, which obscured that).

    :param input_df: dataframe holding the start/end year columns
    :param periods: number of compounding periods (default: 4 years)
    :param end_col: column with the final value (default "2014")
    :param start_col: column with the initial value (default "2011")
    """
    input_df["Growth"] = (input_df[end_col] / input_df[start_col]) ** (1 / periods) - 1
    return input_df
# add_column_growth(pv_sub_category)
pv_country["Growth"] = (((pv_country["2014"] / pv_country["2011"])**(1/4))-1)
pv_country = pv_country.sort_values(by="Growth", ascending=False)
# Creating Workbook
wb = Workbook()
wb.active.title = "report"
# Creating tuple from pv_country
data_pv_country = pv_country.to_records(index=False).tolist()
ws = wb["report"]
ws["A2"].value = "Sales Growth per Country"
ws["A2"].font = Font(size=12, bold=True)
ws.append(pv_country.columns.tolist())
for row in data_pv_country:
ws.append(row)
ws[f'{get_column_letter(ws.min_column)}{ws.max_row + 1}'].value = "Total"
ws[f'{get_column_letter(ws.min_column)}{ws.max_row}'].font = Font(bold=True)
# Get Total
for col in range(ws.min_column+1, ws.max_column):
ws[f'{get_column_letter(col)}{ws.max_row}'].value = f'=SUM({get_column_letter(col)}{ws.min_row+2}:{get_column_letter(col)}{ws.max_row-1})'
# Get total growth
# ((present / past)**(1/4))-1
min_row_past = f'{get_column_letter(ws.min_column+1)}{ws.min_row+2}'
max_row_past = f'{get_column_letter(ws.min_column+1)}{ws.max_row-1}'
min_row_present = f'{get_column_letter(ws.max_column-1)}{ws.min_row+2}'
max_row_present = f'{get_column_letter(ws.max_column-1)}{ws.max_row-1}'
past = f'SUM({min_row_past}:{max_row_past})'
present = f'SUM({min_row_present}:{max_row_present})'
period = (ws.max_column - ws.min_column) - 1
total_growth = f'=(({present}/{past})^({1/period})-1)'
# ws[f'{get_column_letter(ws.max_column)}{ws.max_row}'].value = "=((E19/B19)^(1/4))-1"
ws[f'{get_column_letter(ws.max_column)}{ws.max_row}'].value = total_growth
# Adding border and change font size
for row in range(ws.min_row+1, ws.max_row+1):
for col in range(ws.max_column):
border_style = Side(border_style="thin", color="fcf3cf")
cell = ws.cell(column=col+1, row=row)
cell.border = Border(right=border_style, top=border_style, bottom=border_style, left=border_style)
cell.font = Font(size=10)
# Styling header
for col in range(ws.max_column):
cell = ws.cell(column=col+1, row=ws.min_row+1)
cell.font = Font(bold=True, color="fcf3cf")
cell.fill = PatternFill(fgColor="229954", fill_type="solid")
cell.alignment = Alignment(horizontal="center")
# Styling Total row
for col in range(ws.min_column, ws.max_column+1):
ws[f'{get_column_letter(col)}{ws.max_row}'].font = Font(bold=True, size=10)
border_style = Side(border_style="thick", color="229954")
ws[f'{get_column_letter(col)}{ws.max_row}'].border = Border(top=border_style)
def get_style_number(sheet_obj, column_letter, format_number):
    """Apply format_number to every used row of the given column on sheet_obj.

    Bug fix: the original indexed the module-global ``ws`` instead of the
    ``sheet_obj`` parameter, so the parameter was silently ignored and the
    function only ever styled that one worksheet; it now honours the sheet
    it is given (identical behaviour for the existing call sites, which all
    pass ``ws``).
    """
    for row in range(sheet_obj.max_row):
        cell = sheet_obj.cell(column=column_index_from_string(column_letter), row=row + 1)
        cell.number_format = format_number
# Styling Growth column
get_style_number(ws, "F", numbers.FORMAT_PERCENTAGE)
# Styling year column (column B to E)
for col in range(2,6):
get_style_number(ws, get_column_letter(col), numbers.FORMAT_NUMBER_COMMA_SEPARATED1)
# Adding conditional formatting
first = FormatObject(type='num', val=-100000)
second = FormatObject(type='num', val=0)
third = FormatObject(type='num', val=0.005)
iconset = IconSet(iconSet='3Arrows', cfvo=[first, second, third], showValue=None, percent=None, reverse=None)
rule = Rule(type='iconSet', iconSet=iconset)
ws.conditional_formatting.add("F4:F19", rule)
for col in range(1, ws.max_column+1):
ws.column_dimensions[get_column_letter(col)].autosize = True
wb.save("data/data_output.xlsx")
wb.close() | true |
9b8173435671ffa7bb1196ae6c02ad80abc31361 | Python | fgaliza/logbook_processor | /cli.py | UTF-8 | 589 | 2.53125 | 3 | [] | no_license | import click
from logbook_processor.processor.processors import LogbookListProcessor
from logbook_processor.processor.utils import generate_file_name, get_waypoints_from_json, save_trips
@click.command()
@click.option("--waypoint_file", help="Waypoints List")
def run(waypoint_file):
    """Process a waypoint JSON file into trips and save them.

    Silently does nothing when --waypoint_file is not supplied.
    """
    if waypoint_file:
        waypoints = get_waypoints_from_json(waypoint_file)
        processor = LogbookListProcessor(tuple(waypoints))
        trip_list = processor.get_trips()
        file_name = generate_file_name()
        save_trips(file_name, trip_list)
if __name__ == "__main__":
run()
| true |
a2b87fd3bb2ac905f8f45636669d6a7f889e0473 | Python | ilante/programming_immanuela_englander | /simple_exercises/lanesexercises/ana/Python-strings_1.12.19.py | UTF-8 | 1,262 | 4.65625 | 5 | [
"MIT"
] | permissive | # Exercise 1
x = "Fire and ice"
#Exercise 2
print(x[3])
#Exercise 3
print(x[5])
#Exercise 4
print(x[10])
print(x[-1])
print(x[-2])
#Exercise 5: prints characters in even positions in a string
print(x[::2])
#Exercise 6: prints the characters in odd number positions in a string
print(x[1::2])
#Exercise 7: Prints the first half of a string
print(x[:len(x)//2])
#Exercise 8: This exercise takes a string and prints it upside down
print(x[::-1])
#Exercise 9: counts the number of e's and i's in a string
e = x.count("e")
i = x.count("i")
print("There are", e, "e's and", i, "i's in the string")
#Exercise 10
x = x.replace("and","&")
print(x)
#Exercise 11, 12 & 13: searches for a string or character in another string
print("re &" in x) # This option displays "true" is the character is in the string or false if it is not
print(x.count("re &")) #This option displays the number of times the string or character is found in the template string
print(x.find("re &")) #This option displays the position of the first character when found in the string, or -1 when not found
#Exercise 14 & 15: this algorithm finds the first and the last e positions in the string
char="e"
# first occurrence, scanning left to right
x1 = x.find(char)
# reverse the string, find the first 'e' there, and map that index back
# to the original orientation to get the LAST occurrence
x = x[::-1]
x2 = len(x)-x.find(char)-1
w = [x1,x2]
# note: x is left reversed after this point
print(w)
| true |
cf751d0eca81599b2b7e8849bc9f4818e39e28b5 | Python | lorrenlml/testingtensorflow | /1NN_model_sklearn.py | UTF-8 | 12,591 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python3
#This file creates the trained models for a given neural network configuration
###################################################################################
# CODIGO ORININAL DE Guillermo Yepes que genera y entrena un Perceptrón multicapa #
# mediante la librería sklean #
###################################################################################
import pandas as pd
import numpy as np
import sys
import os
import json
import optparse
import time
from sklearn.neural_network import MLPRegressor
from sklearn.externals import joblib
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
def addOptions(parser):
parser.add_option("--NNfile", default="",
help="Config json file for the data to pass to the model")
parser = optparse.OptionParser()
addOptions(parser)
(options, args) = parser.parse_args()
if not options.NNfile:
print >> sys.stderr, "No configuration file specified\n"
sys.exit(1)
#with open('config.json', 'r') as cfg_file:
with open(options.NNfile, 'r') as cfg_file:
cfg_data = json.load(cfg_file)
orig_folder = cfg_data['orig_folder']
dest_folder = cfg_data['dest_folder']
train_size = cfg_data['train_size'] # [1/7, 2/7, 3/7, 4/7, 5/7, 6/7, 7/7]
hor_pred = cfg_data['hor_pred'] #folder_names
alpha_values = cfg_data['alpha'] #[0.0001, 0.001, 0.01, 0,1]
feature_values = cfg_data['features'] #[['dh3'], ['dh3','dh4','dh5','dh10','ap1'], ['all']]
hls = cfg_data['hls'] #we pass it as a list or int
days_info_file = cfg_data['days_info']
days_info = pd.read_csv(days_info_file)
day_length = days_info['length_day'][0]
days = days_info['number_train_days'][0]
tg = cfg_data['time_granularity']
seed = cfg_data['seed']
if isinstance(hls,list):
hls=tuple(hls)
out_folder = orig_folder + dest_folder
if not os.path.exists(out_folder):
os.makedirs(out_folder)
model_folder = out_folder+'/models'
if not os.path.exists(model_folder):
os.makedirs(model_folder)
csvs_folder = out_folder+'/csvs'
if not os.path.exists(csvs_folder):
os.makedirs(csvs_folder)
graphs_folder = out_folder+'/graphs'
if not os.path.exists(graphs_folder):
os.makedirs(graphs_folder)
print('Loading dataframes...\n')
load_start = time.time()
x_original = pd.read_csv(orig_folder+'/X_tr_val.csv')
y_original = pd.read_csv(orig_folder+'/Y_tr_val.csv')
load_end = time.time()
load_time = load_end - load_start
load_min = int(load_time / 60)
load_sec = load_time % 60
print('Dataframes loaded in {} minutes {} seconds! Splitting for train and validation...\n'.format(load_min,load_sec))
split_start = time.time()
#We get the number of days and split for train and validation
lenrow_original = len(x_original.values)
print('Days: {}\n'.format(days))
arr_days = np.arange(days)
ran_seed = seed #our seed to randomize data
np.random.seed(ran_seed)
np.random.shuffle(arr_days)
len_days_validation = int(round(days * 0.176470588,0))
days_validation = arr_days[0:len_days_validation]
days_train = arr_days[len_days_validation:]
#Now we take random DAYS for train and validation:
x_train = pd.DataFrame()
y_train = pd.DataFrame()
x_val_original = pd.DataFrame()
y_val_original = pd.DataFrame()
for day in days_train:
x_train = pd.concat([x_train,x_original.iloc[day*day_length:(day+1)*day_length]],ignore_index=True)
y_train = pd.concat([y_train,y_original.iloc[day*day_length:(day+1)*day_length]],ignore_index=True)
for day in days_validation:
x_val_original = pd.concat([x_val_original,x_original.iloc[day*day_length:(day+1)*day_length]],ignore_index=True)
y_val_original = pd.concat([y_val_original,y_original.iloc[day*day_length:(day+1)*day_length]],ignore_index=True)
lencol = len(x_train.columns) #number of columns for x
lenrow = len(x_train.values)
split_end = time.time()
split_time = split_end - split_start
split_min = int(split_time / 60)
split_sec = split_time % 60
print('Splitting completed in {} minutes {} seconds. Length for train: {}\n'.format(split_min,split_sec,len(y_train)))
forecast_prediction = []
nrmse_t_final = []
nrmse_v_final = []
skill_t_final = []
skill_v_final = []
#Since we configured our matrices with an offset we have to adjust to "jump" to the sample we want to actually predict
for hp in hor_pred:
if hp.endswith("min"):
hor_pred_indices = int(int(hp.replace('min','')) * 60 / tg)
if hp.endswith("s"):
hor_pred_indices = int(int(hp.replace('s','')) / tg)
forecast_prediction.append(hp)
#TRAIN SIZE:
for ts in train_size:
n_rows = int(lenrow*ts)
print('Taking less samples for train size = {}. y length: {} \n'.format(ts,n_rows))
y_t = y_train.sample(n_rows,random_state=seed)
y_t_index = y_t.index.values
y_t_index_valid = y_t_index[(y_t_index % day_length) < (day_length - hor_pred_indices)] #so we don't get values for the previous or next day
y_t_indices_lost = len(y_t_index) - len(y_t_index_valid)
print('Indices computed. {} indices lost \n.'.format(y_t_indices_lost))
print('Building randomized y matrix with valid indices...\n')
y_t = np.ravel(y_train.iloc[y_t_index_valid + hor_pred_indices])
print('Building y matrix removing invalid indices for persistence model...\n')
y_pred_persistence = np.ravel(y_train.iloc[y_t_index_valid])
y_val_index = y_val_original.index.values
y_val_index_valid = y_val_index[(y_val_index % day_length) < (day_length - hor_pred_indices)]
y_pred_persistence_val = np.ravel(y_val_original.iloc[y_val_index_valid])
print('Building X matrix...Same thing as before...\n')
x_t = x_train.iloc[y_t_index_valid] #like our randomization, just picking the same indices
x_val = x_val_original.iloc[y_val_index_valid]
y_val = np.ravel(y_val_original.iloc[y_val_index_valid + hor_pred_indices])
#STATIONS TO SELECT:
        for ft in feature_values:
            # select the feature columns for this configuration
            X_t = pd.DataFrame()
            X_val = pd.DataFrame()
            if ft[0] == 'all':
                X_t = x_t
                X_val = x_val
            else:
                # NOTE(review): this branch raises NameError for any feature
                # set other than ['all']: 'x' and 'X' are undefined here --
                # presumably 'x_t' and 'X_t' were intended; confirm before
                # running with station subsets
                for n in range(len(ft)):
                    for i in range(lencol):
                        if x.columns[i].startswith(ft[n]):
                            X_t = pd.concat([X,x[x.columns[i]]],axis=1,ignore_index=True)
                            X_val = pd.concat([X_val,x_val[x_val.columns[i]]],axis=1,ignore_index=True)
scrs = []
scrs_val = []
rmse_train_scores = []
rmse_validation_scores = []
rmse_train_pers_scores = []
rmse_validation_pers_scores = []
skill_train_scores = []
skill_validation_scores = []
nrmse_train_scores = []
nrmse_validation_scores = []
if isinstance(hls,tuple) == False:
if hls > 10:
neurons = (hls,)
len_hls = '1'
if isinstance(hls,tuple) == False:
if hls == 1:
neurons = int(len(X.columns)/2 + 1)
hls = (neurons,)
len_hls = '1'
if isinstance(hls,tuple) == False:
if hls == 2:
neurons = int(len(X.columns)/2 + 1)
hls = (neurons,neurons)
len_hls = '2'
if isinstance(hls,tuple) == False:
if hls == 3:
neurons = int(len(X.columns)/2 + 1)
hls = (neurons,neurons,neurons)
len_hls = '3'
else:
len_hls = str(len(hls))
hls_str = str(hls).replace('(','_').replace(', ','_').replace(')','_')
hls_neurons_str = ''
for i in range(len(hls)):
hls_neurons_str = hls_neurons_str + str(hls[i])+'_'
for av in alpha_values:
stations = ''
if ft[0]=="all":
stations = "all "
else:
for sta in ft:
stations = stations + sta + ' '
sts = stations.replace(' ','_')
prcnt = round(ts*0.7,2)
output_text = '/stations_' + sts + 'for_' + hp + '_prediction_horizon_' + str(prcnt) + '_train_size_' + len_hls + '_hidden_layers_with_' + hls_neurons_str + 'neurons'
print('Creating MLPregressor\n')
nn_model = MLPRegressor(hidden_layer_sizes=hls,alpha=av)
print('Fitting...\n'+output_text+'\n')
fit_start = time.time()
nn_model.fit(X_t,y_t)
fit_end = time.time()
fit_time = fit_end - fit_start
fit_min = int(fit_time / 60)
fit_sec = fit_time % 60
print('Fitting completed in {} minutes {} seconds. Saving model to .pkl file \n'.format(fit_min,fit_sec))
model_filename = model_folder + output_text + '_and_alpha' + str(av) + '.pkl'
joblib.dump(nn_model, model_filename)
print('Predicting...\n')
y_pred_train = nn_model.predict(X_t)
print('Validating...\n')
y_pred_val = nn_model.predict(X_val)
print('Getting scores\n')
scr = nn_model.score(X_t,y_t)
scr_val = nn_model.score(X_val,y_val)
scrs.append(scr)
scrs_val.append(scr_val)
rmse_train_pers = (np.mean((y_pred_persistence - y_t) **2)) ** 0.5 #our persistence score
rmse_val_pers = (np.mean((y_pred_persistence_val - y_val) **2)) ** 0.5
rmse_train_pers_scores.append(rmse_train_pers)
rmse_validation_pers_scores.append(rmse_val_pers)
rmse_val = (np.mean((y_pred_val - y_val) **2)) ** 0.5
rmse_train = (np.mean((y_pred_train - y_t) **2)) ** 0.5
nrmse_train = rmse_train / y_t.max() * 100
nrmse_val = rmse_val / y_val.max() * 100
rmse_train_scores.append(rmse_train)
rmse_validation_scores.append(rmse_val)
nrmse_train_scores.append(nrmse_train)
nrmse_validation_scores.append(nrmse_val)
nrmse_t_final.append(nrmse_train)
nrmse_v_final.append(nrmse_val)
skill_train = (1 - rmse_train / rmse_train_pers) * 100
skill_val = (1 - rmse_val / rmse_val_pers) * 100
skill_train_scores.append(skill_train)
skill_validation_scores.append(skill_val)
skill_t_final.append(skill_train)
skill_v_final.append(skill_val)
print('Saving figures and .csv file\n')
#SAVING DATA AS .CSV
scores = pd.DataFrame(scrs)
scores_validation = pd.DataFrame(scrs_val)
scores_k1_validation = pd.DataFrame(rmse_validation_scores)
scores_k1_train = pd.DataFrame(rmse_train_scores)
scores_kc_train = pd.DataFrame(rmse_train_pers_scores)
scores_kc_validation = pd.DataFrame(rmse_validation_pers_scores)
scores_nrmse_train = pd.DataFrame(nrmse_train_scores)
scores_nrmse_validation = pd.DataFrame(nrmse_validation_scores)
scores_k1_kc_validation = pd.DataFrame(skill_validation_scores)
scores_k1_kc_train = pd.DataFrame(skill_train_scores)
df_alphascores = pd.concat([scores,scores_validation,scores_k1_train,scores_k1_validation,scores_kc_train,scores_kc_validation,scores_nrmse_train,scores_nrmse_validation,scores_k1_kc_train,scores_k1_kc_validation],axis=1,ignore_index=True)
df_alphascores.columns = ['r2_train_sklearn','r2_validation_sklearn','rmse_train','rmse_validation','rmse_persistence_train','rmse_persistence_validation','nrmse_train','nrmse_validation','skill_train','skill_validation']
df_alphascores.to_csv(csvs_folder + output_text + '.csv',header=True,index=False)
#For use with ONE ts and ONE ft set
total_scores = pd.DataFrame({'forecast_prediction':forecast_prediction,'nrmse_train':nrmse_t_final,'nrmse_validation':nrmse_v_final,'skill_train':skill_t_final,'skill_validation':skill_v_final})
total_scores.to_csv(csvs_folder + '/scores_report_for_'+len_hls+'_hidden_layers_with_'+hls_neurons_str+'neurons.csv',header=True,index=False)
print('Figures and .csv generated!\n') | true |
9b7e9489f6b85b2eff1c4d7c6c75201b5c786c31 | Python | prachipainuly-rbei/devops-poc | /site-packages/cs.vp-15.5.0.12-py2.7.egg/cs/vp/variants/solvers.py | ISO-8859-1 | 26,839 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- python -*- coding: iso-8859-1 -*-
#
# Copyright (C) 1990 - 2012 CONTACT Software GmbH
# All rights reserved.
# http://www.contact.de/
#
"""
Module Solvers
This is the documentation for the Solvers module.
"""
import time
import sys
import hashlib
import re
from cdb import util
class ProblemVariable(object):
    """ Base class and interface definition for problem variables.

    A variable has a unique id, a display name and a list of possible
    values; each value carries a display name of its own.  Value names
    are assumed to be unique per variable (see getValueByName).
    """

    def __init__(self, variable_id, name, values, value_names):
        self._id = variable_id
        self._name = name
        self._values = values
        # value -> display name and display name -> value lookup tables
        self._value_dict = dict(zip(values, value_names))
        self._value_name_dict = dict(zip(value_names, values))

    def getId(self):
        """ Returns the unique id of the variable. """
        return self._id

    def getValues(self):
        """ Returns the possible variable values as list. """
        return self._values

    def hasValues(self):
        """ Returns True, if the variable has at least one value. """
        return len(self._values) > 0

    def getName(self):
        """ Returns the variable name."""
        return self._name

    def getValueName(self, value):
        """ Returns the value name for a given value.
        Returns the passed value, if the value does not exist for this variable.
        Returns an empty string, if value is None.
        """
        return "" if value is None else self._value_dict.get(value, value)

    def getValueNames(self):
        """ Returns all value names. """
        return self._value_dict.values()

    def getValueByName(self, value_name):
        """ Returns the value id for a given value display name.
        Note that value names must be unique per property."""
        return self._value_name_dict.get(value_name)

    def printVariable(self):
        """ Prints the variable. For logging and debugging purposes. """
        # written as a call instead of the former Python-2-only print
        # statement, so the class stays importable under Python 3 too
        print("%s (%s): %s" % (self._name, self._id, self._values))
class ProblemConstraint(object):
    """ Abstract base class for problem constraints.

    A constraint knows the ids of the variables it ranges over; concrete
    subclasses supply the actual evaluation via compute().
    """
    def __init__(self, used_variable_ids):
        # ids of all variables this constraint references
        self._variable_ids = used_variable_ids

    def variables_used(self):
        """ Returns a set of all variable ids used by the constraint """
        return self._variable_ids

    def compute(self):
        """ Returns the compute function """
        # intentionally a no-op here; subclasses override -- TODO confirm
        pass
class BasicProblemSolver(object):
    def __init__(self, variables, constraints, solution_variables=[]):
        """ Sets up the solver state.

        Keeps only variables that actually have values and only constraints
        that span more than one kept variable.  solution_variables, when
        given, must be a proper subset of variables and restricts which
        variables make up a reported solution.

        NOTE(review): the mutable default [] is shared between calls --
        harmless here because it is never mutated, but a None default
        would be safer.
        """
        self.variables = [v for v in variables if v.hasValues()]
        self.variable_ids = set([v.getId() for v in variables])
        # drop unary constraints and constraints referencing unknown variables
        self.constraints = [cons for cons in constraints if len(cons.variables_used()) > 1 and cons.variables_used() <= self.variable_ids]
        self.metrics = {}
        self.solution_variables = []
        self.solution_variable_ids = []
        if solution_variables:
            solution_variable_ids = set([v.getId() for v in solution_variables])
            if solution_variable_ids < self.variable_ids:
                self.solution_variables = solution_variables
                self.solution_variable_ids = solution_variable_ids
            elif solution_variable_ids != self.variable_ids:
                # ids outside self.variable_ids are not allowed; an exactly
                # equal set is treated like "no restriction" (lists stay empty)
                raise RuntimeError("solution_variables must be a subset of variables")
        # checksums of solutions already reported (deduplication)
        self.solution_keys = set()
        self.problem = None
        self.persistent_pager = None
        self.solver_result_pager = None
    def _init(self, presettings=None):
        """ Resets metrics, the deduplication set and rebuilds the problem.

        _initMetrics() and _setupProblem() are expected to be provided by
        subclasses (not visible in this section) -- TODO confirm.
        """
        self._initMetrics()
        self.problem = self._setupProblem(presettings)
        self.solution_keys = set()
    def getSolutionVariables(self):
        """ Returns the restricted solution variables, or all variables
        when no restriction was configured. """
        return self.solution_variables if self.solution_variables else self.variables

    def getSolutionVariableIds(self):
        """ Returns the ids matching getSolutionVariables(). """
        return self.solution_variable_ids if self.solution_variable_ids else self.variable_ids

    def getAllVariables(self):
        """ Alias for getSolutionVariables(). """
        return self.getSolutionVariables()
    def _translate_solution(self, solution):
        """ Reduces the solution to self.solution_variables.
        Returns None, if the reduced solution already occurred.
        Returns the input solution, if self.solution_variables is empty.
        """
        if self.solution_variable_ids:
            checksum, signature = self.getSolutionChecksum(solution)
            if checksum not in self.solution_keys:
                self.solution_keys.add(checksum)
                return dict(zip(self.solution_variable_ids,
                                [solution[vid] for vid in self.solution_variable_ids]))
            # duplicate reduced solution: falls through and implicitly
            # returns None
        else:
            return solution
def getFilteredSolutionSignature(self, solution):
solution_ids = [var.getId() for var in self.getSolutionVariables()]
filtered_solution = ((x, y) for (x, y) in solution.iteritems() if x in solution_ids)
return self.getSolutionSignature(filtered_solution)
@classmethod
def getSolutionSignature(cls, solution):
if type(solution) == dict:
solution = solution.items()
return ','.join(("%s:%s" % item for item in sorted(solution, key=lambda (x, y): x)))
    @classmethod
    def parseSolutionSignature(cls, signature):
        """ Parses an "id:value,..." signature back into a dict.

        NOTE(review): boolean tokens are returned as the STRINGS "True"
        and "False", not as bools, so parse(getSolutionSignature(d)) is
        not a perfect round trip for boolean values -- confirm whether
        that asymmetry is intended.
        """
        if not signature:
            return {}
        def parse_value(val):
            if val in ["True", "False"]:
                return val
            else:
                return int(val)
        # each pair yields [id, value] via the regex; dict() consumes the
        # resulting two-element iterables as key/value pairs
        return dict((map(parse_value, re.findall(r'(\d+|True|False)', pair)) for pair in signature.split(',')))
    def getSolutionChecksum(self, solution):
        """ Returns (md5 hexdigest, signature) for the filtered solution.

        md5 is used purely as a fingerprint for deduplication, not for
        security.  NOTE(review): under Python 3 md5.update() requires
        bytes, while the signature is a str -- this code targets Python 2.
        """
        signature = self.getFilteredSolutionSignature(solution)
        m = hashlib.md5()
        m.update(signature)
        return m.hexdigest(), signature
    def solve(self, presettings=None):
        """ Solves the problem completely. Returns a list of all solutions."""
        self._init(presettings)
        start = time.time()
        solutions = list(self.problem.getSolutions())
        solver_time = time.time() - start
        self.metrics["solver_time"] = solver_time
        if self.solution_variables:
            # drop duplicates introduced by the reduction to the solution
            # variables (filter() returns a list under Python 2; under
            # Python 3 the len() below would fail on the iterator)
            solutions = filter(lambda s: s is not None, [self._translate_solution(s) for s in solutions])
        self.metrics["n_all_solutions"] = len(solutions)
        return solutions
def iterate(self, presettings=None):
""" Iterates over the solutions without solving all."""
self._init(presettings)
for solution in self.problem.getSolutionIter():
s = self._translate_solution(solution)
if s:
self.metrics["n_all_solutions"] += 1
yield s
def extract(self, solution_dict):
result = {}
for vid in self.getSolutionVariableIds():
val = solution_dict.get(vid, None)
if val is None:
result = {}
break
else:
result[vid] = val
return result
def containsAllVariables(self, solution_dict, exact=False):
""" Returns true, if the solution dictionary contains
all solution variables. If the exact flag ist set to True,
the solution_dict must exactly contain the solution variables,
not more or less."""
if exact:
return set(self.getSolutionVariableIds()) == set(solution_dict.keys())
else:
return set(self.getSolutionVariableIds()) <= set(solution_dict.keys())
    def validate(self, solution_dict):
        """ Returns True, if the solution described by solution_dict
        is a valid solution. Returns False, if the solution is not valid
        or contains more or less solution properties as defined by this solver."""
        # if not self.containsAllVariables(solution_dict, True):
        #    return False
        # FIXME: optimize?  Also for NestedSolver?
        validator = self._setupProblem(solution_dict)
        if self.solution_variable_ids:
            # With reduced solution variables, several raw solutions may
            # collapse to the same signature.  The preset solution is valid
            # exactly when all raw solutions collapse to ONE distinct
            # checksum; a second distinct checksum disproves it.
            result = False
            solution_keys = []
            for s in validator.getSolutionIter():
                checksum, signature = self.getSolutionChecksum(s)
                if checksum not in solution_keys:
                    solution_keys.append(checksum)
                    if len(solution_keys) > 1:
                        break
            result = len(solution_keys) == 1
        else:
            # FIXME: distinguish between False and incomplete
            # check if the iterator is not empty
            # http://stackoverflow.com/a/3114640/785542
            result = any(True for _ in validator.getSolutionIter())
        return result
    def getMetrics(self):
        """Return the metrics dictionary collected during solving."""
        return self.metrics
def getSolutionSpaceSize(self, with_presetted_variables=None):
size_solution_space = 1
for v in self.getSolutionVariables():
if with_presetted_variables:
if v not in with_presetted_variables:
size_solution_space *= len(v.getValues())
else:
size_solution_space *= len(v.getValues())
return size_solution_space
    def printSolutions(self, presettings=None, max_solutions=0):
        """Print all solutions as a table to stdout (debug helper;
        Python 2 print statement).  max_solutions == 0 means unlimited."""
        output_table = SolutionTable(self.getSolutionVariables())
        output_table.make_header()
        output_table.make_separator()
        for n, solution in enumerate(self.iterate(presettings)):
            if max_solutions and n >= max_solutions:
                print "%s solutions displayed. More solutions are available but not shown." % n
                break
            output_table.make_line("", n + 1, lambda var: solution[var])
    def printMetrics(self):
        """Print the collected solver metrics as aligned key/value lines
        (debug helper; Python 2 print statement and unicode())."""
        print "Solver metrics:"
        for k, v in self.getMetrics().items():
            sys.stdout.write("{0:{width}}".format(k, width=25) + unicode(v) + "\n")
    def getPersistentPager(self, collection, forward_solved_pages):
        """Lazily create and cache the pager over the persistent object
        collection (the pager module is imported on first use)."""
        if not self.persistent_pager:
            from cs.vp.variants.pagers import ObjectCollectionPager
            self.persistent_pager = ObjectCollectionPager(self, collection, forward_solved_pages)
        return self.persistent_pager
    def getSolverResultPager(self, collection, forward_solved_pages):
        """Lazily create and cache the pager over solver results; it wraps
        the persistent pager returned by getPersistentPager()."""
        if not self.solver_result_pager:
            from cs.vp.variants.pagers import SolverResultPager
            self.solver_result_pager = SolverResultPager(self, forward_solved_pages,
                                                         self.getPersistentPager(collection, forward_solved_pages))
        return self.solver_result_pager
# ---------- Internal methods to setup a problem -------------
def _initMetrics(self):
self.metrics = {"solver_time": 0,
"n_all_solutions": 0,
"size_solution_space": self.getSolutionSpaceSize(),
"n_properties": len(self.variables),
"n_constraints": len(self.constraints)}
def _setupProblem(self, presettings=None):
try:
import constraint
except ImportError:
raise util.ErrorMessage("cdbvp_constraint_library")
problem = constraint.Problem()
for variable in self.variables:
vid = variable.getId()
if presettings and vid in presettings:
problem.addVariable(vid, [presettings[vid]])
else:
problem.addVariable(vid, variable.getValues())
for cons in self.constraints:
problem.addConstraint(cons.compute(), cons.variables_used())
return problem
class InnerProblem(object):
    """One nested (inner) sub-problem: its variables are solved in the
    context of a fixed outer solution.

    The placeholder constants mark inner variables in combined result rows:
    MISSING means the inner problem had no solution at all, UNDEFINED means
    the inner solution is ambiguous (more than one result).
    """
    MISSING_INNER_SOLUTION_VALUE = '-'
    UNDEFINED_INNER_SOLUTION_VALUE = ''
    def __init__(self, outer_variables, inner_variables, constraints):
        # Only inner variables that actually have a value domain take part.
        self.inner_variables = [v for v in inner_variables if v.hasValues()]
        self.inner_variable_ids = [v.getId() for v in self.inner_variables]
        self.inner_variable_ids.sort()
        self.all_variables = outer_variables + self.inner_variables
        self.inner_solver = BasicProblemSolver(self.all_variables, constraints)
        self.inner_validator = BasicProblemSolver(self.all_variables, constraints, self.inner_variables)
        # Template dicts mapping every inner variable id to its placeholder.
        # FIX: the previous dict(zip(ids, list(len(ids) * VALUE))) built an
        # EMPTY dict for the undefined template, because
        # len(ids) * '' == '' -- so the placeholder values never appeared
        # in combined rows.  dict.fromkeys() yields the intended mapping
        # (and is byte-equivalent for the '-' missing template).
        self.missing_inner_solution_template = dict.fromkeys(
            self.inner_variable_ids, self.MISSING_INNER_SOLUTION_VALUE)
        self.undefined_inner_solution_template = dict.fromkeys(
            self.inner_variable_ids, self.UNDEFINED_INNER_SOLUTION_VALUE)
    def solveInnerProblem(self, outer_solution):
        """ Builds the inner problem with all variables (outer + inner) and preset
        outer variables with values from outer solution.
        Returns a list of inner solutions. """
        self.inner_solver._init(outer_solution)
        return self.inner_solver.problem.getSolutions()
    def solveInnerProblemIter(self, outer_solution, presettings=None):
        """ Like solveInnerProblem(), but returns an iterator over the inner
        solutions and allows additional presettings on top of the outer
        solution. """
        if presettings:
            preset_variables = dict(outer_solution)
            preset_variables.update(presettings)
            self.inner_solver._init(preset_variables)
        else:
            self.inner_solver._init(outer_solution)
        return self.inner_solver.problem.getSolutionIter()
    def validateStandalone(self, inner_solution):
        """Validate *inner_solution* against the inner validator alone."""
        return self.inner_validator.validate(inner_solution)
    def getVariables(self):
        """Return the participating inner variables."""
        return self.inner_variables
    def getFilteredSolutionSignature(self, solution):
        """Return the "id:value" signature over the inner variables, or an
        empty string when any inner value is a placeholder."""
        elems = []
        for vid in self.inner_variable_ids:
            val = solution.get(vid, None)
            if val in (self.MISSING_INNER_SOLUTION_VALUE, self.UNDEFINED_INNER_SOLUTION_VALUE):
                elems = []
                break
            if val is not None:
                elems.append("%s:%s" % (vid, val))
        return ','.join(elems)
    def getSolutionChecksum(self, solution):
        """Return (md5-hexdigest, signature) for the inner part of *solution*.
        NOTE(review): md5.update() is fed a str (Python 2 semantics);
        Python 3 would need signature.encode()."""
        signature = self.getFilteredSolutionSignature(solution)
        m = hashlib.md5()
        m.update(signature)
        return m.hexdigest(), signature
    def extract(self, solution):
        """Extract the inner variant from *solution*; returns {} when any
        inner value is missing or a placeholder."""
        inner_variant = {}
        for vid in self.inner_variable_ids:
            val = solution.get(vid, None)
            if val in (None, self.MISSING_INNER_SOLUTION_VALUE, self.UNDEFINED_INNER_SOLUTION_VALUE):
                inner_variant = {}
                break
            else:
                inner_variant[vid] = val
        return inner_variant
class NestedProblemSolver(BasicProblemSolver):
    """Solver for a problem split into an outer problem plus a list of
    nested inner problems that share the outer variables."""
    def __init__(self, outer_variables, inner_variables_sets, constraints):
        super(NestedProblemSolver, self).__init__(outer_variables, constraints)
        self.all_variables = list(self.variables)
        self.inner_problems = []
        for inner_variables in inner_variables_sets:
            self.inner_problems.append(InnerProblem(outer_variables, inner_variables, constraints))
            self.all_variables += inner_variables
        # Dummy row collecting the "undefined" placeholders of all inner
        # problems; used to pad combined rows.
        self.complete_dummy = {}
        for p in self.inner_problems:
            self.complete_dummy.update(p.undefined_inner_solution_template)
    def _initMetrics(self):
        """Extend the base metrics with the totals over all inner problems."""
        super(NestedProblemSolver, self)._initMetrics()
        self.metrics["n_properties"] = len(self.all_variables)
        n_constraints = 0
        for p in self.inner_problems:
            n_constraints += len(p.inner_solver.constraints)
        self.metrics["n_constraints"] = n_constraints
    def extractPresettingsForInnerProblem(self, presettings, inner_problem):
        """Return the subset of *presettings* that belongs to the given
        inner problem's variables."""
        inner_presettings = {}
        for vid in inner_problem.inner_variable_ids:
            val = presettings.get(vid, None)
            if val is not None:
                inner_presettings[vid] = val
        return inner_presettings
    def extractPresettings(self, presettings):
        """Split *presettings* by inner problem; returns a dict mapping the
        inner-problem index to its (non-empty) presettings."""
        result = {}
        for i, inner_problem in enumerate(self.inner_problems):
            inner_presettings = self.extractPresettingsForInnerProblem(
                presettings, inner_problem)
            if inner_presettings:
                result[i] = inner_presettings
        return result
    def getCombinedSolutions(self, outer_solution):
        """ Solves the inner problem for the outer solution
        and returns a list of combined solutions. """
        result = []
        for solution in self.getCombinedSolutionsIter(outer_solution):
            result.append(solution)
        return result
    def getCombinedSolutionsIter(self,
                                 outer_solution,
                                 inner_problems_presettings={},
                                 invalid_persistent_inner_solutions={}):
        # NOTE(review): the mutable default arguments above are shared
        # between calls, and the lists inside
        # invalid_persistent_inner_solutions are mutated (reverse/pop)
        # below -- callers should pass fresh containers.
        #
        # All inner solutions with exactly one (or no = dummy) result for an
        # outer solution are merged into a single row.  Inner solutions
        # with more than one result are each emitted as their own row next
        # to the outer solution; the other inner problems' variables in
        # such rows are padded with the dummy placeholders.
        one_on_one_result = dict(outer_solution)
        any_inner_result = False
        any_one_on_one_result = False
        one_on_one_filtered_out = False
        for i, inner_problem in enumerate(self.inner_problems):
            others_have_search_conditions = False
            for inner_id, presettings in inner_problems_presettings.items():
                if inner_id != i and presettings:
                    others_have_search_conditions = True
                    break
            one_result = None
            first_result = None
            invalid_inner_solutions = invalid_persistent_inner_solutions.get(i, [])
            num_inner_results = len(invalid_inner_solutions)
            for n, complete_solution in enumerate(inner_problem.solveInnerProblemIter(outer_solution, inner_problems_presettings.get(i, None))):
                num_inner_results += 1
                any_inner_result = True
                if num_inner_results == 1:
                    # Hold back the first solution: we only know how to
                    # emit it once we know whether more solutions follow.
                    first_result = complete_solution
                    one_result = complete_solution
                else:
                    # Skip, if any other inner problem has search conditions.
                    # They cannot match because of all other inner solutions are undefined
                    # in this case.
                    if others_have_search_conditions:
                        break
                    one_result = None
                    # Emit the invalid persistent solutions first.
                    invalid_inner_solutions.reverse()
                    while invalid_inner_solutions:
                        result = dict(self.complete_dummy)
                        result.update(outer_solution)
                        result.update(invalid_inner_solutions.pop())
                        yield result
                    # Emit the first, held-back solution.
                    if first_result:
                        result = dict(self.complete_dummy)
                        result.update(first_result)
                        yield result
                        first_result = None
                    result = dict(self.complete_dummy)
                    result.update(complete_solution)
                    yield result
            if num_inner_results == 1:
                # Exactly one inner solution -> merge into one_on_one_result.
                if one_result:
                    one_on_one_result.update(one_result)
                else:
                    one_on_one_result.update(invalid_inner_solutions[0])
                any_one_on_one_result = True
            elif num_inner_results == 0:
                if inner_problems_presettings.get(i, None):
                    one_on_one_filtered_out = True
                one_on_one_result.update(inner_problem.missing_inner_solution_template)
            else:
                if inner_problems_presettings.get(i, None):
                    one_on_one_filtered_out = True
                one_on_one_result.update(inner_problem.undefined_inner_solution_template)
        # Only emit one_on_one_result if either there were no inner
        # solutions at all, or it contains at least one inner solution.
        if not one_on_one_filtered_out and (not any_inner_result or any_one_on_one_result):
            yield one_on_one_result
    def solve_combined(self, presettings=None):
        """Return all combined solutions as a list (see iterate_combined)."""
        return [s for s in self.iterate_combined(presettings)]
    def iterate(self, presettings=None):
        """ Iterates over the outer solutions."""
        self._init(presettings)
        for solution in self.problem.getSolutionIter():
            self.metrics["n_all_solutions"] += 1
            yield solution
    def iterate_combined(self, presettings=None):
        """ Iterates over the combined outer and inner solutions.
        Returns a tuple containing the outer solution as first element,
        the combined solution as second element and the outer solution number
        as third element."""
        self._init(presettings)
        for n, outer_solution in enumerate(self.problem.getSolutionIter()):
            self.metrics["n_all_solutions"] += 1
            for complete_solution in self.getCombinedSolutionsIter(outer_solution):
                yield (outer_solution, complete_solution, n + 1)
    def validate_combined(self, outer_solution, inner_problem, inner_solution):
        """Validate an inner solution in the context of an outer solution.
        Returns (inner_valid, combined_valid); combined_valid is True only
        when the combination yields exactly one solution."""
        # first validate inner solution standalone
        inner = inner_problem.validateStandalone(inner_solution)
        combined = False
        if inner:
            solution = dict(outer_solution)
            solution.update(inner_solution)
            if len(inner_problem.all_variables) != len(solution):
                return inner, False
            for v in inner_problem.all_variables:
                if v.getId() not in solution:
                    return inner, False
            # Exactly one combined solution means the combination is valid.
            for n, complete_solution in enumerate(inner_problem.solveInnerProblemIter(solution)):
                if n == 0:
                    combined = True
                else:
                    combined = False
                    break
        return inner, combined
    def getAllVariables(self):
        """Return the outer plus all inner variables."""
        return self.all_variables
    def printSolutions(self, presettings=None, max_solutions=0):
        """Print all combined solutions as a table (debug helper;
        Python 2 print statement).  max_solutions == 0 means unlimited."""
        output_table = SolutionTable(self.all_variables)
        output_table.make_header()
        output_table.make_separator()
        n = 0
        for outer_solution, complete_solution, outer_solution_number in self.iterate_combined(presettings):
            n += 1
            output_table.make_line("", outer_solution_number, lambda var: complete_solution[var])
            if max_solutions and n >= max_solutions:
                print "%s solutions displayed. More solutions are available but not shown." % (n)
                break
    def getPersistentPager(self, collection, forward_solved_pages):
        """Lazily create and cache the nested persistent pager."""
        if not self.persistent_pager:
            from cs.vp.variants.pagers import NestedObjectCollectionPager
            self.persistent_pager = NestedObjectCollectionPager(self, collection, forward_solved_pages)
        return self.persistent_pager
    def getSolverResultPager(self, collection, forward_solved_pages):
        """Lazily create and cache the nested solver-result pager."""
        if not self.solver_result_pager:
            from cs.vp.variants.pagers import NestedSolverResultPager
            self.solver_result_pager = NestedSolverResultPager(self, forward_solved_pages,
                                                               self.getPersistentPager(collection,
                                                                                       forward_solved_pages))
        return self.solver_result_pager
class NestedFilteredProblemSolver(NestedProblemSolver):
    """Nested solver whose solution signatures exclude a configurable set
    of hidden properties."""
    def __init__(self, *args, **kwargs):
        self._visited = {}
        super(NestedFilteredProblemSolver, self).__init__(*args)
        self._hidden_props = kwargs.get("hidden_props", [])
    def _filter(self, prop_dict):
        """Return *prop_dict* without the hidden properties."""
        if not self._hidden_props:
            return prop_dict
        hidden = self._hidden_props
        return {prop: value
                for prop, value in prop_dict.items()
                if prop not in hidden}
    def getFilteredSignature(self, prop_dict):
        """Signature of *prop_dict* after removing the hidden properties."""
        return self.getSolutionSignature(self._filter(prop_dict))
    def getPersistentPager(self, collection, forward_solved_pages):
        """Lazily create and cache the filtered persistent pager."""
        if self.persistent_pager:
            return self.persistent_pager
        from cs.vp.variants.pagers import NestedObjectCollectionFilteredPager
        self.persistent_pager = NestedObjectCollectionFilteredPager(self, collection, forward_solved_pages)
        return self.persistent_pager
    def getSolverResultPager(self, collection, forward_solved_pages):
        """Lazily create and cache the filtered solver-result pager."""
        if self.solver_result_pager:
            return self.solver_result_pager
        from cs.vp.variants.pagers import NestedFilteredResultPager
        self.solver_result_pager = NestedFilteredResultPager(self, forward_solved_pages,
                                                             self.getPersistentPager(collection, forward_solved_pages))
        return self.solver_result_pager
class SolutionTable(object):
    """ For debug and test purposes.
    Print a solution table to stdout. """
    def __init__(self, variables):
        self._variables = variables
        # Column width per variable id: widest literal (value names or the
        # variable name itself) plus two padding spaces.
        self._widths = {}
        for variable in variables:
            literals = variable.getValueNames() + [variable.getName()]
            self._widths[variable.getId()] = 2 + max(len(text) for text in literals)
    def make_line(self, prefix, nr, lookup, sep="|"):
        """Write one table row; *lookup* maps a variable id to the cell
        content.  nr == "No" renders the header, nr == "----" a separator
        row.  A None prefix suppresses the row entirely."""
        if prefix is None:
            return
        cells = [prefix, "{0:{width}}{sep}".format(nr, width=4, sep=sep)]
        for variable in self._variables:
            vid = variable.getId()
            if nr == "No":
                text = variable.getName()
            elif nr == "----":
                text = lookup(vid)
            else:
                # Translate the value id into its display text.
                text = variable.getValueName(lookup(vid))
            cells.append("{0:{width}}".format(text, width=self._widths[vid]))
        cells.append("\n")
        sys.stdout.write("".join(cells))
    def make_header(self, prefix=""):
        self.make_line(prefix, "No", lambda vid: vid)
    def make_separator(self, prefix=""):
        self.make_line(prefix, "----", lambda vid: "-" * self._widths[vid], sep="+")
| true |
1b0f059589cf9c67333d084de5ec6f406b73b31e | Python | shuang13/halfEdge | /FaceVertices.py | UTF-8 | 214 | 2.875 | 3 | [] | no_license | def FaceVertices(face):
    """Collect the vertices of *face* by walking its half-edge loop once.

    Starts at the face's reference half-edge and follows the next-pointers
    until the loop closes; vertices are returned in traversal order."""
    originalHalfEdge = he = face.getHalfEdge()
    vertices = []
    while True:
        vertices.append(he.getVertex())
        he = he.getNextHalfEdge()
        # Stop once we are back at the starting half-edge.
        if(he == originalHalfEdge):
            break;
    return vertices | true
66e8a8e26bc49efb9ea3ee0326cf8484f868e552 | Python | Tvallejos/CC5114-NeuralNetworks | /GeneticAlgorithm/plotHeatMap.py | UTF-8 | 1,838 | 3.3125 | 3 | [] | no_license | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def parseToIntList(lista):
    """Convert rows of numeric strings to rows of ints, dropping the last
    element of every row (the trailing newline token of the input line)."""
    return [[int(token) for token in row[:-1]] for row in lista]
def plotHeatMap(data):
    """Render an annotated heatmap of solver iteration counts.

    *data* is a list of whitespace-separated text lines; rows correspond
    to mutation rates, columns to population sizes."""
    yMutationRate = ["1.0","0.9","0.8","0.7","0.6","0.5","0.4","0.3","0.2","0.1","0"]
    xPopulation = ["50","100","150","200","250","300","350","400","450","500"
        ,"550","600","650","700","750","800","850","900","950","1000"]
    # Split every input line into string tokens, then convert to ints.
    listOfRows = []
    for line in data:
        line = line.replace('\n',"")
        line = line.split(" ")
        listOfRows.append(line)
    listOfRows = parseToIntList(listOfRows)
    listOfRows = np.array(listOfRows)
    fig, ax = plt.subplots()
    im = ax.imshow(listOfRows)
    # We want to show all ticks...
    ax.set_xticks(np.arange(len(xPopulation)))
    ax.set_yticks(np.arange(len(yMutationRate)))
    # ... and label them with the respective list entries
    ax.set_xticklabels(xPopulation)
    ax.set_yticklabels(yMutationRate)
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    for i in range(len(yMutationRate)):
        for j in range(len(xPopulation)):
            text = ax.text(j, i, listOfRows[i, j],
                           ha="center", va="center", color="w")
    #ax.set_title("harvest of local farmers (in tons/year)")
    plt.xlabel('Population Size')
    plt.ylabel('Mutation Rate')
    plt.title('Heatmap of the solving problem iteration')
    fig.tight_layout()
    plt.show()
# Script entry: read the measurement file and plot it.
# NOTE(review): the file handle is never closed -- a with-block would be safer.
File = open("./heatMapNumFindData.txt",'r')
lines = File.readlines()
plotHeatMap(lines)
| true |
d144e3157ba6dea40b9e33b33fc7fa9b61306271 | Python | EuanOR/FYP | /LightController.py | UTF-8 | 557 | 2.890625 | 3 | [] | no_license | from Light import Light
import Firebase
class LightController(object):
    """Controls a group of lights and mirrors their on/off state to
    Firebase."""
    def __init__(self, lights):
        self._lights = lights
        # Restore the persisted state; if the lights were active, switch
        # them back on (power_on also re-writes the flag to Firebase).
        self.active = Firebase.get_lights_active()
        if self.active:
            self.power_on()
    def power_on(self):
        """Mark the group active, persist the flag and turn every light on."""
        self.active = True
        Firebase.set_lights_active(self.active)
        for l in self._lights:
            l.turn_on()
    def power_off(self):
        """Mark the group inactive, persist the flag and turn every light off."""
        self.active = False
        Firebase.set_lights_active(self.active)
        for l in self._lights:
            l.turn_off() | true
465b7f4bfe8f1b2923e60c73ccc706d0a90a3841 | Python | Mc01/async-services | /client/integrations/parsers/michal.py | UTF-8 | 627 | 2.6875 | 3 | [] | no_license | from typing import List, Dict, Any, Union
from ..request_parser import RequestParser
class MichalRequestParser(RequestParser):
    """Request parser for Michal's service: the keyword list is posted
    verbatim as the JSON body and the response is passed through
    unchanged."""
    def prepare_request_data(self, keywords: List[str]) -> Union[List, Dict]:
        """
        Returns the keyword list unchanged; it becomes the JSON body, e.g.:

        curl
            -X POST http://localhost:8000/api/
            -H 'Content-Type: application/json'
            -d '["banana split", "how to make a sushi"]'
        """
        return keywords
    def parse_response_data(self, response_data: Union[List, Dict]) -> Dict[str, Any]:
        """
        Returns the decoded response unchanged; expected shape:

        {
            "keyword": "result",
            ...
        }
        """
        return response_data
| true |
022236afb8298c51264bd62b785c5ada38a57f99 | Python | theovincent/TweetSentimentExtraction | /src/classifiers/classifier_mlp/classifier_mlp.py | UTF-8 | 2,481 | 3 | 3 | [] | no_license | import numpy as np
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.activations import sigmoid
from tensorflow.python.keras.layers import (
Dense,
Flatten,
BatchNormalization,
Conv1D,
ReLU
)
class ClassifierDense(Model):
    """Two-layer MLP head: Dense+ReLU followed by Dense+sigmoid, producing
    one score per sentence position.  The shape comments in call() track
    the per-sample feature size."""
    def __init__(self, word_size, sentence_size):
        super(ClassifierDense, self).__init__()
        # Parameters
        self.word_size = word_size
        self.sentence_size = sentence_size
        # Functions
        self.dense1 = Dense(sentence_size)
        self.activation1 = ReLU()
        self.dense2 = Dense(sentence_size)
        self.sigmoid = sigmoid
    def call(self, inputs):
        """Forward pass; expects flattened inputs of shape
        (batch, WORD_SIZE * SENTENCE_SIZE)."""
        # Layer 1
        # WORD_SIZE * SENTENCE_SIZE
        x = self.dense1(inputs)
        x = self.activation1(x)
        # SENTENCE_SIZE
        # Layer 2
        # SENTENCE_SIZE
        x = self.dense2(x)
        # SENTENCE_SIZE
        x = self.sigmoid(x)
        return x
class ClassifierConv(Model):
    """Convolutional head: dilated Conv1D + batch norm + ReLU, flattened
    into a Dense+sigmoid layer producing one score per sentence position."""
    def __init__(self, word_size, sentence_size):
        super(ClassifierConv, self).__init__()
        # Parameters
        self.word_size = word_size
        self.sentence_size = sentence_size
        # Functions
        self.conv1d = Conv1D(word_size // 4, 5, strides=1, padding='same', dilation_rate=2)
        self.batch_norm = BatchNormalization()
        self.activation1 = ReLU()
        self.flatten = Flatten()
        self.dense1 = Dense(sentence_size)
        self.sigmoid = sigmoid
    def call(self, inputs):
        """Forward pass; expects inputs of shape
        (batch, SENTENCE_SIZE, WORD_SIZE) -- see the shape comments."""
        # Layer 1
        # SENTENCE_SIZE x WORD_SIZE
        x = self.conv1d(inputs)
        x = self.batch_norm(x)
        x = self.activation1(x)
        # SENTENCE_SIZE x WORD_SIZE // 4
        x = self.flatten(x)
        # SENTENCE_SIZE * WORD_SIZE // 4
        # Layer 2
        # SENTENCE_SIZE * WORD_SIZE // 4
        x = self.dense1(x)
        x = self.sigmoid(x)
        # SENTENCE_SIZE
        return x
if __name__ == "__main__":
    # Smoke test: run random data through the dense classifier and print
    # the model summary and prediction.
    # Parameters
    WORD_SIZE = 50
    SENTENCE_SIZE = 50
    # Define the classifier
    CLASSIFIFIER = ClassifierDense(WORD_SIZE, SENTENCE_SIZE)
    # Data
    DATA_SCALAR = np.random.random((10, WORD_SIZE * SENTENCE_SIZE))
    DATA_SCALAR = np.array(DATA_SCALAR, dtype=np.float32)
    # DATA_SCALAR_CLASS = np.reshape(DATA_SCALAR, (10, SENTENCE_SIZE, WORD_SIZE))
    # print(DATA_SCALAR_CLASS)
    # Apply the classifier
    PREDICTION = CLASSIFIFIER(DATA_SCALAR)
    CLASSIFIFIER.summary()
    print(PREDICTION)
| true |
b6136c0fbee8837dbd843e7ac3cc5003c0992291 | Python | fhzhang/hackerrank | /30days/D12/creative.py | UTF-8 | 490 | 2.71875 | 3 | [] | no_license | import math
import os
import random
import re
import sys
if __name__ == '__main__':
    # Read a 6x6 integer grid, one row per input line.
    arr = []
    listSum = []
    for _ in range(6):
        arr.append(list(map(int, input().rstrip().split())))
    # Sum every 3x3 "hourglass" (top row + middle cell + bottom row);
    # a 6x6 grid contains 4x4 = 16 of them.
    for i in range(4):
        for j in range(4):
            test = arr[i][j] + arr[i][j+1] + arr[i][j+2]\
                + arr[i+1][j+1] + arr[i+2][j] + arr[i+2][j+1]\
                + arr[i+2][j+2]
            listSum.append(test)
    # Print the maximum hourglass sum.
    listSum.sort()
    print(listSum[-1]) | true
c5119372cc97feee3810efac8aef1abe155474fc | Python | pyaephyokyaw15/PythonFreeCourse | /chapter4/double_list2.py | UTF-8 | 213 | 3.953125 | 4 | [] | no_license | my_list = [10,20,30,40]
# First loop: rebinding the loop variable does NOT change the list --
# ``i`` is a copy of the element, so ``i *= 2`` has no effect on my_list.
for i in my_list:
    print("I is ",i)
    i *= 2
# Second loop: writing through the index DOES mutate the list in place.
for index,value in enumerate(my_list):
    print("Index ",index, " value ",value)
    my_list[index] *= 2
print("My list ",my_list) | true
e3090e2795aca35070262386b23e5375ceb8355e | Python | usnistgov/dataplot | /lib/scripts/read_excel.py | UTF-8 | 4,506 | 3.234375 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | # read_excel.py
#
# Read an Excel file with pandas and write its contents as a CSV file
# to "dpst1f.dat".  The control settings are read from "dpst5f.dat":
#
#   line 1: name of the Excel file                    (required)
#   line 2: name of the Excel sheet                   (default "Sheet1")
#   line 3: number of leading rows to skip            (> 0 enables skiprows)
#   line 4: number of rows to process                 (> 0 enables nrows)
#   line 5: header option (negative => no header row)
#
# It requires that the "pandas" package be installed.
#
# 2021/07: Option for header added
#
# Step 1: Import needed packages
#
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
#
# Step 2: Open the "dpst5f.dat" control file.
#
try:
    fhand = open('dpst5f.dat', 'r')
except Exception:
    # Fixed message: this module is read_excel.py (it said "write_excel.py").
    print('File dpst5f.dat cannot be opened in read_excel.py')
    exit()
#
# Name of the Excel file (required).
#
try:
    excel_name = fhand.readline().rstrip()
except Exception:
    print('Unable to read the name of the Excel file in dpst5f.dat')
    exit()
#
# Name of the Excel sheet (optional).
#
try:
    excel_sheet = fhand.readline().rstrip()
except Exception:
    excel_sheet = 'Sheet1'
#
# Number of lines to skip.  Pre-initialized so the error report below
# cannot itself fail.
#
nskip = 0
iline = ''
try:
    iline = fhand.readline()
    nskip = int(iline)
except Exception:
    print('Unable to read the number of lines to skip in the Excel file in dpst5f.dat')
    print('iline: ', iline)
    print('nskip: ', nskip)
    exit()
#
# Number of lines to process.  (Fixes the original latent NameError:
# nread was printed in the error branch before ever being bound.)
#
nread = 0
try:
    iline = fhand.readline()
    nread = int(iline)
    if nread > 0 and nskip < 0:
        # A negative skip count is clamped when a read count is given.
        nskip = 0
except Exception:
    print('Unable to read the number of lines to process in the Excel file in dpst5f.dat')
    print('iline: ', iline)
    print('nread: ', nread)
    exit()
#
# Header option (0: first line is the header, -1: no header row).
#
iheader = 0
try:
    iline = fhand.readline()
    if int(iline) < 0:
        iheader = -1
except Exception:
    print('Unable to read the header option for the Excel file in dpst5f.dat')
    print('iline: ', iline)
    print('iheader: ', iheader)
    exit()
fhand.close()
#
# Step 3: Read the Excel file with pandas.  The keyword arguments are
# assembled dynamically, replacing the original chain of twelve nearly
# identical read_excel() calls.  (Column selection -- the old "iflagc"
# branch -- was commented out upstream and never enabled, so it has
# been removed.)
#
read_kwargs = {'sheet_name': excel_sheet, 'usecols': None}
if iheader == -1:
    read_kwargs['header'] = None
if nskip > 0 or nread > 0:
    read_kwargs['skiprows'] = nskip
if nread > 0:
    read_kwargs['nrows'] = nread
df = pd.read_excel(excel_name, **read_kwargs)
#
# Step 4: Write the data out as CSV.
#
df.to_csv("dpst1f.dat")
#
| true |
427fae549ac93c674b158c8273b7fc62b7b428ca | Python | hercules261188/model_test | /examples/sentiment_analysis/model_tests/test_vocab.py | UTF-8 | 2,763 | 3.078125 | 3 | [] | no_license | import random
from string import Formatter
import model_test
from model_test.schemas import Example
# Shared string.Formatter instance used to parse and fill templates.
formatter = Formatter()
# Word pools substituted into the template placeholders below.
NAMES = ["John", "Cindy", "Trey", "Jordan", "Sam", "Taylor", "Charlie", "Veronica"]
COMPANIES = ["Target", "Amazon", "Google", "Lowes", "Macys"]
POS_ADJS = ["phenomenal", "great", "terrific", "helpful", "joyful"]
NEG_ADJS = ["terrible", "boring", "awful", "lame", "unhelpful", "lackluster"]
NOUNS = ["doctor", "nurse", "teacher", "server", "guide"]
# Maps template field names to their candidate substitutions.
LEXICON = {
    "name": NAMES,
    "company": COMPANIES,
    "pos_adj": POS_ADJS,
    "neg_adj": NEG_ADJS,
    "noun": NOUNS,
}
def build_inv_pair_from_template(template: str, inv_field: str):
    """
    Create a pair of two strings that substitute words from the lexicon
    into the provided template.  Every field receives the same value in
    both strings except the invariance field, which differs between them.
    """
    _, fields, _, _ = zip(*formatter.parse(template))
    shared_values = {field: random.choice(LEXICON[field]) for field in fields}
    # Re-insert the invariance field as a placeholder for a second pass.
    shared_values[inv_field] = "{%s}" % inv_field
    partial_string = formatter.format(template, **shared_values)
    first_choice, second_choice = random.sample(LEXICON[inv_field], k=2)
    return (formatter.format(partial_string, **{inv_field: first_choice}),
            formatter.format(partial_string, **{inv_field: second_choice}))
@model_test.mark.invariance
def test_name_invariance_positive_statements():
    """Name-invariance suite over positive-sentiment templates."""
    templates = [
        ("{name} was a {pos_adj} {noun}", 15),
        ("I had {name} as a {noun} and it was {pos_adj}", 20),
        ("{name} is {pos_adj}", 3),
    ]
    return [
        (Example(data=left), Example(data=right))
        for template, count in templates
        for left, right in (build_inv_pair_from_template(template, "name")
                            for _ in range(count))
    ]
@model_test.mark.invariance
def test_name_invariance_negative_statements():
    """Name-invariance suite over negative-sentiment templates."""
    templates = [
        ("I had an {neg_adj} experience with {name}", 15),
        ("{name} is a {neg_adj} {noun}", 15),
        ("are you kidding me? {name} is {neg_adj}", 5),
    ]
    return [
        (Example(data=left), Example(data=right))
        for template, count in templates
        for left, right in (build_inv_pair_from_template(template, "name")
                            for _ in range(count))
    ]
@model_test.mark.unit
def test_short_positive_phrases():
    """Unit suite: short, clearly positive phrases labelled POSITIVE."""
    sentences = ["I like you", "You look happy", "Great!", "ok :)"]
    return [Example(data=sentence, label="POSITIVE") for sentence in sentences]
| true |
149400b7e90810e728d96050916a239655057d9c | Python | adampbeardsley/tutorials | /Photo_FT_animation.py | UTF-8 | 3,078 | 3.1875 | 3 | [] | no_license | """
This simple script takes a photo, fourier transforms it, and creates an animation
where each frame successively adds fourier modes to the image (large scales to small)
A couple libraries are needed: python imaging library (PIL), and progressbar
(comment out if you don't want it). An FFMPEG writer is also required. I used:
conda install -c conda-forge ffmpeg
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import Image
import progressbar as pgb
# Some options
nframes = 150
filename = 'DSC_0003.JPG'
fps = 5  # frames per second
# Read in image, FT (2D FFT over the two spatial axes, per color channel)
raw_im = np.array(Image.open(filename))  # Probably not efficient, but gets us a numpy array to manipulate
raw_im_ft = np.fft.fftshift(np.fft.fft2(raw_im, axes=(0, 1)), axes=(0, 1))
log_power = np.log(np.sum(np.abs(raw_im_ft)**2, axis=2))  # Used to plot FT panel
# Prepare for masking: radius of every FT pixel from the (shifted) center
x0 = raw_im.shape[0] / 2
y0 = raw_im.shape[1] / 2
x = np.arange(raw_im.shape[0]) - x0
y = np.arange(raw_im.shape[1]) - y0
radius = np.sqrt(x.reshape((-1, 1))**2 + y.reshape((1, -1))**2)
rmax = radius.max()
rmin = rmax / 500.0  # This is used because we are going to log bin r for the frames
# Log-spaced radius thresholds: one cutoff per animation frame.
bin_size = (np.log(rmax) - np.log(rmin)) / nframes
rframes = np.exp(np.log(rmin) + bin_size * (np.arange(nframes) + 1))
# Set up figure for animation: FT power on the left, image on the right
frame = 0
fig, (ax_ft, ax_im) = plt.subplots(1, 2, figsize=(16, 8))
ax_ft.imshow(log_power, animated=True)
foo = ax_ft.axis('off')
ax_im.imshow(raw_im, animated=True)
foo = ax_im.axis('off')
def reset():
    """Animation init callback: blank both panels and (re)start the
    progress bar.  Mutates the module-level frame counter and bar."""
    global log_power, raw_im_ft, frame, bar
    frame = 0
    masked_log_power = np.zeros_like(log_power)
    masked_im = np.zeros_like(raw_im_ft, dtype='uint8')
    ax_ft.images[0].set_array(masked_log_power)
    ax_im.images[0].set_array(masked_im)
    bar = pgb.ProgressBar(widgets=[pgb.Percentage(),
                          pgb.Bar(marker='-', left=' |', right='| '), pgb.Counter(),
                          '/{0:0d} frames '.format(nframes),
                          pgb.ETA()], maxval=nframes).start()
    return ax_ft.images[0], ax_im.images[0]
def updatefig(*args):
    """Animation frame callback: keep only Fourier modes inside the
    current radius threshold, inverse-transform, rescale to 0..255 and
    redraw both panels."""
    global log_power, raw_im_ft, radius, rframes, frame, bar
    frame += 1
    r_ind = radius < rframes[frame]
    masked_ft = raw_im_ft * r_ind[:, :, None]
    masked_log_power = log_power * r_ind
    masked_im = np.real(np.fft.ifft2(np.fft.ifftshift(masked_ft, axes=(0, 1)), axes=(0, 1)))
    masked_im -= masked_im.min()
    masked_im *= 255 / masked_im.max()  # fits into image range
    masked_im = np.uint8(np.round(masked_im))
    ax_ft.images[0].set_array(masked_log_power)
    ax_im.images[0].set_array(masked_im)
    bar.update(frame)
    return ax_ft.images[0], ax_im.images[0]
# Build the animation: one updatefig() call per frame, reset() as init.
ani = animation.FuncAnimation(fig, updatefig, repeat=False, frames=nframes - 1,
                              interval=np.round(1.0 / fps * 1000), blit=False, init_func=reset)
# Write the animation
Writer = animation.writers['ffmpeg']
writer = Writer(fps=fps, metadata=dict(artist='Me'), bitrate=1800)
ani.save('animation.mp4', writer=writer)
bar.finish()
| true |
5f479e352d93f01994d607cd867c1cd1f5ef7c86 | Python | paulombcosta/twitbooks | /twitbooks/ner.py | UTF-8 | 2,101 | 2.65625 | 3 | [
"MIT"
] | permissive | import spacy
from twitbooks.console import download_progress
from urllib.request import urlopen
from functools import partial
from twitbooks.config import get_config_path
import tarfile
import os
BASE_URL = "https://github.com/explosion/spacy-models/releases/download/{model}/{model}.tar.gz"
MODEL_VERSION = "3.1.0"
def new(lang: str, size: str):
config_path = get_config_path()
version_path = f"{lang}_core_web_{size}-{MODEL_VERSION}"
model_name_path = f"{lang}_core_web_{size}"
model_path = config_path.joinpath(version_path, model_name_path, version_path)
nlp = spacy.load(model_path)
return NER(nlp)
def download_and_extract_model(lang: str, size: str):
model = "{}_core_web_{}-{}".format(lang, size, MODEL_VERSION)
_download_model(model)
_extract_model(model)
def is_model_downloaded(lang: str, size: str) -> bool:
model_root = get_config_path().joinpath(f"{lang}_core_web_{size}-{MODEL_VERSION}")
return model_root.is_dir()
def _download_model(model):
progress = download_progress()
progress.console.log(f"Downloading model")
task = progress.add_task("download", filename=model, start=False)
output = get_config_path().joinpath(f"{model}.tar.gz")
with progress:
response = urlopen(BASE_URL.format(model=model))
progress.update(task, total=int(response.info()["Content-length"]))
with output.open("wb") as dest_file:
progress.start_task(task)
for data in iter(partial(response.read, 32768), b""):
dest_file.write(data)
progress.update(task, advance=len(data))
progress.console.log(f"Downloaded {model}")
def _extract_model(model):
tar_file = get_config_path().joinpath(f"{model}.tar.gz")
tar = tarfile.open(tar_file, "r:gz")
tar.extractall(path=get_config_path())
tar.close()
os.remove(tar_file)
class NER:
def __init__(self, nlp):
self.nlp = nlp
def process(self, text: str):
doc = self.nlp(text)
ents = [e.text for e in doc.ents if e.label_ == 'WORK_OF_ART']
return ents
| true |
288232dbef01cf69c11a71973f1d9a389c56b37a | Python | Bytamine/ff-olymp | /14 primality test/6.py | UTF-8 | 266 | 3.21875 | 3 | [] | no_license | #!/usr/bin/env python3
n = int(input())
count = 0
if n == 1:
count = 1
else:
d = 1
while d * d <= n:
if n % d == 0:
if d * d == n:
count += 1
else:
count += 2
d += 1
print(count)
| true |
79a9e806d4704a46311f8bdf7c9e9410089f9ada | Python | bhawnachaudhary/Python-DataScience | /Classes/Constructor.py | UTF-8 | 894 | 3.53125 | 4 | [] | no_license | class Movie:
#constructor
def __init__(self,title,duration,year,director,cast=[],genre=[]):
# instance variables
self.title = title
self.duration = duration
self.year = year
self.director = director
self.cast = cast
self.genre = genre
print('yo subject completed')
# instance method
def info(self):
print(f'Movie Details>> {self.title}')
print(f'Release Year>> {self.year}')
print(f'Duration>> {self.duration}')
print(f'director>> {self.director}')
print(f'Caste>> ')
for people in self.cast:
print(f'-->>{people.genre}')
print(f"Genre>> {'/'.join(self.genre)}")
m1 = Movie('Rainmaker','95 mins',1997,'Francis Ford Coppola',genre = ['Crime','Drama'])
m2 = Movie('Avenger','100 mins',2015,'Josh Whedon')
# print(m1)
# print(m2)
m1.info()
m2.info() | true |
59980294bfa49f72862239abeffea3097a23645a | Python | iDragon-yang/xiaozhiquan | /software/mySQL/Create_table.py | UTF-8 | 1,612 | 2.75 | 3 | [] | no_license | import pymysql
'''
db = pymysql.connect(host='127.0.0.1', user='root', passwd='lyx0601', db='test', port=3306, charset='utf8')
cur = db.cursor()
cur.execute('DROP TABLE IF EXISTS EMPLOYEE')
sql = ("CREATE TABLE `employee`(\n"
"`first_name` varchar(255) DEFAULT NULL COMMENT '姓',\n"
"`last_name` varchar (255) DEFAULT NULL COMMENT '名',\n"
"`age` int(11) DEFAULT NULL COMMENT '年龄',\n"
"`sex` varchar (255) DEFAULT NULL COMMENT '性别',\n"
"`income` varchar (255) DEFAULT NULL COMMENT '收入'\n"
")ENGINE=InnoDB DEFAULT CHARSET=utf8;\n")
cur.execute(sql)
db.close()
'''
def create_table(name):
name='users'
db1=pymysql.connect(host='localhost', user='root', passwd='123456', db='test', port=3307, charset='utf8')
cur = db1.cursor()
cur.execute('DROP TABLE IF EXISTS '+name)
sql = ("CREATE TABLE `"+name+"`(\n"
"`ID` varchar(255) DEFAULT NULL COMMENT '昵称',\n"
"`password` varchar (255) DEFAULT NULL COMMENT '密码',\n"
"`friend_list` varchar (255) DEFAULT NULL COMMENT '朋友列表',\n"
"`tags` varchar (255) DEFAULT NULL COMMENT '兴趣标签',\n"
"`job` varchar (255) DEFAULT NULL COMMENT '身份'\n"
"`team` varchar (255) DEFAULT NULL COMMENT '身份'\n"
"`sim_job` varchar (255) DEFAULT NULL COMMENT '身份'\n"
"`clubs` varchar (255) DEFAULT NULL COMMENT '身份'\n"
"`classes` varchar (255) DEFAULT NULL COMMENT '身份'\n"
")ENGINE=InnoDB DEFAULT CHARSET=utf8;\n")
cur.execute(sql)
db1.close() | true |
d170978b22e5c0a75dd896bdaa2568cfef2eafe5 | Python | kartoria/lec-202007 | /HELLOPYTHON/day17/google/longSpeech.py | UTF-8 | 1,214 | 2.8125 | 3 | [] | no_license | """Transcribe the given audio file asynchronously."""
from google.cloud import speech
import os
import io
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="My Project 55374-0e1821e0267a.json"
client = speech.SpeechClient()
speech_file = "2Pac - Life Goes On.mp3"
with io.open(speech_file, "rb") as audio_file:
content = audio_file.read()
"""
Note that transcription is limited to 60 seconds audio.
Use a GCS file for audio longer than 1 minute.
"""
audio = speech.RecognitionAudio(content=content)
config = speech.RecognitionConfig(
encoding=speech.RecognitionConfig.AudioEncoding.FLAC,
sample_rate_hertz=44100,
language_code="ko-kr",
audio_channel_count = 2
)
operation = client.long_running_recognize(config=config, audio=audio)
print("Waiting for operation to complete...")
response = operation.result(timeout=90)
# Each result is for a consecutive portion of the audio. Iterate through
# them to get the transcripts for the entire audio file.
for result in response.results:
# The first alternative is the most likely one for this portion.
print(u"Transcript: {}".format(result.alternatives[0].transcript))
print("Confidence: {}".format(result.alternatives[0].confidence))
| true |
32c134ef31833edf10c89d1f3dcc3f91aa716b85 | Python | victor-paltz/algorithms | /challenges/primers.xyz/challenge_10/solver/perm_functions.py | UTF-8 | 1,204 | 2.90625 | 3 | [] | no_license | import random
from os import walk
import numpy as np
def save_perm(file_name, p):
"""
Save a permutation in a file
"""
with open(file_name, "w") as f:
f.write("\n".join(str(x) for x in np.array(p)+1))
def load_perm(file_name):
"""
Load a permutation from a file
"""
with open(file_name, "r") as f:
s = f.read()
return np.array([int(x) for x in s.strip().split("\n")])-1
def load_best(folder):
"""
Load the permutation with the best score among the files in the format "{score}_{name}.txt"
"""
all_files = []
for (dirpath, dirnames, filenames) in walk(folder):
all_files.extend(filenames)
break
return load_perm(folder+"/"+min((int(f.split("_")[0]), f) for f in all_files if "_" in f)[1])
def rev_perm(p):
"""
Return the reversed permutation q of p (q verify p(q(x)) = q(p(x)) = x)
"""
p2 = np.zeros_like(p)
p2[p] = np.arange(len(p))
return p2
def apply_random_perm(p, k):
"""
Apply a random permutation on k elements of a permutation p
"""
support = np.random.choice(p, k, replace=False)
p[support] = p[np.random.permutation(support)]
return p, support
| true |
705eadb6aabf3b06a57a5a62246f0acca69d7bef | Python | yuya-inagaki/AOJ | /ITP1_10_B.py | UTF-8 | 185 | 3.140625 | 3 | [] | no_license | import math
a, b, C = map(int,input().split())
S = (1/2)*a*b*math.sin(math.radians(C))
L = (a**2+b**2-2*a*b*math.cos(math.radians(C)))**(1/2)+a+b
h = 2*S/a
print(S)
print(L)
print(h)
| true |
9f929c5c578202201d823e17c74eb1ae711b5408 | Python | jwyx3/practices | /leetcode/tree/delete-node-in-a-bst.py | UTF-8 | 1,459 | 3.625 | 4 | [] | no_license | # https://leetcode.com/problems/delete-node-in-a-bst/
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def deleteNode(self, root, key):
"""
:type root: TreeNode
:type key: int
:rtype: TreeNode
"""
if not root:
return None
# root may be changed
dummy = parent = TreeNode(0)
dummy.left = root
# find target
node = root
while node:
if node.val == key:
break
elif node.val > key:
parent = node
node = node.left
else:
parent = node
node = node.right
if not node:
return root
# left and right are present
if node.left and node.right:
new_node, parent = node.right, node
while new_node.left:
parent = new_node
new_node = new_node.left
node.val, node = new_node.val, new_node
# one child or no child
if node.left:
new_node = node.left
else:
new_node = node.right
if parent.left == node:
parent.left = new_node
else:
parent.right = new_node
return dummy.left
| true |
ddc1795393ff98661d7818df25799bf40b7c0dd4 | Python | ljZhang416/studypython | /lpthw/day06_3.py | UTF-8 | 524 | 3.859375 | 4 | [] | no_license | # 1.输入多行文字, 存入列表中.每次输入后回车算作一行, 任意输入多行, 当直接输入会从时结束
# a.打印刚才输入的行数
# b.按原输出的内容在屏幕上输出内容
# c.打印刚才共输出了多少字符
L = []
num = 0
i = 0
while True:
str1 = input("请输入:")
if str1 == "":
break
L.append(str1)
num = num + len(str1)
i = i + 1
print("输入的内容为:%s"% L)
for i in range(0, len(L)):
print(L[i])
print("输入了%s个字符" % num)
| true |
545ea3dbe6ea16910bb6bbab0fa1c8be14af8f4e | Python | vanzomerenc/rdf-drone-code | /blocks/signal_detection_method/variance.py | UTF-8 | 4,736 | 2.859375 | 3 | [] | no_license | import sys
import numpy
import math
from collections import OrderedDict
from math import ceil, floor
from gnuradio import gr
from movingaverage import MovingAverageEstimator
"""
A Gnu Radio block that detects the presence of a specified
signal frequency given a power spectrum.
The detection method used here is similar to that used by the 2015 project group.
We compare the frequency-domain variance right now to an estimate of what that
variance would be if we only had noise. If the variance right now reaches a given
threshold, then we assume we have a real signal and output the peak intensity
of that signal. Otherwise we output 0.
The one major difference from the 2015 project group's algorithm is that we use
an exponential moving average for the variance estimate. The 2015 algorithm
used a simple average, resetting it every few samples.
Configuration:
* resolution is the FFT resolution, in Hz
* num_bins is the number of FFT bins (bandwidth is resolution * n_bins)
* center_frequency is the center frequency of the power spectrum, in Hz
* desired_frequency is the frequency of the signal to detect, in Hz
* desired_bandwidth is the width of the signal detection band, in Hz
* decay_time is the time between transmitter pulses
* detection_threshold is the difference between
Inputs:
* a power spectrum having the specified resolution, center frequency,
and number of bins
Outputs:
* an estimated signal strength
. : : estimated
. : : ,- signal power
. : A : / density
. :`/`\`:``````````````````
~~~^~^~~^~^~^^~~~^~^~^~^^^~~^~.^~^~~~~:' ':~~~~^~^~^^^^~~^~
.
'------------------.-----------------''--.--''-------.------'
| . | |
ignored . signal band ignored
|
center
frequency
Variance of the input signal right now
, , ,
| | | . . .
,~,~'`~.~", ,.~.'~~~. | ,~~.
~^~' : ~' `~~~`~~~'~~' `~~~~
:
|
signal pulse
Delayed moving average
,. ,._ ,
| `~.._ | `. |`~... . . .
,,~~''' '''~~~' '~.. | `~..
~`~'' ````
"""
class Variance(gr.sync_block):
def __init__(self,
num_bins,
center_frequency,
signal_frequency,
signal_bandwidth,
threshold,
decay_time,
decay_strength,
**extra_args):
gr.sync_block.__init__(self,
name=self.__class__.__name__,
in_sig=[(numpy.float32, num_bins)],
out_sig=[(numpy.float32, 1)])
self.num_bins = num_bins
self.center_frequency = center_frequency
self.signal_frequency = signal_frequency
self.signal_bandwidth = signal_bandwidth
self.threshold = threshold
self.decay_time = decay_time
self.decay_strength = decay_strength
def set_sample_rate(self, sample_rate):
self.sample_rate = sample_rate
bin_width = self.sample_rate
center = self.num_bins / 2
offset = (self.signal_frequency - self.center_frequency) * 1.0 / bin_width
bandwidth = self.signal_bandwidth * 1.0 / bin_width
self.signal_min_bin = int(center + floor(offset - bandwidth / 2))
self.signal_max_bin = int(center + ceil(offset + bandwidth / 2))
decay_n_samples = self.decay_time * self.sample_rate
decay_constant = self.decay_strength**(-1.0/decay_n_samples)
self.expected_variance = MovingAverageEstimator(
rate=decay_constant, warmup=decay_n_samples)
def work(self, input_items, output_items):
signal_arr = numpy.array([])
noise_arr = numpy.array([])
a, b, c, d = 0, self.signal_min_bin, self.signal_max_bin, self.num_bins
signal_arr = numpy.array(input_items[0][0,b:c])
signal_variance = numpy.var(signal_arr)
if (self.expected_variance.remaining_warmup == 0
and signal_variance > self.threshold**2 * self.expected_variance.estimate
and self.expected_variance.estimate > 0):
output_items[0][0] = numpy.sqrt(signal_variance * 1.0 / self.expected_variance.estimate)
else:
output_items[0][0] = 0
self.expected_variance.update(signal_variance)
return 1
| true |
0c2fe3c8856a99f03d220f3784a97a5f599134f0 | Python | rafacarrascosa/samr | /samr/predictor.py | UTF-8 | 8,851 | 2.765625 | 3 | [
"BSD-3-Clause"
] | permissive | """
SAMR main module, PhraseSentimentPredictor is the class that does the
prediction and therefore one of the main entry points to the library.
"""
from collections import defaultdict
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import make_pipeline, make_union
from sklearn.metrics import accuracy_score
from samr.transformations import (ExtractText, ReplaceText, MapToSynsets,
Densifier, ClassifierOvOAsFeatures)
from samr.inquirer_lex_transform import InquirerLexTransform
_valid_classifiers = {
"sgd": SGDClassifier,
"knn": KNeighborsClassifier,
"svc": SVC,
"randomforest": RandomForestClassifier,
}
def target(phrases):
return [datapoint.sentiment for datapoint in phrases]
class PhraseSentimentPredictor:
"""
Main `samr` class. It implements a trainable predictor for phrase
sentiments. API is a-la scikit-learn, where:
- `__init__` configures the predictor
- `fit` trains the predictor from data. After calling `fit` the instance
methods should be free side-effect.
- `predict` generates sentiment predictions.
- `score` evaluates classification accuracy from a test set.
Outline of the predictor pipeline is as follows:
A configurable main classifier is trained with a concatenation of 3 kinds of
features:
- The decision functions of set of vanilla SGDClassifiers trained in a
one-versus-others scheme using bag-of-words as features.
- (Optionally) The decision functions of set of vanilla SGDClassifiers
trained in a one-versus-others scheme using bag-of-words on the
wordnet synsets of the words in a phrase.
- (Optionally) The amount of "positive" and "negative" words in a phrase
as dictated by the Harvard Inquirer sentiment lexicon
Optionally, during prediction, it also checks for exact duplicates between
the training set and the train set. """
def __init__(self, classifier="sgd", classifier_args=None, lowercase=True,
text_replacements=None, map_to_synsets=False, binary=False,
min_df=0, ngram=1, stopwords=None, limit_train=None,
map_to_lex=False, duplicates=False):
"""
Parameter description:
- `classifier`: The type of classifier used as main classifier,
valid values are "sgd", "knn", "svc", "randomforest".
- `classifier_args`: A dict to be passed as arguments to the main
classifier.
- `lowercase`: wheter or not all words are lowercased at the start of
the pipeline.
- `text_replacements`: A list of tuples `(from, to)` specifying
string replacements to be made at the start of the pipeline (after
lowercasing).
- `map_to_synsets`: Whether or not to use the Wordnet synsets
feature set.
- `binary`: Whether or not to count words in the bag-of-words
representation as 0 or 1.
- `min_df`: Minumim frequency a word needs to have to be included
in the bag-of-word representation.
- `ngram`: The maximum size of ngrams to be considered in the
bag-of-words representation.
- `stopwords`: A list of words to filter out of the bag-of-words
representation. Can also be the string "english", in which case
a default list of english stopwords will be used.
- `limit_train`: The maximum amount of training samples to give to
the main classifier. This can be useful for some slow main
classifiers (ex: svc) that converge with less samples to an
optimum.
- `max_to_lex`: Whether or not to use the Harvard Inquirer lexicon
features.
- `duplicates`: Whether or not to check for identical phrases between
train and prediction.
"""
self.limit_train = limit_train
self.duplicates = duplicates
# Build pre-processing common to every extraction
pipeline = [ExtractText(lowercase)]
if text_replacements:
pipeline.append(ReplaceText(text_replacements))
# Build feature extraction schemes
ext = [build_text_extraction(binary=binary, min_df=min_df,
ngram=ngram, stopwords=stopwords)]
if map_to_synsets:
ext.append(build_synset_extraction(binary=binary, min_df=min_df,
ngram=ngram))
if map_to_lex:
ext.append(build_lex_extraction(binary=binary, min_df=min_df,
ngram=ngram))
ext = make_union(*ext)
pipeline.append(ext)
# Build classifier and put everything togheter
if classifier_args is None:
classifier_args = {}
classifier = _valid_classifiers[classifier](**classifier_args)
self.pipeline = make_pipeline(*pipeline)
self.classifier = classifier
def fit(self, phrases, y=None):
"""
`phrases` should be a list of `Datapoint` instances.
`y` should be a list of `str` instances representing the sentiments to
be learnt.
"""
y = target(phrases)
if self.duplicates:
self.dupes = DuplicatesHandler()
self.dupes.fit(phrases, y)
Z = self.pipeline.fit_transform(phrases, y)
if self.limit_train:
self.classifier.fit(Z[:self.limit_train], y[:self.limit_train])
else:
self.classifier.fit(Z, y)
return self
def predict(self, phrases):
"""
`phrases` should be a list of `Datapoint` instances.
Return value is a list of `str` instances with the predicted sentiments.
"""
Z = self.pipeline.transform(phrases)
labels = self.classifier.predict(Z)
if self.duplicates:
for i, phrase in enumerate(phrases):
label = self.dupes.get(phrase)
if label is not None:
labels[i] = label
return labels
def score(self, phrases):
"""
`phrases` should be a list of `Datapoint` instances.
Return value is a `float` with the classification accuracy of the
input.
"""
pred = self.predict(phrases)
return accuracy_score(target(phrases), pred)
def error_matrix(self, phrases):
predictions = self.predict(phrases)
matrix = defaultdict(list)
for phrase, predicted in zip(phrases, predictions):
if phrase.sentiment != predicted:
matrix[(phrase.sentiment, predicted)].append(phrase)
return matrix
def build_text_extraction(binary, min_df, ngram, stopwords):
return make_pipeline(CountVectorizer(binary=binary,
tokenizer=lambda x: x.split(),
min_df=min_df,
ngram_range=(1, ngram),
stop_words=stopwords),
ClassifierOvOAsFeatures())
def build_synset_extraction(binary, min_df, ngram):
return make_pipeline(MapToSynsets(),
CountVectorizer(binary=binary,
tokenizer=lambda x: x.split(),
min_df=min_df,
ngram_range=(1, ngram)),
ClassifierOvOAsFeatures())
def build_lex_extraction(binary, min_df, ngram):
return make_pipeline(InquirerLexTransform(),
CountVectorizer(binary=binary,
tokenizer=lambda x: x.split(),
min_df=min_df,
ngram_range=(1, ngram)),
Densifier())
class DuplicatesHandler:
def fit(self, phrases, target):
self.dupes = {}
for phrase, label in zip(phrases, target):
self.dupes[self._key(phrase)] = label
def get(self, phrase):
key = self._key(phrase)
return self.dupes.get(key)
def _key(self, x):
return " ".join(x.phrase.lower().split())
class _Baseline:
def fit(self, X, y=None):
return self
def predict(self, X):
return ["2" for _ in X]
def score(self, X):
gold = target(X)
pred = self.predict(X)
return accuracy_score(gold, pred)
| true |
47840cbe096cd046c023596c2306e2eebc8c5256 | Python | HuzaifaQayyum/twitter_bot | /home.py | UTF-8 | 4,047 | 2.875 | 3 | [] | no_license | from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from playsound import playsound
import time
import random
from util import Utils
from twitter_urls import TwitterUrls
class Home:
def __init__(self, driver, username):
self.driver = driver
self.utils = Utils(driver)
self.username = username
def __find_post(self):
tries = 0
while True:
tries += 1
try:
post = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.XPATH, f'//div[ .//div[@data-testid="like"] and contains(@style, "position: absolute") and .//div[ contains( @class, "css-bfa6kz r-1re7ezh")]//span[text() != "{self.username}" ] ]')))
except Exception as error:
if (tries > 2):
playsound('./alert_sound.mp3')
print(error)
if input("Do you want to quitr? y/n").lower() == 'n':
tries = 0
continue
self.utils.quit()
self.utils.scroll_to_end()
continue
else:
return post
def __find_required_elements(self, post):
try:
username = post.find_element_by_xpath('.//div[ contains( @class, "css-bfa6kz r-1re7ezh")]//span').text
like_btn = post.find_element_by_xpath('.//div[@data-testid="like"]')
reply_btn = post.find_element_by_xpath('.//div[@data-testid="reply"]')
except:
self.utils.handle_error("Home: username, likebtn or reply btn xpath outdated")
return (username, like_btn, reply_btn)
def __reply(self, reply_btn, comment):
self.utils.click_js(reply_btn)
try:
reply_input = self.driver.find_element_by_xpath('(//div[@data-testid="tweetTextarea_0"])[1]')
except:
self.utils.handle_error("Home: Reply input field xpath outdated")
else:
reply_input.send_keys(comment, Keys.CONTROL + Keys.ENTER)
def __get_comment(self):
comments = [
f"Nice post",
f"Awesome work",
f"Impressive work",
f"Coool"
]
return comments[random.randint(0, len(comments) -1)]
def __handle_like_error(self, like_btn):
print(like_btn.get_attribute('data-testid'))
if like_btn.get_attribute('data-testid') != 'unlike':
try:
error = WebDriverWait(self.driver, 3).until(EC.presence_of_element_located((By.XPATH, '//div[@data-testid="toast" and @role="alert"]//span'))).text
except NoSuchElementException:
error = """Home: Like error occured...\nPossible Reasons:\n1: Slow internet\n2: Xpath for error text of like is outdated OR error text is not displayed by twitter"""
except:
error = 'Home: Unexpected error'
self.utils.handle_error(error)
def like_and_comment(self, like_only=True, limit=1000):
self.utils.navigate(TwitterUrls.twitter_home_url)
current_iteration = 1
while current_iteration <= limit:
post = self.__find_post()
username, like_btn, reply_btn = self.__find_required_elements(post)
print(":"*20 + f"POST OF user {username} found" + ":"*20)
self.utils.click_js(like_btn)
print("-->post liked")
time.sleep(random.uniform(.5, .8))
self.__handle_like_error(like_btn)
if not like_only:
self.__reply(reply_btn, self.__get_comment())
print("-->replied to the post")
delay = random.uniform(2, 4)
print(f"Waiting {delay} seconds.")
time.sleep(delay)
| true |
f2869f845599fe43ada4ed6da493132d075bbf49 | Python | alexanderpinchao/BigData | /unificarCoordenadas.py | UTF-8 | 2,174 | 3.515625 | 4 | [] | no_license | #codigo creado por: Alexander Pinchao
#fecha: 10 de junio del 2016
#importamos el paquete de expresiones regulares
import re
#abrimos el archivo de texto con los valores de coordenadas del boundongbox y la fecha
file=open("compendio.txt")
#asignamos el texto del archivo a una variable
texto=file.read()
#cerramos el archivo
file.close()
#creamos la expresion regular para capturar el dia y la hora asi como las coordenadas del boundingbox
patron=re.compile(ur'(\D*\s+\D+\s+\d+\s+\d+:\d+:\d+\s\D\d+\s\d+),\s(\D\d+\D\d+),(\D\d+\D\d+),(\D*\d+\D\d+),(\D*\d+\D\d+),(\D*\d+\D\d+),(\D*\d+\D\d+),(\D*\d+\D\d+),(\D*\d+\D\d+)\s')
#damos el formato al texto de la variable
text='u'+texto
#comparamos el texto de la variable con el patron
m=re.findall(patron,text)
#recorremos la tupla creada
for x in m:
#asignamos el valor del elemento de la tupla a una variable
lat1=float(str(x[3]))
#asignamos el valor del elemento de la tupla a una variable
lat2=float(str(x[5]))
#comparamos los valores y deteminamos el orden de operacion
if(lat1>lat2):
#operamos los valores para obtener un solo valor unificado de latitud
lattot=lat2+(lat1-lat2)/2
#operamos el caso escepcion de los valores de latitud
else:
#operamos los valores para obtener un solo valor unificado de latitud
lattot=lat1-(lat1-lat2)/2
#asignamos el valor del elemento de la tupla a una variable
lon1=float(str(x[2]))
#asignamos el valor del elemento de la tupla a una variable
lon2=float(str(x[4]))
#comparamos los valores y deteminamos el orden de operacion
if(lon1>lon2):
#operamos los valores para obtener un solo valor unificado de longitud
lontot=lon2+(lon1-lon2)/2
else:
#operamos los valores para obtener un solo valor unificado de latitud con el caso complementario
lontot=lon1-(lon1-lon2)/2
#imprimimos los valores de fecha y magnitudes en consola
print str(x[0])+","+str(lattot)+","+str(lontot)
#los valores se imprimiran en consola en caso de desear un archivo de texto usar el modificador >>nomnbreArchivo.txt
#escrito a continuacion de la orden de compilacion ejemplo (~#pyhton archi.py>>archivo.txt)
| true |
aea441c8638ab2bbb186af0e07bfc6186a0ba649 | Python | dujiayao19950731/Python005-01 | /week01/函数及调用.py | UTF-8 | 860 | 2.703125 | 3 | [] | no_license | import logging
import datetime
import os
from pathlib import Path
def test_fuc():
print('I love python')
path = "C:\\Users\\杜家耀\\var"
log_path = os.path.isdir(path)
if log_path is not False:
print("This folder is already exist")
else:
os.makedirs(path + './var' + './log' + './python-20201128')
logging.basicConfig(filename="test.log",\
level = logging.DEBUG,\
datefmt="%Y-%m-%d %H-%M-%S",\
format = "%(asctime)s %(name)-8s %(levelname)-8s %(message)s",\
)
logging.debug('debug message')
logging.info('info message')
logging.warning('warning message')
logging.error('error message')
logging.critical('critical message')
test_fuc()
| true |
bdb1a78fa667141ac2abcb876abdbc4eede49aa1 | Python | PatrickPitts/Alexandria | /library_ui.py | UTF-8 | 20,693 | 2.796875 | 3 | [] | no_license | import tkinter as tk
import sqlite3 as sq
import LabelsAndEntries as LaE
import pollDb as Pdb
import build_db as bdb
class Book:
# an object that represents a book, to be worked with programmatically.
# typically, a Book object is created to collect and aggregate relevant data
# on a book identified by it's ISBN number. That data is then pulled from the Book
# object, usually to fill in the UI.
def __init__(self, isbn):
self.isbn = isbn
self.Authors = []
self.CondensedAuthorNames = []
self.BasicInfo = []
self.FullBookInfo = []
self.basic_book_info = []
self.full_book_info = []
self.cmd = '''SELECT Books.ISBN, Books.Title, Books.Subtitle,
Books.Publication_Date, Books.Genre, Books.Subgenre, Books.Series,
Books.Position_in_Series, Books.Format, Books.Publisher, Books.Owner, Books.Edition,
Authors.Author_First, Authors.Author_Middle, Authors.Author_Last
FROM BookToAuthors
INNER JOIN Authors ON
Authors.Author_ID = BookToAuthors.Author_ID
INNER JOIN BOOKS ON
Books.ISBN = BookToAuthors.ISBN WHERE Books.ISBN is %d''' % self.isbn
cur.execute(self.cmd)
self.DataResults = cur.fetchall()
# [ISBN 0, Title 1, Subtitle 2, Pub Date 3, Genre 4, Subgenre 5,
# Series 6, Pos. in Series 7, Book Format 8, Publisher 9, Owner 10, Edition 11,
# Author First 12, Author Middle 13, Author Last 14]
# self.DataResults will be a tuple containing a list this information from the database in this order.
# If there is more than one Author, DataResults will contain multiple lists, each of which will have different
# author name data, but otherwise will contain the same information
self.book_data = {
"ISBN": self.DataResults[0][0],
"Title": self.DataResults[0][1],
"Subtitle": self.DataResults[0][2],
"Publication Year": self.DataResults[0][3],
"Genre": self.DataResults[0][4],
"Subgenre": self.DataResults[0][5],
"Series": self.DataResults[0][6],
"Position in Series": self.DataResults[0][7],
"Format": self.DataResults[0][8],
"Publisher": self.DataResults[0][9],
"Owner": self.DataResults[0][10],
"Edition": self.DataResults[0][11]
}
for i in range(len(self.DataResults)):
ins = "Author %d" % (i+1)
name = ""
for item in self.DataResults[i][12:15]:
if item != u'None':
name += item + " "
self.book_data[ins] = name
def get_basic_data(self):
for item in basic_data:
if item == "Headline Author":
item = "Author 1"
self.basic_book_info.append(self.book_data[item])
return self.basic_book_info
def get_full_book_data(self):
self.full_book_info = []
for key in self.book_data:
self.full_book_info.append(self.book_data[key])
return self.full_book_info
def delete_book_data(self):
cmd = "DELETE FROM Books WHERE ISBN is %d" % self.isbn
cur.execute(cmd)
cmd = "SELECT Author_ID from BookToAuthors WHERE ISBN is %d" % self.isbn
cur.execute(cmd)
auth_id = cur.fetchone()[0]
cmd = "DELETE FROM BookToAuthors WHERE ISBN IS %d" % self.isbn
cur.execute(cmd)
cmd = "SELECT ISBN FROM BookToAuthors where Author_ID IS %d" % auth_id
cur.execute(cmd)
if not cur.fetchall():
cmd = "DELETE FROM Authors WHERE Author_ID is %d" % auth_id
cur.execute(cmd)
db.commit()
def get_title(self):
return self.book_data["Title"]
def test():
# b = Book(1111111111111)
pass
def create_connection():
# A function that generates a connection to the Alexandria.db database,
# which stores the book data for the library. Returns a connection object to the
# database itself, which is required to shut down the connection later, and
# a cursor object which is required to interact with the database.
d = sq.connect("Alexandria.db")
return d, d.cursor()
def close_connection(d):
# A function that takes a connection object, commits any changes to the
# connected database, then closes the connection.
d.commit()
d.close()
def search():
def build_search_command():
# The rest of the code in the Execute Search function relies on this
# select command returning these exact parameters, in this order. If changes
# are to be made to this SQL Command, then many other changes in this
# function will need to be made as well.
cmd = '''SELECT Books.ISBN FROM BookToAuthors
INNER JOIN Authors ON
Authors.Author_ID = BookToAuthors.Author_ID
INNER JOIN BOOKS ON
Books.ISBN = BookToAuthors.ISBN WHERE '''
for i in range(len(search_entries) - 1):
search_values = search_entries[i].get()
if search_values:
try:
search_values = int(search_values)
cmd += "Books.%s IS %d AND " % (sql_search_options[i], search_values)
except ValueError:
cmd += "Books.%s IS %r AND " % (sql_search_options[i], search_values)
author_name = search_entries[-1].get()
author_name_list = author_name.split()
# AuthorTableColumns = ["Author_First", "Author_Middle", "Author_Last"]
for name in author_name_list:
cmd += '''(Authors.Author_First IS %r
OR Authors.Author_Middle IS %r
OR Authors.Author_Last IS %r) OR ''' % (name, name, name)
results = []
try:
for row in cur.execute(cmd[:-5]):
results.append(row[0])
if 0 < len(results):
results = list(set(results))
build_basic_results_pane(get_basic_data(results))
else:
build_basic_results_pane(get_basic_data())
except sq.OperationalError:
build_basic_results_pane(get_basic_data())
search_pane = tk.Toplevel(bg=MainColor)
search_options = ["ISBN:", "Title:", "Subtitle:",
"Publication Date:", "Genre:", "Subgenre:", "Series:", "Edition:",
"Publisher:", "Format:", "Position in Series:", "Owner:", "Author Name:"]
search_loc = [(1, 1), (2, 1), (3, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1),
(11, 1), (12, 1), (13, 1), (4, 1)]
search_entries_widths = [16, 32, 32, 6, 16, 16, 32, 4, 16, 6, 4, 16, 32]
search_entries = LaE.LEBuild(search_pane, search_options, search_loc, BackgroundColor=MainColor,
EntryWidths=search_entries_widths)
search_button = tk.Button(search_pane, text="Search!", command=build_search_command)
search_button.grid(column=5)
sql_search_options = []
for thing in search_options:
sql_search_options.append(thing.replace(" ", "_").replace(":", ""))
def cleanup_root():
    """Clear every widget out of the root window, then rebuild the
    persistent chrome (the menu bar) so a fresh view can be drawn
    without leftover clutter."""
    for child in root.winfo_children():
        child.destroy()
    build_menus()
def build_menus():
    """Attach the application menu bar (Search / Add / TEST / QUIT) to root."""
    bar = tk.Menu(root)
    # Search drop-down.
    search_submenu = tk.Menu(bar, tearoff=0)
    search_submenu.add_command(label="Comprehensive Search", command=search)
    # Developer/testing drop-down.
    testing_submenu = tk.Menu(bar, tearoff=0)
    testing_submenu.add_command(label="Test Function", command=test)
    testing_submenu.add_command(label="Reset Database", command=bdb.main)
    testing_submenu.add_command(label="Poll Database", command=Pdb.main)
    # Order of the add_* calls below fixes the left-to-right menu order.
    bar.add_cascade(label="Search", menu=search_submenu)
    bar.add_command(label="Add", command=build_add_pane)
    bar.add_cascade(label="TEST", menu=testing_submenu)
    bar.add_command(label="QUIT", command=quit)
    root.config(menu=bar)
def full_book_display(isbn):
    """Open a Toplevel window showing every stored field of one book.

    Args:
        isbn: the ISBN of the book to display (primary key of Books).
    """
    results_pane = tk.Toplevel(bg=MainColor)
    top_left = tk.Frame(results_pane, bg=SecondaryColor)
    top_left.grid(row=0, column=0, padx=5, pady=5)
    bottom_left = tk.Frame(results_pane, bg=SecondaryColor)
    bottom_left.grid(row=1, column=0, padx=5, pady=5)
    top_right = tk.Frame(results_pane, bg=SecondaryColor)
    top_right.grid(row=0, column=1, columnspan=3, padx=5, pady=5)
    b = Book(isbn)
    # Fetch the full record once (the original queried it twice); the first
    # 12 entries are the Books columns, the remainder are author names.
    full_data = b.get_full_book_data()
    book_data, author_data = full_data[:12], full_data[12:]
    num_authors = len(author_data)
    labels1 = ["Title:", "Subtitle:", "Series:", "Position in Series:", "Edition:"]
    labels2 = ["Publisher:", "Publication Date:", "Format:", "ISBN:"]
    labels3 = ["Author(s):", "Genre:", "Subgenre:", "Owner:"]
    data1 = [book_data[1], book_data[2], book_data[6], book_data[7], book_data[11]]
    data2 = [book_data[9], book_data[3], book_data[8], book_data[0]]
    # The author list is variable-length, so the right-hand column layout is
    # computed relative to num_authors below.
    data3 = []
    for name in author_data:
        data3.append(name)
    data3.append(book_data[4])
    data3.append(book_data[5])
    data3.append(book_data[10])
    label1_loc = [(1, 1), (2, 1), (3, 1), (4, 1), (5, 1)]
    label2_loc = [(1, 1), (2, 1), (3, 1), (4, 1)]
    label3_loc = [(1, 1), (num_authors + 1, 1), (num_authors + 2, 1), (num_authors + 3, 1)]
    data1_loc = [(1, 2), (2, 2), (3, 2), (4, 2), (5, 2)]
    data2_loc = [(1, 2), (2, 2), (3, 2), (4, 2)]
    data3_loc = []
    for i in range(1, num_authors + 1):
        data3_loc.append((i, 2))
    data3_loc.append((num_authors + 1, 2))
    data3_loc.append((num_authors + 2, 2))
    data3_loc.append((num_authors + 3, 2))
    LaE.LabelBuild(top_left, labels1, label1_loc, BackgroundColor=SecondaryColor)
    LaE.LabelBuild(bottom_left, labels2, label2_loc, BackgroundColor=SecondaryColor)
    LaE.LabelBuild(top_right, labels3, label3_loc, BackgroundColor=SecondaryColor)
    LaE.LabelBuild(top_left, data1, data1_loc, BackgroundColor=SecondaryColor)
    LaE.LabelBuild(bottom_left, data2, data2_loc, BackgroundColor=SecondaryColor)
    LaE.LabelBuild(top_right, data3, data3_loc, BackgroundColor=SecondaryColor)
def confirm_delete(b):
    """Show a confirmation dialog before deleting book `b` from the database."""
    def delete_and_close():
        # Remove the record, persist the change, and refresh the main list.
        b.delete_book_data()
        db.commit()
        confirm.destroy()
        build_basic_results_pane(get_basic_data())
    confirm = tk.Toplevel(bg=MainColor)
    prompt = "Are you sure you want to delete %s from your collection?" % b.get_title()
    tk.Label(confirm, text=prompt, bg=MainColor).grid(row=0, column=0, padx=10, pady=10, columnspan=2)
    tk.Button(confirm, text="Delete", command=delete_and_close).grid(row=1, column=0, padx=10, pady=10)
    tk.Button(confirm, text="Cancel", command=confirm.destroy).grid(row=1, column=1, padx=10, pady=10)
def get_basic_data(*results):
    """Collect the summary tuple for each requested book.

    With no argument, every ISBN in the Books table (ordered by title) is
    used; otherwise the first positional argument is taken as an iterable
    of ISBNs (e.g. the output of a search).

    Returns:
        list: one `Book.get_basic_data()` tuple per ISBN.
    """
    if results:
        isbns = [row for row in results[0]]
    else:
        isbns = [row[0] for row in cur.execute('''SELECT ISBN FROM books ORDER BY Title''')]
    return [Book(num).get_basic_data() for num in isbns]
def setup():
    # Builds the starting frames and Tkinter windows, in which all other functionality is built.
    # Stores the top-level window in the module-level `root` used by every builder function.
    global root
    root = tk.Tk()
    root.title("Alexandria")
    root.geometry("+0+0")  # place the window at the top-left corner of the screen
def build_basic_results_pane(records):
    """Render the list of books in the root window.

    Args:
        records: list of per-book summary tuples as produced by
            get_basic_data(); element 0 of each tuple is the ISBN.
    """
    cleanup_root()
    data_frame = tk.Frame(root, bg=SecondaryColor)
    data_frame.grid(row=1, column=0)
    header_frame = tk.Frame(root, bg="LightBlue")
    header_frame.grid(row=0, column=0)
    # Column widths mirror the order of the `basic_data` header names.
    label_widths = [13, 64, 16, 16, 16, 32, 6]
    more_buttons = []
    delete_buttons = []
    for i in range(len(basic_data)):
        x = tk.Label(header_frame, text=basic_data[i], bg="LightBlue", width=label_widths[i])
        x.grid(row=0, column=i)
    # this extra header added to the header acts as a spacer to account for the extra width of each result frame,
    # ensuring that the header frame has Light Blue all the way across the frame, above the buttons of the
    # results.
    x = tk.Label(header_frame, text="", bg="LightBlue", width=25)
    x.grid(row=0, column=len(basic_data)+1, columnspan=2)
    for i in range(len(records)):
        count_of_book_data = len(records[i])
        # Alternate the row background colour for readability.
        if i % 2 == 0:
            color = SecondaryColor
        else:
            color = MainColor
        f = tk.Frame(data_frame, bg=color, height=3)
        f.grid()
        for j in range(count_of_book_data):
            msg = tk.Label(f, text=records[i][j], width=label_widths[j], bg=color)
            msg.grid(row=i + 1, column=j)
        isbn_to_pass = records[i][0]
        # The current ISBN is bound as a default argument so each button keeps
        # its own value (a plain closure would see only the last loop value).
        more_buttons.append(tk.Button(f, text="More...", width=6,
                                      command=lambda q=isbn_to_pass: full_book_display(q)))
        more_buttons[i].grid(row=i + 1, column=count_of_book_data + 1, padx=10)
        delete_buttons.append(tk.Button(f, text="Delete...", width=9,
                                        command=lambda b=Book(isbn_to_pass): confirm_delete(b)))
        delete_buttons[i].grid(row=i + 1, column=count_of_book_data + 2, padx=10)
def insert_book_data():
    """Gather the data from the Add A Book pane, validate it, and insert it
    into the Alexandria database.

    All SQL statements now use `?` parameter binding instead of `%r`/f-string
    interpolation, so quotes or other special characters in titles and author
    names can no longer break the statement (or inject SQL).
    """
    flag = insert_data_sanitation_checks()
    if not flag:
        print("Problems with data, double check data")
        return
    print("Passed Sanitation Checks, Inserting (Not)")
    # author_data is a flat tuple repeating
    # (Author_First, Author_Middle, Author_Last) for each author.
    author_data = LaE.EntriesToTuple(AuthorFields)
    # book_data is in the format of
    # (Title, Subtitle, ISBN Number, Series Name, Position in Series, Genre,
    # Subgenre, Publication Year, Publisher, Book Format, Book Owner)
    book_data = LaE.EntriesToTuple(BookFields)
    select_author = '''SELECT Author_ID FROM Authors WHERE Author_First IS ? AND
                        Author_Middle IS ? AND
                        Author_Last IS ?'''
    for i in range(0, len(author_data), 3):
        # Empty name parts are stored as the literal string "None",
        # matching how existing rows were written.
        first = author_data[i] or "None"
        middle = author_data[i + 1] or "None"
        last = author_data[i + 2] or "None"
        # Check whether this author name set is already in the database.
        cur.execute(select_author, (first, middle, last))
        author_id = cur.fetchall()
        if not author_id:
            # Author not present yet: insert, then re-read the generated ID.
            print("That author isnt in the database!")
            cur.execute('''INSERT INTO Authors(Author_First, Author_Middle, Author_Last)
                        VALUES(?, ?, ?)''', (first, middle, last))
            cur.execute(select_author, (first, middle, last))
            author_id = cur.fetchall()
        cur.execute('''INSERT INTO BookToAuthors (Author_ID, ISBN) VALUES (?, ?)''',
                    (author_id[0][0], book_data[2]))
    cur.execute('''INSERT INTO Books (Title, Subtitle, ISBN, Series, Position_in_Series,
                Genre, Subgenre, Publication_Date, Publisher, Format, Owner) VALUES
                (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''', book_data)
    db.commit()
def insert_data_sanitation_checks():
    """Validate the Add A Book form before insertion.

    Fixes over the original: a non-numeric or empty ISBN entry no longer
    crashes the handler (int() is guarded), the length check uses `!=`
    instead of identity comparison on ints, and the duplicate-ISBN query
    is parameterized.

    Returns:
        bool: True when the entered data may be inserted; otherwise False,
        with an explanation shown in ErrorText.
    """
    ErrorText.config(text="")
    title = BookFields[0].get()
    try:
        # int() raises ValueError on an empty or non-numeric entry; the
        # original code crashed here instead of reporting the problem.
        isbn = int(BookFields[2].get())
    except ValueError:
        ErrorText.config(text="You need at least a title and ISBN to add a book")
        return False
    if not isbn or len(title) == 0:
        ErrorText.config(text="You need at least a title and ISBN to add a book")
        return False
    cur.execute("SELECT Title FROM Books WHERE ISBN = ?", (isbn,))
    if cur.fetchall():
        ErrorText.config(text="Cannot add repeat ISBN Numbers.")
        return False
    if len(str(isbn)) not in (10, 13):
        ErrorText.config(text="ISBN needs to be either 10 or 13 characters long")
        return False
    return True
def build_add_pane():
    """Replace the root window contents with the "Add A Book" form.

    Populates the module-level AuthorFields/BookFields entry lists that
    insert_book_data() later reads, and ErrorText for validation messages.
    """
    cleanup_root()
    global NumAuthors
    NumAuthors = 1
    global AuthorFields
    AuthorFields = []
    global BookFields
    global ErrorText
    def more_authors():
        # Append one more (First, Middle, Last) row of author entry widgets.
        global NumAuthors
        global AuthorFields
        auth_labels = ["Author, First: ", "Middle: ", "Last: "]
        auth_locations = [(NumAuthors + 1, 1), (NumAuthors + 1, 2), (NumAuthors + 1, 3)]
        AuthorFields += LaE.LEBuild(author_frame, auth_labels, auth_locations, BackgroundColor="thistle")
        NumAuthors += 1
    def build_main_menu():
        # "Back" action: return to the full book listing.
        build_basic_results_pane(get_basic_data())
    master = tk.Frame(root, bg=SecondaryColor)
    master.grid()
    button_frame = tk.Frame(master, bg=SecondaryColor)
    button_frame.grid(row=0, column=0)
    data_frame = tk.Frame(master, bg=SecondaryColor)
    data_frame.grid(row=0, column=1)
    right_frame = tk.Frame(master, bg=SecondaryColor)
    right_frame.grid(row=0, column=2)
    author_frame = tk.Frame(right_frame, bg=SecondaryColor)
    author_frame.grid(row=1, column=0)
    author_button_frame = tk.Frame(right_frame, bg=SecondaryColor)
    author_button_frame.grid(row=0, column=0)
    back_button = tk.Button(button_frame, text="Back", command=build_main_menu)
    back_button.grid(row=1, column=0)
    insert_button = tk.Button(button_frame, text="Add Book!", padx=10, pady=10, command=insert_book_data)
    insert_button.grid(row=2, column=0, pady=20)
    more_authors_button = tk.Button(author_button_frame, text="Another Author", command=more_authors,
                                    padx=10, pady=10)
    more_authors_button.grid(row=0, column=1)
    x = tk.Label(button_frame, text="ADD A BOOK TO THE LIBRARY", bg="LightBlue")
    x.grid(row=0, column=0, padx=10, pady=10, ipadx=10, ipady=10)
    book_label_texts = ["Title: ", "Subtitle: ", "ISBN: ", "Series: ", "Series #: ",
                        "Genre: ", "Subgenre: ", "Publication Year: ", "Publisher: "]
    book_label_locations = [(1, 1), (1, 2), (1, 3), (3, 1), (3, 2), (5, 1), (5, 2), (7, 1), (7, 2), (7, 3)]
    BookFields = LaE.LEBuild(data_frame, book_label_texts, book_label_locations, BackgroundColor="thistle")
    # Format and Owner are fixed-choice OptionMenus appended to BookFields so
    # insert_book_data() can read them like the other entries.
    x = tk.Label(data_frame, text="Format: ", bg="thistle")
    x.grid(row=7, column=5)
    format_option = tk.StringVar(data_frame)
    format_option.set("Mass Market PB")
    t = tk.OptionMenu(data_frame, format_option, "Mass Market PB", "Trade PB", "Hard Back")
    t.grid(row=7, column=6)
    BookFields.append(format_option)
    x = tk.Label(data_frame, text="Owner: ", bg="thistle")
    x.grid(row=9, column=1)
    owner_option = tk.StringVar(data_frame)
    owner_option.set("Patrick & Shelby")
    t = tk.OptionMenu(data_frame, owner_option, "Patrick & Shelby", "John & Kathy")
    t.grid(row=9, column=2)
    BookFields.append(owner_option)
    ErrorText = tk.Label(data_frame, text="", bg=SecondaryColor, fg="red", font="bold")
    ErrorText.grid(row=10, column=1, columnspan=5)
    # Start the form with a single author row.
    more_authors()
def main():
    """Program entry point: build the root window, draw the full book list,
    and hand control to the Tk event loop.

    The original declared `global db, cur, MainColor, SecondaryColor,
    basic_data` here without ever assigning them — those declarations were
    no-ops (reading a global needs no declaration) and have been removed.
    """
    setup()
    build_basic_results_pane(get_basic_data())
    root.mainloop()
if __name__ == '__main__':
    # Module-level state shared by the UI-builder functions above.
    db, cur = create_connection()
    MainColor = "khaki1"  # primary window/background colour
    SecondaryColor = "LightGoldenrod2"  # accent colour for frames
    basic_data = ["ISBN", "Title", "Headline Author", "Publication Year", "Genre", "Series"]
    main()
| true |
cdd4ea77646358b26786622ab5adfa8a06a1041c | Python | NelsonStevenSM/HackerRank | /NoIdea.py | UTF-8 | 367 | 3.34375 | 3 | [] | no_license | if __name__=="__main__":
n, m = raw_input().split()
array = list(map(int, raw_input().split()))
A = set(list(map(int, raw_input().split())))
B = set(list(map(int, raw_input().split())))
happiness = 0
for i in array:
if i in A :
happiness+=1
if i in B :
happiness-=1
print(happiness)
| true |
82cb089ae9d96a51ebd7649fd3f86212714a5fcb | Python | Epic-Doughnut/Dictionary-Subset | /dictionarySubset.py | UTF-8 | 551 | 3.203125 | 3 | [] | no_license | from english_words import english_words_lower_set
import sys
if len(sys.argv) == 1:
    # No letters supplied: show usage and abort with a non-zero exit code
    # (the original used exit(), which reported success to the shell).
    print('You must include a list of letters to include!\nFormat: <include> [exclude]')
    sys.exit(1)
# Print words containing every letter in `include` and no letter in `exclude`.
# To leave include empty, pass '' as the first argument.
include = sys.argv[1]
exclude = sys.argv[2] if len(sys.argv) > 2 else ''
for word in english_words_lower_set:
    if all(ch in word for ch in include) and not any(ch in word for ch in exclude):
        print(word)
| true |
3e3100c5751a3b30da55234300ef70267cffd40d | Python | inspirer/textmapper | /java/tm-tool/samples/js/src/org/textmapper/js/ts.spec | UTF-8 | 12,596 | 2.640625 | 3 | [
"MIT",
"Apache-2.0"
] | permissive | # Typescript 1.8, Jan 2016
# A.1 Types
TypeParameters:
'<' TypeParameterList '>'
TypeParameterList:
TypeParameter
TypeParameterList ',' TypeParameter
TypeParameter:
BindingIdentifier Constraintopt
Constraint:
'extends' Type
TypeArguments:
'<' TypeArgumentList '>'
TypeArgumentList:
TypeArgument
TypeArgumentList ',' TypeArgument
TypeArgument:
Type
Type:
UnionOrIntersectionOrPrimaryType
FunctionType
ConstructorType
UnionOrIntersectionOrPrimaryType:
UnionType
IntersectionOrPrimaryType
IntersectionOrPrimaryType:
IntersectionType
PrimaryType
PrimaryType:
ParenthesizedType
PredefinedType
TypeReference
ObjectType
ArrayType
TupleType
TypeQuery
ThisType
ParenthesizedType:
'(' Type ')'
PredefinedType:
'any'
'number'
'boolean'
'string'
'symbol'
'void'
TypeReference:
TypeName /*no LineTerminator here*/ TypeArgumentsopt
TypeName:
IdentifierReference
NamespaceName '.' IdentifierReference
NamespaceName:
IdentifierReference
NamespaceName '.' IdentifierReference
ObjectType:
'{' TypeBodyopt '}'
TypeBody:
TypeMemberList ';'opt
TypeMemberList ','opt
TypeMemberList:
TypeMember
TypeMemberList ';' TypeMember
TypeMemberList ',' TypeMember
TypeMember:
PropertySignature
CallSignature
ConstructSignature
IndexSignature
MethodSignature
ArrayType:
PrimaryType /*no LineTerminator here*/ '[' ']'
TupleType:
'[' TupleElementTypes ']'
TupleElementTypes:
TupleElementType
TupleElementTypes ',' TupleElementType
TupleElementType:
Type
UnionType:
UnionOrIntersectionOrPrimaryType '|' IntersectionOrPrimaryType
IntersectionType:
IntersectionOrPrimaryType '&' PrimaryType
FunctionType:
TypeParametersopt '(' ParameterListopt ')' '=>' Type
ConstructorType:
'new' TypeParametersopt '(' ParameterListopt ')' '=>' Type
TypeQuery:
'typeof' TypeQueryExpression
TypeQueryExpression:
IdentifierReference
TypeQueryExpression '.' IdentifierName
ThisType:
'this'
PropertySignature:
PropertyName '?'opt TypeAnnotationopt
PropertyName:
IdentifierName
StringLiteral
NumericLiteral
TypeAnnotation:
':' Type
CallSignature:
TypeParametersopt '(' ParameterListopt ')' TypeAnnotationopt
ParameterList:
RequiredParameterList
OptionalParameterList
RestParameter
RequiredParameterList ',' OptionalParameterList
RequiredParameterList ',' RestParameter
OptionalParameterList ',' RestParameter
RequiredParameterList ',' OptionalParameterList ',' RestParameter
RequiredParameterList:
RequiredParameter
RequiredParameterList ',' RequiredParameter
RequiredParameter:
AccessibilityModifieropt BindingIdentifierOrPattern TypeAnnotationopt
BindingIdentifier ':' StringLiteral
AccessibilityModifier:
'public'
'private'
'protected'
BindingIdentifierOrPattern:
BindingIdentifier
BindingPattern
OptionalParameterList:
OptionalParameter
OptionalParameterList ',' OptionalParameter
OptionalParameter:
AccessibilityModifieropt BindingIdentifierOrPattern '?' TypeAnnotationopt
AccessibilityModifieropt BindingIdentifierOrPattern TypeAnnotationopt Initializer
BindingIdentifier '?' ':' StringLiteral
RestParameter:
'...' BindingIdentifier TypeAnnotationopt
ConstructSignature:
'new' TypeParametersopt '(' ParameterListopt ')' TypeAnnotationopt
IndexSignature:
'[' BindingIdentifier ':' 'string' ']' TypeAnnotation
'[' BindingIdentifier ':' 'number' ']' TypeAnnotation
MethodSignature:
PropertyName '?'opt CallSignature
TypeAliasDeclaration:
'type' BindingIdentifier TypeParametersopt '=' Type ';'
# A.2 Expressions
PropertyDefinition: /* Modified */
IdentifierReference
CoverInitializedName
PropertyName ':' AssignmentExpression
PropertyName CallSignature '{' FunctionBody '}'
GetAccessor
SetAccessor
GetAccessor:
'get' PropertyName '(' ')' TypeAnnotationopt '{' FunctionBody '}'
SetAccessor:
'set' PropertyName '(' BindingIdentifierOrPattern TypeAnnotationopt ')' '{' FunctionBody '}'
FunctionExpression: /* Modified */
'function' BindingIdentifieropt CallSignature '{' FunctionBody '}'
ArrowFormalParameters: /* Modified */
CallSignature
Arguments: /* Modified */
TypeArgumentsopt '(' ArgumentListopt ')'
UnaryExpression: /* Extension */
'<' Type '>' UnaryExpression
# A.3 Statements
Declaration: /* Extension */
InterfaceDeclaration
TypeAliasDeclaration
EnumDeclaration
VariableDeclaration: /* Modified */
SimpleVariableDeclaration
DestructuringVariableDeclaration
SimpleVariableDeclaration:
BindingIdentifier TypeAnnotationopt Initializeropt
DestructuringVariableDeclaration:
BindingPattern TypeAnnotationopt Initializer
LexicalBinding: /* Modified */
SimpleLexicalBinding
DestructuringLexicalBinding
SimpleLexicalBinding:
BindingIdentifier TypeAnnotationopt Initializeropt
DestructuringLexicalBinding:
BindingPattern TypeAnnotationopt Initializeropt
# A.4 Functions
FunctionDeclaration: /* Modified */
'function' BindingIdentifieropt CallSignature '{' FunctionBody '}'
'function' BindingIdentifieropt CallSignature ';'
# A.5 Interfaces
InterfaceDeclaration:
'interface' BindingIdentifier TypeParametersopt InterfaceExtendsClauseopt ObjectType
InterfaceExtendsClause:
'extends' ClassOrInterfaceTypeList
ClassOrInterfaceTypeList:
ClassOrInterfaceType
ClassOrInterfaceTypeList ',' ClassOrInterfaceType
ClassOrInterfaceType:
TypeReference
# A.6 Classes
ClassDeclaration: /* Modified */
'class' BindingIdentifieropt TypeParametersopt ClassHeritage '{' ClassBody '}'
ClassHeritage: /* Modified */
ClassExtendsClauseopt ImplementsClauseopt
ClassExtendsClause:
'extends' ClassType
ClassType:
TypeReference
ImplementsClause:
'implements' ClassOrInterfaceTypeList
ClassElement: /* Modified */
ConstructorDeclaration
PropertyMemberDeclaration
IndexMemberDeclaration
ConstructorDeclaration:
AccessibilityModifieropt 'constructor' '(' ParameterListopt ')' '{' FunctionBody '}'
AccessibilityModifieropt 'constructor' '(' ParameterListopt ')' ';'
PropertyMemberDeclaration:
MemberVariableDeclaration
MemberFunctionDeclaration
MemberAccessorDeclaration
MemberVariableDeclaration:
AccessibilityModifieropt 'static'opt PropertyName TypeAnnotationopt Initializeropt ';'
MemberFunctionDeclaration:
AccessibilityModifieropt 'static'opt PropertyName CallSignature '{' FunctionBody '}'
AccessibilityModifieropt 'static'opt PropertyName CallSignature ';'
MemberAccessorDeclaration:
AccessibilityModifieropt 'static'opt GetAccessor
AccessibilityModifieropt 'static'opt SetAccessor
IndexMemberDeclaration:
IndexSignature ';'
# A.7 Enums
EnumDeclaration:
'const'opt 'enum' BindingIdentifier '{' EnumBodyopt '}'
EnumBody:
EnumMemberList ','opt
EnumMemberList:
EnumMember
EnumMemberList ',' EnumMember
EnumMember:
PropertyName
PropertyName '=' EnumValue
EnumValue:
AssignmentExpression
# A.8 Namespaces
NamespaceDeclaration:
'namespace' IdentifierPath '{' NamespaceBody '}'
IdentifierPath:
BindingIdentifier
IdentifierPath '.' BindingIdentifier
NamespaceBody:
NamespaceElementsopt
NamespaceElements:
NamespaceElement
NamespaceElements NamespaceElement
NamespaceElement:
Statement
LexicalDeclaration
FunctionDeclaration
GeneratorDeclaration
ClassDeclaration
InterfaceDeclaration
TypeAliasDeclaration
EnumDeclaration
NamespaceDeclaration
AmbientDeclaration
ImportAliasDeclaration
ExportNamespaceElement
ExportNamespaceElement:
'export' VariableStatement
'export' LexicalDeclaration
'export' FunctionDeclaration
'export' GeneratorDeclaration
'export' ClassDeclaration
'export' InterfaceDeclaration
'export' TypeAliasDeclaration
'export' EnumDeclaration
'export' NamespaceDeclaration
'export' AmbientDeclaration
'export' ImportAliasDeclaration
ImportAliasDeclaration:
'import' BindingIdentifier '=' EntityName ';'
EntityName:
NamespaceName
NamespaceName '.' IdentifierReference
# A.9 Scripts and Modules
SourceFile:
ImplementationSourceFile
DeclarationSourceFile
ImplementationSourceFile:
ImplementationScript
ImplementationModule
DeclarationSourceFile:
DeclarationScript
DeclarationModule
ImplementationScript:
ImplementationScriptElementsopt
ImplementationScriptElements:
ImplementationScriptElement
ImplementationScriptElements ImplementationScriptElement
ImplementationScriptElement:
ImplementationElement
AmbientModuleDeclaration
ImplementationElement:
Statement
LexicalDeclaration
FunctionDeclaration
GeneratorDeclaration
ClassDeclaration
InterfaceDeclaration
TypeAliasDeclaration
EnumDeclaration
NamespaceDeclaration
AmbientDeclaration
ImportAliasDeclaration
DeclarationScript:
DeclarationScriptElementsopt
DeclarationScriptElements:
DeclarationScriptElement
DeclarationScriptElements DeclarationScriptElement
DeclarationScriptElement:
DeclarationElement
AmbientModuleDeclaration
DeclarationElement:
InterfaceDeclaration
TypeAliasDeclaration
NamespaceDeclaration
AmbientDeclaration
ImportAliasDeclaration
ImplementationModule:
ImplementationModuleElementsopt
ImplementationModuleElements:
ImplementationModuleElement
ImplementationModuleElements ImplementationModuleElement
ImplementationModuleElement:
ImplementationElement
ImportDeclaration
ImportAliasDeclaration
ImportRequireDeclaration
ExportImplementationElement
ExportDefaultImplementationElement
ExportListDeclaration
ExportAssignment
DeclarationModule:
DeclarationModuleElementsopt
DeclarationModuleElements:
DeclarationModuleElement
DeclarationModuleElements DeclarationModuleElement
DeclarationModuleElement:
DeclarationElement
ImportDeclaration
ImportAliasDeclaration
ExportDeclarationElement
ExportDefaultDeclarationElement
ExportListDeclaration
ExportAssignment
ImportRequireDeclaration:
'import' BindingIdentifier '=' 'require' '(' StringLiteral ')' ';'
ExportImplementationElement:
'export' VariableStatement
'export' LexicalDeclaration
'export' FunctionDeclaration
'export' GeneratorDeclaration
'export' ClassDeclaration
'export' InterfaceDeclaration
'export' TypeAliasDeclaration
'export' EnumDeclaration
'export' NamespaceDeclaration
'export' AmbientDeclaration
'export' ImportAliasDeclaration
ExportDeclarationElement:
'export' InterfaceDeclaration
'export' TypeAliasDeclaration
'export' AmbientDeclaration
'export' ImportAliasDeclaration
ExportDefaultImplementationElement:
'export' 'default' FunctionDeclaration
'export' 'default' GeneratorDeclaration
'export' 'default' ClassDeclaration
'export' 'default' AssignmentExpression ';'
ExportDefaultDeclarationElement:
'export' 'default' AmbientFunctionDeclaration
'export' 'default' AmbientClassDeclaration
'export' 'default' IdentifierReference ';'
ExportListDeclaration:
'export' '*' FromClause ';'
'export' ExportClause FromClause ';'
'export' ExportClause ';'
ExportAssignment:
'export' '=' IdentifierReference ';'
# A.10 Ambients
AmbientDeclaration:
'declare' AmbientVariableDeclaration
'declare' AmbientFunctionDeclaration
'declare' AmbientClassDeclaration
'declare' AmbientEnumDeclaration
'declare' AmbientNamespaceDeclaration
AmbientVariableDeclaration:
'var' AmbientBindingList ';'
'let' AmbientBindingList ';'
'const' AmbientBindingList ';'
AmbientBindingList:
AmbientBinding
AmbientBindingList ',' AmbientBinding
AmbientBinding:
BindingIdentifier TypeAnnotationopt
AmbientFunctionDeclaration:
'function' BindingIdentifier CallSignature ';'
AmbientClassDeclaration:
'class' BindingIdentifier TypeParametersopt ClassHeritage '{' AmbientClassBody '}'
AmbientClassBody:
AmbientClassBodyElementsopt
AmbientClassBodyElements:
AmbientClassBodyElement
AmbientClassBodyElements AmbientClassBodyElement
AmbientClassBodyElement:
AmbientConstructorDeclaration
AmbientPropertyMemberDeclaration
IndexSignature
AmbientConstructorDeclaration:
'constructor' '(' ParameterListopt ')' ';'
AmbientPropertyMemberDeclaration:
AccessibilityModifieropt 'static'opt PropertyName TypeAnnotationopt ';'
AccessibilityModifieropt 'static'opt PropertyName CallSignature ';'
AmbientEnumDeclaration:
EnumDeclaration
AmbientNamespaceDeclaration:
'namespace' IdentifierPath '{' AmbientNamespaceBody '}'
AmbientNamespaceBody:
AmbientNamespaceElementsopt
AmbientNamespaceElements:
AmbientNamespaceElement
AmbientNamespaceElements AmbientNamespaceElement
AmbientNamespaceElement:
'export'opt AmbientVariableDeclaration
'export'opt AmbientLexicalDeclaration
'export'opt AmbientFunctionDeclaration
'export'opt AmbientClassDeclaration
'export'opt InterfaceDeclaration
'export'opt AmbientEnumDeclaration
'export'opt AmbientNamespaceDeclaration
'export'opt ImportAliasDeclaration
AmbientModuleDeclaration:
'declare' 'module' StringLiteral '{' DeclarationModule '}'
| true |
5b93c154936cf66167fbc4fa376a7bbb7ff1bf08 | Python | DeepanshuAhuja/imdb | /tests/test_data.py | UTF-8 | 5,046 | 2.90625 | 3 | [] | no_license | # from util.setup import Setup
from selenium import webdriver
import pytest
from selenium.webdriver.support.select import Select
from actions.helper import Helper
@pytest.fixture(scope="session")
def open_browser():
    """
    Session-scoped fixture: launch Chrome, open the IMDb Top 250 chart,
    and hand the driver to each test.

    :return: the selenium WebDriver instance
    """
    # NOTE(review): hard-coded, machine-specific chromedriver path — consider
    # reading it from an environment variable instead.
    driver = webdriver.Chrome("/Users/deepanshu.ahuja/Documents/chromedriver")
    driver.get("https://www.imdb.com/chart/top/?ref_=nv_mv_250")
    return driver
def test_ranking(open_browser, request):
    """
    Validate that the top 250 movies are listed in rank order (1..N),
    and prepare the "golden data" cache consumed by the other tests.

    parameters:
        open_browser = driver fixture
        request = used to cache the golden data
    """
    table = open_browser.find_element_by_xpath("//tbody[@class='lister-list']")
    superlist = table.text.split("\n")
    # Each entry starts with "<rank>." — check it matches its position.
    ranks_ok = all(int(entry.split(".")[0]) == position + 1
                   for position, entry in enumerate(superlist))
    golden = Helper().super_data(superlist)
    request.config.cache.set("golden_data", golden)
    Helper().write_into_file("target/ranking.txt", superlist)
    assert ranks_ok, "The list is not sorted with the ranking type"
def test_year(open_browser, request):
    """
    Validate that the top 250 movies are correctly sorted by release date
    (descending), and that each row's title and IMDb rating match the
    golden data cached by test_ranking.

    Fixes over the original: an out-of-order pair now actually fails the
    test (the original only set the failure flag inside the in-order
    branch), and the golden-data lookup uses the current row instead of a
    copy-pasted `superlist[0]`.

    parameters:
        open_browser = driver fixture
        request = used to read the cached golden data
    """
    dict1 = request.config.cache.get("golden_data", None)
    dropdown = Select(open_browser.find_element_by_id("lister-sort-by-options"))
    dropdown.select_by_index(2)  # sort by release date
    elements = open_browser.find_element_by_xpath("//tbody[@class='lister-list']")
    superlist = elements.text.split("\n")
    flag = 0
    for i in range(len(superlist)):
        # Entry format: "<rank>. <title> (<year>) <rating>"
        rank = superlist[i].split(".")[0]
        title = superlist[i].split(".")[1].split(" (")[0].strip()
        year = int(superlist[i].split("(")[1].split(")")[0])
        rating = float(superlist[i].split("(")[1].split(")")[1])
        if i < len(superlist) - 1:
            next_year = int(superlist[i + 1].split("(")[1].split(")")[0])
            if year < next_year:
                flag = 1
                break
        if dict1[rank][0] != title or dict1[rank][2] != rating:
            flag = 1
            break
    Helper().write_into_file("target/year.txt", superlist)
    assert (flag == 0), "Top 250 movies are not correctly sorted with the release date"
def test_imdb_rating(open_browser, request):
    """
    Validate that the top 250 movies are correctly sorted by IMDb rating
    (descending), and that each row's title and release year match the
    golden data cached by test_ranking.

    Fixes over the original: an out-of-order pair now actually fails the
    test (the original only set the failure flag inside the in-order
    branch), and every golden-data lookup uses the current row instead of
    the copy-pasted `superlist[0]`.

    parameters:
        open_browser = driver fixture
        request = used to read the cached golden data
    """
    dict1 = request.config.cache.get("golden_data", None)
    dropdown = Select(open_browser.find_element_by_id("lister-sort-by-options"))
    dropdown.select_by_index(1)  # sort by IMDb rating
    elements = open_browser.find_element_by_xpath("//tbody[@class='lister-list']")
    superlist = elements.text.split("\n")
    flag = 0
    for i in range(len(superlist)):
        # Entry format: "<rank>. <title> (<year>) <rating>"
        rank = superlist[i].split(".")[0]
        title = superlist[i].split(".")[1].split(" (")[0].strip()
        year = int(superlist[i].split("(")[1].split(")")[0])
        rating = float(superlist[i].split("(")[1].split(")")[1])
        if i < len(superlist) - 1:
            next_rating = float(superlist[i + 1].split("(")[1].split(")")[1])
            if rating < next_rating:
                flag = 1
                break
        if dict1[rank][0] != title or dict1[rank][1] != year:
            flag = 1
            break
    Helper().write_into_file("target/imdbratings.txt", superlist)
    assert (flag == 0), "Top 250 movies are not correctly sorted with the imdb ratings"
| true |
5e09218f55f24a066529fc0e0f3400ad4ef0c80b | Python | krishnakumar98/krishna | /chk.py | UTF-8 | 157 | 3.375 | 3 | [] | no_license | n=int(input("Enter the n"))
s = int(input("Enter the s"))
# Read n values and remember whether the target s appeared among them.
found = False
for _ in range(n):
    value = int(input())
    if value == s:
        found = True
if found:
    print("yes")
else:
    print("no")
| true |
4d385785d90cb8845335d9489b52081e5cfbc503 | Python | analyticalmindsltd/smote_variants | /smote_variants/base/_metrics.py | UTF-8 | 6,519 | 2.6875 | 3 | [
"MIT"
] | permissive | """
This module implements some imbalanced classification metrics.
"""
import numpy as np
from sklearn.metrics import roc_auc_score, log_loss
# Names exported by ``from smote_variants.base._metrics import *``.
__all__ = ['prediction_labels',
           'calculate_atoms',
           'calculate_label_scores',
           'calculate_prob_scores',
           'calculate_all_scores',
           'all_scores']
# Keys of every metric this module can produce.
all_scores = ['acc', 'sens', 'spec', 'ppv', 'npv', 'fpr', 'fdr',
              'fnr', 'bacc', 'gacc', 'f1', 'mcc', 'l', 'ltp', 'lfp', 'lfn',
              'ltn', 'lp', 'ln', 'uc', 'informedness', 'markedness', 'p_top20',
              'brier', 'log_loss', 'auc']
def prediction_labels(probabilities_maj):
    """
    Turn majority-class probabilities into crisp 0/1 labels.

    A probability above 0.5 yields label 1.  Rows sitting exactly on the
    0.5 boundary would otherwise all become 0, so when there is more than
    one such tie the first half is labelled 0 and the second half 1.

    Args:
        probabilities_maj (np.array): the majority probabilities

    Returns:
        np.array: the predicted labels, one per row
    """
    labels = np.where(probabilities_maj > 0.5, 1, 0)

    tie_indices = np.where(probabilities_maj == 0.5)[0]
    if len(tie_indices) <= 1:
        return labels

    half = len(tie_indices) // 2
    labels[tie_indices[:half]] = 0
    labels[tie_indices[half:]] = 1

    return labels
def calculate_atoms(test_labels, predicted_labels, min_label=1):
    """
    Compute the confusion-matrix entries used by the score functions.

    Args:
        test_labels (np.array): the ground-truth labels
        predicted_labels (np.array): the predicted labels
        min_label (int): the label of the minority class

    Returns:
        dict: the atoms 'tp', 'tn', 'fp', 'fn' as plain ints
    """
    correct = np.equal(test_labels, predicted_labels)
    minority = test_labels == min_label

    return {'tp': int(np.sum(correct & minority)),
            'tn': int(np.sum(correct & ~minority)),
            'fp': int(np.sum(~correct & ~minority)),
            'fn': int(np.sum(~correct & minority))}
def _log_score(multiplier, value):
"""
Calculates a log score and returs None if not computable.
Args:
multiplier (float): the multiplier
value (float): the value to take the log of
Returns:
float: the score
"""
if value > 0:
log_value = np.log(value)
else:
log_value = np.nan
if not np.isfinite(log_value):
return None
return float(multiplier * log_value)
def _log_score_div(numerator, denominator):
    """
    Return ``numerator * log(numerator / denominator)`` via
    :func:`_log_score`, or None when the denominator is not positive.

    Args:
        numerator (float): the numerator
        denominator (float): the denominator

    Returns:
        float or None: the score
    """
    if not denominator > 0:
        return None
    return _log_score(numerator, numerator / denominator)
def calculate_label_scores(atoms):
    """
    Calculate scores from labels.

    Extends *atoms* (which must contain 'tp', 'tn', 'fp', 'fn') in place
    with the standard confusion-matrix measures plus the log terms used by
    the uncertainty coefficient ('uc').

    Args:
        atoms (dict): the atomic scores

    Returns:
        dict: the same dict, extended with the label scores
    """
    atoms['p'] = atoms['tp'] + atoms['fn']  # number of positive samples
    atoms['n'] = atoms['fp'] + atoms['tn']  # number of negative samples
    atoms['acc'] = (atoms['tp'] + atoms['tn']) / (atoms['p'] + atoms['n'])
    atoms['sens'] = atoms['tp'] / atoms['p']  # sensitivity / recall
    atoms['spec'] = atoms['tn'] / atoms['n']  # specificity
    # positive/negative predictive value; defined as 0 when no predictions
    # of that class were made
    if atoms['tp'] + atoms['fp'] > 0:
        atoms['ppv'] = atoms['tp'] / (atoms['tp'] + atoms['fp'])
    else:
        atoms['ppv'] = 0.0
    if atoms['tn'] + atoms['fn'] > 0:
        atoms['npv'] = atoms['tn'] / (atoms['tn'] + atoms['fn'])
    else:
        atoms['npv'] = 0.0
    atoms['fpr'] = 1.0 - atoms['spec']  # false positive rate
    atoms['fdr'] = 1.0 - atoms['ppv']  # false discovery rate
    atoms['fnr'] = 1.0 - atoms['sens']  # false negative rate
    atoms['bacc'] = (atoms['sens'] + atoms['spec'])/2.0  # balanced accuracy
    atoms['gacc'] = float(np.sqrt(atoms['sens']*atoms['spec']))  # geometric mean accuracy
    atoms['f1'] = 2 * atoms['tp'] / (2 * atoms['tp'] + atoms['fp'] + atoms['fn'])
    # marginal sums of the confusion matrix
    tp_fp = (atoms['tp'] + atoms['fp'])
    tp_fn = (atoms['tp'] + atoms['fn'])
    tn_fp = (atoms['fp'] + atoms['tn'])
    tn_fn = (atoms['fn'] + atoms['tn'])
    # Matthews correlation coefficient; None when any margin is zero
    mcc_num = atoms['tp']*atoms['tn'] - atoms['fp']*atoms['fn']
    mcc_denom = float(np.prod([tp_fp, tp_fn, tn_fp, tn_fn]))
    if mcc_denom == 0:
        atoms['mcc'] = None
    else:
        atoms['mcc'] = float(mcc_num/np.sqrt(mcc_denom))
    # log terms feeding the 'uc' score below; each _log_score_div term is
    # None whenever the corresponding count is 0
    atoms['l'] = float((atoms['p'] + atoms['n']) * np.log(atoms['p'] + atoms['n']))
    atoms['ltp'] = _log_score_div(atoms['tp'], tp_fp * tp_fn)
    atoms['lfp'] = _log_score_div(atoms['fp'], tp_fp * tn_fp)
    atoms['lfn'] = _log_score_div(atoms['fn'], tp_fn * tn_fn)
    atoms['ltn'] = _log_score_div(atoms['tn'], tn_fp * tn_fn)
    atoms['lp'] = float(atoms['p'] * np.log(atoms['p']/(atoms['p'] + atoms['n'])))
    atoms['ln'] = float(atoms['n'] * np.log(atoms['n']/(atoms['p'] + atoms['n'])))
    items = [atoms['ltp'], atoms['lfp'], atoms['lfn'], atoms['ltn']]
    # uncertainty coefficient, only computable when all four terms exist
    if np.all([item is not None for item in items]):
        uc_num = atoms['l'] + np.sum(items)
        uc_denom = atoms['l'] + atoms['lp'] + atoms['ln']
        atoms['uc'] = uc_num / uc_denom
    else:
        atoms['uc'] = None
    atoms['informedness'] = atoms['sens'] + atoms['spec'] - 1.0
    atoms['markedness'] = atoms['ppv'] + atoms['npv'] - 1.0
    return atoms
def calculate_prob_scores(test_labels, probabilities, min_label=1):
    """
    Calculate the scores that use the raw probabilities.

    NOTE(review): 'p_top20' looks at the first 20% of *test_labels* as-is,
    which presumably assumes the rows are already sorted by decreasing
    predicted probability — confirm with the caller.

    Args:
        test_labels (np.array): the true labels
        probabilities (np.array): the probabilities
        min_label (int): the minority label

    Returns:
        dict: the probability scores
    """
    cutoff = max(int(0.2 * len(test_labels)), 1)
    top_minority = np.sum(test_labels[:cutoff] == min_label)
    return {
        'p_top20': float(top_minority / cutoff),
        'brier': float(np.mean((probabilities - test_labels) ** 2)),
        'log_loss': float(log_loss(test_labels, probabilities)),
        'auc': float(roc_auc_score(test_labels, probabilities)),
    }
def calculate_all_scores(test_labels, probabilities, min_label=1):
    """
    Calculate every label-based and probability-based score.

    Args:
        test_labels (np.array): the true labels
        probabilities (np.array): the probabilities
        min_label (int): the minority label

    Returns:
        dict: all scores
    """
    predicted = prediction_labels(probabilities)
    label_scores = calculate_label_scores(
        calculate_atoms(test_labels, predicted, min_label))
    prob_scores = calculate_prob_scores(test_labels, probabilities, min_label)
    return {**label_scores, **prob_scores}
| true |
8212441bd3ac519b43a0b6d7615dffccd35852cb | Python | albertyumol/kaizend | /session-5/challenge_5.py | UTF-8 | 1,526 | 3.015625 | 3 | [] | no_license | import random
import requests
from IPython import embed
from time import sleep
from bs4 import BeautifulSoup
# Index page whose anchors point at the per-item pages to visit.
BASE_URL = "https://sample-target-bucket-with-html-pages.s3-ap-southeast-1.amazonaws.com/group2/index.html"
# Bucket root used to resolve the relative hrefs found in the index.
base1 = "https://sample-target-bucket-with-html-pages.s3-ap-southeast-1.amazonaws.com"
def debug_input_output(function):
    """Decorator that prints [START]/[END] lines around each call."""
    from functools import wraps  # local import keeps the module namespace unchanged

    @wraps(function)  # preserve the wrapped function's __name__/__doc__
    def wrapper(*args, **kwargs):
        print(f'[START: {function.__name__}]')
        output = function(*args, **kwargs)
        print(f'[END: {function.__name__}]')
        return output
    return wrapper
@debug_input_output
def delay(seconds):
    """Sleep for *seconds* seconds, printing a notice first (the START/END
    lines come from the decorator)."""
    print(f"Sleeping for {seconds} second(s)")
    sleep(seconds)
def get_random_number():
    """Return a random integer between 1 and 3, inclusive."""
    return random.randint(1,3)
def extract_html_content(target_url):
    """Download *target_url* and return the response body as text."""
    # print(f'Downloading HTML content of {target_url}')
    response = requests.get(target_url)
    return response.text
# @debug_input_output
def extract_target_value_from_page(html_string):
    """Return the ``href`` attribute of every <a> tag in *html_string*."""
    soup = BeautifulSoup(html_string, 'html.parser')
    # list comprehension replaces the original manual append loop
    return [anchor.get('href') for anchor in soup.find_all('a')]
def extract_target_value_from_page1(html_string):
    """Return the text content of the first <div> in *html_string*."""
    parsed = BeautifulSoup(html_string, 'html.parser')
    return parsed.find('div').get_text()
def main():
    """Fetch the index page and print the absolute URL of every linked page."""
    htmls = extract_html_content(BASE_URL)
    links = extract_target_value_from_page(htmls)
    for page in links:
        # hrefs in the index are relative; prefix the bucket root
        target_page = base1 + page
        print(target_page)
if __name__ == "__main__":
    main()
| true |
5efd4b02de6fa435c075201b248c393e0fba57ae | Python | sohammistri/RealTimeObjectDetection | /maskNoMaskcapture.py | UTF-8 | 1,146 | 2.515625 | 3 | [] | no_license | from imageCapture import captureHelper
import sys
import os
import shutil

# Root folder that will hold the two class sub-directories of captured images.
target_dir = './Tensorflow/workspace/images/allImages'
mask_dir = 'masked'
no_mask_dir = 'nonMasked'

mask_path = os.path.join(target_dir, mask_dir)
non_mask_path = os.path.join(target_dir, no_mask_dir)


def _cleanup_and_exit(message):
    """Print *message*, delete both capture directories and stop the script.

    Factored out of the three identical inline cleanup blocks."""
    print(message)
    shutil.rmtree(mask_path)
    shutil.rmtree(non_mask_path)
    sys.exit()


try:
    # exist_ok=True: re-running the script against existing folders is fine.
    os.makedirs(mask_path, exist_ok=True)
    os.makedirs(non_mask_path, exist_ok=True)
    print("Directories made")
except OSError as error:
    print("Some error ocurred, cannot create directories")
    sys.exit()

print("Capturing images without mask(Press space to capture image and Esc to exit)")
# captureHelper returns 0 on failure (see imageCapture module).
if captureHelper(non_mask_path) == 0:
    _cleanup_and_exit("Some Error, please try again")

print("Now please press Y/y if you are ready to take snaps for masked image:")
res = input()
if res not in ('Y', 'y'):
    _cleanup_and_exit("Invalid response")

if captureHelper(mask_path) == 0:
    _cleanup_and_exit("Some Error, please try again")
| true |
e140c5c9b0bb56a3081a9d8c84f0cc8b81b335aa | Python | joaovictorino/calculadora | /main.py | UTF-8 | 723 | 3.453125 | 3 | [] | no_license | #!/bin/python3
import math
import os
import random
import re
import sys
from calculadora import Calculadora
if __name__ == '__main__':
    # Ask once which operators the calculator should accept.
    print("Informe as operações habilitadas (+) soma (-) subtração (*) multiplicação (/) divisão:")
    operadores = input()
    calculadora = Calculadora(operadores)
    seguir = "S"
    # Keep calculating until the user answers anything other than "S".
    while seguir == "S":
        print("Informe valor 1:")
        primeiro = int(input())
        print("Informe valor 2:")
        segundo = int(input())
        print("Informe a operação:")
        operacao = input()
        resultado = calculadora.calcular(primeiro, segundo, operacao)
        print("Resultado: " + str(resultado))
        print("Desejar continuar? S/N")
        seguir = input()
| true |
862af09cf16b8dcddb25bb37df6d7c181e9975a7 | Python | NanoporeAna/EEGBaseDL | /data/load_delta_data10.py | UTF-8 | 4,474 | 2.875 | 3 | [] | no_license | import scipy.io as sio
import numpy as np
"""
alpha_data(num)表示去第num个经过alpha滤波的数据
return 对应mat数据集的batch
"""
## 本来是想将每个文件的路径放到一个txt文件中,通过列表提取每行赋的路径,但一直报错,'' 与""的问题--已解决
with open("H:/SpaceWork/EEG_Work/Delta_path10") as file_object:
lines = file_object.readlines() # 从文件中读取每一行,将获取的内容放到list里
delta_path = []
for line in lines:
delta_path.append(line.strip()) # 将每行地址追加到一个数组里
#
###怎么将mat数据分成单个500*9的数据矩阵,将128*9矩阵放到一个batch里##
def delta_data(num):
    """Load the *num*-th delta-filtered recording and split it into
    non-overlapping 5000-sample batches.

    Returns:
        np.ndarray stacking one (5000, n_channels) slice per batch; trailing
        samples that do not fill a whole batch are dropped.
    """
    recording = sio.loadmat(delta_path[num])['data']  # (n_samples, n_channels)
    n_batches = recording.shape[0] // 5000
    batches = [recording[i * 5000:(i + 1) * 5000] for i in range(n_batches)]
    return np.array(batches)
def load_delta_data(path, count):
    """Return channel *count* of the *path*-th recording as a 1-D array.

    path: index into the module-level ``delta_path`` list,
    count: channel (column) to extract."""
    recording = sio.loadmat(delta_path[path])['data']
    return np.array(recording[:, count])
def load_delta_data_pool1(path,count):
    """Average-pool one channel of a recording: mean over 3-sample windows
    taken every 2 samples (windows overlap by one sample), then append the
    last raw sample.
    """
    # conv2 can reuse this same skeleton
    # path: index of the recording, count: channel to process
    temp = []
    load_data = sio.loadmat(delta_path[path])
    load_matrix = load_data['data']
    shape = load_matrix.shape[0]
    delta_data = load_matrix[0:shape,count]
    for i in range(0,int(shape/2)-1):
        # 3-tap mean over samples i*2 .. i*2+2 (stride 2 -> ~2x downsample)
        tem = (delta_data[i*2]+delta_data[i*2+1]+delta_data[i*2+2])/3
        temp.append(tem)
    temp.append(delta_data[shape-1])  # keep the final raw sample
    data =np.array(temp)
    return data
def load_delta_data_pool2(path,count):
    """Average-pool one channel of a recording: mean over 5-sample windows
    taken every 4 samples (overlapping by one sample), then append the last
    raw sample.
    """
    # path: index of the recording, count: channel to process
    temp = []
    load_data = sio.loadmat(delta_path[path])
    load_matrix = load_data['data']
    shape = load_matrix.shape[0]
    delta_data = load_matrix[0:shape,count]
    for i in range(0,int(shape/4)-1):
        # 5-tap mean over samples i*4 .. i*4+4 (stride 4 -> ~4x downsample)
        tem = (delta_data[i*4]+delta_data[i*4+1]+delta_data[i*4+2]+delta_data[i*4+3]+delta_data[i*4+4])/5
        temp.append(tem)
    temp.append(delta_data[shape-1])  # keep the final raw sample
    data =np.array(temp)
    return data
def load_delta_data_pool3(path, count):
    """Average-pool one channel of a recording: mean over 9-sample windows
    taken every 8 samples (overlapping by one sample), then append the last
    raw sample.

    path: index of the recording, count: channel to process.
    """
    channel = sio.loadmat(delta_path[path])['data'][:, count]
    n_samples = channel.shape[0]
    pooled = []
    for i in range(n_samples // 8 - 1):
        # 9 consecutive samples starting at i*8 (stride 8 -> ~8x downsample)
        pooled.append(sum(channel[i * 8 + k] for k in range(9)) / 9)
    pooled.append(channel[n_samples - 1])  # keep the final raw sample
    return np.array(pooled)
def load_delta_data_pool4(path, count):
    """Downsample one channel of a recording: for every 5000-sample chunk,
    emit 312 non-overlapping 16-sample means (312 * 16 = 4992) plus the mean
    of the trailing 8 samples of that chunk.

    Bug fix: the original inner loop indexed the signal with ``i * 16`` only,
    ignoring the chunk offset ``j * 5000`` (which the trailing-8 computation
    did use), so every chunk re-averaged the first 4992 samples of the
    recording instead of its own.

    path: index of the recording, count: channel to process.
    """
    channel = sio.loadmat(delta_path[path])['data'][:, count]
    n_chunks = channel.shape[0] // 5000
    pooled = []
    for j in range(n_chunks):
        base = j * 5000
        for i in range(312):
            start = base + i * 16
            pooled.append(sum(channel[start + k] for k in range(16)) / 16)
        # trailing 8 samples of the chunk (offsets 4992..4999)
        pooled.append(sum(channel[base + 4992 + k] for k in range(8)) / 8)
    return np.array(pooled)
| true |
7b82b2105726ed712ad1795efc2531260b0693f1 | Python | swapnilrupnawar/bmitasks | /bmitask.py | UTF-8 | 1,632 | 3.21875 | 3 | [] | no_license |
import numpy as np
import pandas as pd
data={"Gender": "Male", "HeightCm": 171, "WeightKg": 96 }, { "Gender": "Male", "HeightCm": 161,
"WeightKg": 85 }, { "Gender": "Male", "HeightCm": 180, "WeightKg": 77 },
{ "Gender": "Female", "HeightCm": 166, "WeightKg": 62}, {"Gender": "Female", "HeightCm": 150, "WeightKg": 70}, {"Gender": "Female", "HeightCm": 167, "WeightKg": 82}
data=pd.DataFrame(data)
print(data)
BMI = data['WeightKg']/((data['HeightCm']/100)**2)
type(BMI)
print(BMI)
for i in BMI:
if i< 18.4:
print('underweight')
elif i<24.9:
print('Normal weight')
elif i<29.9:
print('overweight')
elif i<34.9:
print('Moderately obese')
elif i<29.9:
print('Severely obese')
else:
print('very severely obese')
print(data)
data['BMI'] = BMI
print(data)
data['Gender']
data['BMI']
def category(x):
for i in BMI:
if i< 18.4:
print('underweight')
elif i<24.9:
print('Normal weight')
elif i<29.9:
print('overweight')
elif i<34.9:
print('Moderately obese')
elif i<39.9:
print('Severely obese')
else:
print('very severely obese')
category(BMI)
data['overweight']= data['BMI']<29.9
data['underweight']=data['BMI']<18.9
data['Normalweight']=data['BMI']<24.9
data['Moderetaly obese']=data['BMI']<34.9
data['Severely obese']=data['BMI']<39.9
#pd.concat(data['overweight'],data['underweight'],data['Normalweight'],data['Moderetaly obese'],data['Severely obese'])
print(data[data['overweight']>0])
| true |
00c2000d33b954b8d352425a0bdcc2ef6bf1aa47 | Python | mxtdsg/interview | /sortColors.py | UTF-8 | 312 | 3.3125 | 3 | [] | no_license | ####
#
# sort 3 colors 0 1 2 so same colors are adjacent.
#
# leetcode 75.
#
####
def sortColors(nums):
    """Sort *nums*, containing only 0s, 1s and 2s, in place in one pass.

    Every scanned cell is first overwritten with 2; 1s and 0s are then
    re-written at the growing boundaries ``one_end`` and ``zero_end``
    (``zero_end <= one_end <= k`` holds throughout the scan).
    """
    zero_end = one_end = 0
    for k in range(len(nums)):
        current = nums[k]
        nums[k] = 2
        if current < 2:
            nums[one_end] = 1
            one_end += 1
        if current == 0:
            nums[zero_end] = 0
            zero_end += 1
902b0a4994fcbb5030dae36d4cdb2f94b356ed7b | Python | cfarrow/diffpy.srfit | /diffpy/srfit/sas/sasparameter.py | UTF-8 | 2,442 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python
########################################################################
#
# diffpy.srfit by DANSE Diffraction group
# Simon J. L. Billinge
# (c) 2008 Trustees of the Columbia University
# in the City of New York. All rights reserved.
#
# File coded by: Chris Farrow
#
# See AUTHORS.txt for a list of people who contributed.
# See LICENSE.txt for license information.
#
########################################################################
"""SAS profile generator.
The SASGenerator class wraps a sans.models.BaseModel object as a
ProfileGenerator.
"""
__all__ = ["SASParameter"]
from diffpy.srfit.fitbase.parameter import Parameter
class SASParameter(Parameter):
    """srfit Parameter backed by a parameter of a sans BaseModel.

    Reads and writes are delegated to the wrapped model so that the model
    and the srfit recipe always agree on the value.

    Attributes
    name    --  A name for this Parameter.
    const   --  A flag indicating whether this is considered a constant.
    _value  --  The value of the Parameter. Modified with 'setValue'.
    value   --  Property for 'getValue' and 'setValue'.
    constrained --  A flag indicating if the Parameter is constrained
                (default False).
    bounds  --  A 2-list defining the bounds on the Parameter.
    _model  --  The BaseModel to which the underlying parameter belongs.
    _parname    --  The name of the underlying BaseModel parameter.

    """

    def __init__(self, name, model, parname=None):
        """Wrap *model*'s parameter *parname* as a srfit Parameter.

        name    --  Name of the Parameter
        model   --  The BaseModel to which the underlying parameter belongs
        parname --  Name of parameter used by the model. If this is None
                    (default), then name is used.

        """
        self._parname = parname or name
        self._model = model
        Parameter.__init__(self, name, model.getParam(self._parname))
        return

    def getValue(self):
        """Retrieve the current value from the wrapped model."""
        return self._model.getParam(self._parname)

    def setValue(self, value):
        """Push *value* to the wrapped model, notifying observers; a write
        is skipped when the value is unchanged."""
        if value == self.getValue():
            return self
        self._model.setParam(self._parname, value)
        self.notify()
        return self

# End of class SASParameter
| true |
61fb5c4aceb829ee3b2bae554231a09e93306ec2 | Python | GianlucaPal/Python-crash-course | /chap2/name.py | UTF-8 | 338 | 3.71875 | 4 | [] | no_license | name= 'ada love'
# String-method demo: title/upper/lower casing of ``name`` (set above).
print(name.title())
print(name.upper())
print(name.lower())
# f-string composition; strip() removes the stray leading space.
firstName= ' ada'
lastName= 'lovelace'
fullName=f'{firstName.strip()} {lastName}'
print(fullName)
print(f'\tHello, \n\t{fullName.title()}!')
print(f'Languages:\n\tPython\n\tJavaScript\n\tC')
# Underscores in numeric literals are digit separators: prints 1422223323.
print(14_22223_323)
# Tuple unpacking via multiple assignment.
x, y, z= 1,2,3
print(f'{x} {y} {z}')
bc98e0e58f6ff2ee451664e61fbc2dfba720f3b3 | Python | CoEDL/elan-helpers | /elan-character-spacer/elan-insert-spaces.py | UTF-8 | 1,110 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/python3
import xml.etree.ElementTree as ET
import glob
import os
from pathlib import Path
def spaceMe(file_):
    """Rewrite an ELAN .eaf file so every annotation on the 'Phrase' tier has
    a space between each character; the result is written under ./output/
    with the same base name.
    """
    print(file_)
    # Which tier?
    tier_name = 'Phrase'
    tree = ET.parse(file_)
    root = tree.getroot()
    for tier in root.iter('TIER'):
        if tier.attrib['TIER_ID'] == tier_name:
            for annotation in tier.iter('ANNOTATION_VALUE'):
                # Get the original text.
                # OPTION: strip existing spaces so words don't end up with double spaces
                source_text = annotation.text.replace(" ", "")
                # OR: use this line instead to keep double spaces between words
                # source_text = annotation.text
                # join() puts one space between every character
                insert_spaces = " ".join(source_text)
                # update the annotation
                annotation.text = str(insert_spaces)
                # feedback
                print("done")
    # Save the file to the output dir (assumes ./output exists — TODO confirm)
    tree.write(os.path.join("output", os.path.basename(file_)))
def main():
    """Apply spaceMe to every .eaf file found recursively under ./input."""
    for path in Path('./input').rglob('*.eaf'):
        spaceMe(path)
if __name__ == "__main__":
    main()
| true |
6b4b7fc1a09de5bb72cd8bcb99d43e1691672faa | Python | jvanlier/Python2to3Workshop | /test.py | UTF-8 | 813 | 3.078125 | 3 | [] | no_license | import unittest
from example import hello_world, split_dollar_bills, dont_pass_42, SomeException
class TestExample(unittest.TestCase):
    """Unit tests for the helpers exported by the ``example`` module."""
    def test_hello_world(self):
        """hello_world returns the exact greeting string."""
        assert hello_world() == "hello world!"
    def test_split_dollar_bills(self):
        """Two representative (persons, bills) -> leftover cases."""
        self._split_dollar_bill_helper(n_persons=10, n_bills=19, expected=1)
        self._split_dollar_bill_helper(n_persons=20, n_bills=41, expected=2)
    def _split_dollar_bill_helper(self, n_persons, n_bills, expected):
        """Shared assertion with a descriptive failure message."""
        result = split_dollar_bills(n_persons, n_bills)
        self.assertTrue(result == expected,
                        "Expected {exp} but got {result}".
                        format(exp=expected, result=result))
    def test_dont_pass_42(self):
        """Passing 42 must raise the project's SomeException."""
        with self.assertRaises(SomeException):
            dont_pass_42(42)
| true |
0015299090fe56c8d6eeefc51e429ff68e1ebfd6 | Python | charlieyyy/News-Cluster-Algorithm | /skr/service/cluster.py | UTF-8 | 4,101 | 2.84375 | 3 | [] | no_license | import datetime
import dateparser
from skr.model.article import Article
from skr.common import algorithm
from skr.model.cluster import Cluster
class ClusterService():
    """Cluster one day's news articles and persist the result."""
    def __init__(self, time_range=None):
        """
        ClusterService init
        @param time_range: the day to cluster, formatted like '2018-08-22';
            defaults to today when omitted
        @type time_range: str of day
        @return: None
        @rtype: None
        """
        if not time_range:
            self.time_range = str(datetime.date.today())
        else:
            self.time_range = time_range
        self.base_list = []
    def get_day_news(self):
        """
        Fetch the day's articles as plain dicts.
        @return: list of {'title', 'content', 'source', 'url', 'date'} dicts
        @rtype: list of dict
        """
        start_time = dateparser.parse(self.time_range)
        end_time_str = f'{self.time_range} 23:59:59'
        end_time = dateparser.parse(end_time_str)
        # Original note: if self.time_range is 2018-08-22, the query is meant
        # to cover Los Angeles time `2018-08-21 05:00` ~ `2018-08-22 05:00`
        # (US West, UTC-8).
        # NOTE(review): the code only shifts by -3 hours, which does not match
        # the UTC-8 window described above — confirm which is intended.
        start_time = start_time + datetime.timedelta(hours=-3)
        end_time = end_time + datetime.timedelta(hours=-3)
        query = {'date__gte': start_time, 'date__lte': end_time}
        article_list = Article.objects(**query)
        data = list(article_list)
        data = [{'title': d['title'], 'content': d['content'],
                 'source': d['source'], 'url': d['url'], 'date': d['date']}
                for d in data]
        return data
    def run_cluster(self):
        """
        Run the clustering algorithm on the day's news.
        @return: the algorithm's (row_data, top_cluster, X, features) tuple —
            raw articles, clustering result, tf-idf vectors and keywords
        """
        data = self.get_day_news()
        return algorithm.get_cluster_news(data)
    def save_to_db(self):
        """
        Persist the clustering result: one Cluster document per topic,
        looked up (and deduplicated) by the representative article's url.
        @return: None
        @rtype: None
        """
        row_data, top_cluster, X, features = self.run_cluster()
        for topic in top_cluster:
            # the first index of a topic is its representative article
            base_index = topic[0]
            base_title = row_data[base_index]['title']
            base_url = row_data[base_index]['url']
            base_source = row_data[base_index]['source']
            base_date = row_data[base_index]['date']
            base_content = row_data[base_index]['content']
            cluster = Cluster.get_by_topic_url(base_url)
            # day_ = base_date.strftime('%Y%m%d')
            day_ = dateparser.parse(self.time_range).strftime('%Y%m%d')
            if not cluster:
                cluster = Cluster(topic={'title': base_title,
                                         'url': base_url,
                                         'source': base_source,
                                         'source_cn': base_source,
                                         'date': base_date
                                         })
            cluster.summary = ''
            cluster.content = base_content
            cluster.type = 'Day'
            cluster.parent_id = day_  # clusters are grouped under their day
            cluster.tag = []
            news_list = []
            for item in topic:
                titles = row_data[item]['title']
                url = row_data[item]['url']
                source = row_data[item]['source']
                date = row_data[item]['date']
                news = {
                    'title': titles,
                    'url': url,
                    'source': source,
                    'source_cn': source,
                    'date': date
                }
                news_list.append(news)
            cluster.news = news_list
            cluster.news_count = len(topic)
            cluster.added = datetime.datetime.now()
            cluster.save()
def save_cluster_data():
    """
    Save today's clustering result to the database.
    @return: None
    @rtype: None
    """
    service = ClusterService()
    service.save_to_db()
if __name__ == '__main__':
    # Standalone entry point: connect to the local MongoDB first.
    import mongoengine
    mongoengine.connect('skr', host='localhost')
    save_cluster_data()
| true |
08eecf6da0efbc14beb257db0611e21d4389b619 | Python | Lauren-dot/deargrief | /routes.py | UTF-8 | 4,506 | 2.59375 | 3 | [] | no_license | from flask import request, render_template, url_for, flash, redirect
import random

from flask_login import login_user, current_user, logout_user

from deargrief import app, db, Bcrypt
from deargrief.forms import RegistrationForm, LogInForm #, NewJournalForm, NewEntryForm
from deargrief.basicmodel import Bereaved #, JournalEntry
#Tells the browser what to do when the client requests the "/" (root) page; And! Two routes can be handled by the same function
@app.route("/")
@app.route("/home")
def greeting():
return render_template("home.html") #Tells computer to look in the "template" folder in this directory for a file that matches the file inside the ()
@app.route("/about")
def about():
return render_template("about.html", title="About")
@app.route("/process")
def process():
return render_template("process.html", title="Process")
@app.route("/register", methods=["GET", "POST"])
def register():
if current_user.is_authenticated:
flash("An account already exists for this email; please log in.")
return redirect(url_for("login"))
form = RegistrationForm() #Instance of the Registration Form (check login.py for class)
#Note: I migragted and integrated the guts of my "create_bereaved" function here; it was not connecting consistently when I called it from crud.py
if form.validate_on_submit():
hashed_password = Bcrypt.generate_password_hash(form.password.data).decode("utf-8") #.decode turns this into a string (instead of dealing with bytes)
bereaved = Bereaved(
id=random.uniform(0.1, 10000.1),
firstname=form.firstname.data,
lastname=form.lastname.data,
email=form.email.data,
password=hashed_password,
)
db.session.add(bereaved)
db.session.commit()
# Created the user in the database
flash(f"Welcome {form.firstname.data}! Your account has been created. Please log in!", "success") #creates temp window with message; bootstrap class of message: success
return redirect(url_for("login"))
return render_template("register.html", title="Register", form=form)
@app.route("/login", methods=["GET", "POST"])
def login():
form = LogInForm() #Instance of the Log In Form (check login.py for class)
if form.validate_on_submit():
bereaved = Bereaved.query.filter_by(email=form.email.data).first()
if bereaved and Bcrypt.check_password_hash(bereaved.password, form.password.data):
login_user(bereaved, remember=form.remember.data)
return redirect(url_for("my_account"))
else:
flash("Oh no! That did not work. Please check your email and password.")
return render_template("login.html", title="Log In", form=form)
# @app.route("/my_account")
# @login_required
# def welcome_to_main_account():
# entry = JournalEntry.query.all()
# return render_template("my_account.html", title="Hello, {{ bereaved }}", methods=["GET", "POST"])
# @app.route("/new_journal_registration")
# @login_required
# def register_new_journal():
# form = NewJournalForm()
# if form.validate_on_submit():
# create_deceased()
# flash("Your new grief process has been started. Thank you for taking the next step on your path.", "success")
# return render_template(url_for("my_account"))
# return render_template("new_journal_registration.html", title="New Journal Registration", methods=["GET", "POST"])
# @app.route("/daily_journal_entry", methods=["GET", "POST"])
# @login_required
# def new_entry():
# form = NewEntryForm()
# if form.validate_on_submit():
# # entry = create_journal_entry
# flash("Thank you for making another step on your journey through the grief process.", "success")
# return redirect(url_for("my_account"))
# return render_template("daily_journal_entry.html")
# #To Do
# #Match number of days to database containing the day-by-day prompts
# #Calendar counter corresponds to the index of days of journaling
# #Each calendar counter starts with a unique deceased and bereaved combination
# #return render_template("daily_journal_entry.html", prompts=prompts)
# #when database "prompts" and html are ready, take out the first ) to access the database
# #and be able to pass it to the html file
@app.route("/logout")
def logout():
logout_user()
return redirect(url_for("home")) | true |
7140ca9494eab48da4562bfcd457281c86f3795f | Python | WatchdogeDAO/opendata-backup | /argentina/generate_snapshot.py | UTF-8 | 1,237 | 3.078125 | 3 | [] | no_license | import requests
import csv
import os.path
from os import path
# Checks if an url is a downloadable file
def is_downloadable(url):
    """Return True when *url* appears to serve a downloadable file, i.e. its
    Content-Type is neither text nor HTML.

    NOTE(review): certificate verification is disabled (verify=False),
    matching the rest of this script.
    """
    h = requests.head(url, allow_redirects=True, verify=False)
    # Bug fix: headers.get() returns None when the Content-Type header is
    # missing, which crashed the .lower() calls below with AttributeError.
    content_type = (h.headers.get('content-type') or '').lower()
    if 'text' in content_type:
        return False
    if 'html' in content_type:
        return False
    return True
# Read the CSV listing the dataset links.
# NOTE(review): ``f`` is never closed and ``is_downloadable`` above is never
# called — consider a with-block and either using or removing the helper.
f = open('distribuciones.csv')
reader = csv.reader(f)
# Grab the header row so the loop below only sees data rows.
headers = next(reader)
# Collect every download link (column 15 = URL, column 13 = file name).
download_links = []
for row in reader:
    download_links.append([row[15], row[13]])
# Create the output sub-folder ('new_data') if needed.
if not path.exists('new_data'):
    os.mkdir('new_data')
# Download every dataset into the output folder, skipping existing files.
for i, link in enumerate(download_links):
    file_name = f'{i}_{link[1]}'
    file_path = os.path.join('new_data', file_name)
    if not path.exists(file_path):
        try:
            r = requests.get(link[0], allow_redirects=True, verify=False)
            open(file_path, 'wb').write(r.content)
        except:
            print(f'Could not save url: {link[0]}')
| true |
2280850981b6ebbd5b510b09e0102b24fbac6eb2 | Python | TankOs/donabot | /sopel/donabot.py | UTF-8 | 3,031 | 2.65625 | 3 | [] | no_license | """
Donation Bot module.
Configuration:
[donabot]
mode = <sandbox or live>
client_id = <PayPal client ID>
client_secret = <PayPal client secret>
web_endpoint = <Flask site endpoint (host:port)>
"""
from paypalrestsdk import Api, Payment, ResourceNotFound
import sopel.module
from pprint import pprint
def create_api(config):
    """Build a PayPal REST ``Api`` client from the [donabot] config section."""
    credentials = {
        "mode": config.mode,
        "client_id": config.client_id,
        "client_secret": config.client_secret,
    }
    return Api(credentials)
def setup(bot):
    """Sopel module setup hook; nothing to initialise for this module."""
    pass
@sopel.module.interval(60 * 60 * 24)
def remind_people(bot):
    """Post a donation reminder to the configured channel once every 24 h."""
    message = (
        "*** Like this chat? Want to support it? Type \".donate 5\" " +
        "(or any other amount you like)!"
    )
    bot.msg(bot.config.donabot.channel, message)
@sopel.module.rule(r"\.donate (\d+)")
def donate(bot, trigger):
    """Handle ``.donate <amount>``: create a PayPal payment authorization
    for <amount> EUR and PM the sender the URL where they can approve it.
    """
    api = create_api(bot.config.donabot)
    currency = "EUR"
    amount = float(trigger.group(1))
    bot.reply("Just a moment, contacting PayPal...")
    # PayPal redirects the donor to the companion Flask site (web_endpoint);
    # the bot's nick is threaded through so the site can message it back.
    return_url = "http://{}/return?nickname={}" \
        .format(bot.config.donabot.web_endpoint, bot.nick)
    cancel_url = "http://{}/cancel".format(bot.config.donabot.web_endpoint)
    payment = Payment({
        "intent": "authorize",
        "payer": {"payment_method": "paypal"},
        "redirect_urls": {
            "return_url": return_url,
            "cancel_url": cancel_url,
        },
        "transactions": [
            {
                "description": "Donation for Stefan Schindler",
                "amount": {
                    "total": amount,
                    "currency": currency,
                },
            },
        ],
    }, api=api)
    create_result = payment.create()
    if create_result is True:
        # among the links PayPal returns, the one with method REDIRECT is
        # the approval URL the donor must visit
        links = [link for link in payment.links if link.method == "REDIRECT"]
        if len(links) < 1:
            bot.msg(trigger.nick, "An error occured. :-(")
        else:
            link = links[0]
            bot.msg(
                trigger.nick,
                "Please visit the following URL to authorize the donation: {}" \
                .format(link.href),
            )
    else:
        bot.msg(trigger.nick, "Payment couldn't be created.")
@sopel.module.rule(r"DBP-(.+)")
def finish_payment(bot, trigger):
    """Complete a payment whose id was pasted back as ``DBP-<id>``: execute
    it, voice (+v) the donor in the channel and announce the amount.
    """
    api = create_api(bot.config.donabot)
    payment_id = trigger.group(1)
    bot.msg(trigger.nick, "Hold on, checking your payment...")
    try:
        payment = Payment.find(payment_id, api=api)
    except ResourceNotFound:
        payment = None  # unknown id — reported in the else branch below
    if payment is not None:
        payer_id = payment["payer"]["payer_info"]["payer_id"]
        result = payment.execute({"payer_id": payer_id})
        if result is True:
            amount = float(payment["transactions"][0]["amount"]["total"])
            currency = payment["transactions"][0]["amount"]["currency"]
            channel = bot.config.donabot.channel
            # raw IRC MODE command: grant the donor voice in the channel
            bot.write(("MODE", channel, "+v", trigger.nick))
            bot.msg(
                channel,
                "*** {} just donated {:.2f} {}!".format(trigger.nick, amount, currency)
            )
            bot.msg(trigger.nick, "Thank you for your support!")
        else:
            bot.msg(trigger.nick, "Unable to execute the payment. :-(")
    else:
        bot.msg(trigger.nick, "I'm sorry, but I can't find your payment. :-(")
| true |
610d6bb1c41e703192e450131870a61d52b609c9 | Python | aodag/python-async-sample | /app.py | UTF-8 | 503 | 2.671875 | 3 | [
"MIT"
] | permissive | # -*- coding:utf-8 -*-
import os
import asyncio
from aiohttp import web
async def greeting():
    """Return "Hello" after a one-second pause.

    NOTE(review): this coroutine is never referenced elsewhere in the module.
    """
    await asyncio.sleep(1)
    return "Hello"
async def handle(request):
    """Respond with "Hello, {name}", where {name} is the URL path segment
    (default "Anonymous")."""
    who = request.match_info.get('name', "Anonymous")
    greeting_text = "Hello, " + who
    return web.Response(body=greeting_text.encode('utf-8'))
def main():
    """Build the aiohttp app, route GET /{name} to handle, serve on $PORT."""
    app = web.Application()
    app.router.add_route('GET', '/{name}', handle)
    port = int(os.environ['PORT'])
    web.run_app(app, port=port)
if __name__ == '__main__':
    # Bug fix: the guard compared against '__main' (missing trailing
    # underscores), so main() never ran when the script was executed.
    main()
| true |
be206de4d4bf847b9ffb07da18af528de5ce2b9f | Python | salceson/mro | /lab2/taskA.py | UTF-8 | 3,233 | 3 | 3 | [] | no_license | # coding=utf-8
from sklearn.decomposition import PCA, KernelPCA
import numpy as np
from matplotlib import pyplot as plt
__author__ = 'Michał Ciołczyk'
def generate_dataset_circles(n):
    """Sample ~n 2-D points in two concentric regions: an inner disk of
    radius 0.4 (class 0, ~3n/7 points) and an annulus from 0.4 to 1
    (class 1, ~4n/7 points).

    Fix: the original call sites passed (r_max, r_min) swapped relative to
    the helper's signature, so np.random.uniform received low > high; the
    bounds are now ordered correctly (same distribution, well-defined
    behavior).
    """
    def _ring(count, r_min, r_max, label):
        count = int(count)
        thetas = np.random.uniform(0, 2 * np.pi, count)
        radii = np.random.uniform(r_min, r_max, count)
        pts = np.array([[radii[k] * np.cos(thetas[k]),
                         radii[k] * np.sin(thetas[k])] for k in range(count)])
        return pts, [label] * count

    X_inner, y_inner = _ring(3 * n / 7, 0.0, 0.4, 0)
    X_outer, y_outer = _ring(4 * n / 7, 0.4, 1.0, 1)
    return np.concatenate((X_inner, X_outer)), np.concatenate((y_inner, y_outer)).ravel()
def generate_cross_shape_dataset(n):
    """Sample n points along the two diagonals y = ±x with uniform jitter in
    [-0.1, 0.1): the first half (class 0) on y = x, the rest (class 1) on
    y = -x."""
    xs = np.random.uniform(-1, 1, n)
    jitter = np.random.uniform(-0.1, 0.1, n)
    half = n // 2
    upper = np.array([[xs[k], xs[k] + jitter[k]] for k in range(half)])
    lower = np.array([[xs[k], -xs[k] + jitter[k]] for k in range(half, n)])
    labels = np.concatenate(([0] * half, [1] * (n - half))).ravel()
    return np.concatenate((upper, lower)), labels
def draw_dataset(X, y, ax, description):
    """Scatter-plot the two-class dataset X (n x 2) on *ax*: class 0 in red,
    class 1 in blue, with *description* as the title."""
    ax.set_title(description)
    colors = {0: 'r', 1: 'b'}
    for i in range(2):
        idx = np.where(y == i)
        ax.scatter(X[idx, 0], X[idx, 1], c=colors[i])
def process_dataset(X, y, ax, pca=False, kernel=None, gamma=None, description=''):
    """Plot the dataset on *ax*.

    With pca=False: draw the raw data and overlay the two principal
    directions as yellow arrows scaled by explained variance.
    With pca=True: draw the data projected onto the first two components of
    a (Kernel)PCA, using *kernel*/*gamma* when given.
    """
    if not pca:
        pca_obj = PCA(2)
        draw_dataset(X, y, ax, description)
        pca_obj = pca_obj.fit(X)
        mean = pca_obj.mean_
        components = pca_obj.components_
        variances = pca_obj.explained_variance_ratio_
        # component endpoints offset from the data mean
        vec1 = [mean[i] + components[0][i] for i in range(len(mean))]
        vec2 = [mean[i] + components[1][i] for i in range(len(mean))]
        # NOTE(review): the arrow deltas use vec (= mean + component) rather
        # than the bare component, so the mean enters twice — confirm intended.
        ax.arrow(mean[0], mean[1], 2 * vec1[0] * variances[0], 2 * vec1[1] * variances[0], fc='y', ec='y')
        ax.arrow(mean[0], mean[1], 2 * vec2[0] * variances[1], 2 * vec2[1] * variances[1], fc='y', ec='y')
    else:
        kwargs = {'gamma': gamma} if gamma else {}
        pca_obj = KernelPCA(2, kernel=kernel, **kwargs) if kernel else PCA(2)
        X_transformed = pca_obj.fit_transform(X)
        draw_dataset(X_transformed, y, ax, description)
if __name__ == '__main__':
    # Row 1: concentric-circles dataset, raw and under each PCA variant.
    X1, y1 = generate_dataset_circles(500)
    fig, (row1, row2) = plt.subplots(2, 4, figsize=(15, 15), dpi=80)
    (ax1, ax2, ax3, ax4) = row1
    process_dataset(X1, y1, ax1, description='Set 1 (no PCA)')
    process_dataset(X1, y1, ax2, pca=True, description='Set 1 (Linear PCA)')
    process_dataset(X1, y1, ax3, pca=True, kernel='cosine', description='Set 1 (cosine PCA)')
    process_dataset(X1, y1, ax4, pca=True, kernel='rbf', gamma=3, description='Set 1 (rbf PCA)')
    # Row 2: cross-shaped dataset, same four treatments.
    X2, y2 = generate_cross_shape_dataset(500)
    (ax1, ax2, ax3, ax4) = row2
    process_dataset(X2, y2, ax1, description='Set 2 (no PCA)')
    process_dataset(X2, y2, ax2, pca=True, description='Set 2 (Linear PCA)')
    process_dataset(X2, y2, ax3, pca=True, kernel='cosine', description='Set 2 (cosine PCA)')
    process_dataset(X2, y2, ax4, pca=True, kernel='rbf', gamma=3, description='Set 2 (rbf PCA)')
    plt.savefig('taskA.png')
    plt.show()
112b880034d691339eb1d5eb8fd1d8cbd54ffee6 | Python | deepabalan/googles_python_class | /dict_and_files/1.py | UTF-8 | 216 | 2.96875 | 3 | [] | no_license |
# Python 2 demo of basic dict operations (note the print *statements*).
# NOTE(review): the variable shadows the built-in ``dict`` type.
dict = {}
dict['a'] = 'alpha'
dict['g'] = 'gamma'
dict['o'] = 'omega'
print dict
print dict['a']
# Overwrites the existing value for key 'a'.
dict['a'] = 6
print dict
# Membership test applies to keys.
print 'a' in dict
#print dict['z']
# Guarded lookup avoids the KeyError a direct dict['z'] would raise.
if 'z' in dict:
  print dict['z']
# get() returns None for a missing key instead of raising.
print dict.get('z')
| true |
88f5c5588b3230ddf4838394af933c983ccfd0da | Python | yuchen-he/algorithm016 | /leetcode/editor/cn/[103]二叉树的锯齿形层序遍历.py | UTF-8 | 1,532 | 3.84375 | 4 | [] | no_license | # 给定一个二叉树,返回其节点值的锯齿形层序遍历。(即先从左往右,再从右往左进行下一层遍历,以此类推,层与层之间交替进行)。
#
# 例如:
# 给定二叉树 [3,9,20,null,null,15,7],
#
#
# 3
# / \
# 9 20
# / \
# 15 7
#
#
# 返回锯齿形层序遍历如下:
#
#
# [
# [3],
# [20,9],
# [15,7]
# ]
#
# Related Topics 栈 树 广度优先搜索
# 👍 396 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
        """BFS level-order traversal that alternates direction per level.

        A deque holds the current level; ``count`` tracks the depth so
        odd levels (root level is 0) are reversed, producing
        left-to-right, then right-to-left, alternating.  For the tree in
        the header this yields the expected [[3], [20, 9], [15, 7]].

        Fix: the original reversed levels with ``count % 2 == 0``, i.e.
        the even levels, yielding [[3], [9, 20], [7, 15]] -- the opposite
        of the zigzag order shown in the problem statement above.
        """
        if not root: return []
        res = []
        q = collections.deque()
        q.append(root)
        count = 0
        while q:
            layer = []
            # Drain exactly the nodes of the current level, queuing
            # children left-to-right for the next level.
            for _ in range(len(q)):
                pre = q.popleft()
                layer.append(pre.val)
                if pre.left: q.append(pre.left)
                if pre.right: q.append(pre.right)
            # Reverse odd levels for the zigzag order.
            if count % 2 == 1: layer.reverse()
            res.append(layer)
            count += 1
        return res
# leetcode submit region end(Prohibit modification and deletion)
| true |
c06e70d2ebb007d393431c0af5d56f293ecd7511 | Python | omarSuarezRodriguez/Python | /Portafolio/programasInterfazGrafica/guiProgram_15.py | UTF-8 | 1,445 | 3.140625 | 3 | [] | no_license | from tkinter import *
from programasInterfazGrafica import guiMetodos
class frame1(Frame):
    """Two-entry adder window: reads two integers typed by the user and
    shows their sum in a read-only third entry."""
    def __init__(self, miVentana):
        """Build and lay out the widgets inside *miVentana* (Tk root)."""
        Frame.__init__(self, miVentana)
        self.label_1 = Label(miVentana, text="Número 1:")
        self.box_1 = Entry(miVentana)
        self.label_2 = Label(miVentana, text="Número 2:")
        self.box_2 = Entry(miVentana)
        self.boton_1 = Button(miVentana, text="Sumar", width=20, command=self.clicked)
        self.label_3 = Label(miVentana, text="Resultado:")
        self.box_3 = Entry(miVentana)
        # Grid layout: one label/entry pair per row, button in between.
        self.label_1.grid(row=0, column=0, padx=5, pady=5)
        self.box_1.grid(row=0, column=1, padx=5, pady=5)
        self.label_2.grid(row=1, column=0, padx=5, pady=5)
        self.box_2.grid(row=1, column=1, padx=5, pady=5)
        self.boton_1.grid(row=2, column=0, padx=5, pady=5, columnspan=2) # spans both columns
        self.label_3.grid(row=3, column=0, padx=5, pady=5)
        self.box_3.grid(row=3, column=1, padx=5, pady=5)
        self.box_1.focus()
        # The result box starts disabled so the user cannot type into it.
        self.box_3["state"] = "disabled"
    def clicked(self):
        """Button handler: add the two entries and display the result."""
        resultado = int(self.box_1.get()) + int(self.box_2.get())
        self.box_3.delete(0, END)
        # Re-enable the box just long enough to write the new result.
        self.box_3["state"] = "normal"
        self.box_3.insert(INSERT, resultado)
# Create the root window, mount the adder frame, centre it on screen
# (helper from guiMetodos) and enter the Tk event loop.
ventana = Tk()
ventana.title("Programa 15")
frame_1 = frame1(ventana)
frame_1.grid(row=0, column=0)
guiMetodos.centrar(ventana)
ventana.mainloop()
| true |
237c3ea49632535099afd740e10b667faa5d6782 | Python | AayushSugandhi0203/Scheduling-Algorithms-in-Python | /nonprepprio.py | UTF-8 | 2,897 | 2.65625 | 3 | [] | no_license | import pandas
# Non-preemptive priority scheduling simulator: reads process arrival,
# burst and priority data from a .dat file, builds the execution order
# (Gantt chart) and prints completion/turnaround/waiting/response stats.
datContent = [read.strip().split() for read in open("inputnon_premp_prio.dat").readlines()]
n = int(datContent[0][0])
j =1
arrival = []
burst = []
arrival2=[]
burst2=[]
process= []
priority = []
priority2=[]
# Parse one (arrival, burst, priority) triple per process; the *2 copies
# are kept because the primaries are consumed during simulation.
for i in range(0,n):
    a ,b,c = datContent[j][0] , datContent[j][1], datContent[j][2]
    a = int(a)
    b = int(b)
    c = int(c)
    arrival.append(a)
    arrival2.append(a)
    burst.append(b)
    burst2.append(b)
    priority.append(c)
    priority2.append(c)
    j = j+1
# Trailing lines of the file carry a quantum and a queue count; quantum
# is overwritten to 1 just below and neither drives this scheduler.
quantum = int(datContent[n + 1][0])
queue = int(datContent[n + 2][0])
for i in range(1,n+1):
    process.append(i)
quantum = 1
# Sort processes by arrival time, breaking ties by priority value.
zipp = zip(process,arrival,burst,priority)
tag = sorted((zipp),key=lambda x: (x[1],x[3]))
arr2 = []
bur2 = []
proc2 =[]
prio2= []
for i in tag:
    arr2.append(i[1])
    bur2.append(i[2])
    proc2.append(i[0])
    prio2.append(i[3])
# Simulate tick by tick: among the arrived processes pick the one with
# the smallest priority number and run it to completion (non-preemptive),
# appending its id to `progress` once per tick.
progress = []
time = min(arrival)
temp_arr= []
temp_bur = []
temp_prio = []
exit_time = []
p =n
while(len(progress)!= sum(burst)):
    temp_arr = []
    temp_bur = []
    temp_prio = []
    for i in range(0,len(arr2)):
        if arr2[i] <= time:
            temp_arr.append(arr2[i])
    # NOTE(review): temp_bur/temp_prio are filled by position j over the
    # ready prefix, which lines up index-for-index with arr2 only because
    # arr2 is sorted by arrival time -- confirm this invariant holds.
    for j in range(0,len(temp_arr)):
        temp_bur.append(bur2[j])
        temp_prio.append(prio2[j])
    # Finished processes are masked out with a sentinel priority.
    for j in range(0, len(temp_bur)):
        if temp_bur[j]==0:
            temp_prio[j]=1000
    min_prio = min(temp_prio)
    ind = temp_prio.index(min_prio)
    while(bur2[ind]!=0):
        ind = temp_prio.index(min_prio)
        bur2[ind] = bur2[ind] - 1
        progress.append(proc2[ind])
        time = time + 1
print("Gantt Chart is(no. show the process no.)",progress)
rev_progress = progress[::-1]
print("Order in which following process outputs are written",end =' ')
for i in range(0,n):
    print(i+1,end=' ')
print(" ")
# Completion time = index of each process's last tick in the Gantt list;
# start time = index of its first tick.
counter_exit = []
counter_start = []
for j in range(1,n+1):
    a = rev_progress.index(j)
    b = progress.index(j)
    counter_exit.append(len(progress)-a)
    counter_start.append(b)
print("Completion Time",counter_exit)
# Turnaround Time = Exit time - Arrival Time
turn_around = []
for j in range(0,n):
    turn_around.append(counter_exit[j]- arrival[j])
print("Turn-Around",turn_around)
# Waiting Time = Turnaround Time - Burst Time
wait_time = []
for j in range(0,n):
    wait_time.append(turn_around[j]- burst[j])
print("Waiting-Time",wait_time)
#Response Time = Start Time - Arrival Time
print("Start-time",counter_start)
response_time = []
for j in range(0,n):
    response_time.append(counter_start[j] - arrival[j])
print("Response Time",response_time)
print("Avg Turn-Around",sum(turn_around)/n)
print("Avg Waiting",sum(wait_time)/n)
print("Avg Response",sum(response_time)/n)
| true |
7fd6a42dfe770502c9929a3bb6918ce5c2f65c7d | Python | dongttang/baekjoon_algorithm | /1075.py | UTF-8 | 221 | 3.28125 | 3 | [] | no_license | def calc(n, f):
    # Round n down to a multiple of 100: the last two digits are the
    # unknown part we are allowed to choose.
    base_num = n - n % 100
    # Try each candidate for the last two digits (0..f-1) and return the
    # first one that makes the whole number divisible by f.  Such an i
    # always exists, since f consecutive integers contain a multiple of f.
    for i in range(0, f):
        target_num = base_num + i
        if target_num % f == 0:
            return i
# Read the problem inputs and print the answer zero-padded to two digits.
n = int(input())
f = int(input())
print("%02d" % calc(n, f))
| true |
b3e1bf3962225e9f0dc7a27ee4f8e659067f1425 | Python | Cadene/im2recipe | /pyscripts/plotcurve.py | UTF-8 | 4,542 | 2.75 | 3 | [
"MIT"
] | permissive | import matplotlib.pyplot as plt
import os
import sys
import csv
from params import get_parser
def parseLossLog(filename,params):
    """Parse a training log file into train/val loss curves.

    When params.nlosscurves == 1 the curves are plain lists of the cosine
    loss; otherwise they are dicts with 'cos', 'class_im', 'class_rec'
    and 'total' series.  Also accumulates and prints the total running
    time found on 'Time:' lines.  Returns (train_losses, val_losses).
    """
    if params.nlosscurves == 1:
        train_losses = []
        val_losses = []
    else:
        train_losses = {'cos':[],'class_im':[],'class_rec':[],'total':[]}
        val_losses = {'cos':[],'class_im':[],'class_rec':[],'total':[]}
    totaltime = 0
    # csv.reader with a '\n' delimiter effectively yields one whole log
    # line per row ('rU' is the old universal-newlines text mode).
    with open(filename, 'rU') as csvfile:
        lines = csv.reader( csvfile, delimiter='\n')
        for i,line in enumerate(lines):
            if len(line)>0: # if line is not empty
                line = line[0]
                # Training-loss lines: contain 'loss' but no val marker.
                if 'loss' in line and not 'val' in line and not 'Val' in line:
                    line = line.split('\t')
                    cos_loss = line[0]
                    if params.nlosscurves==1:
                        train_losses.append(float(cos_loss.split(': ')[1]))
                    else:
                        # Tab-separated fields: cos, class_im, class_rec, total.
                        cls_loss1 = line[1]
                        cls_loss2 = line[2]
                        total = line[3]
                        train_losses['cos'].append(float(cos_loss.split(': ')[1]))
                        train_losses['class_im'].append(float(cls_loss1.split(': ')[1]))
                        train_losses['class_rec'].append(float(cls_loss2.split(': ')[1]))
                        train_losses['total'].append(float(total.split(': ')[1]))
                # Validation lines ('(val)' or 'Val'), excluding the
                # 'valfreq' configuration echo.
                elif ('(val)' in line or 'Val' in line) and not 'valfreq' in line:
                    line = line.split('\t')
                    cos_loss = line[0]
                    if params.nlosscurves==1:
                        val_losses.append(float(cos_loss.split(': ')[1]))
                    else:
                        cls_loss1 = line[1]
                        cls_loss2 = line[2]
                        total = line[3]
                        val_losses['cos'].append(float(cos_loss.split(': ')[1]))
                        val_losses['class_im'].append(float(cls_loss1.split(': ')[1]))
                        val_losses['class_rec'].append(float(cls_loss2.split(': ')[1]))
                        val_losses['total'].append(float(total.split(': ')[1]))
                # Timing lines: accumulate the seconds after 'Time:'.
                elif 'Time' in line:
                    time = line.split('Time:')[-1].split(' ')[0]
                    totaltime+=float(time)
    print "Running time:",totaltime,'seconds:'
    h = totaltime/3600
    m = totaltime%3600/60
    print int(h), 'hours and',int(m), 'minutes.'
    return train_losses,val_losses
if __name__ == "__main__":
    # Parse CLI options, read the loss log and plot either a single
    # cosine-loss curve or a 1x3 panel of cos / classification / total
    # curves, with train and validation series overlaid.
    parser = get_parser()
    params = parser.parse_args()
    filename = params.logfile
    train_losses,val_losses = parseLossLog(filename,params)
    fs = 9
    if params.nlosscurves==1:
        # x-axes are iteration counts spaced by the display/val frequency.
        t_train = range(params.dispfreq,len(train_losses)*params.dispfreq+1,params.dispfreq)
        t_val = range(params.valfreq,len(val_losses)*params.valfreq+1,params.valfreq) # validation loss is displayed after each epoch only
        plt.plot(t_train,train_losses,'b-*',label='loss (t)')
        plt.plot(t_val,val_losses,'r-*',label='loss (v)')
    else:
        t_train = range(params.dispfreq,len(train_losses['cos'])*params.dispfreq+1,params.dispfreq)
        fig, axarr = plt.subplots(1,3)
        t_val = range(params.valfreq,len(val_losses['cos'])*params.valfreq+1,params.valfreq)
        axarr[0].plot(t_train,train_losses['cos'],'b-*',label='Cos loss (t)')
        axarr[0].plot(t_val,val_losses['cos'],'r-*',label='Cos loss (v)')
        #axarr[0].set_xticklabels(t_train, fontsize=fs,rotation=90)
        axarr[1].plot(t_train,train_losses['class_im'],'y-*',label='Cls-im loss (t)')
        axarr[1].plot(t_val,val_losses['class_im'],'g-*',label='Cls-im loss (v)')
        axarr[1].plot(t_train,train_losses['class_rec'],'b-*',label='Cls-rec loss (t)')
        axarr[1].plot(t_val,val_losses['class_rec'],'r-*',label='Cls-rec loss (v)')
        axarr[2].plot(t_train,train_losses['total'],'b-*',label='Total (t)')
        axarr[2].plot(t_val,val_losses['total'],'r-*',label='Total (v)')
        # Legends sit above each subplot, expanded across its width.
        axarr[0].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                   ncol=2, mode="expand", borderaxespad=0.)
        axarr[1].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                   ncol=2, mode="expand", borderaxespad=0.)
        axarr[2].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
                   ncol=2, mode="expand", borderaxespad=0.)
    plt.show()
| true |
d36593271b524a3feea2fd0437bcdaa59bdc7c70 | Python | Ajay2521/Python | /ADVANCE/Exception/raise.py | UTF-8 | 515 | 4.34375 | 4 | [] | no_license | # Lets see about "Exception handling" in python.
# raise = Used to raise an exception forcefully in python.
# synatx for raise is :
# raise Exception_class,value
# Here is the program for "raise" in python.
# Prompt for an age and validate it: ages below 18 are rejected by
# forcing a ValueError with the `raise` statement.
try:
    age = int( input("\nEnter age : ") )
    if(age < 18):
        # Forcefully raise the exception for under-age input.
        raise ValueError
    else:
        print("\nAge is valid for the process...")
# Handles the ValueError raised above (and also one from a non-numeric
# int() conversion of the input).
except ValueError:
    print("\nAge is not valid for the process...")
| true |
bad02f4836a0fdbac1e26e5c8f9ceeec31f55f59 | Python | firetee13/sftp_walk | /sftp_walk.py | UTF-8 | 1,629 | 3.046875 | 3 | [] | no_license | import paramiko
import os
paramiko.util.log_to_file('/tmp/sftp_data_transfer.log')
from stat import S_ISDIR
# Connection settings used by the functions below.
host = "sftp.example.com"
port = 22
username = "sftp_walk"
pkey = paramiko.RSAKey.from_private_key(open("./id_rsa")) #your private ssh key
remote_path = '/' #remote path to copy
local_path = os.getcwd()+"/data" # local path to copy to
def connect_to_sftp():
    """Open the SFTP session and expose it module-wide.

    The connection objects are published as module globals because
    get_sftp_data() below reads the ``sftp`` handle directly; without the
    ``global`` declaration they died with this function's local scope and
    get_sftp_data() failed with a NameError.
    """
    global transport, sftp
    #initiate the connection
    transport = paramiko.Transport((host, port))
    transport.connect(username=username, pkey=pkey)
    sftp = paramiko.SFTPClient.from_transport(transport)
def get_sftp_data():
    """Mirror `remote_path` from the SFTP server into `local_path`,
    recreating the folder structure and downloading every file.

    Relies on the module-level `sftp` handle opened by connect_to_sftp().
    """
    # walk through the folder structure and create the generators
    def sftp_walk(remotepath):
        # Recursive os.walk-style generator over the remote tree,
        # yielding (path, files) for every directory that has files.
        path = remotepath
        files = []
        folders = []
        for f in sftp.listdir_attr(remotepath):
            if S_ISDIR(f.st_mode):
                folders.append(f.filename)
            else:
                files.append(f.filename)
        if files:
            yield path, files
        for folder in folders:
            new_path = os.path.join(remotepath, folder)
            for x in sftp_walk(new_path):
                yield x
    for path, files in sftp_walk(remote_path):
        # create the folder structure locally (ignore "already exists")
        try:
            print "creating "+local_path+path
            os.makedirs(local_path+path)
        except:
            pass
        # copy the files from the sftp to the local folders
        for file in files:
            print "copying "+file
            sftp.get(path+"/"+file, local_path+path+"/"+file)
def main():
    """Connect to the SFTP server and mirror `remote_path` locally."""
    connect_to_sftp()
    get_sftp_data()
if __name__=="__main__":
    main()
| true |
9b8f5c3532645b4c1482d214063d5cb824c31acd | Python | HeNine/PyMoronBot | /Commands/Actions.py | UTF-8 | 1,212 | 2.59375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import re
from CommandInterface import CommandInterface
from IRCMessage import IRCMessage
from IRCResponse import IRCResponse, ResponseType
class Actions(CommandInterface):
    """IRC command that reacts to /me ACTION messages aimed at the bot by
    performing the same action back at the sender."""
    acceptedTypes = ['ACTION']
    help = 'Responds to various actions'
    def shouldExecute(self, message):
        """Return True for ACTION messages; implicitly None otherwise.

        @type message: IRCMessage
        """
        if message.Type in self.acceptedTypes:
            return True
    def execute(self, message):
        """If the action is one of the recognised verbs directed at the
        bot's nickname, reply with the same action on the sender.

        @type message: IRCMessage
        """
        actions = ['pokes',
                   'gropes',
                   'molests',
                   'slaps',
                   'kicks',
                   'rubs',
                   'hugs',
                   'cuddles',
                   'glomps']
        # {0} is filled with the verb alternation and {1} with the bot's
        # nickname, so the pattern matches e.g. "pokes <BotNick>" followed
        # by a non-nick character or end of string.
        regex = r"^(?P<action>({0})),?[ ]{1}([^a-zA-Z0-9_\|`\[\]\^-]|$)"
        match = re.search(
            regex.format('|'.join(actions), self.bot.nickname),
            message.MessageString,
            re.IGNORECASE)
        if match:
            return IRCResponse(ResponseType.Do,
                               '%s %s' % (match.group('action'), message.User.Name),
                               message.ReplyTo)
| true |
5e49b188ea7c1768d7b489d914831ae1cf799459 | Python | kelvinnlois/jakarta_houseprice | /data/lamudi/extra/data_collector.py | UTF-8 | 2,699 | 2.71875 | 3 | [] | no_license | # coding : utf-8
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
import time
import pandas as pd
import numpy as np
# Scrape one property listing page per URL in rumah.txt with Selenium +
# BeautifulSoup, collect price/location/size/extra attributes into a
# DataFrame and write it to jakarta_houseprice.csv.
# NOTE(review): find_element_by_xpath is the pre-Selenium-4 API; the bare
# except clauses deliberately skip any field (or page) that fails.
source = []
df = pd.DataFrame([])
with open('rumah.txt','r') as f:
    lines = f.readlines()
    for i in lines:
        source.append(i.rstrip())
feature_dict={}
k=0
for i in source:
    try:
        # One fresh browser session per listing URL.
        url = i
        driver = webdriver.Chrome()
        driver.get(url)
        time.sleep(2)
        soup = BeautifulSoup(driver.page_source,'lxml')
        #Feature
        #1: price (second token of the price text)
        try:
            xpath = "//div[@class='medium-6 small-6 columns PriceSection']/span/span"
            feature_dict['price'] = driver.find_element_by_xpath(xpath).text.split(' ')[1]
        except:
            feature_dict['price'] = np.nan
        #2: location breadcrumb
        try:
            xpath = "//div[@class='Navigation-wrapper']/div[4]/a"
            feature_dict['lokasi'] = driver.find_element_by_xpath(xpath).text.strip().rstrip()
        except:
            feature_dict['lokasi'] = np.nan
        #3: building area
        try:
            feature_dict['lb'] = soup.find('span',{'class':'Overview-attribute icon-livingsize-v4'}).string.strip().split(' ')[0]
        except:
            feature_dict['lb'] = np.nan
        #4: land area
        try:
            feature_dict['lt'] = soup.find('span',{'class':'Overview-attribute icon-land_size-v4'}).string.strip().split(' ')[0]
        except:
            feature_dict['lt'] = np.nan
        #5: free-form attribute table (name taken from data-attr-name)
        try:
            rincian = soup.find_all('div',{'class':'columns-2'})
            for j in rincian:
                child = j.findChildren('div',recursive = False)
                feature = child[0]['data-attr-name']
                val = child[1].string.strip()
                feature_dict[feature] = val
        except:
            pass
        #6: description text
        try:
            xpath = "//*[@id='listing-description']/div/div/div"
            feature_dict['deskripsi'] = driver.find_element_by_xpath(xpath).text.strip().rstrip()
        except:
            pass
        #7: amenities
        try:
            xpath = "//*[@id='listing-amenities']/div/div"
            feature_dict['fasilitas'] = driver.find_element_by_xpath(xpath).text.strip().rstrip()
        except:
            pass
        # Nearby landmarks
        try:
            xpath = "//*[@id='js-landmark-accordion-head']/div/ul"
            feature_dict['terdekat'] = driver.find_element_by_xpath(xpath).text.strip().rstrip()
        except:
            pass
        df = df.append(feature_dict,ignore_index=True)
        driver.close()
        time.sleep(2)
    except:
        pass
    k+=1
    # Progress report every 20 URLs attempted.
    if k%20==0:
        print(f'{k} rows have been collected')
print('complete')
print(df)
df.to_csv('jakarta_houseprice.csv')
| true |
9ad69696b4218224d21aca921603ddaee5277aa4 | Python | AdamZhouSE/pythonHomework | /Code/Cases/2225/.mooctest/answer.py | UTF-8 | 476 | 2.96875 | 3 | [] | no_license | class Solution(object):
def flipLights(self, n, m):
"""
:type n: int
:type m: int
:rtype: int
"""
if n==0:
return 0
if m==0:
return 1
if n==1:
return 2
if n==2:
return 3 if m==1 else 4
if m==1:
return 4
if m==2:
return 7
return 8
# Read n and m from stdin (one per line) and print the answer.
a = input()
b = input()
s = Solution()
print(s.flipLights(int(a), int(b)))
fd07f1e6eb731dde44362f8dc7a65e173df15179 | Python | polyneikis11/pyHAM | /hamExtTest.py | UTF-8 | 1,739 | 2.703125 | 3 | [] | no_license | from hamSolver import HamSolver
from matplotlib import pyplot as plt
from hamBcs import HamBcs
from hamResult import HamResult
def pyham_test_algebraic_1():
    """Smoke test: solve an algebraic equation with HamSolver and print
    the resulting value, error, tolerance and resolution."""
    my_solver = HamSolver()
    my_solver.set_equation("exp(-x**2)*cos(4*x)")
    my_solver.set_initial_guess("0.4")
    my_solver.set_ftol(1e-8)
    my_solver.set_approx_order(2)
    my_solver.validate()
    # print('equation to solve:', my_solver.get_equation())
    # print('initial guess:', my_solver.get_initial_guess())
    # print('linear op:', my_solver.get_lin_op())
    # print('C0:', my_solver.get_C0())
    # print('ftol:', my_solver.get_ftol())
    # print('Order of Approximation:', my_solver.get_approx_order())
    # print('validation:', my_solver.validate())
    # print('type of problem:', my_solver.get_type())
    ans = my_solver.solve()
    print('value:', ans.get_value())
    print('error:', ans.get_error())
    print('tol:', ans.get_tolerance())
    print('RESOLUTION:', ans.resolution())
def pyham_test_ode_1():
    """Smoke test: solve a first-order ODE with HamSolver (linear
    operator fx1, boundary condition via HamBcs) and print the result
    fields."""
    my_solver = HamSolver()
    my_solver.set_equation("fx1 - 1/x**2 + fx0/x + fx0**2")
    my_solver.set_initial_guess("-1/x**2")
    my_solver.set_ftol(1e-8)
    my_solver.set_approx_order(2)
    my_solver.set_C0(-1.)
    my_solver.set_lin_op("fx1")
    # Boundary condition: f(1.) = -1. at derivative order 0.
    a_bc = HamBcs("1.", "-1.", 0)
    my_solver.set_condition(a_bc)
    print('validation:', my_solver.validate(display_error=True))
    print('type is', my_solver.ham_type)
    res = my_solver.solve()
    print('resolution:', res.resolution())
    print('value:', res.value)
    print('error:', res.error)
    print('tol:', res.tolerance)
def run_external_pyham_tests():
    """Entry point for the external pyHAM smoke tests; the algebraic test
    is currently disabled."""
    # pyham_test_algebraic_1()
    pyham_test_ode_1()
    pass
| true |
f1d6f9b6290731bedf555f99e6d24ac4a1ece221 | Python | fagan2888/pyDataCube | /columndensitydistribution_v3.py | UTF-8 | 9,528 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python
#filename=column density distribution
#2014/05/21
#---------------------------------------by Gao John
from __future__ import division
from pylab import *
import pyfits
import pywcsgrid2
import mpl_toolkits.axisartist as axisartist
from mpl_toolkits.axes_grid import make_axes_locatable
import matplotlib.colors as mcolors
import pywcs
from kapteyn import wcs
def locate_line(nx,ny,k,b,d_limit):
    """Rasterise the line y = k*x + b onto an (nx, ny) pixel grid.

    Walks along x starting from the pixel where the line enters the grid
    and, for each column, climbs up/down collecting every pixel whose
    squared distance to the line is below 2*d_limit.  Returns two
    parallel arrays with the selected pixel x and y indices.  k must be
    non-zero (the code divides by k).
    """
    # Choose the entry pixel: on the left edge if the intercept lies
    # inside the grid, otherwise where the line crosses y=0 or y=ny-1.
    if (b>=0 and b<ny-1):
        index_x = [0]
        index_y = [int(round(b))]
    elif (b<0):
        index_x = [int(round(-b/k))]
        index_y = [0]
    else:
        index_x = [int(round((ny-1-b)/k))]
        index_y = [ny-1]
    y_now = index_y[0]
    for i2 in xrange(nx):
        i = i2+index_x[0] #x axis not begin at 0, but index_x[0]
        # Decide the scan direction(s) for this column from the slope,
        # stepping y_now back one pixel so the inner loop re-approaches
        # the line from the previous column's position.
        if (k>0 and y_now<ny-1):
            upper_run = True
            if (i2!=0): y_now-=1
        else:
            upper_run = False
        if (k<0 and y_now>0):
            down_run = True
            if (i2!=0): y_now+=1 #last y_now is not what I need
        else:
            down_run = False
        d = (1.+1./(k*k))*(k*i+b-y_now)*(k*i+b-y_now) #distance^2
        # print k,b,d,d_limit
        # print i,y_now,upper_run,down_run
        if (i2==0 and d>2*d_limit):
            print 'warning initial d>d_limit'
        # if (d<2*d_limit):
        #     index_x.append(i)
        #     index_y.append(y_now)
        #
        # only one direction considering k
        if ((i!=0 and not (upper_run or down_run)) or (i>=nx)):
            break
        # print index_x,index_y
        # print upper_run,down_run
        running = True
        while running:
            # print i,y_now,k*i+b
            # Re-check grid bounds each step before moving further.
            if (upper_run and y_now<ny-1):
                upper_run = True
            else:
                upper_run = False
            if (down_run and y_now>0):
                down_run = True
            else:
                down_run = False
            if (upper_run):
                y_now+=1
                d = (1.+1./(k*k))*(k*i+b-y_now)*(k*i+b-y_now) #distance^2
                if (d>2*d_limit):
                    # Stop climbing only once we are past the line.
                    if (y_now>k*i+b):
                        upper_run = False
                    else:
                        pass
                else:
                    index_x.append(i)
                    index_y.append(y_now)
            if (down_run):
                y_now-=1
                d = (1.+1./(k*k))*(k*i+b-y_now)*(k*i+b-y_now) #distance^2
                if (d>2*d_limit):
                    if (y_now<k*i+b):
                        down_run = False
                    else:
                        down_run = True
                else:
                    index_x.append(i)
                    index_y.append(y_now)
            # print upper_run, down_run
            running = upper_run or down_run
    # print array(index_y)
    return array(index_x),array(index_y)
# Main script (Python 2): load a CO data cube slice from FITS, rasterise
# two picked cut lines with locate_line, sample the map along them,
# convert offsets to arcminutes from the lines' intersection and plot
# the two column-density profiles side by side.
#co_file===================================================
co_file = pyfits.open("kes41_12_-70--40.fits")
co_header = co_file[0].header
co_data = co_file[0].data
(ny, nx) = co_data.shape
co_wcs = pywcs.WCS(co_header)
print nx,ny
#pick x, y=========================================
# Two cut lines, each given by two endpoints in FK5 (RA, Dec) degrees.
proj = wcs.Projection(co_header)
pick1_x1_fk5 = 249.825264
pick1_y1_fk5 = -46.907573
pick1_x2_fk5 = 249.6933
pick1_y2_fk5 = -47.00511
pick2_x1_fk5 = 249.72957
pick2_y1_fk5 = -46.928096
pick2_x2_fk5 = 249.815
pick2_y2_fk5 = -46.992042
delt_sky_x = co_header['CDELT1']
delt_sky_y = co_header['CDELT2']
if (abs(co_header['CDELT1']) != abs(co_header['CDELT2'])):
    print 'warning: different between x and y'
#trans = wcs.Transformation(wcs.galactic, proj.skysys)
#(pick_x_co, pick_y_co) = trans.transform((pick_x_gal,pick_y_gal))
(pick1_x1, pick1_y1) = proj.topixel((pick1_x1_fk5, pick1_y1_fk5))
(pick1_x2, pick1_y2) = proj.topixel((pick1_x2_fk5, pick1_y2_fk5))
(pick2_x1, pick2_y1) = proj.topixel((pick2_x1_fk5, pick2_y1_fk5))
(pick2_x2, pick2_y2) = proj.topixel((pick2_x2_fk5, pick2_y2_fk5))
#(pick_x_co_tmp, pick_y_co_tmp) = trans.transform((pick_x_gal+5.*delt_sky_x,pick_y_gal))
#(pick_x_tmp, pick_y_tmp) = proj.topixel((pick_x_co_tmp, pick_y_co_tmp))
#if (pick_x_tmp == pick_x):
#    print 'Error: vertical line on the map, you can get it a simpler way!'
#    raise
# Slopes/intercepts of the two lines in pixel coordinates.
k1 = (pick1_y2-pick1_y1)/(pick1_x2-pick1_x1)
b1 = pick1_y2-k1*pick1_x2
k2 = (pick2_y2-pick2_y1)/(pick2_x2-pick2_x1)
b2 = pick2_y2-k2*pick2_x2
#(pick_x_co_tmp, pick_y_co_tmp) = trans.transform((pick_x_gal,pick_y_gal+5.*delt_sky_y))
#(pick_x_tmp, pick_y_tmp) = proj.topixel((pick_x_co_tmp, pick_y_co_tmp))
#if (pick_x_tmp == pick_x):
#    print 'Error: vertical line on the map, you can get it a simpler way!'
#    raise
#
#k2 = (pick_y_tmp-pick_y)/(pick_x_tmp-pick_x)
#b2 = pick_y-k2*pick_x
#
#^lines of 2 different slope
d_limit = 1./4. #pixel
output1_x, output1_y = locate_line(nx,ny,k1,b1,d_limit)
output2_x, output2_y = locate_line(nx,ny,k2,b2,d_limit)
# Quick-look plot of the rasterised lines.
figure(0)
plot(output1_x,output1_y,'b-',output2_x,output2_y,'r--')
show()
output1 = zip(output1_x,output1_y)
output2 = zip(output2_x,output2_y)
output1_fk5 = proj.toworld(output1)
output2_fk5 = proj.toworld(output2)
#trans_r = wcs.Transformation(proj.skysys,wcs.galactic)
#
#output1_gal = trans_r.transform(output1_fk5)
#output2_gal = trans_r.transform(output2_fk5)
#
#output1_gal_ar = array(output1_gal)
#output2_gal_ar = array(output2_gal)
#
print len(output1)
# Sample the map value at every pixel on each line.
output1_val = zeros(len(output1))
output2_val = zeros(len(output2))
for ind in xrange(len(output1)):
    output1_val[ind] = co_data.transpose()[output1[ind]]
for ind in xrange(len(output2)):
    output2_val[ind] = co_data.transpose()[output2[ind]]
#output2_val = co_data.transpose()[output2]
#plot(output1_gal_ar[:,0],output1_val,drawstyle='steps')
#angle from center
if k1==k2:
    print "k1==k2!"
    raise
# Intersection of the two lines, converted to world coordinates; used as
# the reference point for the offset axes below.
center_of_line = proj.toworld(((b2-b1)/(k1-k2),(k1*b2-k2*b1)/(k1-k2)))
print center_of_line
output1_angle = zeros(len(output1))
output2_angle = zeros(len(output2))
for ind in xrange(len(output1)):
#    output_angle[ind] = sqrt((output_fk5[ind][0]-center_of_line[0])* \
#            (output_fk5[ind][0]-center_of_line[0])+ \
#            (output_fk5[ind][1]-center_of_line[1])*(output_fk5[ind][1]-center_of_line[1]))* \
#            (output_fk5[ind][0]-center_of_line[0])
    output1_angle[ind] = linalg.norm(array(output1_fk5[ind])-\
#distance:linalg.norm
            array(center_of_line))*\
            (-sign(output1_fk5[ind][0]-center_of_line[0]))
for ind in xrange(len(output2)):
    output2_angle[ind] = linalg.norm(array(output2_fk5[ind])-\
            array(center_of_line))*\
            (-sign(output2_fk5[ind][0]-center_of_line[0]))
# Two-panel figure: column-density profile along each cut line, with the
# picked segment highlighted by a grey span.
fig = figure(1)
fig.set_figheight(6.125)
fig.set_figwidth(16.25)
ax1 = fig.add_subplot(121,adjustable='box')
ax1.plot(output1_angle*60.,output1_val/0.42*1.8e20/1e21,'b-',drawstyle='steps')
ax1.set_xlim(min(output1_angle*60.),max(output1_angle*60.))
#ax1.set_ylim(22*1.8e20,45*1.8e20)
ax1.set_xlabel('Offset from the reference point (arcmin)',fontsize='x-large')
ax1.set_ylabel(r'N(H$_2$) (10$^{21}$ cm$^{-2}$)',fontsize='x-large')
#angle1 = 60.*sqrt((pick1_x1_fk5-center_of_line[0])*(pick1_x1_fk5-center_of_line[0])+\
#        (pick1_y1_fk5-center_of_line[1])*(pick1_y1_fk5-center_of_line[1]))
angle1 = 60.*linalg.norm(array([pick1_x1_fk5,pick1_y1_fk5])-array(center_of_line))*sign(pick1_x1_fk5-center_of_line[0])
#in unit of minute
angle2 = 60.*linalg.norm(array([pick1_x2_fk5,pick1_y2_fk5])-array(center_of_line))*sign(pick1_x2_fk5-center_of_line[0])
#angle2 = -60.*sqrt((pick_x2_fk5-center_of_line[0])*(pick_x2_fk5-center_of_line[0])+\
#        (pick_y2_fk5-center_of_line[1])*(pick_y2_fk5-center_of_line[1]))
sp1 = ax1.axvspan(angle1,angle2,facecolor='#d5d5d5',edgecolor='w',alpha=0.3)
xlist = [-6.0,-3.0,0.0,3.0,6.0]
xlist_label = ['-6.0','-3.0','0.0','3.0','6.0']
ax1.set_xticks(xlist)
ax1.set_xticklabels(xlist_label,size='large')
ylist = [28.0,32.0,36.0,40.0,44.0]
ylist_label = ['28.0','32.0','36.0','40.0','44.0']
ax1.set_yticks(ylist)
ax1.set_yticklabels(ylist_label,size='large')
text1="NE-SW"
ax1.text(-6,41.5,text1,fontsize=14,ha='left')
ax2 = fig.add_subplot(122,adjustable='box')
ax2.plot(output2_angle*60.,output2_val/0.42*1.8e20/1e21,'b-',drawstyle='steps')
ax2.set_xlim(min(output2_angle*60.),max(output2_angle*60.))
#ax1.set_ylim(22*1.8e20,45*1.8e20)
ax2.set_xlabel('Offset from the reference point (arcmin)',fontsize='x-large')
#ax2.set_ylabel(r'N(H$_2$) (10$^{21}$ cm$^{-2}$)',fontsize='x-large')
#angle1 = 60.*sqrt((pick1_x1_fk5-center_of_line[0])*(pick1_x1_fk5-center_of_line[0])+\
#        (pick1_y1_fk5-center_of_line[1])*(pick1_y1_fk5-center_of_line[1]))
angle1 = 60.*linalg.norm(array([pick2_x1_fk5,pick2_y1_fk5])-array(center_of_line))*sign(pick2_x1_fk5-center_of_line[0])
#in unit of minute
angle2 = 60.*linalg.norm(array([pick2_x2_fk5,pick2_y2_fk5])-array(center_of_line))*sign(pick2_x2_fk5-center_of_line[0])
#angle2 = -60.*sqrt((pick_x2_fk5-center_of_line[0])*(pick_x2_fk5-center_of_line[0])+\
#        (pick_y2_fk5-center_of_line[1])*(pick_y2_fk5-center_of_line[1]))
sp2 = ax2.axvspan(angle1,angle2,facecolor='#d5d5d5',edgecolor='w',alpha=0.3)
xlist = [-6.0,-3.0,0.0,3.0,6.0]
xlist_label = ['-6.0','-3.0','0.0','3.0','6.0']
ax2.set_xticks(xlist)
ax2.set_xticklabels(xlist_label,size='large')
ylist = [15.0,20.0,25.0,30.0,35.0,40.0,45.0]
ylist_label = ['15.0','20.0','25.0','30.0','35.0','40.0','45.0']
ax2.set_yticks(ylist)
ax2.set_yticklabels(ylist_label,size='large')
text2='NW-SE'
ax2.text(-6.5,40,text2,fontsize=14,ha='left')
fig.savefig('columndensity_v3.eps')
#show()
| true |
086f02434f4640f9f25e2fc163a32c7bd0f2604e | Python | pappavis/EasyLab-retro-synth-SN76489 | /src/commando_esp32.py | UTF-8 | 1,123 | 3.15625 | 3 | [] | no_license | # the first 15 seconds of the chiptune Commando by Rob Hubbard and play it using 3 analoge pins on a ESP32
from machine import Pin, PWM
import time
# ESP32 GPIO pins driven as PWM audio outputs (one per voice).
analog_pins = [26, 27, 32]
# Define the PWM carrier frequency (Hz)
freq = 440
# Define the PWM duty cycle (0..1023 scale)
duty = 512
# Define the duration of the note (seconds)
duration = 0.25
# Define the square wave function
def square_wave(freq, duty):
    """Gate function for the player loop: 1 while *duty* sits below half
    of the 1024-step scale times *freq*, otherwise 0."""
    half_scale = 512 * freq  # identical to 1024 * freq / 2
    return 1 if duty < half_scale else 0
# Initialize one PWM channel per configured pin.
pwms = [PWM(Pin(pin), freq=freq, duty=duty) for pin in analog_pins]
# Define the notes for the melody (letter names only)
notes = ['C', 'D', 'E', 'C', 'C', 'D', 'E', 'C', 'E', 'F', 'G', 'E', 'F', 'G', 'C']
# Define the durations for the melody (quarter notes)
durations = [0.25] * 15
# Define the tempo of the melody (beats per minute)
tempo = 120
# Define the time per beat based on the tempo (seconds)
tpb = 60 / tempo
# Play the melody
for note, duration in zip(notes, durations):
    # NOTE(review): with this precedence the expression is
    # (261.63 * 2**semitones) / 12, not 261.63 * 2**(semitones/12) --
    # the latter is the equal-temperament formula; confirm the intent.
    freq = 261.63 * 2 ** (ord(note) - ord('C') + 3) / 12
    for t in range(int(duration / tpb * 1000)):
        for i, pwm in enumerate(pwms):
            pwm.duty(1023 * square_wave(freq, duty))
            time.sleep_us(1000 // len(pwms))
| true |
66eec142f16c33c23501e751b9c90f9254e3fa39 | Python | BarryZM/Workspace-of-NLU | /solutions/d_semantic/word_embedding/gensim/train_d2v_gen.py | UTF-8 | 7,945 | 2.71875 | 3 | [] | no_license | # -*- coding:utf-8 -*-
import sys
import os
import numpy as np
import gensim
from gensim.models.doc2vec import Doc2Vec,LabeledSentence
from sklearn.cross_validation import train_test_split
LabeledSentence = gensim.models.doc2vec.LabeledSentence
##读取并预处理数据
def get_dataset(pos_file, neg_file, unsup_file):
    """Read positive/negative/unsupervised review files, label, clean and
    tokenize them, and wrap each document in a LabeledSentence.

    Returns (x_train, x_test, unsup_reviews, y_train, y_test) where
    x_* and unsup_reviews are lists of LabeledSentence objects.
    """
    # Read the data (older directory-walking variant kept for reference)
    # pos_reviews = []
    # neg_reviews = []
    # unsup_reviews = []
    #
    # for fname in os.listdir(pos_file):
    #     for line in open(os.path.join(pos_file, fname), 'r'):
    #         pos_reviews.append(line)
    # for fname in os.listdir(neg_file):
    #     for line in open(os.path.join(neg_file, fname), 'r'):
    #         neg_reviews.append(line)
    # for fname in os.listdir(unsup_file):
    #     for line in open(os.path.join(unsup_file, fname), 'r'):
    #         unsup_reviews.append(line)
    with open(pos_file,'r') as infile:
        pos_reviews = infile.readlines()
    with open(neg_file,'r') as infile:
        neg_reviews = infile.readlines()
    with open(unsup_file,'r') as infile:
        unsup_reviews = infile.readlines()
    # Label positive sentiment with 1 and negative with 0.
    y = np.concatenate((np.ones(len(pos_reviews)), np.zeros(len(neg_reviews))))
    # Split the labelled data into train and test sets.
    x_train, x_test, y_train, y_test = train_test_split(np.concatenate((pos_reviews, neg_reviews)), y, test_size=0.2)
    # Simple cleaning/preprocessing for English text; adapt for Chinese
    # as needed.
    def cleanText(corpus):
        punctuation = """.,?!:;(){}[]"""
        corpus = [z.lower().replace('\n','') for z in corpus]
        corpus = [z.replace('<br />', ' ') for z in corpus]
        #treat punctuation as individual words
        for c in punctuation:
            corpus = [z.replace(c, ' %s '%c) for z in corpus]
        corpus = [z.split() for z in corpus]
        return corpus
    x_train = cleanText(x_train)
    x_test = cleanText(x_test)
    unsup_reviews = cleanText(unsup_reviews)
    # Gensim's Doc2Vec training requires each document to carry a unique
    # label.  We use gensim's LabeledSentence; labels look like
    # "TRAIN_i" / "TEST_i" where i is the index.
    def labelizeReviews(reviews, label_type):
        labelized = []
        for i,v in enumerate(reviews):
            label = '%s_%s'%(label_type,i)
            labelized.append(LabeledSentence(v, [label]))
        return labelized
    x_train = labelizeReviews(x_train, 'TRAIN')
    x_test = labelizeReviews(x_test, 'TEST')
    unsup_reviews = labelizeReviews(unsup_reviews, 'UNSUP')
    return x_train,x_test,unsup_reviews,y_train, y_test
##读取向量
def getVecs(model, corpus, size):
    """Collect the learned vector for every tagged document in *corpus*
    and stack them into a single (len(corpus), size) array."""
    rows = []
    for doc in corpus:
        vec = np.array(model.docvecs[doc.tags[0]])
        rows.append(vec.reshape((1, size)))
    return np.concatenate(rows)
##对数据进行训练
def train(x_train,x_test,unsup_reviews,size = 400,epoch_num=10):
    """Train DM and DBOW Doc2Vec models and return them as a pair.

    NOTE(review): the argument-less model.train(...) calls and the
    `size=` constructor keyword target an old gensim API -- confirm the
    installed gensim version (see the traceback comments at file end).
    """
    # Instantiate the DM and DBOW models.
    model_dm = gensim.models.Doc2Vec(min_count=1, window=10, size=size, sample=1e-3, negative=5, workers=3)
    model_dbow = gensim.models.Doc2Vec(min_count=1, window=10, size=size, sample=1e-3, negative=5, dm=0, workers=3)
    # Build the vocabulary from all of the data.
    model_dm.build_vocab(np.concatenate((x_train, x_test, unsup_reviews)))
    model_dbow.build_vocab(np.concatenate((x_train, x_test, unsup_reviews)))
    # Train repeatedly, reshuffling the training data each pass to
    # improve accuracy.
    all_train_reviews = np.concatenate((x_train, unsup_reviews))
    for epoch in range(epoch_num):
        perm = np.random.permutation(all_train_reviews.shape[0])
        model_dm.train(all_train_reviews[perm])
        model_dbow.train(all_train_reviews[perm])
    # Train on the test set as well (to infer its document vectors).
    x_test = np.array(x_test)
    for epoch in range(epoch_num):
        perm = np.random.permutation(x_test.shape[0])
        model_dm.train(x_test[perm])
        model_dbow.train(x_test[perm])
    return model_dm,model_dbow
##将训练完成的数据转换为vectors
def get_vectors(model_dm,model_dbow):
    """Concatenate DM and DBOW document vectors for train and test sets.

    NOTE(review): relies on the module-level globals x_train, x_test and
    size set in the __main__ block -- fragile if called elsewhere.
    """
    # Document vectors for the training set.
    train_vecs_dm = getVecs(model_dm, x_train, size)
    train_vecs_dbow = getVecs(model_dbow, x_train, size)
    train_vecs = np.hstack((train_vecs_dm, train_vecs_dbow))
    # Document vectors for the test set.
    test_vecs_dm = getVecs(model_dm, x_test, size)
    test_vecs_dbow = getVecs(model_dbow, x_test, size)
    test_vecs = np.hstack((test_vecs_dm, test_vecs_dbow))
    return train_vecs,test_vecs
##使用分类器对文本向量进行分类训练
def Classifier(train_vecs,y_train,test_vecs, y_test):
    """Fit an SGD logistic-regression classifier on the document vectors,
    print the test accuracy and return the fitted classifier."""
    # Use sklearn's SGD classifier (log loss, L1 penalty).
    from sklearn.linear_model import SGDClassifier
    lr = SGDClassifier(loss='log', penalty='l1')
    lr.fit(train_vecs, y_train)
    print('Test Accuracy: %.2f'%lr.score(test_vecs, y_test))
    return lr
##绘出ROC曲线,并计算AUC
def ROC_curve(lr,y_test):
    """Plot the ROC curve for classifier *lr* and display the AUC.

    NOTE(review): predicts on the module-level global `test_vecs`
    rather than taking it as a parameter -- confirm this is intended.
    """
    from sklearn.metrics import roc_curve, auc
    import matplotlib.pyplot as plt
    pred_probas = lr.predict_proba(test_vecs)[:,1]
    fpr,tpr,_ = roc_curve(y_test, pred_probas)
    roc_auc = auc(fpr,tpr)
    plt.plot(fpr,tpr,label='area = %.2f' %roc_auc)
    # Diagonal reference line for a random classifier.
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.show()
## 运行模块
if __name__ == "__main__":
    # Set the vector dimensionality and number of training epochs.
    size,epoch_num = 400,10
    from train.utils.util import root_path
    # Input file paths (aclImdb sentiment corpus).
    p_file = root_path + 'data/aclImdb/train/pos_all.txt'
    n_file = root_path + 'data/aclImdb/train/neg_all.txt'
    u_file = root_path + 'data/aclImdb/train/unsup_all.txt'
    # Load train/test data and their sentiment labels.
    x_train,x_test,unsup_reviews,y_train, y_test = get_dataset(pos_file=p_file, neg_file=n_file, unsup_file=u_file)
    # Train the Doc2Vec models on the data.
    model_dm,model_dbow = train(x_train,x_test,unsup_reviews,size,epoch_num)
    # Extract the per-document vectors from the models.
    train_vecs,test_vecs = get_vectors(model_dm,model_dbow)
    # Train the sentiment classifier on the document vectors.
    lr=Classifier(train_vecs,y_train,test_vecs, y_test)
    # Plot the ROC curve.
    ROC_curve(lr,y_test)
# /home/apollo/softwares/anaconda3/bin/python3.6 /home/apollo/craft/projects/Holy-Miner/train/Embeddings/doc2vec/Gensim/train_d2v_gen.py
# /home/apollo/softwares/anaconda3/lib/python3.6/site-packages/sklearn/cross_validation.py:41: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
# "This module will be removed in 0.20.", DeprecationWarning)
# /home/apollo/softwares/anaconda3/lib/python3.6/site-packages/gensim/models/doc2vec.py:366: UserWarning: The parameter `size` is deprecated, will be removed in 4.0.0, use `vector_size` instead.
# warnings.warn("The parameter `size` is deprecated, will be removed in 4.0.0, use `vector_size` instead.")
# Traceback (most recent call last):
# File "/home/apollo/craft/projects/Holy-Miner/train/Embeddings/doc2vec/Gensim/train_d2v_gen.py", line 164, in <module>
# model_dm,model_dbow = train(x_train,x_test,unsup_reviews,size,epoch_num)
# File "/home/apollo/craft/projects/Holy-Miner/train/Embeddings/doc2vec/Gensim/train_d2v_gen.py", line 88, in train
# model_dm.build_vocab(np.concatenate((x_train, x_test, unsup_reviews)))
# File "/home/apollo/softwares/anaconda3/lib/python3.6/site-packages/gensim/models/doc2vec.py", line 729, in build_vocab
# documents, self.docvecs, progress_per=progress_per, trim_rule=trim_rule)
# File "/home/apollo/softwares/anaconda3/lib/python3.6/site-packages/gensim/models/doc2vec.py", line 809, in scan_vocab
# if isinstance(document.words, string_types):
# AttributeError: 'numpy.ndarray' object has no attribute 'words' | true |
4635ba09d7bd64c422fd1963727743f557a5b141 | Python | warproxxx/CryptoTrader | /src/data_utils/twitter_data/libs/writing_utils.py | UTF-8 | 1,250 | 2.9375 | 3 | [] | no_license | import os
import inspect
import logging
def get_locations(datadir="twitter_data"):
    """Return ``(dir_location, root_dir_location)`` for this module's file.

    Parameters
    ----------
    datadir : str
        Name of the project root directory (default ``"twitter_data"``).

    Returns
    -------
    tuple
        ``dir_location`` — directory containing this module's file;
        ``root_dir_location`` — path up to and including ``datadir``.
    """
    module_path = get_name()
    dir_location = os.path.dirname(module_path)
    # Everything before the first occurrence of `datadir`, plus `datadir` itself.
    root_dir_location = dir_location.split(datadir)[0] + datadir
    return dir_location, root_dir_location
def get_name():
    """Return the absolute, symlink-resolved path of the caller's module file."""
    # stack()[1] is the frame of whoever called get_name(); its module's
    # __file__ is the path we want (depth must stay exactly one level).
    caller_info = inspect.stack()[1]
    caller_module = inspect.getmodule(caller_info.frame)
    return os.path.realpath(caller_module.__file__)
def get_logger(fullLocation):
    """Create a DEBUG-level logger that writes (mode 'w') to ``fullLocation``.

    Parameters
    ----------
    fullLocation : str
        Log file path, with or without directory components.  Missing
        directories are created on demand; a bare file name is written to
        the current working directory.

    Returns
    -------
    logging.Logger
        Logger named after the last path component of ``fullLocation``.
    """
    dirName = os.path.dirname(fullLocation)
    # Only create directories when there is a directory component: the
    # original unconditionally called os.makedirs('') for bare file names,
    # which raises FileNotFoundError.
    if dirName and not os.path.isdir(dirName):
        os.makedirs(dirName)
    # str.split never raises on a string, so the original try/except (with a
    # bare except) was dead code; the last '/'-separated component names the
    # logger.  Assumes fullLocation is a str.
    loggerName = fullLocation.split("/")[-1]
    logger = logging.getLogger(loggerName)
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(fullLocation, 'w')
    # NOTE(review): like the original, repeated calls with the same name keep
    # appending handlers to the same logger object.
    logger.addHandler(handler)
    return logger
5bd8d6cfd7ac2c57991b3a3f7cab0ca361865dcb | Python | MorS25/ARDroneIPN | /src/func_control.py | UTF-8 | 1,696 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python
import roslib
import rospy
roslib.load_manifest('ardrone_test')
# Import the messages we're interested in
from ardrone_test.msg import Control
direc = [0]
mag = [0.0]
def ReceiveData(data):
if data.direccion == 1:
direc[0] = 1
mag[0] = data.magnitud
elif data.direccion == 2:
direc[0] = 2
mag[0] = data.magnitud
elif data.direccion == 3:
direc[0] = 3
mag[0] = data.magnitud
elif data.direccion == 4:
direc[0] = 4
mag[0] = data.magnitud
elif data.direccion == 5:
direc[0] = 5
mag[0] = data.magnitud
elif data.direccion == 6:
direc[0] = 6
mag[0] = data.magnitud
# Initialise the ROS node and subscribe to the intermediate control topic;
# ReceiveData fills the shared direc/mag cells from incoming Control messages.
rospy.init_node('func_control')
sub_Navdata = rospy.Subscriber('/ardrone/ctrl_inter', Control, ReceiveData)
# Poll once per second; report the pending flight command, then clear it by
# rebinding the module-level `direc` to a fresh [0].
r = rospy.Rate(1)
while not rospy.is_shutdown():
    if direc[0] == 1:
        # 1: fly right
        print "La direccion del vuelo sera derecha"
        print "La magnitud del vuelo sera: {0:.3f}".format(mag[0]) + "\n"
        direc = [0]
    elif direc[0] == 2:
        # 2: fly left
        print "La direccion del vuelo sera izquierda"
        print "La magnitud del vuelo sera: {0:.3f}".format(mag[0]) + "\n"
        direc = [0]
    elif direc[0] == 3:
        # 3: fly forward
        print "La direccion del vuelo sera adelante"
        print "La magnitud del vuelo sera: {0:.3f}".format(mag[0]) + "\n"
        direc = [0]
    elif direc[0] == 4:
        # 4: fly backward
        print "La direccion del vuelo sera atras"
        print "La magnitud del vuelo sera: {0:.3f}".format(mag[0]) + "\n"
        direc = [0]
    elif direc[0] == 5:
        # 5: clockwise turn
        print "La direccion del vuelo sera giro horario"
        print "La magnitud del vuelo sera: {0:.3f}".format(mag[0]) + "\n"
        direc = [0]
    elif direc[0] == 6:
        # 6: counter-clockwise turn
        print "La direccion del vuelo sera giro antihorario"
        print "La magnitud del vuelo sera: {0:.3f}".format(mag[0]) + "\n"
        direc = [0]
    r.sleep()
| true |
97abfbeb65a5241f732a8b31ca693c326ec9ddcb | Python | Web-Dev-Collaborative/Medium-API-Clone | /Level 02 (Modularization)/api/comments.py | UTF-8 | 523 | 2.671875 | 3 | [] | no_license | from flask import Blueprint, request
bp = Blueprint("comments", __name__, url_prefix="/comments")
@bp.route("", methods=["POST"])
def comments():
    """Handle POST requests that create a new comment (stub implementation)."""
    return "create comment!"
@bp.route("/<id>", methods=["GET", "PUT", "DELETE"])
def comment(id):
    """Manage a single comment (stub implementation).

    The route captures ``<id>``, which Flask passes as a keyword argument,
    so the view must accept it: the original zero-argument signature made
    every request to this endpoint raise TypeError at dispatch time.
    """
    if request.method == "PUT":
        return "edit comment!"
    elif request.method == "DELETE":
        return "delete comment!"
    else:
        return "comment!"
| true |
96a38cbb8c202df56889686609921d17f90c2101 | Python | Baltimer/Ejercicios_Python | /POO/tarjetaPrepago.py | UTF-8 | 1,062 | 2.78125 | 3 | [] | no_license | #coding = utf-8
class tarjetaPrepago:
    """Prepaid phone card: owner data, remaining balance and total consumption.

    Rates: SMS cost 0.09 per message; calls cost 0.15 set-up plus 0.01 per
    second of conversation.
    """

    def __init__(self, numeroTelefono, nif, saldo, consumo):
        self.numeroTelefono = numeroTelefono  # phone number
        self.nif = nif                        # owner's tax id
        self.saldo = saldo                    # remaining balance
        self.consumo = consumo                # accumulated consumption

    def __repr__(self):
        # str.format handles numeric saldo/consumo; the original '+'
        # concatenation raised TypeError for non-string values.
        return "{}, {}, {}, {}".format(self.numeroTelefono, self.nif, self.saldo, self.consumo)

    def setNumeroTelefono(self, numeroTelefono):
        # All setters originally took no argument and assigned an undefined
        # name (NameError on every call); they now accept the new value.
        self.numeroTelefono = numeroTelefono

    def getNumeroTelefono(self):
        return self.numeroTelefono

    def setNif(self, nif):
        self.nif = nif

    def getNif(self):
        return self.nif

    def setSaldo(self, saldo):
        self.saldo = saldo

    def getSaldo(self):
        return self.saldo

    def setConsumo(self, consumo):
        self.consumo = consumo

    def getConsumo(self):
        return self.consumo

    def ingresarSaldo(self, saldo):
        """Top up the card balance by `saldo`."""
        self.saldo += saldo

    def enviarMensaje(self, mensajes):
        """Send `mensajes` SMS at 0.09 each if the balance covers them."""
        coste = mensajes * 0.09
        if self.saldo >= coste:
            self.saldo -= coste
            self.consumo += coste  # track spending (was never recorded)
        else:
            print("No dispones de suficiente saldo.")

    def realizarLlamada(self, segundos):
        """Charge a call of `segundos` seconds: 0.15 set-up + 0.01 per second.

        NOTE(review): like the original, this does not check the balance
        first, so saldo can go negative.
        """
        coste = 0.15 + segundos * 0.01
        self.saldo -= coste
        self.consumo += coste  # resolves the original 'PENDIENTE ACTUALIZAR CONSUMO'

    def consultarTarjeta(self):
        """Print the card details; returns None (as the original did)."""
        print(self)
2bb0ed372c2a6b852555bbd3af3808ae52b842c5 | Python | gmnamra/parking_spot_public | /analyzecars.py | UTF-8 | 3,613 | 2.546875 | 3 | [] | no_license | import argparse
import os
import requests
from fetchandextract import fetch_first_frame
from hascar import process, compare
from pathlib import Path
from spot import SpotObserver, pState
from common import __downloads_folder__, __base_url__
class analyzecars:
    """Walk a range of video segments, detect whether a car occupies the spot
    in each segment's first frame, and feed the results to a SpotObserver."""

    def __init__(self, base_url, index_url, first_index, last_index, compare_process=False):
        """
        Parameters
        ----------
        base_url : str
            Remote base URL; the segment index is fetched from '<base_url>.txt'
            when no usable local index exists.
        index_url : str
            Path to an optional local index file (one '<timestamp>.ts' per line).
        first_index, last_index : int or str
            Inclusive timestamp range of segments to process.
        compare_process : bool
            Stored on the instance; not otherwise used in this routine.
        """
        self.base_url = base_url
        self.index_url = index_url
        self.cwd = os.getcwd()
        # The original left self.listing unset when the local index was
        # missing, crashing on the len() check below.
        self.listing = []
        if Path(index_url).exists():
            # 'with' closes the handle (the original leaked an open 'r+' file).
            with open(index_url, "r") as fo:
                self.listing = fo.readlines()
        if len(self.listing) == 0:
            r = requests.get(self.base_url + '.txt')
            decoded_content = r.content.decode('utf-8')
            self.listing = decoded_content.splitlines()
        # Build the list of segment files to process (inclusive range).
        # Each index line is '<timestamp>.ts'; strip the 3-char extension.
        self.first = int(first_index)
        self.last = int(last_index)
        self.nlisting = list(map(lambda x: int(x.rstrip()[:-3]), self.listing))
        self.nbatch = [x for x in self.nlisting if x >= self.first and x <= self.last]
        self.batch = list(map(lambda x: str(x) + '.ts', self.nbatch))
        self.compare_process = compare_process
        # Instantiate a spot observer
        self.pp = SpotObserver()
        dwp = Path(__downloads_folder__)
        if not dwp.exists():
            dwp.mkdir()
        last_out = []
        for idx, file in enumerate(self.batch):
            down_info = fetch_first_frame(file)
            if down_info[0]:
                cur_out = process(down_info[1], False)
                # First successful detection: nothing to compare against yet.
                # (The original tested idx == 0, which mishandled the case
                # where the very first download failed.)
                if not last_out:
                    self.pp.update(cur_out[0], self.nbatch[idx])
                    last_out = cur_out
                    continue
                # Verify / validate using the previous detection.
                compare_result = compare(cur_out, last_out)
                if compare_result:
                    if cur_out[0] == last_out[0]:
                        # Similar frames, identical detections: accept as-is.
                        self.pp.update(cur_out[0], self.nbatch[idx])
                    else:
                        # Similar frames but conflicting detections: defer to
                        # the earlier result.
                        self.pp.update(last_out[0], self.nbatch[idx])
                else:
                    if cur_out[0] != last_out[0]:
                        # Dissimilar frames with a new detection result: accept
                        # the new one.  (The original updated with last_out[0]
                        # here, contradicting its own 'accept the new' comment.)
                        self.pp.update(cur_out[0], self.nbatch[idx])
                    else:
                        # Dissimilar frames, identical detections: either value
                        # works since cur_out[0] == last_out[0].
                        self.pp.update(last_out[0], self.nbatch[idx])
                last_out = cur_out
        self.pp.reportAll()
def main():
    """Parse the CLI arguments and analyze the requested range of segments."""
    parser = argparse.ArgumentParser(description='analyze-cars')
    parser.add_argument('--index', '-i', required=True, help='Index File ')
    parser.add_argument('--start', '-s', required=True, help='Starting time stamp')
    parser.add_argument('--end', '-e', required=True, help='Last time stamp')
    opts = parser.parse_args()
    analyzecars(__base_url__, opts.index, opts.start, opts.end)
# Script entry point.
if __name__ == '__main__':
    main()
# def fetch_frames(index_array, start_index, end_index, sampling=None):
| true |